├── .gitignore ├── .net ├── .dockerignore ├── Dockerfile └── docker-compose.yml ├── README.md ├── ceph ├── deploy.dev.sh └── test │ ├── .gitignore │ ├── main.py │ └── requirements.txt ├── dkron ├── build │ ├── Dockerfile │ ├── build.sh │ └── requirements.txt └── deploy │ ├── .gitignore │ └── docker-compose.yml ├── docker ├── .gitignore └── script │ ├── docker_bash.sh │ └── docker_node_ls_alias.sh ├── dts ├── README.md └── seatunnel │ ├── .gitignore │ ├── Dockerfile │ ├── build.sh │ ├── config │ └── plugin_config │ ├── deploy │ ├── docker-compose.yml │ └── examples │ │ ├── fake_to_console.conf │ │ └── v2.streaming.fake.conf │ ├── docker-entrypoint.sh │ ├── maven │ └── settings.xml │ └── plugins │ └── README.md ├── elk └── clean-data.sh ├── gitlab ├── README.md ├── build │ ├── .gitignore │ ├── 12.10.8-ce.0-fixed │ │ └── embedded │ │ │ └── service │ │ │ └── gitlab-rails │ │ │ └── lib │ │ │ └── gitlab │ │ │ └── auth │ │ │ └── ldap │ │ │ ├── access.rb │ │ │ └── config.rb │ ├── Dockerfile │ ├── build.sh │ └── patch │ │ ├── gitlab-12.10.14-ce.0.patch │ │ ├── gitlab-12.10.8-ce.0.patch │ │ ├── gitlab-13.12.15-ce.0.patch │ │ ├── gitlab-15.11.13-ce.0.patch │ │ ├── gitlab-15.4.4-ce.0.patch │ │ ├── gitlab-15.4.6-ce.0.patch │ │ └── gitlab-16.11.10-ce.0.patch ├── gitlab-runner │ ├── Dockerfile.pipeline │ ├── README.md │ ├── build.sh │ └── docker-compose.yml ├── gitlab │ ├── docker-compose.yml │ ├── start.sh │ └── upgrade.sh └── omnibus-gitlab │ └── docker │ ├── .dockerignore │ ├── Dockerfile │ ├── README.md │ ├── RELEASE │ ├── assets │ ├── download-package │ ├── gitlab.rb │ ├── setup │ ├── sshd_config │ ├── update-permissions │ └── wrapper │ ├── docker-compose.yml │ └── marathon.json ├── gpdb ├── README.md ├── build-ce.sh ├── build-ce │ ├── Dockerfile │ ├── entrypoint.sh │ └── etc │ │ ├── .bashrc │ │ ├── etc_cgconfig.d_gpdb.conf │ │ ├── etc_security_limits.conf │ │ ├── etc_selinux_config │ │ └── etc_sysctl.conf ├── build.sh ├── build │ ├── Dockerfile │ ├── entrypoint.sh │ └── etc │ │ ├── .bashrc │ │ ├── etc_cgconfig.d_gpdb.conf │ │ ├── etc_security_limits.conf │ │ ├── etc_selinux_config │ │ └── etc_sysctl.conf ├── clean.sh ├── cron │ ├── .gitignore │ ├── READMD.md │ ├── archive_partition.py │ ├── gpfix_ao_checksum.sh │ ├── gpfix_io_error.sh │ ├── send_alert.py │ ├── send_alert.sh │ └── vacuum_analyse.py ├── deploy.sh ├── deploy │ ├── bin │ │ ├── .env │ │ ├── docker-compose-master.yml │ │ ├── docker-compose-segment.1.yml │ │ ├── docker-compose-segment.2.yml │ │ ├── docker-compose-segment.3.yml │ │ └── docker-compose-standby.yml │ └── config │ │ ├── gpinitsystem_config │ │ ├── gpstop.sh │ │ ├── hostlist │ │ └── seg_hosts └── init.sh ├── grafana └── build │ ├── Dockerfile │ ├── build.sh │ └── patch │ └── 0007-patch-append-data-query-headers-for-metrics-collecti.patch ├── harbor ├── sync.sh └── upgrade.sh ├── helm └── postgres │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── deployment.yaml │ ├── hpa.yaml │ ├── ingress.yaml │ ├── service.yaml │ ├── serviceaccount.yaml │ └── tests │ │ └── test-connection.yaml │ └── values.yaml ├── kafka ├── admin │ ├── docker-compose-kafka-webui.yml │ ├── kowl-nginx.conf │ └── kowl.yaml ├── cdc │ ├── Dockerfile │ ├── build.sh │ ├── entrypoint.sh │ └── src │ │ ├── .gitignore │ │ ├── main.py │ │ ├── metrics.py │ │ ├── requirements.txt │ │ └── sanitization.py ├── deploy.dev.sh ├── deploy.dev │ ├── bin │ │ ├── docker-compose-kafka.yml │ │ ├── docker-compose-zookeeper.yml │ │ └── start.sh │ └── cron │ │ ├── increase-replication-factor.sh 
│ │ ├── kafka-brokers-migrate.sh │ │ ├── kafka-connect-change-log-level.sh │ │ ├── kafka-connect-restart-failed-tasks.sh │ │ ├── kafka-topics-delete.sh │ │ ├── kafka-topics-illegal.sh │ │ └── kafka-topics-replication-change.sh ├── jmx_exporter │ ├── .gitignore │ ├── kafka-agent-all.yml │ ├── kafka-agent.yml │ ├── kafka-connect-agent.yml │ └── zookeeper-agent.yaml └── kafka-connect │ ├── .gitignore │ ├── Dockerfile │ └── build.sh ├── keepalived ├── kettle ├── Dockerfile ├── README.md ├── build.sh └── maven │ └── settings-docker.xml ├── kong ├── config │ ├── declare-config.yml.template │ ├── kong.conf.template │ ├── my-server.kong.conf │ └── nginx_kong.lua └── docker-compose.yml ├── minio └── downloader │ ├── main.py │ └── requirements.txt ├── mongo ├── deploy.dev.sh └── deploy.dev │ ├── docker-compose.yml │ └── setup │ └── mongo-init.js ├── mssql ├── bin │ └── docker-compose.yaml ├── cron │ └── backup.sh └── replication │ ├── .gitignore │ ├── main.py │ └── requirements.txt ├── mysql └── docker-compose.yml ├── nginx ├── .gitignore ├── Dockerfile ├── check_ssl_expiration_time.sh ├── config │ └── nginx.conf └── html │ └── 502.html ├── node ├── .gitignore ├── Dockerfile ├── build.sh ├── dist │ ├── app.js │ └── run.sh ├── package.json └── test │ ├── export-excel.html │ ├── export-excel.js │ └── fn_array_map.js ├── parquet └── diff-parquet │ ├── .gitignore │ ├── convert_tsms.py │ ├── main.py │ ├── requirements.txt │ └── select_by_lsn.py ├── pgadmin4 ├── Dockerfile ├── build.sh ├── docker-compose-ssl.yml ├── docker-compose.yml ├── patch │ ├── pgadmin5-support-gpdb6.patch │ └── pgadmin8-support-gpdb6.patch └── start.sh ├── pgpool ├── .gitignore ├── build.sh ├── build │ ├── Dockerfile │ ├── bin │ │ ├── confd │ │ ├── dumb-init │ │ └── entrypoint.sh │ └── config │ │ ├── confd │ │ ├── conf.d │ │ │ └── pgpool.toml │ │ └── templates │ │ │ └── pgpool.tmpl │ │ └── pgpool │ │ ├── pcp.conf │ │ └── pool_passwd └── docker-compose.yml ├── playground ├── python │ ├── .gitignore │ ├── fill_array_by_column.py │ ├── fill_array_by_row.py │ ├── parse_pdf.py │ ├── webserver_fastapi.py │ ├── webserver_golang.go │ ├── webserver_http_server.py │ └── 题库+答案.pdf ├── shell │ └── show_progress.sh └── sql │ └── 24point.sql ├── postgres ├── build.sh ├── build │ ├── Dockerfile │ ├── bin │ │ ├── entrypoint.sh │ │ ├── reinit_script.sh │ │ └── restore_script.sh │ ├── config │ │ ├── patronictl.yaml │ │ ├── patronictl_pgbackrest.yaml │ │ └── patronictl_pgbackrest_restore.yaml │ └── dict │ │ └── jieba_user.dict ├── deploy.dev.sh ├── deploy.dev │ ├── bin │ │ ├── .env │ │ ├── docker-compose-pg01.yml │ │ └── docker-compose-pg02.yml │ ├── cron │ │ └── .env │ ├── init │ │ └── init.sql │ └── keepalived │ │ ├── docker-compose-keepalived-master.yml │ │ ├── docker-compose-keepalived-standby.yml │ │ └── volumes │ │ ├── check.sh │ │ ├── pgkeepa_backup.conf │ │ └── pgkeepa_master.conf ├── docs │ ├── .gitignore │ ├── PostgreSQL数据库系统优化.md │ └── images │ │ ├── graphics-bloat.png │ │ ├── graphics-vacuum.png │ │ ├── optimizer-execution-plan-tuning.png │ │ └── optimizer-working-principle.png ├── pg_upgrade │ ├── Dockerfile │ ├── build.sh │ └── docker-upgrade ├── recovery.sh ├── script │ ├── archive2file.sh │ ├── archive2gp.sh │ ├── check_logical_replication.py │ ├── delete_table.py │ ├── pg_badger.sh │ ├── pg_badger_cron.sh │ ├── pg_basebackup.sh │ ├── pg_basebackup_cron.sh │ ├── pg_dump.sh │ ├── pg_dump_cron.sh │ └── setvars.sh ├── test │ └── node.js │ │ ├── .gitignore │ │ ├── package.json │ │ └── pg_return_bigint_as_string.js └── upgrade.dev.sh ├── 
prometheus ├── dashboard │ ├── Greenplum-1696815250007.json │ ├── Kafka Cluster-1696817603382.json │ ├── MinIO Overview-1696817592132.json │ ├── PostgreSQL Cluster-1696817561134.json │ └── Redis Cluster-1696817578798.json ├── deploy.dev.sh ├── deploy.dev │ ├── config │ │ ├── alertmanager │ │ │ ├── alertmanager.yml │ │ │ └── config │ │ │ │ └── wechat.tmpl │ │ ├── greenplum_exporter │ │ │ └── custom-queries.yaml │ │ ├── kafka-lag-exporter │ │ │ ├── application.conf │ │ │ └── logback.xml │ │ ├── node-exporter │ │ │ └── smartmon.sh │ │ ├── postgres_exporter │ │ │ └── custom-queries.yaml │ │ ├── prometheus │ │ │ ├── common-alert-rules.yml │ │ │ ├── flink-alert-rules.yml │ │ │ ├── jvm-alert-rules.yml │ │ │ ├── kafka-alert-rules.yml │ │ │ ├── mongo-alert-rules.yml │ │ │ ├── postgres-alert-rules.yml │ │ │ └── prometheus.yml │ │ └── telegraf │ │ │ └── telegraf.conf │ ├── docker-compose-node.yml │ ├── docker-compose-prometheus.yml │ ├── docker-compose-pushgateway.yml │ ├── reload.sh │ └── start.sh └── proxy-webhook │ ├── .gitignore │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── go.mod │ ├── go.sum │ └── main.go ├── rabbitmq └── connect │ ├── .gitignore │ ├── config │ ├── __init__.py │ └── dev.py │ ├── main.py │ └── requirements.txt ├── redis └── docker-compose.yml ├── repomanager ├── .gitignore ├── conf │ ├── nginx │ │ ├── nexus3.conf │ │ └── verdaccio.conf │ └── verdaccio │ │ └── config.yaml ├── docker-compose.yml └── start.sh ├── samba ├── docker-compose-client.yml ├── docker-compose-server.yml └── test │ ├── Dockerfile │ ├── build.sh │ ├── entrypoint.sh │ └── src │ ├── .gitignore │ ├── main.py │ ├── main2.py │ └── requirements.txt ├── spark ├── build.sh ├── build │ ├── Dockerfile │ ├── conf │ │ ├── log4j.properties │ │ └── spark-defaults.conf │ ├── entrypoint.sh │ └── pom.xml ├── clean.sh ├── cron.sh ├── deploy.dev.sh ├── deploy.dev │ ├── docker-compose-master.yml │ ├── docker-compose-worker.yml │ ├── start.sh │ ├── submit.sh │ └── tasks │ │ └── python │ │ └── pi.py └── init.sh ├── streampark ├── bin │ ├── .env │ └── docker-compose.yaml └── build │ ├── Dockerfile │ └── build.sh ├── telegraf ├── Dockerfile ├── Makefile ├── config │ └── telegraf.conf └── patch │ └── sqlserver.patch ├── tensorflow ├── Dockerfile ├── build.sh └── entrypoint.sh ├── toolbox ├── Dockerfile ├── README.md ├── bcc.sh ├── build.sh └── smart_report.sh ├── xampp ├── Dockerfile ├── build.sh ├── etc │ ├── httpd.conf │ └── php.ini └── www │ ├── Josephu.php │ ├── doubleLink.php │ ├── index.php │ ├── jisuan.php │ ├── run.sh │ ├── singleLink.php │ └── stack.php └── xray ├── build ├── Dockerfile ├── conf │ ├── nginx │ │ └── example.org.conf.tmpl │ └── xray │ │ ├── config_client.json │ │ └── config_server.json └── entrypoint.sh ├── docker-compose.yml └── start.sh /.gitignore: -------------------------------------------------------------------------------- 1 | pkgs 2 | jars 3 | inventory* 4 | .vscode 5 | .DB_Store 6 | .aider* 7 | -------------------------------------------------------------------------------- /.net/.dockerignore: -------------------------------------------------------------------------------- 1 | bin 2 | obj 3 | Dockerfile 4 | -------------------------------------------------------------------------------- /.net/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/dotnet/core/sdk:3.1 AS build-env 2 | WORKDIR /app 3 | 4 | # Copy csproj and restore as distinct layers 5 | COPY *.csproj ./ 6 | RUN dotnet restore 7 | 8 | # Copy everything else and 
build 9 | COPY . ./ 10 | RUN dotnet publish -c Release -r linux-x64 --self-contained false --no-restore 11 | 12 | # Build runtime image 13 | FROM mcr.microsoft.com/dotnet/core/sdk:3.1 14 | WORKDIR /app 15 | COPY --from=build-env /app/bin/Release/netcoreapp3.1/linux-x64/ . 16 | ENTRYPOINT ["dotnet", "PrintAgent.WebApp.dll"] 17 | -------------------------------------------------------------------------------- /.net/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | 4 | print-agent: 5 | image: registry.inventec/development/print-agent:${TAG} 6 | container_name: print-agent 7 | hostname: print-agent 8 | network_mode: host 9 | volumes: 10 | - /dev/usb:/dev/usb 11 | - /etc/localtime:/etc/localtime:ro 12 | privileged: true 13 | restart: always 14 | cpu_count: 1 15 | mem_limit: 1g 16 | 17 | # docker run -ti --name print-agent \ 18 | # --privileged -v /dev/usb:/dev/usb \ 19 | # registry.inventec/development/print-agent:${TAG} 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Dockerfile -------------------------------------------------------------------------------- /ceph/test/.gitignore: -------------------------------------------------------------------------------- 1 | ceph-demo -------------------------------------------------------------------------------- /ceph/test/main.py: -------------------------------------------------------------------------------- 1 | import boto.s3.connection 2 | access_key = 'GW7WXHQ66LKUKHXHAKPM' 3 | secret_key = 'zqAdafdROLbRYENvM5CCUNANEN8oeetGCX797zBn' 4 | 5 | conn = boto.connect_s3( 6 | aws_access_key_id=access_key, 7 | aws_secret_access_key=secret_key, 8 | host='10.191.7.11', 9 | port=7480, 10 | is_secure=False, 11 | calling_format=boto.s3.connection.OrdinaryCallingFormat(), 12 | ) 13 | bucket = conn.create_bucket('spi-rejudge') 14 | for bucket in conn.get_all_buckets(): 15 | print "{name} {created}".format( 16 | name=bucket.name, 17 | created=bucket.creation_date, 18 | ) -------------------------------------------------------------------------------- /ceph/test/requirements.txt: -------------------------------------------------------------------------------- 1 | boto==2.49.0 -------------------------------------------------------------------------------- /dkron/build/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | [[ $# -lt 1 ]] && { 6 | echo "Usage: $0 proxy:port" 7 | exit 1 8 | } 9 | 10 | proxy=$1 11 | 12 | REGISTRY=registry.inventec 13 | DKRON_VERSION=3.2.3 14 | OPENJDK_VERSION=8u342-jre-slim-buster 15 | 16 | docker build --rm -f Dockerfile \ 17 | -t ${REGISTRY}/infra/dkron-executor-docker:${DKRON_VERSION} \ 18 | --build-arg http_proxy=http://${proxy} \ 19 | --build-arg https_proxy=http://${proxy} \ 20 | --build-arg DKRON_VERSION=${DKRON_VERSION} \ 21 | --build-arg OPENJDK_VERSION=${OPENJDK_VERSION} \ 22 | . 
23 | docker push ${REGISTRY}/infra/dkron-executor-docker:${DKRON_VERSION} 24 | -------------------------------------------------------------------------------- /dkron/build/requirements.txt: -------------------------------------------------------------------------------- 1 | psycopg2-binary -------------------------------------------------------------------------------- /dkron/deploy/.gitignore: -------------------------------------------------------------------------------- 1 | *.json -------------------------------------------------------------------------------- /dkron/deploy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | worker: 4 | container_name: dkron 5 | hostname: dkron 6 | image: dkron/dkron:4.0.0-beta4-light 7 | restart: always 8 | cpu_count: 1 9 | mem_limit: 2g 10 | logging: 11 | driver: "json-file" 12 | options: 13 | max-size: 2m 14 | network_mode: host 15 | # environment: 16 | # TZ: Asia/Shanghai 17 | volumes: 18 | # - dkron-resource:/opt/dkron-resource:rw 19 | - /etc/localtime:/etc/localtime:ro 20 | # - ./volume/dkron/data:/var/lib/dkron/data 21 | - /var/run/docker.sock:/var/run/docker.sock 22 | command: | 23 | agent 24 | --server 25 | --log-level debug 26 | --tag docker-cli=true 27 | --tag python=true 28 | --tag kettle=worker 29 | --bind-addr localhost:6231 30 | --rpc-port 6232 31 | --enable-prometheus 32 | --disable-usage-stats 33 | --bootstrap-expect=1 34 | --http-addr :6230 35 | --data-dir /var/lib/dkron/data 36 | 37 | # networks: 38 | # default: 39 | # external: true 40 | # name: infra 41 | 42 | # volumes: 43 | # dkron-resource: 44 | # name: dkron-resource 45 | 46 | # curl -v 'https://infra-datajob.ipt.inventec.net/v1/jobs' > backup.json 47 | # curl -v http://localhost:6230/v1/restore --form 'file=@backup.json' 48 | -------------------------------------------------------------------------------- /docker/.gitignore: -------------------------------------------------------------------------------- 1 | test -------------------------------------------------------------------------------- /docker/script/docker_bash.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -z $1 ]; then 4 | echo 'No container specified...' 5 | echo 'Usage: '$0' gp|spark|cassandra' 6 | exit 7 | fi 8 | 9 | container='' 10 | if [ "$1" = "gp" ]; 11 | then container='gpdb:' 12 | fi 13 | if [ "$1" = "spark" ]; 14 | then container='spark:' 15 | fi 16 | if [ "$1" = "cassandra" ]; 17 | then container='cassandra:' 18 | fi 19 | 20 | if [ -z $container ]; then 21 | echo 'Unknown container specified...' 
22 | echo 'Usage: '$0' gp|spark|cassandra' 23 | exit 24 | fi 25 | 26 | docker exec -it `docker ps|grep $container|sed 's/\s.*//'` bash -------------------------------------------------------------------------------- /docker/script/docker_node_ls_alias.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 获取节点列表 4 | nodes=$(docker node ls --format "{{.Hostname}}") 5 | 6 | # 循环遍历节点 7 | for node in $nodes; do 8 | # 执行 docker inspect node 命令获取节点的alias 9 | alias=$(docker inspect --format "{{.Spec.Labels.alias}}" $node) 10 | 11 | # 输出节点的alias 12 | echo "Node ID: $node, Alias: $alias" 13 | done -------------------------------------------------------------------------------- /dts/seatunnel/.gitignore: -------------------------------------------------------------------------------- 1 | *.jar -------------------------------------------------------------------------------- /dts/seatunnel/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.inventec/proxy/flink:1.18.1-scala_2.12-java17 2 | 3 | COPY docker-entrypoint.sh / 4 | 5 | ARG SEATUNNEL_VERSION 6 | 7 | ENV SEATUNNEL_HOME="/opt/seatunnel" 8 | 9 | RUN wget https://dlcdn.apache.org/seatunnel/${SEATUNNEL_VERSION}/apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz \ 10 | && tar -xzvf apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz \ 11 | && mv apache-seatunnel-${SEATUNNEL_VERSION} ${SEATUNNEL_HOME} \ 12 | && rm -rf apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz 13 | 14 | WORKDIR ${SEATUNNEL_HOME} 15 | 16 | COPY config config 17 | 18 | COPY maven /root/.m2/ 19 | 20 | ENV M2_HOME="/root/.m2" 21 | 22 | RUN sh bin/install-plugin.sh ${SEATUNNEL_VERSION} 23 | 24 | COPY plugins plugins 25 | -------------------------------------------------------------------------------- /dts/seatunnel/build.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | cd `dirname $0` 3 | 4 | PROXY=http://10.190.81.209:3389/ 5 | SEATUNNEL_VERSION=2.3.11 6 | 7 | docker build --rm -f Dockerfile \ 8 | -t registry.inventec/infra/seatunnel:${SEATUNNEL_VERSION} \ 9 | --build-arg http_proxy=${PROXY} \ 10 | --build-arg https_proxy=${PROXY} \ 11 | --build-arg no_proxy=localhost,127.0.0.1,nexus.itc.inventec.net \ 12 | --build-arg SEATUNNEL_VERSION=${SEATUNNEL_VERSION} \ 13 | . 14 | 15 | docker push registry.inventec/infra/seatunnel:${SEATUNNEL_VERSION} 16 | -------------------------------------------------------------------------------- /dts/seatunnel/config/plugin_config: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | # 17 | # 18 | # This mapping is used to resolve the Jar package name without version (or call artifactId) 19 | # 20 | # corresponding to the module in the user Config, helping SeaTunnel to load the correct Jar package. 21 | # Don't modify the delimiter " -- ", just select the plugin you need 22 | --connectors-v2-- 23 | connector-cdc-sqlserver 24 | connector-cdc-postgres 25 | connector-jdbc 26 | connector-kafka 27 | connector-file-s3 28 | --end-- 29 | -------------------------------------------------------------------------------- /dts/seatunnel/deploy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | seatunnel: 3 | image: registry.inventec/infra/seatunnel:2.3.9 4 | container_name: seatunnel 5 | volumes: 6 | - ./examples:/opt/tasks 7 | environment: 8 | FLINK_REST_ADDRESS: 10.191.7.13 9 | FLINK_REST_PORT: 30651 10 | command: tail -f /dev/null 11 | restart: always 12 | 13 | networks: 14 | default: 15 | external: true 16 | name: infra 17 | -------------------------------------------------------------------------------- /dts/seatunnel/deploy/examples/fake_to_console.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | # 17 | ###### 18 | ###### This config file is a demonstration of streaming processing in seatunnel config 19 | ###### 20 | 21 | env { 22 | job.mode = "STREAMING" 23 | parallelism = 2 24 | } 25 | 26 | source { 27 | # This is a example source plugin **only for test and demonstrate the feature source plugin** 28 | FakeSource { 29 | result_table_name = "fake" 30 | row.num = 16 31 | schema = { 32 | fields { 33 | name = "string" 34 | age = "int" 35 | } 36 | } 37 | } 38 | 39 | # If you would like to get more information about how to configure seatunnel and see full list of source plugins, 40 | # please go to https://seatunnel.apache.org/docs/connector-v2/source 41 | } 42 | 43 | transform { 44 | Copy { 45 | source_table_name = "fake" 46 | result_table_name = "fake1" 47 | fields { 48 | name1 = name 49 | } 50 | } 51 | # If you would like to get more information about how to configure seatunnel and see full list of transform plugins, 52 | # please go to https://seatunnel.apache.org/docs/category/transform-v2 53 | } 54 | 55 | sink { 56 | Console { 57 | source_table_name = "fake1" 58 | } 59 | # If you would like to get more information about how to configure seatunnel and see full list of sink plugins, 60 | # please go to https://seatunnel.apache.org/docs/connector-v2/sink 61 | } 62 | -------------------------------------------------------------------------------- /dts/seatunnel/deploy/examples/v2.streaming.fake.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | # 17 | ###### 18 | ###### This config file is a demonstration of streaming processing in SeaTunnel config 19 | ###### 20 | 21 | env { 22 | # You can set SeaTunnel environment configuration here 23 | parallelism = 1 24 | job.mode = "STREAMING" 25 | checkpoint.interval = 2000 26 | } 27 | 28 | source { 29 | # This is a example source plugin **only for test and demonstrate the feature source plugin** 30 | FakeSource { 31 | parallelism = 1 32 | result_table_name = "fake" 33 | row.num = 16 34 | schema = { 35 | fields { 36 | name = "string" 37 | age = "int" 38 | } 39 | } 40 | } 41 | 42 | # If you would like to get more information about how to configure SeaTunnel and see full list of source plugins, 43 | # please go to https://seatunnel.apache.org/docs/category/source-v2 44 | } 45 | 46 | transform { 47 | FieldMapper { 48 | source_table_name = "fake" 49 | result_table_name = "fake1" 50 | field_mapper = { 51 | age = age 52 | name = new_name 53 | } 54 | } 55 | } 56 | 57 | sink { 58 | Console { 59 | source_table_name = "fake1" 60 | } 61 | 62 | # If you would like to get more information about how to configure SeaTunnel and see full list of sink plugins, 63 | # please go to https://seatunnel.apache.org/docs/category/sink-v2 64 | } 65 | -------------------------------------------------------------------------------- /dts/seatunnel/plugins/README.md: -------------------------------------------------------------------------------- 1 | # Introduction to the plugins directory 2 | 3 | This directory is used to store third-party jar packages that connectors depend on at runtime, such as JDBC drivers. 4 | 5 | !!!Attention: If you use the Zeta Engine, please add the jars to the `$SEATUNNEL_HOME/lib/` directory on each node. 6 | 7 | ## Directory structure 8 | 9 | The jar dependencies required by a connector must be placed in the `plugins/${connector name}/lib/` directory. 10 | 11 | For example, JDBC driver jars must be placed in `${seatunnel_install_home}/plugins/jdbc/lib/` -------------------------------------------------------------------------------- /elk/clean-data.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # Variable definitions 4 | INDEX_COLLECTION=./indices 5 | TIME_AGO=$(date -d "17 days ago" +%s) 6 | 7 | echo "=======================================================" 8 | 9 | # Get index information 10 | #curl 'localhost:9200/_cat/indices?v' 11 | #curl -XPOST 'localhost:9200//_close' 12 | #curl 'localhost:9200/_cat/indices?v' 13 | 14 | for i in $(curl -s 'localhost:9200/_cat/indices' | awk '{print $3}') 15 | do 16 | # Extract the date string from the index name and compare it with the current time to pick out indices older than the cutoff (TIME_AGO) 17 | # 1) shujuguan-service-2018.03.31 => 2018.03.31 18 | indice_date=${i##shujuguan*-} 19 | # 2) 2018.03.31 => 2018-03-31 20 | indice_date=${indice_date//./-} 21 | 22 | # Skip entries that are not date-based indices 23 | if [ "$i" == ".kibana" ]; then 24 | continue 25 | fi 26 | 27 | # 3) date -d "2018-03-03" +%s 28 | indice_timestamp=$(date -d $indice_date +%s) 29 | # 4) compare indice_timestamp to TIME_AGO 30 | if [ $indice_timestamp -lt $TIME_AGO ]; then 31 | echo $i 32 | echo "$i" >> $INDEX_COLLECTION 33 | fi 34 | done 35 | 36 | if [ !
-f $INDEX_COLLECTION ]; then 37 | echo 'No historical indices matched the criteria' 38 | exit 39 | fi 40 | 41 | echo `wc -l $INDEX_COLLECTION` 42 | 43 | # Perform the delete operations 44 | for i in `cat $INDEX_COLLECTION` 45 | do 46 | uri=localhost:9200/$i?pretty 47 | echo delete $i 48 | curl -XDELETE $uri 49 | sleep 3s 50 | done 51 | 52 | # Remove the temporary file 53 | rm $INDEX_COLLECTION 54 | echo 'Cleaned up temporary files generated during script execution' -------------------------------------------------------------------------------- /gitlab/build/.gitignore: -------------------------------------------------------------------------------- 1 | 12.10.8-ce.0 -------------------------------------------------------------------------------- /gitlab/build/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG VERSION 2 | FROM registry.inventec/proxy/gitlab/gitlab-ce:${VERSION} 3 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 4 | ARG VERSION 5 | 6 | COPY patch/gitlab-${VERSION}.patch /tmp 7 | 8 | RUN apt-get update \ 9 | && apt-get install -y patch \ 10 | && rm -rf /var/lib/apt/lists/* \ 11 | && cd /opt/gitlab \ 12 | && patch -p1 < /tmp/gitlab-${VERSION}.patch 13 | -------------------------------------------------------------------------------- /gitlab/build/build.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | cd `dirname $0` 3 | 4 | PROXY=http://10.190.81.209:3389/ 5 | VERSION=12.10.14-ce.0 6 | VERSION=13.12.15-ce.0 7 | VERSION=15.4.4-ce.0 8 | VERSION=15.4.6-ce.0 9 | VERSION=15.11.13-ce.0 10 | VERSION=16.11.10-ce.0 11 | 12 | # diff -urNa ${VERSION} ${VERSION}-fixed > patch/gitlab-${VERSION}.patch 13 | 14 | docker build --rm -f Dockerfile \ 15 | -t registry.inventec/infra/gitlab/gitlab-ce:${VERSION} \ 16 | --build-arg http_proxy=${PROXY} \ 17 | --build-arg https_proxy=${PROXY} \ 18 | --build-arg VERSION=${VERSION} \ 19 | .
20 | 21 | docker push registry.inventec/infra/gitlab/gitlab-ce:${VERSION} 22 | -------------------------------------------------------------------------------- /gitlab/build/patch/gitlab-12.10.14-ce.0.patch: -------------------------------------------------------------------------------- 1 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 17:21:07.796983700 +0800 3 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 18:18:04.054508800 +0800 4 | @@ -86,8 +86,16 @@ 5 | @ldap_user ||= find_ldap_user 6 | end 7 | 8 | + # def find_ldap_user 9 | + # Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 10 | + # end 11 | def find_ldap_user 12 | - Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 13 | + found_user = Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 14 | + return found_user if found_user 15 | + 16 | + if ldap_identity 17 | + ::Gitlab::Auth::Ldap::Person.find_by_email(user.email, adapter) 18 | + end 19 | end 20 | 21 | def block_user(user, reason) 22 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 23 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:56.500506900 +0800 24 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:37.017208500 +0800 25 | @@ -39,8 +39,11 @@ 26 | _available_servers 27 | end 28 | 29 | + # def self._available_servers 30 | + # Array.wrap(servers.first) 31 | + # end 32 | def self._available_servers 33 | - Array.wrap(servers.first) 34 | + servers 35 | end 36 | 37 | def self.providers 38 | -------------------------------------------------------------------------------- /gitlab/build/patch/gitlab-12.10.8-ce.0.patch: -------------------------------------------------------------------------------- 1 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 17:21:07.796983700 +0800 3 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 18:18:04.054508800 +0800 4 | @@ -86,8 +86,16 @@ 5 | @ldap_user ||= find_ldap_user 6 | end 7 | 8 | + # def find_ldap_user 9 | + # Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 10 | + # end 11 | def find_ldap_user 12 | - Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 13 | + found_user = Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 14 | + return found_user if found_user 15 | + 16 | + if ldap_identity 17 | + ::Gitlab::Auth::Ldap::Person.find_by_email(user.email, adapter) 18 | + end 19 | end 20 | 21 | def block_user(user, reason) 22 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 23 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:56.500506900 +0800 24 | +++ 
12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:37.017208500 +0800 25 | @@ -39,8 +39,11 @@ 26 | _available_servers 27 | end 28 | 29 | + # def self._available_servers 30 | + # Array.wrap(servers.first) 31 | + # end 32 | def self._available_servers 33 | - Array.wrap(servers.first) 34 | + servers 35 | end 36 | 37 | def self.providers 38 | -------------------------------------------------------------------------------- /gitlab/build/patch/gitlab-13.12.15-ce.0.patch: -------------------------------------------------------------------------------- 1 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 17:21:07.796983700 +0800 3 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 18:18:04.054508800 +0800 4 | @@ -86,8 +86,16 @@ 5 | @ldap_user ||= find_ldap_user 6 | end 7 | 8 | + # def find_ldap_user 9 | + # Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 10 | + # end 11 | def find_ldap_user 12 | - Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 13 | + found_user = Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 14 | + return found_user if found_user 15 | + 16 | + if ldap_identity 17 | + ::Gitlab::Auth::Ldap::Person.find_by_email(user.email, adapter) 18 | + end 19 | end 20 | 21 | def block_user(user, reason) 22 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 23 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:56.500506900 +0800 24 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:37.017208500 +0800 25 | @@ -39,8 +39,11 @@ 26 | _available_servers 27 | end 28 | 29 | + # def self._available_servers 30 | + # Array.wrap(servers.first) 31 | + # end 32 | def self._available_servers 33 | - Array.wrap(servers.first) 34 | + servers 35 | end 36 | 37 | def self.providers 38 | -------------------------------------------------------------------------------- /gitlab/build/patch/gitlab-15.11.13-ce.0.patch: -------------------------------------------------------------------------------- 1 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 17:21:07.796983700 +0800 3 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 18:18:04.054508800 +0800 4 | @@ -86,8 +86,16 @@ 5 | @ldap_user ||= find_ldap_user 6 | end 7 | 8 | + # def find_ldap_user 9 | + # Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 10 | + # end 11 | def find_ldap_user 12 | - Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 13 | + found_user = Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 14 | + return found_user if found_user 15 | + 16 | + if ldap_identity 17 | + ::Gitlab::Auth::Ldap::Person.find_by_email(user.email, adapter) 18 | + end 19 | end 20 | 21 | def block_user(user, reason) 22 | diff -urNa 
12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 23 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:56.500506900 +0800 24 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:37.017208500 +0800 25 | @@ -39,8 +39,11 @@ 26 | _available_servers 27 | end 28 | 29 | + # def self._available_servers 30 | + # Array.wrap(servers.first) 31 | + # end 32 | def self._available_servers 33 | - Array.wrap(servers.first) 34 | + servers 35 | end 36 | 37 | def self.providers 38 | -------------------------------------------------------------------------------- /gitlab/build/patch/gitlab-15.4.4-ce.0.patch: -------------------------------------------------------------------------------- 1 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 17:21:07.796983700 +0800 3 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 18:18:04.054508800 +0800 4 | @@ -86,8 +86,16 @@ 5 | @ldap_user ||= find_ldap_user 6 | end 7 | 8 | + # def find_ldap_user 9 | + # Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 10 | + # end 11 | def find_ldap_user 12 | - Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 13 | + found_user = Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 14 | + return found_user if found_user 15 | + 16 | + if ldap_identity 17 | + ::Gitlab::Auth::Ldap::Person.find_by_email(user.email, adapter) 18 | + end 19 | end 20 | 21 | def block_user(user, reason) 22 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 23 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:56.500506900 +0800 24 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:37.017208500 +0800 25 | @@ -39,8 +39,11 @@ 26 | _available_servers 27 | end 28 | 29 | + # def self._available_servers 30 | + # Array.wrap(servers.first) 31 | + # end 32 | def self._available_servers 33 | - Array.wrap(servers.first) 34 | + servers 35 | end 36 | 37 | def self.providers 38 | -------------------------------------------------------------------------------- /gitlab/build/patch/gitlab-15.4.6-ce.0.patch: -------------------------------------------------------------------------------- 1 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 17:21:07.796983700 +0800 3 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 18:18:04.054508800 +0800 4 | @@ -86,8 +86,16 @@ 5 | @ldap_user ||= find_ldap_user 6 | end 7 | 8 | + # def find_ldap_user 9 | + # Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 10 | + # end 11 | def find_ldap_user 12 | - Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 13 | + found_user = 
Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 14 | + return found_user if found_user 15 | + 16 | + if ldap_identity 17 | + ::Gitlab::Auth::Ldap::Person.find_by_email(user.email, adapter) 18 | + end 19 | end 20 | 21 | def block_user(user, reason) 22 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 23 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:56.500506900 +0800 24 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:37.017208500 +0800 25 | @@ -39,8 +39,11 @@ 26 | _available_servers 27 | end 28 | 29 | + # def self._available_servers 30 | + # Array.wrap(servers.first) 31 | + # end 32 | def self._available_servers 33 | - Array.wrap(servers.first) 34 | + servers 35 | end 36 | 37 | def self.providers 38 | -------------------------------------------------------------------------------- /gitlab/build/patch/gitlab-16.11.10-ce.0.patch: -------------------------------------------------------------------------------- 1 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 17:21:07.796983700 +0800 3 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/access.rb 2020-07-12 18:18:04.054508800 +0800 4 | @@ -86,8 +86,16 @@ 5 | @ldap_user ||= find_ldap_user 6 | end 7 | 8 | + # def find_ldap_user 9 | + # Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 10 | + # end 11 | def find_ldap_user 12 | - Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 13 | + found_user = Gitlab::Auth::Ldap::Person.find_by_dn(ldap_identity.extern_uid, adapter) 14 | + return found_user if found_user 15 | + 16 | + if ldap_identity 17 | + ::Gitlab::Auth::Ldap::Person.find_by_email(user.email, adapter) 18 | + end 19 | end 20 | 21 | def block_user(user, reason) 22 | diff -urNa 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 23 | --- 12.10.8-ce.0/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:56.500506900 +0800 24 | +++ 12.10.8-ce.0-fixed/embedded/service/gitlab-rails/lib/gitlab/auth/ldap/config.rb 2020-07-12 17:23:37.017208500 +0800 25 | @@ -39,8 +39,11 @@ 26 | _available_servers 27 | end 28 | 29 | + # def self._available_servers 30 | + # Array.wrap(servers.first) 31 | + # end 32 | def self._available_servers 33 | - Array.wrap(servers.first) 34 | + servers 35 | end 36 | 37 | def self.providers 38 | -------------------------------------------------------------------------------- /gitlab/gitlab-runner/Dockerfile.pipeline: -------------------------------------------------------------------------------- 1 | FROM registry.inventec/proxy/library/alpine:latest 2 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 3 | 4 | RUN apk update && \ 5 | # 新版 docker-cli 需要额外安装 docker-cli-buildx 6 | apk add --no-cache make docker-cli docker-cli-buildx git curl && \ 7 | # 支持 CD 至 Minio 8 | # https://docs.min.io/docs/minio-client-quickstart-guide.html 9 | curl https://dl.min.io/client/mc/release/linux-amd64/mc -o /usr/local/bin/mc && \ 10 | chmod +x /usr/local/bin/mc && \ 11 | # 支持 CD 至 K8S 12 | # 
https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/ 13 | curl -L "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -o /usr/local/bin/kubectl && \ 14 | chmod +x /usr/local/bin/kubectl 15 | -------------------------------------------------------------------------------- /gitlab/gitlab-runner/README.md: -------------------------------------------------------------------------------- 1 | ``` 2 | /# gitlab-runner register 3 | Runtime platform arch=amd64 os=linux pid=138 revision=7a6612da version=13.12.0 4 | Running in system-mode. 5 | 6 | Enter the GitLab instance URL (for example, https://gitlab.com/): 7 | ???? 8 | Enter the registration token: 9 | ???? 10 | Enter a description for the runner: 11 | [515d23432676]: infra 12 | Enter tags for the runner (comma-separated): 13 | 14 | Registering runner... succeeded runner=1VsidHSA 15 | Enter an executor: custom, docker-ssh, parallels, docker-ssh+machine, kubernetes, docker, shell, ssh, virtualbox, docker+machine: 16 | docker 17 | Enter the default Docker image (for example, ruby:2.6): 18 | registry.inventec/proxy/library/alpine:latest 19 | Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded! 20 | ``` 21 | 22 | -------------------------------------------------------------------------------- /gitlab/gitlab-runner/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | PROXY=http://10.190.81.209:3389/ 6 | 7 | docker build --rm -f Dockerfile.pipeline \ 8 | -t registry.inventec/infra/ci:1.0 \ 9 | --build-arg http_proxy=${PROXY} \ 10 | --build-arg https_proxy=${PROXY} \ 11 | . 12 | docker push registry.inventec/infra/ci:1.0 13 | -------------------------------------------------------------------------------- /gitlab/gitlab-runner/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | 4 | gitlab-runner: 5 | image: registry.inventec/proxy/gitlab/gitlab-runner:v13.12.0 6 | container_name: gitlab-runner 7 | volumes: 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | - /opt/gitlab-runner/config:/etc/gitlab-runner 10 | restart: always -------------------------------------------------------------------------------- /gitlab/gitlab/start.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # 修改默认的SSH端口号,谨防冲突 4 | # /etc/ssh/sshd_config 5 | 6 | docker-compose up -d -------------------------------------------------------------------------------- /gitlab/omnibus-gitlab/docker/.dockerignore: -------------------------------------------------------------------------------- 1 | *.md 2 | -------------------------------------------------------------------------------- /gitlab/omnibus-gitlab/docker/README.md: -------------------------------------------------------------------------------- 1 | The latest docker guide can be found here: [GitLab Docker images](/doc/docker/README.md). 
2 | -------------------------------------------------------------------------------- /gitlab/omnibus-gitlab/docker/RELEASE: -------------------------------------------------------------------------------- 1 | # Ref: 2 | # https://gitlab.com/gitlab-org/omnibus-gitlab/-/blob/master/doc/build/build_docker_image.md 3 | # https://packages.gitlab.com/gitlab/gitlab-ce/packages/ubuntu/focal/gitlab-ce_14.10.5-ce.0_amd64.deb 4 | RELEASE_PACKAGE=gitlab-ce 5 | RELEASE_VERSION=14.10.5-ce.0 6 | DOWNLOAD_URL=https://infra-oss.itc.inventec.net/public/gitlab-ce_14.10.5-ce.0_amd64.deb -------------------------------------------------------------------------------- /gitlab/omnibus-gitlab/docker/assets/download-package: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ ${DOWNLOAD_URL} == *"amazonaws"* ]]; then 4 | echo "Downloading package from Amazon bucket - ${DOWNLOAD_URL}" 5 | wget --quiet ${DOWNLOAD_URL} -O /tmp/gitlab.deb 6 | else 7 | echo "Downloading package as artifact - ${DOWNLOAD_URL}" 8 | # If we are fetching the package which is available as an artifact, we need 9 | # to authenticate to access it. Hence, we pass PRIVATE-TOKEN header. 10 | wget --quiet --header "PRIVATE-TOKEN: ${TRIGGER_PRIVATE_TOKEN}" ${DOWNLOAD_URL} -O /tmp/gitlab.deb 11 | fi 12 | 13 | results=$? 14 | if [ ${results} -ne 0 ]; then 15 | >&2 echo "There was an error downloading ${DOWNLOAD_URL}. Please check the output for more information" 16 | exit ${results} 17 | fi 18 | -------------------------------------------------------------------------------- /gitlab/omnibus-gitlab/docker/assets/gitlab.rb: -------------------------------------------------------------------------------- 1 | # Initialize the env config for each service in case we need to add them in via the wrapper 2 | unless ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'].nil? 3 | [gitlab_workhorse['env'], gitlab_pages['env'], registry['env'], gitlab_rails['env']].each do |new_env| 4 | new_env['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] = ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] 5 | new_env['AWS_REGION'] = ENV['AWS_REGION'] 6 | new_env['ECS_CONTAINER_METADATA_URI_V4'] = ENV['ECS_CONTAINER_METADATA_URI_V4'] unless ENV['ECS_CONTAINER_METADATA_URI_V4'].nil? 7 | new_env['ECS_CONTAINER_METADATA_URI'] = ENV['ECS_CONTAINER_METADATA_URI'] 8 | end 9 | end 10 | 11 | # Docker options 12 | ## Prevent Postgres from trying to allocate 25% of total memory 13 | postgresql['shared_buffers'] = '1MB' 14 | 15 | # Disable Prometheus node_exporter inside Docker. 16 | node_exporter['enable'] = false 17 | 18 | # Manage accounts with docker 19 | manage_accounts['enable'] = false 20 | 21 | # Get hostname from shell 22 | host = `hostname`.strip 23 | external_url "http://#{host}" 24 | 25 | # Explicitly disable init detection since we are running on a container 26 | package['detect_init'] = false 27 | 28 | # Explicitly disable attempt to update kernel parameters 29 | package['modify_kernel_parameters'] = false 30 | 31 | # Load custom config from environment variable: GITLAB_OMNIBUS_CONFIG 32 | # Disabling the cop since rubocop considers using eval to be security risk but 33 | # we don't have an easy way out, atleast yet. 
34 | eval ENV["GITLAB_OMNIBUS_CONFIG"].to_s # rubocop:disable Security/Eval 35 | 36 | # Load configuration stored in /etc/gitlab/gitlab.rb 37 | from_file("/etc/gitlab/gitlab.rb") 38 | -------------------------------------------------------------------------------- /gitlab/omnibus-gitlab/docker/assets/sshd_config: -------------------------------------------------------------------------------- 1 | Port 22 2 | ChallengeResponseAuthentication no 3 | HostKey /etc/gitlab/ssh_host_rsa_key 4 | HostKey /etc/gitlab/ssh_host_ecdsa_key 5 | HostKey /etc/gitlab/ssh_host_ed25519_key 6 | Protocol 2 7 | PermitRootLogin no 8 | PasswordAuthentication no 9 | MaxStartups 100:30:200 10 | AllowUsers git 11 | PrintMotd no 12 | PrintLastLog no 13 | PubkeyAuthentication yes 14 | AuthorizedKeysFile %h/.ssh/authorized_keys /gitlab-data/ssh/authorized_keys 15 | AuthorizedKeysCommand /opt/gitlab/embedded/service/gitlab-shell/bin/gitlab-shell-authorized-keys-check git %u %k 16 | AuthorizedKeysCommandUser git 17 | 18 | # With "UsePAM yes" the "!" is seen as a password disabled account and not fully locked so ssh public key login works 19 | # Please make sure that the account is created without passwordlogin ("*" in /etc/shadow) or configure pam. 20 | # Issue #5891 https://gitlab.com/gitlab-org/omnibus-gitlab 21 | UsePAM no 22 | 23 | # Disabling use DNS in ssh since it tends to slow connecting 24 | UseDNS no 25 | 26 | # Enable the use of Git protocol v2 27 | AcceptEnv GIT_PROTOCOL 28 | -------------------------------------------------------------------------------- /gitlab/omnibus-gitlab/docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | web: 2 | image: 'gitlab/gitlab-ce:latest' 3 | restart: always 4 | hostname: 'gitlab.example.com' 5 | environment: 6 | GITLAB_OMNIBUS_CONFIG: | 7 | external_url 'https://gitlab.example.com' 8 | ports: 9 | - '80:80' 10 | - '443:443' 11 | - '22:22' 12 | volumes: 13 | - '/srv/gitlab/config:/etc/gitlab' 14 | - '/srv/gitlab/logs:/var/log/gitlab' 15 | - '/srv/gitlab/data:/var/opt/gitlab' 16 | -------------------------------------------------------------------------------- /gitlab/omnibus-gitlab/docker/marathon.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "/gitlab", 3 | "ports": [0,0], 4 | "cpus": 2, 5 | "mem": 2048.0, 6 | "disk": 10240.0, 7 | "container": { 8 | "type": "DOCKER", 9 | "docker": { 10 | "network": "HOST", 11 | "image": "gitlab/gitlab-ce:latest" 12 | }, 13 | "volumes": [ 14 | { 15 | "containerPath": "/etc/gitlab", 16 | "hostPath": "/var/data/etc/gitlab", 17 | "mode": "RW" 18 | }, 19 | { 20 | "containerPath": "/var/opt/gitlab", 21 | "hostPath": "/var/data/opt/gitlab", 22 | "mode": "RW" 23 | }, 24 | { 25 | "containerPath": "/var/log/gitlab", 26 | "hostPath": "/var/data/log/gitlab", 27 | "mode": "RW" 28 | } 29 | ] 30 | } 31 | } -------------------------------------------------------------------------------- /gpdb/README.md: -------------------------------------------------------------------------------- 1 | ### Build and deployment notes: 2 | 3 | 1. Download the commercial edition from https://network.pivotal.io/products/vmware-greenplum, or the latest open-source release from https://github.com/greenplum-db/gpdb/releases, and save it in the pkgs subdirectory under the build directory 4 | 2. Run the build.sh or build-ce.sh script to build the image 5 | 3. Use Swarm to build a multi-host container cluster and create an overlay network 6 | 4. 
Work out the operations that deploy.sh needs to perform, and their execution order; since deploy.sh relies on Ansible for deployment, you need to fill in the inventory asset configuration file yourself -------------------------------------------------------------------------------- /gpdb/build-ce.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | . ./init.sh 6 | . ./clean.sh 7 | 8 | cd build-ce 9 | docker build --rm \ 10 | -t ${REGISTRY}/${TAGNAME}-ce \ 11 | --build-arg http_proxy=${PROXY} \ 12 | --build-arg https_proxy=${PROXY} \ 13 | . 14 | docker push ${REGISTRY}/$TAGNAME-ce -------------------------------------------------------------------------------- /gpdb/build-ce/etc/.bashrc: -------------------------------------------------------------------------------- 1 | 2 | # Ref:https://gpdb.docs.pivotal.io/5150/install_guide/init_gpdb.html#topic8 3 | source /usr/local/greenplum-db/greenplum_path.sh 4 | export MASTER_DATA_DIRECTORY=/disk1/gpdata/gpmaster/gpseg-1 5 | # (Optional) You may also want to set some client session environment variables 6 | # such as PGPORT, PGUSER and PGDATABASE for convenience. 7 | export PGPORT=5432 8 | # (Optional) If you use RHEL 7 or CentOS 7, add the following line to the end of the .bashrc file 9 | # to enable using the ps command in the greenplum_path.sh environment 10 | export LD_PRELOAD=/lib64/libz.so.1 ps 11 | # Support PXF 12 | # export JAVA_HOME=/etc/alternatives/jre 13 | -------------------------------------------------------------------------------- /gpdb/build-ce/etc/etc_cgconfig.d_gpdb.conf: -------------------------------------------------------------------------------- 1 | group gpdb { 2 | perm { 3 | task { 4 | uid = gpadmin; 5 | gid = gpadmin; 6 | } 7 | admin { 8 | uid = gpadmin; 9 | gid = gpadmin; 10 | } 11 | } 12 | cpu { 13 | } 14 | cpuacct { 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /gpdb/build-ce/etc/etc_security_limits.conf: -------------------------------------------------------------------------------- 1 | 2 | * soft nofile 65536 3 | * hard nofile 65536 4 | * soft nproc 131072 5 | * hard nproc 131072 6 | -------------------------------------------------------------------------------- /gpdb/build-ce/etc/etc_selinux_config: -------------------------------------------------------------------------------- 1 | SELINUX=disabled 2 | -------------------------------------------------------------------------------- /gpdb/build-ce/etc/etc_sysctl.conf: -------------------------------------------------------------------------------- 1 | # https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/install_guide-prep_os.html#the-sysctl.conf-file 2 | 3 | # Failed system call was shmget(key=40002001, size=615342704, 03600). 4 | # This error usually means that PostgreSQL's request for a shared memory segment exceeded your kernel's SHMMAX parameter. 5 | # You can either reduce the request size or reconfigure the kernel with larger SHMMAX. 6 | # To reduce the request size (currently 615342704 bytes), reduce PostgreSQL's shared_buffers parameter (currently 4000) and/or its max_connections parameter (currently 5003). 7 | # If the request size is already small, it's possible that it is less than your kernel's SHMMIN parameter.
8 | # kernel.shmall = _PHYS_PAGES / 2 # See Shared Memory Pages 9 | kernel.shmall = 4000000000 10 | # kernel.shmmax = kernel.shmall * PAGE_SIZE 11 | # kernel.shmmax = 500000000 12 | kernel.shmmax = 1000000000 13 | kernel.shmmni = 4096 14 | vm.overcommit_memory = 2 # See Segment Host Memory 15 | vm.overcommit_ratio = 95 # See Segment Host Memory 16 | 17 | net.ipv4.ip_local_port_range = 1025 65535 # See Port Settings 18 | kernel.sem = 500 2048000 200 40960 19 | kernel.sysrq = 1 20 | kernel.core_uses_pid = 1 21 | kernel.msgmnb = 65536 22 | kernel.msgmax = 65536 23 | kernel.msgmni = 2048 24 | net.ipv4.tcp_syncookies = 1 25 | # CoreOS: WARNING: IPv4 forwarding is disabled. Networking will not work. 26 | net.ipv4.ip_forward = 1 27 | net.ipv4.conf.default.accept_source_route = 0 28 | net.ipv4.tcp_max_syn_backlog = 4096 29 | net.ipv4.conf.all.arp_filter = 1 30 | net.ipv4.ipfrag_high_thresh = 41943040 31 | net.ipv4.ipfrag_low_thresh = 31457280 32 | net.ipv4.ipfrag_time = 60 33 | net.core.netdev_max_backlog = 10000 34 | net.core.rmem_max = 2097152 35 | net.core.wmem_max = 2097152 36 | vm.swappiness = 10 37 | vm.zone_reclaim_mode = 0 38 | vm.dirty_expire_centisecs = 500 39 | vm.dirty_writeback_centisecs = 100 40 | vm.dirty_background_ratio = 0 # See System Memory 41 | vm.dirty_ratio = 0 42 | vm.dirty_background_bytes = 1610612736 43 | vm.dirty_bytes = 4294967296 -------------------------------------------------------------------------------- /gpdb/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | . ./init.sh 6 | . ./clean.sh 7 | 8 | cd build 9 | docker build --rm \ 10 | -t ${REGISTRY}/${TAGNAME} \ 11 | --build-arg http_proxy=${PROXY} \ 12 | --build-arg https_proxy=${PROXY} \ 13 | . 14 | docker push ${REGISTRY}/$TAGNAME -------------------------------------------------------------------------------- /gpdb/build/etc/.bashrc: -------------------------------------------------------------------------------- 1 | 2 | # Ref:https://gpdb.docs.pivotal.io/5150/install_guide/init_gpdb.html#topic8 3 | source /usr/local/greenplum-db/greenplum_path.sh 4 | export MASTER_DATA_DIRECTORY=/disk1/gpdata/gpmaster/gpseg-1 5 | # (Optional) You may also want to set some client session environment variables 6 | # such as PGPORT, PGUSER and PGDATABASE for convenience. 
7 | export PGPORT=5432 8 | # (Optional) If you use RHEL 7 or CentOS 7, add the following line to the end of the .bashrc file 9 | # to enable using the ps command in the greenplum_path.sh environment 10 | export LD_PRELOAD=/lib64/libz.so.1 ps 11 | # Support PXF 12 | # export JAVA_HOME=/etc/alternatives/jre 13 | -------------------------------------------------------------------------------- /gpdb/build/etc/etc_cgconfig.d_gpdb.conf: -------------------------------------------------------------------------------- 1 | group gpdb { 2 | perm { 3 | task { 4 | uid = gpadmin; 5 | gid = gpadmin; 6 | } 7 | admin { 8 | uid = gpadmin; 9 | gid = gpadmin; 10 | } 11 | } 12 | cpu { 13 | } 14 | cpuacct { 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /gpdb/build/etc/etc_security_limits.conf: -------------------------------------------------------------------------------- 1 | 2 | * soft nofile 65536 3 | * hard nofile 65536 4 | * soft nproc 131072 5 | * hard nproc 131072 6 | -------------------------------------------------------------------------------- /gpdb/build/etc/etc_selinux_config: -------------------------------------------------------------------------------- 1 | SELINUX=disabled 2 | -------------------------------------------------------------------------------- /gpdb/build/etc/etc_sysctl.conf: -------------------------------------------------------------------------------- 1 | # https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/install_guide-prep_os.html#the-sysctl.conf-file 2 | 3 | # Failed system call was shmget(key=40002001, size=615342704, 03600). 4 | # This error usually means that PostgreSQL's request for a shared memory segment exceeded your kernel's SHMMAX parameter. 5 | # You can either reduce the request size or reconfigure the kernel with larger SHMMAX. 6 | # To reduce the request size (currently 615342704 bytes), reduce PostgreSQL's shared_buffers parameter (currently 4000) and/or its max_connections parameter (currently 5003). 7 | # If the request size is already small, it's possible that it is less than your kernel's SHMMIN parameter. 8 | # kernel.shmall = _PHYS_PAGES / 2 # See Shared Memory Pages 9 | kernel.shmall = 4000000000 10 | # kernel.shmmax = kernel.shmall * PAGE_SIZE 11 | # kernel.shmmax = 500000000 12 | kernel.shmmax = 1000000000 13 | kernel.shmmni = 4096 14 | vm.overcommit_memory = 2 # See Segment Host Memory 15 | vm.overcommit_ratio = 95 # See Segment Host Memory 16 | 17 | net.ipv4.ip_local_port_range = 1025 65535 # See Port Settings 18 | kernel.sem = 500 2048000 200 40960 19 | kernel.sysrq = 1 20 | kernel.core_uses_pid = 1 21 | kernel.msgmnb = 65536 22 | kernel.msgmax = 65536 23 | kernel.msgmni = 2048 24 | net.ipv4.tcp_syncookies = 1 25 | # CoreOS: WARNING: IPv4 forwarding is disabled. Networking will not work. 
26 | net.ipv4.ip_forward = 1 27 | net.ipv4.conf.default.accept_source_route = 0 28 | net.ipv4.tcp_max_syn_backlog = 4096 29 | net.ipv4.conf.all.arp_filter = 1 30 | net.ipv4.ipfrag_high_thresh = 41943040 31 | net.ipv4.ipfrag_low_thresh = 31457280 32 | net.ipv4.ipfrag_time = 60 33 | net.core.netdev_max_backlog = 10000 34 | net.core.rmem_max = 2097152 35 | net.core.wmem_max = 2097152 36 | vm.swappiness = 10 37 | vm.zone_reclaim_mode = 0 38 | vm.dirty_expire_centisecs = 500 39 | vm.dirty_writeback_centisecs = 100 40 | vm.dirty_background_ratio = 0 # See System Memory 41 | vm.dirty_ratio = 0 42 | vm.dirty_background_bytes = 1610612736 43 | vm.dirty_bytes = 4294967296 -------------------------------------------------------------------------------- /gpdb/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . ./init.dev.sh 3 | 4 | # 停止运行中的容器 5 | querynum1=`docker ps | grep $TAGNAME | awk '{print $1}' | wc -l` 6 | if [ $querynum1 -gt 0 ]; then 7 | docker stop $(docker ps | grep $TAGNAME | awk '{print $1}') 8 | fi 9 | 10 | # This will remove: 11 | # - all stopped containers 12 | # - all networks not used by at least one container 13 | # - all dangling images 14 | # - all build cache 15 | docker system prune --force -------------------------------------------------------------------------------- /gpdb/cron/.gitignore: -------------------------------------------------------------------------------- 1 | *_test.py -------------------------------------------------------------------------------- /gpdb/cron/send_alert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python send_alert.py "$@" 4 | -------------------------------------------------------------------------------- /gpdb/deploy/bin/.env: -------------------------------------------------------------------------------- 1 | REGISTRY=bdc4acr.azurecr.io 2 | TAGNAME=infra/gpdb:6.20.5-ce 3 | -------------------------------------------------------------------------------- /gpdb/deploy/bin/docker-compose-master.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | 4 | gp6mdw: 5 | image: ${REGISTRY}/${TAGNAME} 6 | container_name: gp6mdw 7 | hostname: gp6mdw 8 | ports: 9 | - 5483:5432 10 | volumes: 11 | - /opt/bdcc/greenplum6/config:/opt/greenplum/config 12 | - /data/ssd2/gp6data:/disk1/gpdata 13 | environment: 14 | MIRROR_STRATEGY: Spread 15 | IS_MASTER: "true" 16 | # ENABLE_RESOURCE_GROUPS: "true" 17 | restart: always 18 | # privileged: true 19 | # cpu_count: 28 20 | # mem_limit: 160g 21 | sysctls: 22 | - net.ipv4.ipfrag_time=60 23 | - net.ipv4.ipfrag_high_thresh=12480000 24 | # 以下几项需要在宿主机上修改,不支持容器单独配置 25 | # sysctl 'vm.dirty_background_bytes=1610612736' is not whitelisted 26 | # - vm.overcommit_memory=2 27 | # - vm.overcommit_ratio=80 28 | # - vm.dirty_ratio=0 29 | # - vm.dirty_bytes=4294967296 30 | # - vm.dirty_background_ratio=0 31 | # - vm.dirty_background_bytes=1610612736 32 | cap_add: 33 | - SYS_PTRACE 34 | 35 | networks: 36 | default: 37 | external: true 38 | name: bdc 39 | -------------------------------------------------------------------------------- /gpdb/deploy/bin/docker-compose-segment.1.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | 4 | gp6sdw1: 5 | image: ${REGISTRY}/${TAGNAME} 6 | container_name: gp6sdw1 7 | hostname: gp6sdw1 8 | volumes: 9 | - /data/hdd3/gp6data:/disk1/gpdata 10 
| - /data/hdd4/gp6data:/disk2/gpdata 11 | - /data/ssd3/gp6space:/gp6space 12 | restart: always 13 | # privileged: true 14 | # cpu_count: 28 15 | # mem_limit: 160g 16 | sysctls: 17 | - net.ipv4.ipfrag_time=60 18 | - net.ipv4.ipfrag_high_thresh=12480000 19 | cap_add: 20 | - SYS_PTRACE 21 | 22 | networks: 23 | default: 24 | external: true 25 | name: bdc 26 | -------------------------------------------------------------------------------- /gpdb/deploy/bin/docker-compose-segment.2.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | 4 | gp6sdw2: 5 | image: ${REGISTRY}/${TAGNAME} 6 | container_name: gp6sdw2 7 | hostname: gp6sdw2 8 | volumes: 9 | - /data/hdd3/gp6data:/disk1/gpdata 10 | - /data/hdd4/gp6data:/disk2/gpdata 11 | - /data/ssd3/gp6space:/gp6space 12 | restart: always 13 | # privileged: true 14 | # cpu_count: 28 15 | # mem_limit: 160g 16 | sysctls: 17 | - net.ipv4.ipfrag_time=60 18 | - net.ipv4.ipfrag_high_thresh=12480000 19 | cap_add: 20 | - SYS_PTRACE 21 | 22 | networks: 23 | default: 24 | external: true 25 | name: bdc 26 | -------------------------------------------------------------------------------- /gpdb/deploy/bin/docker-compose-segment.3.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | 4 | gp6sdw3: 5 | image: ${REGISTRY}/${TAGNAME} 6 | container_name: gp6sdw3 7 | hostname: gp6sdw3 8 | volumes: 9 | - /data/hdd3/gp6data:/disk1/gpdata 10 | - /data/hdd4/gp6data:/disk2/gpdata 11 | - /data/ssd3/gp6space:/gp6space 12 | restart: always 13 | # privileged: true 14 | # cpu_count: 28 15 | # mem_limit: 160g 16 | sysctls: 17 | - net.ipv4.ipfrag_time=60 18 | - net.ipv4.ipfrag_high_thresh=12480000 19 | cap_add: 20 | - SYS_PTRACE 21 | 22 | networks: 23 | default: 24 | external: true 25 | name: bdc 26 | -------------------------------------------------------------------------------- /gpdb/deploy/bin/docker-compose-standby.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | 4 | gp6smdw: 5 | image: ${REGISTRY}/${TAGNAME} 6 | container_name: gp6smdw 7 | hostname: gp6smdw 8 | ports: 9 | - 5483:5432 10 | volumes: 11 | - /opt/bdcc/greenplum6/config:/opt/greenplum/config 12 | - /data/ssd2/gp6data:/disk1/gpdata 13 | restart: always 14 | # privileged: true 15 | # cpu_count: 28 16 | # mem_limit: 160g 17 | sysctls: 18 | - net.ipv4.ipfrag_time=60 19 | - net.ipv4.ipfrag_high_thresh=12480000 20 | cap_add: 21 | - SYS_PTRACE 22 | 23 | networks: 24 | default: 25 | external: true 26 | name: bdc 27 | -------------------------------------------------------------------------------- /gpdb/deploy/config/gpstop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -e 3 | # set -o pipefail 4 | cd `dirname $0` 5 | 6 | source /usr/local/greenplum-db/greenplum_path.sh 7 | export MASTER_DATA_DIRECTORY=/disk1/gpdata/gpmaster/gpseg-1 8 | 9 | /usr/local/greenplum-db/bin/gpstate -b 10 | /usr/local/greenplum-db/bin/gpstop -M fast -a 11 | -------------------------------------------------------------------------------- /gpdb/deploy/config/hostlist: -------------------------------------------------------------------------------- 1 | gp6mdw 2 | gp6smdw 3 | gp6sdw1 4 | gp6sdw2 5 | gp6sdw3 -------------------------------------------------------------------------------- /gpdb/deploy/config/seg_hosts: 
-------------------------------------------------------------------------------- 1 | gp6sdw1 2 | gp6sdw2 3 | gp6sdw3 -------------------------------------------------------------------------------- /gpdb/init.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | REGISTRY=registry.inventec 3 | 4 | if [ -n "$1" ]; then 5 | BRANCH=$1 6 | else 7 | # BRANCH=4.3.30.4 8 | # BRANCH=5.15.1 9 | # BRANCH=5.17.0 10 | # BRANCH=5.18.0 11 | # BRANCH=5.21.5 12 | # BRANCH=5.24.0 13 | # BRANCH=6.11.2 14 | # BRANCH=6.12.0 15 | # BRANCH=6.13.0 16 | # BRANCH=6.14.0 17 | # BRANCH=6.16.0 18 | # BRANCH=6.19.1 19 | # BRANCH=6.20.5 20 | # BRANCH=6.23.0 21 | # BRANCH=6.24.4 22 | BRANCH=6.25.3 23 | fi 24 | TAGNAME=infra/gpdb:${BRANCH} 25 | 26 | PROXY=http://10.190.81.209:3389 27 | -------------------------------------------------------------------------------- /grafana/build/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG VERSION 2 | FROM grafana/grafana:${VERSION} 3 | 4 | USER root 5 | 6 | RUN apk update && \ 7 | apk add --no-cache curl jq 8 | 9 | USER grafana 10 | 11 | # RUN for plugin in $(curl -s 'https://grafana.com/api/plugins?orderBy=name' | jq '.items[] | select(.internal == false) | .slug' | tr -d '"'); \ 12 | # do grafana-cli --pluginsDir "${GF_PATHS_PLUGINS}" plugins install $plugin; done 13 | 14 | RUN for p in \ 15 | "agenty-flowcharting-panel" \ 16 | "aidanmountford-html-panel" \ 17 | "bessler-pictureit-panel" \ 18 | "bilibala-echarts-panel" \ 19 | "blackmirror1-singlestat-math-panel" \ 20 | "farski-blendstat-panel" \ 21 | "flant-statusmap-panel" \ 22 | "grafana-clock-panel" \ 23 | "grafana-gitlab-datasource" \ 24 | "grafana-oracle-datasource" \ 25 | "grafana-polystat-panel" \ 26 | "grafana-simple-json-datasource" \ 27 | "grafana-singlestat-panel" \ 28 | "larona-epict-panel" \ 29 | "marcusolsson-csv-datasource" \ 30 | "marcusolsson-dynamictext-panel" \ 31 | "marcusolsson-json-datasource" \ 32 | "michaeldmoore-multistat-panel" \ 33 | "natel-discrete-panel" \ 34 | "natel-plotly-panel" \ 35 | "pierosavi-imageit-panel" \ 36 | "ryantxu-ajax-panel" \ 37 | "simpod-json-datasource" \ 38 | "snuids-radar-panel" \ 39 | "yesoreyeram-boomtheme-panel" \ 40 | ; do \ 41 | echo "Install Plugin $p"; \ 42 | grafana-cli --pluginsDir "${GF_PATHS_PLUGINS}" plugins install "$p"; \ 43 | done 44 | -------------------------------------------------------------------------------- /grafana/build/build.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | cd `dirname $0` 3 | 4 | VERSION=8.3.3.1 5 | 6 | # wget https://github.com/grafana/grafana/archive/refs/tags/v8.3.3.zip 7 | # unzip v8.3.3.zip 8 | # cd grafana-8.3.3 9 | # git apply ../patch/*.patch 10 | # make build-docker-full 11 | docker tag grafana/grafana:dev grafana/grafana:${VERSION} 12 | 13 | docker build --rm -f Dockerfile \ 14 | -t registry.inventec/infra/grafana/grafana:${VERSION}-plus \ 15 | --build-arg VERSION=${VERSION} \ 16 | . 
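# Optional sanity check (illustrative): list the plugins baked into the image.
# The plugins dir below matches GF_PATHS_PLUGINS in the upstream grafana image.
# docker run --rm --entrypoint grafana-cli \
#   registry.inventec/infra/grafana/grafana:${VERSION}-plus \
#   --pluginsDir /var/lib/grafana/plugins plugins ls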
17 | -------------------------------------------------------------------------------- /harbor/sync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # 目标: 因为 k8s.gcr.io 直接无法访问,需要同步一份镜像到 harbor 5 | 6 | # kubeadm config images list --config /etc/kubernetes/kubeadm-config.yaml 7 | k8s_images=( 8 | kube-apiserver:v1.23.6 9 | kube-controller-manager:v1.23.6 10 | kube-scheduler:v1.23.6 11 | kube-proxy:v1.23.6 12 | pause:3.6 13 | coredns/coredns:v1.8.6 14 | cpa/cluster-proportional-autoscaler-amd64:1.8.5 15 | dns/k8s-dns-node-cache:1.21.1 16 | pause:3.3 17 | ) 18 | 19 | declare -A repository 20 | repository["source"]="k8s.gcr.io" 21 | repository["target"]="registry.inventec/gcr" 22 | 23 | for image in ${k8s_images[@]} 24 | do 25 | echo "===================== Sync ${image} =====================" 26 | docker pull ${repository["source"]}/${image} 27 | docker tag ${repository["source"]}/${image} ${repository["target"]}/${image} 28 | docker push ${repository["target"]}/${image} 29 | done 30 | 31 | -------------------------------------------------------------------------------- /helm/postgres/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /helm/postgres/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: postgres 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "1.16.0" 25 | -------------------------------------------------------------------------------- /helm/postgres/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. 
Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range $host := .Values.ingress.hosts }} 4 | {{- range .paths }} 5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} 6 | {{- end }} 7 | {{- end }} 8 | {{- else if contains "NodePort" .Values.service.type }} 9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgres.fullname" . }}) 10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 11 | echo http://$NODE_IP:$NODE_PORT 12 | {{- else if contains "LoadBalancer" .Values.service.type }} 13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 14 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "postgres.fullname" . }}' 15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgres.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 16 | echo http://$SERVICE_IP:{{ .Values.service.port }} 17 | {{- else if contains "ClusterIP" .Values.service.type }} 18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "postgres.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 19 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") 20 | echo "Visit http://127.0.0.1:8080 to use your application" 21 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/postgres/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "postgres.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "postgres.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "postgres.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "postgres.labels" -}} 37 | helm.sh/chart: {{ include "postgres.chart" . }} 38 | {{ include "postgres.selectorLabels" . 
}} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "postgres.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "postgres.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "postgres.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "postgres.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /helm/postgres/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "postgres.fullname" . }} 5 | labels: 6 | {{- include "postgres.labels" . | nindent 4 }} 7 | spec: 8 | {{- if not .Values.autoscaling.enabled }} 9 | replicas: {{ .Values.replicaCount }} 10 | {{- end }} 11 | selector: 12 | matchLabels: 13 | {{- include "postgres.selectorLabels" . | nindent 6 }} 14 | template: 15 | metadata: 16 | {{- with .Values.podAnnotations }} 17 | annotations: 18 | {{- toYaml . | nindent 8 }} 19 | {{- end }} 20 | labels: 21 | {{- include "postgres.selectorLabels" . | nindent 8 }} 22 | spec: 23 | {{- with .Values.imagePullSecrets }} 24 | imagePullSecrets: 25 | {{- toYaml . | nindent 8 }} 26 | {{- end }} 27 | serviceAccountName: {{ include "postgres.serviceAccountName" . }} 28 | securityContext: 29 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 30 | containers: 31 | - name: {{ .Chart.Name }} 32 | securityContext: 33 | {{- toYaml .Values.securityContext | nindent 12 }} 34 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 35 | imagePullPolicy: {{ .Values.image.pullPolicy }} 36 | ports: 37 | - name: http 38 | containerPort: {{ .Values.service.port }} 39 | protocol: TCP 40 | livenessProbe: 41 | httpGet: 42 | path: / 43 | port: http 44 | readinessProbe: 45 | httpGet: 46 | path: / 47 | port: http 48 | resources: 49 | {{- toYaml .Values.resources | nindent 12 }} 50 | {{- with .Values.nodeSelector }} 51 | nodeSelector: 52 | {{- toYaml . | nindent 8 }} 53 | {{- end }} 54 | {{- with .Values.affinity }} 55 | affinity: 56 | {{- toYaml . | nindent 8 }} 57 | {{- end }} 58 | {{- with .Values.tolerations }} 59 | tolerations: 60 | {{- toYaml . | nindent 8 }} 61 | {{- end }} 62 | -------------------------------------------------------------------------------- /helm/postgres/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.autoscaling.enabled }} 2 | apiVersion: autoscaling/v2 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: {{ include "postgres.fullname" . }} 6 | labels: 7 | {{- include "postgres.labels" . | nindent 4 }} 8 | spec: 9 | scaleTargetRef: 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | name: {{ include "postgres.fullname" . 
}} 13 | minReplicas: {{ .Values.autoscaling.minReplicas }} 14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 15 | metrics: 16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} 17 | - type: Resource 18 | resource: 19 | name: cpu 20 | target: 21 | type: Utilization 22 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 23 | {{- end }} 24 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} 25 | - type: Resource 26 | resource: 27 | name: memory 28 | target: 29 | type: Utilization 30 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /helm/postgres/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "postgres.fullname" . -}} 3 | {{- $svcPort := .Values.service.port -}} 4 | {{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} 5 | {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} 6 | {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} 7 | {{- end }} 8 | {{- end }} 9 | {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} 10 | apiVersion: networking.k8s.io/v1 11 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} 12 | apiVersion: networking.k8s.io/v1beta1 13 | {{- else -}} 14 | apiVersion: extensions/v1beta1 15 | {{- end }} 16 | kind: Ingress 17 | metadata: 18 | name: {{ $fullName }} 19 | labels: 20 | {{- include "postgres.labels" . | nindent 4 }} 21 | {{- with .Values.ingress.annotations }} 22 | annotations: 23 | {{- toYaml . | nindent 4 }} 24 | {{- end }} 25 | spec: 26 | {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} 27 | ingressClassName: {{ .Values.ingress.className }} 28 | {{- end }} 29 | {{- if .Values.ingress.tls }} 30 | tls: 31 | {{- range .Values.ingress.tls }} 32 | - hosts: 33 | {{- range .hosts }} 34 | - {{ . | quote }} 35 | {{- end }} 36 | secretName: {{ .secretName }} 37 | {{- end }} 38 | {{- end }} 39 | rules: 40 | {{- range .Values.ingress.hosts }} 41 | - host: {{ .host | quote }} 42 | http: 43 | paths: 44 | {{- range .paths }} 45 | - path: {{ .path }} 46 | {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} 47 | pathType: {{ .pathType }} 48 | {{- end }} 49 | backend: 50 | {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} 51 | service: 52 | name: {{ $fullName }} 53 | port: 54 | number: {{ $svcPort }} 55 | {{- else }} 56 | serviceName: {{ $fullName }} 57 | servicePort: {{ $svcPort }} 58 | {{- end }} 59 | {{- end }} 60 | {{- end }} 61 | {{- end }} 62 | -------------------------------------------------------------------------------- /helm/postgres/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "postgres.fullname" . }} 5 | labels: 6 | {{- include "postgres.labels" . | nindent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | selector: 15 | {{- include "postgres.selectorLabels" . 
| nindent 4 }} 16 | -------------------------------------------------------------------------------- /helm/postgres/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "postgres.serviceAccountName" . }} 6 | labels: 7 | {{- include "postgres.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/postgres/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: "{{ include "postgres.fullname" . }}-test-connection" 5 | labels: 6 | {{- include "postgres.labels" . | nindent 4 }} 7 | annotations: 8 | "helm.sh/hook": test 9 | spec: 10 | containers: 11 | - name: wget 12 | image: busybox 13 | command: ['wget'] 14 | args: ['{{ include "postgres.fullname" . }}:{{ .Values.service.port }}'] 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /helm/postgres/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for postgres. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: nginx 9 | pullPolicy: IfNotPresent 10 | # Overrides the image tag whose default is the chart appVersion. 11 | tag: "" 12 | 13 | imagePullSecrets: [] 14 | nameOverride: "" 15 | fullnameOverride: "" 16 | 17 | serviceAccount: 18 | # Specifies whether a service account should be created 19 | create: true 20 | # Annotations to add to the service account 21 | annotations: {} 22 | # The name of the service account to use. 23 | # If not set and create is true, a name is generated using the fullname template 24 | name: "" 25 | 26 | podAnnotations: {} 27 | 28 | podSecurityContext: {} 29 | # fsGroup: 2000 30 | 31 | securityContext: {} 32 | # capabilities: 33 | # drop: 34 | # - ALL 35 | # readOnlyRootFilesystem: true 36 | # runAsNonRoot: true 37 | # runAsUser: 1000 38 | 39 | service: 40 | type: ClusterIP 41 | port: 80 42 | 43 | ingress: 44 | enabled: false 45 | className: "" 46 | annotations: {} 47 | # kubernetes.io/ingress.class: nginx 48 | # kubernetes.io/tls-acme: "true" 49 | hosts: 50 | - host: chart-example.local 51 | paths: 52 | - path: / 53 | pathType: ImplementationSpecific 54 | tls: [] 55 | # - secretName: chart-example-tls 56 | # hosts: 57 | # - chart-example.local 58 | 59 | resources: {} 60 | # We usually recommend not to specify default resources and to leave this as a conscious 61 | # choice for the user. This also increases chances charts run on environments with little 62 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 63 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
64 | # limits: 65 | # cpu: 100m 66 | # memory: 128Mi 67 | # requests: 68 | # cpu: 100m 69 | # memory: 128Mi 70 | 71 | autoscaling: 72 | enabled: false 73 | minReplicas: 1 74 | maxReplicas: 100 75 | targetCPUUtilizationPercentage: 80 76 | # targetMemoryUtilizationPercentage: 80 77 | 78 | nodeSelector: {} 79 | 80 | tolerations: [] 81 | 82 | affinity: {} 83 | -------------------------------------------------------------------------------- /kafka/admin/docker-compose-kafka-webui.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | 4 | kowl: 5 | image: registry.inventec/proxy/redpandadata/console:v2.4.5 6 | container_name: kowl 7 | ports: 8 | - 9002:8080 9 | volumes: 10 | - ./kowl.yaml:/etc/kowl/config.yaml 11 | entrypoint: ./console --config.filepath=/etc/kowl/config.yaml 12 | restart: always 13 | cpu_count: 2 14 | mem_limit: 1g 15 | 16 | nginx: 17 | image: registry.inventec/proxy/nginx:1.24-alpine 18 | container_name: kowl-proxy 19 | hostname: nginx 20 | ports: 21 | - 9001:80 22 | volumes: 23 | - ./kowl-nginx.conf:/etc/nginx/conf.d/default.conf 24 | # environment: 25 | # NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE: "true" 26 | restart: always 27 | depends_on: 28 | - kowl 29 | cpu_count: 1 30 | mem_limit: 512m 31 | -------------------------------------------------------------------------------- /kafka/admin/kowl.yaml: -------------------------------------------------------------------------------- 1 | # See: https://github.com/cloudhut/kowl/tree/master/docs/config for reference config files. 2 | kafka: 3 | brokers: 4 | - 10.190.5.106:30001 5 | - 10.190.5.106:30002 6 | # sasl: 7 | # enabled: true 8 | # mechanism: PLAIN 9 | # username: ??? 10 | # password: ??? 11 | 12 | # server: 13 | # listenPort: 8080 14 | 15 | # connect: 16 | # enabled: false 17 | # clusters: 18 | # - name: 10.190.5.106 19 | # url: http://10.190.5.106:8083 20 | # tls: 21 | # enabled: false 22 | # # caFilepath: 23 | # # certFilepath: 24 | # # keyFilepath: 25 | # # insecureSkipTlsVerify: false 26 | # # username: admin 27 | # # password: # This can be set via the via the --connect.clusters.i.password flag as well (i to be replaced with the array index) 28 | # # token: # This can be set via the via the --connect.clusters.i.token flag as well (i to be replaced with the array index) 29 | 30 | # Analytics configures the telemetry service that sends anonymized usage statistics to Redpanda. 31 | # Redpanda uses these statistics to evaluate feature usage. 32 | analytics: 33 | enabled: false 34 | 35 | console: 36 | # 5MB 37 | maxDeserializationPayloadSize: 5120000 -------------------------------------------------------------------------------- /kafka/cdc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.inventec/proxy/python:3.11-slim 2 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 3 | 4 | USER root 5 | 6 | RUN pip install --no-cache-dir --upgrade pip -i https://nexus.itc.inventec.net/repository/pypi-proxy/simple/ 7 | 8 | WORKDIR /usr/src/app 9 | 10 | COPY src/requirements.txt . 11 | RUN pip install --no-cache-dir -r requirements.txt -i https://nexus.itc.inventec.net/repository/pypi-proxy/simple/ 12 | 13 | COPY src/main.py . 14 | 15 | COPY entrypoint.sh . 
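# entrypoint.sh simply runs `python main.py` (see kafka/cdc/entrypoint.sh).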
16 | RUN chmod +x entrypoint.sh 17 | 18 | ENV KAFKA_CONNECT_SERVICE_URL= \ 19 | KAFKA_CONNECT_BOOTSTRAP_SERVERS= \ 20 | REDPANDA_API= 21 | 22 | ENTRYPOINT [ "./entrypoint.sh" ] 23 | -------------------------------------------------------------------------------- /kafka/cdc/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | REGISTRY=registry.inventec 6 | 7 | docker build --rm -f Dockerfile \ 8 | -t ${REGISTRY}/infra/cdc-guard:latest \ 9 | -t ${REGISTRY}/infra/cdc-guard:1.0 \ 10 | . 11 | docker push ${REGISTRY}/infra/cdc-guard:latest 12 | docker push ${REGISTRY}/infra/cdc-guard:1.0 13 | 14 | -------------------------------------------------------------------------------- /kafka/cdc/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # set -e 3 | 4 | python main.py 5 | -------------------------------------------------------------------------------- /kafka/cdc/src/.gitignore: -------------------------------------------------------------------------------- 1 | connect-credentials.properties -------------------------------------------------------------------------------- /kafka/cdc/src/requirements.txt: -------------------------------------------------------------------------------- 1 | pymssql 2 | requests 3 | confluent-kafka 4 | psycopg2-binary -------------------------------------------------------------------------------- /kafka/deploy.dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd `dirname $0` 3 | # docker node update --label-add alias=bdc01.infra.dev.itc.inventec kubernetes-101 4 | # docker node update --label-add alias=bdc02.infra.dev.itc.inventec kubernetes-102 5 | # docker node update --label-add alias=bdc03.infra.dev.itc.inventec kubernetes-103 6 | # docker node update --label-add alias=bdc04.infra.dev.itc.inventec kubernetes-104 7 | # mkdir /disk/kafka && chmod -R 777 /disk/kafka 8 | 9 | # 推送启动脚本至部署目录 10 | INVENTORY_FILE=../inventory.dev 11 | 12 | # ansible -i $INVENTORY_FILE kafka -m file -a "dest=/disk/kafka mode=777 state=directory" -f 5 -b 13 | # ansible -i $INVENTORY_FILE kafka -m file -a "path=/disk/zookeeper state=absent" -f 5 -b 14 | # ansible -i $INVENTORY_FILE kafka -m file -a "dest=/disk/zookeeper/datalog mode=777 state=directory" -b 15 | # ansible -i $INVENTORY_FILE kafka -m file -a "dest=/disk/zookeeper/logs mode=777 state=directory" -b 16 | 17 | # 传出配置文件 18 | ansible -i $INVENTORY_FILE kafka -m copy -a "src=jmx_exporter dest=/opt/kafka" 19 | ansible -i $INVENTORY_FILE kafka -m copy -a "src=deploy.dev/ dest=/opt/kafka" 20 | 21 | # 执行启动命令 22 | ansible -i $INVENTORY_FILE broker1 -m shell -a "/opt/kafka/bin/start.sh" 23 | -------------------------------------------------------------------------------- /kafka/deploy.dev/bin/docker-compose-zookeeper.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | networks: 4 | net: 5 | external: true 6 | name: bdc 7 | 8 | services: 9 | 10 | zoo1: 11 | image: ${REGISTRY}/hub/zookeeper:3.4.13 12 | hostname: zoo1 13 | networks: 14 | - net 15 | ports: 16 | - target: 2181 17 | published: 2181 18 | protocol: tcp 19 | mode: host 20 | volumes: 21 | - /disk/zookeeper/data:/data 22 | - /disk/zookeeper/datalog:/datalog 23 | - /disk/zookeeper/logs:/logs 24 | - /etc/localtime:/etc/localtime:ro 25 | environment: 26 | ZOO_AUTOPURGE_PURGEINTERVAL: 1 27 | deploy: 28 | 
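# Swarm scheduling for zoo1: the placement constraint below relies on a node label that is
# applied beforehand, e.g. (illustrative, taken from the commented commands in kafka/deploy.dev.sh):
#   docker node update --label-add alias=bdc02.infra.dev.itc.inventec <node-name>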
restart_policy: 29 | condition: on-failure 30 | placement: 31 | constraints: 32 | - node.labels.alias == bdc02.infra.dev.itc.inventec 33 | resources: 34 | limits: 35 | cpus: "2" 36 | memory: 4g 37 | -------------------------------------------------------------------------------- /kafka/deploy.dev/bin/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | BROKER_CLUSTER_IP=(10.191.7.11 10.191.7.13 10.191.7.14) 6 | BROKER_CLUSTER_VERSION=5.5.11 7 | ZOOKEEPER_CLUSTER=zoo1:2181 8 | HARBOR_REGISTRY=registry.inventec 9 | 10 | # Ref: https://stackoverflow.com/questions/8880603/loop-through-an-array-of-strings-in-bash 11 | for i in "${!BROKER_CLUSTER_IP[@]}" 12 | do 13 | # 设置环境变量,节点的IP 14 | export BROKER_NODE$((i+1))_IP=${BROKER_CLUSTER_IP[i]} 15 | done 16 | 17 | export BROKER_VERSION=${BROKER_CLUSTER_VERSION} 18 | export REGISTRY=${HARBOR_REGISTRY} 19 | export ZOOKEEPER=${ZOOKEEPER_CLUSTER} 20 | 21 | echo "Harbor仓库地址: ${REGISTRY}" 22 | echo "KAFKA集群IP为: ${BROKER_NODE1_IP}, ${BROKER_NODE2_IP}, ${BROKER_NODE3_IP}" 23 | echo "KAFKA版本为: ${BROKER_VERSION}" 24 | echo "ZOOKEEPER版本为: ${ZOOKEEPER}" 25 | 26 | # Ref: https://stackoverflow.com/questions/45804955/zookeeper-refuses-kafka-connection-from-an-old-client 27 | # 确保容器重新创建 28 | # docker stack rm kafka 29 | # docker stack rm zookeeper 30 | 31 | docker stack deploy -c docker-compose-zookeeper.yml zookeeper 32 | docker stack deploy -c docker-compose-kafka.yml kafka 33 | -------------------------------------------------------------------------------- /kafka/deploy.dev/cron/increase-replication-factor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | server="$1" 4 | shift 1 5 | zookeeper="$1" 6 | shift 1 7 | properties_file="$1" 8 | shift 1 9 | topic="$1" 10 | shift 1 11 | new_rf="$@" 12 | 13 | 14 | if [[ -z "$server" ]] | [[ -z "$zookeeper" ]] | [[ -z "$properties_file" ]] | [[ -z "$topic" ]] | [[ -z "$new_rf" ]]; then 15 | echo "Usage $0 " 16 | exit 1 17 | fi 18 | 19 | RES=$(kafka-topics --bootstrap-server ${server} --command-config ${properties_file} --describe --topic ${topic} | grep Replicas:) 20 | 21 | partitions=$(echo "$RES"| wc -l) 22 | 23 | echo "RES is $RES" 24 | 25 | 26 | DOCUMENT=' 27 | {"version":1, 28 | "partitions":[ 29 | ' 30 | 31 | PARTITIONS="" 32 | 33 | for p in $(seq 0 $(( $partitions - 1 ))); do 34 | 35 | if [ -n "$PARTITIONS" ]; then 36 | PARTITIONS="${PARTITIONS}, " 37 | fi 38 | PARTITIONS="${PARTITIONS}{\"topic\":\"$topic\",\"partition\": $p," 39 | current_leader=$(echo "$RES" | grep -E "Partition: $p\s+" | awk '{ print $6 }') 40 | replicas="[$current_leader" 41 | for r in $new_rf; do 42 | if [ $current_leader != $r ]; then 43 | replicas="${replicas},$r" 44 | fi 45 | done 46 | replicas="${replicas}]" 47 | 48 | PARTITIONS="${PARTITIONS} \"replicas\": $replicas }" 49 | done 50 | 51 | FINAL_DOC="${DOCUMENT}${PARTITIONS}]}" 52 | 53 | echo "$FINAL_DOC" > "${topic}_reassign.$$.json" 54 | 55 | echo "Will make the following reassignment choices:" 56 | echo "$FINAL_DOC" 57 | echo "" 58 | 59 | read -p "Press enter to execute the increase in replication factor..." REPLY 60 | kafka-reassign-partitions --zookeeper $zookeeper --reassignment-json-file "${topic}_reassign.$$.json" --execute 61 | 62 | 63 | sleep 2 64 | echo "Verifying that reassignment has completed...." 
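# For reference, the ${topic}_reassign.$$.json written above has this shape
# (hypothetical topic name and broker ids):
#   {"version":1,
#    "partitions":[
#     {"topic":"demo","partition": 0, "replicas": [1,2,3] }, {"topic":"demo","partition": 1, "replicas": [2,1,3] }]}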
65 | CMD="kafka-reassign-partitions --zookeeper $zookeeper --reassignment-json-file ${topic}_reassign.$$.json --verify" 66 | 67 | 68 | while true 69 | do 70 | RES=$($CMD) 71 | if [ $(echo $RES | grep 'still in progress'| echo $?) -ne 0 ]; then 72 | echo "Reassignment of $topic is still in progress. Waiting..." 73 | sleep 5 74 | else 75 | echo "Reassignment completed." 76 | break 77 | fi 78 | done 79 | 80 | printf "\n\n\n\n\n" 81 | -------------------------------------------------------------------------------- /kafka/deploy.dev/cron/kafka-brokers-migrate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd `dirname $0` 3 | 4 | if [ -z $1 ]; then 5 | echo 'No zookeeper service address specified ...' 6 | echo 'Usage: '$0' 192.168.2.16:2181' 7 | exit 8 | fi 9 | 10 | ZK_HOST=$1 11 | 12 | unset KAFKA_OPTS JMX_PORT 13 | 14 | topics=`kafka-topics --bootstrap-server localhost:9092 --list` 15 | 16 | TOPICJSON='{"version":1,"topics":[' 17 | for topic in $topics 18 | do 19 | TOPICJSON+='{"topic":"'${topic}'"},' 20 | done 21 | TOPICJSON=${TOPICJSON:0:-1} 22 | TOPICJSON+=']}' 23 | 24 | echo $TOPICJSON | python -c 'import json, sys; f = open("topics-to-migrate.json", "w"); json.dump(json.load(sys.stdin), f, indent=4); f.close();' 25 | # 生成执行迁移计划 26 | kafka-reassign-partitions --zookeeper $ZK_HOST --generate --topics-to-move-json-file topics-to-migrate.json --broker-list ?,?,? > topics-to-reassign.json.bak 27 | # 仅保留最后一行 28 | tail -n 1 topics-to-reassign.json.bak > topics-to-reassign.json 29 | # 开始执行 30 | # kafka-reassign-partitions --zookeeper $ZK_HOST --execute --reassignment-json-file topics-to-reassign.json 31 | # 验证过程 32 | # kafka-reassign-partitions --zookeeper $ZK_HOST --verify --reassignment-json-file topics-to-reassign.json | grep -v successfully | wc -l 33 | -------------------------------------------------------------------------------- /kafka/deploy.dev/cron/kafka-connect-change-log-level.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | if [ -z $1 ]; then 6 | echo 'No kafka connect service address specified ...' 7 | echo 'Usage: '$0' http://10.191.6.53:8083' 8 | exit 9 | fi 10 | 11 | # What time is it Mr Wolf? 12 | date 13 | 14 | KAFKA_CONNECT_SERVICE_URI=$1 15 | 16 | # Change Log Levels - Use the Connect API 17 | # https://docs.confluent.io/platform/current/connect/logging.html#check-log-levels 18 | 19 | curl -sS -v "${KAFKA_CONNECT_SERVICE_URI}/admin/loggers" | jq . 20 | 21 | curl -sS -v "${KAFKA_CONNECT_SERVICE_URI}/admin/loggers/io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource" | jq . 22 | 23 | curl -sS -v -X PUT -H "Content-Type: application/json" \ 24 | "${KAFKA_CONNECT_SERVICE_URI}/admin/loggers/io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource" \ 25 | -d '{"level": "DEBUG"}' | jq '.' 26 | 27 | sleep 10 28 | 29 | curl -sS -v -X PUT -H "Content-Type: application/json" \ 30 | "${KAFKA_CONNECT_SERVICE_URI}/admin/loggers/io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource" \ 31 | -d '{"level": "ERROR"}' | jq '.' 
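# Note: the Connect loggers endpoint answers each PUT above with the list of logger
# names whose level was modified, e.g. (illustrative):
#   ["io.debezium.connector.sqlserver.SqlServerStreamingChangeEventSource"]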
32 | 33 | 34 | # ./kafka-connect-change-log-level.sh http://10.13.4.123:8083 35 | # ./kafka-connect-change-log-level.sh http://10.13.52.28:8083 36 | # ./kafka-connect-change-log-level.sh http://10.99.169.119:8083 37 | # ./kafka-connect-change-log-level.sh http://10.45.35.33:8083 -------------------------------------------------------------------------------- /kafka/deploy.dev/cron/kafka-connect-restart-failed-tasks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | if [ -z $1 ]; then 6 | echo 'No kafka connect service address specified ...' 7 | echo 'Usage: '$0' http://10.191.6.53:8083' 8 | exit 9 | fi 10 | 11 | # What time is it Mr Wolf? 12 | date 13 | 14 | KAFKA_CONNECT_SERVICE_URI=$1 15 | 16 | # List current connectors and status 17 | # curl -sS "${KAFKA_CONNECT_SERVICE_URI}/connectors" | \ 18 | # jq '.[]' | \ 19 | # xargs -I{connector_name} curl -sS "${KAFKA_CONNECT_SERVICE_URI}/connectors/"{connector_name}"/status" | \ 20 | # jq -c -M '[.name, .connector.state, .tasks[].state] | join(":|:")' | \ 21 | # column -s : -t | \ 22 | # tr -d "\"" | \ 23 | # sort 24 | 25 | # Restart any connector tasks that are FAILED 26 | # curl -sS "${KAFKA_CONNECT_SERVICE_URI}/connectors" | \ 27 | # jq '.[]' | \ 28 | # xargs -I{connector_name} curl -sS "${KAFKA_CONNECT_SERVICE_URI}/connectors/"{connector_name}"/status" | \ 29 | # jq -c -M '{name: .name, task: .tasks[]} | select(.task.state=="FAILED") | [.name, .task.id|tostring] | join("/tasks/")' | \ 30 | # tr -d "\"" | \ 31 | # xargs -I{connector_and_task} curl -v -X POST "${KAFKA_CONNECT_SERVICE_URI}/connectors/"{connector_and_task}"/restart" 32 | 33 | # List current connectors and status 34 | curl -sS "${KAFKA_CONNECT_SERVICE_URI}/connectors?expand=status&expand=info" | \ 35 | jq '.[] | [.status.type, .status.name, .status.connector.state, .status.tasks[].state] | join(":|:")' | \ 36 | tr -d "\"" | sort | \ 37 | column -s ":" -t 38 | 39 | # Restart any connector tasks that are FAILED 40 | # Works for Apache Kafka >= 2.3.0 41 | curl -sS "${KAFKA_CONNECT_SERVICE_URI}/connectors?expand=status" | \ 42 | jq '.[] | {name: .status.name, task: .status.tasks[]} | select(.task.state=="FAILED") | [.name, .task.id|tostring] | join("/tasks/")' | \ 43 | tr -d "\"" | \ 44 | xargs -I{connector_and_task} curl -v -X POST "${KAFKA_CONNECT_SERVICE_URI}/connectors/"{connector_and_task}"/restart" 45 | # xargs -I{connector_and_task} echo "${KAFKA_CONNECT_SERVICE_URI}/connectors/"{connector_and_task}"/restart" 46 | -------------------------------------------------------------------------------- /kafka/deploy.dev/cron/kafka-topics-delete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd `dirname $0` 3 | 4 | if [ -z $1 ]; then 5 | echo 'No zookeeper service address specified ...' 
6 | echo 'Usage: '$0' 192.168.2.16:2181' 7 | exit 8 | fi 9 | 10 | ZK_HOST=$1 11 | 12 | unset KAFKA_OPTS JMX_PORT 13 | 14 | for t in $(kafka-topics --list --bootstrap-server localhost:19092 | grep -E '^CDC-(\w+)-(\w+)-(\w+)\.dbo\.(\w+)$') 15 | do echo "[`date`]DELETE TOPIC: $t"; kafka-topics --zookeeper $ZK_HOST --delete --topic $t; done 16 | -------------------------------------------------------------------------------- /kafka/deploy.dev/cron/kafka-topics-illegal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd `dirname $0` 3 | 4 | if [ -z $1 ]; then 5 | echo 'No zookeeper service address specified ...' 6 | echo 'Usage: '$0' 192.168.2.16:2181' 7 | exit 8 | fi 9 | 10 | ZK_HOST=$1 11 | 12 | unset KAFKA_OPTS JMX_PORT 13 | 14 | # 减少不必要的查询 15 | topics=$(kafka-topics --zookeeper $ZK_HOST --describe | grep -E 'ReplicationFactor: 1' | awk '{ print $2}') 16 | # or topics=$(kafka-topics --bootstrap-server localhost:9093 --command-config /kafka/tools/tools.properties --describe) 17 | 18 | for t in $topics; do 19 | replicsNum=`kafka-topics --zookeeper $ZK_HOST --describe --topic $t|grep ReplicationFactor|awk '{print $6}'` 20 | 21 | partitionCount=`kafka-topics --zookeeper $ZK_HOST --describe --topic $t|grep PartitionCount|awk '{print $4}'` 22 | 23 | if [ "${replicsNum}" -lt 2 ]; then 24 | echo "Topic: ${t} Replics: ${replicsNum} Partitions: ${partitionCount}" 25 | fi 26 | 27 | # if [ $PartitionCount -lt 3 ]; then 28 | # kafka-topics --zookeeper $ZK_HOST --alter --partitions 3 --topic $t 29 | # fi 30 | done -------------------------------------------------------------------------------- /kafka/deploy.dev/cron/kafka-topics-replication-change.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Sample usage: 4 | # ./bulk-topic-replication-change.sh node1:9091 zk1:2181 ~/ssl_command.config 1 2 3 4 5 6 5 | 6 | unset KAFKA_OPTS JMX_PORT 7 | 8 | broker="$1" 9 | shift 1 10 | zookeeper="$1" 11 | shift 1 12 | properties_file="$1" 13 | shift 1 14 | new_rf="$@" 15 | 16 | if [ -z "$broker" ] | [ -z "$zookeeper" ] | [ -z "$properties_file" ] | [ -z "$new_rf" ]; then 17 | echo "Usage $0 " 18 | echo "" 19 | exit 1 20 | fi 21 | 22 | #FIXME: Regex pattern 'ReplicationFactor: [12345]' means I want to increase RF for any topics with replication factor of 1,2,3,4, or 5 23 | topic_list=$(kafka-topics --bootstrap-server $broker --command-config $properties_file -describe | grep -E 'ReplicationFactor: [1]' | awk '{ print $2}') 24 | 25 | LOG_FILE="bulk-change.log" 26 | for t in $topic_list; do 27 | echo $t | tee -a $LOG_FILE 28 | bash ./increase-replication-factor.sh $broker $zookeeper $properties_file $t $new_rf | tee -a $LOG_FILE 29 | done 30 | -------------------------------------------------------------------------------- /kafka/jmx_exporter/.gitignore: -------------------------------------------------------------------------------- 1 | *.jar -------------------------------------------------------------------------------- /kafka/jmx_exporter/zookeeper-agent.yaml: -------------------------------------------------------------------------------- 1 | startDelaySeconds: 0 2 | ssl: false 3 | lowercaseOutputName: true 4 | lowercaseOutputLabelNames: false 5 | 6 | rules: 7 | # replicated Zookeeper 8 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 9 | name: "zookeeper_$2" 10 | type: GAUGE 11 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 12 | name: "zookeeper_$3" 13 | type: GAUGE 14 | labels: 15 | 
replicaId: "$2" 16 | - pattern: "org.apache.ZooKeeperService<>(Packets\\w+)" 17 | name: "zookeeper_$4" 18 | type: COUNTER 19 | labels: 20 | replicaId: "$2" 21 | memberType: "$3" 22 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 23 | name: "zookeeper_$4" 24 | type: GAUGE 25 | labels: 26 | replicaId: "$2" 27 | memberType: "$3" 28 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 29 | name: "zookeeper_$4_$5" 30 | type: GAUGE 31 | labels: 32 | replicaId: "$2" 33 | memberType: "$3" 34 | # # standalone Zookeeper 35 | # - pattern: "org.apache.ZooKeeperService<>(\\w+)" 36 | # type: GAUGE 37 | # name: "zookeeper_$2" 38 | # - pattern: "org.apache.ZooKeeperService<>(\\w+)" 39 | # type: GAUGE 40 | # name: "zookeeper_$2" -------------------------------------------------------------------------------- /kafka/kafka-connect/.gitignore: -------------------------------------------------------------------------------- 1 | plugins -------------------------------------------------------------------------------- /kafka/kafka-connect/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG TAG 2 | 3 | # Ref: https://docs.confluent.io/platform/current/installation/versions-interoperability.html 4 | FROM registry.inventec/proxy/confluentinc/cp-kafka-connect:${TAG} 5 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 6 | 7 | WORKDIR /usr/share/confluent-hub-components 8 | 9 | ENV CONNECT_PLUGIN_PATH="/usr/share/java,/usr/share/confluent-hub-components" 10 | RUN \ 11 | # Remove the old connector plugin 12 | # https://docs.confluent.io/platform/current/connect/upgrade.html 13 | rm -rf /usr/share/java/kafka-connect-jdbc && \ 14 | # https://www.confluent.io/hub/confluentinc/kafka-connect-jdbc 15 | confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.7.6 && \ 16 | # https://www.confluent.io/hub/debezium/debezium-connector-sqlserver 17 | confluent-hub install --no-prompt debezium/debezium-connector-sqlserver:2.4.2 && \ 18 | # https://www.confluent.io/hub/debezium/debezium-connector-postgresql 19 | confluent-hub install --no-prompt debezium/debezium-connector-postgresql:2.4.2 && \ 20 | # https://www.confluent.io/hub/mongodb/kafka-connect-mongodb 21 | confluent-hub install --no-prompt mongodb/kafka-connect-mongodb:1.11.2 && \ 22 | # https://www.confluent.io/hub/debezium/debezium-connector-mongodb 23 | confluent-hub install --no-prompt debezium/debezium-connector-mongodb:2.4.2 && \ 24 | # https://www.confluent.io/hub/jcustenborder/kafka-connect-transform-common 25 | confluent-hub install --no-prompt jcustenborder/kafka-connect-transform-common:0.1.0.58 26 | 27 | -------------------------------------------------------------------------------- /kafka/kafka-connect/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | REGISTRY=registry.inventec/infra 6 | TAG=7.5.3 7 | PROXY=http://10.190.81.209:3389/ 8 | 9 | docker build --rm -f Dockerfile \ 10 | -t ${REGISTRY}/confluentinc/cp-kafka-connect:${TAG} \ 11 | --build-arg TAG=${TAG} \ 12 | --build-arg http_proxy=${PROXY} \ 13 | --build-arg https_proxy=${PROXY} \ 14 | . 
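# Optional sanity check before pushing (illustrative): confirm the connector plugins
# installed by confluent-hub are present in the image.
# docker run --rm --entrypoint ls \
#   ${REGISTRY}/confluentinc/cp-kafka-connect:${TAG} /usr/share/confluent-hub-components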
15 | docker push ${REGISTRY}/confluentinc/cp-kafka-connect:${TAG} 16 | -------------------------------------------------------------------------------- /keepalived: -------------------------------------------------------------------------------- 1 | postgres/deploy.dev/keepalived -------------------------------------------------------------------------------- /kettle/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.inventec/proxy/maven:3.9-eclipse-temurin-11 AS builder 2 | 3 | # 安装依赖工具 4 | RUN apt-get update && apt-get install -y \ 5 | git \ 6 | zip \ 7 | wget \ 8 | && rm -rf /var/lib/apt/lists/* 9 | 10 | COPY maven/settings-docker.xml /usr/share/maven/ref/ 11 | 12 | WORKDIR /workspace 13 | 14 | ARG KETTLE_BRANCH=9.5 15 | 16 | # build maven-parent-poms 17 | RUN git clone --depth 1 --branch ${KETTLE_BRANCH} https://github.com/pentaho/maven-parent-poms.git && \ 18 | cd maven-parent-poms && \ 19 | mvn clean install -DskipTests 20 | 21 | # fix: 额外的依赖 22 | # build https://github.com/pentaho/metastore.git 23 | RUN git clone --depth 1 --branch ${KETTLE_BRANCH} https://github.com/pentaho/metastore.git && \ 24 | cd metastore && \ 25 | mvn clean install -DskipTests 26 | 27 | # fix: 额外的依赖,真不知道 kettle 怎么 build 出来的 28 | # ??? 29 | 30 | # build kettle 31 | RUN git clone --depth 1 --branch ${KETTLE_BRANCH} https://github.com/pentaho/pentaho-kettle.git && \ 32 | cd pentaho-kettle && \ 33 | mvn clean package -DskipTests 34 | 35 | # # 阶段 2:构建最小运行时镜像 36 | # FROM registry.inventec/proxy/openjdk:25-ea-11-jdk-slim AS runtime 37 | 38 | # WORKDIR /opt 39 | 40 | # COPY --from=builder /workspace/pentaho-kettle/assemblies/client/target/pdi-ce-*-SNAPSHOT.zip /opt/pdi-ce.zip 41 | 42 | # RUN apt-get update && apt-get install -y unzip && unzip /opt/pdi-ce.zip -d /opt && rm /opt/pdi-ce.zip 43 | 44 | # WORKDIR /opt/pdi/data-integration 45 | 46 | # # 设置默认执行 kitchen(可更换为 spoon.sh 或 pan.sh) 47 | # CMD ["./kitchen.sh"] 48 | -------------------------------------------------------------------------------- /kettle/README.md: -------------------------------------------------------------------------------- 1 | # 坑 2 | 3 | ## Build 4 | 5 | ### v9.5 BUILD FAILURE 6 | 7 | ```bash 8 | [INFO] ------------------------------------------------------------------------ 9 | [INFO] BUILD FAILURE 10 | [INFO] ------------------------------------------------------------------------ 11 | [INFO] Total time: 01:35 min 12 | [INFO] Finished at: 2025-06-09T04:55:13Z 13 | [INFO] ------------------------------------------------------------------------ 14 | [ERROR] Failed to execute goal on project kettle-core: Could not resolve dependencies for project pentaho-kettle:kettle-core:jar:9.5.0.0-SNAPSHOT 15 | [ERROR] dependency: org.pentaho:pentaho-encryption-support:jar:9.5.0.0-SNAPSHOT (compile) 16 | [ERROR] Could not find artifact org.pentaho:pentaho-encryption-support:jar:9.5.0.0-SNAPSHOT in pentaho-public (https://repo.orl.eng.hitachivantara.com/artifactory/pnt-mvn/) 17 | [ERROR] 18 | [ERROR] -> [Help 1] 19 | [ERROR] 20 | [ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch. 21 | [ERROR] Re-run Maven using the -X switch to enable full debug logging. 
22 | [ERROR] 23 | [ERROR] For more information about the errors and possible solutions, please read the following articles: 24 | [ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/DependencyResolutionException 25 | [ERROR] 26 | [ERROR] After correcting the problems, you can resume the build with the command 27 | [ERROR] mvn -rf :kettle-core 28 | The command '/bin/sh -c git clone --depth 1 --branch ${KETTLE_BRANCH} https://github.com/pentaho/pentaho-kettle.git && cd pentaho-kettle && mvn clean package -DskipTests' returned a non-zero code: 1 29 | ``` 30 | 31 | ### v9.4 BUILD FAILURE 32 | 33 | 依赖包还是下载不全,个别包没有上推到公共仓库 34 | -------------------------------------------------------------------------------- /kettle/build.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | cd `dirname $0` 3 | 4 | PROXY=http://10.190.81.209:3389/ 5 | KETTLE_BRANCH=9.5 6 | 7 | docker build --rm -f Dockerfile \ 8 | -t registry.inventec/infra/kettle:${KETTLE_BRANCH} \ 9 | --build-arg http_proxy=${PROXY} \ 10 | --build-arg https_proxy=${PROXY} \ 11 | --build-arg no_proxy=localhost,127.0.0.1,nexus.itc.inventec.net \ 12 | --build-arg KETTLE_BRANCH=${KETTLE_BRANCH} \ 13 | . 14 | 15 | -------------------------------------------------------------------------------- /kettle/maven/settings-docker.xml: -------------------------------------------------------------------------------- 1 | 5 | /usr/share/maven/ref/repository 6 | 7 | 8 | 9 | nexus-public 10 | nexus repository 11 | https://nexus.itc.inventec.net/repository/maven-public/ 12 | central 13 | 14 | 15 | -------------------------------------------------------------------------------- /kong/config/my-server.kong.conf: -------------------------------------------------------------------------------- 1 | # custom server 2 | upstream gpcc { 3 | server 10.3.205.90:28080; 4 | } 5 | 6 | server { 7 | listen 8000; 8 | server_name infra-gpcc.itc.inventec; 9 | access_log /dev/stdout; 10 | error_log /dev/stderr notice; 11 | 12 | location / { 13 | proxy_pass http://gpcc; 14 | proxy_http_version 1.1; 15 | proxy_set_header Host $host; 16 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 17 | proxy_set_header X-Real-IP $remote_addr; 18 | } 19 | 20 | location /gpcc_v2/websocket { 21 | proxy_pass http://gpcc; 22 | proxy_http_version 1.1; 23 | # Ref: http://nginx.org/en/docs/varindex.html 24 | proxy_set_header Host $host; 25 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 26 | proxy_set_header X-Real-IP $remote_addr; 27 | proxy_set_header Origin ""; 28 | # http://nginx.org/en/docs/http/websocket.html 29 | proxy_set_header Upgrade $http_upgrade; 30 | proxy_set_header Connection "upgrade"; 31 | } 32 | 33 | } -------------------------------------------------------------------------------- /kong/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | kong: 4 | image: registry.inventec/hub/kong:1.5.1 5 | container_name: kong 6 | restart: always 7 | logging: 8 | driver: 'json-file' 9 | options: 10 | max-size: 10m 11 | ports: 12 | - 80:8000 13 | - 8443:8443 14 | - 8001:8001 15 | - 8444:8444 16 | volumes: 17 | # 当且仅当修改Nginx全局配置的时候才需要修改该文件 18 | - ./config/nginx_kong.lua:/usr/local/share/lua/5.1/kong/templates/nginx_kong.lua 19 | # 修改http/server块中配置,可以修改此配置,但不包括location块中配置 20 | # - ./config/kong.conf:/etc/kong/kong.conf 21 | # 声明需要反向代理的服务,注册一下需要挂载到网关的服务 22 | # - ./config/declare-config.yaml:/usr/local/kong/declarative/kong.yml 
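# A minimal declare-config.yaml for this DB-less setup might look like the sketch below
# (illustrative; the service/host values are taken from my-server.kong.conf above):
#   _format_version: "1.1"
#   services:
#     - name: gpcc
#       url: http://10.3.205.90:28080
#       routes:
#         - name: gpcc-route
#           hosts:
#             - infra-gpcc.itc.inventec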
23 | # 如果反向代理的配置含有特殊的处理,可以采用自定义的方式 24 | - ./config/my-server.kong.conf:/etc/kong/my-server.kong.conf 25 | environment: 26 | - KONG_DATABASE=off 27 | - KONG_PROXY_ERROR_LOG=/dev/stderr 28 | - KONG_ADMIN_ERROR_LOG=/dev/stderr 29 | - KONG_PROXY_ACCESS_LOG=/dev/null 30 | - KONG_ADMIN_ACCESS_LOG=/dev/null 31 | - KONG_ADMIN_LISTEN=0.0.0.0:8001, 0.0.0.0:8444 ssl 32 | # - KONG_DECLARATIVE_CONFIG=/usr/local/kong/declarative/kong.yml 33 | - KONG_NGINX_HTTP_INCLUDE=/etc/kong/my-server.kong.conf 34 | 35 | # networks: 36 | # default: 37 | # external: 38 | # name: bdc 39 | 40 | # 修改配置之后,可以采用如下方式进行配置热更新 41 | # docker exec kong kong reload -------------------------------------------------------------------------------- /minio/downloader/requirements.txt: -------------------------------------------------------------------------------- 1 | minio==7.2.15 2 | psycopg2-binary -------------------------------------------------------------------------------- /mongo/deploy.dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 推送启动脚本至部署目录 4 | INVENTORY_FILE=../inventory.dev 5 | 6 | # 传出配置文件 7 | ansible -i $INVENTORY_FILE mongo -m copy -a "src=deploy.dev/ dest=/opt/mongo" 8 | # ansible -i $INVENTORY_FILE mongo -m file -a "path=/data/ssd1/mongo state=absent" 9 | ansible -i $INVENTORY_FILE mongo -m file -a "dest=/data/ssd1/mongo/configdb owner=999 group=999 mode=700 state=directory" -b 10 | ansible -i $INVENTORY_FILE mongo -m file -a "dest=/data/ssd1/mongo/db owner=999 group=999 mode=700 state=directory" -b 11 | ansible -i $INVENTORY_FILE mongo -m file -a "dest=/data/ssd1/mongo/logs owner=999 group=999 mode=700 state=directory" -b 12 | 13 | # 执行启动命令 14 | ansible -i $INVENTORY_FILE mongo -m shell -a "cd /opt/mongo && docker-compose up -d" 15 | -------------------------------------------------------------------------------- /mongo/deploy.dev/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | 4 | mongo: 5 | image: mongo:4.0.14 6 | container_name: mongo 7 | hostname: mongo 8 | # network_mode: host 9 | ports: 10 | - "27017:27017" 11 | volumes: 12 | - /data/ssd1/mongo/configdb:/data/configdb 13 | - /data/ssd1/mongo/db:/data/db 14 | - /data/ssd1/mongo/logs:/data/logs 15 | - /etc/localtime:/etc/localtime:ro 16 | - ./setup:/docker-entrypoint-initdb.d 17 | command: --port 27017 --dbpath /data/db --logpath /data/logs/mongod.log --logappend --auth 18 | restart: always 19 | cpu_count: 2 20 | mem_limit: 4g 21 | -------------------------------------------------------------------------------- /mongo/deploy.dev/setup/mongo-init.js: -------------------------------------------------------------------------------- 1 | db = db.getSiblingDB('admin'); 2 | db.createUser( 3 | { 4 | user: "mongoadmin", 5 | pwd: "mongoadmin", 6 | roles: [ 7 | { role: "root", db: "admin" } 8 | ] 9 | } 10 | ); 11 | 12 | // Enable MongoDB's free cloud-based monitoring service, which will then receive and display 13 | // metrics about your deployment (disk utilization, CPU, operation statistics, etc). 14 | 15 | // The monitoring data will be available on a MongoDB website with a unique URL accessible to you 16 | // and anyone you share the URL with. MongoDB may use this information to make product 17 | // improvements and to suggest MongoDB products and deployment options to you. 
18 | 19 | // To enable free monitoring, run the following command: db.enableFreeMonitoring() 20 | // To permanently disable this reminder, run the following command: db.disableFreeMonitoring() 21 | db.disableFreeMonitoring() 22 | 23 | // Sort operation used more than the maximum 33554432 bytes of RAM. Add an index, or specify a smaller limit. 24 | db.adminCommand({setParameter:1, internalQueryExecMaxBlockingSortBytes:335544320}) -------------------------------------------------------------------------------- /mssql/bin/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | sqlserver: 3 | image: mcr.microsoft.com/mssql/server:2019-latest 4 | container_name: mssql 5 | ports: 6 | - "1433:1433" 7 | environment: 8 | # https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-environment-variables?view=sql-server-ver16 9 | ACCEPT_EULA: "Y" 10 | MSSQL_SA_PASSWORD: "YourStrong!Passw0rd" 11 | MSSQL_PID: "Developer" 12 | MSSQL_AGENT_ENABLED: "true" 13 | volumes: 14 | - sqlserver_data:/var/opt/mssql 15 | - ./backups:/var/opt/mssql/backups 16 | 17 | volumes: 18 | sqlserver_data: 19 | -------------------------------------------------------------------------------- /mssql/cron/backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 参数设置 4 | INSTANCE="10.190.81.165" 5 | USERNAME="sa" 6 | PASSWORD="YourStrong!Passw0rd" 7 | BACKUP_DIR="/tmp/mssql/backup" 8 | BUCKET_NAME="infra-backup" 9 | MINIO_ENDPOINT="https://infra-oss.itc.inventec.net" 10 | MAX_JOBS=4 11 | 12 | # 创建备份文件夹 13 | mkdir -p $BACKUP_DIR 14 | 15 | # 获取所有数据库列表 16 | DATABASES=$(sqlcmd -S $INSTANCE -U $USERNAME -P $PASSWORD -Q "SELECT name FROM sys.databases WHERE name NOT IN ('master', 'tempdb', 'model', 'msdb')" -h -1) 17 | 18 | # 备份和上传函数 19 | backup_and_upload() { 20 | DATABASE_NAME=$1 21 | BACKUP_PATH="$BACKUP_DIR/$DATABASE_NAME.bak" 22 | 23 | # 备份数据库 24 | echo "开始备份数据库:$DATABASE_NAME" 25 | sqlcmd -S $INSTANCE -U $USERNAME -P $PASSWORD -Q "BACKUP DATABASE [$DATABASE_NAME] TO DISK = N'$BACKUP_PATH' WITH INIT, COMPRESSION;" 26 | 27 | # 上传到 MinIO 28 | echo "上传备份文件到 MinIO:$BACKUP_PATH" 29 | aws s3 cp $BACKUP_PATH s3://$BUCKET_NAME/$DATABASE_NAME.bak --endpoint-url $MINIO_ENDPOINT 30 | 31 | echo "数据库 $DATABASE_NAME 备份并上传完成。" 32 | } 33 | 34 | # 使用 parallel 并行执行备份 35 | export -f backup_and_upload 36 | export INSTANCE USERNAME PASSWORD BACKUP_DIR BUCKET_NAME MINIO_ENDPOINT 37 | echo "$DATABASES" | parallel -j $MAX_JOBS backup_and_upload 38 | -------------------------------------------------------------------------------- /mssql/replication/.gitignore: -------------------------------------------------------------------------------- 1 | *.yml -------------------------------------------------------------------------------- /mssql/replication/requirements.txt: -------------------------------------------------------------------------------- 1 | pyyaml -------------------------------------------------------------------------------- /mysql/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | mysql: 4 | image: registry.inventec/hub/mysql:5.7.28 5 | container_name: mysql 6 | hostname: mysql 7 | network_mode: host 8 | # ports: 9 | # - "3306:3306" 10 | volumes: 11 | - /etc/localtime:/etc/localtime 12 | - /data/ssd/mysql/data:/var/lib/mysql 13 | environment: 14 | MYSQL_RANDOM_ROOT_PASSWORD: "yes" 15 | LANG: "en_US.UTF-8" 16 | command: 17 | - 
--character-set-server=utf8mb4 18 | - --collation-server=utf8mb4_general_ci 19 | - --skip-character-set-client-handshake 20 | - --innodb-buffer-pool-size=6G 21 | - --innodb-buffer-pool-instances=6 22 | - --max-connections=200 23 | - --explicit_defaults_for_timestamp 24 | - --sql-mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION 25 | restart: always 26 | cpu_count: 4 27 | mem_limit: 8g 28 | -------------------------------------------------------------------------------- /nginx/.gitignore: -------------------------------------------------------------------------------- 1 | certbot-auto -------------------------------------------------------------------------------- /nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:stable 2 | COPY ./config/nginx.conf /etc/nginx/ 3 | COPY ./html/502.html /usr/share/nginx/html/ 4 | EXPOSE 80 443 5 | CMD ["nginx", "-g", "daemon off;"] -------------------------------------------------------------------------------- /nginx/check_ssl_expiration_time.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for line in $(cat domain.txt) 4 | do 5 | domain=$(echo ${line} | awk -F':' '{print $1}') 6 | ip_pool=$(echo ${line} | awk -F '[a-z]:' '{print $2}' | sed 's/\,/ /g') 7 | for ip in ${ip_pool} 8 | do 9 | echo -e "\e[33m---------------start to check---------------\e[0m" 10 | echo -e "ip:${ip}\ndomain:${domain}" 11 | 12 | text=$(echo | openssl s_client -servername ${domain} -connect ${ip}:443 2>/dev/null | openssl x509 -noout -dates ) 13 | # 判断命令是否执行成功,执行成功的话 text 变量里面是有内容的 14 | if [[ ${text} ]] 15 | then 16 | end_date=$(echo "$text" | grep -i "notAfter" | awk -F '=' '{print $2}') # 证书过期时间 17 | end_timestamp=$(date -d "$end_date" +%s) # 转换成时间戳 18 | 19 | current_timestamp=$(date +%s) # 当前时间戳 20 | 21 | # 如果证书过期时间减去当前时间的天数小于七天的话,则提示需要准备更换证书了 22 | remain_date=$(( (${end_timestamp} - ${current_timestamp}) / 86400 )) 23 | if [[ ${remain_date} -lt 7 && ${remain_date} -ge 0 ]] 24 | then 25 | echo -e "\e[31m剩余时间小于七天!请及时更换证书!\e[0m" 26 | echo -e "\e[31mip: ${ip}, ${domain}\e[0m" 27 | elif [[ ${remain_date} -lt 0 ]] 28 | then 29 | echo -e "\e[31m证书已过期!请及时更换证书!\e[0m" 30 | else 31 | echo -e "\e[32m剩余天数为:${remain_date}\e[0m" 32 | fi 33 | else 34 | echo -e "\e[31mError!${ip}\e[0m" 35 | echo -e "\e[31m${domain}\e[0m" 36 | fi 37 | done 38 | done 39 | -------------------------------------------------------------------------------- /node/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | *.xlsx 3 | package-lock.json 4 | .DS_Store -------------------------------------------------------------------------------- /node/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:7 2 | 3 | ARG NODE_DIR=/opt 4 | WORKDIR $NODE_DIR 5 | 6 | RUN cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ 7 | && echo 'Asia/Shanghai' > /etc/timezone \ 8 | && yum install -y net-tools wget \ 9 | && yum clean all \ 10 | && wget https://nodejs.org/dist/v8.11.3/node-v8.11.3-linux-x64.tar.xz \ 11 | && xz -d node-v8.11.3-linux-x64.tar.xz \ 12 | && tar -xvf node-v8.11.3-linux-x64.tar \ 13 | && ln -s node-v8.11.3-linux-x64 node \ 14 | && rm node-v8.11.3-linux-x64.tar 15 | 16 | # 设置环境变量 17 | ENV LANG en_US.UTF-8 18 | ENV PATH /bin:/usr/sbin:/usr/local/sbin:/usr/local/share/bin:/opt/node/bin:$PATH 19 | ENV NODE_ENV 
production 20 | 21 | ARG WORK_DIR=/opt/genesis 22 | RUN mkdir -p $WORK_DIR 23 | WORKDIR $WORK_DIR 24 | 25 | # 使用taobao npm安装依赖 26 | COPY package.json . 27 | RUN npm config set registry https://registry.npm.taobao.org \ 28 | && npm install --production \ 29 | && npm cache clean --force 30 | 31 | # 拷贝源码至容器中,但会自动忽略.dockerignore中指明的文件以及文件夹 32 | COPY ./dist . 33 | RUN chmod +x run.sh 34 | 35 | # 声明镜像内服务所监听的端口,但是此处只是声明,并不会自动完成端口映射 36 | EXPOSE 8888 37 | 38 | ENTRYPOINT [ "./run.sh" ] 39 | -------------------------------------------------------------------------------- /node/build.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | NAME=cobol/nodejs:8.11 3 | 4 | # 添加TS编译 5 | # gulp build 6 | 7 | docker build --rm -f Dockerfile -t $NAME . 8 | -------------------------------------------------------------------------------- /node/dist/app.js: -------------------------------------------------------------------------------- 1 | var http = require("http"); 2 | 3 | http 4 | .createServer(function(request, response) { 5 | // 发送 HTTP 头部 6 | // HTTP 状态值: 200 : OK 7 | // 内容类型: text/plain 8 | response.writeHead(200, { "Content-Type": "text/plain" }); 9 | 10 | // 发送响应数据 "Hello World" 11 | response.end("Hello World\n"); 12 | }) 13 | .listen(8888); 14 | 15 | // 终端打印如下信息 16 | console.log("Server running ..."); 17 | -------------------------------------------------------------------------------- /node/dist/run.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | ./node_modules/.bin/pm2 start app.js --no-daemon -------------------------------------------------------------------------------- /node/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nodejs", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "xlsx": "https://cdn.sheetjs.com/xlsx-0.20.1/xlsx-0.20.1.tgz" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /node/test/export-excel.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 37 | -------------------------------------------------------------------------------- /node/test/export-excel.js: -------------------------------------------------------------------------------- 1 | const XLSX = require("xlsx"); 2 | 3 | (async () => { 4 | const originWorkbook = XLSX.readFile('demo.xlsx'); 5 | 6 | /* get first worksheet */ 7 | const sheetName = originWorkbook.SheetNames[0]; 8 | const worksheet = originWorkbook.Sheets[sheetName]; 9 | 10 | /* generate worksheet and workbook */ 11 | const newWorkbook = XLSX.utils.book_new(); 12 | 13 | XLSX.utils.book_append_sheet(newWorkbook, worksheet, sheetName); 14 | 15 | /* fix headers */ 16 | // XLSX.utils.sheet_add_aoa(worksheet, [["Name", "Birthday"]], { origin: "A1" }); 17 | 18 | /* calculate column width */ 19 | // const max_width = rows.reduce((w, r) => Math.max(w, r.name.length), 10); 20 | // worksheet["!cols"] = [{ wch: max_width }]; 21 | 22 | /* create an XLSX file and try to save to Presidents.xlsx */ 23 | XLSX.writeFile(newWorkbook, "demo-resave1.xlsx", { compression: false }); 24 | XLSX.writeFile(newWorkbook, "demo-resave2.xlsx", { compression: true }); 25 | })(); 
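// Optional follow-up sketch: compare the two re-saved files to see what the
// `compression` flag buys. Uses only the built-in fs module; the file names
// match the ones written by XLSX.writeFile above.
const fs = require("fs");
for (const f of ["demo-resave1.xlsx", "demo-resave2.xlsx"]) {
    console.log(f, fs.statSync(f).size, "bytes");
}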
-------------------------------------------------------------------------------- /node/test/fn_array_map.js: -------------------------------------------------------------------------------- 1 | function randomArray(len) { return Array(len).fill(0).map(() => Math.floor(Math.random() * 2e9) - 1e9) } 2 | function empty(ar) { while (ar.length) ar.pop() } 3 | function map1(arr, fn) { const newArr = []; for (let i = 0; i < arr.length; ++i) { newArr.push(fn(arr[i], i)) } return newArr; } 4 | function map2(arr, fn) { const newArr = []; for (let i = 0; i < arr.length; ++i) { newArr[i] = fn(arr[i], i); } return newArr; } 5 | function map3(arr, fn) { const newArr = Array(arr.length); for (let i = 0; i < arr.length; ++i) { newArr[i] = fn(arr[i], i); } return newArr; } 6 | function map4(arr, fn) { const newArr = new Int32Array(arr.length); for (let i = 0; i < arr.length; ++i) { newArr[i] = fn(arr[i], i); } return newArr; } 7 | function map5(arr, fn) { for (let i = 0; i < arr.length; ++i) { arr[i] = fn(arr[i], i); } return arr; } 8 | function map6(arr, fn) { const newArr = Array(arr.length); for (const i in arr) { newArr[i] = fn(arr[i], i); } return arr; } 9 | function map7(arr, fn) { return arr.map(fn) } 10 | var arr = randomArray(5e6); var start = Date.now(); map1(arr, x => x + 1); console.log('took', Date.now() - start); empty(arr) 11 | var arr = randomArray(5e6); var start = Date.now(); map2(arr, x => x + 1); console.log('took', Date.now() - start); empty(arr) 12 | var arr = randomArray(5e6); var start = Date.now(); map3(arr, x => x + 1); console.log('took', Date.now() - start); empty(arr) 13 | var arr = randomArray(5e6); var start = Date.now(); map4(arr, x => x + 1); console.log('took', Date.now() - start); empty(arr) 14 | var arr = randomArray(5e6); var start = Date.now(); map5(arr, x => x + 1); console.log('took', Date.now() - start); empty(arr) 15 | var arr = randomArray(2e6); var start = Date.now(); map6(arr, x => x + 1); console.log('took', Date.now() - start); empty(arr) 16 | var arr = randomArray(5e6); var start = Date.now(); map7(arr, x => x + 1); console.log('took', Date.now() - start); empty(arr) 17 | 18 | // 作者:力扣官方题解 19 | // 链接:https://leetcode.cn/problems/apply-transform-over-each-element-in-array/solutions/2496242/zhuan-huan-shu-zu-zhong-de-mei-ge-yuan-s-o2k7/ 20 | // 来源:力扣(LeetCode) 21 | // 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。 22 | 23 | /* 24 | took 143 25 | took 141 26 | took 34 27 | took 22 28 | took 10 -- 原地更新,空间复杂度最低 29 | took 359 30 | took 8 31 | */ 32 | -------------------------------------------------------------------------------- /parquet/diff-parquet/.gitignore: -------------------------------------------------------------------------------- 1 | cache 2 | meta.json 3 | .env -------------------------------------------------------------------------------- /parquet/diff-parquet/convert_tsms.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | from datetime import datetime 4 | 5 | import pyarrow as pa 6 | import pyarrow.compute as pc 7 | import pyarrow.parquet as pq 8 | import pytz 9 | from dotenv import load_dotenv 10 | 11 | load_dotenv() 12 | 13 | LOCAL_TIMEZONE = pytz.timezone(os.getenv("TIMEZONE", "Asia/Shanghai")) 14 | 15 | def parse_tsms(ts_value): 16 | if isinstance(ts_value, str): 17 | return LOCAL_TIMEZONE.localize(datetime.strptime(ts_value, "%Y-%m-%d %H:%M:%S")) 18 | elif isinstance(ts_value, datetime): 19 | if ts_value.tzinfo is None: 20 | return LOCAL_TIMEZONE.localize(ts_value) 21 | return 
ts_value.astimezone(LOCAL_TIMEZONE) 22 | else: 23 | raise ValueError(f"不支持的 tsms 格式: {ts_value}") 24 | 25 | def convert_tsms(input_file, output_file): 26 | try: 27 | # 读取 Parquet 文件 28 | table = pq.read_table(input_file) 29 | 30 | tsms_column = table['tsms'] 31 | 32 | # 将 tsms 字段转换为带时区的时间 33 | tsms_converted = [parse_tsms(tsms_str) for tsms_str in tsms_column.to_pylist()] 34 | 35 | # 将转换后的时间戳列转换为 pyarrow timestamp 类型 36 | tsms_timestamp = pa.array(tsms_converted, type=pa.timestamp('ms')) 37 | 38 | # 替换原始的 tsms 列为新的转换后的列 39 | new_table = table.set_column(table.schema.get_field_index('tsms'), 'tsms', tsms_timestamp) 40 | 41 | # 保存修改后的 Parquet 文件到输出目录 42 | pq.write_table(new_table, output_file) 43 | print(f"成功转换并保存:{input_file} -> {output_file}") 44 | 45 | except Exception as e: 46 | print(f"处理文件 {input_file} 时发生错误: {e}") 47 | 48 | def main(): 49 | # 定义输入目录和输出目录 50 | input_directory = './cache/BDC/IMX_PG/bdc/dw/fact_cpu_sno_parts/cdc/' 51 | output_directory = './cache/BDC/IMX_PG/bdc/dw/fact_cpu_sno_parts/cdc2/' 52 | 53 | # 获取所有 Parquet 文件的路径 54 | parquet_files = glob.glob(os.path.join(input_directory, '**', '*.parquet'), recursive=True) 55 | 56 | # 遍历每个 Parquet 文件,进行转换 57 | for input_file in parquet_files: 58 | # 生成输出文件的路径 59 | output_file = os.path.join(output_directory, os.path.relpath(input_file, input_directory)) 60 | os.makedirs(os.path.dirname(output_file), exist_ok=True) 61 | 62 | # 转换并保存 63 | convert_tsms(input_file, output_file) 64 | 65 | if __name__ == '__main__': 66 | main() 67 | -------------------------------------------------------------------------------- /parquet/diff-parquet/requirements.txt: -------------------------------------------------------------------------------- 1 | minio 2 | pyarrow 3 | pytz -------------------------------------------------------------------------------- /pgadmin4/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG VERSION=8.14 2 | FROM registry.inventec/proxy/dpage/pgadmin4:${VERSION} 3 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 4 | 5 | COPY patch/pgadmin8-support-gpdb6.patch /tmp 6 | 7 | USER root 8 | 9 | RUN apk add patch \ 10 | && cd /pgadmin4 \ 11 | && patch -p2 < /tmp/pgadmin8-support-gpdb6.patch 12 | 13 | USER pgadmin 14 | 15 | -------------------------------------------------------------------------------- /pgadmin4/build.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | cd `dirname $0` 3 | 4 | PROXY=http://10.190.81.209:3389/ 5 | VERSION=8.14 6 | 7 | # git clone https://github.com/pgadmin-org/pgadmin4.git 8 | # cd pgadmin4 9 | # git checkout REL-8_3 10 | # git apply pgadmin8-support-gpdb6.patch 11 | # make docker 12 | 13 | docker build --rm -f Dockerfile \ 14 | -t registry.inventec/infra/dpage/pgadmin4:${VERSION} \ 15 | --build-arg http_proxy=${PROXY} \ 16 | --build-arg https_proxy=${PROXY} \ 17 | --build-arg VERSION=${VERSION} \ 18 | . 
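# Optional smoke test before pushing (a sketch; `--entrypoint sh` is available since
# the upstream dpage/pgadmin4 image is Alpine-based, as the `apk add patch` step in
# the Dockerfile implies). Checks that the patched sources are in place:
# docker run --rm --entrypoint sh registry.inventec/infra/dpage/pgadmin4:${VERSION} -c 'ls /pgadmin4 | head'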
19 | 20 | docker push registry.inventec/infra/dpage/pgadmin4:${VERSION} -------------------------------------------------------------------------------- /pgadmin4/docker-compose-ssl.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | pgadmin8: 5 | image: registry.inventec/infra/dpage/pgadmin4:8.14 6 | container_name: pgadmin8 7 | ports: 8 | - "443:443" 9 | volumes: 10 | - /data/pgadmin4/pgadmin8:/var/lib/pgadmin 11 | - /data/pgadmin4/certs:/certs 12 | environment: 13 | PGADMIN_ENABLE_TLS: "True" 14 | PGADMIN_DEFAULT_EMAIL: "cobolbaby@qq.com" 15 | PGADMIN_DEFAULT_PASSWORD: "123456" 16 | GUNICORN_THREADS: "50" 17 | PGADMIN_CONFIG_MAX_LOGIN_ATTEMPTS: "10" 18 | PGADMIN_CONFIG_UPGRADE_CHECK_ENABLED: "False" 19 | PGADMIN_CONFIG_WTF_CSRF_ENABLED: "False" 20 | restart: always 21 | -------------------------------------------------------------------------------- /pgadmin4/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | pgadmin8: 5 | image: registry.inventec/infra/dpage/pgadmin4:8.14 6 | container_name: pgadmin8 7 | ports: 8 | - "80:80" 9 | volumes: 10 | - /data/pgadmin4/pgadmin8:/var/lib/pgadmin 11 | # - /data/pgadmin4/certs:/certs 12 | environment: 13 | # PGADMIN_ENABLE_TLS: "True" 14 | PGADMIN_DEFAULT_EMAIL: "cobolbaby@qq.com" 15 | PGADMIN_DEFAULT_PASSWORD: "123456" 16 | GUNICORN_THREADS: "50" 17 | PGADMIN_CONFIG_MAX_LOGIN_ATTEMPTS: "10" 18 | PGADMIN_CONFIG_UPGRADE_CHECK_ENABLED: "False" 19 | PGADMIN_CONFIG_WTF_CSRF_ENABLED: "False" 20 | restart: always 21 | -------------------------------------------------------------------------------- /pgadmin4/start.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html 4 | # /pgadmin4 $ id 5 | # uid=5050(pgadmin) gid=5050(pgadmin) 6 | # sudo chown -R 5050:5050 /data/pgadmin4/pgadmin8 7 | 8 | # docker compose -f docker-compose.yml up -d 9 | 10 | # Chrome 新版针对非 HTTPS 访问的情况,剪切板功能受限,推荐使用 TLS 证书,但要注意证书文件的命名 11 | docker compose -f docker-compose-ssl.yml up -d 12 | 13 | << comment 14 | 15 | # 如果因为忘记了超管用户的密码而造成账户被锁,请参考 16 | # https://www.pgadmin.org/docs/pgadmin4/development/restore_locked_user.html 17 | 18 | docker run -it --rm \ 19 | -v /data/pgadmin4/pgadmin8:/var/lib/pgadmin \ 20 | --entrypoint=/bin/bash \ 21 | registry.inventec/proxy/nouchka/sqlite3 22 | 23 | comment -------------------------------------------------------------------------------- /pgpool/.gitignore: -------------------------------------------------------------------------------- 1 | pgpool2_on_k8s -------------------------------------------------------------------------------- /pgpool/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | PGPOOL_VER=4.3.14 6 | 7 | # git clone https://github.com/pgpool/pgpool2_on_k8s.git 8 | cd pgpool2_on_k8s/pgpool.docker 9 | docker build --rm -f Dockerfile.pgpool \ 10 | -t pgpool/pgpool:${PGPOOL_VER} \ 11 | --build-arg PGPOOL_VER=${PGPOOL_VER} \ 12 | . 13 | 14 | cd - 15 | 16 | cd build 17 | docker build --rm -f Dockerfile \ 18 | -t registry.inventec/infra/pgpool:${PGPOOL_VER} \ 19 | --build-arg PGPOOL_VER=${PGPOOL_VER} \ 20 | . 
21 | 22 | docker push registry.inventec/infra/pgpool:${PGPOOL_VER} 23 | -------------------------------------------------------------------------------- /pgpool/build/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG PGPOOL_VER=4.3 2 | FROM pgpool/pgpool:${PGPOOL_VER} 3 | 4 | WORKDIR /opt/confd/bin/ 5 | 6 | USER root 7 | 8 | # wget 下载完之后文件大小为 0,跳过下载 9 | # RUN wget -O /usr/local/bin/confd https://github.com/kelseyhightower/confd/releases/download/v0.16.0/confd-0.16.0-linux-amd64 \ 10 | # && chmod +x /usr/local/bin/confd \ 11 | # && wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.5/dumb-init_1.2.5_x86_64 \ 12 | # && chmod +x /usr/local/bin/dumb-init 13 | 14 | ENV PGPOOL_CONF_VOLUME=/config \ 15 | PATH="/opt/confd/bin:$PGPOOL_INSTALL_DIR/bin:$PATH" 16 | 17 | COPY config/pgpool ${PGPOOL_INSTALL_DIR}/etc/ 18 | COPY config/confd /etc/confd 19 | COPY bin /opt/confd/bin 20 | 21 | # 一定要留意 pgpool.conf 的权限问题 22 | RUN chown -R postgres:postgres /etc/confd /opt/confd ${PGPOOL_INSTALL_DIR} \ 23 | && chmod +x /opt/confd/bin/* \ 24 | && chmod -R 644 ${PGPOOL_INSTALL_DIR}/etc/* 25 | 26 | USER postgres 27 | 28 | ENTRYPOINT [ "/opt/confd/bin/entrypoint.sh" ] 29 | -------------------------------------------------------------------------------- /pgpool/build/bin/confd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/pgpool/build/bin/confd -------------------------------------------------------------------------------- /pgpool/build/bin/dumb-init: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/pgpool/build/bin/dumb-init -------------------------------------------------------------------------------- /pgpool/build/bin/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PATRONI_NAMESPACE=${PATRONI_NAMESPACE:-/service} 4 | # rtrim / 5 | readonly PATRONI_NAMESPACE=${PATRONI_NAMESPACE%/} 6 | readonly PATRONI_SCOPE=${PATRONI_SCOPE:-batman} 7 | readonly CONFD_BACKEND=${CONFD_BACKEND:-etcd} 8 | readonly PGPOOL_HEALTH_CHECK_USER=${PGPOOL_HEALTH_CHECK_USER:-nobody} 9 | readonly PGPOOL_HEALTH_CHECK_PASSWORD=${PGPOOL_HEALTH_CHECK_PASSWORD:-} 10 | readonly PGPOOL_PORT=${PGPOOL_PORT:-5432} 11 | readonly PGPOOL_NUM_INIT_CHILDREN=${PGPOOL_NUM_INIT_CHILDREN:-200} 12 | 13 | # Dynamic configuration 14 | sed -i "s/^#*\s*sr_check_user\s*=.*/sr_check_user = '${PGPOOL_HEALTH_CHECK_USER}'/" /etc/confd/templates/pgpool.tmpl 15 | sed -i "s/^#*\s*health_check_user\s*=.*/health_check_user = '${PGPOOL_HEALTH_CHECK_USER}'/" /etc/confd/templates/pgpool.tmpl 16 | sed -i "s/^#*\s*port\s*=.*/port = ${PGPOOL_PORT}/" /etc/confd/templates/pgpool.tmpl 17 | sed -i "s/^#*\s*num_init_children\s*=.*/num_init_children = ${PGPOOL_NUM_INIT_CHILDREN}/" /etc/confd/templates/pgpool.tmpl 18 | echo "${PGPOOL_HEALTH_CHECK_USER}:TEXT${PGPOOL_HEALTH_CHECK_PASSWORD}" > ${PGPOOL_INSTALL_DIR}/etc/pool_passwd 19 | 20 | # Start Confd 21 | CONFD="confd -prefix=${PATRONI_NAMESPACE}/${PATRONI_SCOPE} -interval=10" 22 | 23 | $CONFD -backend $CONFD_BACKEND -node $(echo $ETCDCTL_ENDPOINTS | sed 's/,/ -node /g') -onetime -sync-only 24 | 25 | # Start Pgpool-II 26 | ${PGPOOL_INSTALL_DIR}/bin/pgpool \ 27 | -f 
${PGPOOL_INSTALL_DIR}/etc/pgpool.conf \ 28 | -F ${PGPOOL_INSTALL_DIR}/etc/pcp.conf 29 | 30 | $CONFD -backend $CONFD_BACKEND -node $(echo $ETCDCTL_ENDPOINTS | sed 's/,/ -node /g') 31 | -------------------------------------------------------------------------------- /pgpool/build/config/confd/conf.d/pgpool.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | #prefix = "/service/batman" 3 | owner = "postgres" 4 | mode = "0644" 5 | src = "pgpool.tmpl" 6 | dest = "/opt/pgpool-II/etc/pgpool.conf" 7 | 8 | reload_cmd = "pgpool -f /opt/pgpool-II/etc/pgpool.conf -F /opt/pgpool-II/etc/pcp.conf -m fast stop; pgpool -f /opt/pgpool-II/etc/pgpool.conf -F /opt/pgpool-II/etc/pcp.conf -D" 9 | 10 | keys = [ 11 | "/members/", 12 | ] 13 | -------------------------------------------------------------------------------- /pgpool/build/config/pgpool/pcp.conf: -------------------------------------------------------------------------------- 1 | # PCP Client Authentication Configuration File 2 | # ============================================ 3 | # 4 | # This file contains user ID and his password for pgpool 5 | # communication manager authentication. 6 | # 7 | # Note that users defined here do not need to be PostgreSQL 8 | # users. These users are authorized ONLY for pgpool 9 | # communication manager. 10 | # 11 | # File Format 12 | # =========== 13 | # 14 | # List one UserID and password on a single line. They must 15 | # be concatenated together using ':' (colon) between them. 16 | # No spaces or tabs are allowed anywhere in the line. 17 | # 18 | # Example: 19 | # postgres:e8a48653851e28c69d0506508fb27fc5 20 | # 21 | # Be aware that there will be no spaces or tabs at the 22 | # beginning of the line! although the above example looks 23 | # like so. 24 | # 25 | # Lines beginning with '#' (pound) are comments and will 26 | # be ignored. Again, no spaces or tabs allowed before '#'. 
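# The password below must be an MD5 digest of the PCP password, not plain text.
# Pgpool-II ships the pg_md5 utility for generating it, e.g. (assuming
# $PGPOOL_INSTALL_DIR/bin is on the PATH):
#
#   pg_md5 <pcp-password>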
27 | 28 | # USERID:MD5PASSWD 29 | pgpool:ba777e4c2f15c11ea8ac3be7e0440aa0 30 | -------------------------------------------------------------------------------- /pgpool/build/config/pgpool/pool_passwd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/pgpool/build/config/pgpool/pool_passwd -------------------------------------------------------------------------------- /pgpool/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | 3 | services: 4 | pgpool: 5 | image: registry.inventec/infra/pgpool:4.3.1 6 | container_name: pgpool 7 | hostname: pgpool 8 | ports: 9 | - "5494:5432" 10 | volumes: 11 | - /etc/localtime:/etc/localtime:ro 12 | environment: 13 | PATRONI_NAMESPACE: 14 | PATRONI_SCOPE: 15 | ETCDCTL_ENDPOINTS: 16 | restart: always 17 | -------------------------------------------------------------------------------- /playground/python/.gitignore: -------------------------------------------------------------------------------- 1 | # *.pdf 2 | *.xlsx 3 | *.docx 4 | *.pptx 5 | *.txt 6 | *.pyc -------------------------------------------------------------------------------- /playground/python/fill_array_by_column.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def create_and_fill_array(rows, cols): 4 | # 创建一个二维数组,初始值为0 5 | arr = np.zeros((rows, cols), dtype=int) 6 | 7 | # 为每个元素赋值 8 | for i in range(rows): 9 | for j in range(cols): 10 | arr[j][i] = i * cols + j # 或者使用其他你想要的赋值逻辑 11 | 12 | return arr 13 | 14 | # 定义数组的大小 15 | rows = 10240 16 | cols = 10240 17 | 18 | # 创建并填充数组 19 | my_array = create_and_fill_array(rows, cols) 20 | 21 | # 打印数组的一部分,仅作为示例 22 | print("Array slice:") 23 | print(my_array[:3, :3]) 24 | -------------------------------------------------------------------------------- /playground/python/fill_array_by_row.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def create_and_fill_array(rows, cols): 4 | # 创建一个二维数组,初始值为0 5 | arr = np.zeros((rows, cols), dtype=int) 6 | 7 | # 为每个元素赋值 8 | for i in range(rows): 9 | for j in range(cols): 10 | arr[i][j] = i * cols + j # 或者使用其他你想要的赋值逻辑 11 | 12 | return arr 13 | 14 | # 定义数组的大小 15 | rows = 10240 16 | cols = 10240 17 | 18 | # 创建并填充数组 19 | my_array = create_and_fill_array(rows, cols) 20 | 21 | # 打印数组的一部分,仅作为示例 22 | print("Array slice:") 23 | print(my_array[:3, :3]) 24 | -------------------------------------------------------------------------------- /playground/python/webserver_fastapi.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | import uvicorn 3 | import json 4 | import time 5 | 6 | app = FastAPI() 7 | 8 | @app.get("/") 9 | async def index(): 10 | return {"message": "Welcome to FastAPI!"} 11 | 12 | @app.get("/calculate") 13 | async def calculate(): 14 | # 模拟复杂的计算操作 15 | time.sleep(0.1) # 模拟耗时操作 16 | return {"result": 42} 17 | 18 | if __name__ == "__main__": 19 | uvicorn.run(app, host="0.0.0.0", port=8001) 20 | 21 | ''' 22 | ab -n 100 -c 50 http://127.0.0.1:8001/calculate 23 | This is ApacheBench, Version 2.3 <$Revision: 1879490 $> 24 | Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ 25 | Licensed to The Apache Software Foundation, http://www.apache.org/ 26 | 27 | Benchmarking 127.0.0.1 (be patient).....done 
28 | 29 | 30 | Server Software: uvicorn 31 | Server Hostname: 127.0.0.1 32 | Server Port: 8001 33 | 34 | Document Path: /calculate 35 | Document Length: 13 bytes 36 | 37 | Concurrency Level: 50 38 | Time taken for tests: 10.275 seconds 39 | Complete requests: 100 40 | Failed requests: 0 41 | Total transferred: 15700 bytes 42 | HTML transferred: 1300 bytes 43 | Requests per second: 9.73 [#/sec] (mean) 44 | Time per request: 5137.583 [ms] (mean) 45 | Time per request: 102.752 [ms] (mean, across all concurrent requests) 46 | Transfer rate: 1.49 [Kbytes/sec] received 47 | 48 | Connection Times (ms) 49 | min mean[+/-sd] median max 50 | Connect: 0 2 1.0 2 4 51 | Processing: 108 4493 1583.0 5130 7797 52 | Waiting: 103 3334 1424.5 3700 5143 53 | Total: 108 4495 1582.5 5130 7798 54 | 55 | Percentage of the requests served within a certain time (ms) 56 | 50% 5130 57 | 66% 5133 58 | 75% 5134 59 | 80% 5139 60 | 90% 5140 61 | 95% 7797 62 | 98% 7798 63 | 99% 7798 64 | 100% 7798 (longest request) 65 | ''' -------------------------------------------------------------------------------- /playground/python/题库+答案.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/playground/python/题库+答案.pdf -------------------------------------------------------------------------------- /playground/shell/show_progress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 获取当前时间戳(毫秒) 4 | getCurrentTimeMillis() { 5 | echo $(date +%s%3N) 6 | } 7 | 8 | # 对小数四舍五入 9 | round() { 10 | printf "%.0f" $1 11 | } 12 | 13 | # 根据传入的完成比显示进度条 14 | showProgress() { 15 | percentComplete=$1 16 | # 进度条长度 17 | barlen=$(tput cols|awk '{print $1-22}') 18 | # 已完成部分的长度 19 | completed=$(round $(echo "$barlen*$percentComplete"|bc)) 20 | equals=$(printf "%0.s=" $(seq 1 $completed)) 21 | equals=$(echo $equals|sed 's/=$/>/') 22 | 23 | spaces="" 24 | if [ $completed -lt $barlen ];then 25 | # 未完成部分的长度 26 | incomplete=$(($barlen - $completed)) 27 | if [ $completed -eq 0 ];then 28 | let incomplete-- 29 | fi 30 | spaces=$(printf "%0.s " $(seq 1 $incomplete)) 31 | fi 32 | 33 | # 将完成比转换成百分数 34 | percentage=$(round $(echo "$percentComplete*100"|bc)) 35 | # 计算耗时 36 | elapsed=$(echo "scale=1; ($(getCurrentTimeMillis)-${startTimeMillis})/1000"|bc) 37 | # 打印进度等信息 38 | printf "\r进度 %4d%%[%s%s] in %0.1fs" $percentage $equals "$spaces" $elapsed 39 | if [ "$percentage" == "100" ];then 40 | echo 41 | fi 42 | } 43 | 44 | # 总任务数 45 | total=30 46 | # 当前完成的任务数 47 | current=0 48 | # 开始时间 49 | startTimeMillis=$(getCurrentTimeMillis) 50 | 51 | showProgress 0 52 | while [ $current -lt $total ]; do 53 | # 模拟任务完成 54 | ((current++)) 55 | # 计算完成比 56 | percentComplete=$(echo "scale=2;$current/$total"|bc) 57 | showProgress $percentComplete 58 | done -------------------------------------------------------------------------------- /postgres/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | PROXY=http://10.190.81.209:3389/ 6 | PG_VERSION=16.8 7 | 8 | cd build 9 | 10 | # docker buildx rm container-builder 11 | 12 | # docker buildx create \ 13 | # --name container-builder \ 14 | # --driver docker-container \ 15 | # --use --bootstrap 16 | 17 | # 遇到了 registry.inventec 无法解析和证书的问题 18 | # https://github.com/docker/buildx/issues/835 19 | 20 | docker buildx build --rm -f Dockerfile \ 21 | -t 
registry.inventec/infra/postgres:${PG_VERSION} \ 22 | --build-arg PG_VERSION=${PG_VERSION} \ 23 | --build-arg http_proxy=${PROXY} \ 24 | --build-arg https_proxy=${PROXY} \ 25 | . 26 | # --platform linux/amd64,linux/arm64 \ 27 | docker push registry.inventec/infra/postgres:${PG_VERSION} 28 | -------------------------------------------------------------------------------- /postgres/build/bin/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 用于支持pgbackrest备份 4 | sudo /usr/sbin/sshd 5 | 6 | # 获取启动模式,采用Patroni Or Not 7 | 8 | # 如果采用 Patroni 则执行 patroni + yaml 9 | 10 | # 如果是启动单点,则参考 PG 原始镜像的写法 11 | readonly PATRONI_ENABLE=${PATRONI_ENABLE:-false} 12 | 13 | if [[ "$PATRONI_ENABLE" != "true" ]]; then 14 | echo "【`date`】Start a single instance..." 15 | docker-entrypoint.sh postgres 16 | exit 17 | fi 18 | 19 | echo "【`date`】Start Patroni..." 20 | 21 | # Ref: https://www.cyberciti.biz/tips/bash-shell-parameter-substitution-2.html 22 | # Remove Pattern (Back of $VAR) 23 | PATRONI_NAMESPACE=${PATRONI_NAMESPACE:-/pgcluster} 24 | DOCKER_HOSTNAME=$(hostname -I | cut -d' ' -f1) 25 | 26 | export PATRONI_NAMESPACE=${PATRONI_NAMESPACE%/} 27 | export PATRONI_SCOPE=${PATRONI_SCOPE:-pgcluster_dev_12} 28 | export PATRONI_NAME="${PATRONI_NAME:-$(hostname)}" 29 | export PATRONI_RESTAPI_LISTEN="0.0.0.0:8008" 30 | export PATRONI_RESTAPI_CONNECT_ADDRESS="${PATRONI_RESTAPI_CONNECT_ADDRESS:-$DOCKER_HOSTNAME:8008}" 31 | export PATRONI_POSTGRESQL_LISTEN="0.0.0.0:5432" 32 | export PATRONI_POSTGRESQL_CONNECT_ADDRESS="${PATRONI_POSTGRESQL_CONNECT_ADDRESS:-$DOCKER_HOSTNAME:5432}" 33 | export PATRONI_POSTGRESQL_DATA_DIR="${PATRONI_POSTGRESQL_DATA_DIR:-$PGDATA}" 34 | export PATRONI_REPLICATION_USERNAME="${PATRONI_REPLICATION_USERNAME:-replicator}" 35 | export PATRONI_REPLICATION_PASSWORD="${PATRONI_REPLICATION_PASSWORD:-1Password}" 36 | export PATRONI_SUPERUSER_USERNAME="${PATRONI_SUPERUSER_USERNAME:-postgres}" 37 | export PATRONI_SUPERUSER_PASSWORD="${PATRONI_SUPERUSER_PASSWORD:-1Password}" 38 | 39 | PATRONI_TAGS_NOFAILOVER=${PATRONI_TAGS_NOFAILOVER:-false} 40 | 41 | if [[ "$PATRONI_TAGS_NOFAILOVER" == "true" ]]; then 42 | sed -i 's/nofailover: false/nofailover: true/' /home/postgres/.config/patroni/patronictl.yaml 43 | fi 44 | 45 | readonly PATRONI_CLUSTER_MODE=${PATRONI_CLUSTER_MODE:-normal} 46 | 47 | case $PATRONI_CLUSTER_MODE in 48 | "normal") 49 | exec patroni /home/postgres/.config/patroni/patronictl.yaml 2>&1 50 | ;; 51 | # "pgbackrest") 52 | # # 如果配置为 pgbackrest,则需要校验 stanza 的配置是否存在,如果不存在,则需要创建? 53 | # # 但貌似创建的时候需要数据库处于启动状态,所以没办法在集群没有创建的时候配置 stanza 54 | # exec patroni /home/postgres/.config/patroni/patronictl_pgbackrest.yaml 2>&1 55 | # ;; 56 | "restore") 57 | exec patroni /home/postgres/.config/patroni/patronictl_pgbackrest_restore.yaml 2>&1 58 | ;; 59 | esac 60 | -------------------------------------------------------------------------------- /postgres/build/bin/reinit_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 判断是否做了备份检查配置 4 | 5 | pgbackrest info --stanza=${PG_RESTORE_STANZA} --log-level-console=info 6 | if [ $? 
-ne 0 ] 7 | then 8 | echo "$(date "+%Y-%m-%d %H:%M:%S") Error: please check backup" 9 | exit 1 10 | else 11 | echo "$(date "+%Y-%m-%d %H:%M:%S") INFO: reinit from ${PG_RESTORE_STANZA}, start restore" 12 | pgbackrest restore --delta --stanza=${PG_RESTORE_STANZA} --log-level-console=info 13 | echo "$(date "+%Y-%m-%d %H:%M:%S") INFO: end restore" 14 | fi 15 | 16 | # Bug: 17 | # postgres@pg1201:~$ ~/reinit_script.sh 18 | # stanza: itc 19 | # status: error (missing stanza path) 20 | # 2022-02-25 18:37:38 INFO: reinit from itc, start restore 21 | # 2022-02-25 18:37:38.119 P00 INFO: restore command begin 2.37: --delta --exec-id=436136-aa804472 --log-level-console=info --log-level-file=info --pg1-path=/var/lib/postgresql/12/data --process-max=4 --repo1-host=pgbackrest --repo1-host-user=postgres --stanza=itc 22 | # WARN: --delta or --force specified but unable to find 'PG_VERSION' or 'backup.manifest' in '/var/lib/postgresql/12/data' to confirm that this is a valid $PGDATA directory. --delta and --force have been disabled and if any files exist in the destination directories the restore will be aborted. 23 | # WARN: repo1: [FileMissingError] unable to load info file '/postgresql/12/backup/itc/backup.info' or '/postgresql/12/backup/itc/backup.info.copy': 24 | # FileMissingError: raised from remote-0 ssh protocol on 'pgbackrest': unable to open missing file '/postgresql/12/backup/itc/backup.info' for read 25 | # FileMissingError: raised from remote-0 ssh protocol on 'pgbackrest': unable to open missing file '/postgresql/12/backup/itc/backup.info.copy' for read 26 | # HINT: backup.info cannot be opened and is required to perform a backup. 27 | # HINT: has a stanza-create been performed? 28 | # ERROR: [075]: no backup set found to restore 29 | # 2022-02-25 18:37:38.387 P00 INFO: restore command end: aborted with exception [075] 30 | # 2022-02-25 18:37:38 INFO: end restore 31 | # postgres@pg1201:~$ echo $? 
32 | # 0 33 | -------------------------------------------------------------------------------- /postgres/build/dict/jieba_user.dict: -------------------------------------------------------------------------------- 1 | 云计算 2 | 韩玉鉴赏 3 | 蓝翔 nz 4 | 区块链 10 nz 5 | AI助手 6 | 新华三 -------------------------------------------------------------------------------- /postgres/deploy.dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd `dirname $0` 3 | 4 | # 前置操作: 5 | # docker network create --driver bridge --subnet=10.14.0.0/16 infra 6 | 7 | INVENTORY_FILE=../inventory.dev 8 | 9 | # 创建数据目录 10 | ansible -i $INVENTORY_FILE postgres -m file -a "dest=/data/hdd/pg/12/data owner=999 group=999 mode=700 state=directory" -b 11 | ansible -i $INVENTORY_FILE postgres -m file -a "dest=/data/ssd/pg/12/data owner=999 group=999 mode=700 state=directory" -b 12 | 13 | # 传出配置文件 14 | ansible -i $INVENTORY_FILE postgres -m copy -a "src=deploy.dev/ dest=/opt/postgres" -b 15 | 16 | # 执行启动命令 17 | ansible -i $INVENTORY_FILE pg01 -m raw -a "cd /opt/postgres/bin && docker-compose -f docker-compose-pg01.yml up -d" 18 | ansible -i $INVENTORY_FILE pg02 -m raw -a "cd /opt/postgres/bin && docker-compose -f docker-compose-pg02.yml up -d" 19 | # Keepalived 20 | ansible -i $INVENTORY_FILE postgres -m raw -a "chmod +x /opt/postgres/keepalived/*/*.sh" -b 21 | ansible -i $INVENTORY_FILE pg01 -m raw -a "cd /opt/postgres/keepalived && docker-compose -f docker-compose-keepalived-master.yml up -d" 22 | ansible -i $INVENTORY_FILE pg02 -m raw -a "cd /opt/postgres/keepalived && docker-compose -f docker-compose-keepalived-standby.yml up -d" 23 | -------------------------------------------------------------------------------- /postgres/deploy.dev/bin/.env: -------------------------------------------------------------------------------- 1 | PATRONI_ENABLE='true' 2 | PATRONI_NAMESPACE=pgcluster 3 | PATRONI_SCOPE=pgcluster_itc_12_tpm 4 | PATRONI_ETCD3_HOSTS="'10.190.50.51:8911','10.190.50.51:8912','10.190.50.51:8913'" 5 | PATRONI_SUPERUSER_USERNAME=postgres 6 | PATRONI_SUPERUSER_PASSWORD=****** 7 | PATRONI_REPLICATION_USERNAME=replicator 8 | PATRONI_REPLICATION_PASSWORD=****** 9 | # PATRONI_RESTAPI_USERNAME=admin 10 | # PATRONI_RESTAPI_PASSWORD=****** 11 | # PATRONI_LOG_LEVEL=DEBUG 12 | PGDATA=/var/lib/postgresql/12/data 13 | # PG_RESTORE_STANZA=itc -------------------------------------------------------------------------------- /postgres/deploy.dev/bin/docker-compose-pg01.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | 3 | services: 4 | postgres: 5 | image: registry.inventec/infra/postgres:12.14 6 | container_name: pg01 7 | hostname: pg01 8 | ports: 9 | - 5432:5432 10 | - 8008:8008 11 | volumes: 12 | - /etc/localtime:/etc/localtime:ro 13 | - /data/ssd/pg/12/data:/var/lib/postgresql/12/data 14 | # https://docs.docker.com/compose/compose-file/05-services/#env_file 15 | env_file: .env 16 | environment: 17 | PATRONI_NAME: pg01 18 | PATRONI_POSTGRESQL_CONNECT_ADDRESS: 10.190.50.51:5432 19 | PATRONI_RESTAPI_CONNECT_ADDRESS: 10.190.50.51:8008 20 | user: postgres 21 | restart: always 22 | # cpu_count: 4 23 | # mem_limit: 8g 24 | shm_size: 1g 25 | 26 | networks: 27 | default: 28 | external: 29 | name: tpm 30 | -------------------------------------------------------------------------------- /postgres/deploy.dev/bin/docker-compose-pg02.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | 3 | 
services: 4 | postgres: 5 | image: registry.inventec/infra/postgres:12.14 6 | container_name: pg02 7 | hostname: pg02 8 | ports: 9 | - 5432:5432 10 | - 8008:8008 11 | volumes: 12 | - /etc/localtime:/etc/localtime:ro 13 | - /data/ssd/pg/12/data:/var/lib/postgresql/12/data 14 | env_file: .env 15 | environment: 16 | PATRONI_NAME: pg02 17 | PATRONI_POSTGRESQL_CONNECT_ADDRESS: 10.190.50.58:5432 18 | PATRONI_RESTAPI_CONNECT_ADDRESS: 10.190.50.58:8008 19 | user: postgres 20 | restart: always 21 | # cpu_count: 4 22 | # mem_limit: 8g 23 | shm_size: 1g 24 | 25 | networks: 26 | default: 27 | external: 28 | name: tpm 29 | -------------------------------------------------------------------------------- /postgres/deploy.dev/cron/.env: -------------------------------------------------------------------------------- 1 | export PGHOST=10.191.7.119 2 | export PGPORT= 3 | export PGDATABASE= 4 | export PGUSER= 5 | export PGPASSWORD= 6 | 7 | export GPHOST= 8 | export GPPORT= 9 | export GPDATABASE= 10 | export GPUSER= 11 | export GPPASSWORD= 12 | 13 | export MC_HOST_backup=http://?:?@? 14 | export MC_BUCKET_backup= 15 | 16 | export MC_HOST_public=http://?:?@? 17 | -------------------------------------------------------------------------------- /postgres/deploy.dev/init/init.sql: -------------------------------------------------------------------------------- 1 | -- 192.168.2.120 5493 2 | 3 | CREATE USER IPTMesUser WITH LOGIN PASSWORD '???'; 4 | -- DROP DATABASE dwf; 5 | CREATE DATABASE dwf OWNER IPTMesUser; 6 | 7 | \c dwf 8 | -- 回收默认权限 9 | ALTER SCHEMA public OWNER TO IPTMesUser; 10 | REVOKE ALL ON SCHEMA public FROM PUBLIC; 11 | REVOKE CONNECT ON DATABASE template1 FROM PUBLIC; 12 | REVOKE CONNECT ON DATABASE dwf FROM PUBLIC; 13 | CREATE EXTENSION postgres_fdw; 14 | GRANT USAGE ON foreign data wrapper postgres_fdw to IPTMesUser; 15 | \q 16 | 17 | -- pg_dump -h 192.168.16.91 -p 5493 -U postgres -Fc -O -x postgres | pg_restore --no-owner -U iptmesuser -d dwf 18 | -- CREATE USER MAPPING FOR IPTMesUser SERVER dwf_postgres_fdw_server_246 OPTIONS ( 19 | -- password '123456', 20 | -- "user" 'postgres' 21 | -- ); 22 | -------------------------------------------------------------------------------- /postgres/deploy.dev/keepalived/docker-compose-keepalived-master.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | 4 | keepalived: 5 | image: registry.inventec/infra/keepalived:2.0.17 6 | container_name: keepalived 7 | hostname: keepalived 8 | volumes: 9 | - "./volumes/pgkeepa_master.conf:/container/service/keepalived/assets/keepalived.conf:ro" 10 | - "./volumes/check.sh:/check.sh" 11 | network_mode: host 12 | cap_add: 13 | - NET_ADMIN 14 | - NET_BROADCAST 15 | - NET_RAW 16 | command: --loglevel debug --copy-service 17 | restart: always 18 | cpu_count: 1 19 | mem_limit: 1g 20 | -------------------------------------------------------------------------------- /postgres/deploy.dev/keepalived/docker-compose-keepalived-standby.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | 4 | keepalived: 5 | image: registry.inventec/infra/keepalived:2.0.17 6 | container_name: keepalived 7 | hostname: keepalived 8 | volumes: 9 | - "./volumes/pgkeepa_backup.conf:/container/service/keepalived/assets/keepalived.conf:ro" 10 | - "./volumes/check.sh:/check.sh" 11 | network_mode: host 12 | cap_add: 13 | - NET_ADMIN 14 | - NET_BROADCAST 15 | - NET_RAW 16 | command: --loglevel debug --copy-service 17 | 
restart: always 18 | cpu_count: 1 19 | mem_limit: 1g 20 | -------------------------------------------------------------------------------- /postgres/deploy.dev/keepalived/volumes/check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Load Env 4 | export PGPORT=5432 5 | export PGUSER=postgres 6 | export PGPASSWORD=****** 7 | 8 | # 如果是备库,则退出,此脚本不检查备库存活状态 9 | standby_flg=`psql -h 127.0.0.1 -p $PGPORT -U $PGUSER -d postgres -At -c "select pg_is_in_recovery();"` 10 | 11 | # 先判断上面的语句是否执行成功?如果没有成功,直接返回2 12 | if [ $? -ne 0 ]; then 13 | # psql: could not connect to server 14 | # psql: the database system is starting up 15 | echo "`date +%F\ %T`: PG has gone away!" 16 | exit 1 17 | fi 18 | 19 | if [ ${standby_flg} == 't' ]; then 20 | echo -e "`date +%F\ %T`: This is a standby." 21 | exit 1 22 | fi 23 | 24 | exit 0 25 | -------------------------------------------------------------------------------- /postgres/deploy.dev/keepalived/volumes/pgkeepa_backup.conf: -------------------------------------------------------------------------------- 1 | global_defs { 2 | router_id LVS_pgcluster_itc 3 | default_interface eth0 4 | } 5 | 6 | vrrp_script check { 7 | script "/check.sh" 8 | interval 10 9 | weight -30 10 | } 11 | 12 | vrrp_instance VI_1 { 13 | state BACKUP 14 | interface eth0 15 | virtual_router_id 51 16 | priority 110 17 | advert_int 1 18 | # nopreempt 19 | 20 | authentication { 21 | auth_type PASS 22 | auth_pass 1111 23 | } 24 | 25 | unicast_peer { 26 | 10.190.50.51 27 | # 10.190.50.58 28 | } 29 | 30 | virtual_ipaddress { 31 | 10.190.50.55 32 | } 33 | 34 | track_script { 35 | check 36 | } 37 | } -------------------------------------------------------------------------------- /postgres/deploy.dev/keepalived/volumes/pgkeepa_master.conf: -------------------------------------------------------------------------------- 1 | global_defs { 2 | router_id LVS_pgcluster_itc 3 | default_interface eth0 4 | } 5 | 6 | vrrp_script check { 7 | script "/check.sh" 8 | interval 10 9 | weight -30 10 | } 11 | 12 | vrrp_instance VI_1 { 13 | state BACKUP 14 | interface eth0 15 | virtual_router_id 51 16 | priority 120 17 | advert_int 1 18 | # nopreempt 19 | 20 | authentication { 21 | auth_type PASS 22 | auth_pass 1111 23 | } 24 | 25 | # (VI_1) WARNING - equal priority advert received from remote host with our IP address. 
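# That warning appears when the unicast_peer list still contains this node's own
# address; listing only the remote peer (10.190.50.58 for this master) avoids it.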
26 | unicast_peer { 27 | # 10.190.50.51 28 | 10.190.50.58 29 | } 30 | 31 | virtual_ipaddress { 32 | 10.190.50.55 33 | } 34 | 35 | track_script { 36 | check 37 | } 38 | } -------------------------------------------------------------------------------- /postgres/docs/.gitignore: -------------------------------------------------------------------------------- 1 | *.pdf -------------------------------------------------------------------------------- /postgres/docs/images/graphics-bloat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/postgres/docs/images/graphics-bloat.png -------------------------------------------------------------------------------- /postgres/docs/images/graphics-vacuum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/postgres/docs/images/graphics-vacuum.png -------------------------------------------------------------------------------- /postgres/docs/images/optimizer-execution-plan-tuning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/postgres/docs/images/optimizer-execution-plan-tuning.png -------------------------------------------------------------------------------- /postgres/docs/images/optimizer-working-principle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/postgres/docs/images/optimizer-working-principle.png -------------------------------------------------------------------------------- /postgres/pg_upgrade/Dockerfile: -------------------------------------------------------------------------------- 1 | # https://github.com/tianon/docker-postgres-upgrade/blob/master/12-to-16/Dockerfile 2 | 3 | ARG OLD_VERSION 4 | ARG NEW_VERSION 5 | 6 | FROM registry.inventec/infra/postgres:${NEW_VERSION} 7 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 8 | 9 | ARG OLD_VERSION 10 | 11 | USER root 12 | 13 | RUN sed -i 's/$/ 12/' /etc/apt/sources.list.d/pgdg.list 14 | 15 | RUN set -eux; \ 16 | apt-get update; \ 17 | apt-get install -y --no-install-recommends \ 18 | # postgresql-12='12.18-1.pgdg120+2' \ 19 | postgresql-12 \ 20 | ; \ 21 | rm -rf /var/lib/apt/lists/* 22 | 23 | ENV PGBINOLD /usr/lib/postgresql/${OLD_VERSION}/bin 24 | ENV PGBINNEW /usr/lib/postgresql/${PG_MAJOR}/bin 25 | 26 | ENV PGDATAOLD /var/lib/postgresql/${OLD_VERSION}/data 27 | ENV PGDATANEW /var/lib/postgresql/${PG_MAJOR}/data 28 | 29 | RUN set -eux; \ 30 | mkdir -p "$PGDATAOLD" "$PGDATANEW"; \ 31 | chown -R postgres:postgres /var/lib/postgresql 32 | 33 | WORKDIR /var/lib/postgresql 34 | 35 | COPY docker-upgrade /usr/local/bin/ 36 | 37 | ENTRYPOINT ["docker-upgrade"] 38 | 39 | # recommended: --link 40 | CMD ["pg_upgrade"] 41 | -------------------------------------------------------------------------------- /postgres/pg_upgrade/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | PROXY=http://10.190.81.209:3389/ 6 | OLD_VERSION=12 7 | NEW_VERSION=16.2 8 | 9 | docker build --rm -f Dockerfile \ 10 | -t 
registry.inventec/infra/pg_upgrade:${OLD_VERSION}-to-${NEW_VERSION} \ 11 | --build-arg http_proxy=${PROXY} \ 12 | --build-arg https_proxy=${PROXY} \ 13 | --build-arg OLD_VERSION=${OLD_VERSION} \ 14 | --build-arg NEW_VERSION=${NEW_VERSION} \ 15 | . 16 | docker push registry.inventec/infra/pg_upgrade:${OLD_VERSION}-to-${NEW_VERSION} 17 | -------------------------------------------------------------------------------- /postgres/pg_upgrade/docker-upgrade: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ "$#" -eq 0 -o "${1:0:1}" = '-' ]; then 5 | set -- pg_upgrade "$@" 6 | fi 7 | 8 | if [ "$1" = 'pg_upgrade' -a "$(id -u)" = '0' ]; then 9 | mkdir -p "$PGDATAOLD" "$PGDATANEW" 10 | chmod 700 "$PGDATAOLD" "$PGDATANEW" 11 | chown postgres . 12 | chown -R postgres "$PGDATAOLD" "$PGDATANEW" 13 | exec gosu postgres "$BASH_SOURCE" "$@" 14 | fi 15 | 16 | if [ "$1" = 'pg_upgrade' ]; then 17 | if [ ! -s "$PGDATANEW/PG_VERSION" ]; then 18 | PGDATA="$PGDATANEW" eval "initdb $POSTGRES_INITDB_ARGS" 19 | fi 20 | fi 21 | 22 | exec "$@" -------------------------------------------------------------------------------- /postgres/recovery.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 根据物理备份做基于时间点的数据恢复 4 | # ./pg_basebackup.sh 5 | 6 | # 准备数据目录 7 | sudo mkdir -p /data/postgres/16/data 8 | 9 | # 将物理备份中的 tgz 包解压至数据目录 10 | sudo tar -zxf /data/physical_backup.tgz -C /data/postgres/16/data 11 | 12 | sudo chown -R 999:999 /data/postgres/16/data 13 | 14 | # 修改 postgresql.conf 中的 recovery 参数 15 | # recovery_target_time = '2024-11-24 00:00:30' 16 | 17 | docker run --rm --name pg1604 \ 18 | -p 5493:5432 \ 19 | -e PGDATA=/var/lib/postgresql/16/data \ 20 | -v /data/postgres/16/data:/var/lib/postgresql/16/data \ 21 | registry.inventec/infra/postgres:16.4 22 | 23 | # 结论: 无法做到基于时间点的恢复,最多恢复到备份时间点。 24 | # -------------------------------------------------------------------------------- /postgres/script/pg_badger_cron.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -e 3 | cd `dirname $0` 4 | 5 | current_date=$(date +"%Y-%m-%d") 6 | echo $current_date 7 | 8 | for i in $(docker ps | grep 'infra/postgres' | awk '{print $1}'); do 9 | # fix: docker exec -ti $i /pgcron/pg_badger.sh $current_date 执行报 the input device is not a TTY 10 | docker exec $i /pgcron/pg_badger.sh $current_date 11 | done 12 | 13 | # previous_date=$(date -d "yesterday" +"%Y-%m-%d") 14 | # echo $previous_date 15 | 16 | # for i in $(docker ps -f "name=pg" --format "{{.Names}}"); do echo $i; docker exec -ti $i /pgcron/pg_badger.sh $previous_date; done 17 | -------------------------------------------------------------------------------- /postgres/script/pg_basebackup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # set -o pipefail 4 | cd `dirname $0` 5 | 6 | pgrole=$(curl -s http://localhost:8008/patroni | jq .role | sed "s/\"//g") 7 | if [ $pgrole = "master" ]; then 8 | echo "The role of ${PATRONI_NAME} database is master" 9 | exit 10 | fi 11 | 12 | # 针对物理备份,则相反,主节点停止,从节点继续,但如果是多个从的话,就需要从中找一个了,排序之后找第一个。 13 | firstslave=$(curl -s http://localhost:8008/cluster | jq -c '.members[] | select(.role == "replica") | select(.lag == 0) | .name' | sort | head -n 1 | sed "s/\"//g") 14 | 15 | # TODO:fix: 遇到过firstslave获取为空的情况,可能是Etcd异常了,而该异常会造成两个从节点都会进行数据备份 16 | # ... 
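# One possible guard for that case (a sketch, assuming that skipping the backup is
# preferable to letting every replica run it when etcd cannot be queried):
if [ -z "$firstslave" ]; then
    echo "Could not determine the first replica (etcd unreachable?), skip backup on ${PATRONI_NAME}"
    exit 1
fi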
17 | 18 | # PATRONI_NAME为当前节点名称 19 | if [ $firstslave != $PATRONI_NAME ]; then 20 | echo "The current role of ${PATRONI_NAME} database is ${pgrole}, and the first slave node is ${firstslave}" 21 | exit 22 | fi 23 | 24 | echo "【`date`】Start to do pg_basebackup..." 25 | # exit 26 | BACKUP_DIR=/pgbackup/$(date +%Y%m%d%H%M%S)/basebackup 27 | mkdir -p ${BACKUP_DIR} && cd ${BACKUP_DIR} 28 | 29 | # -P 是否显示进度 30 | # -Ft -z是否压缩 31 | # time pg_basebackup -U postgres -D ${BACKUP_DIR} -Ft -z -Xs -Pv 32 | # time pg_basebackup -U postgres -D - -Ft -Xs -v | pigz -6 -p 32 > ${BACKUP_DIR}/physical_backup.tgz 33 | # pg_basebackup: cannot stream write-ahead logs in tar mode to stdout 34 | time pg_basebackup -U postgres -D - -Ft -X fetch -v -c fast | pigz -6 -p 32 > ${BACKUP_DIR}/physical_backup.tgz 35 | 36 | # 删除一个月之前的物理备份文件 37 | cd /pgbackup 38 | du -sh * 39 | ls -t | tail -n +5 | xargs rm -rf 40 | du -sh * 41 | 42 | exit 0 43 | 44 | # mc cp --recursive ${BACKUP_DIR} backup/infra-backup/postgresql/ 45 | -------------------------------------------------------------------------------- /postgres/script/pg_basebackup_cron.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -e 3 | cd `dirname $0` 4 | 5 | for i in $(docker ps | grep 'infra/postgres' | awk '{print $1}'); do 6 | echo "【`date`】Running db backup job..." 7 | docker exec $i /pgcron/pg_basebackup.sh 8 | done 9 | -------------------------------------------------------------------------------- /postgres/script/pg_dump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # set -o pipefail 4 | cd `dirname $0` 5 | 6 | # 提前到 pg_dump_cron.sh 中执行 7 | # source .env 8 | 9 | archive_s3_bucket=${MC_BUCKET_backup:-infra-backup} 10 | archive_s3_path=postgresql 11 | archive_subdir=$(date +%Y%m%d%H%M%S) 12 | 13 | echo "【`date`】Start to pg_dump DB..." 14 | 15 | # user, tablespace 16 | echo "【`date`】Dump global information, including users and tablespaces:" 17 | time pg_dumpall --globals-only | mc pipe backup/${archive_s3_bucket}/${archive_s3_path}/${PGHOST}/${archive_subdir}/pg_globals.sql 18 | 19 | # schema, table, function, data 20 | for db in $(psql postgres -Atc 'select datname from pg_database;' | grep -vE 'postgres|template|_del$|_bak$|bdc|spc|chicony|kettle') 21 | do 22 | echo "【`date`】Dump DB ${db}:" 23 | time pg_dump -C -Fc ${db} -T 'public.plt_sys_log' | mc pipe backup/${archive_s3_bucket}/${archive_s3_path}/${PGHOST}/${archive_subdir}/${db}.dump 24 | done 25 | 26 | # schema, table, function 27 | for db in $(psql postgres -Atc 'select datname from pg_database;' | grep -E 'bdc|spc|chicony|kettle') 28 | do 29 | echo "【`date`】Dump DB ${db}:" 30 | time pg_dump -C -s ${db} | mc pipe backup/${archive_s3_bucket}/${archive_s3_path}/${PGHOST}/${archive_subdir}/${db}.sql 31 | done 32 | 33 | echo "【`date`】pg_dump DB end." 34 | 35 | exit 0 -------------------------------------------------------------------------------- /postgres/script/pg_dump_cron.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | # echo "【`date`】Getting the container ID of Postgres" 6 | # CONTAINERID=`docker ps --filter name=pg --format "{{.ID}}"` 7 | 8 | echo "【`date`】Running db backup job..." 
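# A minimal restore sketch for the custom-format dumps written by pg_dump.sh above (not run
# by this cron job; the bucket layout mirrors the variables used there, and <pghost>,
# <timestamp> and <db> are placeholders):
#
#   mc cat backup/infra-backup/postgresql/<pghost>/<timestamp>/<db>.dump \
#       | pg_restore -U postgres -C -d postgres
#
# -C recreates the database named in the dump, so the connection only needs to point at the
# postgres maintenance DB.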
9 | # docker exec ${CONTAINERID} /pgcron/pg_dump.sh 10 | 11 | source .env 12 | ./pg_dump.sh 13 | -------------------------------------------------------------------------------- /postgres/test/node.js/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | package-lock.json -------------------------------------------------------------------------------- /postgres/test/node.js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "pg": "^8.11.3" 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /postgres/test/node.js/pg_return_bigint_as_string.js: -------------------------------------------------------------------------------- 1 | const { Pool, types } = require('pg'); 2 | 3 | types.setTypeParser(types.builtins.INT8, (val) => parseInt(val)); 4 | 5 | // PostgreSQL数据库连接配置 6 | const pool = new Pool({ 7 | user: 'your_username', 8 | host: 'your_host', 9 | database: 'your_database', 10 | password: 'your_password', 11 | port: 5432, 12 | }); 13 | 14 | // 测试用例 15 | async function testQuery() { 16 | const client = await pool.connect(); 17 | 18 | try { 19 | // 'plt_usage' 为bigint字段名 20 | const queryText = 'select plt_usage from plt_spare_sparesnusage limit 1'; 21 | const result = await client.query(queryText); 22 | 23 | // 输出查询结果的数据类型和值 24 | if (result.rows.length > 0) { 25 | const bigintValue = result.rows[0].plt_usage; 26 | console.log(`Type of bigint column: ${typeof bigintValue}`); 27 | console.log(`Value of bigint column: ${bigintValue}`); 28 | } else { 29 | console.log('No rows returned from the query.'); 30 | } 31 | } catch (error) { 32 | console.error('Error executing query:', error); 33 | } finally { 34 | client.release(); 35 | // 关闭数据库连接池 36 | pool.end(); 37 | } 38 | } 39 | 40 | // 执行测试用例 41 | testQuery(); 42 | -------------------------------------------------------------------------------- /prometheus/deploy.dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd `dirname $0` 3 | # docker run --rm telegraf:1.13.0-alpine telegraf config > telegraf.default.conf 4 | # sed '/^\s*#\|^$/d' ./deploy.dev/config/telegraf/telegraf.default.conf > ./deploy.dev/config/telegraf/telegraf.conf 5 | # remove the option: [[outputs.influxdb]] 6 | 7 | # 推送启动脚本至部署目录 8 | INVENTORY_FILE=../inventory.dev 9 | 10 | # 传出配置文件 11 | # ansible -i $INVENTORY_FILE all -m file -a "dest=/opt/prometheus/textfile mode=777 state=directory" -b 12 | # ansible -i $INVENTORY_FILE all -m copy -a "src=deploy.dev/config/node-exporter dest=/opt/prometheus/config/" -b 13 | # ansible -i $INVENTORY_FILE all -m raw -a "chmod +x /opt/prometheus/config/node-exporter/smartmon.sh" -b 14 | # ansible -i $INVENTORY_FILE all -m cron -a 'name="smartmon" state=absent' 15 | # ansible -i $INVENTORY_FILE all -m cron -a 'name="smartmon" minute=0 hour=*/4 job="/opt/prometheus/config/node-exporter/smartmon.sh > /opt/prometheus/textfile/smartmon.prom 2>&1"' -b 16 | # ansible -i $INVENTORY_FILE all -m copy -a "src=deploy.dev/docker-compose-node.yml dest=/opt/prometheus/" -b 17 | # ansible -i $INVENTORY_FILE all -m raw -a "docker-compose -f /opt/prometheus/docker-compose-node.yml up -d" 18 | # exit 19 | 20 | # ansible -i $INVENTORY_FILE prom01 -m file -a "dest=/data/hdd4/prometheus/data owner=999 group=999 mode=777 state=directory" -b 21 | 22 | # 传出配置文件 23 | ansible -i $INVENTORY_FILE prom01 -m copy -a "src=deploy.dev/ 
dest=/opt/prometheus" 24 | 25 | # 执行启动命令 26 | ansible -i $INVENTORY_FILE prom01 -m raw -a "chmod +x /opt/prometheus/start.sh" 27 | ansible -i $INVENTORY_FILE prom01 -m raw -a "/opt/prometheus/start.sh" 28 | -------------------------------------------------------------------------------- /prometheus/deploy.dev/config/alertmanager/config/wechat.tmpl: -------------------------------------------------------------------------------- 1 | {{ define "wechat.default.message" }} 2 | {{- if gt (len .Alerts.Firing) 0 -}} 3 | {{- range $index, $alert := .Alerts -}} 4 | {{- if eq $index 0 -}} 5 | 告警类型: {{ $alert.Labels.alertname }} 6 | 告警级别: {{ $alert.Labels.severity }} 7 | 8 | ===================== 9 | {{- end }} 10 | ===告警详情=== 11 | 告警详情: {{ $alert.Annotations.description }} 12 | 故障时间: {{ $alert.StartsAt.Local.Format "2006-01-02 15:04:05" }} 13 | ===================== 14 | {{- end }} 15 | {{- end }} 16 | 17 | {{- if gt (len .Alerts.Resolved) 0 -}} 18 | {{- range $index, $alert := .Alerts -}} 19 | {{- if eq $index 0 -}} 20 | 告警类型: {{ $alert.Labels.alertname }} 21 | 告警级别: {{ $alert.Labels.severity }} 22 | 23 | ===================== 24 | {{- end }} 25 | ===告警详情=== 26 | 告警详情: {{ $alert.Annotations.description }} 27 | 故障时间: {{ $alert.StartsAt.Local.Format "2006-01-02 15:04:05" }} 28 | 恢复时间: {{ $alert.EndsAt.Local.Format "2006-01-02 15:04:05" }} 29 | ===================== 30 | {{- end }} 31 | {{- end }} 32 | {{- end }} -------------------------------------------------------------------------------- /prometheus/deploy.dev/config/kafka-lag-exporter/application.conf: -------------------------------------------------------------------------------- 1 | kafka-lag-exporter { 2 | sinks = ["PrometheusEndpointSink"] 3 | 4 | reporters { 5 | prometheus { 6 | port = 8000 7 | } 8 | } 9 | 10 | poll-interval = "60 seconds" 11 | 12 | lookup-table-size = 120 13 | 14 | client-group-id = "kafkaLagExporter" 15 | 16 | clusters = [ 17 | { 18 | name = "" 19 | bootstrap-brokers = "" 20 | topic-whitelist = [ 21 | ] 22 | # TODO:topic-whitelist 与 group-whitelist 都配上,过滤是与的关系而非或 23 | # group-whitelist = [ 24 | # ] 25 | consumer-properties = { 26 | client.id = "kafkaLagExporter" 27 | } 28 | admin-client-properties = { 29 | client.id = "kafkaLagExporter" 30 | } 31 | labels = { 32 | } 33 | } 34 | ] 35 | } -------------------------------------------------------------------------------- /prometheus/deploy.dev/config/kafka-lag-exporter/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | %date{ISO8601} %-5level %logger{36} %X{akkaSource} - %msg %ex%n 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /prometheus/deploy.dev/config/prometheus/flink-alert-rules.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: flink-alerting 3 | rules: 4 | - alert: flink-no-running-job 5 | expr: sum(flink_jobmanager_numRunningJobs) by (instance, host) == 0 6 | for: 2m 7 | labels: 8 | severity: yellow 9 | annotations: 10 | summary: "No running jobs on flink ({{ $labels.host }})" 11 | description: "No running jobs on flink ({{ $labels.host }})" 12 | 13 | - alert: flink-restarting-job 14 | expr: increase(flink_jobmanager_job_numRestarts[5m]) > 0 15 | for: 2m 16 | labels: 17 | severity: yellow 18 | annotations: 19 | summary: "Job {{ $labels.job_name }} is restarted on flink ({{ $labels.host }})" 20 | description: "Job {{ $labels.job_name }} is restarted on flink ({{ 
$labels.host }})" 21 | 22 | - alert: flink-restarting-job 23 | expr: increase(flink_jobmanager_job_numRestarts[5m]) > 0 24 | for: 10m 25 | labels: 26 | severity: orange 27 | annotations: 28 | summary: "Job {{ $labels.job_name }} is restarted on flink ({{ $labels.host }})" 29 | description: "Job {{ $labels.job_name }} is restarted on flink ({{ $labels.host }})" 30 | 31 | - alert: flink-restarting-job 32 | expr: increase(flink_jobmanager_job_numRestarts[5m]) > 0 33 | for: 30m 34 | labels: 35 | severity: red 36 | annotations: 37 | summary: "Job {{ $labels.job_name }} is restarted on flink ({{ $labels.host }})" 38 | description: "Job {{ $labels.job_name }} is restarted on flink ({{ $labels.host }})" 39 | -------------------------------------------------------------------------------- /prometheus/deploy.dev/config/prometheus/kafka-alert-rules.yml: -------------------------------------------------------------------------------- 1 | # TODO:消费流量持续超过 5MB 达到 30min,怀疑客户端有问题 2 | 3 | -------------------------------------------------------------------------------- /prometheus/deploy.dev/config/prometheus/postgres-alert-rules.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: postgres-alerting 3 | rules: 4 | - alert: PostgresqlTooManyConnections 5 | expr: sum by (job, instance, server) (pg_stat_activity_count{datname!~"template.*|postgres"}) > pg_settings_max_connections * 0.9 6 | for: 10m 7 | labels: 8 | severity: orange 9 | annotations: 10 | summary: Postgresql too many connections (instance {{ $labels.server }}) 11 | description: "PostgreSQL instance {{ $labels.server }} has too many connections ({{ $value }})" 12 | 13 | # TODO:GP 如果出现 down 掉的节点则告警 14 | # - alert: 15 | 16 | # TODO:PG 主从同步延迟超过 100MB 17 | -------------------------------------------------------------------------------- /prometheus/deploy.dev/docker-compose-node.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | 3 | services: 4 | # Ref: https://github.com/prometheus/node_exporter/blob/master/README.md#using-docker 5 | node-exporter: 6 | image: registry.inventec/hub/prom/node-exporter:v1.0.1 7 | container_name: node-exporter 8 | network_mode: host 9 | # Swarm: Ignoring unsupported options: pid 10 | pid: host 11 | volumes: 12 | - /:/host:ro,rslave 13 | command: 14 | - --path.rootfs=/host 15 | # 解决因 df cifs (smb目录) 引起的接口响应延迟,跳过挂载的 smb 目录 16 | - --collector.filesystem.ignored-fs-types=^(devtmpfs|tmpfs|squashfs|vfat|fuse.lxcfs|cifs|overlay)$$ 17 | - --collector.netdev.device-blacklist=^(veth|br-|lxc|docker_gwbridge).*$$ 18 | - --no-collector.netclass 19 | - --collector.textfile.directory=/host/opt/prometheus/textfile 20 | restart: unless-stopped 21 | cpu_count: 2 22 | mem_limit: 128M 23 | -------------------------------------------------------------------------------- /prometheus/deploy.dev/docker-compose-pushgateway.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | pushgateway: 5 | image: registry.inventec/proxy/prom/pushgateway:latest 6 | container_name: pushgateway 7 | ports: 8 | - "9091:9091" 9 | restart: unless-stopped 10 | command: 11 | - "--web.enable-admin-api" # 可选:启用删除指标等 API 12 | volumes: 13 | - pushgateway_data:/data # 可选:数据持久化 14 | networks: 15 | - monitoring 16 | 17 | volumes: 18 | pushgateway_data: 19 | 20 | networks: 21 | monitoring: 22 | driver: bridge 23 | -------------------------------------------------------------------------------- 
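A quick smoke test for the Pushgateway service defined above (a hedged sketch; the metric and job names are invented, and the port follows the 9091 mapping in the compose file):

```bash
# Push a throwaway metric, then confirm the gateway exposes it
echo 'deploy_smoke_test 1' | curl --data-binary @- http://localhost:9091/metrics/job/smoke_test
curl -s http://localhost:9091/metrics | grep deploy_smoke_test

# Remove the test series again
curl -X DELETE http://localhost:9091/metrics/job/smoke_test
```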
/prometheus/deploy.dev/reload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | # 热更新 6 | curl -X POST localhost:9090/-/reload 7 | curl -X POST localhost:9093/-/reload -------------------------------------------------------------------------------- /prometheus/deploy.dev/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | export PG_MONITOR_USER=pgexporter PG_MONITOR_PASS=pgexporter 6 | export PG_URI=${PG_HOST}:${PG_PORT} 7 | export GP_URI=${GP_HOST}:${GP_PORT} 8 | 9 | docker stack deploy -c docker-compose-prometheus.yml prometheus 10 | 11 | sleep 10 12 | sh reload.sh 13 | -------------------------------------------------------------------------------- /prometheus/proxy-webhook/.gitignore: -------------------------------------------------------------------------------- 1 | bin -------------------------------------------------------------------------------- /prometheus/proxy-webhook/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.inventec/proxy/alpine:3.18 2 | 3 | WORKDIR /app 4 | 5 | COPY bin/proxy-webhook /app/proxy-webhook 6 | 7 | ENTRYPOINT ["/app/proxy-webhook"] 8 | -------------------------------------------------------------------------------- /prometheus/proxy-webhook/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Go HTTP Proxy with Docker build/push 2 | 3 | APP_NAME := proxy-webhook 4 | BUILD_DIR := bin 5 | SRC := main.go 6 | DOCKER_IMAGE := registry.inventec/infra/$(APP_NAME) 7 | TAG ?= latest 8 | 9 | .PHONY: all build run clean docker-build docker-push 10 | 11 | all: build 12 | 13 | build: 14 | @echo "Building $(APP_NAME)..." 15 | @mkdir -p $(BUILD_DIR) 16 | @CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o $(BUILD_DIR)/$(APP_NAME) $(SRC) 17 | @echo "Build complete: $(BUILD_DIR)/$(APP_NAME)" 18 | 19 | run: build 20 | @echo "Running $(APP_NAME)..." 21 | @./$(BUILD_DIR)/$(APP_NAME) 22 | 23 | clean: 24 | @echo "Cleaning build artifacts..." 25 | @rm -rf $(BUILD_DIR) 26 | @echo "Clean complete." 27 | 28 | docker-build: build 29 | @echo "Building Docker image $(DOCKER_IMAGE):$(TAG)..." 30 | @docker build -t $(DOCKER_IMAGE):$(TAG) . 31 | 32 | docker-push: docker-build 33 | @echo "Pushing Docker image $(DOCKER_IMAGE):$(TAG)..." 
34 | @docker push $(DOCKER_IMAGE):$(TAG) 35 | -------------------------------------------------------------------------------- /prometheus/proxy-webhook/README.md: -------------------------------------------------------------------------------- 1 | # Go HTTP Proxy Tool 2 | 3 | A simple HTTP proxy program that supports: 4 | 5 | * Printing the full content of every request (request line, headers, body) 6 | * Configuring one or more downstream target addresses; with multiple targets, requests are forwarded round-robin 7 | * A print-only mode: when no downstream target is configured, requests are only printed, not forwarded 8 | 9 | --- 10 | 11 | ## Features 12 | 13 | * Configure one or more downstream service addresses via the `-target` flag 14 | * Set the listening port via the `-port` flag, default 8080 15 | * Round-robin load balancing across multiple targets (easy to switch to a random strategy) 16 | * Request and response headers and bodies are forwarded in full, so the target receives the request unchanged 17 | 18 | --- 19 | 20 | ## Build and Run 21 | 22 | ### Requirements 23 | 24 | * Go 25 | 26 | ### Build 27 | 28 | ```bash 29 | make build 30 | ``` 31 | 32 | ### Run 33 | 34 | ```bash 35 | ./bin/proxy-webhook -target http://example1.com -target http://example2.com -port 8080 36 | ``` 37 | 38 | Example: 39 | 40 | * Listens on port 8080 41 | * Forwards requests round-robin to `http://example1.com` and `http://example2.com` 42 | 43 | --- 44 | 45 | ## Command-line Flags 46 | 47 | | Flag | Description | Default | 48 | | --------- | ------------- | ---- | 49 | | `-target` | Downstream target address (may be given multiple times) | none | 50 | | `-port` | Listening port | 8080 | 51 | 52 | --- 53 | 54 | ## Log Example 55 | 56 | ```log 57 | ======= HTTP 请求开始 ======= 58 | GET /api/v1/test HTTP/1.1 59 | Host: localhost:8080 60 | User-Agent: curl/7.68.0 61 | Accept: */* 62 | 63 | 请求体内容(如果有) 64 | ======= HTTP 请求结束 ======= 65 | ``` 66 | 67 | --- 68 | 69 | ## Makefile Targets 70 | 71 | | Target | Description | 72 | | ------------ | ------- | 73 | | `make build` | Build the binary | 74 | | `make run` | Build and run | 75 | | `make clean` | Remove build artifacts | 76 | 77 | --- 78 | 79 | Questions and issues are welcome: open an issue or contact the author. 80 | -------------------------------------------------------------------------------- /prometheus/proxy-webhook/go.mod: -------------------------------------------------------------------------------- 1 | module proxy-webhook 2 | 3 | go 1.24.2 4 | 5 | require ( 6 | github.com/prometheus/alertmanager v0.28.1 7 | github.com/prometheus/common v0.64.0 8 | ) 9 | 10 | require ( 11 | github.com/beorn7/perks v1.0.1 // indirect 12 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 13 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 14 | github.com/prometheus/client_golang v1.20.5 // indirect 15 | github.com/prometheus/client_model v0.6.2 // indirect 16 | github.com/prometheus/procfs v0.15.1 // indirect 17 | github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect 18 | github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect 19 | golang.org/x/sys v0.33.0 // indirect 20 | golang.org/x/text v0.25.0 // indirect 21 | google.golang.org/protobuf v1.36.6 // indirect 22 | ) 23 | -------------------------------------------------------------------------------- /rabbitmq/connect/.gitignore: -------------------------------------------------------------------------------- 1 | config/prod-* 2 | config/__pycache__ -------------------------------------------------------------------------------- /rabbitmq/connect/config/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /rabbitmq/connect/config/dev.py: -------------------------------------------------------------------------------- 1 | KafkaConnectEndpoint = '' 2 | 3 | RabbitMQSrv = { 4 | 'NXT': [ 5 | ('192.167.1.111', 'nxt-s44b'), 6 | ('192.167.1.112', 'nxt-s44a'), 7 | ], 8 | } 9 | 10 | RabbitMQOptions = { 11 | 'NXT': { 12 | 'vhost': 'nxt', 13 | 'username': '', 14 | 'password': '', 15 | 'exchange': 'exchange', 16 | 'queue': '', 17 |
'routing_keys': ['NXT.#'], 18 | 'ttl': 1800000, 19 | 'topic': '', 20 | 'port': 5672 21 | }, 22 | } 23 | -------------------------------------------------------------------------------- /rabbitmq/connect/requirements.txt: -------------------------------------------------------------------------------- 1 | pika==1.1.0 2 | python-nginx==1.5.4 3 | requests==2.32.4 -------------------------------------------------------------------------------- /repomanager/.gitignore: -------------------------------------------------------------------------------- 1 | cert 2 | certs -------------------------------------------------------------------------------- /repomanager/conf/nginx/verdaccio.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | listen [::]:80; 4 | server_name npm.itc.inventec; 5 | 6 | # Redirect all HTTP requests to HTTPS 7 | return 301 https://npm.itc.inventec.net$request_uri; 8 | } 9 | 10 | server { 11 | listen 80; 12 | listen [::]:80; 13 | server_name npm.itc.inventec.net; 14 | 15 | # Redirect all HTTP requests to HTTPS 16 | return 301 https://$host$request_uri; 17 | } 18 | 19 | server { 20 | listen 443 ssl; 21 | listen [::]:443 ssl; 22 | server_name npm.itc.inventec.net; 23 | 24 | ssl_certificate /etc/nginx/ssl/11268666_itc.inventec.net_nginx/itc.inventec.net.pem; 25 | ssl_certificate_key /etc/nginx/ssl/11268666_itc.inventec.net_nginx/itc.inventec.net.key; 26 | ssl_protocols TLSv1.2 TLSv1.3; 27 | ssl_ciphers HIGH:!aNULL:!MD5; 28 | 29 | client_max_body_size 100m; 30 | client_body_buffer_size 2048k; 31 | 32 | location / { 33 | proxy_pass http://verdaccio:4873; 34 | 35 | # 连接超时时间 36 | proxy_connect_timeout 5s; 37 | # 响应超时时间 38 | proxy_read_timeout 1200s; 39 | 40 | proxy_redirect off; 41 | 42 | # 重置host头 43 | proxy_set_header Host $http_host; 44 | proxy_set_header X-Real-IP $remote_addr; 45 | # 后端的Web服务器可以通过X-Forwarded-For获取用户真实IP 46 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 47 | } 48 | 49 | error_page 500 502 503 504 /50x.html; 50 | location = /50x.html { 51 | root /usr/share/nginx/html; 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /repomanager/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | 4 | verdaccio: 5 | image: verdaccio/verdaccio:5.33.0 6 | container_name: verdaccio 7 | hostname: verdaccio 8 | volumes: 9 | - "/opt/verdaccio/conf:/verdaccio/conf" 10 | - "/opt/verdaccio/storage:/verdaccio/storage" 11 | restart: always 12 | environment: 13 | VERDACCIO_PUBLIC_URL: "https://npm.itc.inventec.net" 14 | 15 | nexus3: 16 | image: sonatype/nexus3:3.69.0 17 | container_name: nexus3 18 | hostname: nexus3 19 | volumes: 20 | - "/data/nexus-data:/nexus-data" 21 | environment: 22 | INSTALL4J_ADD_VM_PARAMS: "-Xms1024m -Xmx2703m -XX:MaxDirectMemorySize=2703m -Djava.util.prefs.userRoot=/nexus-data/javaprefs" 23 | restart: always 24 | 25 | nginx: 26 | image: nginx:1.24-alpine 27 | container_name: nginx 28 | hostname: nginx 29 | ports: 30 | - "80:80" 31 | - "443:443" 32 | volumes: 33 | - "./conf/nginx:/etc/nginx/conf.d" 34 | - "./conf/cert:/etc/nginx/ssl" 35 | restart: always 36 | depends_on: 37 | - verdaccio 38 | - nexus3 39 | -------------------------------------------------------------------------------- /repomanager/start.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | cd `dirname $0` 4 | 5 | # sudo mkdir -p storage 6 | # sudo chown -R 10001:root storage 7 | 8 | docker-compose up -d 9 | -------------------------------------------------------------------------------- /samba/docker-compose-client.yml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | samba-test: 4 | image: registry.inventec/development/samba-test:latest 5 | container_name: samba-test 6 | hostname: samba-test 7 | ports: 8 | - "8080:8080" 9 | # volumes: 10 | # - /data/vscode:/home/root/.vscode 11 | restart: always 12 | # cpu_count: 2 13 | # mem_limit: 4g 14 | networks: 15 | - net 16 | 17 | networks: 18 | net: 19 | external: true 20 | name: infra 21 | -------------------------------------------------------------------------------- /samba/docker-compose-server.yml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | samba: 4 | image: dperson/samba:latest 5 | container_name: samba 6 | hostname: samba 7 | ports: 8 | - "445:445/tcp" 9 | - "139:139/tcp" 10 | volumes: 11 | - ./volume/res:/share:rw 12 | environment: 13 | TZ: 'Asia/Shanghai' 14 | command: '-s "public;/share;yes;no;no;dev" -u "dev;111111" -S' 15 | restart: always 16 | cpu_count: 2 17 | mem_limit: 4g 18 | networks: 19 | - net 20 | 21 | networks: 22 | net: 23 | external: true 24 | name: infra 25 | 26 | # Ref: https://github.com/miketeo/pysmb/issues/95#issuecomment-535349556 27 | # command: '-s "public;/share;yes;no;no;dev" -u "dev;111111" -g "server min protocol = LANMAN1"' 28 | -------------------------------------------------------------------------------- /samba/test/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.inventec/proxy/python:3.11-alpine 2 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 3 | 4 | USER root 5 | 6 | # RUN cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ 7 | # && echo 'Asia/Shanghai' > /etc/timezone \ 8 | # && yum install -y net-tools openssh-server \ 9 | # && yum clean all \ 10 | # && echo root:111111 | chpasswd \ 11 | # && ssh-keygen -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key \ 12 | # && ssh-keygen -t ecdsa -N '' -f /etc/ssh/ssh_host_ecdsa_key \ 13 | # && ssh-keygen -t ed25519 -N '' -f /etc/ssh/ssh_host_ed25519_key 14 | 15 | RUN pip install --no-cache-dir --upgrade pip -i https://nexus.itc.inventec.net/repository/pypi-proxy/simple/ 16 | 17 | WORKDIR /usr/src/app 18 | 19 | COPY src/requirements.txt . 20 | RUN pip install --no-cache-dir -r requirements.txt -i https://nexus.itc.inventec.net/repository/pypi-proxy/simple/ 21 | 22 | COPY src . 23 | 24 | COPY entrypoint.sh . 25 | RUN chmod +x entrypoint.sh 26 | 27 | ENTRYPOINT [ "./entrypoint.sh" ] 28 | -------------------------------------------------------------------------------- /samba/test/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | REGISTRY=registry.inventec 6 | 7 | docker build --rm -f Dockerfile -t ${REGISTRY}/development/samba-test:latest . 
8 | docker push ${REGISTRY}/development/samba-test:latest 9 | -------------------------------------------------------------------------------- /samba/test/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # set -e 3 | 4 | # 启动SSH 5 | # /usr/sbin/sshd 6 | 7 | python main.py 8 | -------------------------------------------------------------------------------- /samba/test/src/.gitignore: -------------------------------------------------------------------------------- 1 | *.yaml 2 | downloads -------------------------------------------------------------------------------- /samba/test/src/requirements.txt: -------------------------------------------------------------------------------- 1 | pysmb 2 | Flask -------------------------------------------------------------------------------- /spark/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . ./init.sh 3 | . ./clean.sh 4 | 5 | cd build 6 | docker build --rm -f Dockerfile -t ${REGISTRY}/${TAGNAME} --build-arg http_proxy=${PROXY} --build-arg https_proxy=${PROXY} . 7 | docker push ${REGISTRY}/$TAGNAME -------------------------------------------------------------------------------- /spark/build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM maven:3.6-openjdk-8 as maven 2 | WORKDIR /app 3 | COPY pom.xml . 4 | RUN mvn dependency:copy-dependencies -DoutputDirectory=jars 5 | 6 | FROM centos:7 7 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 8 | 9 | ARG TMP_DIR=/tmp/spark 10 | COPY pkgs/* ${TMP_DIR}/ 11 | 12 | WORKDIR /opt/spark 13 | 14 | RUN cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ 15 | && echo 'Asia/Shanghai' > /etc/timezone \ 16 | # && mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup \ 17 | # && curl -so /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo \ 18 | && yum install -y net-tools java-1.8.0-openjdk \ 19 | && yum clean all \ 20 | && tar -zxvf ${TMP_DIR}/spark-2.3.3-bin-hadoop2.7.tgz -C . --strip-components=1 \ 21 | && rm -rf ${TMP_DIR} 22 | 23 | ENV LANG=en_US.UTF-8 \ 24 | SPARK_HOME=/opt/spark \ 25 | PATH=${PATH}:${SPARK_HOME}/bin 26 | 27 | COPY entrypoint.sh . 28 | COPY conf/* ./conf/ 29 | COPY --from=maven /app/jars/* ./jars/ 30 | 31 | EXPOSE 4040 6066 7077 8080 8081 32 | ENTRYPOINT [ "./entrypoint.sh" ] 33 | CMD ["/bin/bash"] 34 | -------------------------------------------------------------------------------- /spark/build/conf/log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | # 17 | 18 | # Set everything to be logged to the console 19 | log4j.rootCategory=WARN, console 20 | log4j.appender.console=org.apache.log4j.ConsoleAppender 21 | log4j.appender.console.target=System.err 22 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n 24 | 25 | # Set the default spark-shell log level to WARN. When running the spark-shell, the 26 | # log level for this class is used to overwrite the root logger's log level, so that 27 | # the user can have different defaults for the shell and regular Spark apps. 28 | log4j.logger.org.apache.spark.repl.Main=WARN 29 | 30 | # Settings to quiet third party logs that are too verbose 31 | log4j.logger.org.spark_project.jetty=WARN 32 | log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR 33 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO 34 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO 35 | log4j.logger.org.apache.parquet=ERROR 36 | log4j.logger.parquet=ERROR 37 | 38 | # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support 39 | log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL 40 | log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR 41 | -------------------------------------------------------------------------------- /spark/build/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "NODE is: `hostname`" 5 | if [ `hostname` == "sparkmaster" ];then 6 | ./sbin/start-master.sh 7 | ./sbin/start-history-server.sh 8 | else 9 | ./sbin/start-slave.sh spark://sparkmaster:7077 10 | fi 11 | 12 | tail -f /opt/spark/logs/* 13 | -------------------------------------------------------------------------------- /spark/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . ./init.sh 3 | 4 | # 停止运行中的容器 5 | querynum1=`docker ps | grep $TAGNAME | awk '{print $1}' | wc -l` 6 | if [ $querynum1 -gt 0 ]; then 7 | docker stop $(docker ps | grep $TAGNAME | awk '{print $1}') 8 | fi 9 | 10 | # This will remove: 11 | # - all stopped containers 12 | # - all networks not used by at least one container 13 | # - all dangling images 14 | # - all build cache 15 | docker system prune --force -------------------------------------------------------------------------------- /spark/cron.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . ./init.sh 3 | 4 | # INVENTORY_FILE=../inventory.dev 5 | INVENTORY_FILE=../inventory.prod 6 | 7 | # 传出配置文件 8 | ansible -i $INVENTORY_FILE spark-master -m cron -a 'name="pi" state=absent' 9 | ansible -i $INVENTORY_FILE spark-master -m cron -a 'name="pi" minute=0 hour=*/2 job="/opt/sparkv2/submit.sh >> /opt/sparkv2/logs/jobs.log 2>&1"' -------------------------------------------------------------------------------- /spark/deploy.dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . 
./init.sh 3 | 4 | REGISTRY=registry.inventec 5 | 6 | INVENTORY_FILE=../inventory.dev 7 | 8 | # 传出配置文件 9 | ansible -i $INVENTORY_FILE spark-master -m copy -a "src=deploy.dev/ dest=/opt/spark" -b 10 | # ansible -i $INVENTORY_FILE spark -m file -a "dest=/data/ssd0/spark mode=777 state=directory" -f 5 -b 11 | # ansible -i $INVENTORY_FILE spark -m file -a "dest=/data/ssd0/spark/spark-events mode=777 state=directory" -f 5 -b 12 | ansible -i $INVENTORY_FILE spark-slave -m file -a "dest=/data/ssd0/spark/work mode=777 state=directory" -f 5 -b 13 | 14 | # 执行启动命令 15 | ansible -i $INVENTORY_FILE sparkmaster -m command -a "/opt/spark/start.sh ${REGISTRY} ${TAGNAME}" -b -------------------------------------------------------------------------------- /spark/deploy.dev/docker-compose-master.yml: -------------------------------------------------------------------------------- 1 | version: "3.3" 2 | services: 3 | 4 | sparkmaster: 5 | image: ${REGISTRY}/${TAGNAME} 6 | hostname: sparkmaster 7 | volumes: 8 | - /opt/spark/tasks:/opt/tasks 9 | - /data/ssd0/spark/spark-events:/tmp/spark-events 10 | environment: 11 | SPARK_PUBLIC_DNS: 10.99.170.13 12 | SPARK_MASTER_WEBUI_PORT: 18081 13 | SPARK_LOCAL_DIRS: /tmp 14 | networks: 15 | hostnet: {} 16 | extra_hosts: 17 | - "sparkmaster:10.99.170.13" 18 | - "sparkw1:10.99.170.14" 19 | - "sparkw2:10.99.170.15" 20 | deploy: 21 | restart_policy: 22 | condition: on-failure 23 | placement: 24 | constraints: 25 | - node.labels.alias == bdc03.infra.prod.f3.itc.inventec 26 | resources: 27 | limits: 28 | cpus: "8" 29 | memory: 8g 30 | 31 | networks: 32 | hostnet: 33 | external: 34 | name: host -------------------------------------------------------------------------------- /spark/deploy.dev/docker-compose-worker.yml: -------------------------------------------------------------------------------- 1 | version: "3.3" 2 | services: 3 | 4 | sparkw1: 5 | image: ${REGISTRY}/${TAGNAME} 6 | hostname: sparkw1 7 | volumes: 8 | - /data/ssd0/spark/spark-events:/tmp/spark-events 9 | - /data/ssd0/spark/work:/opt/spark/work 10 | environment: 11 | SPARK_WORKER_CORES: 20 12 | SPARK_WORKER_MEMORY: 20g 13 | SPARK_PUBLIC_DNS: 10.99.170.14 14 | SPARK_WORKER_WEBUI_PORT: 18081 15 | SPARK_LOCAL_DIRS: /tmp 16 | networks: 17 | hostnet: {} 18 | extra_hosts: 19 | - "sparkmaster:10.99.170.13" 20 | - "sparkw1:10.99.170.14" 21 | - "sparkw2:10.99.170.15" 22 | deploy: 23 | restart_policy: 24 | condition: on-failure 25 | placement: 26 | constraints: 27 | - node.labels.alias == bdc04.infra.prod.f3.itc.inventec 28 | resources: 29 | limits: 30 | cpus: "24" 31 | memory: 24g 32 | 33 | sparkw2: 34 | image: ${REGISTRY}/${TAGNAME} 35 | hostname: sparkw2 36 | volumes: 37 | - /data/ssd0/spark/spark-events:/tmp/spark-events 38 | - /data/ssd0/spark/work:/opt/spark/work 39 | environment: 40 | SPARK_WORKER_CORES: 20 41 | SPARK_WORKER_MEMORY: 20g 42 | SPARK_PUBLIC_DNS: 10.99.170.15 43 | SPARK_WORKER_WEBUI_PORT: 18081 44 | SPARK_LOCAL_DIRS: /tmp 45 | networks: 46 | hostnet: {} 47 | extra_hosts: 48 | - "sparkmaster:10.99.170.13" 49 | - "sparkw1:10.99.170.14" 50 | - "sparkw2:10.99.170.15" 51 | deploy: 52 | restart_policy: 53 | condition: on-failure 54 | placement: 55 | constraints: 56 | - node.labels.alias == bdc05.infra.prod.f3.itc.inventec 57 | resources: 58 | limits: 59 | cpus: "24" 60 | memory: 24g 61 | 62 | networks: 63 | hostnet: 64 | external: 65 | name: host -------------------------------------------------------------------------------- /spark/deploy.dev/start.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cd `dirname $0` 5 | 6 | echo "启动参数: $*" 7 | export REGISTRY="$1" 8 | export TAGNAME="$2" 9 | # export EXTERNAL_IP=`hostname -I | cut -d " " -f 1` 10 | 11 | docker stack deploy -c docker-compose-master.yml spark_master 12 | sleep 20 13 | docker stack deploy -c docker-compose-worker.yml spark_worker -------------------------------------------------------------------------------- /spark/deploy.dev/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "【`date`】" 5 | 6 | CURR_DIR=`dirname $0` 7 | 8 | echo "Getting container ID of the Spark master..." 9 | CONTAINERID=`docker ps --filter name=sparkm --format "{{.ID}}"` 10 | 11 | echo "Running Spark job..." 12 | docker exec ${CONTAINERID} /opt/spark/bin/spark-submit --master spark://sparkmaster:7077 --total-executor-cores 4 --verbose /opt/tasks/python/pi.py -------------------------------------------------------------------------------- /spark/deploy.dev/tasks/python/pi.py: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | from __future__ import print_function 19 | 20 | import sys 21 | from random import random 22 | from operator import add 23 | 24 | from pyspark.sql import SparkSession 25 | 26 | 27 | if __name__ == "__main__": 28 | """ 29 | Usage: pi [partitions] 30 | """ 31 | spark = SparkSession\ 32 | .builder\ 33 | .appName("PythonPi")\ 34 | .getOrCreate() 35 | 36 | spark.sparkContext.setLogLevel("INFO") 37 | 38 | partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 20 39 | n = 100000 * partitions 40 | 41 | def f(_): 42 | x = random() * 2 - 1 43 | y = random() * 2 - 1 44 | return 1 if x ** 2 + y ** 2 <= 1 else 0 45 | 46 | count = spark.sparkContext.parallelize(range(1, n + 1), partitions * 6).map(f).reduce(add) 47 | print("Pi is roughly %f" % (4.0 * count / n)) 48 | 49 | spark.stop() 50 | -------------------------------------------------------------------------------- /spark/init.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | REGISTRY=harbor.inventec.com 3 | 4 | if [ -n "$1" ]; then 5 | BRANCH=$1 6 | else 7 | # BRANCH=2.3.0 8 | BRANCH=2.3.3 9 | fi 10 | TAGNAME=development/spark:${BRANCH} 11 | 12 | PROXY=http://10.190.40.39:18118/ -------------------------------------------------------------------------------- /streampark/bin/.env: -------------------------------------------------------------------------------- 1 | TZ=Asia/Shanghai 2 | 3 | SPRING_PROFILES_ACTIVE=h2 #mysql, pgsql 4 | #SPRING_PROFILES_ACTIVE=pgsql 5 | # If use mysql or pgsql, please set the following parameters 6 | #SPRING_DATASOURCE_URL=jdbc:mysql://localhost:3306/streampark?useSSL=false&useUnicode=true&characterEncoding=UTF-8&allowPublicKeyRetrieval=false&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=GMT%2B8 7 | #SPRING_DATASOURCE_URL=jdbc:postgresql://localhost:5432/streampark?stringtype=unspecified 8 | #SPRING_DATASOURCE_USERNAME= 9 | #SPRING_DATASOURCE_PASSWORD= 10 | 11 | RUN_COMMAND='/bin/sh -c "bash bin/streampark.sh start_docker "' 12 | -------------------------------------------------------------------------------- /streampark/bin/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | streampark-console: 4 | image: registry.inventec/infra/streampark:v2.1.2 5 | command: ${RUN_COMMAND} 6 | container_name: streampark 7 | ports: 8 | - 10000:10000 9 | - 10030:10030 10 | env_file: .env 11 | volumes: 12 | - /var/run/docker.sock:/var/run/docker.sock 13 | - ~/.kube:/root/.kube:ro 14 | - ./streampark_workspace:/opt/streampark_workspace 15 | privileged: true 16 | restart: unless-stopped 17 | networks: 18 | - streampark 19 | 20 | networks: 21 | streampark: 22 | driver: bridge 23 | -------------------------------------------------------------------------------- /streampark/build/Dockerfile: -------------------------------------------------------------------------------- 1 | # 一定要顶头申明 2 | ARG STREAMPARK_VERSION=dev 3 | 4 | FROM registry.inventec/proxy/library/flink:1.12.7-scala_2.12 AS flink1.12.7-builder 5 | FROM registry.inventec/proxy/library/flink:1.13.6-scala_2.12 AS flink1.13.6-builder 6 | FROM registry.inventec/proxy/library/flink:1.14.6-scala_2.12 AS flink1.14.6-builder 7 | FROM registry.inventec/proxy/library/flink:1.15.4-scala_2.12 AS flink1.15.4-builder 8 | FROM registry.inventec/proxy/library/flink:1.16.3-scala_2.12 AS flink1.16.3-builder 9 | FROM registry.inventec/proxy/library/flink:1.17.2-scala_2.12 AS flink1.17.2-builder 10 | 11 | FROM registry.inventec/proxy/apache/streampark:$STREAMPARK_VERSION 12 | 13 | COPY --from=flink1.12.7-builder /opt/flink /opt/flink/flink1.12/ 14 | COPY --from=flink1.13.6-builder /opt/flink /opt/flink/flink1.13/ 15 | COPY --from=flink1.14.6-builder /opt/flink /opt/flink/flink1.14/ 16 | COPY --from=flink1.15.4-builder /opt/flink /opt/flink/flink1.15/ 17 | COPY --from=flink1.16.3-builder /opt/flink /opt/flink/flink1.16/ 18 | COPY --from=flink1.17.2-builder /opt/flink /opt/flink/flink1.17/ 19 | -------------------------------------------------------------------------------- /streampark/build/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | REGISTRY=registry.inventec 6 | STREAMPARK_VERSION=v2.1.2 7 | 8 | docker build --rm -f Dockerfile \ 9 | -t ${REGISTRY}/infra/streampark:${STREAMPARK_VERSION} \ 10 | --build-arg STREAMPARK_VERSION=${STREAMPARK_VERSION} \ 11 | . 
12 | docker push ${REGISTRY}/infra/streampark:${STREAMPARK_VERSION} 13 | -------------------------------------------------------------------------------- /telegraf/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GOLANG_VERSION 2 | ARG TELEGRAF_VERSION 3 | 4 | FROM registry.inventec/proxy/golang:${GOLANG_VERSION} as builder 5 | 6 | ENV GOPROXY=http://nexus.itc.inventec.net/repository/go-proxy/,https://goproxy.cn,https://goproxy.io,direct 7 | 8 | WORKDIR /go/src 9 | 10 | ARG TELEGRAF_VERSION 11 | 12 | RUN git clone --depth 1 --branch v${TELEGRAF_VERSION} https://github.com/influxdata/telegraf.git && \ 13 | cd telegraf && make build_tools 14 | 15 | COPY config /opt/telegraf/config 16 | COPY patch /opt/telegraf/patch 17 | 18 | RUN cd telegraf && \ 19 | git apply /opt/telegraf/patch/*.patch && \ 20 | ./tools/custom_builder/custom_builder -config-dir /opt/telegraf/config 21 | 22 | FROM registry.inventec/proxy/telegraf:${TELEGRAF_VERSION}-alpine 23 | 24 | COPY --from=builder /go/src/telegraf/telegraf /usr/bin/telegraf 25 | -------------------------------------------------------------------------------- /telegraf/Makefile: -------------------------------------------------------------------------------- 1 | # Image and version configuration 2 | IMAGE_NAME=registry.inventec/infra/telegraf 3 | GOLANG_VERSION=1.24.4 4 | TELEGRAF_VERSION=1.35.1 5 | 6 | # Proxy settings 7 | HTTP_PROXY=http://10.190.81.209:3389 8 | NO_PROXY=*.inventec.net 9 | 10 | # Derived variables 11 | IMAGE_TAG=${IMAGE_NAME}:${TELEGRAF_VERSION}-lite-alpine 12 | 13 | .PHONY: all docker-build docker-push 14 | 15 | all: docker-build 16 | 17 | docker-build: 18 | @echo "Building Docker image ${IMAGE_TAG}..." 19 | docker build --rm -t ${IMAGE_TAG} \ 20 | --build-arg http_proxy=${HTTP_PROXY} \ 21 | --build-arg https_proxy=${HTTP_PROXY} \ 22 | --build-arg no_proxy=${NO_PROXY} \ 23 | --build-arg GOLANG_VERSION=${GOLANG_VERSION} \ 24 | --build-arg TELEGRAF_VERSION=${TELEGRAF_VERSION} \ 25 | . 26 | 27 | docker-push: docker-build 28 | @echo "Pushing Docker image ${IMAGE_TAG}..." 29 | @docker push ${IMAGE_TAG} 30 | -------------------------------------------------------------------------------- /tensorflow/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:1.13.1-gpu-py3-jupyter 2 | LABEL maintainer="Zhang.Xing-Long@inventec.com" 3 | 4 | USER root 5 | 6 | # Ref: https://docs.docker.com/engine/examples/running_ssh_service/ 7 | RUN apt-get update \ 8 | && apt-get install -y net-tools openssh-server \ 9 | && mkdir /var/run/sshd \ 10 | && sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config \ 11 | && sed -i 's/#PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config \ 12 | # SSH login fix. Otherwise user is kicked off after login 13 | && sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd \ 14 | && rm -rf /var/lib/apt/lists/* \ 15 | && echo root:111111 | chpasswd \ 16 | && pip install --upgrade pip 17 | 18 | WORKDIR /usr/src/app 19 | 20 | COPY requirements.txt . 21 | RUN pip install --no-cache-dir -r requirements.txt 22 | 23 | COPY . . 
24 | RUN chmod +x entrypoint.sh 25 | 26 | ENTRYPOINT [ "./entrypoint.sh" ] 27 | -------------------------------------------------------------------------------- /tensorflow/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | cd `dirname $0` 4 | 5 | REGISTRY=registry.inventec 6 | 7 | docker build --rm -f Dockerfile -t ${REGISTRY}/development/tensorflow:1.13.1-gpu-py3-jupyter . 8 | -------------------------------------------------------------------------------- /tensorflow/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -e 3 | 4 | # 启动SSH 5 | /usr/sbin/sshd -D 6 | -------------------------------------------------------------------------------- /toolbox/README.md: -------------------------------------------------------------------------------- 1 | - go profiling 2 | 3 | ```bash 4 | docker run -it --rm --net host registry.inventec/infra/debugger /bin/bash 5 | go tool pprof -http :{port} http://localhost:6060/debug/pprof/heap 6 | ``` 7 | 8 | - PG 备份 9 | 10 | ```bash 11 | docker run -it --rm registry.inventec/infra/debugger pg_dump --version 12 | ``` 13 | 14 | - mat 15 | 16 | ```bash 17 | docker run -it --rm -v $(pwd):/dump registry.inventec/infra/debugger \ 18 | /opt/mat/ParseHeapDump.sh \ 19 | /dump/ org.eclipse.mat.api:top_components org.eclipse.mat.api:suspects org.eclipse.mat.api:overview 20 | ``` 21 | 22 | - smartctl 23 | 24 | ```bash 25 | docker run -it --rm -v /dev:/dev --privileged registry.inventec/infra/debugger /smart_report.sh 26 | ``` 27 | 28 | - telnet 29 | 30 | > 如果线上容器并没有打包一些常见的网络测试工具 31 | 32 | ```bash 33 | docker run -it --rm --net container:容器名或者id registry.inventec/infra/debugger /bin/bash 34 | ``` -------------------------------------------------------------------------------- /toolbox/bcc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker run -it --rm \ 4 | --privileged \ 5 | -v /lib/modules:/lib/modules:ro \ 6 | -v /usr/src:/usr/src:ro \ 7 | -v /etc/localtime:/etc/localtime:ro \ 8 | --workdir /usr/share/bcc/tools \ 9 | zlim/bcc -------------------------------------------------------------------------------- /toolbox/build.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | cd `dirname $0` 3 | 4 | PROXY=http://10.190.81.209:3389/ 5 | VERSION=latest 6 | GO_VERSION=1.24.2 7 | MAT_VERSION=1.16.1.20250109 8 | 9 | docker build --rm -f Dockerfile \ 10 | -t registry.inventec/infra/debugger:${VERSION} \ 11 | --build-arg http_proxy=${PROXY} \ 12 | --build-arg https_proxy=${PROXY} \ 13 | --build-arg GO_VERSION=${GO_VERSION} \ 14 | --build-arg MAT_VERSION=${MAT_VERSION} \ 15 | . 
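# Note: this script only builds the debugger image locally; if publishing to the registry is
# wanted, the usual follow-up step (a sketch, not part of the original script) would be:
#   docker push registry.inventec/infra/debugger:${VERSION}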
16 | -------------------------------------------------------------------------------- /toolbox/smart_report.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 获取所有控制器的 Slot 编号 4 | slots=$(ssacli ctrl all show | awk '/Slot [0-9]+/ { for (i=1; i<=NF; i++) if ($i == "Slot") print $(i+1) }') 5 | 6 | # 遍历每个 Slot 7 | for slot in $slots; do 8 | echo ">> 分析 Controller Slot: $slot" 9 | output=$(ssacli ctrl slot=$slot ld all show detail) 10 | 11 | echo "$output" | awk -v slot="$slot" ' 12 | /^ *Logical Drive:/ { 13 | drive = $3 14 | } 15 | /^ *Disk Name:/ { 16 | disk = $3 17 | printf("%s %s %s\n", slot, drive, disk); 18 | }' | while read -r slot drive device; do 19 | echo ">>> Slot: $slot, Logical Drive: $drive, Device: $device" 20 | smartctl -d cciss,"$drive" -a "$device" | grep -A 10 'ID# ATTRIBUTE_NAME.*RAW_VALUE' 21 | echo "------------------------------------------------------------" 22 | done 23 | done 24 | -------------------------------------------------------------------------------- /xampp/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:7 2 | 3 | ARG WORK_DIR=/opt/shujuguan/www/oc 4 | RUN mkdir -p $WORK_DIR 5 | WORKDIR $WORK_DIR 6 | 7 | # Build之前需要先下载依赖的包 8 | COPY ./pkgs/xampp-linux-x64-5.6.32-0-installer.run . 9 | COPY ./pkgs/redis-3.1.4.tgz . 10 | 11 | # 替换镜像源/安装基础工具/安装XAMPP/安装Redis扩展 12 | RUN cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ 13 | && echo 'Asia/Shanghai' > /etc/timezone \ 14 | && mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup \ 15 | && curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo \ 16 | && rm -rf /var/cache/yum \ 17 | && yum install -y net-tools gcc autoconf automake libtool make \ 18 | && chmod +x ./xampp-linux-x64-5.6.32-0-installer.run \ 19 | && ./xampp-linux-x64-5.6.32-0-installer.run \ 20 | && rm -rf xampp-linux-x64-5.6.32-0-installer.run \ 21 | && tar -zxvf redis-3.1.4.tgz \ 22 | && cd redis-3.1.4/ \ 23 | && /opt/lampp/bin/phpize \ 24 | && ./configure --with-php-config=/opt/lampp/bin/php-config \ 25 | && make && make install \ 26 | && cd - && rm -rf redis-3.1.4* 27 | 28 | # 拷贝配置 29 | COPY ./etc/httpd.conf /opt/lampp/etc/httpd.conf 30 | COPY ./etc/php.ini /opt/lampp/etc/php.ini 31 | 32 | # 拷贝源码至容器中,但会自动忽略.dockerignore中指明的文件以及文件夹 33 | COPY ./www . 34 | RUN chmod +x run.sh 35 | 36 | EXPOSE 80 37 | 38 | ENTRYPOINT [ "./run.sh" ] -------------------------------------------------------------------------------- /xampp/build.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | REGISTRY=registry.com:5000 4 | NAME=binary:super 5 | 6 | docker build -t $NAME . 7 | docker tag $NAME ${REGISTRY}/$NAME 8 | docker push ${REGISTRY}/$NAME -------------------------------------------------------------------------------- /xampp/etc/httpd.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/xampp/etc/httpd.conf -------------------------------------------------------------------------------- /xampp/www/index.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |

使用数组来模拟栈的各种操作

6 | top==$this->maxSize-1){ 18 | echo '
栈满,不能添加'; 19 | return; 20 | } 21 | 22 | $this->top++; 23 | $this->stack[$this->top]=$val; 24 | 25 | 26 | } 27 | 28 | //出栈的操作,就是把栈顶的值取出 29 | public function pop(){ 30 | 31 | //判断是否栈空 32 | if($this->top==-1){ 33 | echo '
栈空'; 34 | return; 35 | } 36 | 37 | //把栈顶的值,取出 38 | $topVal=$this->stack[$this->top]; 39 | $this->top--; 40 | return $topVal; 41 | 42 | } 43 | 44 | //显示栈的所有数据的方法. 45 | public function showStack(){ 46 | 47 | if($this->top==-1){ 48 | echo '
栈空'; 49 | return; 50 | } 51 | echo '
当前栈的情况是....'; 52 | for($i=$this->top;$i>-1;$i--){ 53 | echo '
stack['.$i.']='.$this->stack[$i]; 54 | } 55 | } 56 | } 57 | 58 | 59 | $mystack=new MyStack; 60 | $mystack->push('西瓜'); 61 | $mystack->push('香蕉'); 62 | $mystack->push('橘子'); 63 | $mystack->push('柚子'); 64 | $mystack->push('柚子x'); 65 | 66 | $mystack->showStack(); 67 | 68 | $val=$mystack->pop(); 69 | echo '
pop出栈了一个数据'.$val; 70 | $mystack->showStack(); 71 | 72 | $val=$mystack->pop(); 73 | echo '
pop出栈了一个数据'.$val; 74 | $mystack->showStack(); 75 | 76 | $val=$mystack->pop(); 77 | echo '
pop出栈了一个数据'.$val; 78 | $mystack->showStack(); 79 | 80 | $val=$mystack->pop(); 81 | echo '
pop出栈了一个数据'.$val; 82 | $mystack->showStack(); 83 | 84 | 85 | 86 | 87 | ?> 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /xray/build/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG NGINX_VERSION 2 | 3 | FROM nginx:${NGINX_VERSION} 4 | 5 | ARG XRAY_VERSION 6 | 7 | RUN apt-get update && \ 8 | apt-get install -y \ 9 | procps net-tools curl wget unzip jq vim 10 | 11 | # 安装 TLS 证书 https://certbot.eff.org/instructions?ws=nginx&os=pip 12 | RUN apt-get update && \ 13 | apt-get install -y python3 python3-venv libaugeas0 && \ 14 | python3 -m venv /opt/certbot/ && \ 15 | /opt/certbot/bin/pip install --upgrade pip && \ 16 | /opt/certbot/bin/pip install certbot certbot-nginx && \ 17 | ln -s /opt/certbot/bin/certbot /usr/bin/certbot 18 | 19 | WORKDIR /opt/xray 20 | 21 | RUN wget -O x.zip https://github.com/XTLS/Xray-core/releases/download/${XRAY_VERSION}/Xray-linux-64.zip && \ 22 | unzip x.zip && rm -rf x.zip && \ 23 | chmod +x xray && \ 24 | mkdir -p /var/log/xray /usr/share/xray && \ 25 | wget -O /usr/share/xray/geosite.dat https://github.com/v2fly/domain-list-community/releases/latest/download/dlc.dat && \ 26 | wget -O /usr/share/xray/geoip.dat https://github.com/v2fly/geoip/releases/latest/download/geoip.dat 27 | 28 | COPY conf/xray/config_server.json /opt/xray/config.json 29 | COPY conf/nginx/* /etc/nginx/conf.d/ 30 | COPY entrypoint.sh / 31 | 32 | ENV PATH $PATH:/opt/xray 33 | 34 | ENTRYPOINT ["/entrypoint.sh"] 35 | -------------------------------------------------------------------------------- /xray/build/conf/nginx/example.org.conf.tmpl: -------------------------------------------------------------------------------- 1 | # HTTP server configuration 2 | server { 3 | listen 80; 4 | server_name example.org; 5 | server_tokens off; 6 | 7 | location /.well-known/acme-challenge/ { 8 | root /usr/share/nginx/html; 9 | } 10 | 11 | # Redirect HTTP requests to HTTPS 12 | location / { 13 | return 301 https://$host$request_uri; 14 | } 15 | } 16 | 17 | # HTTPS server configuration 18 | server { 19 | # listen 443 ssl; 20 | listen 8001; 21 | server_name example.org; 22 | server_tokens off; 23 | 24 | # SSL/TLS certificate and key 25 | # ssl_certificate /etc/nginx/ssl/live/example.org/fullchain.pem; 26 | # ssl_certificate_key /etc/nginx/ssl/live/example.org/privkey.pem; 27 | 28 | # SSL/TLS configuration 29 | ssl_protocols TLSv1.2 TLSv1.3; 30 | ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:HIGH:!aNULL:!MD5:!RC4:!DHE; 31 | ssl_prefer_server_ciphers on; 32 | ssl_session_cache shared:SSL:10m; 33 | ssl_session_timeout 5m; 34 | 35 | # Xray Core reverse proxy settings 36 | location / { 37 | root /usr/share/nginx/html; 38 | } 39 | 40 | location = /favicon.ico { 41 | log_not_found off; 42 | access_log off; 43 | } 44 | 45 | location = /robots.txt { 46 | deny all; 47 | log_not_found off; 48 | access_log off; 49 | } 50 | 51 | # 不允许访问隐藏文件例如 .htaccess, .htpasswd, .DS_Store (Mac). 52 | location ~ /\. 
        deny all;
        log_not_found off;
        access_log off;
    }
}
--------------------------------------------------------------------------------
/xray/build/conf/xray/config_client.json:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cobolbaby/dockerize-and-ansible/5baf74e2da7b3a93ca827ca75b072c498d578b20/xray/build/conf/xray/config_client.json
--------------------------------------------------------------------------------
/xray/build/conf/xray/config_server.json:
--------------------------------------------------------------------------------
{
    "log": {
        "loglevel": "warning"
    },
    "routing": {
        "domainStrategy": "IPIfNonMatch",
        "rules": [
            {
                "type": "field",
                "ip": [
                    "geoip:cn"
                ],
                "outboundTag": "block"
            }
        ]
    },
    "inbounds": [
        {
            "listen": "0.0.0.0",
            "port": 443,
            "protocol": "vless",
            "settings": {
                "clients": [
                    {
                        "id": "",
                        "flow": "xtls-rprx-vision"
                    }
                ],
                "decryption": "none",
                "fallbacks": [
                    {
                        "dest": "8001",
                        "xver": 1
                    }
                ]
            },
            "streamSettings": {
                "network": "tcp",
                "security": "tls",
                "tlsSettings": {
                    "rejectUnknownSni": true,
                    "minVersion": "1.2",
                    "certificates": [
                        {
                            "ocspStapling": 3600,
                            "certificateFile": "/etc/ssl/private/fullchain.cer",
                            "keyFile": "/etc/ssl/private/private.key"
                        }
                    ]
                }
            },
            "sniffing": {
                "enabled": true,
                "destOverride": [
                    "http",
                    "tls"
                ]
            }
        }
    ],
    "outbounds": [
        {
            "protocol": "freedom",
            "tag": "direct"
        },
        {
            "protocol": "blackhole",
            "tag": "block"
        }
    ],
    "policy": {
        "levels": {
            "0": {
                "handshake": 2,
                "connIdle": 120
            }
        }
    }
}
--------------------------------------------------------------------------------
/xray/build/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

# Domain(s) that this server should serve
CERTBOT_DOMAIN="${CERTBOT_DOMAIN:-cobolbaby.xyz}"

# Email address to use when registering for Certbot
CERTBOT_EMAIL="${CERTBOT_EMAIL:-cobolbaby@qq.com}"

CERTBOT_SSL_CERT_PATH="/etc/letsencrypt/live/$CERTBOT_DOMAIN/fullchain.pem"
CERTBOT_SSL_KEY_PATH="/etc/letsencrypt/live/$CERTBOT_DOMAIN/privkey.pem"

# Path to the Nginx SSL configuration file
NGINX_SSL_CONF="/etc/nginx/conf.d/${CERTBOT_DOMAIN}.conf"

# Update the Nginx SSL configuration file with the paths to the SSL certificate and key
cp /etc/nginx/conf.d/example.org.conf.tmpl $NGINX_SSL_CONF

sed -i "s|example.org|$CERTBOT_DOMAIN|g" $NGINX_SSL_CONF

# Start nginx in the background
nginx

sleep 5

# Obtain the SSL certificate
if [[ ! -f $CERTBOT_SSL_CERT_PATH || ! -f $CERTBOT_SSL_KEY_PATH ]]; then
    certbot certonly --non-interactive --agree-tos --email $CERTBOT_EMAIL --webroot --webroot-path /usr/share/nginx/html -d $CERTBOT_DOMAIN
fi

# sed -i "s|# ssl_certificate .*|ssl_certificate $CERTBOT_SSL_CERT_PATH;|" $NGINX_SSL_CONF
# sed -i "s|# ssl_certificate_key .*|ssl_certificate_key $CERTBOT_SSL_KEY_PATH;|" $NGINX_SSL_CONF

# nginx -t && nginx -s reload

# sleep 5

# Start XRAY
XRAY_CONF="/opt/xray/config.json"

sed -i "s|\"id\": \"\"|\"id\": \"$(xray uuid)\"|" $XRAY_CONF
sed -i "s|\"certificateFile\": \".*\"|\"certificateFile\": \"$CERTBOT_SSL_CERT_PATH\"|" $XRAY_CONF
sed -i "s|\"keyFile\": \".*\"|\"keyFile\": \"$CERTBOT_SSL_KEY_PATH\"|" $XRAY_CONF

xray run -config $XRAY_CONF
--------------------------------------------------------------------------------
/xray/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'
services:

  nginx:
    build:
      context: ./build
      dockerfile: Dockerfile
      args:
        NGINX_VERSION: 1.22
        XRAY_VERSION: v1.7.5
    container_name: nginx
    hostname: nginx
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./certbot/www/:/usr/share/nginx/html/
      - ./certbot/conf/:/etc/letsencrypt/
    environment:
      - CERTBOT_DOMAIN=cobolbaby.xyz
      - CERTBOT_EMAIL=cobolbaby@qq.com
    restart: always
--------------------------------------------------------------------------------
/xray/start.sh:
--------------------------------------------------------------------------------
#!/bin/bash

docker-compose up --build -d
--------------------------------------------------------------------------------