├── roles ├── check_ports │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── vars │ │ └── main.yml │ ├── templates │ │ └── check_port.sh.j2 │ └── tasks │ │ └── main.yml ├── dingo │ ├── templates │ │ ├── jmx_config.yaml.j2 │ │ ├── application-proxy.yaml.j2 │ │ ├── client.yaml.j2 │ │ ├── application-proxy-dev.yaml.j2 │ │ ├── coordinator.yaml.j2 │ │ ├── stop-dingo-web.sh.j2 │ │ ├── start-web.sh.j2 │ │ ├── import.sh.j2 │ │ ├── stop-executor-proxy.sh.j2 │ │ ├── executor-noldap.yaml.j2 │ │ ├── application-web-dev.yaml.j2 │ │ ├── start-driver.sh.j2 │ │ ├── start-proxy.sh.j2 │ │ ├── application-web.yaml.j2 │ │ ├── executor.yaml.j2 │ │ ├── stop-all-component.sh.j2 │ │ ├── logback-driver.xml.j2 │ │ ├── logback-import.xml.j2 │ │ ├── logback-sqlline.xml.j2 │ │ ├── logback-coordinator.xml.j2 │ │ ├── start-executor.sh.j2 │ │ ├── logback.xml.j2 │ │ ├── logback-web.xml.j2 │ │ └── logback-proxy.xml.j2 │ ├── tasks │ │ ├── main.yml │ │ ├── 01_basic_command.yml │ │ ├── 03_start_roles_command.yml │ │ └── 02_update_configuration.yml │ └── defaults │ │ └── main.yml ├── nginx │ ├── templates │ │ ├── start.sh.j2 │ │ ├── reload.sh.j2 │ │ ├── stop.sh.j2 │ │ ├── systemd │ │ │ └── nginx.service.j2 │ │ ├── nginx.conf.j2 │ │ └── default.conf.j2 │ ├── tasks │ │ ├── 01_os_user.yml │ │ ├── main.yml │ │ ├── 05_startup.yml │ │ ├── 00_preflight.yml │ │ ├── 04_config.yml │ │ ├── 03_untar_monitor_web.yml │ │ └── 02_build.yml │ └── defaults │ │ └── main.yml ├── jdk │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── 01_os_user.yml │ │ ├── main.yml │ │ ├── 03_config.yml │ │ └── 02_untar.yml ├── prometheus │ ├── templates │ │ ├── node.yml.j2 │ │ ├── process.yml.j2 │ │ ├── blackbox.yml.j2 │ │ ├── pushgateway.service.j2 │ │ ├── jmx.json.j2 │ │ ├── blackbox-exporter.service.j2 │ │ ├── blackbox-exporter.yml.j2 │ │ ├── prometheus.service.j2 │ │ └── prometheus.yml.j2 │ ├── tasks │ │ ├── main.yml │ │ ├── 04_add_target.yml │ │ ├── 02_pushgateway.yml │ │ ├── 01_blackbox_exporter.yml │ │ └── 03_prometheus_server.yml │ └── defaults │ │ └── main.yml ├── dingo_store │ ├── templates │ │ ├── coordinator-logrotate.j2 │ │ ├── store-logrotate.j2 │ │ ├── generate_id.sh.j2 │ │ ├── gen_coor_list.sh.j2 │ │ ├── start-coordinator.sh.j2 │ │ ├── start-diskann.sh.j2 │ │ └── start-store.sh.j2 │ ├── tasks │ │ ├── main.yml │ │ ├── 02_update_configuration.yml │ │ ├── 01_basic_command.yml │ │ └── 03_start_roles_command.yml │ └── defaults │ │ └── main.yml ├── scaling_in_dingo │ ├── templates │ │ ├── coordinator-logrotate.j2 │ │ ├── store-logrotate.j2 │ │ ├── mysql_init.sh.j2 │ │ ├── gen_coor_list.sh.j2 │ │ ├── start-coordinator.sh.j2 │ │ └── start-store.sh.j2 │ ├── tasks │ │ ├── main.yml │ │ ├── 03_start_roles_command.yml │ │ ├── 02_update_configuration.yml │ │ └── 01_basic_command.yml │ └── defaults │ │ └── main.yml ├── system │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── 05_install_basic_tools.yml │ │ ├── 02_install_system_cfg.yml │ │ ├── main.yml │ │ ├── 01_check_install_requirement.yml │ │ ├── 07_optimize_memory.yml │ │ ├── 06_enable_core_dumps.yml │ │ ├── 04_install_ntp_or_chrony.yml │ │ ├── 03_hostname.yml │ │ ├── 04_1_ntp.yml │ │ └── 04_2_chrony.yml ├── node_exporter │ ├── tasks │ │ ├── main.yml │ │ └── 01_node_exporter.yml │ ├── defaults │ │ └── main.yml │ └── templates │ │ └── node-exporter.service.j2 ├── process_exporter │ ├── tasks │ │ ├── main.yml │ │ └── 01_process_exporter.yml │ ├── files │ │ └── process_name.yaml │ ├── templates │ │ └── process-exporter.service.j2 │ └── defaults │ │ └── main.yml ├── create_users │ └── defaults 
│ │ └── main.yml └── grafana │ ├── templates │ ├── grafana-env.j2 │ └── grafana.service.j2 │ └── defaults │ └── main.yml ├── group_vars └── all │ ├── customize.yml │ └── user_info.yml ├── refer └── cluster_topology.png ├── artifacts ├── axel-2.4-9.el7.x86_64.rpm ├── system │ └── centos8 │ │ ├── repo │ │ ├── CentOS-Vault.repo │ │ ├── CentOS-fasttrack.repo │ │ ├── CentOS-Debuginfo.repo │ │ ├── CentOS-HA.repo │ │ ├── CentOS-Devel.repo │ │ ├── CentOS-Media.repo │ │ ├── CentOS-CR.repo │ │ ├── CentOS-Sources.repo │ │ └── CentOS-Base.repo │ │ └── limits │ │ └── limits.conf ├── sqlline-1.13.0-SNAPSHOT-jar-with-dependencies.jar ├── config.yml └── merge_dingo.sh ├── auto-tests └── dingo │ ├── restart.sh │ ├── start.sh │ ├── script.md │ └── docker-compose.yml ├── container ├── images │ ├── templates │ │ ├── conf │ │ │ ├── application.yaml │ │ │ ├── client.yaml │ │ │ ├── coordinator.yaml │ │ │ ├── executor.yaml │ │ │ ├── application-dev.yaml │ │ │ ├── logback-executor.xml │ │ │ ├── logback-coordinator.xml │ │ │ ├── logback-web.xml │ │ │ ├── logback-driver.xml │ │ │ └── logback-sqlline.xml │ │ └── bin │ │ │ ├── start-driver.sh │ │ │ ├── start-executor.sh │ │ │ └── start.sh │ ├── Dockerfile │ └── buildImages.sh └── docker-compose │ ├── docker-compose.localhost-1replica.yml │ ├── docker-compose.single.yml │ └── docker-compose.lite.yml ├── playbooks ├── 02_jdk.yml ├── 04_dingo.yml ├── 08_grafana.yml ├── 05_prometheus.yml ├── 09_monitor_web.yml ├── 02_1_check_ports.yml ├── 03_dingo-store.yml ├── 06_node_exporter.yml ├── 031_scaling_in_dingo.yml ├── 07_process_exporter.yml └── 01_system.yml ├── create-user.yml ├── filter_plugins └── custom_filter.py ├── relink.sh ├── ansible.cfg ├── package.sh ├── inventory └── hosts ├── action_plugins └── resolve_artifacts.py └── playbook.yml /roles/check_ports/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/check_ports/meta/main.yml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /group_vars/all/customize.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Add your custom config here 3 | -------------------------------------------------------------------------------- /roles/dingo/templates/jmx_config.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | rules: 3 | - pattern: ".*" -------------------------------------------------------------------------------- /roles/nginx/templates/start.sh.j2: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | 3 | cd {{ nginx_home }} 4 | sbin/nginx 5 | -------------------------------------------------------------------------------- /roles/nginx/templates/reload.sh.j2: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | 3 | cd {{ nginx_home }} 4 | sbin/nginx -s reload 5 | -------------------------------------------------------------------------------- /refer/cluster_topology.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dingodb/dingo-deploy/HEAD/refer/cluster_topology.png -------------------------------------------------------------------------------- /roles/nginx/templates/stop.sh.j2: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ps -ef|grep nginx|awk '{print $2}'|xargs kill -9 4 | -------------------------------------------------------------------------------- /roles/jdk/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | installer_cache_path: /tmp 5 | delete_cache_after_install: true 6 | -------------------------------------------------------------------------------- /artifacts/axel-2.4-9.el7.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dingodb/dingo-deploy/HEAD/artifacts/axel-2.4-9.el7.x86_64.rpm -------------------------------------------------------------------------------- /artifacts/system/centos8/repo/CentOS-Vault.repo: -------------------------------------------------------------------------------- 1 | # CentOS Vault contains rpms from older releases in the CentOS-8 2 | # tree. 3 | 4 | -------------------------------------------------------------------------------- /roles/check_ports/vars/main.yml: -------------------------------------------------------------------------------- 1 | 2 | 3 | script_base_directory: "/tmp" 4 | script_file_path: "{{ script_base_directory }}/check_ports.sh" -------------------------------------------------------------------------------- /auto-tests/dingo/restart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | nohup docker-compose -f /root/dingo/docker-compose.yml up -d >> /tmp/dingo.log 2>&1 & 4 | -------------------------------------------------------------------------------- /container/images/templates/conf/application.yaml: -------------------------------------------------------------------------------- 1 | spring: 2 | application: 3 | name: dingodb-manager 4 | profiles: 5 | active: dev 6 | -------------------------------------------------------------------------------- /artifacts/sqlline-1.13.0-SNAPSHOT-jar-with-dependencies.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dingodb/dingo-deploy/HEAD/artifacts/sqlline-1.13.0-SNAPSHOT-jar-with-dependencies.jar -------------------------------------------------------------------------------- /roles/prometheus/templates/node.yml.j2: -------------------------------------------------------------------------------- 1 | - labels: 2 | env: cluster 3 | targets: 4 | {% for node_exporter_host in node_exporter_servers %} 5 | - {{ node_exporter_host }}:{{ node_exporter_port }} 6 | {% endfor %} 7 | -------------------------------------------------------------------------------- /roles/prometheus/templates/process.yml.j2: -------------------------------------------------------------------------------- 1 | - labels: 2 | env: process 3 | targets: 4 | {% for process_exporter_host in process_exporter_servers %} 5 | - {{ process_exporter_host }}:{{ process_exporter_port }} 6 | {% endfor %} 7 | -------------------------------------------------------------------------------- /container/images/templates/conf/client.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: dingo 3 | exchange: 4 | host: XXXXXX 5 | port: 8765 6 | client: 7 | coordinatorExchangeSvrList: coordinator3:19181,coordinator2:19181,coordinator1:19181 8 | 9 | --------------------------------------------------------------------------------
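The two Prometheus file_sd templates above (node.yml.j2 and process.yml.j2) expand to one scrape target per exporter host. As a rough sketch only — assuming node_exporter_servers resolves to two hypothetical hosts, node-01 and node-02, and node_exporter_port keeps the role default of 19100 — node.yml.j2 would render to:

# Hypothetical rendering of roles/prometheus/templates/node.yml.j2
# with node_exporter_servers: [node-01, node-02] and node_exporter_port: 19100
- labels:
    env: cluster
  targets:
    - node-01:19100
    - node-02:19100

process.yml.j2 follows the same pattern with the process_exporter_* variables (default port 19256).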
/auto-tests/dingo/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | nohup docker-compose -f /root/dingo/docker-compose.yml down && docker rmi -f dingodatabase/dingo:latest && docker-compose -f /root/dingo/docker-compose.yml up -d >> /tmp/dingo.log 2>&1 & 4 | -------------------------------------------------------------------------------- /roles/prometheus/templates/blackbox.yml.j2: -------------------------------------------------------------------------------- 1 | {% for svc in service_list %} 2 | - targets: 3 | {% for host in svc.hosts %} 4 | - {{ host }}:{{ svc.port }} 5 | {% endfor %} 6 | labels: 7 | group: {{ svc.group }} 8 | 9 | {% endfor %} 10 | -------------------------------------------------------------------------------- /roles/jdk/tasks/01_os_user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure jdk group exist: {{ jdk_group }}" 4 | group: name={{ jdk_group }} state=present 5 | 6 | - name: "Ensure jdk user exist: {{ jdk_user }}" 7 | user: name={{ jdk_user }} group={{ jdk_group }} 8 | -------------------------------------------------------------------------------- /roles/dingo_store/templates/coordinator-logrotate.j2: -------------------------------------------------------------------------------- 1 | {{ dingo_store_home }}/dist/coordinator1/log/COORDINATOR* { 2 | daily 3 | rotate 10 4 | size 80M 5 | create 6 | compress 7 | missingok 8 | dateext 9 | su {{ dingo_user }} {{ dingo_group }} 10 | } 11 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/templates/coordinator-logrotate.j2: -------------------------------------------------------------------------------- 1 | {{ dingo_store_home }}/dist/coordinator1/log/COORDINATOR* { 2 | daily 3 | rotate 10 4 | size 80M 5 | create 6 | compress 7 | missingok 8 | dateext 9 | su {{ dingo_user }} {{ dingo_group }} 10 | } 11 | -------------------------------------------------------------------------------- /roles/nginx/tasks/01_os_user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure nginx group exist: {{ nginx_group }}" 4 | group: name={{ nginx_group }} state=present 5 | 6 | - name: "Ensure nginx user exist: {{ nginx_user }}" 7 | user: name="{{ nginx_user }}" group={{ nginx_group }} 8 | -------------------------------------------------------------------------------- /roles/system/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | delete_cache_after_install: true 4 | 5 | # check_disk_size: false 6 | installer_root_path_min_size_in_mb: "50*1024" 7 | installer_cache_min_size_in_mb: "10*1024" 8 | 9 | disable_firewall: true 10 | 11 | default_domain: "zetyun.local" -------------------------------------------------------------------------------- /roles/node_exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check node-exporter should install or Not" 4 | meta: end_play 5 | when: install_node_exporter == false 6 | 7 | - name: "Load artifacts info" 8 | action: resolve_artifacts 9 | 10 | - import_tasks: 01_node_exporter.yml 11 | 12 | -------------------------------------------------------------------------------- /roles/dingo_store/templates/store-logrotate.j2: -------------------------------------------------------------------------------- 1 | {% 
for item in store_disk_list %} 2 | {{ item }}/store{{ loop.index }}/log/STORE* { 3 | daily 4 | rotate 10 5 | size 80M 6 | create 7 | compress 8 | missingok 9 | dateext 10 | su {{ dingo_user }} {{ dingo_group }} 11 | } 12 | 13 | {% endfor %} 14 | -------------------------------------------------------------------------------- /roles/process_exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check process-exporter should install or Not" 4 | meta: end_play 5 | when: install_process_exporter == false 6 | 7 | - name: "Load artifacts info" 8 | action: resolve_artifacts 9 | 10 | - import_tasks: 01_process_exporter.yml 11 | 12 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/templates/store-logrotate.j2: -------------------------------------------------------------------------------- 1 | {% for item in store_disk_list %} 2 | {{ item }}/store{{ loop.index }}/log/STORE* { 3 | daily 4 | rotate 10 5 | size 80M 6 | create 7 | compress 8 | missingok 9 | dateext 10 | su {{ dingo_user }} {{ dingo_group }} 11 | } 12 | 13 | {% endfor %} 14 | -------------------------------------------------------------------------------- /playbooks/02_jdk.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 2. Install JDK 5 | #--------------------------------- 6 | - hosts: 7 | - all_nodes 8 | vars_files: 9 | - ../group_vars/all/_shared.yml 10 | tasks: 11 | - include_role: name=jdk 12 | when: install_java_sdk 13 | -------------------------------------------------------------------------------- /container/images/templates/conf/coordinator.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: dingo 3 | exchange: 4 | host: XXXXXX 5 | port: 19181 6 | server: 7 | dataPath: /opt/dingo/coordinator 8 | servers: coordinator3:19181,coordinator2:19181,coordinator1:19181 9 | schedule: 10 | autoSplit: true 11 | -------------------------------------------------------------------------------- /roles/create_users/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Note: 'debug_enabled_default: true' will put hashed passwords in the output. 3 | debug_enabled_default: true 4 | default_update_password: on_create 5 | default_shell: /bin/bash 6 | default_generate_ssh_key_comment: "ansible-generated for {{ item.username }}@{{ ansible_hostname }}" 7 | -------------------------------------------------------------------------------- /create-user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 1. Prepare System 5 | #--------------------------------- 6 | - hosts: 7 | - all_nodes 8 | vars_files: 9 | - ./group_vars/all/_shared.yml 10 | - ./group_vars/all/user_info.yml 11 | tasks: 12 | - include_role: name=create_users 13 | 14 | -------------------------------------------------------------------------------- /playbooks/04_dingo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 4. 
Install Dingo nodes 5 | #--------------------------------- 6 | - hosts: 7 | - executor_nodes 8 | vars_files: 9 | - ../group_vars/all/_shared.yml 10 | tasks: 11 | - include_role: name=dingo 12 | when: install_dingo 13 | 14 | -------------------------------------------------------------------------------- /playbooks/08_grafana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 8. Install grafana 5 | #--------------------------------- 6 | 7 | - hosts: 8 | - grafana 9 | vars_files: 10 | - ../group_vars/all/_shared.yml 11 | tasks: 12 | - include_role: name=grafana 13 | when: install_grafana 14 | 15 | -------------------------------------------------------------------------------- /playbooks/05_prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 5. Install prometheus 5 | #--------------------------------- 6 | - hosts: 7 | - prometheus 8 | vars_files: 9 | - ../group_vars/all/_shared.yml 10 | tasks: 11 | - include_role: name=prometheus 12 | when: install_prometheus 13 | -------------------------------------------------------------------------------- /playbooks/09_monitor_web.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 9. Install monitor web 5 | #--------------------------------- 6 | 7 | - hosts: 8 | - web 9 | vars_files: 10 | - ../group_vars/all/_shared.yml 11 | tasks: 12 | - include_role: name=nginx 13 | when: install_monitor_web 14 | 15 | -------------------------------------------------------------------------------- /playbooks/02_1_check_ports.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 2_1. Check Port Conflicts 5 | #--------------------------------- 6 | - hosts: 7 | - all_nodes 8 | vars_files: 9 | - ../group_vars/all/_shared.yml 10 | tasks: 11 | - include_role: name=check_ports 12 | when: check_port_conflicts 13 | -------------------------------------------------------------------------------- /roles/dingo_store/templates/generate_id.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 3 | uuid=$(uuidgen) 4 | echo "uuid: $uuid" 5 | 6 | key="dingo" 7 | cd "${BASE_DIR}/build/bin/" 8 | ./dingodb_client CoorKvPut --key=$key --value=$uuid --need-prev-kv=true --lease=0 9 | ./dingodb_client CoorKvGet --key=$key 10 | -------------------------------------------------------------------------------- /playbooks/03_dingo-store.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 3. Install Dingo_store nodes 5 | #--------------------------------- 6 | - hosts: 7 | - all_nodes 8 | vars_files: 9 | - ../group_vars/all/_shared.yml 10 | tasks: 11 | - include_role: name=dingo_store 12 | when: install_dingo_store 13 | -------------------------------------------------------------------------------- /playbooks/06_node_exporter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 6. 
Install node_exporter 5 | #--------------------------------- 6 | 7 | - hosts: 8 | - all_nodes 9 | vars_files: 10 | - ../group_vars/all/_shared.yml 11 | tasks: 12 | - include_role: name=node_exporter 13 | when: install_node_exporter 14 | -------------------------------------------------------------------------------- /roles/process_exporter/files/process_name.yaml: -------------------------------------------------------------------------------- 1 | process_names: 2 | # - name: "{{.Comm}}" 3 | # cmdline: 4 | # - '.+' 5 | 6 | - name: "{{.ExeFull}}" 7 | cmdline: 8 | - 'dingodb_server' 9 | 10 | - name: prometheus 11 | cmdline: 12 | - 'prometheus' 13 | 14 | - name: grafana 15 | cmdline: 16 | - 'grafana' 17 | -------------------------------------------------------------------------------- /playbooks/031_scaling_in_dingo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | #--------------------------------- 5 | # 3.1 ADD Dingo_store nodes 6 | #--------------------------------- 7 | - hosts: 8 | - scaling_in_dingo 9 | vars_files: 10 | - ../group_vars/all/_shared.yml 11 | tasks: 12 | - include_role: name=scaling_in_dingo 13 | when: install_dingo_store -------------------------------------------------------------------------------- /playbooks/07_process_exporter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 7. Install process_exporter 5 | #--------------------------------- 6 | - hosts: 7 | - all_nodes 8 | vars_files: 9 | - ../group_vars/all/_shared.yml 10 | tasks: 11 | - include_role: name=process_exporter 12 | when: install_process_exporter 13 | -------------------------------------------------------------------------------- /container/images/templates/conf/executor.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: dingo 3 | exchange: 4 | host: XXXXXX 5 | port: 19191 6 | server: 7 | coordinatorExchangeSvrList: coordinator3:19181,coordinator2:19181,coordinator1:19181 8 | dataPath: /opt/dingo/executor/meta 9 | store: 10 | dbPath: /opt/dingo/executor/raftDb 11 | collectStatsInterval: 5 12 | -------------------------------------------------------------------------------- /roles/dingo/templates/application-proxy.yaml.j2: -------------------------------------------------------------------------------- 1 | spring: 2 | application: 3 | name: dingodb-manager 4 | profiles: 5 | active: dev 6 | security: 7 | cipher: 8 | keyPath: {{ dingo_home }}/conf/dingodb.jks 9 | keyPass: dingodb 10 | storePass: dingodb 11 | alias: dingodb 12 | issuer: dingo 13 | verify: true -------------------------------------------------------------------------------- /container/images/templates/conf/application-dev.yaml: -------------------------------------------------------------------------------- 1 | server: 2 | compression: 3 | enabled: true 4 | mime-types: text/html,text/xml,text/plain,text/css, application/javascript, application/json 5 | min-response-size: 1024 6 | host: XXXXXX 7 | port: 13000 8 | coordinatorExchangeSvrList: coordinator3:19181,coordinator2:19181,coordinator1:19181 9 | -------------------------------------------------------------------------------- /playbooks/01_system.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 1. 
Prepare System 5 | #--------------------------------- 6 | - hosts: 7 | - all_nodes 8 | vars_files: 9 | - ../group_vars/all/_shared.yml 10 | tasks: 11 | - include_role: name=system 12 | when: install_system 13 | handlers: 14 | - name: Reload limits 15 | command: sysctl -p 16 | ignore_errors: yes -------------------------------------------------------------------------------- /artifacts/system/centos8/repo/CentOS-fasttrack.repo: -------------------------------------------------------------------------------- 1 | #CentOS-fasttrack.repo 2 | 3 | [fasttrack] 4 | name=CentOS-$releasever - fasttrack 5 | mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=fasttrack&infra=$infra 6 | #baseurl=http://mirror.centos.org/$contentdir/$releasever/fasttrack/$basearch/os/ 7 | gpgcheck=1 8 | enabled=0 9 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial 10 | 11 | -------------------------------------------------------------------------------- /roles/dingo/templates/client.yaml.j2: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: dingo 3 | exchange: 4 | host: {{ inventory_hostname }} 5 | port: 8765 6 | client: 7 | coordinatorExchangeSvrList: {{ dingo_coordinator_exchange_connection_list }} 8 | security: 9 | cipher: 10 | keyPath: {{ dingo_home }}/conf/dingodb.jks 11 | keyPass: dingodb 12 | storePass: dingodb 13 | alias: dingodb 14 | issuer: dingo 15 | verify: true 16 | -------------------------------------------------------------------------------- /roles/system/tasks/05_install_basic_tools.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #- name: install Vim 4 | # dnf: 5 | # name: vim 6 | # state: present 7 | 8 | #- name: install Tmux 9 | # dnf: 10 | # name: tmux 11 | # state: present 12 | 13 | - name: "Install required packages" 14 | package: name={{ item }} state=present 15 | with_items: 16 | - "tar" 17 | - "vim" 18 | - "tmux" 19 | - "unzip" 20 | - "libaio-devel" 21 | - "boost-devel" 22 | become: yes -------------------------------------------------------------------------------- /roles/dingo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check DingoDB should install or Not" 4 | meta: end_play 5 | when: install_dingo == false 6 | 7 | - name: "Load artifacts info" 8 | action: resolve_artifacts 9 | 10 | - import_tasks: 01_basic_command.yml 11 | when: install_dingo_basic_command 12 | 13 | - import_tasks: 02_update_configuration.yml 14 | when: install_dingo_update_configuration 15 | 16 | - import_tasks: 03_start_roles_command.yml 17 | when: install_dingo_start_roles 18 | -------------------------------------------------------------------------------- /roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Load artifacts info" 4 | action: resolve_artifacts 5 | 6 | - name: "Check nginx installed or not" 7 | stat: path={{ nginx_home }}/usr/sbin/nginx 8 | register: nginx_installed 9 | 10 | - import_tasks: 01_os_user.yml 11 | 12 | - import_tasks: 02_build.yml 13 | when: not nginx_installed.stat.exists 14 | 15 | - import_tasks: 03_untar_monitor_web.yml 16 | 17 | - import_tasks: 04_config.yml 18 | 19 | - import_tasks: 05_startup.yml 20 | 21 | -------------------------------------------------------------------------------- /roles/jdk/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check current operation is needed" 4 | meta: end_play 5 | when: install_java_sdk == false 6 | 7 | - name: "Load artifacts info" 8 | action: resolve_artifacts 9 | 10 | - name: "Check jdk installed or not" 11 | stat: path={{ jdk_home }}/bin/java 12 | register: jdk_installed 13 | 14 | - block: 15 | - include: 01_os_user.yml 16 | 17 | - include: 02_untar.yml 18 | 19 | - include: 03_config.yml 20 | 21 | #when: not jdk_installed.stat.exists 22 | -------------------------------------------------------------------------------- /roles/dingo/templates/application-proxy-dev.yaml.j2: -------------------------------------------------------------------------------- 1 | server: 2 | compression: 3 | enabled: true 4 | mime-types: text/html,text/xml,text/plain,text/css, application/javascript, application/json 5 | min-response-size: 1024 6 | host: {{ inventory_hostname }} 7 | port: {{ dingo_proxy_http_port }} 8 | grpc: 9 | port: {{ dingo_proxy_grpc_port }} 10 | coordinatorExchangeSvrList: {{ dingo_coordinator_exchange_connection_list }} 11 | client: 12 | retry: 60 13 | -------------------------------------------------------------------------------- /auto-tests/dingo/script.md: -------------------------------------------------------------------------------- 1 | # How to install crontab script 2 | 3 | - Edit crontab files 4 | 5 | ```shell 6 | crontab -e 7 | ``` 8 | 9 | - Copy Commands to Script 10 | 11 | ```shell 12 | 10 15 * * * /root/dingo/start.sh 13 | ``` 14 | 15 | - Docker configuration 16 | 17 | ```shell 18 | { 19 | "auths": { 20 | "https://index.docker.io/v1/": { 21 | "auth": "ZGluZ29kYXRhYmFzZTpTZXJ2ZXIyMDIxIQ==" 22 | } 23 | }, 24 | 25 | "proxies": { 26 | "default": { 27 | } 28 | } 29 | } 30 | ``` 31 | -------------------------------------------------------------------------------- /roles/dingo_store/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check DingoDB should install or Not" 4 | meta: end_play 5 | when: install_dingo_store == false 6 | 7 | - name: "Load artifacts info" 8 | action: resolve_artifacts 9 | 10 | - import_tasks: 01_basic_command.yml 11 | when: install_dingo_store_basic_command 12 | 13 | - import_tasks: 02_update_configuration.yml 14 | when: install_dingo_store_update_configuration 15 | 16 | - import_tasks: 03_start_roles_command.yml 17 | when: install_dingo_store_start_roles 18 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check DingoDB should install or Not" 4 | meta: end_play 5 | when: install_dingo_store == false 6 | 7 | - name: "Load artifacts info" 8 | action: resolve_artifacts 9 | 10 | - import_tasks: 01_basic_command.yml 11 | when: install_dingo_store_basic_command 12 | 13 | - import_tasks: 02_update_configuration.yml 14 | when: install_dingo_store_update_configuration 15 | 16 | - import_tasks: 03_start_roles_command.yml 17 | when: install_dingo_store_start_roles 18 | -------------------------------------------------------------------------------- /roles/check_ports/templates/check_port.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export TIMEOUT_SECONDS=1 4 | 5 | printf "\U1F4DD Test port on ${LOCAL_HOST_NAME}:\n" 6 | {% for var_name,var_value in 
vars.items() %} 7 | {% if var_name.endswith('_port') %} 8 | timeout $TIMEOUT_SECONDS bash -c "echo 'What is up by howhow ...' >/dev/tcp/127.0.0.1/{{ var_value }}" 2>/dev/null && \ 9 | printf " \U1F4DB Ports Conflicts at {{ var_value }}" || printf " \U1F44D Ports OK at {{ var_value }}" 10 | echo 11 | {% endif%} 12 | {% endfor %} 13 | 14 | exit 0 -------------------------------------------------------------------------------- /roles/prometheus/templates/pushgateway.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Prometheus Pushgateway 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User={{ pushgateway_user }} 8 | Group={{ pushgateway_group }} 9 | WorkingDirectory={{ pushgateway_home }}/ 10 | ExecStart=/bin/sh -c 'exec {{ pushgateway_home }}/{{ pushgateway_exec }} --web.listen-address=:{{ pushgateway_port }} \ 11 | > {{ pushgateway_log_path }}/{{ pushgateway_service_name }}.out 2>&1 ' 12 | Restart=always 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /roles/prometheus/templates/jmx.json.j2: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "targets": ["172.20.3.20:9088"] 4 | 5 | },{ 6 | "targets": ["172.20.3.21:9088"] 7 | 8 | },{ 9 | "targets": ["172.20.3.22:9088"] 10 | 11 | },{ 12 | "targets": ["172.20.3.20:9089"] 13 | 14 | },{ 15 | "targets": ["172.20.3.21:9089"] 16 | 17 | },{ 18 | "targets": ["172.20.3.22:9089"] 19 | 20 | } 21 | ] 22 | 23 | -------------------------------------------------------------------------------- /roles/prometheus/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check prometheus should install or Not" 3 | meta: end_play 4 | when: install_prometheus == false 5 | 6 | - name: "Load artifacts info" 7 | action: resolve_artifacts 8 | 9 | - name: Gather facts from extra hosts (regardless of limit or tags) 10 | setup: 11 | delegate_to: "{{ item }}" 12 | delegate_facts: true 13 | when: hostvars[item]['ansible_default_ipv4'] is not defined 14 | with_items: "{{ groups['all'] }}" 15 | 16 | - import_tasks: 03_prometheus_server.yml 17 | 18 | #- import_tasks: 04_add_target.yml -------------------------------------------------------------------------------- /roles/dingo/templates/coordinator.yaml.j2: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: dingo 3 | exchange: 4 | host: {{ inventory_hostname }} 5 | port: {{ dingo_coordinator_exchange_port }} 6 | server: 7 | dataPath: {{ dingo_coordinator_meta_path }} 8 | servers: {{ dingo_coordinator_exchange_connection_list }} 9 | monitorPort: {{ dingo_coordinator_http_monitor_port }} 10 | security: 11 | cipher: 12 | keyPath: {{ dingo_home }}/conf/dingodb.jks 13 | keyPass: dingodb 14 | storePass: dingodb 15 | alias: dingodb 16 | issuer: dingo 17 | verify: true 18 | -------------------------------------------------------------------------------- /roles/grafana/templates/grafana-env.j2: -------------------------------------------------------------------------------- 1 | GRAFANA_USER={{ grafana_user }} 2 | 3 | GRAFANA_GROUP={{ grafana_group }} 4 | 5 | GRAFANA_HOME={{ grafana_home }} 6 | 7 | LOG_DIR={{ grafana_log_path }} 8 | 9 | DATA_DIR={{ grafana_data_path }} 10 | 11 | MAX_OPEN_FILES=10000 12 | 13 | CONF_DIR={{ grafana_conf_path }} 14 | 15 | CONF_FILE={{ grafana_conf_path }}/grafana.ini 16 | 17 | 
RESTART_ON_UPGRADE=true 18 | 19 | PLUGINS_DIR={{ grafana_plugin_path }} 20 | 21 | PROVISIONING_CFG_DIR={{ grafana_conf_path }}/provisioning 22 | 23 | # Only used on systemd systems 24 | PID_FILE_DIR={{ grafana_run_path }} 25 | -------------------------------------------------------------------------------- /roles/system/tasks/02_install_system_cfg.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Disable selinux temporarily 4 | - name: "Disable selinux temporarily" 5 | shell: "setenforce 0 || /bin/true" 6 | failed_when: false 7 | 8 | # Disable Firewall 9 | - name: " Disable firewall" 10 | systemd: name=firewalld state=stopped enabled=no 11 | when: ansible_os_family == 'RedHat' and ansible_service_mgr == "systemd" and disable_firewall 12 | failed_when: false 13 | 14 | # Update user open file limits 15 | - name: Update Open file limits 16 | copy: src={{ cfg_system_open_file_limits_path }} dest=/etc/security/limits.conf -------------------------------------------------------------------------------- /roles/check_ports/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for check_ports 3 | 4 | 5 | - name: generate check port script to "{{ script_file_path }}" 6 | template: 7 | src: templates/check_port.sh.j2 8 | dest: "{{ script_file_path }}" 9 | 10 | - name: run the check_ports script 11 | shell: | 12 | bash /tmp/check_ports.sh && \ 13 | rm -f /tmp/check_ports.sh 14 | register: check_ports_result 15 | 16 | - name: summarize check ports conflicts result 17 | debug: 18 | msg: "{{ check_ports_result.stdout_lines }}" 19 | failed_when: check_ports_result.stdout_lines|join(',')|regex_search('Conflicts') 20 | -------------------------------------------------------------------------------- /filter_plugins/custom_filter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import json 3 | 4 | 5 | class FilterModule(object): 6 | def filters(self): 7 | return { 8 | 'cidr': self.cidr, 9 | 'json_result': self.json_result, 10 | } 11 | 12 | @staticmethod 13 | def cidr(ip, netmask): 14 | return ip + "/" + str(sum(bin(int(x)).count('1') for x in netmask.split('.'))) 15 | 16 | @staticmethod 17 | def json_result(json_str): 18 | json_result = {} 19 | try: 20 | json_result = json.loads(json_str) 21 | except json.JSONDecodeError: 22 | pass 23 | return json_result 24 | -------------------------------------------------------------------------------- /roles/node_exporter/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | node_exporter_service_name: "dingo-node-exporter" 4 | 5 | node_exporter_home: "{{ installer_root_path | default('/opt') }}/node-exporter" 6 | node_exporter_user: "{{ dingo_user | default('prometheus') }}" 7 | node_exporter_group: "{{ dingo_group | default('prometheus') }}" 8 | 9 | node_exporter_log_path: "{{ dingo_log_dir | default('/var/log') }}/node-exporter" 10 | node_exporter_data_path: "{{ dingo_data_dir | default('/var/lib') }}/node-exporter" 11 | node_exporter_run_path: "{{ dingo_run_dir | default('/var/run') }}/node-exporter" 12 | 13 | node_exporter_port: 19100 14 | 15 | node_exporter_web_listen_address: "0.0.0.0:{{ node_exporter_port }}" -------------------------------------------------------------------------------- /group_vars/all/user_info.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #
generate password with: python -c 'import crypt,getpass; print(crypt.crypt(getpass.getpass(), crypt.mksalt(crypt.METHOD_SHA512)))' 3 | 4 | users: 5 | - username: "{{ dingo_user }}" 6 | password: "{{ installer_password }}" 7 | update_password: on_create 8 | comment: ansible manager 9 | primarygroup: "{{ dingo_group }}" 10 | shell: /bin/bash 11 | generate_ssh_key: yes 12 | ssh_key_bits: 2048 13 | use_sudo: yes 14 | use_sudo_nopass: yes 15 | no_passwd_login: "{{ install_no_passwd_login }}" 16 | user_state: present 17 | servers: 18 | - coordinator 19 | - store 20 | - prometheus 21 | - grafana 22 | -------------------------------------------------------------------------------- /roles/process_exporter/templates/process-exporter.service.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [Unit] 3 | Description=Prometheus Process Exporter 4 | After=network.target 5 | 6 | [Service] 7 | Type=simple 8 | User={{ process_exporter_user }} 9 | Group={{ process_exporter_group }} 10 | Nice=-5 11 | ExecStart=/bin/sh -c 'exec {{ process_exporter_home }}/process-exporter -config.path={{ process_exporter_home }}/process_name.yaml \ 12 | --web.listen-address {{ process_exporter_web_listen_address }} \ 13 | > {{ process_exporter_log_path }}/{{ process_exporter_service_name }}.out 2>&1 ' 14 | 15 | SyslogIdentifier={{ process_exporter_service_name }} 16 | Restart=always 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /roles/prometheus/templates/blackbox-exporter.service.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [Unit] 3 | Description=Prometheus Blackbox Exporter 4 | After=network.target 5 | 6 | [Service] 7 | Type=simple 8 | User={{ blackbox_exporter_user }} 9 | Group={{ blackbox_exporter_group }} 10 | Nice=-5 11 | ExecStart=/bin/sh -c 'exec {{ blackbox_exporter_home }}/blackbox_exporter \ 12 | --web.listen-address {{ blackbox_exporter_web_listen_address }} \ 13 | --config.file={{ blackbox_exporter_home }}/blackbox.yml \ 14 | > {{ blackbox_exporter_log_path }}/{{ blackbox_exporter_service_name }}.out 2>&1 ' 15 | 16 | SyslogIdentifier={{ blackbox_exporter_service_name }} 17 | Restart=always 18 | 19 | [Install] 20 | WantedBy=multi-user.target 21 | -------------------------------------------------------------------------------- /roles/process_exporter/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | process_exporter_service_name: "dingo-process-exporter" 4 | 5 | process_exporter_home: "{{ installer_root_path | default('/opt') }}/process-exporter" 6 | process_exporter_user: "{{ dingo_user | default('prometheus') }}" 7 | process_exporter_group: "{{ dingo_group | default('prometheus') }}" 8 | 9 | process_exporter_log_path: "{{ dingo_log_dir | default('/var/log') }}/process-exporter" 10 | process_exporter_data_path: "{{ dingo_data_dir | default('/var/lib') }}/process-exporter" 11 | process_exporter_run_path: "{{ dingo_run_dir | default('/var/run') }}/process-exporter" 12 | 13 | process_exporter_port: 19256 14 | 15 | process_exporter_web_listen_address: "0.0.0.0:{{ process_exporter_port }}" 16 | -------------------------------------------------------------------------------- /roles/system/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check current operation
is needed" 4 | meta: end_play 5 | when: install_system == false 6 | 7 | - name: "Install Basic Linux Tools" 8 | action: resolve_artifacts 9 | 10 | - import_tasks: 01_check_install_requirement.yml 11 | 12 | - include_tasks: 02_install_system_cfg.yml 13 | when: install_system_fileLimits 14 | 15 | - import_tasks: 03_hostname.yml 16 | 17 | - import_tasks: 04_install_ntp_or_chrony.yml 18 | when: install_system_basicTools 19 | 20 | - include_tasks: 05_install_basic_tools.yml 21 | when: install_system_basicTools 22 | 23 | - include_tasks: 06_enable_core_dumps.yml 24 | when: set_core_file 25 | 26 | 27 | - include_tasks: 07_optimize_memory.yml 28 | when: install_optimize_memory -------------------------------------------------------------------------------- /roles/nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | nginx_home: "{{ nginx_install_path }}/nginx" 4 | nginx_conf_dir: "{{ nginx_home }}/conf" 5 | # Just for nginx init script 6 | nginx_log_path: "{{ nginx_home }}/var/log/nginx" 7 | nginx_pid_file: "{{ nginx_home }}/var/run/nginx.pid" 8 | 9 | nginx_user: "datacanvas" 10 | nginx_group: "datacanvas" 11 | 12 | 13 | nginx_worker_processes: "{% if ansible_processor_vcpus is defined %}{{ ansible_processor_vcpus }}{% else %}auto{% endif %}" 14 | 15 | nginx_http_default_params: 16 | - sendfile on 17 | - tcp_nopush on 18 | - tcp_nodelay on 19 | - server_tokens off 20 | 21 | nginx_http_params: "{{ nginx_http_default_params }}" 22 | 23 | nginx_disabled_sites: [] 24 | 25 | installer_cache_path: /tmp 26 | delete_cache_after_install: true 27 | -------------------------------------------------------------------------------- /roles/jdk/tasks/03_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Get home directory of user: {{ jdk_user }}" 4 | shell: "cat /etc/passwd | grep '{{ jdk_user }}:x' | awk -F ':' '{print $6}'" 5 | register: user_home_output 6 | 7 | - set_fact: env_file="{{ user_home_output.stdout }}/.bash_profile" 8 | 9 | - name: "Ensure {{ env_file }} exists" 10 | file: path="{{ env_file }}" state=touch owner={{ jdk_user }} group={{ jdk_group }} 11 | 12 | - name: "Add environment variables to {{ env_file }}" 13 | lineinfile: 14 | path: "{{ env_file }}" 15 | line: "{{ item }}" 16 | state: present 17 | with_items: 18 | - "JAVA_HOME={{ jdk_home }}" 19 | - "PATH=${JAVA_HOME}/bin:$PATH" 20 | - "export JAVA_HOME PATH" 21 | 22 | - name: "Source Java Profile" 23 | shell: "source {{ env_file }}" -------------------------------------------------------------------------------- /roles/nginx/tasks/05_startup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Start nginx via systemd" 4 | systemd: name=nginx state=started enabled=yes daemon_reload=yes 5 | become: yes 6 | when: ansible_service_mgr == "systemd" 7 | 8 | - name: "Start nginx service" 9 | service: name=nginx state=started enabled=yes 10 | become: yes 11 | when: (ansible_service_mgr == "upstart" or ansible_service_mgr == "sysvinit") 12 | 13 | - name: "Ensure nginx service is started via systemd" 14 | shell: "{{ nginx_home }}/reload.sh" 15 | become: yes 16 | when: ansible_service_mgr == "systemd" 17 | 18 | - name: "Ensure nginx service is started" 19 | service: name=nginx state=restarted enabled=yes 20 | become: yes 21 | when: (ansible_service_mgr == "upstart" or ansible_service_mgr == "sysvinit") 22 | 
-------------------------------------------------------------------------------- /roles/nginx/templates/systemd/nginx.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=nginx - high performance web server 3 | Documentation=http://nginx.org/en/docs/ 4 | After=network-online.target remote-fs.target nss-lookup.target 5 | Wants=network-online.target 6 | 7 | [Service] 8 | Type=forking 9 | User={{ nginx_user }} 10 | Group={{ nginx_group }} 11 | PermissionsStartOnly=true 12 | ExecStartPre=/bin/chown -R {{ nginx_user }}:{{ nginx_group }} {{ nginx_home }} 13 | ExecStartPre=/bin/chown -R {{ nginx_user }}:{{ nginx_group }} {{nginx_log_path}} 14 | ExecStart=/bin/sh -c 'exec {{ nginx_home }}/start.sh' 15 | ExecReload=/bin/sh -c 'exec {{ nginx_home }}/reload.sh' 16 | ExecStop=/bin/sh -c 'exec {{ nginx_home }}/stop.sh' 17 | Restart=always 18 | 19 | [Install] 20 | WantedBy=multi-user.target 21 | -------------------------------------------------------------------------------- /artifacts/system/centos8/repo/CentOS-Debuginfo.repo: -------------------------------------------------------------------------------- 1 | # CentOS-Debug.repo 2 | # 3 | # The mirror system uses the connecting IP address of the client and the 4 | # update status of each mirror to pick mirrors that are updated to and 5 | # geographically close to the client. You should use this for CentOS updates 6 | # unless you are manually picking other mirrors. 7 | # 8 | 9 | # All debug packages from all the various CentOS-8 releases 10 | # are merged into a single repo, split by BaseArch 11 | # 12 | # Note: packages in the debuginfo repo are currently not signed 13 | # 14 | 15 | [base-debuginfo] 16 | name=CentOS-$releasever - Debuginfo 17 | baseurl=http://debuginfo.centos.org/$releasever/$basearch/ 18 | gpgcheck=1 19 | enabled=0 20 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial 21 | 22 | -------------------------------------------------------------------------------- /relink.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | readonly SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | cd "${SCRIPT_ROOT}" 5 | 6 | echo "[1/4] Linking _shared.yml" 7 | rm -f shared.yml 8 | ln -s group_vars/all/_shared.yml shared.yml 9 | 10 | echo "[2/4] Linking customize.yml" 11 | rm -f customize.yml 12 | ln -s group_vars/all/customize.yml customize.yml 13 | 14 | echo "[3/4] Linking hosts" 15 | rm -f hosts.ini 16 | ln -s inventory/hosts hosts.ini 17 | 18 | echo "[4/4] Updating .gitattributes" 19 | tee .gitattributes >/dev/null < {{ node_exporter_log_path }}/{{ node_exporter_service_name }}.out 2>&1 ' 19 | 20 | SyslogIdentifier={{ node_exporter_service_name }} 21 | Restart=always 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /container/images/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | ENV TZ=Asia/Shanghai \ 4 | DEBIAN_FRONTEND=noninteractive 5 | SHELL ["/bin/bash", "-c"] 6 | 7 | RUN apt-get update \ 8 | && apt-get install -y openjdk-8-jdk vim unzip netcat net-tools tzdata \ 9 | && unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY \ 10 | && ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime \ 11 | && echo ${TZ} > /etc/timezone \ 12 | && dpkg-reconfigure --frontend noninteractive tzdata \ 13 | && rm -rf /var/lib/apt/lists/* 14 | 15 | COPY 
./dingo.zip /opt 16 | 17 | RUN unzip /opt/dingo.zip -d /opt/dingo && mkdir /opt/dingo/log && mkdir /opt/dingo/coordinator && mkdir -p /opt/dingo/executor/meta && mkdir /opt/dingo/executor/raftDb && mkdir /opt/dingo/executor/raftLog && chmod +x /opt/dingo/bin/* 18 | 19 | ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/ 20 | 21 | WORKDIR /opt/dingo 22 | 23 | ENTRYPOINT [ "/opt/dingo/bin/start.sh" ] 24 | -------------------------------------------------------------------------------- /roles/dingo/templates/stop-dingo-web.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright 2021 DataCanvas 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 20 | 21 | 22 | PID=`ps -u {{ dingo_user }} -o pid,cmd | grep dingo-web | grep -v grep | awk '{print $1}'` 23 | if [[ "" != "$PID" ]]; then 24 | echo "killing $PID" 25 | kill -9 $PID 26 | fi -------------------------------------------------------------------------------- /roles/prometheus/templates/prometheus.service.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [Unit] 3 | Description=Prometheus 4 | After=network.target 5 | 6 | [Service] 7 | Type=simple 8 | Environment="GOMAXPROCS={{ ansible_processor_vcpus|default(ansible_processor_count) }}" 9 | User={{ prometheus_user }} 10 | Group={{ prometheus_group }} 11 | LimitNOFILE=1024000 12 | ExecReload=/bin/kill -HUP $MAINPID 13 | ExecStart=/bin/sh -c 'exec {{ prometheus_home }}/prometheus \ 14 | --config.file={{ prometheus_home }}/prometheus.yml \ 15 | --storage.tsdb.path={{ prometheus_data_path }} \ 16 | --storage.tsdb.retention={{ prometheus_storage_retention }} \ 17 | --web.listen-address={{ prometheus_web_listen_address }} \ 18 | --web.external-url={{ prometheus_web_external_url }} > {{ prometheus_log_path }}/{{ prometheus_service_name }}.out 2>&1 ' 19 | 20 | SyslogIdentifier={{ prometheus_service_name }} 21 | Restart=always 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /roles/system/tasks/01_check_install_requirement.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Check Disk is Avaliable or Not 3 | 4 | - name: "[PRE-CHECK] Check Disk Available Size" 5 | shell: | 6 | target_path={{ item.path }} 7 | min_size_in_kb={{ item.min_size_in_mb }}*1024 8 | 9 | existing_path=$target_path 10 | until [[ -d $existing_path ]]; do existing_path=$(dirname $existing_path); done 11 | 12 | mountpoint_available_size_in_kb=$(df $existing_path | tail -1 | awk '{print $4}') 13 | if [[ $mountpoint_available_size_in_kb -ge $min_size_in_kb ]];then exit 0;else exit 1;fi 14 | args: 15 | warn: no 16 | register: check_result 17 | with_items: 18 | - { path: "{{ installer_root_path }}" , min_size_in_mb: "{{ 
installer_root_path_min_size_in_mb }}" } 19 | - { path: "{{ installer_cache_path }}" , min_size_in_mb: "{{ installer_cache_min_size_in_mb }}" } 20 | failed_when: check_result.rc != 0 21 | when: check_disk_size is not defined or check_disk_size 22 | -------------------------------------------------------------------------------- /roles/nginx/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | #{{ ansible_managed }} 2 | user {{ nginx_user }} {{ nginx_group }}; 3 | worker_processes {{ nginx_worker_processes }}; 4 | 5 | events { 6 | worker_connections 1024; 7 | } 8 | 9 | http { 10 | client_max_body_size 256m; 11 | include mime.types; 12 | {% for v in nginx_http_params %} 13 | {{ v if "}" in v[-2:] else v+";" }} 14 | {% endfor %} 15 | 16 | ## 17 | # Gzip Settings 18 | ## 19 | gzip on; 20 | gzip_http_version 1.0; 21 | gzip_disable "msie6"; 22 | 23 | # gzip_vary on; 24 | # gzip_proxied any; 25 | # gzip_comp_level 6; 26 | # gzip_buffers 16 8k; 27 | # gzip_http_version 1.1; 28 | # gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript; 29 | 30 | include {{ nginx_conf_dir }}/conf.d/*.conf; 31 | include {{ nginx_conf_dir }}/sites-enabled/*; 32 | } 33 | -------------------------------------------------------------------------------- /roles/system/tasks/07_optimize_memory.yml: -------------------------------------------------------------------------------- 1 | 2 | - name: Ensure transparent_hugepage 3 | shell: | 4 | echo "madvise" > /sys/kernel/mm/transparent_hugepage/enabled 5 | become: yes 6 | 7 | - name: Set vm.overcommit_memory value to 1 8 | sysctl: 9 | name: vm.overcommit_memory 10 | value: "1" 11 | state: present 12 | reload: yes 13 | become: yes 14 | 15 | - name: Set suid_dumpable value to 2 16 | sysctl: 17 | name: fs.suid_dumpable 18 | value: 2 19 | state: present 20 | reload: yes 21 | 22 | - name: Set aio-max-nr value 23 | sysctl: 24 | name: fs.aio-max-nr 25 | value: "{{ fs_aio_max_nr }}" 26 | state: present 27 | reload: yes 28 | 29 | - name: Set vm max_map_count value 30 | sysctl: 31 | name: vm.max_map_count 32 | value: "{{ max_map_count }}" 33 | state: present 34 | reload: yes 35 | 36 | - name: Set large page nr_hugepages 37 | sysctl: 38 | name: vm.nr_hugepages 39 | value: "{{ nr_hugepages }}" 40 | state: present 41 | reload: yes 42 | -------------------------------------------------------------------------------- /roles/dingo/templates/start-web.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright 2021 DataCanvas 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | set -x 20 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. 
&& pwd )" 21 | JAR_PATH=$(find $ROOT -name dingo-web*.jar) 22 | 23 | nohup $ROOT/Linux-x64/bin/java ${JAVA_OPTS} \ 24 | -Dlogging.config=file:${ROOT}/conf/logback-web.xml \ 25 | -jar ${JAR_PATH} \ 26 | --spring.config.location=${ROOT}/conf/application-web.yaml \ 27 | > {{ dingo_log_path }}/dingo-web.out & 28 | -------------------------------------------------------------------------------- /artifacts/system/centos8/repo/CentOS-Media.repo: -------------------------------------------------------------------------------- 1 | # CentOS-Media.repo 2 | # 3 | # This repo can be used with mounted DVD media, verify the mount point for 4 | # CentOS-8. You can use this repo and yum to install items directly off the 5 | # DVD ISO that we release. 6 | # 7 | # To use this repo, put in your DVD and use it with the other repos too: 8 | # yum --enablerepo=c8-media [command] 9 | # 10 | # or for ONLY the media repo, do this: 11 | # 12 | # yum --disablerepo=\* --enablerepo=c8-media [command] 13 | 14 | [c8-media-BaseOS] 15 | name=CentOS-BaseOS-$releasever - Media 16 | baseurl=file:///media/CentOS/BaseOS 17 | file:///media/cdrom/BaseOS 18 | file:///media/cdrecorder/BaseOS 19 | gpgcheck=1 20 | enabled=0 21 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial 22 | 23 | [c8-media-AppStream] 24 | name=CentOS-AppStream-$releasever - Media 25 | baseurl=file:///media/CentOS/AppStream 26 | file:///media/cdrom/AppStream 27 | file:///media/cdrecorder/AppStream 28 | gpgcheck=1 29 | enabled=0 30 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial -------------------------------------------------------------------------------- /roles/nginx/tasks/00_preflight.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Define a list variable with the names of the packages to install; it is used by the two install tasks below 3 | - name: Required package list 4 | set_fact: 5 | required_packages: 6 | - "gcc" 7 | - "gcc-c++" 8 | - "make" 9 | - "automake" 10 | - "autoconf" 11 | - "libtool" 12 | - "autoconf" 13 | 14 | # Get the first python2 interpreter path that matches 15 | - name: Get Python 2 path 16 | shell: ls /usr/bin/python* | grep python2 | head -n 1 17 | register: python2_output 18 | ignore_errors: true 19 | 20 | - debug: 21 | msg: "{{ python2_output.stdout }}" 22 | 23 | - name: Set Python 2 path 24 | set_fact: 25 | python2_path: "{{ python2_output.stdout }}" 26 | 27 | - name: Install required packages using Python 2.* 28 | when: python2_path is defined 29 | package: name={{ item }} state=present 30 | with_items: "{{ required_packages }}" 31 | vars: 32 | ansible_python_interpreter: "{{ python2_path }}" 33 | failed_when: false 34 | 35 | - name: Install required packages using Python 3.* 36 | package: name={{ item }} state=present 37 | with_items: "{{ required_packages }}" 38 | failed_when: false -------------------------------------------------------------------------------- /roles/nginx/templates/default.conf.j2: -------------------------------------------------------------------------------- 1 | #Ansible managed 2 | map $http_upgrade $connection_upgrade { 3 | default upgrade; 4 | '' close; 5 | } 6 | 7 | upstream monitorcluster { 8 | {% for host in groups['web'] %} 9 | server {{ host }}:{{ dingo_monitor_backend_port }}; 10 | {% endfor %} 11 | } 12 | 13 | limit_req_zone $binary_remote_addr zone=req_zone_wl:100m rate=150r/s; 14 | 15 | 16 | 17 | server { 18 | listen {{ dingo_monitor_frontend_port }} default_server; 19 | proxy_set_header X-Forwarded-For $remote_addr; 20 | root {{nginx_data_path}}/monitor_web/; 21 | location /home { 22 | alias
{{nginx_data_path}}/monitor_web; 23 | autoindex on; 24 | } 25 | location /fetchApi/monitor/ { 26 | proxy_pass http://monitorcluster/monitor/; 27 | add_header Access-Control-Allow-Origin '*'; 28 | add_header Access-Control-Allow-Credentials 'true'; 29 | add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE"; 30 | proxy_set_header X-Real-IP $remote_addr; 31 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /roles/dingo/templates/import.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright 2021 DataCanvas 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 19 | JAR_PATH=$(find $ROOT -name dingo-cli-*.jar) 20 | NET_JAR_PATH=$(find $ROOT -name dingo-net-*.jar) 21 | 22 | $ROOT/Linux-x64/bin/java ${JAVA_OPTS} \ 23 | -Dlogback.configurationFile=file:${ROOT}/conf/logback-import.xml \ 24 | -classpath ${JAR_PATH}:${NET_JAR_PATH} \ 25 | io.dingodb.cli.source.Import \ 26 | --config ${ROOT}/conf/client.yaml \ 27 | $@ 28 | -------------------------------------------------------------------------------- /roles/system/tasks/06_enable_core_dumps.yml: -------------------------------------------------------------------------------- 1 | 2 | - name: Ensure directory for core file pattern exists 3 | file: 4 | path: "{{ core_file_dir }}" 5 | state: directory 6 | mode: "0777" 7 | 8 | - name: Check if limits.d exists 9 | stat: 10 | path: /etc/security/limits.d 11 | register: directory_stat 12 | 13 | - name: Create limits.d directory if it does not exist 14 | become: yes 15 | file: 16 | path: /etc/security/limits.d 17 | state: directory 18 | mode: 0755 19 | when: 20 | - not directory_stat.stat.exists 21 | 22 | - name: Enable core dumps in /etc/security/limits.conf 23 | copy: 24 | content: | 25 | {{ dingo_user }} - core unlimited 26 | {{ dingo_user }} - nproc {{ ulimit_nproc_limit }} 27 | {{ dingo_user }} - nofile {{ ulimit_nofile_limit }} 28 | dest: /etc/security/limits.d/90-dingo.conf 29 | become: yes 30 | notify: 31 | - Reload limits 32 | 33 | 34 | 35 | - name: Set core file pattern in /etc/sysctl.conf 36 | sysctl: 37 | name: kernel.core_pattern 38 | value: "{{ core_file_dir }}/core.%e.%p.%t" 39 | state: present 40 | reload: yes -------------------------------------------------------------------------------- /roles/system/tasks/04_install_ntp_or_chrony.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Gather the package facts" 4 | package_facts: 5 | manager: auto 6 | 7 | - set_fact: 8 | chrony_installed: "{{ 'chrony' in ansible_facts.packages }}" 9 | # ntp_installed: "{{ 'ntp' in ansible_facts.packages }}" 10 | ntp_installed: false 11 | 12 | - debug: var=chrony_installed 13 | - debug: var=ntp_installed 14 | 15 | # Case 1: chrony_installed, 
just start chronyd 16 | - name: "Start chronyd service on all hosts" 17 | service: name=chronyd state=started enabled=yes 18 | when: chrony_installed 19 | 20 | # Case 2: ntp_installed, just start ntpd 21 | - name: "Start ntp service on ntp server" 22 | service: name=ntpd state=started enabled=yes 23 | when: ntp_installed 24 | 25 | # Case 3: none installed 26 | - include_tasks: 04_1_ntp.yml 27 | when: (not chrony_installed and not ntp_installed ) and (install_chrony is not defined or not install_chrony) and (install_ntp is not defined or install_ntp) 28 | 29 | - include_tasks: 04_2_chrony.yml 30 | when: (not chrony_installed and not ntp_installed ) and install_chrony is defined and install_chrony 31 | -------------------------------------------------------------------------------- /container/images/templates/bin/start-driver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright 2021 DataCanvas 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 19 | JAR_PATH=$(find $ROOT -name dingo-cli-*.jar) 20 | NET_JAR_PATH=$(find $ROOT -name dingo-net-*.jar) 21 | 22 | java ${JAVA_OPTS} \ 23 | -Dlogback.configurationFile=file:${ROOT}/conf/logback-driver.xml \ 24 | -classpath ${JAR_PATH}:${NET_JAR_PATH} \ 25 | io.dingodb.cli.Tools driver \ 26 | --config ${ROOT}/conf/client.yaml \ 27 | $@ > ${ROOT}/log/driver.out 28 | -------------------------------------------------------------------------------- /roles/dingo/templates/stop-executor-proxy.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright 2021 DataCanvas 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. 
&& pwd )" 20 | 21 | 22 | PID=`ps -u {{ dingo_user }} -o pid,cmd | grep executor.Starter | grep -v grep | awk '{print $1}'` 23 | if [[ "" != "$PID" ]]; then 24 | echo "killing $PID" 25 | kill -9 $PID 26 | fi 27 | 28 | PID=`ps -u {{ dingo_user }} -o pid,cmd | grep dingo-proxy | grep -v grep | awk '{print $1}'` 29 | if [[ "" != "$PID" ]]; then 30 | echo "killing $PID" 31 | kill -9 $PID 32 | fi -------------------------------------------------------------------------------- /roles/dingo/templates/executor-noldap.yaml.j2: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: dingo 3 | exchange: 4 | host: {{ inventory_hostname }} 5 | port: {{ dingo_executor_exchange_port }} 6 | server: 7 | coordinators: {{ dingo_coordinator_exchange_connection_list }} 8 | user: user 9 | keyring: TO_BE_CONTINUED 10 | resourceTag: 1 11 | mysqlPort: {{ dingo_mysql_port }} 12 | variable: 13 | autoIncrementCacheCount: {{ dingo_auto_increment_cache_count }} 14 | autoIncrementIncrement: 1 15 | autoIncrementOffset: 1 16 | enableTableLock: true 17 | lowerCaseTableNames: 2 18 | common: 19 | scheduledCoreThreads: 16 20 | lockCoreThreads: 0 21 | globalCoreThreads: 0 22 | gcSafePointPeriod: 300 23 | gcDeleteRegionPeriod: 60 24 | store: 25 | bufferSize: {{ dingo_executor_buffer_size }} 26 | bufferNumber: {{ dingo_executor_buffer_number }} 27 | fileSize: {{ dingo_executor_file_size }} 28 | path: {{ dingo_home }}/localStore 29 | security: 30 | cipher: 31 | keyPath: {{ dingo_home }}/conf/dingodb.jks 32 | keyPass: dingodb 33 | storePass: dingodb 34 | alias: dingodb 35 | issuer: dingo -------------------------------------------------------------------------------- /artifacts/system/centos8/repo/CentOS-CR.repo: -------------------------------------------------------------------------------- 1 | # CentOS-CR.repo 2 | # 3 | # The Continuous Release ( CR ) repository contains rpms that are due in the next 4 | # release for a specific CentOS Version ( eg. next release in CentOS-8 ); these rpms 5 | # are far less tested, with no integration checking or update path testing having 6 | # taken place. They are still built from the upstream sources, but might not map 7 | # to an exact upstream distro release. 8 | # 9 | # These packages are made available soon after they are built, for people willing 10 | # to test their environments, provide feedback on content for the next release, and 11 | # for people looking for early-access to next release content. 12 | # 13 | # The CR repo is shipped in a disabled state by default; its important that users 14 | # understand the implications of turning this on. 
15 | # 16 | 17 | [cr] 18 | name=CentOS-$releasever - cr 19 | mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=cr&infra=$infra 20 | #baseurl=http://mirror.centos.org/$contentdir/$releasever/cr/$basearch/os/ 21 | gpgcheck=1 22 | enabled=0 23 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial 24 | 25 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | 2 | [defaults] 3 | roles_path = ./roles 4 | host_key_checking = False 5 | 6 | # remote_user = deploy 7 | # private_key_file = ~/.ssh/key 8 | 9 | retry_files_enabled = False 10 | # retry_files_save_path = ~/.ansible-retry 11 | 12 | 13 | # set plugin path directories here, separate with colons 14 | #action_plugins = /usr/share/ansible/plugins/action 15 | #cache_plugins = /usr/share/ansible/plugins/cache 16 | #callback_plugins = /usr/share/ansible/plugins/callback 17 | #connection_plugins = /usr/share/ansible/plugins/connection 18 | #lookup_plugins = /usr/share/ansible/plugins/lookup 19 | #inventory_plugins = /usr/share/ansible/plugins/inventory 20 | #vars_plugins = /usr/share/ansible/plugins/vars 21 | #filter_plugins = /usr/share/ansible/plugins/filter 22 | #test_plugins = /usr/share/ansible/plugins/test 23 | #terminal_plugins = /usr/share/ansible/plugins/terminal 24 | #strategy_plugins = /usr/share/ansible/plugins/strategy 25 | 26 | action_plugins = ./action_plugins 27 | filter_plugins = ./filter_plugins 28 | 29 | log_path = ./ansible.log 30 | 31 | inventory = ./inventory 32 | 33 | [privilege_escalation] 34 | become = True 35 | -------------------------------------------------------------------------------- /roles/dingo/templates/application-web-dev.yaml.j2: -------------------------------------------------------------------------------- 1 | server: 2 | compression: 3 | enabled: true 4 | mime-types: text/html,text/xml,text/plain,text/css, application/javascript, application/json 5 | min-response-size: 1024 6 | host: {{ inventory_hostname }} 7 | port: {{ dingo_monitor_backend_port }} 8 | coordinatorExchangeSvrList: {{ dingo_coordinator_exchange_tmp_list_string_1 }} 9 | prometheus: http://{{groups['prometheus'][0]}}:{{prometheus_port}}/prometheus/api/v1/query 10 | monitor: 11 | executor: 12 | heapAlarmThreshold: 80 13 | logPath: {{ dingo_log_path }}/log/ 14 | instance: 15 | exportPort: {{node_exporter_port}} 16 | cpuAlarmThreshold: 70 17 | memAlarmThreshold: 70 18 | diskAlarmThreshold: 90 19 | 20 | spring: 21 | datasource: 22 | url: jdbc:mysql://{{groups['executor'][0]}}:{{dingo_mysql_port}}/information_schema?useSSL=false&serverTimezone=UTC&useLegacyDatetimeCode=false&allowPublicKeyRetrieval=true 23 | username: root 24 | password: 123123 25 | jpa: 26 | database-platform: org.hibernate.dialect.MySQLDialect 27 | show-sql: true -------------------------------------------------------------------------------- /roles/dingo/templates/start-driver.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright 2021 DataCanvas 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 19 | JAR_PATH=$(find $ROOT -name driver-server-*.jar) 20 | NET_JAR_PATH=$(find $ROOT -name dingo-net-*.jar) 21 | 22 | nohup $ROOT/Linux-x64/bin/java ${JAVA_OPTS} \ 23 | -Dlogback.configurationFile=file:${ROOT}/conf/logback-driver.xml \ 24 | -classpath ${JAR_PATH}:${NET_JAR_PATH} \ 25 | io.dingodb.driver.server.Starter \ 26 | --config ${ROOT}/conf/client.yaml \ 27 | $@ \ 28 | > {{ dingo_log_path }}/driver.out 2>&1 & 29 | -------------------------------------------------------------------------------- /roles/system/tasks/03_hostname.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # hostname & /etc/hosts 3 | 4 | - name: "Set hostname related vars" 5 | set_fact: 6 | default_hostname_rhel: "{{ inventory_hostname | replace('.','-') }}" 7 | ip_identity: "{{ inventory_hostname.split('.')[3] }}" 8 | 9 | - debug: 10 | msg: "default_hostname_rhel={{ default_hostname_rhel }}, new_hostname=dingo{{ ip_identity }}.{{ default_domain }}" 11 | 12 | - name: "Update hostname if not set" 13 | shell: "hostnamectl set-hostname --static dingo{{ ip_identity }}.{{ default_domain }}" 14 | when: ansible_hostname == "localhost" or ansible_hostname == default_hostname_rhel 15 | 16 | - name: "Refresh facts after hostname changed" 17 | setup: 18 | # when: ansible_hostname == "localhost" or ansible_hostname == default_hostname_rhel 19 | 20 | - debug: 21 | msg: "{{ inventory_hostname }} {{ hostvars[inventory_hostname]['ansible_fqdn'] }} {{ hostvars[inventory_hostname]['ansible_hostname'] }}" 22 | 23 | - name: "Update /etc/hosts" 24 | lineinfile: 25 | path: /etc/hosts 26 | line: "{{ item }} {{ hostvars[item]['ansible_fqdn'] }} {{ hostvars[item]['ansible_hostname'] }}" 27 | state: present 28 | with_items: "{{ groups['all_nodes'] }}" 29 | -------------------------------------------------------------------------------- /artifacts/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: dingodb-deploy 3 | version: develop 4 | artifacts: 5 | - {name: "cfg_jdk_local_path", path_in_repo: "jdk-8u171-linux-x64.tar.gz" } 6 | - {name: "cfg_system_open_file_limits_path", path_in_repo: "system/centos8/limits/limits.conf" } 7 | - {name: "cfg_dingodb_store_local_path", path_in_repo: "dingo.tar.gz"} 8 | - {name: "prometheus_local_file", path_in_repo: "prometheus-2.14.0.linux-amd64.tar.gz" } 9 | - {name: "grafana_local_file", path_in_repo: "grafana-8.3.3.linux-amd64.tar.gz"} 10 | - {name: "node_exporter_local_file", path_in_repo: "node_exporter-0.18.1.linux-amd64.tar.gz"} 11 | - {name: "process_exporter_local_file", path_in_repo: "process-exporter-0.7.10.linux-amd64.tar.gz"} 12 | - {name: "jmx_prometheus_javaagent_local_file", path_in_repo: "jmx_prometheus_javaagent-0.17.2.jar"} 13 | - { name: "monitor_web_local_file", path_in_repo: "nginx/dingo-monitor.tar.gz" } 14 | - { name: "zlib_src_file", path_in_repo: "nginx/zlib-1.2.11.tar.gz" } 15 | - { name: "license_src_file", path_in_repo: "license/dingo-license.zip" } 16 
| - { name: "nginx_local_file", path_in_repo: "nginx/nginx-x86.zip" } 17 | - { name: "nginx_arm_local_file", path_in_repo: "nginx/nginx-arm.zip" } 18 | -------------------------------------------------------------------------------- /roles/dingo/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dingo_home: "{{ installer_root_path | default('/opt') }}/dingo-store" 4 | 5 | dingo_log_path: "{{ dingo_log_dir }}/dingo" 6 | dingo_data_path: "{{ dingo_data_dir }}/dingo" 7 | 8 | root_log_level: info 9 | 10 | installer_cache_path: /tmp 11 | delete_cache_after_install: true 12 | dingo_store_tmp_coordinator_list: "{{ groups['coordinator'] }}" 13 | dingo_store_coordinator_list: "{% for item in dingo_store_tmp_coordinator_list %} {{item}}:{{ dingo_store_coordinator_exchange_port }} {% endfor %}" 14 | 15 | 16 | dingo_executor_server_db_path: "{{ dingo_data_path }}/executor/meta" 17 | dingo_executor_raft_log_path: "{{ dingo_data_path }}/executor/raftLog" 18 | dingo_executor_raft_db_path: "{{ dingo_data_path }}/executor/raftDb" 19 | 20 | 21 | dingo_tmp_executor_list: "{{ groups['executor'] }}" 22 | 23 | # define dingo coordinator exchange connection string: 172.20.3.18:22001 24 | dingo_coordinator_exchange_connection_list: "{{ dingo_store_coordinator_list.split() | join(\",\") }}" 25 | 26 | # define the flag to check current role executor 27 | is_dingo_executor: "{{ 'executor' in group_names }}" 28 | is_dingo_proxy: "{{ 'proxy' in group_names }}" 29 | is_dingo_web: "{{ 'web' in group_names }}" 30 | -------------------------------------------------------------------------------- /roles/dingo/templates/start-proxy.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright 2021 DataCanvas 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | set -x 20 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 21 | JAR_PATH=$(find $ROOT -name dingo-proxy*.jar) 22 | JAVA_OPTS="-Xms1g -Xmx1g -XX:+AlwaysPreTouch -XX:+UseG1GC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError" 23 | 24 | 25 | nohup $ROOT/Linux-x64/bin/java ${JAVA_OPTS} \ 26 | -Dlogging.config=file:${ROOT}/conf/logback-proxy.xml \ 27 | -jar ${JAR_PATH} \ 28 | --spring.config.location=${ROOT}/conf/application-proxy.yaml \ 29 | > {{ dingo_log_path }}/dingo-proxy.out & 30 | -------------------------------------------------------------------------------- /container/images/templates/bin/start-executor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright 2021 DataCanvas 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 20 | JAR_PATH=$(find $ROOT -name dingo-*-executor-*.jar) 21 | STORE_JAR_PATH=$(find $ROOT -name dingo-store*.jar) 22 | NET_JAR_PATH=$(find $ROOT -name dingo-net-*.jar) 23 | 24 | java ${JAVA_OPTS} \ 25 | -Dlogback.configurationFile=file:${ROOT}/conf/logback-executor.xml \ 26 | -classpath ${JAR_PATH}:${STORE_JAR_PATH}:${NET_JAR_PATH} \ 27 | io.dingodb.server.executor.Starter \ 28 | --config ${ROOT}/conf/executor.yaml \ 29 | > ${ROOT}/log/executor.out 30 | 31 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/templates/mysql_init.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ~/.bash_profile 3 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 4 | 5 | cd $BASE_DIR/build/bin/ || exit 1 6 | DINGODB_HAVE_STORE_AVAILABLE=0 7 | DINGODB_MYSQL_INIT=0 8 | 9 | while [ "${DINGODB_HAVE_STORE_AVAILABLE}" -eq 0 ]; do 10 | echo "DINGODB_HAVE_STORE_AVAILABLE = 0, wait 1 second" 11 | sleep 1 12 | DINGODB_HAVE_STORE_AVAILABLE=$(./dingodb_client_coordinator --method=GetStoreMap 2>&1 >/dev/null |grep -c DINGODB_HAVE_STORE_AVAILABLE) 13 | done 14 | 15 | echo "DINGODB_HAVE_STORE_AVAILABLE = 1, start to initialize MySQL" 16 | cd - || exit 1 17 | 18 | DINGODB_MYSQL_INIT=$(./dingodb_client_coordinator --method=GetSchemas 2>&1 >/dev/null |grep -c information_schema) 19 | 20 | if [ "${DINGODB_MYSQL_INIT}" -ne 0 ]; then 21 | echo "information_schema exists exit" 22 | exit 1 23 | fi 24 | 25 | # run Java start mysql_init 26 | $BASE_DIR/Linux-x64/bin/java -cp $BASE_DIR/build/bin/dingo-mysql-init-0.6.0-SNAPSHOT.jar io.dingodb.mysql.MysqlInit {{ inventory_hostname }}:{{ dingo_store_coordinator_exchange_port }} > mysql_init.log 27 | 28 | # check status 29 | if [ $? 
-eq 0 ] 30 | then 31 | echo "Java mysql init success" 32 | else 33 | echo "Java mysql init fail" 34 | fi -------------------------------------------------------------------------------- /roles/system/tasks/04_1_ntp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | - name: " Install ntp" 5 | package: name=ntp state=present 6 | 7 | - name: " Get netmask" 8 | set_fact: 9 | netmask="{{ hostvars[inventory_hostname]['ansible_'~item]['ipv4']['netmask'] }}" 10 | with_items: 11 | - "{{ ansible_interfaces | map('replace', '-','_') | list }}" 12 | when: 13 | - "'ipv4' in hostvars[inventory_hostname]['ansible_'~item]" 14 | - hostvars[inventory_hostname]['ansible_'~item]['ipv4']['address'] == inventory_hostname 15 | - debug: var=netmask 16 | 17 | - name: " All LAN ntp access" 18 | lineinfile: 19 | dest: /etc/ntp.conf 20 | line: "{{ item }}" 21 | state: present 22 | with_items: 23 | - "server 127.127.1.0" 24 | - "fudge 127.127.1.0 stratum 8" 25 | - "restrict {{ inventory_hostname }} mask {{ netmask }} nomodify notrap" 26 | when: inventory_hostname == ntp_server 27 | 28 | - name: " Setup NTP server for all hosts" 29 | lineinfile: 30 | path: "/etc/ntp.conf" 31 | line: "{{ item }}" 32 | state: present 33 | with_items: 34 | - "server {{ ntp_server }}" 35 | when: inventory_hostname != ntp_server 36 | 37 | - name: " Start ntp service on ntp server" 38 | service: name=ntpd state=started enabled=yes 39 | -------------------------------------------------------------------------------- /roles/dingo/tasks/01_basic_command.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure dingo user group exists: {{ dingo_group }}" 4 | group: name={{ dingo_group }} state=present 5 | 6 | - name: "Ensure dingo user exists {{ dingo_user }}" 7 | user: name={{ dingo_user }} group={{ dingo_group }} 8 | 9 | - name: "Kill old process of DingoDB executor" 10 | shell: pgrep -fu {{ dingo_user }} "executor" | xargs -r kill -9 11 | ignore_errors: yes 12 | 13 | - name: "Kill old process of DingoDB proxy" 14 | shell: pgrep -fu {{ dingo_user }} "proxy"| xargs -r kill -9 15 | ignore_errors: yes 16 | 17 | 18 | - name: "Kill old process of DingoDB dingo-web" 19 | shell: pgrep -fu {{ dingo_user }} "dingo-web"| xargs -r kill -9 20 | ignore_errors: yes 21 | 22 | 23 | - name: "Remove dingo home/log/data directories if they exist" 24 | file: path="{{ item }}" state=absent 25 | with_items: 26 | - "{{ dingo_log_path }}" 27 | - "{{ dingo_data_path }}" 28 | 29 | 30 | - name: "Create dingo directories" 31 | file: path="{{item}}" state=directory owner={{dingo_user}} group={{dingo_group}} 32 | with_items: 33 | - "{{ dingo_log_path }}" 34 | - "{{ dingo_data_path }}" 35 | - "{{ dingo_executor_server_db_path }}" 36 | - "{{ dingo_executor_raft_log_path }}" 37 | - "{{ dingo_executor_raft_db_path }}" -------------------------------------------------------------------------------- /roles/dingo/templates/application-web.yaml.j2: -------------------------------------------------------------------------------- 1 | spring: 2 | application: 3 | name: dingodb-monitor 4 | cache: 5 | type: ehcache 6 | ehcache: 7 | config: classpath:ehcache.xml 8 | datasource: 9 | url: jdbc:mysql://{{groups['executor'][0]}}:{{dingo_mysql_port}}/information_schema?useSSL=false&serverTimezone=UTC&useLegacyDatetimeCode=false&allowPublicKeyRetrieval=true 10 | username: root 11 | password: 123123 12 | jpa: 13 | database-platform: org.hibernate.dialect.MySQLDialect 14 | show-sql: true
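# Note: the datasource credentials above are hardcoded; one option (illustrative,
# the variable names below are hypothetical, not defined elsewhere in this repo)
# is to render them from inventory variables instead, e.g.:
#   username: {{ dingo_mysql_user | default('root') }}
#   password: {{ dingo_mysql_password | default('123123') }}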
15 | 16 | server: 17 | compression: 18 | enabled: true 19 | mime-types: text/html,text/xml,text/plain,text/css, application/javascript, application/json 20 | min-response-size: 1024 21 | host: {{ inventory_hostname }} 22 | port: {{ dingo_monitor_backend_port }} 23 | coordinatorExchangeSvrList: {{ dingo_coordinator_exchange_tmp_list_string_1 }} 24 | prometheus: http://{{groups['prometheus'][0]}}:{{prometheus_port}}/prometheus/api/v1/query 25 | monitor: 26 | executor: 27 | heapAlarmThreshold: 80 28 | logPath: {{ dingo_log_path }}/log/ 29 | instance: 30 | exportPort: {{node_exporter_port}} 31 | cpuAlarmThreshold: 70 32 | memAlarmThreshold: 70 33 | diskAlarmThreshold: 90 34 | 35 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/tasks/03_start_roles_command.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # stop all the coordinator and store 4 | - name: "Stop and Del db Coordinator on all host" 5 | become: true 6 | become_user: "{{ dingo_user }}" 7 | shell: "/bin/bash ./scripts/start-coordinator.sh clean" 8 | args: 9 | chdir: "{{ dingo_store_home }}" 10 | when: is_dingo_store_coordinator 11 | 12 | # stop all the coordinator and store 13 | - name: "Stop and Del db Store on all host" 14 | become: true 15 | become_user: "{{ dingo_user }}" 16 | shell: "/bin/bash ./scripts/start-store.sh clean" 17 | args: 18 | chdir: "{{ dingo_store_home }}" 19 | when: is_dingo_store_store 20 | 21 | # start coordinator 22 | - name: "Deploy and Start Coordinator using Script" 23 | become: true 24 | become_user: "{{ dingo_user }}" 25 | shell: "/bin/bash ./scripts/start-coordinator.sh cleanstart" 26 | args: 27 | chdir: "{{ dingo_store_home }}" 28 | when: is_dingo_store_coordinator 29 | 30 | - name: "Sleep wait coordinator start" 31 | shell: "sleep 20" 32 | 33 | # start store 34 | - name: "Deploy and Start Store using Shell Script" 35 | become: true 36 | become_user: "{{ dingo_user }}" 37 | shell: "/bin/bash ./scripts/start-store.sh cleanstart" 38 | args: 39 | chdir: "{{ dingo_store_home }}" 40 | when: is_dingo_store_store 41 | 42 | -------------------------------------------------------------------------------- /roles/grafana/templates/grafana.service.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | [Unit] 4 | Description=Grafana instance 5 | Documentation=http://docs.grafana.org 6 | Wants=network-online.target 7 | After=network-online.target 8 | 9 | [Service] 10 | User={{ grafana_user }} 11 | Group={{ grafana_group }} 12 | Type=notify 13 | Restart=on-failure 14 | WorkingDirectory={{ grafana_home }} 15 | RuntimeDirectory=grafana 16 | RuntimeDirectoryMode=0750 17 | ExecStart=/bin/sh -c 'exec {{ grafana_home }}/bin/grafana-server \ 18 | --config={{ grafana_conf_path }}/grafana.ini \ 19 | --pidfile={{ grafana_run_path }}/grafana-server.pid \ 20 | cfg:default.paths.logs={{ grafana_log_path }} \ 21 | cfg:default.paths.data={{ grafana_data_path }} \ 22 | cfg:default.paths.plugins={{ grafana_plugin_path }} \ 23 | cfg:default.paths.provisioning={{ grafana_conf_path }}/provisioning >{{ grafana_log_path }}/{{ grafana_service_name }}.out 2>&1 ' 24 | 25 | LimitNOFILE=10000 26 | TimeoutStopSec=20 27 | TimeoutStartSec=180 28 | 29 | [Install] 30 | WantedBy=multi-user.target 31 | -------------------------------------------------------------------------------- /roles/jdk/tasks/02_untar.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Remove previous install directories" 4 | file: path={{ item }} state=absent 5 | with_items: 6 | - "{{ jdk_home }}" 7 | 8 | - set_fact: remote_tarball_path={{ installer_cache_path }}/{{ cfg_jdk_local_path | basename}} 9 | 10 | - name: "Ensure installer cache directory {{ installer_cache_path }}" 11 | file: path={{ installer_cache_path }} state=directory 12 | 13 | - name: "Copy tarball to remote host {{ remote_tarball_path }}" 14 | copy: src={{ cfg_jdk_local_path }} dest={{ remote_tarball_path }} 15 | 16 | - name: "Ensure jdk directories" 17 | file: path={{ item }} state=directory owner={{ jdk_user }} group={{ jdk_group }} 18 | with_items: 19 | - "{{ jdk_home }}" 20 | 21 | - name: "Unarchive {{ remote_tarball_path }} to {{ jdk_home }}" 22 | unarchive: 23 | src: "{{ remote_tarball_path}}" 24 | dest: "{{ jdk_home }}" 25 | owner: "{{ jdk_user }}" 26 | group: "{{ jdk_group }}" 27 | remote_src: yes 28 | extra_opts: ['--strip-components=1'] 29 | 30 | - name: "Set ownership of {{ jdk_home }} to {{ jdk_user }}" 31 | file: path={{ jdk_home }} owner={{ jdk_user }} group={{ jdk_group }} recurse=yes 32 | 33 | - name: "Delete temporary tarball file: {{ remote_tarball_path}}" 34 | file: path={{ remote_tarball_path }} state=absent 35 | when: delete_cache_after_install 36 | -------------------------------------------------------------------------------- /roles/system/tasks/04_2_chrony.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | - name: " Install chrony" 5 | package: name=chrony state=present 6 | 7 | - name: " Get netmask" 8 | set_fact: 9 | netmask="{{ hostvars[inventory_hostname]['ansible_'~item]['ipv4']['netmask'] }}" 10 | with_items: 11 | - "{{ ansible_interfaces | map('replace', '-','_') | list }}" 12 | when: 13 | - "'ipv4' in hostvars[inventory_hostname]['ansible_'~item]" 14 | - hostvars[inventory_hostname]['ansible_'~item]['ipv4']['address'] == inventory_hostname 15 | - debug: var=netmask 16 | 17 | - name: " Get CIDR" 18 | set_fact: 19 | net_cidr: "{{ inventory_hostname | cidr(netmask) }}" 20 | - debug: var=net_cidr 21 | 22 | - name: " All LAN chronyc access" 23 | lineinfile: 24 | dest: /etc/chrony.conf 25 | line: "{{ item }}" 26 | state: present 27 | with_items: 28 | - "allow {{ net_cidr }}" 29 | when: inventory_hostname == ntp_server 30 | 31 | - name: " Configure chrony clients to use the NTP server (client nodes only)" 32 | lineinfile: 33 | dest: /etc/chrony.conf 34 | line: "{{ item }}" 35 | state: present 36 | with_items: 37 | - "server {{ ntp_server }} iburst" 38 | when: inventory_hostname != ntp_server 39 | 40 | - name: " Start chronyd service on all hosts" 41 | service: name=chronyd state=started enabled=yes 42 | -------------------------------------------------------------------------------- /roles/dingo_store/templates/gen_coor_list.sh.j2: -------------------------------------------------------------------------------- 1 | 2 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 3 | DIST_DIR=${BASE_DIR}/dist 4 | 5 | COOR_SRV_PEERS=$1 6 | 7 | # regex 8 | regex='^([0-9]{1,3}\.){3}[0-9]{1,3}:[0-9]{1,5}$' 9 | 10 | # split the parameter on ',' 11 | echo "${COOR_SRV_PEERS}" | awk -F ',' '{for(i=1;i<=NF;i++) print $i}' | while read line; do 12 | # validate each host:port entry 13 | if echo "${line}" | grep -qE "$regex"; then 14 | echo "${line} is good" 15 | else 16 | echo "${line} is bad" 17 | echo "please input param '172.0.0.1:22001,172.0.0.1:22001,172.0.0.1:22001'" 18 | exit 1
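# Note: this "exit 1" only terminates the piped `while read` subshell, so the outer
# script keeps running even when an entry fails validation; the coor_list generated
# below may then still contain the malformed peer entry.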
19 | fi 20 | done 21 | 22 | TMP_COORDINATOR_SERVICES=${BASE_DIR}/build/bin/coor_list 23 | 24 | echo "# dingo-store coordinators" > ${TMP_COORDINATOR_SERVICES} 25 | echo ${COOR_SRV_PEERS} | tr ',' '\n' >> ${TMP_COORDINATOR_SERVICES} 26 | 27 | 28 | COOR_DIST=${BASE_DIR}/dist 29 | DISK_LIST_STR="{{ hostvars[inventory_hostname]['disk'] | default('${DIST_DIR}') }}" 30 | eval DISK_LIST=(${DISK_LIST_STR}) 31 | DISK_LIST+=(${COOR_DIST}) 32 | echo "DISK_LIST: ${DISK_LIST[@]}" 33 | 34 | SORTED_LIST=($(echo "${DISK_LIST[@]}" | tr ' ' '\n' | sort -u)) 35 | 36 | for dist_name in "${SORTED_LIST[@]}" 37 | do 38 | store_dist=$(find "${dist_name}" -type d -name "*conf*") 39 | for dir in ${store_dist} 40 | do 41 | # copy file 42 | cp ${TMP_COORDINATOR_SERVICES} "${dir}" 43 | done 44 | done 45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/templates/gen_coor_list.sh.j2: -------------------------------------------------------------------------------- 1 | 2 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 3 | DIST_DIR=${BASE_DIR}/dist 4 | 5 | COOR_SRV_PEERS=$1 6 | 7 | # regex 8 | regex='^([0-9]{1,3}\.){3}[0-9]{1,3}:[0-9]{1,5}$' 9 | 10 | # split the parameter on ',' 11 | echo "${COOR_SRV_PEERS}" | awk -F ',' '{for(i=1;i<=NF;i++) print $i}' | while read line; do 12 | # validate each host:port entry 13 | if echo "${line}" | grep -qE "$regex"; then 14 | echo "${line} is good" 15 | else 16 | echo "${line} is bad" 17 | echo "please input param '172.0.0.1:22001,172.0.0.1:22001,172.0.0.1:22001'" 18 | exit 1 19 | fi 20 | done 21 | 22 | TMP_COORDINATOR_SERVICES=${BASE_DIR}/build/bin/coor_list 23 | 24 | echo "# dingo-store coordinators" > ${TMP_COORDINATOR_SERVICES} 25 | echo ${COOR_SRV_PEERS} | tr ',' '\n' >> ${TMP_COORDINATOR_SERVICES} 26 | 27 | 28 | COOR_DIST=${BASE_DIR}/dist 29 | DISK_LIST_STR="{{ hostvars[inventory_hostname]['disk'] | default('${DIST_DIR}') }}" 30 | eval DISK_LIST=(${DISK_LIST_STR}) 31 | DISK_LIST+=(${COOR_DIST}) 32 | echo "DISK_LIST: ${DISK_LIST[@]}" 33 | 34 | SORTED_LIST=($(echo "${DISK_LIST[@]}" | tr ' ' '\n' | sort -u)) 35 | 36 | for dist_name in "${SORTED_LIST[@]}" 37 | do 38 | store_dist=$(find "${dist_name}" -type d -name "*conf*") 39 | for dir in ${store_dist} 40 | do 41 | # copy file 42 | cp ${TMP_COORDINATOR_SERVICES} "${dir}" 43 | done 44 | done 45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /roles/grafana/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | grafana_service_name: "dingo-grafana" 4 | 5 | grafana_home: "{{ installer_root_path | default('/opt') }}/grafana" 6 | grafana_user: "{{ dingo_user | default('grafana') }}" 7 | grafana_group: "{{ dingo_group | default('grafana') }}" 8 | 9 | grafana_log_path: "{{ dingo_log_dir | default('/var/log') }}/grafana" 10 | grafana_data_path: "{{ dingo_data_dir | default('/var/lib') }}/grafana" 11 | grafana_run_path: "{{ dingo_run_dir | default('/var/run') }}/grafana" 12 | 13 | grafana_conf_path: "{{ grafana_home }}/conf" 14 | grafana_plugin_path: "{{ grafana_data_path }}/plugins" 15 | 16 | grafana_port: 3000 17 | 18 | default_datasource_name: "Prometheus" 19 | grafana_admin_user: "admin" 20 | grafana_admin_password: "admin" 21 | default_dashboard_title: "Dingo" 22 | default_dashboard_uid: "RNezu0fWk" 23 | default_dashboard_template: "dingo.json.j2" 24 | process_dashboard_title: "DingoProcess" 25 | process_dashboard_uid: "PCJkOyLVk" 26 |
process_dashboard_template: "system-processes-metrics.json.j2" 27 | dingo_store_dashboard_title: "DingoMerics" 28 | dingo_store_dashboard_uid: "xYgURYs4z" 29 | dingo_store_dashboard_template: "dingo_metrics.json.j2" 30 | iostat_dashboard_title: "DingoIostat" 31 | iostat_dashboard_uid: "9l09q0qik" 32 | iostat_dashboard_template: "node_iostat.json.j2" 33 | sql_dashboard_template: "sqlmetric.json.j2" 34 | sql_dashboard_title: "sql_metrics" 35 | sql_dashboard_uid: "Bm4sOIfSz" -------------------------------------------------------------------------------- /package.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | readonly SCRIPT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 6 | cd "${SCRIPT_ROOT}" 7 | 8 | # shellcheck disable=SC2155 9 | export dist_product=$(grep "^name: " artifacts/config.yml | sed -e "s/^name: //") 10 | # shellcheck disable=SC2155 11 | export dist_version=$(grep "^version: " artifacts/config.yml | sed -e "s/^version: //") 12 | 13 | function indent() { sed "s/^/ 👉 /"; } 14 | 15 | # shellcheck disable=SC2154 16 | installer_name=ansible-${dist_product}-${dist_version} 17 | echo "#---------------------------------------------------------" 18 | echo "#️ ⏳️ Creating installer: ${installer_name}" 19 | echo "#---------------------------------------------------------" 20 | 21 | echo "✅ Downloading artifacts" 22 | artifacts/download.py 2>&1 | indent 23 | 24 | echo "✅ Packaging ${installer_name}" 25 | git archive --format=tar --prefix=${installer_name}/ HEAD -o ${installer_name}.tar 26 | echo "✅ Adding artifacts to installer tarball" 27 | mkdir -p ${installer_name} 28 | ln -s ../artifacts ${installer_name}/artifacts 29 | # shellcheck disable=SC2207 30 | export TARBALLS=( $(artifacts/download.py resolve 2>/dev/null) ) 31 | # shellcheck disable=SC2068 32 | for tarball in ${TARBALLS[@]}; do 33 | echo " 👉 $(basename ${tarball})" 34 | tar -rf ${installer_name}.tar ${installer_name}/artifacts/$(basename ${tarball}) 35 | done 36 | rm -rf ${installer_name} 37 | echo "😁 Done" 38 | -------------------------------------------------------------------------------- /roles/dingo/templates/executor.yaml.j2: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: dingo 3 | exchange: 4 | host: {{ inventory_hostname }} 5 | port: {{ dingo_executor_exchange_port }} 6 | server: 7 | coordinators: {{ dingo_coordinator_exchange_connection_list }} 8 | user: user 9 | keyring: TO_BE_CONTINUED 10 | resourceTag: 1 11 | mysqlPort: {{ dingo_mysql_port }} 12 | variable: 13 | autoIncrementCacheCount: {{ dingo_auto_increment_cache_count }} 14 | autoIncrementIncrement: 1 15 | autoIncrementOffset: 1 16 | enableTableLock: true 17 | lowerCaseTableNames: 2 18 | common: 19 | scheduledCoreThreads: 16 20 | lockCoreThreads: 0 21 | globalCoreThreads: 0 22 | gcSafePointPeriod: 300 23 | gcDeleteRegionPeriod: 60 24 | enableGcSdkRegion: true 25 | store: 26 | bufferSize: {{ dingo_executor_buffer_size }} 27 | bufferNumber: {{ dingo_executor_buffer_number }} 28 | fileSize: {{ dingo_executor_file_size }} 29 | path: {{ dingo_home }}/localStore 30 | security: 31 | ldap: 32 | ldapHost: {{ openldap_server_ip }} 33 | ldapPort: {{ openldap_server_port }} 34 | bindDN: {{ openldap_server_bindDN }} 35 | password: {{ openldap_server_root_password }} 36 | baseDN: {{ openldap_server_baseDN }} 37 | cipher: 38 | keyPath: {{ dingo_home }}/conf/dingodb.jks 39 | keyPass: dingodb 40 | storePass: dingodb 41 | alias: 
dingodb 42 | issuer: dingo -------------------------------------------------------------------------------- /artifacts/merge_dingo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | NEW_DIR=$(cd `dirname $0`; pwd) 5 | echo ${NEW_DIR} 6 | cd ${NEW_DIR} 7 | mkdir dingo 8 | tar -zxvf dingo-store.tar.gz -C dingo 9 | unzip dingo.zip -d dingo 10 | rm -f dingo/conf/coordinator.yaml 11 | rm -f dingo/conf/store.yaml 12 | if [ -d dingo/dingo ]; then 13 | echo "dingo.zip has two layers" 14 | cp -rf dingo/dingo/* dingo/ 15 | rm -rf dingo/dingo 16 | fi 17 | cd dingo 18 | cp ${NEW_DIR}/sqlline-*-SNAPSHOT-jar-with-dependencies.jar libs/ 19 | # use sed to adjust variable values in store-gflags.conf, index-gflags.conf and coordinator-gflags.conf 20 | #sed -i 's/-min_system_disk_capacity_free_ratio=0.05/-min_system_disk_capacity_free_ratio=0.05/g' conf/store-gflags.conf 21 | #sed -i 's/-min_system_memory_capacity_free_ratio=0.10/-min_system_memory_capacity_free_ratio=0.10/g' conf/store-gflags.conf 22 | #sed -i 's/-storage_worker_num=32/-storage_worker_num=32/g' conf/store-gflags.conf 23 | # 24 | #sed -i 's/-min_system_disk_capacity_free_ratio=0.05/-min_system_disk_capacity_free_ratio=0.05/g' conf/index-gflags.conf 25 | #sed -i 's/-min_system_memory_capacity_free_ratio=0.10/-min_system_memory_capacity_free_ratio=0.10/g' conf/index-gflags.conf 26 | #sed -i 's/-storage_worker_num=16/-storage_worker_num=16/g' conf/index-gflags.conf 27 | # 28 | #sed -i 's/-max_hnsw_memory_size_of_region=2147483648/-max_hnsw_memory_size_of_region=2147483648/g' conf/coordinator-gflags.conf 29 | tar -czvf ../dingo.tar.gz ./* 30 | cd .. 31 | rm -rf dingo -------------------------------------------------------------------------------- /roles/prometheus/templates/prometheus.yml.j2: -------------------------------------------------------------------------------- 1 | #jinja2: trim_blocks: True, lstrip_blocks: True 2 | # {{ ansible_managed }} 3 | # http://prometheus.io/docs/operating/configuration/ 4 | 5 | global: 6 | evaluation_interval: 15s 7 | scrape_interval: 15s 8 | scrape_timeout: 10s 9 | 10 | rule_files: 11 | - {{ prometheus_home }}/rules/*.yml 12 | 13 | scrape_configs: 14 | - job_name: "prometheus" 15 | metrics_path: "/prometheus/metrics" 16 | static_configs: 17 | - targets: 18 | - "localhost:{{ prometheus_port }}" 19 | 20 | - job_name: "node" 21 | file_sd_configs: 22 | - files: 23 | - "{{ prometheus_home }}/file_sd/node*.yml" 24 | 25 | - job_name: "process" 26 | file_sd_configs: 27 | - files: 28 | - "{{ prometheus_home }}/file_sd/process*.yml" 29 | 30 | - job_name: "coordinator" 31 | metrics_path: '/NodeService/DingoMetrics' 32 | static_configs: 33 | - targets: {{ dingo_coordinator_http_monitor_list }} 34 | 35 | - job_name: "store" 36 | metrics_path: '/NodeService/DingoMetrics' 37 | static_configs: 38 | - targets: {{ dingo_store_http_monitor_list }} 39 | 40 | - job_name: "index" 41 | metrics_path: '/NodeService/DingoMetrics' 42 | static_configs: 43 | - targets: {{ dingo_index_http_monitor_list }} 44 | 45 | - job_name: "executor-exporter" 46 | metrics_path: '/metrics' 47 | static_configs: 48 | - targets: {{ dingo_executor_http_monitor_list }} 49 | 50 | -------------------------------------------------------------------------------- /roles/prometheus/tasks/04_add_target.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get current Prometheus configuration 3 | uri: 4 | url: http://localhost:9090/api/v1/targets 5 | method: GET 6 |
return_content: yes 7 | register: prometheus_config 8 | 9 | - name: Add new target to configuration 10 | set_fact: 11 | new_target: 12 | labels: 13 | job: "{{ item }}" 14 | targets: 15 | - "http://{{item}}:{{dingo_store_coordinator_exchange_port}}/NodeService/DingoMetrics" 16 | when: "'{{ item }}' not in prometheus_config.content" 17 | register: new_target 18 | loop: "{{ groups['all_nodes'] }}" 19 | 20 | - name: Add new target to configuration 21 | set_fact: 22 | new_target: 23 | labels: 24 | job: "{{ item }}" 25 | targets: 26 | - "http://{{item}}:{{dingo_store_coordinator_exchange_port}}/NodeService/DingoMetrics" 27 | when: "'{{ item }}' not in prometheus_config.content" 28 | register: new_target 29 | loop: "{{ groups['all_nodes'] }}" 30 | 31 | - name: Update Prometheus configuration 32 | uri: 33 | url: http://localhost:9090/api/v1/targets 34 | method: POST 35 | body_format: json 36 | body: 37 | targets: 38 | - "{{ new_target.new_target.labels.job }}/{{ item }}" 39 | labels: 40 | job: "{{ new_target.new_target.labels.job }}" 41 | headers: 42 | Content-Type: "application/json" 43 | status_code: 200 44 | with_items: "{{ new_target.new_target.targets }}" 45 | when: new_target.changed 46 | -------------------------------------------------------------------------------- /inventory/hosts: -------------------------------------------------------------------------------- 1 | [all:vars] 2 | ansible_connection=ssh 3 | #ansible_ssh_user=root 4 | #ansible_ssh_pass=datacanvas@123 5 | ansible_python_interpreter=/usr/bin/python3 6 | 7 | #[add_coordinator] 8 | # 172.20.3.203 9 | 10 | #[add_store] 11 | #172.20.3.203 12 | 13 | 14 | [scaling_in_dingo:children] 15 | add_coordinator 16 | add_store 17 | 18 | 19 | [coordinator] 20 | 172.20.3.201 21 | 172.20.3.200 22 | 172.20.3.202 23 | 24 | [store] 25 | # 172.20.3.201 26 | # 172.20.3.201 store_num=2 27 | # 172.20.3.201 store_num=2 disk='/home/sd1/store1 /home/sd2/store2' 28 | 172.20.3.201 29 | 172.20.3.200 30 | 172.20.3.202 31 | 32 | [document] 33 | # 172.20.3.201 document_num=2 disk='/home/sd1/document1 /home/sd2/document2' 34 | 172.20.3.201 35 | 172.20.3.200 36 | 172.20.3.202 37 | 38 | [index] 39 | # 172.20.3.201 index_num=2 disk='/home/sd1/index1 /home/sd2/index2' 40 | 172.20.3.201 41 | 172.20.3.200 42 | 172.20.3.202 43 | 44 | [diskann] 45 | 172.20.3.201 46 | 47 | [prometheus] 48 | 172.20.3.201 49 | 50 | [grafana] 51 | 172.20.3.201 52 | 53 | 54 | [all_nodes:children] 55 | coordinator 56 | store 57 | index 58 | 59 | [executor] 60 | 172.20.3.201 61 | 172.20.3.200 62 | 172.20.3.202 63 | 64 | [proxy] 65 | 172.20.3.201 66 | 67 | [web] 68 | 172.20.3.201 69 | 70 | [executor_nodes:children] 71 | executor 72 | proxy 73 | 74 | 75 | [node_exporter] 76 | 172.20.3.201 77 | 172.20.3.200 78 | 172.20.3.202 79 | 80 | 81 | [process_exporter] 82 | 172.20.3.201 83 | 172.20.3.200 84 | 172.20.3.202 85 | 86 | -------------------------------------------------------------------------------- /roles/dingo/templates/stop-all-component.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright 2021 DataCanvas 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License.
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 20 | 21 | 22 | PID=`ps -u {{ dingo_user }} -o pid,cmd | grep executor.Starter | grep -v grep | awk '{print $1}'` 23 | if [[ "" != "$PID" ]]; then 24 | echo "killing $PID" 25 | kill -9 $PID 26 | fi 27 | 28 | PID=`ps -u {{ dingo_user }} -o pid,cmd | grep dingo-proxy | grep -v grep | awk '{print $1}'` 29 | if [[ "" != "$PID" ]]; then 30 | echo "killing $PID" 31 | kill -9 $PID 32 | fi 33 | 34 | PID=`ps -u {{ dingo_user }} -o pid,cmd | grep dingo-web | grep -v grep | awk '{print $1}'` 35 | if [[ "" != "$PID" ]]; then 36 | echo "killing $PID" 37 | kill -9 $PID 38 | fi 39 | 40 | # PID=`ps -ef | grep dingo | grep java | grep -v grep | awk '{print $2}'` 41 | # if [[ "" != "$PID" ]]; then 42 | # ps -ef | grep dingo | grep java | grep -v grep | awk '{print $2}' | xargs kill -9 43 | # fi -------------------------------------------------------------------------------- /artifacts/system/centos8/repo/CentOS-Sources.repo: -------------------------------------------------------------------------------- 1 | # CentOS-Sources.repo 2 | # 3 | # The mirror system uses the connecting IP address of the client and the 4 | # update status of each mirror to pick mirrors that are updated to and 5 | # geographically close to the client. You should use this for CentOS updates 6 | # unless you are manually picking other mirrors. 7 | # 8 | # If the mirrorlist= does not work for you, as a fall back you can try the 9 | # remarked out baseurl= line instead. 
10 | # 11 | # 12 | 13 | [BaseOS-source] 14 | name=CentOS-$releasever - BaseOS Sources 15 | baseurl=http://vault.centos.org/$contentdir/$releasever/BaseOS/Source/ 16 | gpgcheck=1 17 | enabled=0 18 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial 19 | 20 | #AppStream 21 | [AppStream-source] 22 | name=CentOS-$releasever - AppStream Sources 23 | baseurl=http://vault.centos.org/$contentdir/$releasever/AppStream/Source/ 24 | gpgcheck=1 25 | enabled=0 26 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial 27 | 28 | #additional packages that may be useful 29 | [extras-source] 30 | name=CentOS-$releasever - Extras Sources 31 | baseurl=http://vault.centos.org/$contentdir/$releasever/extras/Source/ 32 | gpgcheck=1 33 | enabled=0 34 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial 35 | 36 | #additional packages that extend functionality of existing packages 37 | [centosplus-source] 38 | name=CentOS-$releasever - Plus Sources 39 | baseurl=http://vault.centos.org/$contentdir/$releasever/centosplus/Source/ 40 | gpgcheck=1 41 | enabled=0 42 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial 43 | 44 | -------------------------------------------------------------------------------- /action_plugins/resolve_artifacts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import yaml 5 | from ansible.plugins.action import ActionBase 6 | 7 | try: 8 | from __main__ import display 9 | except ImportError: 10 | from ansible.utils.display import Display 11 | 12 | display = Display() 13 | 14 | 15 | class ActionModule(ActionBase): 16 | """ Returns map of inventory hosts and their associated SCM hostIds """ 17 | 18 | def run(self, tmp=None, task_vars=None): 19 | script_path = os.path.dirname(os.path.realpath(__file__)) 20 | artifact_path = os.path.realpath(os.path.join(script_path, "../artifacts")) 21 | config_file = os.path.realpath(os.path.join(artifact_path, "config.yml")) 22 | display.display("Loading configs from '%s'" % config_file) 23 | 24 | if task_vars is None: 25 | task_vars = dict() 26 | 27 | result = super(ActionModule, self).run(tmp, task_vars) 28 | 29 | try: 30 | cfg = yaml.safe_load(open(config_file).read()) 31 | version = cfg['version'] 32 | artifacts = {c['name']: self.process_artifact(artifact_path, c, version) for c in cfg['artifacts']} 33 | except KeyError as e: 34 | result['failed'] = True 35 | result['msg'] = str(e) 36 | return result 37 | 38 | result['ansible_facts'] = artifacts 39 | return result 40 | 41 | @staticmethod 42 | def process_artifact(artifact_path, artifact_info, version): 43 | # print(comp) 44 | artifact_info['path_in_repo'] = artifact_info['path_in_repo'].replace("${version}", version) 45 | local_file_name = os.path.join(artifact_path, artifact_info['path_in_repo']) 46 | return local_file_name 47 | -------------------------------------------------------------------------------- /roles/nginx/tasks/04_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Generate systemd service config file" 4 | template: > 5 | src=systemd/nginx.service.j2 dest=/etc/systemd/system/nginx.service 6 | owner={{ nginx_user }} group={{ nginx_group }} mode=0644 force=yes 7 | when: ansible_service_mgr == "systemd" 8 | 9 | - name: "Generate service init script" 10 | template: > 11 | src=service/nginx.j2 dest=/etc/init.d/nginx 12 | owner={{ nginx_user }} group={{ nginx_group }} mode=0755 force=yes 13 | when: (ansible_service_mgr == 
"upstart" or ansible_service_mgr == "sysvinit") 14 | 15 | - name: "Create the configuration directory" 16 | file: > 17 | path={{ nginx_conf_dir }} state=directory 18 | owner={{ nginx_user }} group={{ nginx_group }} mode=0755 19 | recurse=yes 20 | force=yes 21 | 22 | - name: "Create the configuration conf.d directory" 23 | file: > 24 | path={{ nginx_conf_dir }}/conf.d state=directory 25 | owner={{ nginx_user }} group={{ nginx_group }} mode=0755 26 | recurse=yes 27 | force=yes 28 | 29 | - name: "Generate global nginx config file" 30 | template: > 31 | src=nginx.conf.j2 dest={{ nginx_conf_dir }}/nginx.conf 32 | owner={{ nginx_user }} group={{ nginx_group }} mode=0644 force=yes 33 | 34 | - name: "Create the configurations for conf.d" 35 | template: > 36 | src="{{ item }}.conf.j2" 37 | dest="{{ nginx_conf_dir }}/conf.d/{{ item }}.conf" 38 | owner={{ nginx_user }} group={{ nginx_group }} mode=0644 force=yes 39 | with_items: 40 | - "default" 41 | 42 | - name: "Generate nginx start script" 43 | template: > 44 | src="{{ item }}.sh.j2" 45 | dest="{{ nginx_home }}/{{ item }}.sh" 46 | owner={{ nginx_user }} group={{ nginx_group }} mode=0777 force=yes 47 | with_items: 48 | - "start" 49 | - "stop" 50 | - "reload" -------------------------------------------------------------------------------- /roles/scaling_in_dingo/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dingo_store_home: "{{ installer_root_path | default('/opt') }}/dingo-store" 4 | 5 | dingo_store_log_path: "{{ dingo_log_dir }}/dingo-store" 6 | dingo_store_data_path: "{{ dingo_data_dir }}/dingo-store" 7 | 8 | installer_cache_path: /tmp 9 | delete_cache_after_install: true 10 | 11 | dingo_store_coordinator_meta_path: "{{ dingo_store_data_path }}/coordinator" 12 | 13 | dingo_store_tmp_coordinator_list: "{{ groups['coordinator'] }}" 14 | dingo_store_tmp_store_list: "{{ groups['store'] }}" 15 | dingo_store_tmp_add_coordinator_list: "{{ groups['add_coordinator'] | default([]) }}" 16 | dingo_store_tmp_add_store_list: "{{ groups['add_store'] }}" 17 | 18 | new_dingo_store_tmp_coordinator_list: "{{ dingo_store_tmp_coordinator_list + dingo_store_tmp_add_coordinator_list }}" 19 | 20 | # define dingo coordinator raft connection string: 172.20.3.18:22101,172.20.3.19:22101,172.20.3.20:22101 21 | dingo_store_coordinator_raft_list: "{% for item in new_dingo_store_tmp_coordinator_list %} {{item}}:{{ dingo_store_coordinator_raft_port }} {% endfor %}" 22 | dingo_coordinator_raft_connection_list: "{{ dingo_store_coordinator_raft_list.split() | join(\",\") }}" 23 | 24 | dingo_store_store_exchange_list: "{% for item in new_dingo_store_tmp_coordinator_list %} {{item}}:{{ dingo_store_coordinator_exchange_port }} {% endfor %}" 25 | dingo_store_store_exchange_connection_list: "{{ dingo_store_store_exchange_list.split() | join(\",\") }}" 26 | 27 | is_dingo_store_coordinator: "{{ 'add_coordinator' in group_names }}" 28 | is_dingo_store_store: "{{ 'add_store' in group_names }}" 29 | 30 | store_num: "{{ hostvars[inventory_hostname]['store_num'] | default(1) }}" 31 | default_disk_join: "{{ (dingo_store_home | string ~ '/dist ') * (store_num | int) }}" 32 | default_disk_list: "{{ default_disk_join | trim}}" 33 | -------------------------------------------------------------------------------- /roles/dingo/tasks/03_start_roles_command.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # stop dingo all role: executor, proxy, web 4 | - name: "Stop executor, 
proxy, web on all host" 5 | become: true 6 | become_user: "{{ dingo_user }}" 7 | shell: "/bin/bash ./bin/stop-all-component.sh" 8 | args: 9 | chdir: "{{ dingo_home }}" 10 | 11 | - set_fact: remote_prometheus_javaagent_path={{ dingo_home }}/libs/ 12 | - set_fact: remote_license_path={{ dingo_home }}/libs/ 13 | 14 | - name: "Copy jmx_prometheus_javaagent-0.17.2.jar to remote host" 15 | copy: src={{ jmx_prometheus_javaagent_local_file }} dest={{ remote_prometheus_javaagent_path }} 16 | when: is_dingo_executor 17 | 18 | - name: "Unzip license.zip to remote host" 19 | unarchive: src={{ license_src_file }} dest={{ remote_license_path }} 20 | when: is_license_support 21 | 22 | # create jmx_config.yaml 23 | - name: "Create jmx_config.yaml" 24 | become: true 25 | template: 26 | src: "jmx_config.yaml.j2" 27 | dest: "{{ dingo_home }}/conf/jmx_config.yaml" 28 | when: is_dingo_executor 29 | 30 | 31 | # start executor 32 | - name: "Start Executor using Shell Script" 33 | become: true 34 | become_user: "{{ dingo_user }}" 35 | shell: "/bin/bash ./bin/start-executor.sh " 36 | args: 37 | chdir: "{{ dingo_home }}" 38 | when: is_dingo_executor 39 | 40 | # Start dingo proxy 41 | - name: "Start Dingo Proxy appliaction using Shell script" 42 | become: true 43 | become_user: "{{ dingo_user }}" 44 | shell: "/bin/bash ./bin/start-proxy.sh" 45 | args: 46 | chdir: "{{ dingo_home }}" 47 | when: is_dingo_proxy 48 | 49 | # Start dingo web 50 | - name: "Start Dingo Web appliaction using Shell script" 51 | become: true 52 | become_user: "{{ dingo_user }}" 53 | shell: "/bin/bash ./bin/start-web.sh" 54 | args: 55 | chdir: "{{ dingo_home }}" 56 | when: is_dingo_web 57 | 58 | -------------------------------------------------------------------------------- /artifacts/system/centos8/repo/CentOS-Base.repo: -------------------------------------------------------------------------------- 1 | # CentOS-Base.repo 2 | # 3 | # The mirror system uses the connecting IP address of the client and the 4 | # update status of each mirror to pick mirrors that are updated to and 5 | # geographically close to the client. You should use this for CentOS updates 6 | # unless you are manually picking other mirrors. 7 | # 8 | # If the mirrorlist= does not work for you, as a fall back you can try the 9 | # remarked out baseurl= line instead. 
10 | # 11 | # 12 | 13 | [base] 14 | name=CentOS-8.5.2111 - Base - mirrors.aliyun.com 15 | baseurl=http://mirrors.aliyun.com/centos-vault/8.5.2111/BaseOS/$basearch/os/ 16 | gpgcheck=0 17 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-Official 18 | 19 | #additional packages that may be useful 20 | [extras] 21 | name=CentOS-8.5.2111 - Extras - mirrors.aliyun.com 22 | baseurl=http://mirrors.aliyun.com/centos-vault/8.5.2111/extras/$basearch/os/ 23 | gpgcheck=0 24 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-Official 25 | 26 | #additional packages that extend functionality of existing packages 27 | [centos-vaultplus] 28 | name=CentOS-8.5.2111 - Plus - mirrors.aliyun.com 29 | baseurl=http://mirrors.aliyun.com/centos-vault/8.5.2111/centos-vaultplus/$basearch/os/ 30 | gpgcheck=0 31 | enabled=0 32 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-Official 33 | 34 | [PowerTools] 35 | name=CentOS-8.5.2111 - PowerTools - mirrors.aliyun.com 36 | baseurl=http://mirrors.aliyun.com/centos-vault/8.5.2111/PowerTools/$basearch/os/ 37 | gpgcheck=0 38 | enabled=0 39 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-Official 40 | 41 | 42 | [AppStream] 43 | name=CentOS-8.5.2111 - AppStream - mirrors.aliyun.com 44 | baseurl=http://mirrors.aliyun.com/centos-vault/8.5.2111/AppStream/$basearch/os/ 45 | gpgcheck=0 46 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-Official 47 | -------------------------------------------------------------------------------- /playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #--------------------------------- 4 | # 1. Prepare System 5 | #--------------------------------- 6 | - hosts: 7 | - all_nodes 8 | tasks: 9 | - include_role: name=system 10 | when: install_system 11 | handlers: 12 | - name: Reload limits 13 | command: sysctl -p 14 | ignore_errors: yes 15 | #--------------------------------- 16 | # 2. Install JDK 17 | #--------------------------------- 18 | - hosts: 19 | - all_nodes 20 | tasks: 21 | - include_role: name=jdk 22 | when: install_java_sdk 23 | 24 | 25 | #--------------------------------- 26 | # 3. Install Dingo_store nodes 27 | #--------------------------------- 28 | - hosts: 29 | - all_nodes 30 | tasks: 31 | - include_role: name=dingo_store 32 | when: install_dingo_store 33 | 34 | #--------------------------------- 35 | # 3. Install Dingo nodes 36 | #--------------------------------- 37 | - hosts: 38 | - executor_nodes 39 | tasks: 40 | - include_role: name=dingo 41 | when: install_dingo 42 | 43 | 44 | #--------------------------------- 45 | # 4. Install prometheus and grafana 46 | #--------------------------------- 47 | - hosts: 48 | - prometheus 49 | tasks: 50 | - include_role: name=prometheus 51 | when: install_prometheus 52 | 53 | - hosts: 54 | - grafana 55 | tasks: 56 | - include_role: name=grafana 57 | when: install_grafana 58 | 59 | - hosts: 60 | - all_nodes 61 | tasks: 62 | - include_role: name=node_exporter 63 | when: install_node_exporter 64 | 65 | - hosts: 66 | - all_nodes 67 | tasks: 68 | - include_role: name=process_exporter 69 | when: install_process_exporter 70 | 71 | #--------------------------------- 72 | # 5. 
Install monitor web 73 | #--------------------------------- 74 | - hosts: 75 | - web 76 | tasks: 77 | - include_role: name=nginx 78 | when: install_monitor_web 79 | 80 | -------------------------------------------------------------------------------- /roles/dingo_store/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dingo_store_home: "{{ installer_root_path | default('/opt') }}/dingo-store" 4 | 5 | dingo_store_log_path: "{{ dingo_log_dir }}/dingo-store" 6 | dingo_store_data_path: "{{ dingo_data_dir }}/dingo-store" 7 | 8 | installer_cache_path: /tmp 9 | delete_cache_after_install: true 10 | 11 | 12 | 13 | dingo_store_coordinator_meta_path: "{{ dingo_store_data_path }}/coordinator" 14 | 15 | dingo_store_tmp_coordinator_list: "{{ groups['coordinator'] }}" 16 | dingo_store_tmp_store_list: "{{ groups['store'] }}" 17 | dingo_diskann_list: "{{ groups['diskann'] }}" 18 | dingo_diskann_host: "{{ (dingo_diskann_list[0] | default('127.0.0.1')) }}" 19 | 20 | # define dingo coordinator raft connection string: 172.20.3.18:22101,172.20.3.19:22101,172.20.3.20:22101 21 | dingo_store_coordinator_raft_list: "{% for item in dingo_store_tmp_coordinator_list %} {{item}}:{{ dingo_store_coordinator_raft_port }} {% endfor %}" 22 | dingo_coordinator_raft_connection_list: "{{ dingo_store_coordinator_raft_list.split() | join(\",\") }}" 23 | 24 | dingo_store_store_exchange_list: "{% for item in dingo_store_tmp_coordinator_list %} {{item}}:{{ dingo_store_coordinator_exchange_port }} {% endfor %}" 25 | dingo_store_store_exchange_connection_list: "{{ dingo_store_store_exchange_list.split() | join(\",\") }}" 26 | 27 | is_dingo_store_coordinator: "{{ 'coordinator' in group_names }}" 28 | is_dingo_store_store: "{{ 'store' in group_names }}" 29 | is_dingo_store_document: "{{ 'document' in group_names }}" 30 | is_dingo_store_index: "{{ 'index' in group_names }}" 31 | is_dingo_store_diskann: "{{ 'diskann' in group_names }}" 32 | 33 | store_num: "{{ hostvars[inventory_hostname]['store_num'] | default(1) }}" 34 | document_num: "{{ hostvars[inventory_hostname]['document_num'] | default(1) }}" 35 | index_num: "{{ hostvars[inventory_hostname]['index_num'] | default(1) }}" 36 | diskann_num: "{{ hostvars[inventory_hostname]['diskann_num'] | default(1) }}" 37 | default_disk_join: "{{ (dingo_store_home | string ~ '/dist ') * (store_num | int) }}" 38 | default_disk_list: "{{ default_disk_join | trim}}" -------------------------------------------------------------------------------- /roles/nginx/tasks/03_untar_monitor_web.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Remove previous install directories" 4 | file: path={{ item }} state=absent 5 | with_items: 6 | - "{{ nginx_data_path }}/monitor_web" 7 | 8 | - name: "debug info" 9 | debug: msg="{{ installer_cache_path }}/{{ monitor_web_local_file | basename}}" 10 | 11 | - set_fact: remote_tarball_path={{ installer_cache_path }}/{{ monitor_web_local_file | basename}} 12 | 13 | - name: "Ensure installer cache directory {{ installer_cache_path }}" 14 | file: path={{ installer_cache_path }} state=directory 15 | 16 | - name: "Copy tarball to remote host {{ remote_tarball_path }}" 17 | copy: src={{ monitor_web_local_file }} dest={{ remote_tarball_path }} 18 | 19 | - name: "Ensure monitor_web directories" 20 | file: path={{ item }} state=directory owner={{ nginx_user }} group={{ nginx_group }} 21 | with_items: 22 | - "{{ nginx_data_path }}/monitor_web" 23 | 24 | - 
name: "Unarchive {{ remote_tarball_path }} to {{ nginx_data_path }}/monitor_web" 25 | unarchive: 26 | src: "{{ remote_tarball_path}}" 27 | dest: "{{ nginx_data_path }}/monitor_web" 28 | owner: "{{ nginx_user }}" 29 | group: "{{ nginx_group }}" 30 | remote_src: yes 31 | extra_opts: ['--strip-components=1'] 32 | 33 | - name: "Set ownership of {{ nginx_data_path }} to {{ nginx_user }}" 34 | file: path={{ nginx_data_path }} owner={{ nginx_user }} group={{ nginx_group }} recurse=yes 35 | 36 | - name: Find JS files in {{ nginx_data_path }}/monitor_web 37 | find: 38 | paths: "{{ nginx_data_path }}/monitor_web" 39 | patterns: "*.js" 40 | register: js_files 41 | 42 | - name: "Replace {{ nginx_data_path }}/monitor_web/*.js" 43 | replace: 44 | path: "{{ item.path }}" 45 | regexp: ".concat[^/]*/d/Bm4sOIfSz/sql_metrics" 46 | replace: '.concat("{{ grafana_server }}", ":{{ grafana_port }}/d/Bm4sOIfSz/sql_metrics' 47 | with_items: "{{ js_files.files }}" 48 | 49 | - name: "Delete temporary tarball file: {{ remote_tarball_path}}" 50 | file: path={{ remote_tarball_path }} state=absent 51 | when: delete_cache_after_install 52 | 53 | -------------------------------------------------------------------------------- /roles/dingo_store/tasks/02_update_configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "set disk_string" 3 | set_fact: 4 | store_disk_string: "{{ hostvars[inventory_hostname]['disk'] | default(default_disk_list) }}" 5 | 6 | - name: "split string into disk list" 7 | set_fact: 8 | store_disk_list: "{{ store_disk_string.split() }}" 9 | 10 | - name: "Generate dingo configuration files" 11 | template: src={{ item.src }} dest={{ item.dest }} owner={{ dingo_user }} group={{ dingo_group }} mode=0755 12 | with_items: 13 | - { 14 | src: "start-coordinator.sh.j2", 15 | dest: "{{ dingo_store_home }}/scripts/start-coordinator.sh" 16 | } 17 | - { 18 | src: "start-document.sh.j2", 19 | dest: "{{ dingo_store_home }}/scripts/start-document.sh" 20 | } 21 | - { 22 | src: "start-store.sh.j2", 23 | dest: "{{ dingo_store_home }}/scripts/start-store.sh" 24 | } 25 | - { 26 | src: "start-index.sh.j2", 27 | dest: "{{ dingo_store_home }}/scripts/start-index.sh" 28 | } 29 | - { 30 | src: "start-diskann.sh.j2", 31 | dest: "{{ dingo_store_home }}/scripts/start-diskann.sh" 32 | } 33 | - { 34 | src: "gen_coor_list.sh.j2", 35 | dest: "{{ dingo_store_home }}/scripts/gen_coor_list.sh" 36 | } 37 | - { 38 | src: "generate_id.sh.j2", 39 | dest: "{{ dingo_store_home }}/scripts/generate_id.sh" 40 | } 41 | 42 | - name: "Remove dingo logrotate file if exist" 43 | file: path="{{ item }}" state=absent 44 | with_items: 45 | - "/etc/logrotate.d/store-logrotate" 46 | - "/etc/logrotate.d/coordinator-logrotate" 47 | when: open_dingo_store_logrotate == false 48 | 49 | - name: "Generate dingo configuration files logrotate" 50 | template: src={{ item.src }} dest={{ item.dest }} owner=root group=root mode=0644 51 | with_items: 52 | - { 53 | src: "store-logrotate.j2", 54 | dest: "/etc/logrotate.d/store-logrotate" 55 | } 56 | - { 57 | src: "coordinator-logrotate.j2", 58 | dest: "/etc/logrotate.d/coordinator-logrotate" 59 | } 60 | when: open_dingo_store_logrotate 61 | -------------------------------------------------------------------------------- /roles/nginx/tasks/02_build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Try stopping nginx service via systemd" 4 | systemd: name=nginx state=stopped 5 | when: ansible_service_mgr 
== "systemd" 6 | failed_when: false 7 | 8 | - name: "Try stopping nginx service" 9 | service: name=nginx state=stopped 10 | when: (ansible_service_mgr == "upstart" or ansible_service_mgr == "sysvinit") 11 | failed_when: false 12 | 13 | - name: "Ensure nginx directories" 14 | file: path={{ item }} state=directory owner={{ nginx_user }} group={{ nginx_group }} 15 | with_items: 16 | - "{{ nginx_home }}" 17 | - "{{ nginx_data_path }}" 18 | - "{{ nginx_log_path }}" 19 | - "{{ nginx_run_path }}" 20 | 21 | - name: "Ensure installer cache directory {{ installer_cache_path }}" 22 | file: path={{ installer_cache_path }} state=directory 23 | 24 | - name: "Extract nginx tarball" 25 | unarchive: src={{ nginx_local_file }} 26 | dest={{ nginx_home }} 27 | remote_src=yes 28 | when: ansible_architecture == 'x86_64' 29 | 30 | - name: "Extract nginx tarball" 31 | unarchive: src={{ nginx_arm_local_file }} 32 | dest={{ nginx_home }} 33 | remote_src=yes 34 | when: ansible_architecture == 'aarch64' 35 | 36 | - name: "Ensure monitor_web directories" 37 | file: path={{ item }} state=directory owner={{ nginx_user }} group={{ nginx_group }} 38 | with_items: 39 | - "{{ nginx_data_path }}/monitor_web" 40 | 41 | - name: "Unarchive {{ monitor_web_local_file }} to {{ nginx_data_path }}/monitor_web" 42 | unarchive: 43 | src: "{{ monitor_web_local_file }}" 44 | dest: "{{ nginx_data_path }}/monitor_web" 45 | owner: "{{ nginx_user }}" 46 | group: "{{ nginx_group }}" 47 | remote_src: yes 48 | extra_opts: ['--strip-components=1'] 49 | 50 | - name: "Ensure nginx directories" 51 | file: path={{ item }} state=directory owner={{ nginx_user }} group={{ nginx_group }} 52 | with_items: 53 | - "{{ nginx_conf_dir }}/sites-available" 54 | - "{{ nginx_conf_dir }}/sites-enabled" 55 | 56 | - name: "Set ownership of {{ nginx_home }} to {{ nginx_user }}" 57 | file: path={{ nginx_home }} owner={{ nginx_user }} group={{ nginx_group }} recurse=yes 58 | 59 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/tasks/02_update_configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "set disk_string" 3 | set_fact: 4 | store_disk_string: "{{ hostvars[inventory_hostname]['disk'] | default(default_disk_list) }}" 5 | 6 | - name: "split string into disk list" 7 | set_fact: 8 | store_disk_list: "{{ store_disk_string.split() }}" 9 | 10 | 11 | - name: "Generate dingo configuration files" 12 | template: src={{ item.src }} dest={{ item.dest }} owner={{ dingo_user }} group={{ dingo_group }} mode=0755 13 | with_items: 14 | - { 15 | src: "mysql_init.sh.j2", 16 | dest: "{{ dingo_store_home }}/scripts/mysql_init.sh" 17 | } 18 | - { 19 | src: "gen_coor_list.sh.j2", 20 | dest: "{{ dingo_store_home }}/scripts/gen_coor_list.sh" 21 | } 22 | 23 | 24 | - name: "Generate coordinator files" 25 | template: src={{ item.src }} dest={{ item.dest }} owner={{ dingo_user }} group={{ dingo_group }} mode=0755 26 | with_items: 27 | - { 28 | src: "start-coordinator.sh.j2", 29 | dest: "{{ dingo_store_home }}/scripts/start-coordinator.sh", 30 | } 31 | when: is_dingo_store_coordinator 32 | 33 | - name: "Generate store files" 34 | template: src={{ item.src }} dest={{ item.dest }} owner={{ dingo_user }} group={{ dingo_group }} mode=0755 35 | with_items: 36 | - { 37 | src: "start-store.sh.j2", 38 | dest: "{{ dingo_store_home }}/scripts/start-store.sh", 39 | } 40 | when: is_dingo_store_store 41 | 42 | 43 | - name: "Remove dingo logrotate file if exist" 44 | file: path="{{ 
item }}" state=absent 45 | with_items: 46 | - "/etc/logrotate.d/store-logrotate" 47 | - "/etc/logrotate.d/coordinator-logrotate" 48 | when: open_dingo_store_logrotate == false 49 | 50 | - name: "Generate dingo configuration files logrotate" 51 | template: src={{ item.src }} dest={{ item.dest }} owner=root group=root mode=0644 52 | with_items: 53 | - { 54 | src: "store-logrotate.j2", 55 | dest: "/etc/logrotate.d/store-logrotate" 56 | } 57 | - { 58 | src: "coordinator-logrotate.j2", 59 | dest: "/etc/logrotate.d/coordinator-logrotate" 60 | } 61 | when: open_dingo_store_logrotate 62 | -------------------------------------------------------------------------------- /container/images/templates/conf/logback-executor.xml: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 150MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /container/images/templates/conf/logback-coordinator.xml: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 27 | UTF-8 28 | 29 | 30 | 31 | ${LOG_HOME}/${LOG_FILE} 32 | 33 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 34 | 7 35 | 36 | 150MB 37 | 38 | 39 | 40 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 41 | UTF-8 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /roles/dingo/templates/logback-driver.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 150MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /roles/dingo/templates/logback-import.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 150MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /roles/prometheus/tasks/02_pushgateway.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Try stopping existing service via systemd" 4 | systemd: name={{ pushgateway_service_name }} state=stopped 5 | when: ansible_service_mgr == "systemd" 6 | failed_when: false 7 | 8 | - name: "Remove previous install directory" 9 | file: path={{ pushgateway_home 
}} state=absent 10 | 11 | - name: "Create directories" 12 | file: path={{ item }} state=directory owner={{ pushgateway_user }} group={{ pushgateway_group }} 13 | with_items: 14 | - "{{ pushgateway_home }}" 15 | - "{{ pushgateway_log_path }}" 16 | - "{{ pushgateway_data_path }}" 17 | - "{{ pushgateway_run_path }}" 18 | 19 | - set_fact: remote_tarball_path={{ installer_cache_path }}/{{ pushgateway_local_file | basename}} 20 | 21 | - name: "Ensure installer cache Directory {{ installer_cache_path }}" 22 | file: path={{ installer_cache_path }} state=directory 23 | 24 | - name: "Copy tarball to remote host" 25 | copy: src={{ pushgateway_local_file }} dest={{ remote_tarball_path }} 26 | 27 | - name: "Unarchive package file" 28 | unarchive: 29 | src: "{{ remote_tarball_path }}" 30 | dest: "{{ pushgateway_home }}" 31 | owner: "{{ pushgateway_user }}" 32 | group: "{{ pushgateway_group }}" 33 | remote_src: yes 34 | extra_opts: ['--strip-components=1'] 35 | 36 | - name: "Delete temporary tarball file: {{ remote_tarball_path }}" 37 | file: path={{ remote_tarball_path }} state=absent 38 | when: delete_cache_after_install 39 | 40 | - name: "Ensure program file is executable" 41 | file: 42 | path: "{{ pushgateway_home }}/{{ pushgateway_exec }}" 43 | owner: "{{ pushgateway_user }}" 44 | group: "{{ pushgateway_group }}" 45 | mode: 0755 46 | 47 | - name: "Generate systemd service config file" 48 | template: > 49 | src=pushgateway.service.j2 50 | dest=/etc/systemd/system/{{ pushgateway_service_name }}.service 51 | mode=0644 force=yes owner={{ pushgateway_user }} group={{ pushgateway_group }} 52 | when: ansible_service_mgr == "systemd" 53 | 54 | - name: "Ensure pushgateway is started via systemd" 55 | systemd: name={{ pushgateway_service_name }} state=restarted enabled=yes daemon_reload=yes 56 | when: ansible_service_mgr == "systemd" 57 | -------------------------------------------------------------------------------- /roles/dingo/templates/logback-sqlline.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 150MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /roles/prometheus/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | prometheus_service_name: "dingo-prometheus" 4 | 5 | prometheus_home: "{{ installer_root_path | default('/opt') }}/prometheus" 6 | prometheus_user: "{{ dingo_user | default('prometheus') }}" 7 | prometheus_group: "{{ dingo_group | default('prometheus') }}" 8 | 9 | prometheus_log_path: "{{ dingo_log_dir | default('/var/log') }}/prometheus" 10 | prometheus_data_path: "{{ dingo_data_dir | default('/var/lib') }}/prometheus" 11 | prometheus_run_path: "{{ dingo_run_dir | default('/var/run') }}/prometheus" 12 | 13 | prometheus_port: 19090 14 | 15 | prometheus_web_listen_address: "0.0.0.0:{{ prometheus_port }}" 16 | prometheus_web_external_url: "http://0.0.0.0:{{ prometheus_port }}/prometheus" 17 | prometheus_storage_retention: "31d" 18 | prometheus_url: "{{ inventory_hostname }}:{{ prometheus_port }}/prometheus" 19 | 20 | service_list: [] 21 | 22 | # blackbox exporter 
23 | blackbox_exporter_service_name: "dingo-blackbox-exporter" 24 | 25 | blackbox_exporter_home: "{{ installer_root_path | default('/opt') }}/blackbox-exporter" 26 | blackbox_exporter_user: "{{ dingo_user | default('prometheus') }}" 27 | blackbox_exporter_group: "{{ dingo_group | default('prometheus') }}" 28 | 29 | blackbox_exporter_log_path: "{{ dingo_log_dir | default('/var/log') }}/blackbox-exporter" 30 | blackbox_exporter_data_path: "{{ dingo_data_dir | default('/var/lib') }}/blackbox-exporter" 31 | blackbox_exporter_run_path: "{{ dingo_run_dir | default('/var/run') }}/blackbox-exporter" 32 | 33 | blackbox_exporter_port: 19115 34 | 35 | blackbox_exporter_web_listen_address: "0.0.0.0:{{ blackbox_exporter_port }}" 36 | 37 | # pushgateway 38 | pushgateway_service_name: "dingo-pushgateway" 39 | 40 | pushgateway_home: "{{ installer_root_path | default('/opt') }}/pushgateway" 41 | pushgateway_user: "{{ dingo_user | default('prometheus') }}" 42 | pushgateway_group: "{{ dingo_group | default('prometheus') }}" 43 | 44 | pushgateway_log_path: "{{ dingo_log_dir | default('/var/log') }}/pushgateway" 45 | pushgateway_data_path: "{{ dingo_data_dir | default('/var/lib') }}/pushgateway" 46 | pushgateway_run_path: "{{ dingo_run_dir | default('/var/run') }}/pushgateway" 47 | 48 | pushgateway_port: 19091 49 | 50 | pushgateway_exec: "pushgateway" 51 | -------------------------------------------------------------------------------- /roles/dingo/templates/logback-coordinator.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 27 | UTF-8 28 | 29 | 30 | 31 | ${LOG_HOME}/${LOG_FILE} 32 | 33 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 34 | 7 35 | 36 | 150MB 37 | 38 | 39 | 40 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level %class{36} [%L] [%M] - %msg%xEx%n 41 | UTF-8 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /roles/node_exporter/tasks/01_node_exporter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure node_exporter group exist: {{ node_exporter_group }}" 4 | group: name={{ node_exporter_group }} state=present 5 | 6 | - name: "Ensure node_exporter user exist: {{ node_exporter_user }}" 7 | user: name="{{ node_exporter_user }}" group={{ node_exporter_group }} 8 | 9 | - name: "Try stopping existing service via systemd" 10 | systemd: name={{ node_exporter_service_name }} state=stopped 11 | when: ansible_service_mgr == "systemd" 12 | failed_when: false 13 | 14 | - name: "Remove previous install directory" 15 | file: path={{ node_exporter_home }} state=absent 16 | 17 | - name: "Create directories" 18 | file: path={{ item }} state=directory owner={{ node_exporter_user }} group={{ node_exporter_group }} 19 | with_items: 20 | - "{{ node_exporter_home }}" 21 | - "{{ node_exporter_log_path }}" 22 | - "{{ node_exporter_data_path }}" 23 | - "{{ node_exporter_run_path }}" 24 | 25 | - set_fact: remote_tarball_path={{ installer_cache_path }}/{{ node_exporter_local_file | basename}} 26 | 27 | - name: "Ensure installer cache Directory {{ installer_cache_path }}" 28 | file: path={{ installer_cache_path }} state=directory 29 | 30 | - name: "Copy tarball to remote host" 31 | copy: src={{ node_exporter_local_file }} dest={{ remote_tarball_path }} 32 | 33 | - name: "Unarchive package file" 34 | unarchive: 35 | src: "{{ 
remote_tarball_path }}" 36 | dest: "{{ node_exporter_home }}" 37 | owner: "{{ node_exporter_user }}" 38 | group: "{{ node_exporter_group }}" 39 | remote_src: yes 40 | extra_opts: ['--strip-components=1'] 41 | 42 | - name: "Delete temporary tarball file: {{ remote_tarball_path }}" 43 | file: path={{ remote_tarball_path }} state=absent 44 | when: delete_cache_after_install 45 | 46 | - name: "Generate Systemd service File" 47 | template: > 48 | src=node-exporter.service.j2 dest=/etc/systemd/system/{{ node_exporter_service_name }}.service mode=0644 49 | force=yes owner={{ node_exporter_user }} group={{ node_exporter_group }} 50 | when: ansible_service_mgr == "systemd" 51 | 52 | - name: "Ensure node-exporter is started via systemd" 53 | systemd: name={{ node_exporter_service_name }} state=started enabled=yes daemon_reload=yes 54 | when: ansible_service_mgr == "systemd" 55 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/tasks/01_basic_command.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure dingo user group exist: {{ dingo_group }}" 4 | group: name={{ dingo_group }} state=present 5 | 6 | - name: "Ensure dingo user exist {{ dingo_user }}" 7 | user: name={{ dingo_user }} group={{ dingo_group }} 8 | 9 | 10 | 11 | - name: Check dingo-store if file exists 12 | stat: 13 | path: "{{ dingo_store_home }}" 14 | register: file_status 15 | check_mode: no 16 | 17 | - name: Set variable file_status 18 | set_fact: 19 | file_exists: "{{ file_status.stat.exists|default(false) }}" 20 | 21 | - name: Print variable debug info 22 | debug: 23 | msg: "{{ dingo_store_log_path }}, {{ dingo_store_home }}, {{ dingo_store_data_path }}, {{ dingo_store_coordinator_meta_path }}" 24 | 25 | 26 | - name: "Ensure installer cache Directory {{ installer_cache_path }}" 27 | file: path={{ installer_cache_path }} state=directory 28 | 29 | - name: "Create dingo directories" 30 | file: path="{{item}}" state=directory owner={{ dingo_user }} group={{ dingo_group }} 31 | with_items: 32 | - "{{ dingo_store_home }}" 33 | - "{{ dingo_store_log_path }}" 34 | - "{{ dingo_store_data_path }}" 35 | - "{{ dingo_store_coordinator_meta_path }}" 36 | when: file_exists == false 37 | 38 | 39 | - name: "Copy dingo zip archive to remote host" 40 | copy: src={{ item }} dest={{ installer_cache_path }} owner={{ dingo_user }} group={{ dingo_group }} 41 | with_items: 42 | - "{{ cfg_dingodb_store_local_path }}" 43 | when: file_exists == false 44 | 45 | - name: "Unarchive {{ cfg_dingodb_store_local_path }} to {{ dingo_store_home }}" 46 | unarchive: 47 | src: "{{ installer_cache_path }}/{{ cfg_dingodb_store_local_path|basename}}" 48 | dest: "{{ dingo_store_home }}" 49 | mode: "go-w" 50 | remote_src: yes 51 | list_files: yes 52 | owner: "{{ dingo_user }}" 53 | group: "{{ dingo_group }}" 54 | register: archive_contents 55 | when: file_exists == False 56 | 57 | - name: "Delete temporary tarball file: {{ installer_cache_path }}/{{ cfg_dingodb_store_local_path|basename}}" 58 | file: path={{ installer_cache_path }}/{{ cfg_dingodb_store_local_path|basename }} state=absent 59 | when: delete_cache_after_install 60 | 61 | - name: Change owner of all files in directory 62 | file: 63 | path: "{{ dingo_store_home }}" 64 | owner: "{{ dingo_user }}" 65 | group: "{{ dingo_group }}" 66 | recurse: yes 67 | 68 | -------------------------------------------------------------------------------- /roles/dingo_store/tasks/01_basic_command.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure dingo user group exist: {{ dingo_group }}" 4 | group: name={{ dingo_group }} state=present 5 | 6 | - name: "Ensure dingo user exist {{ dingo_user }}" 7 | user: name={{ dingo_user }} group={{ dingo_group }} 8 | 9 | - name: "Kill old process of DingoDB" 10 | shell: pgrep -fu {{ dingo_user }} "dingodb_server -role" | xargs -r kill -9 11 | ignore_errors: yes 12 | 13 | 14 | - name: "Remove dingo home/log/data directories if exist" 15 | file: path="{{ item }}" state=absent 16 | with_items: 17 | - "{{ dingo_store_home }}" 18 | - "{{ dingo_store_log_path }}" 19 | - "{{ dingo_store_data_path }}" 20 | 21 | - name: Print variable debug info 22 | debug: 23 | msg: "{{ dingo_store_log_path }}, {{ dingo_store_home }}, {{ dingo_store_data_path }}, {{ dingo_store_coordinator_meta_path }}" 24 | 25 | 26 | - name: "Ensure installer cache Directory {{ installer_cache_path }}" 27 | file: path={{ installer_cache_path }} state=directory 28 | 29 | - name: "Create dingo directories" 30 | file: path="{{item}}" state=directory owner={{ dingo_user }} group={{ dingo_group }} 31 | with_items: 32 | - "{{ dingo_store_home }}" 33 | - "{{ dingo_store_log_path }}" 34 | - "{{ dingo_store_data_path }}" 35 | - "{{ dingo_store_coordinator_meta_path }}" 36 | 37 | 38 | - name: "Copy dingo zip archive to remote host" 39 | copy: src={{ item }} dest={{ installer_cache_path }} owner={{ dingo_user }} group={{ dingo_group }} 40 | with_items: 41 | - "{{ cfg_dingodb_store_local_path }}" 42 | 43 | - name: "Unarchive {{ cfg_dingodb_store_local_path }} to {{ dingo_store_home }}" 44 | unarchive: 45 | src: "{{ installer_cache_path }}/{{ cfg_dingodb_store_local_path|basename}}" 46 | dest: "{{ dingo_store_home }}" 47 | mode: "go-w" 48 | remote_src: yes 49 | list_files: yes 50 | owner: "{{ dingo_user }}" 51 | group: "{{ dingo_group }}" 52 | register: archive_contents 53 | 54 | - name: "Delete temporary tarball file: {{ installer_cache_path }}/{{ cfg_dingodb_store_local_path|basename}}" 55 | file: path={{ installer_cache_path }}/{{ cfg_dingodb_store_local_path|basename }} state=absent 56 | when: delete_cache_after_install 57 | 58 | - name: Change owner of all files in directory 59 | file: 60 | path: "{{ dingo_store_home }}" 61 | owner: "{{ dingo_user }}" 62 | group: "{{ dingo_group }}" 63 | recurse: yes 64 | 65 | -------------------------------------------------------------------------------- /roles/process_exporter/tasks/01_process_exporter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure process_exporter group exist: {{ process_exporter_group }}" 4 | group: name={{ process_exporter_group }} state=present 5 | 6 | - name: "Ensure process_exporter user exist: {{ process_exporter_user }}" 7 | user: name="{{ process_exporter_user }}" group={{ process_exporter_group }} 8 | 9 | - name: "Try stopping existing service via systemd" 10 | systemd: name={{ process_exporter_service_name }} state=stopped 11 | when: ansible_service_mgr == "systemd" 12 | failed_when: false 13 | 14 | - name: "Remove previous install directory" 15 | file: path={{ process_exporter_home }} state=absent 16 | 17 | - name: "Create directories" 18 | file: path={{ item }} state=directory owner={{ process_exporter_user }} group={{ process_exporter_group }} 19 | with_items: 20 | - "{{ process_exporter_home }}" 21 | - "{{ process_exporter_log_path }}" 22 | - "{{ process_exporter_data_path }}" 23 | - "{{ 
process_exporter_run_path }}" 24 | 25 | - set_fact: remote_tarball_path={{ installer_cache_path }}/{{ process_exporter_local_file | basename}} 26 | 27 | - name: "Ensure installer cache Directory {{ installer_cache_path }}" 28 | file: path={{ installer_cache_path }} state=directory 29 | 30 | - name: "Copy tarball to remote host" 31 | copy: src={{ process_exporter_local_file }} dest={{ remote_tarball_path }} 32 | 33 | - name: "Unarchive package file" 34 | unarchive: 35 | src: "{{ remote_tarball_path }}" 36 | dest: "{{ process_exporter_home }}" 37 | owner: "{{ process_exporter_user }}" 38 | group: "{{ process_exporter_group }}" 39 | remote_src: yes 40 | extra_opts: ['--strip-components=1'] 41 | 42 | - name: "Copy files to home host" 43 | copy: src=process_name.yaml dest={{ process_exporter_home }} owner={{ process_exporter_user }} group={{ process_exporter_group }} 44 | 45 | - name: "Delete temporary tarball file: {{ remote_tarball_path }}" 46 | file: path={{ remote_tarball_path }} state=absent 47 | when: delete_cache_after_install 48 | 49 | - name: "Generate Systemd service File" 50 | template: > 51 | src=process-exporter.service.j2 dest=/etc/systemd/system/{{ process_exporter_service_name }}.service mode=0644 52 | force=yes owner={{ process_exporter_user }} group={{ process_exporter_group }} 53 | when: ansible_service_mgr == "systemd" 54 | 55 | - name: "Ensure process-exporter is started via systemd" 56 | systemd: name={{ process_exporter_service_name }} state=started enabled=yes daemon_reload=yes 57 | when: ansible_service_mgr == "systemd" 58 | -------------------------------------------------------------------------------- /auto-tests/dingo/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '2.2' 3 | services: 4 | coordinator1: 5 | image: dingodatabase/dingo:latest 6 | hostname: coordinator1 7 | container_name: coordinator1 8 | networks: 9 | - dingo_net 10 | environment: 11 | DINGO_ROLE: coordinator 12 | DINGO_HOSTNAME: coordinator1 13 | 14 | coordinator2: 15 | image: dingodatabase/dingo:latest 16 | hostname: coordinator2 17 | container_name: coordinator2 18 | networks: 19 | - dingo_net 20 | environment: 21 | DINGO_ROLE: coordinator 22 | DINGO_HOSTNAME: coordinator2 23 | 24 | coordinator3: 25 | image: dingodatabase/dingo:latest 26 | hostname: coordinator3 27 | container_name: coordinator3 28 | networks: 29 | - dingo_net 30 | environment: 31 | DINGO_ROLE: coordinator 32 | DINGO_HOSTNAME: coordinator3 33 | 34 | executor1: 35 | image: dingodatabase/dingo:latest 36 | hostname: executor1 37 | container_name: executor1 38 | ports: 39 | - 8765:8765 40 | networks: 41 | - dingo_net 42 | depends_on: 43 | - coordinator1 44 | - coordinator2 45 | - coordinator3 46 | environment: 47 | DINGO_ROLE: executor 48 | DINGO_HOSTNAME: executor1 49 | 50 | executor2: 51 | image: dingodatabase/dingo:latest 52 | hostname: executor2 53 | container_name: executor2 54 | ports: 55 | - 8766:8765 56 | networks: 57 | - dingo_net 58 | depends_on: 59 | - coordinator1 60 | - coordinator2 61 | - coordinator3 62 | environment: 63 | DINGO_ROLE: executor 64 | DINGO_HOSTNAME: executor2 65 | 66 | executor3: 67 | image: dingodatabase/dingo:latest 68 | hostname: executor3 69 | container_name: executor3 70 | ports: 71 | - 8767:8765 72 | networks: 73 | - dingo_net 74 | depends_on: 75 | - coordinator1 76 | - coordinator2 77 | - coordinator3 78 | environment: 79 | DINGO_ROLE: executor 80 | DINGO_HOSTNAME: executor3 81 | 82 | web: 83 | image: 
dingodatabase/dingo:latest 84 | hostname: web 85 | container_name: web 86 | ports: 87 | - 13000:13000 88 | networks: 89 | - dingo_net 90 | depends_on: 91 | - coordinator1 92 | - coordinator2 93 | - coordinator3 94 | environment: 95 | DINGO_ROLE: web 96 | DINGO_HOSTNAME: web 97 | 98 | networks: 99 | dingo_net: 100 | driver: bridge 101 | -------------------------------------------------------------------------------- /roles/prometheus/tasks/01_blackbox_exporter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure blackbox-exporter group exist: {{ blackbox_exporter_group }}" 4 | group: name={{ blackbox_exporter_group }} state=present 5 | 6 | - name: "Ensure blackbox-exporter user exist: {{ blackbox_exporter_user }}" 7 | user: name="{{ blackbox_exporter_user }}" group={{ blackbox_exporter_group }} 8 | 9 | - name: "Try stopping existing service via systemd" 10 | systemd: name={{ blackbox_exporter_service_name }} state=stopped 11 | when: ansible_service_mgr == "systemd" 12 | failed_when: false 13 | 14 | - name: "Remove previous install directory" 15 | file: path={{ blackbox_exporter_home }} state=absent 16 | 17 | - name: "Create directories" 18 | file: path={{ item }} state=directory owner={{ blackbox_exporter_user }} group={{ blackbox_exporter_group }} 19 | with_items: 20 | - "{{ blackbox_exporter_home }}" 21 | - "{{ blackbox_exporter_log_path }}" 22 | - "{{ blackbox_exporter_data_path }}" 23 | - "{{ blackbox_exporter_run_path }}" 24 | 25 | - set_fact: remote_tarball_path={{ installer_cache_path }}/{{ blackbox_exporter_local_file | basename}} 26 | 27 | - name: "Ensure installer cache Directory {{ installer_cache_path }}" 28 | file: path={{ installer_cache_path }} state=directory 29 | 30 | - name: "Copy tarball to remote host" 31 | copy: src={{ blackbox_exporter_local_file }} dest={{ remote_tarball_path }} 32 | 33 | - name: "Unarchive package file" 34 | unarchive: 35 | src: "{{ remote_tarball_path }}" 36 | dest: "{{ blackbox_exporter_home }}" 37 | owner: "{{ blackbox_exporter_user }}" 38 | group: "{{ blackbox_exporter_group }}" 39 | remote_src: yes 40 | extra_opts: ['--strip-components=1'] 41 | 42 | - name: "Delete temporary tarball file: {{ remote_tarball_path }}" 43 | file: path={{ remote_tarball_path }} state=absent 44 | when: delete_cache_after_install 45 | 46 | - name: "Generate module config file: blackbox.yml" 47 | template: > 48 | src=blackbox-exporter.yml.j2 dest={{ blackbox_exporter_home }}/blackbox.yml mode=0644 49 | force=yes owner={{ blackbox_exporter_user }} group={{ blackbox_exporter_group }} 50 | 51 | - name: "Generate Systemd service File" 52 | template: > 53 | src=blackbox-exporter.service.j2 dest=/etc/systemd/system/{{ blackbox_exporter_service_name }}.service mode=0644 54 | force=yes owner={{ blackbox_exporter_user }} group={{ blackbox_exporter_group }} 55 | when: ansible_service_mgr == "systemd" 56 | 57 | - name: "Ensure blackbox-exporter is started via systemd" 58 | systemd: name={{ blackbox_exporter_service_name }} state=started enabled=yes daemon_reload=yes 59 | when: ansible_service_mgr == "systemd" 60 | -------------------------------------------------------------------------------- /artifacts/system/centos8/limits/limits.conf: -------------------------------------------------------------------------------- 1 | # /etc/security/limits.conf 2 | # 3 | #This file sets the resource limits for the users logged in via PAM. 4 | #It does not affect resource limits of the system services. 
5 | # 6 | #Also note that configuration files in /etc/security/limits.d directory, 7 | #which are read in alphabetical order, override the settings in this 8 | #file in case the domain is the same or more specific. 9 | #That means for example that setting a limit for wildcard domain here 10 | #can be overriden with a wildcard setting in a config file in the 11 | #subdirectory, but a user specific setting here can be overriden only 12 | #with a user specific setting in the subdirectory. 13 | # 14 | #Each line describes a limit for a user in the form: 15 | # 16 | # 17 | # 18 | #Where: 19 | # can be: 20 | # - a user name 21 | # - a group name, with @group syntax 22 | # - the wildcard *, for default entry 23 | # - the wildcard %, can be also used with %group syntax, 24 | # for maxlogin limit 25 | # 26 | # can have the two values: 27 | # - "soft" for enforcing the soft limits 28 | # - "hard" for enforcing hard limits 29 | # 30 | # can be one of the following: 31 | # - core - limits the core file size (KB) 32 | # - data - max data size (KB) 33 | # - fsize - maximum filesize (KB) 34 | # - memlock - max locked-in-memory address space (KB) 35 | # - nofile - max number of open file descriptors 36 | # - rss - max resident set size (KB) 37 | # - stack - max stack size (KB) 38 | # - cpu - max CPU time (MIN) 39 | # - nproc - max number of processes 40 | # - as - address space limit (KB) 41 | # - maxlogins - max number of logins for this user 42 | # - maxsyslogins - max number of logins on the system 43 | # - priority - the priority to run user process with 44 | # - locks - max number of file locks the user can hold 45 | # - sigpending - max number of pending signals 46 | # - msgqueue - max memory used by POSIX message queues (bytes) 47 | # - nice - max nice priority allowed to raise to values: [-20, 19] 48 | # - rtprio - max realtime priority 49 | # 50 | # 51 | # 52 | 53 | #* soft core 0 54 | #* hard rss 10000 55 | #@student hard nproc 20 56 | #@faculty soft nproc 20 57 | #@faculty hard nproc 50 58 | #ftp hard nproc 0 59 | #@student - maxlogins 4 60 | 61 | * soft nproc 80000 62 | * hard nproc 80000 63 | * soft nofile 80000 64 | * hard nofile 80000 65 | 66 | # End of file 67 | -------------------------------------------------------------------------------- /roles/dingo/templates/start-executor.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright 2021 DataCanvas 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 20 | JAR_PATH=$(find $ROOT -name dingo-executor-*.jar) 21 | LOCAL_STORE_JAR_PATH=$(find $ROOT -name dingo-store-local*.jar) 22 | NET_JAR_PATH=$(find $ROOT -name dingo-net-*.jar) 23 | APP_HOME=$( cd "$( dirname "$0" )/.." 
&& pwd ) 24 | PLATFORM=$(uname -s)-$(uname -m | sed 's/x86_64/x64/') 25 | 26 | JAVA_OPTS="\ 27 | -Xms8g -Xmx8g \ 28 | -XX:+UseZGC \ 29 | -XX:SoftMaxHeapSize=8g \ 30 | -XX:ZAllocationSpikeTolerance=5 \ 31 | -XX:+ZProactive \ 32 | -XX:ZCollectionInterval=4 \ 33 | -XX:+UseLargePages \ 34 | -XX:+UseNUMA \ 35 | -XX:+ParallelRefProcEnabled \ 36 | -XX:+AlwaysPreTouch \ 37 | -XX:+DisableExplicitGC \ 38 | -XX:+HeapDumpOnOutOfMemoryError \ 39 | -XX:MaxDirectMemorySize=4096m \ 40 | -XX:ReservedCodeCacheSize=256m \ 41 | -XX:+UseCodeCacheFlushing \ 42 | -XX:+TieredCompilation \ 43 | -XX:TieredStopAtLevel=4 \ 44 | -XX:InitialCodeCacheSize=256m \ 45 | -Xlog:gc*:file={{ dingo_log_path }}/gc.log:time:filecount=5,filesize=100M \ 46 | " 47 | 48 | EMBEDDED_JDK="${APP_HOME}/${PLATFORM}" 49 | if [ -d "${EMBEDDED_JDK}" ]; then 50 | export DINGO_JAVA_HOME="${EMBEDDED_JDK}" 51 | PATH="${DINGO_JAVA_HOME}/bin:${PATH}" 52 | fi 53 | 54 | 55 | {% if is_license_support %} 56 | LICENSE_JAR_PATH=$(find $ROOT -name license.jar) 57 | nohup ${DINGO_JAVA_HOME}/bin/java ${JAVA_OPTS} \ 58 | -Dlogback.configurationFile=file:${ROOT}/conf/logback-executor.xml \ 59 | -classpath ${JAR_PATH}:${NET_JAR_PATH}:${LOCAL_STORE_JAR_PATH}:${LICENSE_JAR_PATH} \ 60 | io.dingodb.server.executor.Starter \ 61 | --config ${ROOT}/conf/executor.yaml \ 62 | > {{ dingo_log_path }}/executor.out & 63 | {% else %} 64 | nohup ${DINGO_JAVA_HOME}/bin/java ${JAVA_OPTS} \ 65 | --add-opens java.base/java.util=ALL-UNNAMED \ 66 | --add-opens java.base/java.lang=ALL-UNNAMED \ 67 | -Dlogback.configurationFile=file:${ROOT}/conf/logback-executor.xml \ 68 | -classpath ${JAR_PATH}:${NET_JAR_PATH}:${LOCAL_STORE_JAR_PATH} \ 69 | io.dingodb.server.executor.Starter \ 70 | --config ${ROOT}/conf/executor.yaml \ 71 | > {{ dingo_log_path }}/executor.out & 72 | {% endif %} 73 | -------------------------------------------------------------------------------- /container/docker-compose/docker-compose.localhost-1replica.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | x-shared-environment: &shared-env 4 | SERVER_LISTEN_HOST: 0.0.0.0 5 | SERVER_HOST: host.docker.internal 6 | RAFT_LISTEN_HOST: 0.0.0.0 7 | RAFT_HOST: host.docker.internal 8 | COOR_RAFT_PEERS: host.docker.internal:22101 9 | COOR_SRV_PEERS: host.docker.internal:22001 10 | DEFAULT_REPLICA_NUM: 1 11 | 12 | services: 13 | coordinator1: 14 | image: dingodatabase/dingo-store:latest 15 | hostname: coordinator1 16 | container_name: coordinator1 17 | ports: 18 | - 22001:22001 19 | - 22101:22101 20 | - 28001:8000 21 | # network_mode: host 22 | networks: 23 | - dingo_net 24 | environment: 25 | FLAGS_role: coordinator 26 | COORDINATOR_SERVER_START_PORT: 22001 27 | COORDINATOR_RAFT_START_PORT: 22101 28 | INSTANCE_START_ID: 1001 29 | <<: *shared-env 30 | 31 | store1: 32 | image: dingodatabase/dingo-store:latest 33 | hostname: store1 34 | container_name: store1 35 | ports: 36 | - 20001:20001 37 | - 20101:20101 38 | # network_mode: host 39 | networks: 40 | - dingo_net 41 | depends_on: 42 | - coordinator1 43 | environment: 44 | FLAGS_role: store 45 | RAFT_START_PORT: 20101 46 | SERVER_START_PORT: 20001 47 | INSTANCE_START_ID: 1001 48 | <<: *shared-env 49 | 50 | index1: 51 | image: dingodatabase/dingo-store:latest 52 | hostname: index1 53 | container_name: index1 54 | ports: 55 | - 21001:21001 56 | - 21101:21101 57 | # network_mode: host 58 | networks: 59 | - dingo_net 60 | depends_on: 61 | - coordinator1 62 | environment: 63 | FLAGS_role: index 64 | INDEX_RAFT_START_PORT: 21101 65 | 
INDEX_SERVER_START_PORT: 21001 66 | INDEX_INSTANCE_START_ID: 1101 67 | <<: *shared-env 68 | 69 | executor: 70 | image: dingodatabase/dingo:latest 71 | hostname: executor 72 | container_name: executor 73 | ports: 74 | - 8765:8765 75 | - 3307:3307 76 | networks: 77 | - dingo_net 78 | restart: on-failure:5 79 | environment: 80 | DINGO_ROLE: executor 81 | DINGO_HOSTNAME: executor 82 | DINGO_COORDINATORS: host.docker.internal:22001 83 | DINGO_MYSQL_COORDINATORS: host.docker.internal:22001 84 | <<: *shared-env 85 | 86 | proxy: 87 | image: dingodatabase/dingo:latest 88 | hostname: proxy 89 | container_name: proxy 90 | ports: 91 | - 13000:13000 92 | - 9999:9999 93 | networks: 94 | - dingo_net 95 | environment: 96 | DINGO_ROLE: proxy 97 | DINGO_HOSTNAME: proxy 98 | DINGO_COORDINATORS: host.docker.internal:22001 99 | <<: *shared-env 100 | 101 | networks: 102 | dingo_net: 103 | driver: bridge 104 | -------------------------------------------------------------------------------- /container/images/templates/bin/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright 2021 DataCanvas 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" 19 | COORDINATOR_JAR_PATH=$(find $ROOT -name dingo-*-coordinator-*.jar) 20 | EXECUTOR_JAR_PATH=$(find $ROOT -name dingo-*-executor-*.jar) 21 | DRIVER_JAR_PATH=$(find $ROOT -name dingo-cli-*.jar) 22 | WEB_JAR_PATH=$(find $ROOT -name dingo-web*.jar) 23 | NET_JAR_PATH=$(find $ROOT -name dingo-net-*.jar) 24 | 25 | ROLE=$DINGO_ROLE 26 | HOSTNAME=$DINGO_HOSTNAME 27 | 28 | if [ X"$HOSTNAME" = "X" ]; then 29 | echo -e "HOSTNAME has not been set, will exit" > ${ROOT}/log/$ROLE.out 30 | exit -1 31 | fi 32 | 33 | sed -i 's/XXXXXX/'"$HOSTNAME"'/g' ${ROOT}/conf/coordinator.yaml 34 | sed -i 's/XXXXXX/'"$HOSTNAME"'/g' ${ROOT}/conf/executor.yaml 35 | sed -i 's/XXXXXX/'"$HOSTNAME"'/g' ${ROOT}/conf/client.yaml 36 | sed -i 's/XXXXXX/'"$HOSTNAME"'/g' ${ROOT}/conf/application-dev.yaml 37 | 38 | if [[ $ROLE == "coordinator" ]] 39 | then 40 | java ${JAVA_OPTS} \ 41 | -Dlogback.configurationFile=file:${ROOT}/conf/logback-coordinator.xml \ 42 | -classpath ${COORDINATOR_JAR_PATH}:${NET_JAR_PATH} \ 43 | io.dingodb.server.coordinator.Starter \ 44 | --config ${ROOT}/conf/coordinator.yaml \ 45 | > ${ROOT}/log/coordinator.out 46 | elif [[ $ROLE == "executor" ]] 47 | then 48 | sleep 20 49 | /opt/dingo/bin/wait-for-it.sh coordinator1:19181 -t 0 -s -- echo "Wait Coordinator1 Start Successfully!" 50 | /opt/dingo/bin/wait-for-it.sh coordinator2:19181 -t 0 -s -- echo "Wait Coordinator2 Start Successfully!" 51 | /opt/dingo/bin/wait-for-it.sh coordinator3:19181 -t 0 -s -- echo "Wait Coordinator3 Start Successfully!" 52 | ./bin/start-executor.sh & 53 | P1=$! 54 | sleep 20 55 | ./bin/start-driver.sh & 56 | P2=$! 57 | wait $P1 $P2 58 | elif [[ $ROLE == "driver" ]] 59 | then 60 | sleep 60 61 | ./bin/start-driver.sh & 62 | P3=$!
63 | wait $P3 64 | elif [[ $ROLE == "web" ]] 65 | then 66 | java ${JAVA_OPTS} \ 67 | -Dlogback.configurationFile=file:${ROOT}/conf/logback-web.xml \ 68 | -jar ${WEB_JAR_PATH} \ 69 | --spring.config.location=${ROOT}/conf/application.yaml \ 70 | io.dingodb.web.DingoApplication \ 71 | > ${ROOT}/log/web.out 72 | else 73 | echo -e "Invalid DingoDB cluster roles" 74 | fi 75 | -------------------------------------------------------------------------------- /container/docker-compose/docker-compose.single.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '2.2' 3 | 4 | x-shared-environment: &shared-env 5 | SERVER_LISTEN_HOST: 0.0.0.0 6 | SERVER_HOST: ${DINGO_HOST_IP} 7 | RAFT_LISTEN_HOST: 0.0.0.0 8 | RAFT_HOST: ${DINGO_HOST_IP} 9 | DISKANN_SERVER_HOST: ${DINGO_HOST_IP} 10 | COOR_RAFT_PEERS: ${DINGO_HOST_IP}:22101 11 | COOR_SRV_PEERS: ${DINGO_HOST_IP}:22001 12 | DEFAULT_REPLICA_NUM: 1 13 | 14 | services: 15 | coordinator1: 16 | image: dingodatabase/dingo-store:latest 17 | hostname: coordinator1 18 | container_name: coordinator1 19 | network_mode: host 20 | environment: 21 | FLAGS_role: coordinator 22 | COORDINATOR_SERVER_START_PORT: 22001 23 | COORDINATOR_RAFT_START_PORT: 22101 24 | INSTANCE_START_ID: 1001 25 | <<: *shared-env 26 | 27 | store1: 28 | image: dingodatabase/dingo-store:latest 29 | hostname: store1 30 | container_name: store1 31 | network_mode: host 32 | depends_on: 33 | - coordinator1 34 | environment: 35 | FLAGS_role: store 36 | RAFT_START_PORT: 20101 37 | SERVER_START_PORT: 20001 38 | INSTANCE_START_ID: 1001 39 | <<: *shared-env 40 | 41 | document1: 42 | image: dingodatabase/dingo-store:latest 43 | hostname: document1 44 | container_name: document1 45 | network_mode: host 46 | depends_on: 47 | - coordinator1 48 | environment: 49 | FLAGS_role: document 50 | RAFT_START_PORT: 23101 51 | SERVER_START_PORT: 23001 52 | INSTANCE_START_ID: 1201 53 | <<: *shared-env 54 | 55 | index1: 56 | image: dingodatabase/dingo-store:latest 57 | hostname: index1 58 | container_name: index1 59 | network_mode: host 60 | depends_on: 61 | - coordinator1 62 | environment: 63 | FLAGS_role: index 64 | INDEX_RAFT_START_PORT: 21101 65 | INDEX_SERVER_START_PORT: 21001 66 | INDEX_INSTANCE_START_ID: 1101 67 | <<: *shared-env 68 | 69 | executor: 70 | image: dingodatabase/dingo:latest 71 | hostname: executor 72 | container_name: executor 73 | ports: 74 | - 8765:8765 75 | - 3307:3307 76 | networks: 77 | - dingo_net 78 | restart: on-failure:5 79 | environment: 80 | DINGO_ROLE: executor 81 | DINGO_HOSTNAME: executor 82 | DINGO_COORDINATORS: ${DINGO_HOST_IP}:22001 83 | DINGO_MYSQL_COORDINATORS: ${DINGO_HOST_IP}:22001 84 | <<: *shared-env 85 | 86 | proxy: 87 | image: dingodatabase/dingo:latest 88 | hostname: proxy 89 | container_name: proxy 90 | ports: 91 | - 13000:13000 92 | - 9999:9999 93 | networks: 94 | - dingo_net 95 | environment: 96 | DINGO_ROLE: proxy 97 | DINGO_HOSTNAME: proxy 98 | DINGO_COORDINATORS: ${DINGO_HOST_IP}:22001 99 | <<: *shared-env 100 | 101 | networks: 102 | dingo_net: 103 | driver: bridge 104 | -------------------------------------------------------------------------------- /roles/prometheus/tasks/03_prometheus_server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Load artifacts info" 4 | action: resolve_artifacts 5 | 6 | - name: "Ensure prometheus group exist: {{ prometheus_group }}" 7 | group: name={{ prometheus_group }} state=present 8 | 9 | - name: "Ensure prometheus user exist: 
{{ prometheus_user }}" 10 | user: name="{{ prometheus_user }}" group={{ prometheus_group }} 11 | 12 | - name: "Try stopping existing service via systemd" 13 | systemd: name={{ prometheus_service_name }} state=stopped 14 | when: ansible_service_mgr == "systemd" 15 | failed_when: false 16 | 17 | - name: "Remove previous install directory" 18 | file: path={{ prometheus_home }} state=absent 19 | 20 | - name: "Create directories" 21 | file: path={{ item }} state=directory owner={{ prometheus_user }} group={{ prometheus_group }} 22 | with_items: 23 | - "{{ prometheus_home }}" 24 | - "{{ prometheus_home }}/file_sd" 25 | - "{{ prometheus_log_path }}" 26 | - "{{ prometheus_data_path }}" 27 | - "{{ prometheus_run_path }}" 28 | 29 | - set_fact: remote_tarball_path={{ installer_cache_path }}/{{ prometheus_local_file | basename}} 30 | 31 | - name: "Ensure installer cache Directory {{ installer_cache_path }}" 32 | file: path={{ installer_cache_path }} state=directory 33 | 34 | - name: "Copy tarball to remote host" 35 | copy: src={{ prometheus_local_file }} dest={{ remote_tarball_path }} 36 | 37 | - name: "Unarchive package file" 38 | unarchive: 39 | src: "{{ remote_tarball_path }}" 40 | dest: "{{ prometheus_home }}" 41 | owner: "{{ prometheus_user }}" 42 | group: "{{ prometheus_group }}" 43 | remote_src: yes 44 | extra_opts: [ '--strip-components=1' ] 45 | 46 | - name: "Delete temporary tarball file: {{ remote_tarball_path }}" 47 | file: path={{ remote_tarball_path }} state=absent 48 | when: delete_cache_after_install 49 | 50 | - name: "Generate prometheus configuration files" 51 | template: > 52 | src={{ item.src }} dest={{ item.dest }} 53 | owner={{ prometheus_user }} group={{ prometheus_group }} mode=0644 force=yes 54 | with_items: 55 | - { 56 | src: "prometheus.yml.j2", 57 | dest: "{{ prometheus_home }}/prometheus.yml" 58 | } 59 | - { 60 | src: "jmx.json.j2", 61 | dest: "{{ prometheus_home }}/file_sd/jmx.json" 62 | } 63 | - { 64 | src: "node.yml.j2", 65 | dest: "{{ prometheus_home }}/file_sd/node.yml" 66 | } 67 | - { 68 | src: "process.yml.j2", 69 | dest: "{{ prometheus_home }}/file_sd/process.yml" 70 | } 71 | 72 | - name: "Generate Systemd service File" 73 | template: > 74 | src=prometheus.service.j2 dest=/etc/systemd/system/{{ prometheus_service_name }}.service mode=0644 75 | force=yes owner={{ prometheus_user }} group={{ prometheus_group }} 76 | when: ansible_service_mgr == "systemd" 77 | 78 | - name: "Ensure prometheus is started via systemd" 79 | systemd: name={{ prometheus_service_name }} state=started enabled=yes daemon_reload=yes 80 | when: ansible_service_mgr == "systemd" 81 | -------------------------------------------------------------------------------- /roles/dingo/tasks/02_update_configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # for debugger to display variables 4 | - name: debug 5 | debug: 6 | msg: 7 | - "Coordinator Exchange Connection: {{ dingo_coordinator_exchange_connection_list }}" 8 | 9 | - name: "Generate dingo configuration files" 10 | template: src={{ item.src }} dest={{ item.dest }} owner={{ dingo_user }} group={{ dingo_group }} mode=0755 11 | with_items: 12 | - { 13 | src: "logback-executor.xml.j2", 14 | dest: "{{ dingo_home }}/conf/logback-executor.xml" 15 | } 16 | - { 17 | src: "logback-proxy.xml.j2", 18 | dest: "{{ dingo_home }}/conf/logback-proxy.xml" 19 | } 20 | - { 21 | src: "logback-web.xml.j2", 22 | dest: "{{ dingo_home }}/conf/logback-web.xml" 23 | } 24 | - { 25 | src: "logback.xml.j2", 26 | dest: 
"{{ dingo_home }}/conf/logback.xml" 27 | } 28 | - { 29 | src: "client.yaml.j2", 30 | dest: "{{ dingo_home }}/conf/client.yaml" 31 | } 32 | - { 33 | src: "start-executor.sh.j2", 34 | dest: "{{ dingo_home }}/bin/start-executor.sh" 35 | } 36 | - { 37 | src: "start-web.sh.j2", 38 | dest: "{{ dingo_home }}/bin/start-web.sh" 39 | } 40 | - { 41 | src: "start-proxy.sh.j2", 42 | dest: "{{ dingo_home }}/bin/start-proxy.sh" 43 | } 44 | - { 45 | src: "stop-all-component.sh.j2", 46 | dest: "{{ dingo_home }}/bin/stop-all-component.sh" 47 | } 48 | - { 49 | src: "stop-executor-proxy.sh.j2", 50 | dest: "{{ dingo_home }}/bin/stop-executor-proxy.sh" 51 | } 52 | - { 53 | src: "stop-dingo-web.sh.j2", 54 | dest: "{{ dingo_home }}/bin/stop-dingo-web.sh" 55 | } 56 | - { 57 | src: "executor.yaml.j2", 58 | dest: "{{ dingo_home }}/conf/executor.yaml" 59 | } 60 | - { 61 | src: "executor-noldap.yaml.j2", 62 | dest: "{{ dingo_home }}/conf/executor-noldap.yaml" 63 | } 64 | - { 65 | src: "application-proxy-dev.yaml.j2", 66 | dest: "{{ dingo_home }}/conf/application-proxy-dev.yaml" 67 | } 68 | - { 69 | src: "application-proxy.yaml.j2", 70 | dest: "{{ dingo_home }}/conf/application-proxy.yaml" 71 | } 72 | - { 73 | src: "application-web.yaml.j2", 74 | dest: "{{ dingo_home }}/conf/application-web.yaml" 75 | } 76 | - { 77 | src: "application-web-dev.yaml.j2", 78 | dest: "{{ dingo_home }}/conf/application-web-dev.yaml" 79 | } 80 | 81 | # 如果is_support_ldap是false,将{{ dingo_home }}/conf/executor.yaml删除,并将{{ dingo_home }}/conf/executor-noldap.yaml重命名为executor.yaml 82 | - name: "Remove executor.yaml if is_support_ldap is false" 83 | file: path="{{ dingo_home }}/conf/executor.yaml" state=absent 84 | when: is_support_ldap == false 85 | 86 | - name: "Rename executor-noldap.yaml to executor.yaml if is_support_ldap is false" 87 | command: mv "{{ dingo_home }}/conf/executor-noldap.yaml" "{{ dingo_home }}/conf/executor.yaml" 88 | when: is_support_ldap == false 89 | 90 | - name: "Remove unused configure files" 91 | file: path="{{ item }}" state=absent 92 | with_items: 93 | - "{{ dingo_home }}/conf/config.yaml" 94 | -------------------------------------------------------------------------------- /container/images/buildImages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright 2022 DataCanvas 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | 18 | help() { 19 | echo "" 20 | echo "Usage: $0 -a VERSION" 21 | echo -e "\t-a VERSION: represent the version of current, such as v0.2.0" 22 | exit 1 # Exit script after printing help 23 | } 24 | 25 | while getopts "a:b:" opt 26 | do 27 | case "$opt" in 28 | a ) VERSION="$OPTARG" ;; 29 | ? 
) help ;; # Print help in case parameter is non-existent 30 | esac 31 | done 32 | 33 | # Print help in case parameters are empty 34 | if [ -z "$VERSION" ]; then 35 | help 36 | fi 37 | 38 | REPOSITORY=local 39 | echo -e "0.=============Input: version=$VERSION, repository=$REPOSITORY=========" 40 | 41 | # step1. delete old archive package 42 | echo -e "1.=============remove old artifactory files===================" 43 | [ -f dingo.zip ] && rm -rf dingo.zip && rm -rf ./tmp 44 | 45 | # step2. unarchive `dingo.zip` and update the configuration 46 | ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd ../../ && pwd )" 47 | DINGO_ZIP=$(find $ROOT -name dingo.zip) 48 | 49 | echo -e "2.=============update dingo.zip with templates===================" 50 | if [ -f "$DINGO_ZIP" ] ; then 51 | mkdir tmp && unzip $DINGO_ZIP -d tmp/ 52 | cp -r ./templates/conf/* ./tmp/conf/ 53 | cp -r ./templates/bin/* ./tmp/bin/ 54 | rm -rf ./tmp/conf/config.yaml && rm -rf ./tmp/conf/logback.xml 55 | else 56 | echo "$DINGO_ZIP does not exist, please copy the dingo.zip to artifactory" 57 | exit 1 58 | fi 59 | 60 | # Archive the dingo.zip package 61 | cd ./tmp && zip -r dingo.zip * && cp dingo.zip .. && cd .. && rm -rf tmp 62 | 63 | # step3. docker build the images 64 | echo -e "3.=============build docker images on local machine===================" 65 | DINGO_IMAGE_NAME=dingodb.ubuntu 66 | DINGO_IMAGE_INTERNAL_REPO=172.20.3.185:5000/dingodb/$DINGO_IMAGE_NAME:$VERSION 67 | docker build -t $DINGO_IMAGE_NAME:$VERSION . 68 | 69 | # step4. upload the docker images to github or internal 70 | IMAGEID=`docker images | grep $DINGO_IMAGE_NAME | grep $VERSION | awk '{print $3}'` 71 | echo -e "4.=============dingodb $VERSION , current docker image: $DINGO_IMAGE_NAME is: $IMAGEID" 72 | 73 | # if [ $REPOSITORY == 'local' ]; then 74 | # echo -e "4.1=============>current repository is local, will push image to: $DINGO_IMAGE_INTERNAL_REPO" 75 | # docker tag $IMAGEID $DINGO_IMAGE_INTERNAL_REPO 76 | # docker push $DINGO_IMAGE_INTERNAL_REPO 77 | # fi 78 | 79 | # if [ $REPOSITORY == 'github' ]; then 80 | # echo -e "4.1=============>Current repository is: github" 81 | # fi 82 | 83 | # step5. remove the dingo.zip files 84 | [ -f dingo.zip ] && rm -rf dingo.zip 85 | -------------------------------------------------------------------------------- /container/images/templates/conf/logback-web.xml: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 20MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | ${LOG_HOME}/${ERROR_LOG_FILE} 46 | 47 | ${LOG_HOME}/${ERROR_LOG_FILE}.%d{yyyy-MM-dd}.%i 48 | 7 49 | 50 | 20MB 51 | 52 | 53 | 54 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 55 | UTF-8 56 | 57 | 58 | ERROR 59 | ACCEPT 60 | DENY 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /container/images/templates/conf/logback-driver.xml: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | 
${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 200MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | ${LOG_HOME}/${ERROR_LOG_FILE} 46 | 47 | ${LOG_HOME}/${ERROR_LOG_FILE}.%d{yyyy-MM-dd}.%i 48 | 7 49 | 50 | 200MB 51 | 52 | 53 | 54 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 55 | UTF-8 56 | 57 | 58 | ERROR 59 | ACCEPT 60 | DENY 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /roles/dingo/templates/logback.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 200MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | ${LOG_HOME}/${ERROR_LOG_FILE} 46 | 47 | ${LOG_HOME}/${ERROR_LOG_FILE}.%d{yyyy-MM-dd}.%i 48 | 7 49 | 50 | 200MB 51 | 52 | 53 | 54 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 55 | UTF-8 56 | 57 | 58 | ERROR 59 | ACCEPT 60 | DENY 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /container/images/templates/conf/logback-sqlline.xml: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 200MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | ${LOG_HOME}/${ERROR_LOG_FILE} 46 | 47 | ${LOG_HOME}/${ERROR_LOG_FILE}.%d{yyyy-MM-dd}.%i 48 | 7 49 | 50 | 200MB 51 | 52 | 53 | 54 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 55 | UTF-8 56 | 57 | 58 | ERROR 59 | ACCEPT 60 | DENY 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /roles/dingo/templates/logback-web.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 20MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | ${LOG_HOME}/${ERROR_LOG_FILE} 46 | 47 | ${LOG_HOME}/${ERROR_LOG_FILE}.%d{yyyy-MM-dd}.%i 48 | 7 49 | 50 | 20MB 51 | 52 | 53 | 54 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 55 | UTF-8 56 | 57 | 58 | ERROR 59 | ACCEPT 60 | DENY 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /roles/dingo_store/templates/start-coordinator.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mydir="${BASH_SOURCE%/*}" 4 | if [[ ! -d "$mydir" ]]; then mydir="$PWD"; fi 5 | . 
$mydir/shflags 6 | 7 | DEFINE_string role 'coordinator' 'server role' 8 | DEFINE_boolean clean_db 1 'clean db' 9 | DEFINE_boolean clean_raft 1 'clean raft' 10 | DEFINE_boolean clean_log 0 'clean log' 11 | DEFINE_boolean replace_conf 0 'replace conf' 12 | 13 | # parse the command-line 14 | FLAGS "$@" || exit 1 15 | eval set -- "${FLAGS_ARGV}" 16 | 17 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 18 | DIST_DIR=$BASE_DIR/dist 19 | 20 | if [ ! -d "$DIST_DIR" ]; then 21 | mkdir "$DIST_DIR" 22 | fi 23 | 24 | SERVER_HOST={{ inventory_hostname | default('127.0.0.1') }} 25 | SERVER_LISTEN_HOST={{ server_listen_host | default('0.0.0.0') }} 26 | RAFT_HOST={{ inventory_hostname | default('127.0.0.1') }} 27 | RAFT_LISTEN_HOST={{ raft_listen_host | default('0.0.0.0') }} 28 | COORDINATOR_RAFT_START_PORT={{ dingo_store_coordinator_raft_port }} 29 | COORDINATOR_SERVER_START_PORT={{ dingo_store_coordinator_exchange_port }} 30 | COOR_RAFT_PEERS={{ dingo_coordinator_raft_connection_list }} 31 | COOR_SRV_PEERS={{ dingo_store_store_exchange_connection_list }} 32 | INSTANCE_START_ID={{ 1000 | int + groups['coordinator'].index(inventory_hostname) | int }} 33 | TMP_COORDINATOR_SERVICES=$BASE_DIR/build/bin/coor_list 34 | DEFAULT_REPLICA_NUM={{ install_dingo_store_default_replica_num }} 35 | 36 | source $mydir/deploy_func.sh 37 | 38 | deploy() { 39 | echo "# dingo-store coordinators" > ${TMP_COORDINATOR_SERVICES} 40 | echo $COOR_SRV_PEERS | tr ',' '\n' >> ${TMP_COORDINATOR_SERVICES} 41 | 42 | program_dir=$BASE_DIR/dist/${FLAGS_role}1 43 | deploy_store ${FLAGS_role} $BASE_DIR $program_dir $COORDINATOR_SERVER_START_PORT $COORDINATOR_RAFT_START_PORT $INSTANCE_START_ID ${COOR_RAFT_PEERS} ${TMP_COORDINATOR_SERVICES} 44 | } 45 | 46 | start() { 47 | #FLAGS_role=${FLAGS_role} 48 | i=1 49 | program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 50 | # clean log 51 | rm -f ${program_dir}/log/* 52 | start_program ${FLAGS_role} ${program_dir} 53 | } 54 | 55 | clean() 56 | { 57 | i=1 58 | program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 59 | rm -rf ${program_dir} 60 | sleep 1 61 | echo "rm -rf ${program_dir} files" 62 | } 63 | 64 | stop() 65 | { 66 | i=1 67 | program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 68 | k_pid=$(ps -fu {{ dingo_user }} | grep "${program_dir}.*dingodb_server.*${FLAGS_role}" |grep -v grep|awk '{printf $2 "\n"}') 69 | for i in $k_pid 70 | do 71 | echo $i 72 | $(kill -9 $i) 73 | done 74 | } 75 | 76 | usage() 77 | { 78 | echo "Usage: $0 [clean|deploy|start|stop|restart|cleanstart]" 79 | } 80 | 81 | if [ $# -lt 1 ];then 82 | usage 83 | exit 84 | fi 85 | 86 | if [ "$1" = "deploy" ];then 87 | deploy 88 | elif [ "$1" = "clean" ];then 89 | stop 90 | clean 91 | elif [ "$1" = "start" ];then 92 | start 93 | 94 | elif [ "$1" = "stop" ];then 95 | stop 96 | 97 | elif [ "$1" = "restart" ];then 98 | stop 99 | start 100 | 101 | elif [ "$1" = "cleanstart" ];then 102 | stop 103 | clean 104 | deploy 105 | start 106 | 107 | else 108 | usage 109 | fi 110 | -------------------------------------------------------------------------------- /container/docker-compose/docker-compose.lite.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | x-shared-environment: &shared-env 4 | SERVER_LISTEN_HOST: 0.0.0.0 5 | SERVER_HOST: host.docker.internal 6 | RAFT_LISTEN_HOST: 0.0.0.0 7 | RAFT_HOST: host.docker.internal 8 | COOR_RAFT_PEERS: host.docker.internal:22101 9 | COOR_SRV_PEERS: host.docker.internal:22001 10 | DEFAULT_REPLICA_NUM: 1 11 | DINGODB_ENABLE_LITE: 1 12 | DEFAULT_MIN_SYSTEM_DISK_CAPACITY_FREE_RATIO: 
0.05 13 | DEFAULT_MIN_SYSTEM_MEMORY_CAPACITY_FREE_RATIO: 0.20 14 | DINGODB_ENABLE_ROCKSDB_SYNC: 1 15 | 16 | services: 17 | coordinator1: 18 | image: dingodatabase/dingo-store:latest 19 | hostname: coordinator1 20 | container_name: coordinator1 21 | ports: 22 | - 22001:22001 23 | - 22101:22101 24 | - 28001:8000 25 | # network_mode: host 26 | volumes: 27 | - d:\coordinator1:/opt/dingo-store/dist/coordinator1/data 28 | networks: 29 | - dingo_net 30 | environment: 31 | FLAGS_role: coordinator 32 | COORDINATOR_SERVER_START_PORT: 22001 33 | COORDINATOR_RAFT_START_PORT: 22101 34 | INSTANCE_START_ID: 1001 35 | <<: *shared-env 36 | 37 | store1: 38 | image: dingodatabase/dingo-store:latest 39 | hostname: store1 40 | container_name: store1 41 | ports: 42 | - 20001:20001 43 | - 20101:20101 44 | # network_mode: host 45 | volumes: 46 | - d:\store1:/opt/dingo-store/dist/store1/data 47 | networks: 48 | - dingo_net 49 | depends_on: 50 | - coordinator1 51 | environment: 52 | FLAGS_role: store 53 | RAFT_START_PORT: 20101 54 | SERVER_START_PORT: 20001 55 | INSTANCE_START_ID: 1001 56 | <<: *shared-env 57 | 58 | index1: 59 | image: dingodatabase/dingo-store:latest 60 | hostname: index1 61 | container_name: index1 62 | ports: 63 | - 21001:21001 64 | - 21101:21101 65 | # network_mode: host 66 | volumes: 67 | - d:\index1:/opt/dingo-store/dist/index1/data 68 | networks: 69 | - dingo_net 70 | depends_on: 71 | - coordinator1 72 | environment: 73 | FLAGS_role: index 74 | INDEX_RAFT_START_PORT: 21101 75 | INDEX_SERVER_START_PORT: 21001 76 | INDEX_INSTANCE_START_ID: 1101 77 | <<: *shared-env 78 | 79 | executor: 80 | image: dingodatabase/dingo:latest 81 | hostname: executor 82 | container_name: executor 83 | ports: 84 | - 8765:8765 85 | - 3307:3307 86 | networks: 87 | - dingo_net 88 | restart: on-failure:5 89 | environment: 90 | DINGO_ROLE: executor 91 | DINGO_HOSTNAME: executor 92 | DINGO_COORDINATORS: host.docker.internal:22001 93 | DINGO_MYSQL_COORDINATORS: host.docker.internal:22001 94 | <<: *shared-env 95 | 96 | proxy: 97 | image: dingodatabase/dingo:latest 98 | hostname: proxy 99 | container_name: proxy 100 | ports: 101 | - 13000:13000 102 | - 9999:9999 103 | networks: 104 | - dingo_net 105 | environment: 106 | DINGO_ROLE: proxy 107 | DINGO_HOSTNAME: proxy 108 | DINGO_COORDINATORS: host.docker.internal:22001 109 | <<: *shared-env 110 | 111 | networks: 112 | dingo_net: 113 | driver: bridge 114 | -------------------------------------------------------------------------------- /roles/dingo/templates/logback-proxy.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 26 | UTF-8 27 | 28 | 29 | 30 | ${LOG_HOME}/${LOG_FILE} 31 | 32 | ${LOG_HOME}/${LOG_FILE}.%d{yyyy-MM-dd}.%i 33 | 7 34 | 35 | 200MB 36 | 37 | 38 | 39 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 40 | UTF-8 41 | 42 | 43 | 44 | 45 | ${LOG_HOME}/${ERROR_LOG_FILE} 46 | 47 | ${LOG_HOME}/${ERROR_LOG_FILE}.%d{yyyy-MM-dd}.%i 48 | 7 49 | 50 | 200MB 51 | 52 | 53 | 54 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 55 | UTF-8 56 | 57 | 58 | ERROR 59 | ACCEPT 60 | DENY 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/templates/start-coordinator.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 
mydir="${BASH_SOURCE%/*}" 4 | if [[ ! -d "$mydir" ]]; then mydir="$PWD"; fi 5 | . $mydir/shflags 6 | 7 | 8 | DEFINE_string role 'coordinator' 'server role' 9 | DEFINE_boolean clean_db 1 'clean db' 10 | DEFINE_boolean clean_raft 1 'clean raft' 11 | DEFINE_boolean clean_log 0 'clean log' 12 | DEFINE_boolean replace_conf 0 'replace conf' 13 | 14 | # parse the command-line 15 | FLAGS "$@" || exit 1 16 | eval set -- "${FLAGS_ARGV}" 17 | 18 | 19 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 20 | DIST_DIR=$BASE_DIR/dist 21 | 22 | if [ ! -d "$DIST_DIR" ]; then 23 | mkdir "$DIST_DIR" 24 | fi 25 | 26 | SERVER_HOST={{ inventory_hostname | default('127.0.0.1') }} 27 | SERVER_LISTEN_HOST={{ inventory_hostname | default('127.0.0.1') }} 28 | RAFT_HOST={{ inventory_hostname | default('127.0.0.1') }} 29 | RAFT_LISTEN_HOST={{ inventory_hostname | default('127.0.0.1') }} 30 | COORDINATOR_RAFT_START_PORT={{ dingo_store_coordinator_raft_port }} 31 | COORDINATOR_SERVER_START_PORT={{ dingo_store_coordinator_exchange_port }} 32 | COOR_RAFT_PEERS={{ dingo_coordinator_raft_connection_list }} 33 | COOR_SRV_PEERS={{ dingo_store_store_exchange_connection_list }} 34 | INSTANCE_START_ID={{ 2000 | int + groups['add_coordinator'].index(inventory_hostname) | int }} 35 | TMP_COORDINATOR_SERVICES=$BASE_DIR/build/bin/coor_list 36 | DEFAULT_REPLICA_NUM={{ install_dingo_store_default_replica_num }} 37 | 38 | source $mydir/deploy_func.sh 39 | 40 | deploy() { 41 | echo "# dingo-store coordinators" > ${TMP_COORDINATOR_SERVICES} 42 | echo $COOR_SRV_PEERS | tr ',' '\n' >> ${TMP_COORDINATOR_SERVICES} 43 | 44 | program_dir=$BASE_DIR/dist/${FLAGS_role}1 45 | deploy_store ${FLAGS_role} $BASE_DIR $program_dir $COORDINATOR_SERVER_START_PORT $COORDINATOR_RAFT_START_PORT $INSTANCE_START_ID ${COOR_RAFT_PEERS} ${TMP_COORDINATOR_SERVICES} 46 | 47 | } 48 | 49 | 50 | 51 | start() { 52 | #FLAGS_role=${FLAGS_role} 53 | i=1 54 | program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 55 | # clean log 56 | rm -f ${program_dir}/log/* 57 | start_program ${FLAGS_role} ${program_dir} 58 | } 59 | 60 | 61 | clean() 62 | { 63 | i=1 64 | program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 65 | rm -rf ${program_dir} 66 | sleep 1 67 | echo "rm -rf ${program_dir} files" 68 | } 69 | 70 | 71 | 72 | 73 | stop() 74 | { 75 | k_pid=$(ps -fu {{ dingo_user }} | grep "dingodb_server --role coordinator" |grep -v grep|awk '{printf $2 "\n"}') 76 | for i in $k_pid 77 | do 78 | echo $i 79 | $(kill -9 $i) 80 | done 81 | } 82 | 83 | usage() 84 | { 85 | echo "Usage: $0 [clean|deploy|start|stop|restart|cleanstart]" 86 | } 87 | 88 | if [ $# -lt 1 ];then 89 | usage 90 | exit 91 | fi 92 | 93 | 94 | if [ "$1" = "deploy" ];then 95 | deploy 96 | elif [ "$1" = "clean" ];then 97 | stop 98 | clean 99 | elif [ "$1" = "start" ];then 100 | start 101 | 102 | elif [ "$1" = "stop" ];then 103 | stop 104 | 105 | elif [ "$1" = "restart" ];then 106 | stop 107 | start 108 | 109 | elif [ "$1" = "cleanstart" ];then 110 | stop 111 | clean 112 | deploy 113 | start 114 | 115 | else 116 | usage 117 | fi 118 | -------------------------------------------------------------------------------- /roles/dingo_store/tasks/03_start_roles_command.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # stop all the coordinator and store 4 | - name: "Stop and Del db Coordinator on all host" 5 | become: true 6 | become_user: "{{ dingo_user }}" 7 | shell: "/bin/bash ./scripts/start-coordinator.sh clean" 8 | args: 9 | chdir: "{{ dingo_store_home }}" 10 | 11 | # stop all the coordinator 
and store 12 | - name: "Stop and Del db Store on all host" 13 | become: true 14 | become_user: "{{ dingo_user }}" 15 | shell: "/bin/bash ./scripts/start-store.sh clean" 16 | args: 17 | chdir: "{{ dingo_store_home }}" 18 | 19 | # stop all the document 20 | - name: "Stop and Del db Document on all host" 21 | become: true 22 | become_user: "{{ dingo_user }}" 23 | shell: "/bin/bash ./scripts/start-document.sh clean" 24 | args: 25 | chdir: "{{ dingo_store_home }}" 26 | 27 | # stop all the index 28 | - name: "Stop and Del db Index on all host" 29 | become: true 30 | become_user: "{{ dingo_user }}" 31 | shell: "/bin/bash ./scripts/start-index.sh clean" 32 | args: 33 | chdir: "{{ dingo_store_home }}" 34 | 35 | # stop all the diskann 36 | - name: "Stop and Del db Diskann on all host" 37 | become: true 38 | become_user: "{{ dingo_user }}" 39 | shell: "/bin/bash ./scripts/start-diskann.sh clean" 40 | args: 41 | chdir: "{{ dingo_store_home }}" 42 | 43 | # start coordinator 44 | - name: "Deploy and Start Coordinator using Script" 45 | become: true 46 | become_user: "{{ dingo_user }}" 47 | shell: "/bin/bash ./scripts/start-coordinator.sh cleanstart" 48 | args: 49 | chdir: "{{ dingo_store_home }}" 50 | when: is_dingo_store_coordinator 51 | 52 | - name: "Sleep wait coordinator start" 53 | shell: "sleep 20" 54 | 55 | # start store 56 | - name: "Deploy and Start Store using Shell Script" 57 | become: true 58 | become_user: "{{ dingo_user }}" 59 | shell: "/bin/bash ./scripts/start-store.sh cleanstart" 60 | args: 61 | chdir: "{{ dingo_store_home }}" 62 | when: is_dingo_store_store 63 | 64 | # start document 65 | - name: "Deploy and Start Document using Shell Script" 66 | become: true 67 | become_user: "{{ dingo_user }}" 68 | shell: "/bin/bash ./scripts/start-document.sh cleanstart" 69 | args: 70 | chdir: "{{ dingo_store_home }}" 71 | when: is_dingo_store_document 72 | 73 | # start index 74 | - name: "Deploy and Start Index using Shell Script" 75 | become: true 76 | become_user: "{{ dingo_user }}" 77 | shell: "/bin/bash ./scripts/start-index.sh cleanstart" 78 | args: 79 | chdir: "{{ dingo_store_home }}" 80 | when: is_dingo_store_index 81 | 82 | - name: "Sleep wait store start" 83 | shell: "sleep 20" 84 | 85 | # start diskann 86 | - name: "Deploy and Start Diskann using Shell Script" 87 | become: true 88 | become_user: "{{ dingo_user }}" 89 | shell: "/bin/bash ./scripts/start-diskann.sh cleanstart" 90 | args: 91 | chdir: "{{ dingo_store_home }}" 92 | when: is_dingo_store_diskann 93 | 94 | - name: "Sleep wait diskann start" 95 | shell: "sleep 20" 96 | 97 | - name: "Install license" 98 | register: license_id 99 | shell: "/bin/bash ./scripts/generate_id.sh" 100 | args: 101 | chdir: "{{ dingo_store_home }}" 102 | when: 103 | - is_license_support 104 | - inventory_hostname == groups['store'][0] 105 | 106 | # Print the result of the /bin/bash ./scripts/generate_id.sh step above 107 | - name: "Print license_id" 108 | debug: 109 | msg: "license ID {{ license_id.stdout_lines|join(',')|regex_search('value: \"(.*?)\"') }}" 110 | when: 111 | - is_license_support 112 | - inventory_hostname == groups['store'][0] 113 | 114 | 115 | -------------------------------------------------------------------------------- /roles/scaling_in_dingo/templates/start-store.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mydir="${BASH_SOURCE%/*}" 4 | if [[ ! -d "$mydir" ]]; then mydir="$PWD"; fi 5 | . 
$mydir/shflags 6 | 7 | 8 | DEFINE_string role 'store' 'server role' 9 | DEFINE_boolean clean_db 1 'clean db' 10 | DEFINE_boolean clean_raft 1 'clean raft' 11 | DEFINE_boolean clean_log 0 'clean log' 12 | DEFINE_boolean replace_conf 0 'replace conf' 13 | 14 | # parse the command-line 15 | FLAGS "$@" || exit 1 16 | eval set -- "${FLAGS_ARGV}" 17 | 18 | 19 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 20 | DIST_DIR=$BASE_DIR/dist 21 | 22 | SERVER_HOST={{ inventory_hostname | default('127.0.0.1') }} 23 | SERVER_LISTEN_HOST={{ inventory_hostname | default('127.0.0.1') }} 24 | RAFT_HOST={{ inventory_hostname | default('127.0.0.1') }} 25 | RAFT_LISTEN_HOST={{ inventory_hostname | default('127.0.0.1') }} 26 | COOR_SRV_PEERS={{ dingo_store_store_exchange_connection_list }} 27 | STORE_NUM={{ store_num | int }} 28 | RAFT_START_PORT={{ dingo_store_raft_port | default(20101) }} 29 | SERVER_START_PORT={{ dingo_store_exchange_port | default(20001) }} 30 | DISK_LIST_STR="{{ hostvars[inventory_hostname]['disk'] | default('${DIST_DIR}') }}" 31 | eval DISK_LIST=($DISK_LIST_STR) 32 | TMP_COORDINATOR_SERVICES=$BASE_DIR/build/bin/coor_list 33 | DEFAULT_REPLICA_NUM={{ install_dingo_store_default_replica_num }} 34 | 35 | if [ ! -d "$DIST_DIR" ]; then 36 | mkdir "$DIST_DIR" 37 | fi 38 | 39 | source $mydir/deploy_func.sh 40 | 41 | deploy() { 42 | echo "# dingo-store coordinators" > ${TMP_COORDINATOR_SERVICES} 43 | echo $COOR_SRV_PEERS | tr ',' '\n' >> ${TMP_COORDINATOR_SERVICES} 44 | 45 | for i in $(seq 1 $STORE_NUM) 46 | do 47 | START_ID={{ groups['add_store'].index(inventory_hostname) * 100 }} 48 | INSTANCE_START_ID=`expr 2000 + $i + $START_ID` 49 | N_SERVER_START_PORT=`expr $SERVER_START_PORT + $i - 1` 50 | N_RAFT_START_PORT=`expr $RAFT_START_PORT + $i - 1` 51 | # program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 52 | program_dir=${DISK_LIST[i - 1]}/${FLAGS_role}${i} 53 | if [ ! 
-d "${DISK_LIST[i - 1]}" ]; then 54 | mkdir -p "${DISK_LIST[i - 1]}" 55 | fi 56 | 57 | deploy_store ${FLAGS_role} $BASE_DIR $program_dir $N_SERVER_START_PORT $N_RAFT_START_PORT $INSTANCE_START_ID ${COOR_RAFT_PEERS:-fail} ${TMP_COORDINATOR_SERVICES} 58 | done 59 | } 60 | 61 | 62 | start() { 63 | #FLAGS_role=${FLAGS_role} 64 | for i in $(seq 1 $STORE_NUM) 65 | do 66 | # program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 67 | program_dir=${DISK_LIST[i - 1]}/${FLAGS_role}${i} 68 | # clean log 69 | rm -f ${program_dir}/log/* 70 | start_program ${FLAGS_role} ${program_dir} 71 | done 72 | } 73 | 74 | 75 | 76 | 77 | clean() 78 | { 79 | for i in $(seq 1 $STORE_NUM) 80 | do 81 | program_dir=${DISK_LIST[i - 1]}/${FLAGS_role}${i} 82 | rm -rf ${program_dir} 83 | sleep 1 84 | echo "rm -rf ${program_dir} files" 85 | done 86 | } 87 | 88 | 89 | 90 | 91 | stop() 92 | { 93 | k_pid=$(ps -fu {{ dingo_user }} | grep "dingodb_server --role store" |grep -v grep|awk '{printf $2 "\n"}') 94 | for i in $k_pid 95 | do 96 | echo $i 97 | $(kill -9 $i) 98 | done 99 | } 100 | 101 | usage() 102 | { 103 | echo "Usage: $0 [clean|deploy|start|stop|restart|cleanstart]" 104 | } 105 | 106 | if [ $# -lt 1 ];then 107 | usage 108 | exit 109 | fi 110 | 111 | if [ "$1" = "deploy" ];then 112 | deploy 113 | 114 | elif [ "$1" = "clean" ];then 115 | stop 116 | clean 117 | 118 | elif [ "$1" = "start" ];then 119 | start 120 | 121 | elif [ "$1" = "stop" ];then 122 | stop 123 | 124 | elif [ "$1" = "restart" ];then 125 | stop 126 | start 127 | 128 | elif [ "$1" = "cleanstart" ];then 129 | stop 130 | clean 131 | deploy 132 | start 133 | 134 | else 135 | usage 136 | fi 137 | -------------------------------------------------------------------------------- /roles/dingo_store/templates/start-diskann.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mydir="${BASH_SOURCE%/*}" 4 | if [[ ! -d "$mydir" ]]; then mydir="$PWD"; fi 5 | . $mydir/shflags 6 | 7 | DEFINE_string role 'diskann' 'server role' 8 | DEFINE_boolean clean_db 1 'clean db' 9 | DEFINE_boolean clean_raft 1 'clean raft' 10 | DEFINE_boolean clean_log 0 'clean log' 11 | DEFINE_boolean replace_conf 0 'replace conf' 12 | 13 | # parse the command-line 14 | FLAGS "$@" || exit 1 15 | eval set -- "${FLAGS_ARGV}" 16 | 17 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 18 | DIST_DIR=$BASE_DIR/dist 19 | 20 | SERVER_HOST={{ inventory_hostname | default('127.0.0.1') }} 21 | SERVER_LISTEN_HOST={{ server_listen_host | default('0.0.0.0') }} 22 | RAFT_HOST={{ inventory_hostname | default('127.0.0.1') }} 23 | RAFT_LISTEN_HOST={{ raft_listen_host | default('0.0.0.0') }} 24 | COOR_SRV_PEERS={{ dingo_store_store_exchange_connection_list }} 25 | STORE_NUM={{ diskann_num | int }} 26 | RAFT_START_PORT={{ dingo_store_diskann_raft_port | default(24101) }} 27 | SERVER_START_PORT={{ dingo_store_diskann_exchange_port | default(24001) }} 28 | DISK_LIST_STR="{{ hostvars[inventory_hostname]['disk'] | default('${DIST_DIR}') }}" 29 | eval DISK_LIST=($DISK_LIST_STR) 30 | TMP_COORDINATOR_SERVICES=$BASE_DIR/build/bin/coor_list 31 | DEFAULT_REPLICA_NUM=1 32 | 33 | if [ ! 
-d "$DIST_DIR" ]; then 34 | mkdir "$DIST_DIR" 35 | fi 36 | 37 | source $mydir/deploy_func.sh 38 | 39 | deploy() { 40 | echo "# dingo-store coordinators" > ${TMP_COORDINATOR_SERVICES} 41 | echo $COOR_SRV_PEERS | tr ',' '\n' >> ${TMP_COORDINATOR_SERVICES} 42 | 43 | for i in $(seq 1 $STORE_NUM) 44 | do 45 | START_ID={{ groups['coordinator'].index(inventory_hostname) }} 46 | INSTANCE_START_ID=`expr 4000 + $i + $START_ID` 47 | N_SERVER_START_PORT=`expr $SERVER_START_PORT + $i - 1` 48 | N_RAFT_START_PORT=`expr $RAFT_START_PORT + $i - 1` 49 | # program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 50 | program_dir=${DISK_LIST[i - 1]:-${DISK_LIST[0]}}/${FLAGS_role}${i} 51 | if [ ! -d "${DISK_LIST[i - 1]:-${DISK_LIST[0]}}" ]; then 52 | mkdir -p "${DISK_LIST[i - 1]:-${DISK_LIST[0]}}" 53 | fi 54 | 55 | deploy_store ${FLAGS_role} $BASE_DIR $program_dir $N_SERVER_START_PORT $N_RAFT_START_PORT $INSTANCE_START_ID ${COOR_RAFT_PEERS:-fail} ${TMP_COORDINATOR_SERVICES} 56 | done 57 | } 58 | 59 | start() { 60 | #FLAGS_role=${FLAGS_role} 61 | for i in $(seq 1 $STORE_NUM) 62 | do 63 | # program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 64 | program_dir=${DISK_LIST[i - 1]:-${DISK_LIST[0]}}/${FLAGS_role}${i} 65 | # clean log 66 | rm -f ${program_dir}/log/* 67 | start_program ${FLAGS_role} ${program_dir} 68 | done 69 | } 70 | 71 | clean() 72 | { 73 | for i in $(seq 1 $STORE_NUM) 74 | do 75 | program_dir=${DISK_LIST[i - 1]:-${DISK_LIST[0]}}/${FLAGS_role}${i} 76 | rm -rf ${program_dir} 77 | sleep 1 78 | echo "rm -rf ${program_dir} files" 79 | done 80 | } 81 | 82 | stop() 83 | { 84 | for dir in "${DISK_LIST[@]}"; do 85 | k_pid=$(ps -fu {{ dingo_user }} | grep "${dir}.*dingodb_server.*${FLAGS_role}" |grep -v grep|awk '{printf $2 "\n"}') 86 | for i in $k_pid 87 | do 88 | echo $i 89 | $(kill -9 $i) 90 | done 91 | done 92 | } 93 | 94 | usage() 95 | { 96 | echo "Usage: $0 [clean|deploy|start|stop|restart|cleanstart]" 97 | } 98 | 99 | if [ $# -lt 1 ];then 100 | usage 101 | exit 102 | fi 103 | 104 | if [ "$1" = "deploy" ];then 105 | deploy 106 | 107 | elif [ "$1" = "clean" ];then 108 | stop 109 | clean 110 | 111 | elif [ "$1" = "start" ];then 112 | start 113 | 114 | elif [ "$1" = "stop" ];then 115 | stop 116 | 117 | elif [ "$1" = "restart" ];then 118 | stop 119 | start 120 | 121 | elif [ "$1" = "cleanstart" ];then 122 | stop 123 | clean 124 | deploy 125 | start 126 | 127 | else 128 | usage 129 | fi 130 | -------------------------------------------------------------------------------- /roles/dingo_store/templates/start-store.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mydir="${BASH_SOURCE%/*}" 4 | if [[ ! -d "$mydir" ]]; then mydir="$PWD"; fi 5 | . 
$mydir/shflags 6 | 7 | DEFINE_string role 'store' 'server role' 8 | DEFINE_boolean clean_db 1 'clean db' 9 | DEFINE_boolean clean_raft 1 'clean raft' 10 | DEFINE_boolean clean_log 0 'clean log' 11 | DEFINE_boolean replace_conf 0 'replace conf' 12 | 13 | # parse the command-line 14 | FLAGS "$@" || exit 1 15 | eval set -- "${FLAGS_ARGV}" 16 | 17 | BASE_DIR=$(dirname $(cd $(dirname $0); pwd)) 18 | DIST_DIR=$BASE_DIR/dist 19 | 20 | SERVER_HOST={{ inventory_hostname | default('127.0.0.1') }} 21 | SERVER_LISTEN_HOST={{ server_listen_host | default('0.0.0.0') }} 22 | RAFT_HOST={{ inventory_hostname | default('127.0.0.1') }} 23 | RAFT_LISTEN_HOST={{ raft_listen_host | default('0.0.0.0') }} 24 | COOR_SRV_PEERS={{ dingo_store_store_exchange_connection_list }} 25 | STORE_NUM={{ store_num | int }} 26 | RAFT_START_PORT={{ dingo_store_raft_port | default(20101) }} 27 | SERVER_START_PORT={{ dingo_store_exchange_port | default(20001) }} 28 | DISK_LIST_STR="{{ hostvars[inventory_hostname]['disk'] | default('${DIST_DIR}') }}" 29 | eval DISK_LIST=($DISK_LIST_STR) 30 | TMP_COORDINATOR_SERVICES=$BASE_DIR/build/bin/coor_list 31 | DEFAULT_REPLICA_NUM={{ install_dingo_store_default_replica_num }} 32 | 33 | if [ ! -d "$DIST_DIR" ]; then 34 | mkdir "$DIST_DIR" 35 | fi 36 | 37 | source $mydir/deploy_func.sh 38 | 39 | deploy() { 40 | echo "# dingo-store coordinators" > ${TMP_COORDINATOR_SERVICES} 41 | echo $COOR_SRV_PEERS | tr ',' '\n' >> ${TMP_COORDINATOR_SERVICES} 42 | 43 | for i in $(seq 1 $STORE_NUM) 44 | do 45 | START_ID={{ groups['coordinator'].index(inventory_hostname) }} 46 | INSTANCE_START_ID=`expr 1000 + $i + $START_ID` 47 | N_SERVER_START_PORT=`expr $SERVER_START_PORT + $i - 1` 48 | N_RAFT_START_PORT=`expr $RAFT_START_PORT + $i - 1` 49 | # program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 50 | program_dir=${DISK_LIST[i - 1]:-${DISK_LIST[0]}}/${FLAGS_role}${i} 51 | if [ ! 
-d "${DISK_LIST[i - 1]:-${DISK_LIST[0]}}" ]; then 52 | mkdir -p "${DISK_LIST[i - 1]:-${DISK_LIST[0]}}" 53 | fi 54 | 55 | deploy_store ${FLAGS_role} $BASE_DIR $program_dir $N_SERVER_START_PORT $N_RAFT_START_PORT $INSTANCE_START_ID ${COOR_RAFT_PEERS:-fail} ${TMP_COORDINATOR_SERVICES} 56 | done 57 | } 58 | 59 | start() { 60 | #FLAGS_role=${FLAGS_role} 61 | for i in $(seq 1 $STORE_NUM) 62 | do 63 | # program_dir=$BASE_DIR/dist/${FLAGS_role}${i} 64 | program_dir=${DISK_LIST[i - 1]:-${DISK_LIST[0]}}/${FLAGS_role}${i} 65 | # clean log 66 | rm -f ${program_dir}/log/* 67 | start_program ${FLAGS_role} ${program_dir} 68 | done 69 | } 70 | 71 | clean() 72 | { 73 | for i in $(seq 1 $STORE_NUM) 74 | do 75 | program_dir=${DISK_LIST[i - 1]:-${DISK_LIST[0]}}/${FLAGS_role}${i} 76 | rm -rf ${program_dir} 77 | sleep 1 78 | echo "rm -rf ${program_dir} files" 79 | done 80 | } 81 | 82 | stop() 83 | { 84 | for dir in "${DISK_LIST[@]}"; do 85 | k_pid=$(ps -fu {{ dingo_user }} | grep "${dir}.*dingodb_server.*${FLAGS_role}" |grep -v grep|awk '{printf $2 "\n"}') 86 | for i in $k_pid 87 | do 88 | echo $i 89 | $(kill -9 $i) 90 | done 91 | done 92 | } 93 | 94 | usage() 95 | { 96 | echo "Usage: $0 [clean|deploy|start|stop|restart|cleanstart]" 97 | } 98 | 99 | if [ $# -lt 1 ];then 100 | usage 101 | exit 102 | fi 103 | 104 | if [ "$1" = "deploy" ];then 105 | deploy 106 | 107 | elif [ "$1" = "clean" ];then 108 | stop 109 | clean 110 | 111 | elif [ "$1" = "start" ];then 112 | start 113 | 114 | elif [ "$1" = "stop" ];then 115 | stop 116 | 117 | elif [ "$1" = "restart" ];then 118 | stop 119 | start 120 | 121 | elif [ "$1" = "cleanstart" ];then 122 | stop 123 | clean 124 | deploy 125 | start 126 | 127 | else 128 | usage 129 | fi 130 | --------------------------------------------------------------------------------