PostgreSQL configuration (click to open)
53 |
54 | ```bash
55 | # DB Version: 11
56 | # OS Type: linux
57 | # DB Type: dw
58 | # Total Memory (RAM): 4 GB
59 | # CPUs num: 2
60 | # Connections num: 200
61 | # Data Storage: hdd
62 |
63 | max_connections = 200
64 | shared_buffers = 1GB
65 | effective_cache_size = 3GB
66 | maintenance_work_mem = 512MB
67 | checkpoint_completion_target = 0.9
68 | wal_buffers = 16MB
69 | default_statistics_target = 500
70 | random_page_cost = 4
71 | effective_io_concurrency = 2
72 | work_mem = 2621kB
73 | min_wal_size = 4GB
74 | max_wal_size = 8GB
75 | max_worker_processes = 2
76 | max_parallel_workers_per_gather = 1
77 | max_parallel_workers = 2
78 | ```
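Once these parameters are applied, it is worth verifying that the running instance has actually picked them up (a minimal sketch; the connection parameters mirror the psql invocations used elsewhere in these tests):

```bash
# Compare what the running server reports with the values listed above
psql -U postgres -h /tmp -d zabbix -c "SHOW shared_buffers;"
psql -U postgres -h /tmp -d zabbix -c "SHOW work_mem;"
# Or query several settings at once from pg_settings
psql -U postgres -h /tmp -d zabbix -c "SELECT name, setting, unit FROM pg_settings WHERE name IN ('max_connections','shared_buffers','effective_cache_size','work_mem');"
```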
79 |
80 |
81 |
82 | [https://overload.yandex.net/222008](https://overload.yandex.net/222008)
83 |
84 | ## CONCLUSION
85 | 
86 | First of all, the database itself needs fine tuning. At this stage the pgbouncer connection pooler clearly brings no tangible benefit, since it keeps holding connections to the database open longer than the database itself does.
87 |
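The easiest way to confirm this is to watch the pooler's own counters during a test run (a minimal sketch; the pgbouncer admin port, user and database are assumptions for this particular setup):

```bash
# cl_active/cl_waiting show client connections; sv_active/sv_idle show how many
# server connections the pooler is still holding towards the database
psql -h 127.0.0.1 -p 6432 -U pgbouncer -d pgbouncer -c "SHOW POOLS;"
psql -h 127.0.0.1 -p 6432 -U pgbouncer -d pgbouncer -c "SHOW STATS;"
```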
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/05_consul-cluster/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: edit /etc/hosts (comment line)
3 | replace:
4 | path: "{{ HOSTS_FILE }}"
5 | #regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)'
6 | regexp: '(^(127\.0\.0\.1)(\s*|\t*)\{\{fqdn\}\}(\s*|\t*)(\{\{hostname\}\})(\s*|\t*)$)'
7 | replace: '#\1'
8 | tags:
9 | - update_hosts
10 | - change_name_HOST_NAME_PG_CON_POOL_VIP
11 |
12 | - name: edit2 /etc/hosts (comment line)
13 | replace:
14 | path: "{{ HOSTS_FILE }}"
15 | #regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)'
16 | regexp: '(^(::1)(\s*|\t*)\{\{fqdn\}\}(\s*|\t*)(\{\{hostname\}\})(\s*|\t*)$)'
17 | replace: '#\1'
18 | tags:
19 | - update_hosts
20 | - change_name_HOST_NAME_PG_CON_POOL_VIP
21 |
22 | - name: cloud-init restart
23 | systemd:
24 | name: cloud-init
25 | state: restarted
26 | tags:
27 | - update_hosts
28 | - change_name_HOST_NAME_PG_CON_POOL_VIP
29 |
30 | - name: install jq
31 | yum:
32 | name: jq
33 | state: latest
34 |
35 | - name: download consul
36 | get_url:
37 | url: https://releases.hashicorp.com/consul/{{ CONSUL_VERSION }}/consul_{{ CONSUL_VERSION }}_linux_amd64.zip
38 | dest: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip
39 | mode: '0600'
40 |
41 | - name: extract consul zip-archive
42 | unarchive:
43 | remote_src: yes
44 | src: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip
45 | dest: /usr/local/bin/
46 |
47 | - name: create system group "consul"
48 | group:
49 | name: consul
50 | system: yes
51 | state: present
52 |
53 | - name: add system user "consul"
54 | user:
55 | name: consul
56 | group: consul
57 | shell: /sbin/nologin
58 | home: /var/lib/consul
59 | system: yes
60 |
61 | - name: set permissions for consul data directories
62 | file:
63 | path: "{{ item }}"
64 | state: directory
65 | owner: consul
66 | group: consul
67 | mode: '0775'
68 | with_items:
69 | - /var/lib/consul
70 | - /etc/consul.d
71 |
72 | #- name: create directory for consul config files
73 | # file:
74 | # path: /etc/consul.d
75 | # state: directory
76 | # owner: consul
77 | # group: consul
78 | # mode: '0775'
79 |
80 | - name: put consul-server.service template
81 | template:
82 | src: consul-server.service.j2
83 | dest: /etc/systemd/system/consul-server.service
84 | owner: root
85 | group: root
86 | mode: '0644'
87 |
88 | #generate encryption key that will be used as the "encrypt" entry of ALL CONSUL NODES
89 | #CONSUL_KEY=$(consul keygen); echo $CONSUL_KEY
90 | #sgIjP/24ugFcKfJ5DJ1Ob29dB5jgzwZbXY0lED3RM9w=
91 | #see ansible variable CONSUL_KEY
92 |
93 | - name: put bootstrap consul configuration template
94 | template:
95 | src: consul-server.json.j2
96 | dest: /etc/consul.d/consul-server.json
97 | owner: root
98 | group: root
99 | mode: '0644'
100 |
101 | - name: consul-server restarted
102 | systemd:
103 | daemon_reload: yes
104 | name: consul-server
105 | state: restarted
106 | enabled: yes
107 | tags:
108 | - change_name_HOST_NAME_PG_CON_POOL_VIP
109 |
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/06_pgsql-patroni/01_consul-client/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: edit /etc/hosts (comment line)
3 | replace:
4 | path: "{{ HOSTS_FILE }}"
5 | #regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)'
6 | regexp: '(^(127\.0\.0\.1)(\s*|\t*)\{\{fqdn\}\}(\s*|\t*)(\{\{hostname\}\})(\s*|\t*)$)'
7 | replace: '#\1'
8 | tags:
9 | - update_hosts
10 | - change_name_HOST_NAME_PG_CON_POOL_VIP
11 |
12 | - name: edit2 /etc/hosts (comment line)
13 | replace:
14 | path: "{{ HOSTS_FILE }}"
15 | #regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)'
16 | regexp: '(^(::1)(\s*|\t*)\{\{fqdn\}\}(\s*|\t*)(\{\{hostname\}\})(\s*|\t*)$)'
17 | replace: '#\1'
18 | tags:
19 | - update_hosts
20 | - change_name_HOST_NAME_PG_CON_POOL_VIP
21 |
22 | - name: cloud-init restart
23 | systemd:
24 | name: cloud-init
25 | state: restarted
26 | tags:
27 | - update_hosts
28 | - change_name_HOST_NAME_PG_CON_POOL_VIP
29 |
30 | - name: install jq
31 | yum:
32 | name: jq
33 | state: latest
34 |
35 | - name: download consul
36 | get_url:
37 | url: https://releases.hashicorp.com/consul/{{ CONSUL_VERSION }}/consul_{{ CONSUL_VERSION }}_linux_amd64.zip
38 | dest: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip
39 | mode: '0600'
40 |
41 | - name: extract consul zip-archive
42 | unarchive:
43 | remote_src: yes
44 | src: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip
45 | dest: /usr/local/bin/
46 |
47 | - name: create system group "consul"
48 | group:
49 | name: consul
50 | system: yes
51 | state: present
52 |
53 | - name: add system user "consul"
54 | user:
55 | name: consul
56 | group: consul
57 | shell: /sbin/nologin
58 | home: /var/lib/consul
59 | system: yes
60 |
61 | - name: set permissions for consul data directories
62 | file:
63 | path: "{{ item }}"
64 | state: directory
65 | owner: consul
66 | group: consul
67 | mode: '0775'
68 | with_items:
69 | - /var/lib/consul
70 | - /etc/consul.d
71 |
72 | #- name: create directory for consul config files
73 | # file:
74 | # path: /etc/consul.d
75 | # state: directory
76 | # owner: consul
77 | # group: consul
78 | # mode: '0775'
79 |
80 | - name: copy consul-client.service
81 | copy:
82 | src: consul-client.service
83 | dest: /etc/systemd/system/consul-client.service
84 | owner: root
85 | group: root
86 | mode: '0644'
87 |
88 | #generate encryption key that will be used as the "encrypt" entry of ALL CONSUL NODES
89 | #CONSUL_KEY=$(consul keygen); echo $CONSUL_KEY
90 | #sgIjP/24ugFcKfJ5DJ1Ob29dB5jgzwZbXY0lED3RM9w=
91 | #see ansible variable CONSUL_KEY
92 |
93 | - name: put bootstrap consul configuration template
94 | template:
95 | src: consul-client.json.j2
96 | dest: /etc/consul.d/consul-client.json
97 | owner: root
98 | group: root
99 | mode: '0644'
100 |
101 | - name: consul-client restarted
102 | systemd:
103 | daemon_reload: yes
104 | name: consul-client
105 | state: restarted
106 | enabled: yes
107 | tags:
108 | - change_name_HOST_NAME_PG_CON_POOL_VIP
109 |
--------------------------------------------------------------------------------
/provisioning/roles/03_keepalived-haproxy/templates/haproxy.cfg.j2:
--------------------------------------------------------------------------------
1 | #---------------------------------------------------------------------
2 | # Global settings
3 | #---------------------------------------------------------------------
4 | global
5 | log 127.0.0.1 local0
6 | log 127.0.0.1 local1 notice
7 | #log loghost local0 info
8 |
9 | maxconn 4096
10 | maxsessrate 4096
11 | #chroot /usr/share/haproxy
12 | chroot /var/lib/haproxy
13 | pidfile /var/run/haproxy.pid
14 |
15 | user haproxy
16 | group haproxy
17 |
18 | daemon
19 |
20 | #debug
21 | #quiet
22 |
23 | # turn on stats unix socket
24 | stats socket /var/lib/haproxy/stats
25 |
26 | #---------------------------------------------------------------------
27 | # common defaults that all the 'listen' and 'backend' sections will
28 | # use if not designated in their block
29 | #---------------------------------------------------------------------
30 | defaults
31 | log global
32 | mode http
33 | option httplog
34 | option dontlognull
35 | option http-server-close
36 | option forwardfor except 127.0.0.0/8
37 | retries 3
38 | option redispatch
39 | timeout http-request 10s
40 | timeout queue 1m
41 | timeout connect 10s
42 | timeout client 1m
43 | timeout server 1m
44 | timeout http-keep-alive 10s
45 | timeout check 10s
46 | maxconn 10000
47 | #maxconn 3000
48 | #contimeout 5000
49 | #clitimeout 50000
50 | #srvtimeout 50000
51 |
52 | #---------------------------------------------------------------------
53 | #HAProxy Monitoring Config
54 | #---------------------------------------------------------------------
55 | #HAProxy monitoring runs on port 8080
56 | listen haproxy-stat *:8080
57 | mode http
58 | option forwardfor
59 | option httpclose
60 | stats enable
61 | stats show-legends
62 | stats refresh 15s
63 |
64 | #URL for HAProxy monitoring
65 | stats uri /stats
66 | stats realm Haproxy\ Statistics
67 |
68 | #User and Password for login to the monitoring dashboard
69 | stats auth {{ USER_HAPROXY }}:{{ PASS_FOR_HAPROXY }}
70 | stats admin if TRUE
71 |
72 |   #Optional: default backend for this monitoring listener
73 | default_backend hl-zabbix
74 |
75 | #---------------------------------------------------------------------
76 | # FrontEnd Configuration
77 | #---------------------------------------------------------------------
78 | frontend main
79 | bind *:80
80 | option http-server-close
81 | option forwardfor
82 | default_backend hl-zabbix
83 |
84 | #---------------------------------------------------------------------
85 | # BackEnd roundrobin as balance algorithm
86 | #---------------------------------------------------------------------
87 | backend hl-zabbix
88 | mode http
89 | balance roundrobin
90 | option httpclose
91 | option forwardfor
92 | cookie SERVERNAME insert indirect nocache
93 | server {{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }} {{ HOST_IP_WEB01 }}:8080 maxconn 5000 cookie s1 check
94 | server {{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }} {{ HOST_IP_WEB02 }}:8080 maxconn 5000 cookie s2 check
95 |
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/03_keepalived-haproxy/templates/haproxy.cfg.j2:
--------------------------------------------------------------------------------
1 | #---------------------------------------------------------------------
2 | # Global settings
3 | #---------------------------------------------------------------------
4 | global
5 | log 127.0.0.1 local0
6 | log 127.0.0.1 local1 notice
7 | #log loghost local0 info
8 |
9 | maxconn 4096
10 | maxsessrate 4096
11 | #chroot /usr/share/haproxy
12 | chroot /var/lib/haproxy
13 | pidfile /var/run/haproxy.pid
14 |
15 | user haproxy
16 | group haproxy
17 |
18 | daemon
19 |
20 | #debug
21 | #quiet
22 |
23 | # turn on stats unix socket
24 | stats socket /var/lib/haproxy/stats
25 |
26 | #---------------------------------------------------------------------
27 | # common defaults that all the 'listen' and 'backend' sections will
28 | # use if not designated in their block
29 | #---------------------------------------------------------------------
30 | defaults
31 | log global
32 | mode http
33 | option httplog
34 | option dontlognull
35 | option http-server-close
36 | option forwardfor except 127.0.0.0/8
37 | retries 3
38 | option redispatch
39 | timeout http-request 10s
40 | timeout queue 1m
41 | timeout connect 10s
42 | timeout client 1m
43 | timeout server 1m
44 | timeout http-keep-alive 10s
45 | timeout check 10s
46 | maxconn 10000
47 | #maxconn 3000
48 | #contimeout 5000
49 | #clitimeout 50000
50 | #srvtimeout 50000
51 |
52 | #---------------------------------------------------------------------
53 | #HAProxy Monitoring Config
54 | #---------------------------------------------------------------------
55 | #HAProxy monitoring runs on port 8080
56 | listen haproxy-stat *:8080
57 | mode http
58 | option forwardfor
59 | option httpclose
60 | stats enable
61 | stats show-legends
62 | stats refresh 15s
63 |
64 | #URL for HAProxy monitoring
65 | stats uri /stats
66 | stats realm Haproxy\ Statistics
67 |
68 | #User and Password for login to the monitoring dashboard
69 | stats auth {{ USER_HAPROXY }}:{{ PASS_FOR_HAPROXY }}
70 | stats admin if TRUE
71 |
72 |   #Optional: default backend for this monitoring listener
73 | default_backend hl-zabbix
74 |
75 | #---------------------------------------------------------------------
76 | # FrontEnd Configuration
77 | #---------------------------------------------------------------------
78 | frontend main
79 | bind *:80
80 | option http-server-close
81 | option forwardfor
82 | default_backend hl-zabbix
83 |
84 | #---------------------------------------------------------------------
85 | # BackEnd roundrobin as balance algorithm
86 | #---------------------------------------------------------------------
87 | backend hl-zabbix
88 | mode http
89 | balance roundrobin
90 | option httpclose
91 | option forwardfor
92 | cookie SERVERNAME insert indirect nocache
93 | server {{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }} {{ HOST_IP_WEB01 }}:8080 maxconn 5000 cookie s1 check
94 | server {{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }} {{ HOST_IP_WEB02 }}:8080 maxconn 5000 cookie s2 check
95 |
--------------------------------------------------------------------------------
/provisioning/roles/01_tuning_OS/01_tuning_OS/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install epel
3 | yum:
4 | name:
5 | - epel-release
6 |
7 | - name: install packages
8 | yum:
9 | name:
10 | - chrony
11 | - libselinux-python
12 | - vim
13 | - vim-enhanced
14 | - mc
15 | - screen
16 | - ccze
17 | # - lnav
18 | - redhat-lsb-core
19 | - wget
20 | - yum-utils
21 | - htop
22 | - sudo
23 | - iftop
24 | - net-tools
25 | - elinks
26 | - lynx
27 | - bind-utils
28 | - deltarpm
29 | - lsof
30 | - tree
31 | - traceroute
32 | - tcpdump
33 | - nmap
34 | - unzip
35 | # - iperf3
36 | - lbzip2
37 | - fuse-sshfs
38 | - bash-completion
39 | state: latest
40 | notify:
41 | - chronyd start and enable
42 |
43 | - name: copy .screenrc to root user
44 | copy:
45 | src: screenrc
46 | dest: /root/.screenrc
47 | owner: root
48 | group: root
49 | mode: '0600'
50 |
51 | - name: edit bashrc, vimrc
52 | shell:
53 | echo "alias vi='vim'" >> /root/.bashrc && echo "colorscheme desert" >> /etc/vimrc
54 |
55 | - name: set timezone
56 | timezone:
57 | name: "{{ TIMEZONE_OS }}"
58 |
59 | - name: put SELinux in permissive mode
60 | selinux:
61 | policy: targeted
62 | state: permissive
63 |
64 | - name: add mappings to /etc/hosts
65 | blockinfile:
66 | path: "{{ HOSTS_FILE }}"
67 | block: |
68 | {{ item.ip }} {{ item.name }}.{{ FAKE_DOMAIN }} {{ item.name }}
69 | marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
70 | with_items:
71 |     - { name: "{{ HOST_NAME_BALANCER_VIP }}", ip: "{{ HOST_IP_BALANCER_VIP }}" }
72 |     - { name: "{{ HOST_NAME_BALANCER_01 }}", ip: "{{ HOST_IP_BALANCER_01 }}" }
73 |     - { name: "{{ HOST_NAME_BALANCER_02 }}", ip: "{{ HOST_IP_BALANCER_02 }}" }
74 | #- { name: "{{ HOST_NAME_PG_HAPROXY }}", ip: "{{ HOST_IP_PG_HAPROXY }}" }
75 | - { name: "{{ HOST_NAME_PG_CON_POOL_VIP }}", ip: "{{ HOST_IP_PG_CON_POOL_VIP }}" }
76 | - { name: "{{ HOST_NAME_PG_CON_POOL_01 }}", ip: "{{ HOST_IP_PG_CON_POOL_01 }}" }
77 | - { name: "{{ HOST_NAME_PG_CON_POOL_02 }}", ip: "{{ HOST_IP_PG_CON_POOL_02 }}" }
78 | - { name: "{{ HOST_NAME_DCS_01 }}", ip: "{{ HOST_IP_DCS_01 }}" }
79 | - { name: "{{ HOST_NAME_DCS_02 }}", ip: "{{ HOST_IP_DCS_02 }}" }
80 | - { name: "{{ HOST_NAME_DCS_03 }}", ip: "{{ HOST_IP_DCS_03 }}" }
81 | - { name: "{{ HOST_NAME_PG01 }}", ip: "{{ HOST_IP_PG01 }}" }
82 | - { name: "{{ HOST_NAME_PG02 }}", ip: "{{ HOST_IP_PG02 }}" }
83 | - { name: "{{ HOST_NAME_PG03 }}", ip: "{{ HOST_IP_PG03 }}" }
84 | - { name: "{{ HOST_NAME_PG_VIP }}", ip: "{{ HOST_IP_PG_VIP }}" }
85 | #- { name: "{{ HOST_NAME_DCS }}", ip: "{{ HOST_IP_DCS }}" }
86 | - { name: "{{ HOST_NAME_WEB_VIP }}", ip: "{{ HOST_IP_WEB_VIP }}" }
87 | - { name: "{{ HOST_NAME_WEB01 }}", ip: "{{ HOST_IP_WEB01 }}" }
88 | - { name: "{{ HOST_NAME_WEB02 }}", ip: "{{ HOST_IP_WEB02 }}" }
89 | - { name: "{{ HOST_NAME_HL_CLIENT }}", ip: "{{ HOST_IP_HL_CLIENT }}" }
90 | tags:
91 | - update_hosts
92 |
93 | - name: firewalld disable
94 | service:
95 | name: firewalld
96 | state: stopped
97 | enabled: no
98 |
99 | - name: set the Russian locale on database and zabbix servers
100 | shell: |
101 | localedef -i ru_RU -f UTF-8 ru_RU.UTF-8
102 | localectl set-locale LANG=ru_RU.UTF-8
103 | notify:
104 | - system restart
105 | # when: host in groups['database']
106 | when: ('hl-pg0' in ansible_hostname) or ('hl-zabbix0' in ansible_hostname)
107 |
--------------------------------------------------------------------------------
/tests/tank/files/03_db02_vacuum.md:
--------------------------------------------------------------------------------
1 | # VACUUM FULL
2 |
3 | During further testing, deadlocks in the database became more frequent. Because of this, the bloated tables and indexes were compacted and the physical size of the database files was reduced:
4 |
5 | ```bash
6 | psql -U postgres -h /tmp -d zabbix -c "VACUUM FULL VERBOSE ANALYZE;"
7 | psql -U postgres -h /tmp -d zabbix -c "REINDEX DATABASE zabbix;"
8 | ```
9 |
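To see how much space a full rewrite can actually reclaim, the dead space in each table can be estimated beforehand with the pgstattuple extension (a minimal sketch; public.history is used only as an example):

```bash
psql -U postgres -h /tmp -d zabbix -c "create extension if not exists pgstattuple;"
# dead_tuple_percent and free_percent show how much of the table a rewrite can reclaim
psql -U postgres -h /tmp -d zabbix -c "SELECT * FROM pgstattuple('public.history');"
```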
10 | The same can be achieved without the heavy locks taken by VACUUM FULL, for example with the [pgcompacttable](https://github.com/dataegret/pgcompacttable) utility:
11 |
12 | ```bash
13 | psql -U postgres -h /tmp -d zabbix -c "create extension if not exists pgstattuple;"
14 | pgcompacttable -h /tmp --all --force --verbose
15 | ```
16 |
17 | ## Before compaction
18 | 
19 | Size of the zabbix database:
20 |
21 | ```sql
22 | zabbix=# SELECT pg_size_pretty( pg_database_size( 'zabbix' ) );
23 | pg_size_pretty
24 | ----------------
25 | 1607 MB
26 | (1 строка)
27 | ```
28 |
29 | Size of the 20 largest tables:
30 |
31 | ```sql
32 | SELECT nspname || '.' || relname AS "relation",
33 | pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
34 | FROM pg_class C
35 | LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
36 | WHERE nspname NOT IN ('pg_catalog', 'information_schema')
37 | AND C.relkind <> 'i'
38 | AND nspname !~ '^pg_toast'
39 | ORDER BY pg_total_relation_size(C.oid) DESC
40 | LIMIT 20;
41 | relation | total_size
42 | ---------------------------+------------
43 | public.history | 593 MB
44 | public.history_uint | 447 MB
45 | public.sessions | 238 MB
46 | public.auditlog | 208 MB
47 | public.trends_uint | 48 MB
48 | public.trends | 45 MB
49 | public.events | 2592 kB
50 | public.items | 2280 kB
51 | public.history_str | 1728 kB
52 | public.images | 1184 kB
53 | public.triggers | 672 kB
54 | public.items_applications | 576 kB
55 | public.history_text | 560 kB
56 | public.problem | 400 kB
57 | public.profiles | 400 kB
58 | public.functions | 376 kB
59 | public.event_recovery | 368 kB
60 | public.item_discovery | 304 kB
61 | public.graphs_items | 288 kB
62 | public.graphs | 272 kB
63 | (20 строк)
64 | ```
65 |
66 | ## After compaction
67 | 
68 | Size of the zabbix database:
69 |
70 | ```sql
71 | zabbix=# SELECT pg_size_pretty( pg_database_size( 'zabbix' ) );
72 | pg_size_pretty
73 | ----------------
74 | 1019 MB
75 | (1 строка)
76 | ```
77 |
78 | Size of the 20 largest tables:
79 |
80 | ```sql
81 | SELECT nspname || '.' || relname AS "relation",
82 | pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
83 | FROM pg_class C
84 | LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
85 | WHERE nspname NOT IN ('pg_catalog', 'information_schema')
86 | AND C.relkind <> 'i'
87 | AND nspname !~ '^pg_toast'
88 | ORDER BY pg_total_relation_size(C.oid) DESC
89 | LIMIT 20;
90 | relation | total_size
91 | ---------------------------+------------
92 | public.history | 329 MB
93 | public.history_uint | 226 MB
94 | public.sessions | 194 MB
95 | public.auditlog | 188 MB
96 | public.trends_uint | 31 MB
97 | public.trends | 29 MB
98 | public.items | 1952 kB
99 | public.events | 1232 kB
100 | public.images | 1168 kB
101 | public.history_str | 992 kB
102 | public.triggers | 504 kB
103 | public.items_applications | 464 kB
104 | public.history_text | 336 kB
105 | public.functions | 312 kB
106 | public.graphs_items | 248 kB
107 | public.event_recovery | 240 kB
108 | public.graphs | 224 kB
109 | public.item_discovery | 216 kB
110 | public.hosts | 144 kB
111 | public.item_preproc | 136 kB
112 | (20 строк)
113 | ```
114 |
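In total, VACUUM FULL plus REINDEX reclaimed roughly 590 MB, about 37% of the database size, with the largest savings coming from public.history and public.history_uint.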
--------------------------------------------------------------------------------
/provisioning/roles/04_pgconpool/05_install_odyssey/templates/odyssey.conf.j2:
--------------------------------------------------------------------------------
1 | ###
2 | ### SERVICE
3 | ###
4 |
5 | #daemonize no
6 | #priority -10
7 | # pid_file "/var/run/odyssey.pid"
8 |
9 | unix_socket_dir "/tmp"
10 | unix_socket_mode "0644"
11 |
12 | ###
13 | ### LOGGING
14 | ###
15 |
16 | #log_file "/var/log/odyssey.log"
17 |
18 | log_format "%p %t %l [%i %s] [user - %u, db - %d] (%c) %m\n"
19 | log_config yes
20 | #log_debug yes
21 | #log_session yes
22 | #log_query yes
23 | #log_stats yes
24 | log_debug no
25 | log_session no
26 | log_query no
27 | log_stats no
28 |
29 | #stats_interval 60
30 | stats_interval 300
31 |
32 | ###
33 | ### PERFORMANCE
34 | ###
35 |
36 | #workers 1
37 | #resolvers 1
38 | readahead 8192
39 | cache_coroutine 210
40 |
41 | nodelay yes
42 | keepalive 7200
43 |
44 | ###
45 | ### GLOBAL LIMITS
46 | ###
47 |
48 | #client_max 2000
49 | client_max_routing 32
50 |
51 | ###
52 | ### LISTEN
53 | ###
54 |
55 | listen {
56 | tls "disable"
57 | host "*"
58 | # port 6432
59 | port 5000
60 | }
61 |
62 | ###
63 | ### ROUTING
64 | ###
65 |
66 | storage "local" {
67 | type "local"
68 | #tls "disable"
69 | }
70 |
71 | database "console" {
72 | user "odyssey" {
73 | #authentication "none"
74 | authentication "md5"
75 | password "{{ PASS_ODYSSEY_USER }}"
76 | pool "session"
77 | storage "local"
78 | }
79 | }
80 |
81 | storage "postgres_server" {
82 | type "remote"
83 | tls "disable"
84 | host "{{ HOST_NAME_PG_VIP }}.{{ FAKE_DOMAIN }}"
85 | port 5432
86 | }
87 |
88 | database "postgres" {
89 | user "postgres" {
90 | authentication "md5"
91 | password "{{ PASS_POSTGRES_FOR_DB }}"
92 | storage "postgres_server"
93 | storage_user "postgres"
94 | storage_password "{{ PASS_POSTGRES_FOR_DB }}"
95 | pool "session"
96 | client_max 4
97 | pool_size 0
98 | pool_timeout 0
99 | pool_ttl 60
100 | pool_cancel yes
101 | pool_discard yes
102 | pool_rollback yes
103 | client_fwd_error yes
104 | log_debug no
105 | }
106 | }
107 |
108 | database "template1" {
109 | user "postgres" {
110 | authentication "md5"
111 | password "{{ PASS_POSTGRES_FOR_DB }}"
112 | storage "postgres_server"
113 | storage_user "postgres"
114 | storage_password "{{ PASS_POSTGRES_FOR_DB }}"
115 | pool "session"
116 | client_max 4
117 | pool_size 0
118 | pool_timeout 0
119 | pool_ttl 60
120 | pool_cancel yes
121 | pool_discard yes
122 | pool_rollback yes
123 | client_fwd_error yes
124 | log_debug no
125 | }
126 | }
127 |
128 | database "zabbix" {
129 | user "postgres" {
130 | authentication "md5"
131 | password "{{ PASS_POSTGRES_FOR_DB }}"
132 | storage "postgres_server"
133 | storage_user "postgres"
134 | storage_db "postgres"
135 | storage_password "{{ PASS_POSTGRES_FOR_DB }}"
136 | pool "session"
137 | client_max 10
138 | pool_size 0
139 | pool_timeout 0
140 | pool_ttl 60
141 | pool_cancel yes
142 | #pool_discard no
143 | pool_discard yes
144 | pool_rollback yes
145 | client_fwd_error yes
146 | log_debug no
147 | }
148 | user "zabbix" {
149 | authentication "md5"
150 | password "{{ PASS_ZAB_FOR_DB }}"
151 | storage "postgres_server"
152 | storage_user "zabbix"
153 | storage_password "{{ PASS_ZAB_FOR_DB }}"
154 | #pool "transaction"
155 | pool "session"
156 | client_max 2000
157 | pool_size 185
158 | pool_timeout 0
159 | pool_ttl 60
160 | pool_cancel yes
161 | #pool_discard no
162 | pool_discard yes
163 | pool_rollback yes
164 | client_fwd_error yes
165 | log_debug no
166 | }
167 | }
168 |
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/04_pgconpool/05_install_odyssey/templates/odyssey.conf.j2:
--------------------------------------------------------------------------------
1 | ###
2 | ### SERVICE
3 | ###
4 |
5 | #daemonize no
6 | #priority -10
7 | # pid_file "/var/run/odyssey.pid"
8 |
9 | unix_socket_dir "/tmp"
10 | unix_socket_mode "0644"
11 |
12 | ###
13 | ### LOGGING
14 | ###
15 |
16 | #log_file "/var/log/odyssey.log"
17 |
18 | log_format "%p %t %l [%i %s] [user - %u, db - %d] (%c) %m\n"
19 | log_config yes
20 | #log_debug yes
21 | #log_session yes
22 | #log_query yes
23 | #log_stats yes
24 | log_debug no
25 | log_session no
26 | log_query no
27 | log_stats no
28 |
29 | #stats_interval 60
30 | stats_interval 300
31 |
32 | ###
33 | ### PERFORMANCE
34 | ###
35 |
36 | #workers 1
37 | #resolvers 1
38 | readahead 8192
39 | cache_coroutine 210
40 |
41 | nodelay yes
42 | keepalive 7200
43 |
44 | ###
45 | ### GLOBAL LIMITS
46 | ###
47 |
48 | #client_max 2000
49 | client_max_routing 32
50 |
51 | ###
52 | ### LISTEN
53 | ###
54 |
55 | listen {
56 | tls "disable"
57 | host "*"
58 | # port 6432
59 | port 5000
60 | }
61 |
62 | ###
63 | ### ROUTING
64 | ###
65 |
66 | storage "local" {
67 | type "local"
68 | #tls "disable"
69 | }
70 |
71 | database "console" {
72 | user "odyssey" {
73 | #authentication "none"
74 | authentication "md5"
75 | password "{{ PASS_ODYSSEY_USER }}"
76 | pool "session"
77 | storage "local"
78 | }
79 | }
80 |
81 | storage "postgres_server" {
82 | type "remote"
83 | tls "disable"
84 | host "{{ HOST_NAME_PG_VIP }}.{{ FAKE_DOMAIN }}"
85 | port 5432
86 | }
87 |
88 | database "postgres" {
89 | user "postgres" {
90 | authentication "md5"
91 | password "{{ PASS_POSTGRES_FOR_DB }}"
92 | storage "postgres_server"
93 | storage_user "postgres"
94 | storage_password "{{ PASS_POSTGRES_FOR_DB }}"
95 | pool "session"
96 | client_max 4
97 | pool_size 0
98 | pool_timeout 0
99 | pool_ttl 60
100 | pool_cancel yes
101 | pool_discard yes
102 | pool_rollback yes
103 | client_fwd_error yes
104 | log_debug no
105 | }
106 | }
107 |
108 | database "template1" {
109 | user "postgres" {
110 | authentication "md5"
111 | password "{{ PASS_POSTGRES_FOR_DB }}"
112 | storage "postgres_server"
113 | storage_user "postgres"
114 | storage_password "{{ PASS_POSTGRES_FOR_DB }}"
115 | pool "session"
116 | client_max 4
117 | pool_size 0
118 | pool_timeout 0
119 | pool_ttl 60
120 | pool_cancel yes
121 | pool_discard yes
122 | pool_rollback yes
123 | client_fwd_error yes
124 | log_debug no
125 | }
126 | }
127 |
128 | database "zabbix" {
129 | user "postgres" {
130 | authentication "md5"
131 | password "{{ PASS_POSTGRES_FOR_DB }}"
132 | storage "postgres_server"
133 | storage_user "postgres"
134 | storage_db "postgres"
135 | storage_password "{{ PASS_POSTGRES_FOR_DB }}"
136 | pool "session"
137 | client_max 10
138 | pool_size 0
139 | pool_timeout 0
140 | pool_ttl 60
141 | pool_cancel yes
142 | #pool_discard no
143 | pool_discard yes
144 | pool_rollback yes
145 | client_fwd_error yes
146 | log_debug no
147 | }
148 | user "zabbix" {
149 | authentication "md5"
150 | password "{{ PASS_ZAB_FOR_DB }}"
151 | storage "postgres_server"
152 | storage_user "zabbix"
153 | storage_password "{{ PASS_ZAB_FOR_DB }}"
154 | #pool "transaction"
155 | pool "session"
156 | client_max 2000
157 | pool_size 185
158 | pool_timeout 0
159 | pool_ttl 60
160 | pool_cancel yes
161 | #pool_discard no
162 | pool_discard yes
163 | pool_rollback yes
164 | client_fwd_error yes
165 | log_debug no
166 | }
167 | }
168 |
--------------------------------------------------------------------------------
/provisioning/roles/06_pgsql-patroni/03_pgsql_optimization/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set optimized postgresql parameters and restart the current patroni node
3 | shell: |
4 | patronictl -c /etc/patroni.yml edit-config \
5 | --pg max_connections=200 \
6 | --pg shared_buffers=128MB \
7 | --pg effective_cache_size=384MB \
8 | --pg maintenance_work_mem=32MB \
9 | --pg checkpoint_completion_target=0.9 \
10 | --pg wal_buffers=3932kB \
11 | --pg default_statistics_target=100 \
12 | --pg random_page_cost=4 \
13 | --pg effective_io_concurrency=2 \
14 | --pg work_mem=327kB \
15 | --pg min_wal_size=2GB \
16 | --pg max_wal_size=4GB \
17 | --pg checkpoint_timeout=5min \
18 | --pg synchronous_commit=off \
19 | --pg wal_compression=on \
20 | --force
21 | sleep 5
22 | patronictl -c /etc/patroni.yml restart {{ FAKE_DOMAIN }} {{ ansible_hostname }} --force
23 | tags: patroni_edit-config
24 |
25 | - name: waiting until postgresql is up and running
26 | wait_for:
27 | port: 5432
28 | host: "{{ ansible_hostname }}"
29 | delay: 10
30 | timeout: 120
31 |
32 | - name: patroni stop
33 | systemd:
34 | name: patroni
35 | state: stopped
36 |
37 | - name: create cron entry "disabling transparent_hugepage on reboot"
38 | cron:
39 | name: "disabling transparent_hugepage on reboot"
40 | special_time: reboot
41 | job: "/usr/bin/echo never > /sys/kernel/mm/transparent_hugepage/enabled"
42 |
43 | - name: create cron entry "disabling defrag transparent_hugepage on reboot"
44 | cron:
45 | name: "disabling defrag transparent_hugepage on reboot"
46 | special_time: reboot
47 | job: "/usr/bin/echo never > /sys/kernel/mm/transparent_hugepage/defrag"
48 |
49 | - name: disabling transparent huge pages
50 | shell: |
51 | echo never > /sys/kernel/mm/transparent_hugepage/enabled
52 | echo never > /sys/kernel/mm/transparent_hugepage/defrag
53 |
54 | - name: get uid postgres user
55 | shell: id -u postgres
56 | register: uid_postgres
57 |
58 | #- name: debug registered var
59 | # debug: var=uid_postgres.stdout
60 | #- name: test shell for registered var
61 | # shell: echo {{ uid_postgres.stdout }} > /tmp/testfie
62 |
63 | - name: get gid postgres user
64 | shell: id -g postgres
65 | register: gid_postgres
66 |
67 | - name: edit fstab - mounting the pg_stat_tmp directory in RAM
68 | mount:
69 | path: /var/lib/pgsql/11/data/pg_stat_tmp
70 | src: tmpfs
71 | fstype: tmpfs
72 | opts: noatime,nodiratime,defaults,size=16M,mode=700,uid={{ uid_postgres.stdout }},gid={{ gid_postgres.stdout }}
73 | state: mounted
74 |
75 | - name: edit fstab - add noatime mount option to root partition
76 | lineinfile:
77 | path: /etc/fstab
78 | backup: yes
79 | backrefs: yes
80 | regexp: '^(UUID=[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\s+\/\s+xfs\s+)(defaults)(\s+0\s+0)$'
81 | #regexp: '^(\/dev\/mapper\/centos-root\s+\/\s+xfs\s+)(defaults)(\s+0\s+0)$'
82 | line: '\1\2,noatime\3'
83 | register: fstab
84 | tags: update_fstab_to_pg0X
85 |
86 | #- name: if /etc/fstab changed, remount root partition
87 | # shell: mount / -v -o remount
88 | # when: fstab.changed
89 | # tags: update_fstab_to_pg0X
90 |
91 | - name: if /etc/fstab changed, remount root partition
92 | mount:
93 | path: /
94 | state: remounted
95 | when: fstab.changed
96 | tags: update_fstab_to_pg0X
97 |
98 | - name: create /etc/sysctl.d/30-postgresql.conf
99 | file:
100 | path: /etc/sysctl.d/30-postgresql.conf
101 | owner: root
102 | group: root
103 | mode: '0644'
104 | state: touch
105 |
106 | - name: edit /etc/sysctl.d/30-postgresql.conf
107 | sysctl:
108 | name: "{{ item.key }}"
109 | value: "{{ item.value }}"
110 | sysctl_file: /etc/sysctl.d/30-postgresql.conf
111 | sysctl_set: yes
112 | state: present
113 | reload: yes
114 | with_items:
115 | - { key: "vm.nr_hugepages", value: "196" }
116 | - { key: "vm.hugetlb_shm_group", value: "{{ gid_postgres.stdout }}" }
117 | - { key: "kernel.sched_migration_cost_ns", value: "5000000" }
118 |
119 | - name: patroni start
120 | systemd:
121 | name: patroni
122 | state: started
123 |
--------------------------------------------------------------------------------