├── .gitignore
├── README.md
├── hw01
│   ├── .config
│   ├── README.md
│   └── yum.log
├── hw02-1
│   ├── README.md
│   └── Vagrantfile
├── hw02
│   ├── README.md
│   ├── Vagrantfile
│   ├── mdadm.conf
│   └── scripts
│       └── create-raid
├── hw03
│   ├── README.md
│   ├── Vagrantfile
│   ├── dracut
│   │   └── module.d
│   │       ├── 91resizeroot
│   │       │   ├── module-setup.sh
│   │       │   └── resizeroot-local.sh
│   │       └── 91resizerootxfs
│   │           ├── module-setup.sh
│   │           └── resizerootxfs-local.sh
│   ├── lvm_practice.txt
│   └── scripts
│       ├── add-dracut-module
│       └── create-raid
├── hw04
│   ├── README.md
│   └── Vagrantfile
├── hw05
│   ├── README.md
│   ├── logs
│   │   ├── access.log
│   │   └── error.log
│   └── web_log_email_notify.sh
├── hw06
│   ├── README.md
│   ├── Vagrantfile
│   ├── httpd@.service
│   ├── jira
│   ├── jira.png
│   ├── jira.service
│   ├── monitor-timer.service
│   ├── monitor-timer.target
│   ├── monitor-timer.timer
│   ├── phpvbox.png
│   ├── spawn-fcgi
│   └── spawn-fcgi.service
├── hw07
│   ├── README.md
│   └── holidays_acct
├── hw08
│   ├── README.md
│   ├── Vagrantfile
│   ├── docker-compose.yml
│   ├── nginx.spec
│   ├── pic01.png
│   ├── pic02.png
│   ├── pic03.png
│   ├── pic04.png
│   ├── prometheus.yml
│   └── stat
│       ├── Dockerfile
│       ├── default.conf
│       ├── nginx.conf
│       └── vts.repo
├── hw09
│   ├── README.md
│   ├── iotest.sh
│   ├── pic1.png
│   ├── pic2.png
│   └── prcss.sh
├── hw10
│   ├── README.md
│   ├── Vagrantfile
│   ├── add_local_user.yml
│   ├── ansible
│   │   ├── ansible.cfg
│   │   ├── group_vars
│   │   │   ├── ipa_clients.yml
│   │   │   └── ipa_master.yml
│   │   ├── hosts.txt
│   │   ├── ipa-cli-role.yml
│   │   ├── ipa-srv-role.yml
│   │   ├── ipa_clients_install.yml
│   │   ├── ipa_server_install.yml
│   │   ├── ping.yml
│   │   └── roles
│   │       ├── add_epel_repo
│   │       │   ├── README.md
│   │       │   ├── defaults
│   │       │   │   └── main.yml
│   │       │   └── tasks
│   │       │       └── main.yml
│   │       ├── deploy_ipa_client
│   │       │   ├── README.md
│   │       │   ├── defaults
│   │       │   │   └── main.yml
│   │       │   └── tasks
│   │       │       └── main.yml
│   │       └── deploy_ipa_server
│   │           ├── README.md
│   │           ├── defaults
│   │           │   └── main.yml
│   │           └── tasks
│   │               └── main.yml
│   ├── img
│   │   ├── pic01.png
│   │   └── pic02.png
│   └── ipa-srv-cli-cmd.sh
├── hw11
│   ├── README.md
│   ├── ansible.cfg
│   ├── deploy-nginx.yml
│   ├── hosts.txt
│   ├── img
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   └── pic03.png
│   └── roles
│       ├── add_local_repo
│       │   ├── README.md
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── files
│       │   │   └── vts.repo
│       │   └── tasks
│       │       └── main.yml
│       └── deploy_nginx_server
│           ├── README.md
│           ├── defaults
│           │   └── main.yml
│           ├── files
│           │   └── nginx.service
│           ├── handlers
│           │   └── main.yml
│           ├── tasks
│           │   └── main.yml
│           └── templates
│               ├── default.conf.j2
│               ├── index.html.j2
│               └── nginx.conf.j2
├── hw12
│   ├── README.md
│   ├── Vagrantfile
│   ├── add_local_user.yml
│   ├── ansible.cfg
│   ├── hosts.txt
│   ├── roles
│   │   ├── add_repos
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── deploy_monitoring_tools
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── setup_atop_service
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── atop.j2
│   │   ├── tune_kernel
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── sysctl.conf.j2
│   │   └── upgrade_kernel
│   │       ├── README.md
│   │       ├── defaults
│   │       │   └── main.yml
│   │       └── tasks
│   │           └── main.yml
│   └── setup-new-host.yml
├── hw13
│   ├── README.md
│   ├── Vagrantfile
│   ├── add_local_user.yml
│   ├── ansible.cfg
│   ├── hosts.txt
│   ├── pic
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   └── pic03.png
│   ├── roles
│   │   ├── add_bacula
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       ├── bacula-dir.conf.j2
│   │   │       ├── bacula-sd.conf.j2
│   │   │       ├── bconsole.conf.j2
│   │   │       └── pgpass.j2
│   │   ├── add_bacula_client
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── bacula-fd.conf.j2
│   │   ├── add_pgsql11
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── pg_hba.conf.j2
│   │   └── add_repos
│   │       ├── README.md
│   │       ├── defaults
│   │       │   └── main.yml
│   │       └── tasks
│   │           └── main.yml
│   ├── setup-bacula-client.yml
│   └── setup-bacula-server.yml
├── hw14
│   ├── README.md
│   ├── Vagrantfile
│   ├── add_local_user.yml
│   ├── ansible.cfg
│   ├── hosts.txt
│   ├── pic
│   │   └── pic01.png
│   └── roles
│       ├── conf_audit_client
│       │   ├── README.md
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── templates
│       │       ├── au-remote.conf.j2
│       │       ├── audisp-remote.conf.j2
│       │       ├── audispd.conf.j2
│       │       ├── audit.rules.j2
│       │       └── auditd.conf.j2
│       ├── conf_audit_client_syslog
│       │   ├── README.md
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── templates
│       │       ├── audit.rules.j2
│       │       ├── auditd.conf.j2
│       │       ├── syslog.conf.j2
│       │       └── syslog.plugin.conf.j2
│       ├── conf_audit_server
│       │   ├── README.md
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── templates
│       │       └── auditd.conf.j2
│       ├── conf_rsyslog_client
│       │   ├── README.md
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── meta
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   └── main.yml
│       │   ├── templates
│       │   │   ├── client-include-nginx.conf.j2
│       │   │   ├── nginx.conf.j2
│       │   │   └── rsyslog.conf.j2
│       │   ├── tests
│       │   │   ├── inventory
│       │   │   └── test.yml
│       │   └── vars
│       │       └── main.yml
│       ├── conf_rsyslog_server
│       │   ├── README.md
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── templates
│       │       ├── rsyslog.conf.j2
│       │       └── srv-include-nginx.conf.j2
│       └── deploy_elk_syslog
│           ├── README.md
│           ├── defaults
│           │   └── main.yml
│           ├── handlers
│           │   └── main.yml
│           └── tasks
│               └── main.yml
├── hw15
│   ├── README.md
│   ├── Riemann
│   │   ├── dash
│   │   │   ├── dash.json
│   │   │   └── dash.rb
│   │   ├── default.conf
│   │   └── riemann.conf
│   ├── Vagrantfile
│   ├── add_local_user.yml
│   ├── ansible.cfg
│   ├── hosts.txt
│   ├── pic
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   ├── pic03.png
│   │   ├── pic04.png
│   │   └── pic05.png
│   ├── roles
│   │   ├── add_grafana
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── grafana.ini.j2
│   │   ├── add_nginx_server
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── files
│   │   │   │   └── nginx.service
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       ├── default.conf.j2
│   │   │       ├── index.html.j2
│   │   │       └── nginx.conf.j2
│   │   ├── add_node_exporter
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   ├── install.yml
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── node_exporter.service.j2
│   │   ├── add_prometheus
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   ├── configure.yml
│   │   │   │   ├── install.yml
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       ├── alert.rules.j2
│   │   │       ├── prometheus.service.j2
│   │   │       └── prometheus.yml.j2
│   │   └── add_repos
│   │       ├── README.md
│   │       ├── defaults
│   │       │   └── main.yml
│   │       └── tasks
│   │           └── main.yml
│   ├── setup-node-exp-client.yml
│   └── setup-prom-server.yml
├── hw16
│   ├── README.md
│   ├── Vagrantfile
│   ├── add_local_user.yml
│   ├── network.yml
│   └── pic
│       ├── pic01.png
│       ├── pic02.png
│       ├── pic03.png
│       ├── pic04.png
│       ├── pic05.png
│       ├── pic06.png
│       ├── pic07.png
│       ├── pic08.png
│       ├── pic09.png
│       ├── pic10.png
│       ├── pic11.png
│       └── pic12.png
├── hw17
│   ├── README.md
│   ├── docker-compose
│   │   ├── code
│   │   │   └── index.php
│   │   ├── default-php.conf
│   │   └── docker-compose.yml
│   ├── docker
│   │   ├── nginx
│   │   │   ├── Dockerfile
│   │   │   ├── default.conf
│   │   │   └── nginx.conf
│   │   └── php
│   │       └── Dockerfile
│   └── pic
│       └── pic01.png
├── hw18
│   ├── README.md
│   ├── Vagrantfile
│   ├── add_local_user.yml
│   ├── network-nmcli.yml
│   ├── network.yml
│   └── pic
│       ├── pic01.gif
│       └── pic02.gif
├── hw19
│   ├── README.md
│   ├── Vagrantfile
│   ├── add_local_user.yml
│   ├── etc
│   │   ├── daemons
│   │   └── hosts
│   ├── network-nmcli.yml
│   ├── network.yml
│   ├── pic
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   └── pic03.png
│   ├── router1
│   │   ├── ospfd.conf
│   │   └── zebra.conf
│   ├── router2
│   │   ├── ospfd.conf
│   │   └── zebra.conf
│   └── router3
│       ├── ospfd.conf
│       └── zebra.conf
├── hw20
│   ├── README.md
│   ├── Vagrantfile
│   ├── ca
│   │   ├── 5in1
│   │   ├── client.conf
│   │   ├── client.tmpl
│   │   ├── server.conf
│   │   └── vars
│   ├── etc
│   │   ├── daemons
│   │   └── hosts
│   ├── fetch.yml
│   ├── pic
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   └── pic03.png
│   ├── router1
│   │   ├── ospfd.conf
│   │   └── zebra.conf
│   ├── router2
│   │   ├── ospfd.conf
│   │   └── zebra.conf
│   ├── router3
│   │   ├── ospfd.conf
│   │   └── zebra.conf
│   ├── secret
│   │   └── static.key
│   ├── tap
│   │   ├── client.conf
│   │   └── server.conf
│   └── tun
│       ├── client.conf
│       └── server.conf
├── hw21
│   ├── README.md
│   ├── Vagrantfile
│   └── provisioning
│       ├── client-motd
│       ├── client-resolv.conf
│       ├── master-named.conf
│       ├── named.client.dns.lab
│       ├── named.client.dns.lab.rev
│       ├── named.ddns.lab
│       ├── named.dns.lab
│       ├── named.dns.lab.rev
│       ├── named.newdns.lab
│       ├── named.zonetransfer.key
│       ├── playbook.yml
│       ├── rndc.conf
│       ├── servers-resolv.conf
│       ├── slave-named.conf
│       └── zonetransfer.key
├── hw22
│   ├── README.md
│   ├── Vagrantfile
│   └── pic
│       ├── pic01.png
│       ├── pic02.png
│       └── pic03.png
├── hw23
│   ├── README.md
│   ├── default.conf
│   ├── nginx.conf
│   └── pic
│       ├── pic01.png
│       └── pic02.png
├── hw24
│   ├── Docker
│   │   ├── .env
│   │   ├── docker-compose.yml
│   │   ├── js
│   │   │   └── cluster-setup.js
│   │   ├── router.env
│   │   └── shell-entrypoint.sh
│   ├── README.md
│   ├── Swarm
│   │   ├── Vagrantfile
│   │   ├── docker-ce.repo
│   │   ├── playbook.retry
│   │   ├── playbook.yml
│   │   ├── swdocker
│   │   │   ├── mysql-cluster.yml
│   │   │   └── port.yml
│   │   └── worker_token.yml
│   ├── Vagrantfile
│   ├── pic
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   ├── pic03.png
│   │   ├── pic04.png
│   │   └── pic05.png
│   └── provisioning
│       ├── dns-server.yml
│       ├── etc
│       │   ├── hosts
│       │   ├── named.conf
│       │   ├── named.otus.test
│       │   ├── named.otus.test.rev
│       │   ├── named.zonetransfer.key
│       │   └── resolv.conf
│       ├── mysql-router
│       │   └── cluster-setup.js
│       ├── mysql-server
│       │   ├── .my.cnf
│       │   └── mysqld.te
│       ├── playbook.retry
│       ├── playbook.yml
│       ├── selinux.yml
│       └── vars.yml
├── hw25
│   ├── README.md
│   ├── Vagrantfile
│   ├── pic
│   │   ├── pic01.png
│   │   └── pic02.png
│   └── provisioning
│       ├── dns-server.yml
│       ├── dump
│       │   └── db.sql
│       ├── etc
│       │   ├── NetworkManager.conf
│       │   ├── hosts
│       │   ├── named.conf
│       │   ├── named.otus.test
│       │   ├── named.otus.test.rev
│       │   ├── named.zonetransfer.key
│       │   └── resolv.conf
│       ├── mysql-server
│       │   ├── .my.cnf
│       │   ├── master.my.cnf
│       │   ├── mysqld.te
│       │   └── slave.my.cnf
│       ├── playbook.retry
│       ├── playbook.yml
│       ├── selinux.yml
│       └── vars.yml
├── hw26
│   ├── README.md
│   ├── Vagrantfile
│   ├── pic
│   │   ├── pic01.png
│   │   └── pic02.png
│   └── provision
│       ├── locale.conf
│       ├── playbook.retry
│       ├── playbook.yml
│       └── roles
│           ├── dns
│           │   ├── README.md
│           │   ├── files
│           │   │   ├── NetworkManager.conf
│           │   │   ├── named.conf
│           │   │   ├── named.otus.test
│           │   │   ├── named.otus.test.rev
│           │   │   ├── named.zonetransfer.key
│           │   │   └── resolv.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── firewall
│           │   ├── README.md
│           │   ├── files
│           │   │   └── kerberos.xml
│           │   └── tasks
│           │       └── main.yml
│           ├── kerberos-client
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── krb.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── kerberos
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   ├── kadm.acl
│           │   │   ├── kdc.conf
│           │   │   └── krb.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── nfs4-client
│           │   ├── README.md
│           │   ├── files
│           │   │   ├── auto.master
│           │   │   └── auto.nfs
│           │   └── tasks
│           │       └── main.yml
│           ├── nfs4
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── exports
│           │   └── tasks
│           │       └── main.yml
│           ├── ntp-client
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── chrony.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── ntp
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── chrony.conf
│           │   └── tasks
│           │       └── main.yml
│           └── samba
│               ├── README.md
│               ├── defaults
│               │   └── main.yml
│               ├── files
│               │   └── smb.conf
│               └── tasks
│                   └── main.yml
├── hw27
│   ├── README.md
│   ├── Vagrantfile
│   ├── pic
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   ├── pic03.png
│   │   └── pic04.png
│   └── provision
│       ├── locale.conf
│       ├── playbook.yml
│       └── roles
│           ├── dns
│           │   ├── README.md
│           │   ├── files
│           │   │   ├── NetworkManager.conf
│           │   │   ├── named.conf
│           │   │   ├── named.otus.test
│           │   │   ├── named.otus.test.rev
│           │   │   ├── named.zonetransfer.key
│           │   │   └── resolv.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── dovecot
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   ├── .pinerc
│           │   │   ├── 10-auth.conf
│           │   │   ├── 10-mail.conf
│           │   │   ├── 10-ssl.conf
│           │   │   └── dovecot.conf
│           │   ├── tasks
│           │   │   └── main.yml
│           │   └── templates
│           │       └── users.j2
│           ├── ntp-client
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── chrony.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── ntp
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── chrony.conf
│           │   └── tasks
│           │       └── main.yml
│           └── postfix
│               ├── README.md
│               ├── defaults
│               │   └── main.yml
│               ├── files
│               │   ├── main.cf
│               │   ├── opendkim.conf
│               │   └── opendkim
│               │       ├── KeyTable
│               │       ├── SigningTable
│               │       ├── TrustedHosts
│               │       └── keys
│               │           └── otus.test.private
│               └── tasks
│                   └── main.yml
├── hw28
│   ├── .smbdeleteAAA805c9
│   ├── README.md
│   ├── Vagrantfile
│   ├── pic
│   │   ├── Thumbs.db
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   ├── pic03.png
│   │   ├── pic04.png
│   │   ├── pic05-1.png
│   │   ├── pic05-2.png
│   │   └── pic06.png
│   └── provision
│       ├── environment
│       ├── locale.conf
│       ├── playbook.retry
│       ├── playbook.yml
│       └── roles
│           ├── dns
│           │   ├── README.md
│           │   ├── files
│           │   │   ├── NetworkManager.conf
│           │   │   ├── named.conf
│           │   │   ├── named.otus.test
│           │   │   ├── named.otus.test.rev
│           │   │   ├── named.zonetransfer.key
│           │   │   └── resolv.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── kerberos-client
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── krb.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── kerberos
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   ├── kadm.acl
│           │   │   ├── kdc.conf
│           │   │   └── krb.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── nfs4-client
│           │   ├── README.md
│           │   ├── files
│           │   │   ├── auto.master
│           │   │   └── auto.nfs
│           │   └── tasks
│           │       └── main.yml
│           ├── nfs4
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── exports
│           │   └── tasks
│           │       └── main.yml
│           ├── ntp-client
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── chrony.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── ntp
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── chrony.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── pgsql11-primary
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── handlers
│           │   │   └── main.yml
│           │   ├── tasks
│           │   │   └── main.yml
│           │   └── templates
│           │       ├── pg_hba.conf.j2
│           │       ├── pg_ident.conf.j2
│           │       ├── pgbackrest.conf.j2
│           │       └── postgresql.primary.conf.j2
│           └── pgsql11-standby
│               ├── README.md
│               ├── defaults
│               │   └── main.yml
│               ├── handlers
│               │   └── main.yml
│               ├── tasks
│               │   └── main.yml
│               └── templates
│                   ├── pgbackrest.conf.j2
│                   ├── postgresql.standby.conf.j2
│                   └── recovery.conf.j2
├── hw29
│   ├── README.md
│   ├── Vagrantfile
│   ├── pic
│   │   ├── pic01.png
│   │   ├── pic02.png
│   │   └── pic03.png
│   └── provision
│       ├── environment
│       ├── locale.conf
│       ├── playbook.yml
│       └── roles
│           ├── dns
│           │   ├── README.md
│           │   ├── files
│           │   │   ├── NetworkManager.conf
│           │   │   ├── named.conf
│           │   │   ├── named.otus.test
│           │   │   ├── named.otus.test.rev
│           │   │   ├── named.zonetransfer.key
│           │   │   └── resolv.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── etcd
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── handlers
│           │   │   └── main.yml
│           │   ├── tasks
│           │   │   └── main.yml
│           │   └── templates
│           │       └── etcd.conf.j2
│           ├── haproxy
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── handlers
│           │   │   └── main.yml
│           │   ├── tasks
│           │   │   └── main.yml
│           │   └── templates
│           │       ├── haproxy.cfg.j2
│           │       └── haproxy.conf.j2
│           ├── ntp-client
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── chrony.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── ntp
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── files
│           │   │   └── chrony.conf
│           │   └── tasks
│           │       └── main.yml
│           ├── patroni
│           │   ├── README.md
│           │   ├── defaults
│           │   │   └── main.yml
│           │   ├── handlers
│           │   │   └── main.yml
│           │   ├── tasks
│           │   │   └── main.yml
│           │   └── templates
│           │       ├── patroni.service.j2
│           │       └── patroni.yml.j2
│           └── pgsql11
│               ├── README.md
│               ├── defaults
│               │   └── main.yml
│               ├── handlers
│               │   └── main.yml
│               ├── tasks
│               │   └── main.yml
│               └── templates
│                   ├── pg_hba.conf.j2
│                   ├── pg_ident.conf.j2
│                   ├── pgbackrest.conf.j2
│                   └── postgresql.primary.conf.j2
└── project
    ├── README.md
    ├── Vagrantfile
    ├── docs
    │   ├── Erik_Nordtrom_GrafanaCon_EU_2018.pdf
    │   ├── LizardFS.pdf
    │   ├── grafana.sql
    │   └── whitepaper_lizard_v3.12_web.pdf
    ├── pic
    │   ├── pic_cloud01.png
    │   ├── pic_db01.png
    │   ├── pic_grafana01.png
    │   ├── pic_grafana02.png
    │   ├── pic_lizard01.png
    │   ├── pic_lizard02.png
    │   ├── pic_lizard03.png
    │   ├── pic_lizard04.png
    │   ├── pic_monitoring01.png
    │   ├── pic_monitoring02.png
    │   ├── pic_monitoring03.png
    │   ├── pic_monitoring04.jpg
    │   ├── pic_stand01.png
    │   ├── pic_stand02.png
    │   ├── pic_stand03.png
    │   └── pic_stand04.png
    ├── provision
    │   ├── environment
    │   ├── locale.conf
    │   ├── playbook.yml
    │   └── roles
    │       ├── audit-client
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── au-remote.conf.j2
    │       │       ├── audisp-remote.conf.j2
    │       │       ├── audispd.conf.j2
    │       │       ├── audit.rules.j2
    │       │       └── auditd.conf.j2
    │       ├── audit-server
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       └── auditd.conf.j2
    │       ├── autofs
    │       │   ├── README.md
    │       │   ├── files
    │       │   │   ├── auto.data
    │       │   │   └── auto.master
    │       │   └── tasks
    │       │       └── main.yml
    │       ├── dns
    │       │   ├── README.md
    │       │   ├── files
    │       │   │   ├── NetworkManager.conf
    │       │   │   ├── named.conf
    │       │   │   ├── named.otus.test
    │       │   │   ├── named.otus.test.rev
    │       │   │   ├── named.zonetransfer.key
    │       │   │   └── resolv.conf
    │       │   └── tasks
    │       │       └── main.yml
    │       ├── etcd
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       └── etcd.conf.j2
    │       ├── grafana
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── files
    │       │   │   ├── dashboard.json
    │       │   │   └── sample_querys.txt
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       └── grafana.ini.j2
    │       ├── haproxy
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── haproxy.cfg.j2
    │       │       └── haproxy.conf.j2
    │       ├── kerberos-client
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── files
    │       │   │   └── krb.conf
    │       │   └── tasks
    │       │       └── main.yml
    │       ├── kerberos
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── files
    │       │   │   ├── kadm.acl
    │       │   │   ├── kdc.conf
    │       │   │   └── krb.conf
    │       │   └── tasks
    │       │       └── main.yml
    │       ├── lizard-client
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── files
    │       │   │   └── mount.mfs
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       └── mfsmount.cfg.j2
    │       ├── lizard-master
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── files
    │       │   │   ├── lizardfs-uraft.service
    │       │   │   ├── metadata.nfs
    │       │   │   ├── mfsexports.cfg
    │       │   │   ├── mfsgoals.cfg
    │       │   │   └── mfstopology.cfg
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── lizardfs-uraft.cfg.j2
    │       │       └── mfsmaster.cfg.j2
    │       ├── lizard-node
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── mfschunkserver.cfg.j2
    │       │       ├── mfshdd.cfg.j2
    │       │       └── mfsmetalogger.cfg.j2
    │       ├── lvm2
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   └── tasks
    │       │       └── main.yml
    │       ├── netdata-central
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── netdata.conf.j2
    │       │       └── stream.conf.j2
    │       ├── netdata-client
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── netdata.conf.j2
    │       │       └── stream.conf.j2
    │       ├── nginx
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── files
    │       │   │   └── nginx.service
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── default.conf.j2
    │       │       ├── index.html.j2
    │       │       └── nginx.conf.j2
    │       ├── ntp-client
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── files
    │       │   │   └── chrony.conf
    │       │   └── tasks
    │       │       └── main.yml
    │       ├── ntp
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── files
    │       │   │   └── chrony.conf
    │       │   └── tasks
    │       │       └── main.yml
    │       ├── patroni
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── patroni.service.j2
    │       │       └── patroni.yml.j2
    │       ├── pgbackrest
    │       │   ├── README.md
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       └── pgbackrest.conf.j2
    │       ├── pgsql11
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── pg_hba.conf.j2
    │       │       ├── pg_ident.conf.j2
    │       │       ├── pgbackrest.conf.j2
    │       │       └── postgresql.primary.conf.j2
    │       ├── prometheus
    │       │   ├── README.md
    │       │   ├── defaults
    │       │   │   └── main.yml
    │       │   ├── handlers
    │       │   │   └── main.yml
    │       │   ├── tasks
    │       │   │   ├── configure.yml
    │       │   │   ├── install.yml
    │       │   │   └── main.yml
    │       │   └── templates
    │       │       ├── alert.rules.j2
    │       │       ├── prometheus-postgresql-adapter.service.j2
    │       │       ├── prometheus.service.bak
    │       │       ├── prometheus.service.j2
    │       │       └── prometheus.yml.j2
    │       └── timescaledb
    │           ├── README.md
    │           ├── defaults
    │           │   └── main.yml
    │           ├── files
    │           │   ├── install.sh
    │           │   ├── pg_prometheus.tar.gz
    │           │   └── timescale_timescaledb.repo
    │           └── tasks
    │               └── main.yml
    └── setup.sh

/.gitignore:
--------------------------------------------------------------------------------
# gitignore file
.DS_Store
*.vdi
/.vagrant
.gitignore

--------------------------------------------------------------------------------
/hw02/mdadm.conf:
--------------------------------------------------------------------------------
# mdadm config file

ARRAY /dev/md/1 level=raid5 num-devices=4 metadata=1.2 name=otuslinux:1 UUID=bbc3a500:e6c0bd47:bf42b077:f7acdfd7
ARRAY /dev/md/2 level=raid5 num-devices=4 metadata=1.2 name=otuslinux:2 UUID=07e17afc:44bcef46:3d936ae9:53ca7349
ARRAY /dev/md/0 level=raid0 num-devices=2 metadata=1.2 name=otuslinux:0 UUID=be10e59d:3749e7e3:99fdb6bb:54c4db44
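The UUIDs above belong to one particular build of the stand; if the arrays are ever re-created, the file is easier to regenerate from the live devices than to edit by hand. A minimal sketch using standard mdadm tooling (not part of the homework scripts):

```bash
# Regenerate mdadm.conf from whatever arrays are currently assembled.
mdadm --detail --scan > /etc/mdadm.conf
cat /etc/mdadm.conf
```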
--------------------------------------------------------------------------------
/hw02/scripts/create-raid:
--------------------------------------------------------------------------------
#!/bin/bash -e
for d in /dev/sd{b,c,d,e,f,g,h,i}; do parted -s $d mktable gpt; done
for d in /dev/sd{b,c,d,e,f,g,h,i}; do parted -s $d mkpart primary 0% 100%; done
mdadm --create /dev/md1 -l 5 -n 4 /dev/sd{b,c,d,e}1
mdadm --create /dev/md2 -l 5 -n 4 /dev/sd{f,g,h,i}1
mdadm --create /dev/md0 -l 0 -n 2 /dev/md{1,2}
parted -s /dev/md0 mktable gpt
parted -s /dev/md0 mkpart primary ext4 0% 100%
mkdir -p /raid
mkfs.ext4 /dev/md0p1
mount /dev/md0p1 /raid
df -h /raid

--------------------------------------------------------------------------------
/hw03/dracut/module.d/91resizeroot/module-setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

check() {
    return 0
}

depends() {
    return 0
}

install() {
    inst_hook pre-mount 00 "$moddir/resizeroot-local.sh"
}

--------------------------------------------------------------------------------
/hw03/dracut/module.d/91resizeroot/resizeroot-local.sh:
--------------------------------------------------------------------------------
#!/bin/sh
resize_local()
{
    lvm lvreduce --config 'global {locking_type=1}' -y -r -L -12G /dev/vg0/lvm_root
}
resize_local

--------------------------------------------------------------------------------
/hw03/dracut/module.d/91resizerootxfs/module-setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

check() {
    return 0
}

depends() {
    return 0
}

install() {
    inst_hook pre-mount 00 "$moddir/resizerootxfs-local.sh"
}

--------------------------------------------------------------------------------
/hw03/dracut/module.d/91resizerootxfs/resizerootxfs-local.sh:
--------------------------------------------------------------------------------
#!/bin/sh
resize_local()
{
    mkdir /mnt
    lvm lvcreate --config 'global {locking_type=1}' -n temp_xfs -L 8G vg0
    mkfs.xfs -f /dev/vg0/temp_xfs
    mount /dev/vg0/temp_xfs /tmp
    mount /dev/vg0/lvm_root /mnt
    xfsdump -J - /mnt | xfsrestore -J - /tmp
    umount /tmp
    umount /mnt
    lvm lvreduce --config 'global {locking_type=1}' --yes -f -L -12G /dev/vg0/lvm_root
    mkfs.xfs -f -m uuid=1b615c91-2ba1-4877-a06d-fa5d14844aac /dev/vg0/lvm_root
    mount /dev/vg0/temp_xfs /tmp
    mount /dev/vg0/lvm_root /mnt
    xfsdump -J - /tmp | xfsrestore -J - /mnt
    umount /tmp
    umount /mnt
    lvm lvremove --config 'global {locking_type=1}' --yes -f /dev/vg0/temp_xfs
}
resize_local

--------------------------------------------------------------------------------
/hw03/scripts/add-dracut-module:
--------------------------------------------------------------------------------
#!/bin/bash
# Install the 91resizeroot dracut module (same content as hw03/dracut/module.d).
# Heredocs are used instead of one-line echo strings: the original backticks
# around $moddir would have been executed by the shell, and the function
# bodies need real newlines (or semicolons) to be valid scripts.
mkdir -p /usr/lib/dracut/modules.d/91resizeroot

cat > /usr/lib/dracut/modules.d/91resizeroot/module-setup.sh <<'EOF'
#!/bin/bash
check() { return 0; }
depends() { return 0; }
install() {
    inst_hook pre-mount 00 "$moddir/resizeroot-local.sh"
}
EOF

cat > /usr/lib/dracut/modules.d/91resizeroot/resizeroot-local.sh <<'EOF'
#!/bin/sh
resize_local() {
    lvm lvreduce --config 'global {locking_type=1}' -y -r -L -12G /dev/vg0/lvm_root
}
resize_local
EOF

mkdir /var/log/journal && chown :systemd-journal /var/log/journal
systemctl restart systemd-journald
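Dropping the module into /usr/lib/dracut/modules.d is not enough by itself: the initramfs has to be rebuilt so the pre-mount hook actually ends up in the boot image. A quick rebuild-and-verify sketch using standard dracut tooling:

```bash
# Rebuild the initramfs for the running kernel...
dracut -f
# ...and confirm the hook script made it into the image.
lsinitrd | grep resizeroot
```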
--------------------------------------------------------------------------------
/hw06/httpd@.service:
--------------------------------------------------------------------------------
[Unit]
Description=Apache httpd server %I
After=network.target remote-fs.target nss-lookup.target
Documentation=man:httpd(8)
Documentation=man:apachectl(8)

[Service]
Type=notify
EnvironmentFile=/etc/sysconfig/httpd
ExecStart=/usr/sbin/httpd $OPTIONS -f %I -DFOREGROUND
ExecReload=/usr/sbin/httpd $OPTIONS -k graceful
ExecStop=/bin/kill -WINCH ${MAINPID}
KillSignal=SIGCONT
PrivateTmp=true

[Install]
WantedBy=multi-user.target

--------------------------------------------------------------------------------
/hw06/jira.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw06/jira.png

--------------------------------------------------------------------------------
/hw06/jira.service:
--------------------------------------------------------------------------------
[Unit]
Description=Atlassian Jira
After=network.target

[Service]
Type=simple
User=root
EnvironmentFile=/etc/sysconfig/jira
ExecStart=/bin/java $OPTIONS_START
ExecStop=/bin/java $OPTIONS_STOP
KillMode=process

[Install]
WantedBy=multi-user.target

--------------------------------------------------------------------------------
/hw06/monitor-timer.service:
--------------------------------------------------------------------------------
[Unit]
Description=SSH wrong username monitoring, run every 30 seconds
Wants=monitor-timer.timer

[Service]
Type=oneshot
EnvironmentFile=/etc/sysconfig/monitor
ExecStart=/bin/bash -c '/bin/grep "$WORD" "$LOGFILE"'

[Install]
WantedBy=monitor-timer.target

--------------------------------------------------------------------------------
/hw06/monitor-timer.target:
--------------------------------------------------------------------------------
[Unit]
Description=SSH monitor target
StopWhenUnneeded=yes

--------------------------------------------------------------------------------
/hw06/monitor-timer.timer:
--------------------------------------------------------------------------------
[Unit]
Description=SSH monitor timer

[Timer]
OnUnitActiveSec=30
OnCalendar=*:*:0/30
Unit=monitor-timer.target

[Install]
WantedBy=basic.target

--------------------------------------------------------------------------------
/hw06/phpvbox.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw06/phpvbox.png

--------------------------------------------------------------------------------
/hw06/spawn-fcgi:
--------------------------------------------------------------------------------
# See spawn-fcgi(1) for all possible options.
#
# Example:
SOCKET=/var/run/php-fcgi.sock
OPTIONS="-u apache -g apache -s /var/run/php-fcgi.sock -S -M 0600 -C 4 -P /var/run/spawn-fcgi.pid -- /usr/bin/php-cgi"
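Given the OPTIONS line above, a successful start of the spawn-fcgi.service unit that follows should leave a UNIX socket and a PID file behind. A quick check, assuming the paths configured above:

```bash
systemctl start spawn-fcgi
# Socket and PID file paths come straight from OPTIONS in /etc/sysconfig/spawn-fcgi.
ls -l /var/run/php-fcgi.sock /var/run/spawn-fcgi.pid
```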
--------------------------------------------------------------------------------
/hw06/spawn-fcgi.service:
--------------------------------------------------------------------------------
[Unit]
Description=Start and stop FastCGI processes
After=network.target httpd.service
Wants=httpd.service

[Service]
Type=forking
EnvironmentFile=/etc/sysconfig/spawn-fcgi
ExecStart=/bin/spawn-fcgi $OPTIONS
ExecStop=/bin/kill -TERM $MAINPID
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
RestartSec=5s
TimeoutStopSec=5s

[Install]
WantedBy=multi-user.target

--------------------------------------------------------------------------------
/hw08/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'
services:
  nginx:
    image: kakoka/nginx:latest
    ports:
      - 8080:8080
  exporter:
    image: sophos/nginx-vts-exporter:latest
    ports:
      - 9913:9913
    environment:
      - "NGINX_STATUS=http://10.10.10.136:8080/status/format/json"
  prometheus:
    image: prom/prometheus
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
    command: "--config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus"
    ports:
      - 9090:9090
    depends_on:
      - exporter
  grafana:
    image: grafana/grafana
    ports:
      - 3000:3000
    depends_on:
      - prometheus

--------------------------------------------------------------------------------
/hw08/pic01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw08/pic01.png

--------------------------------------------------------------------------------
/hw08/pic02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw08/pic02.png

--------------------------------------------------------------------------------
/hw08/pic03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw08/pic03.png

--------------------------------------------------------------------------------
/hw08/pic04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw08/pic04.png

--------------------------------------------------------------------------------
/hw08/prometheus.yml:
--------------------------------------------------------------------------------
global:
  scrape_interval: 15s
  scrape_timeout: 10s
  evaluation_interval: 15s
scrape_configs:
  - job_name: nginx
    scrape_interval: 15s
    scrape_timeout: 10s
    metrics_path: /metrics
    scheme: http
    static_configs:
      - targets:
          - 10.10.10.136:9913
        labels:
          server: simple
          organization: just_for_test
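Before wiring Grafana to Prometheus it is worth confirming that the exporter actually serves the nginx VTS metrics the scrape config above expects. A quick sketch, assuming the compose stack is up on 10.10.10.136:

```bash
# What the Prometheus job scrapes:
curl -s http://10.10.10.136:9913/metrics | head
# The raw VTS JSON the exporter itself consumes (NGINX_STATUS above):
curl -s http://10.10.10.136:8080/status/format/json | head -c 300
```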
--------------------------------------------------------------------------------
/hw08/stat/Dockerfile:
--------------------------------------------------------------------------------
FROM centos:latest
MAINTAINER Pavel Konotopov

COPY vts.repo /etc/yum.repos.d

RUN yum install -y nginx && yum clean all

COPY nginx.conf /etc/nginx/nginx.conf
COPY default.conf /etc/nginx/conf.d/default.conf

WORKDIR /usr/share/nginx/html
RUN echo "NGINX with vts on CentOS 7 inside Docker" > /usr/share/nginx/html/index.html
EXPOSE 8080
CMD ["nginx", "-g", "daemon off;"]

--------------------------------------------------------------------------------
/hw08/stat/vts.repo:
--------------------------------------------------------------------------------
[vts]
name=local vts repo
baseurl=http://10.10.10.136/localrepo
gpgcheck=0
enabled=1
priority=1

--------------------------------------------------------------------------------
/hw09/pic1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw09/pic1.png

--------------------------------------------------------------------------------
/hw09/pic2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw09/pic2.png

--------------------------------------------------------------------------------
/hw10/ansible/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]
host_key_checking = false
inventory = hosts.txt

--------------------------------------------------------------------------------
/hw10/ansible/group_vars/ipa_clients.yml:
--------------------------------------------------------------------------------
---
ansible_user: kakoka
ansible_ssh_private_key_file: /Users/kakoka/.ssh/id_rsa
ipa_server_address: 192.168.11.150
ipa_fqdn: master.homework.local
ipa_domain: homework.local
ipa_realm: HOMEWORK.LOCAL
ipa_pkg:
  - ipa-client
  - bind-utils
ipa_install_command: ipa-client-install -U
...

--------------------------------------------------------------------------------
/hw10/ansible/group_vars/ipa_master.yml:
--------------------------------------------------------------------------------
---
ansible_user: kakoka
ansible_ssh_private_key_file: /Users/kakoka/.ssh/id_rsa
ipa_domain: homework.local
ipa_realm: HOMEWORK.LOCAL
ipa_pkg:
  - ipa-server
  - ipa-server-dns
  - bind-dyndb-ldap
ipa_install_command: ipa-server-install -U
...

--------------------------------------------------------------------------------
/hw10/ansible/hosts.txt:
--------------------------------------------------------------------------------
[ipa_master]
master.homework.local ansible_host=192.168.11.150

[ipa_clients]
node1.homework.local ansible_host=192.168.11.151
node2.homework.local ansible_host=192.168.11.152

--------------------------------------------------------------------------------
/hw10/ansible/ipa-cli-role.yml:
--------------------------------------------------------------------------------
---
#
# IPA clients install
#
- name: Install IPA clients
  hosts: ipa_clients
  become: yes

  roles:
    - { role: add_epel_repo }
    - { role: deploy_ipa_client }
...
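With ansible.cfg pointing at hosts.txt, the playbooks are run from hw10/ansible/ in the usual way:

```bash
ansible-playbook ping.yml           # connectivity smoke test
ansible-playbook ipa-srv-role.yml   # FreeIPA server on the ipa_master group
ansible-playbook ipa-cli-role.yml   # enroll the ipa_clients group
```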
--------------------------------------------------------------------------------
/hw10/ansible/ipa-srv-role.yml:
--------------------------------------------------------------------------------
---
#
# IPA server install.
#
- name: Install IPA server
  hosts: ipa_master
  become: yes

  roles:
    - { role: add_epel_repo }
    - { role: deploy_ipa_server }
...

--------------------------------------------------------------------------------
/hw10/ansible/ping.yml:
--------------------------------------------------------------------------------
---
#
# Ping-pong
#
- name: Test connection to my servers
  hosts: all
  become: yes

  tasks:
    - name: Ping all
      ping:
...

--------------------------------------------------------------------------------
/hw10/ansible/roles/add_epel_repo/README.md:
--------------------------------------------------------------------------------
### Role Name

Adds the EPEL repository.

#### 1. Requirements

CentOS 7.

#### 2. Role Variables

- epel_repo_url - URL of the EPEL release package
- epel_repo_gpg_key_url - URL of the EPEL repository GPG key
- epel_repofile_path - location of the repo file on the target machine, `/etc/yum.repos.d/epel.repo`

#### 3. Example Playbook

```
- hosts: servers
  roles:
    - { role: add_epel_repo }
```

--------------------------------------------------------------------------------
/hw10/ansible/roles/add_epel_repo/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for add_epel_repo

epel_repo_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm"
epel_repo_gpg_key_url: "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}"
epel_repofile_path: "/etc/yum.repos.d/epel.repo"
...

--------------------------------------------------------------------------------
/hw10/ansible/roles/add_epel_repo/tasks/main.yml:
--------------------------------------------------------------------------------
---
# tasks file for add_epel_repo

- name: 1. Check if EPEL repo is already configured.
  stat: path={{ epel_repofile_path }}
  register: epel_repofile_result

- name: 2. Install EPEL repo.
  yum:
    name: "{{ epel_repo_url }}"
    state: present
  register: result
  when: not epel_repofile_result.stat.exists

- name: 3. Import EPEL GPG key.
  rpm_key:
    key: "{{ epel_repo_gpg_key_url }}"
    state: present
  when: not epel_repofile_result.stat.exists
...

--------------------------------------------------------------------------------
/hw10/ansible/roles/deploy_ipa_client/README.md:
--------------------------------------------------------------------------------
### Role Name

Installs the IPA client.

#### 1. Requirements

CentOS 7; a local IPA server must already be available.

#### 2. Role Variables

- ipa_server_address - IPA server address, `192.168.11.150`
- ipa_fqdn - FQDN of the IPA server, `master.homework.local`
- ipa_domain - domain name, `homework.local`
- ipa_realm - the same domain name in upper case, `HOMEWORK.LOCAL`
- ipa_pkg - list of packages to install (ipa-client, bind-utils)
- ipa_install_command - client installation command, `ipa-client-install -U`

#### 3. Example Playbook

```
- hosts: servers
  roles:
    - { role: deploy_ipa_client }
```
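Under the hood the role reduces to two shell steps; a rough manual equivalent follows. The --server/--domain/--realm flags are an assumption for illustration — the role's variables only record the bare `ipa-client-install -U`:

```bash
yum install -y ipa-client bind-utils
# Unattended enrollment against the IPA master (flags assumed, see above;
# a real enrollment would also need -p/-w admin credentials).
ipa-client-install -U \
    --server=master.homework.local \
    --domain=homework.local \
    --realm=HOMEWORK.LOCAL
```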
--------------------------------------------------------------------------------
/hw10/ansible/roles/deploy_ipa_client/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for deploy_ipa_client

ansible_user: kakoka
ansible_ssh_private_key_file: /Users/kakoka/.ssh/id_rsa
ipa_server_address: 192.168.11.150
ipa_fqdn: master.homework.local
ipa_domain: homework.local
ipa_realm: HOMEWORK.LOCAL
ipa_pkg:
  - ipa-client
  - bind-utils
ipa_install_command: ipa-client-install -U
...

--------------------------------------------------------------------------------
/hw10/ansible/roles/deploy_ipa_server/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for deploy_ipa_server
ansible_user: kakoka
ansible_ssh_private_key_file: /Users/kakoka/.ssh/id_rsa
ipa_domain: homework.local
ipa_realm: HOMEWORK.LOCAL
ipa_pkg:
  - ipa-server
  - ipa-server-dns
  - bind-dyndb-ldap
ipa_install_command: ipa-server-install -U
ipa_add_rule_command: ipa sudorule-add
ipa_grant_admin_command: ipa sudorule-add-user
ipa_user_add: ipa user-add
ipa_add_to_group: ipa group-add-member
...

--------------------------------------------------------------------------------
/hw10/img/pic01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw10/img/pic01.png

--------------------------------------------------------------------------------
/hw10/img/pic02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw10/img/pic02.png

--------------------------------------------------------------------------------
/hw11/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]
host_key_checking = false
inventory = hosts.txt

--------------------------------------------------------------------------------
/hw11/deploy-nginx.yml:
--------------------------------------------------------------------------------
---
#
# Deploy nginx server on 8080 port
#
- name: Deploy nginx server on 8080 port
  hosts: ipa_clients
  become: yes

  roles:
    - { role: add_local_repo }
    - { role: deploy_nginx_server }
...
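The play is run from hw11/ exactly like the hw10 ones; the inventory again comes from ansible.cfg:

```bash
ansible-playbook deploy-nginx.yml
```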
--------------------------------------------------------------------------------
/hw11/hosts.txt:
--------------------------------------------------------------------------------
[ipa_master]
master.homework.local ansible_host=192.168.11.150

[ipa_clients]
node1.homework.local ansible_host=192.168.11.151
node2.homework.local ansible_host=192.168.11.152

--------------------------------------------------------------------------------
/hw11/img/pic01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw11/img/pic01.png

--------------------------------------------------------------------------------
/hw11/img/pic02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw11/img/pic02.png

--------------------------------------------------------------------------------
/hw11/img/pic03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw11/img/pic03.png

--------------------------------------------------------------------------------
/hw11/roles/add_local_repo/README.md:
--------------------------------------------------------------------------------
### Role Name

Adds a local yum repository.

#### 1. Requirements

- CentOS 7

#### 2. Role Variables

- local_repo - name of the local repo file, `vts.repo`
- dest_folder - directory the repo file is copied to, `/etc/yum.repos.d/`

#### 3. Example Playbook

```
---
- name: Add local repo
  hosts: servers

  roles:
    - { role: add_local_repo }
```

--------------------------------------------------------------------------------
/hw11/roles/add_local_repo/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for add_local_repo
local_repo: vts.repo
dest_folder: /etc/yum.repos.d/
...

--------------------------------------------------------------------------------
/hw11/roles/add_local_repo/files/vts.repo:
--------------------------------------------------------------------------------
[vts]
name=local vts repo
baseurl=http://10.10.10.136/localrepo
gpgcheck=0
enabled=1
priority=1

--------------------------------------------------------------------------------
/hw11/roles/add_local_repo/tasks/main.yml:
--------------------------------------------------------------------------------
---
# tasks file for add_local_repo

- name: Check local repo is already added
  stat:
    path: '{{ dest_folder }}{{ local_repo }}'
  register: repofile_result

- name: Add local repofile
  copy:
    src: '{{ local_repo }}'
    dest: '{{ dest_folder }}'
  when: not repofile_result.stat.exists
...

--------------------------------------------------------------------------------
/hw11/roles/deploy_nginx_server/README.md:
--------------------------------------------------------------------------------
### Role Name

Installs the previously built nginx (with the VTS statistics module) from the local repository.

#### 1. Requirements

- CentOS 7
- The local repository must be added before installing nginx.

#### 2. Role Variables

- nginx_port - server port, `8080`
- nginx_web_root - directory with the site content, `/var/www/html`
- nginx_conf_folder - directory with the main nginx config, `/etc/nginx`
- nginx_conf_def_folder - directory with per-site configs, `/etc/nginx/conf.d`

#### 3. Example Playbook

```
---
- name: Nginx server deploy
  hosts: servers

  roles:
    - { role: deploy_nginx_server }
```
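After a run, the result can be checked from the control host. The /status location is an assumption carried over from the hw08 build of the same VTS-enabled package:

```bash
curl -s http://node1.homework.local:8080/
# VTS statistics endpoint (path assumed from hw08's NGINX_STATUS setting):
curl -s http://node1.homework.local:8080/status/format/json | head -c 300
```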
--------------------------------------------------------------------------------
/hw11/roles/deploy_nginx_server/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for deploy_nginx_server
nginx_port: 8080
nginx_web_root: /var/www/html/
nginx_conf_folder: /etc/nginx
nginx_conf_def_folder: /etc/nginx/conf.d
...

--------------------------------------------------------------------------------
/hw11/roles/deploy_nginx_server/files/nginx.service:
--------------------------------------------------------------------------------
[Unit]
Description=A high performance web server and a reverse proxy server
After=network.target

[Service]
Type=forking
PIDFile=/var/run/nginx.pid
ExecStartPre=/sbin/nginx -t -q -g 'daemon on; master_process on;'
ExecStart=/sbin/nginx -g 'daemon on; master_process on;'
ExecReload=/sbin/nginx -g 'daemon on; master_process on;' -s reload
TimeoutStopSec=5
KillMode=mixed

[Install]
WantedBy=multi-user.target

--------------------------------------------------------------------------------
/hw11/roles/deploy_nginx_server/handlers/main.yml:
--------------------------------------------------------------------------------
---
# handlers file for deploy_nginx_server

- name: Restart nginx
  service:
    name: nginx
    state: restarted
...

--------------------------------------------------------------------------------
/hw11/roles/deploy_nginx_server/templates/index.html.j2:
--------------------------------------------------------------------------------
<html>
<head>
<title>Index of {{ ansible_fqdn }} server</title>
</head>
<body>
<h1>This is main page of server {{ ansible_fqdn }}</h1>
<p>Server name: {{ ansible_fqdn }}.</p>
<p>Server ip: {{ ansible_eth1.ipv4.address }}.</p>
</body>
</html>

--------------------------------------------------------------------------------
/hw12/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]

remote_user = kakoka
private_key_file = /Users/kakoka/.ssh/id_rsa

host_key_checking = false
inventory = hosts.txt

# roles_path = roles

--------------------------------------------------------------------------------
/hw12/hosts.txt:
--------------------------------------------------------------------------------
[ipa_master]
master.homework.local ansible_host=192.168.11.150

[ipa_clients]
node1.homework.local ansible_host=192.168.11.151
node2.homework.local ansible_host=192.168.11.152

--------------------------------------------------------------------------------
/hw12/roles/add_repos/README.md:
--------------------------------------------------------------------------------
### Role Name

Adds extra yum repositories.

#### 1. Requirements

- CentOS 7

#### 2. Role Variables

A `repo` variable of the form

```
repo:
  - { name, key, url, file }
```
Example:

```
- { name: 'epel', key: 'https://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7', url: 'https://download.fedoraproject.org/pub/epel/$releasever/$basearch/', file: '/etc/yum.repos.d/epel.repo'}
```

#### 3. Example Playbook

```
---
- name: Do something
  hosts: servers

  roles:
    - { role: add_repos }
```

--------------------------------------------------------------------------------
/hw12/roles/add_repos/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for add_repos

repo:
  - { name: 'epel', key: 'https://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7', url: 'https://download.fedoraproject.org/pub/epel/$releasever/$basearch/', file: '/etc/yum.repos.d/epel.repo'}
  - { name: 'elrepo', key: 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org', url: 'https://elrepo.org/linux/elrepo/el7/x86_64/', file: '/etc/yum.repos.d/elrepo.repo'}
  - { name: 'elrepo-kernel', key: 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org', url: 'https://elrepo.org/linux/kernel/el7/x86_64/', file: '/etc/yum.repos.d/elrepo-kernel.repo'}
...

--------------------------------------------------------------------------------
/hw12/roles/deploy_monitoring_tools/README.md:
--------------------------------------------------------------------------------
### Role Name

Installs monitoring packages.

#### 1. Requirements

- CentOS 7

#### 2. Role Variables

The list of packages to install:

- monitoring_packages_list:
  - procps-ng
  - ...
  - tuned

#### 3. Example Playbook

```
---
- name: Do something
  hosts: servers

  roles:
    - { role: deploy_monitoring_tools }
```

--------------------------------------------------------------------------------
/hw12/roles/deploy_monitoring_tools/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for deploy_build_and_monitoring_tools

monitoring_packages_list:
  - procps-ng
  - sysstat
  - iotop
  - strace
  - ltrace
  - gdb
  - atop
  - iftop
...
--------------------------------------------------------------------------------
/hw12/roles/deploy_monitoring_tools/tasks/main.yml:
--------------------------------------------------------------------------------
---
# tasks file for deploy_monitoring_tools

- name: Update all packages
  yum:
    name: '*'
    state: latest
    update_cache: yes
    exclude: kernel

- name: Add packages for monitoring
  yum:
    name: "{{ monitoring_packages_list }}"
    state: latest
    update_cache: yes
...

--------------------------------------------------------------------------------
/hw12/roles/setup_atop_service/README.md:
--------------------------------------------------------------------------------
### Role Name

Sets up atop as a systemd service.

#### 1. Requirements

- CentOS 7
- The atop package must be installed beforehand (see deploy_monitoring_tools).

#### 2. Role Variables

- atop_log_path: /var/log/atop
- atop_bin_path: /usr/bin
- atop_pid_file: /var/run/atop.pid
- atop_polling_interval: 15

#### 3. Example Playbook

```
---
- name: Do something
  hosts: servers

  roles:
    - { role: setup_atop_service }
```

--------------------------------------------------------------------------------
/hw12/roles/setup_atop_service/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for setup_atop_service

atop_log_path: /var/log/atop
atop_bin_path: /usr/bin
atop_pid_file: /var/run/atop.pid
atop_polling_interval: 15
...

--------------------------------------------------------------------------------
/hw12/roles/setup_atop_service/handlers/main.yml:
--------------------------------------------------------------------------------
---
# handlers file for setup_atop_service

- name: Restart atop
  service:
    name: atop
    state: restarted
...

--------------------------------------------------------------------------------
/hw12/roles/setup_atop_service/tasks/main.yml:
--------------------------------------------------------------------------------
---
# tasks file for setup_atop_service

- name: Enable service atop
  systemd:
    name: atop
    enabled: yes
    masked: no

- name: Start service atop
  service:
    name: atop
    state: started

- name: Generate atop config file
  template:
    src: atop.j2
    dest: /etc/sysconfig/atop
  notify: Restart atop
...

--------------------------------------------------------------------------------
/hw12/roles/setup_atop_service/templates/atop.j2:
--------------------------------------------------------------------------------
# sysconfig atop
#

# Current Day format
CURDAY=`date +%Y%m%d`

# Log files path
LOGPATH={{ atop_log_path }}

# Binaries path
BINPATH={{ atop_bin_path }}

# PID File
PIDFILE={{ atop_pid_file }}

# Polling interval in seconds (atop's default is 600, i.e. 10 minutes)
INTERVAL={{ atop_polling_interval }}

--------------------------------------------------------------------------------
/hw12/roles/tune_kernel/README.md:
--------------------------------------------------------------------------------
### Role Name

Adds kernel parameters to sysctl.conf.
Runs sysctl as a systemd service (systemd-sysctl).

#### 1. Requirements

- CentOS 7

#### 2. Role Variables

The parameterized sysctl.conf lives in templates/sysctl.conf.j2; add the options you need to the template and generate a new config.
Variables are set in defaults/main.yml.

Example:

- net_ipv6_conf_all_disable_ipv6: 1

in the template:

- net.ipv6.conf.all.disable_ipv6={{ net_ipv6_conf_all_disable_ipv6 }}

#### 3. Example Playbook

```
---
- name: Do something
  hosts: servers

  roles:
    - { role: tune_kernel }
```
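Once the role has run, the regenerated config and the live kernel values are easy to cross-check on the host; using the example variable above:

```bash
# Re-apply /etc/sysctl.conf and read the example parameter back.
sysctl -p /etc/sysctl.conf
sysctl net.ipv6.conf.all.disable_ipv6
```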
--------------------------------------------------------------------------------
/hw12/roles/tune_kernel/handlers/main.yml:
--------------------------------------------------------------------------------
---
# handlers file for tune_kernel

- name: Restart systemd-sysctl service
  service:
    name: systemd-sysctl
    state: restarted
...

--------------------------------------------------------------------------------
/hw12/roles/tune_kernel/tasks/main.yml:
--------------------------------------------------------------------------------
---
# tasks file for tune_kernel

- name: systemd-sysctl service enable
  systemd:
    name: systemd-sysctl
    enabled: yes
    masked: no

- name: Sysctl config file
  template:
    src: sysctl.conf.j2
    dest: /etc/sysctl.conf
  notify: Restart systemd-sysctl service
  ignore_errors: yes
...

--------------------------------------------------------------------------------
/hw12/roles/upgrade_kernel/README.md:
--------------------------------------------------------------------------------
### Role Name

Upgrades the kernel to the current version from the ELRepo repository.

#### 1. Requirements

- CentOS 7

#### 2. Role Variables

- grub_build_command: grub2-mkconfig -o

#### 3. Example Playbook

```
---
- name: Do something
  hosts: servers

  roles:
    - { role: upgrade_kernel }
```

--------------------------------------------------------------------------------
/hw12/roles/upgrade_kernel/defaults/main.yml:
--------------------------------------------------------------------------------
---
# defaults file for upgrade_kernel

grub_build_command: grub2-mkconfig -o
...

--------------------------------------------------------------------------------
/hw12/setup-new-host.yml:
--------------------------------------------------------------------------------
---
#
# Setup a vagrant host
#

- name: Setup new host
  hosts: node1.homework.local
  become: yes

  roles:
    - { role: add_repos }
    - { role: deploy_monitoring_tools }
    - { role: setup_atop_service }
    - { role: tune_kernel }
    - { role: upgrade_kernel }
...
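The whole chain is applied in one run. upgrade_kernel only installs the new ELRepo kernel and regenerates the GRUB config, so a reboot (not part of the playbook) is what actually boots into it; the ad-hoc reboot module needs Ansible 2.7+:

```bash
ansible-playbook setup-new-host.yml
ansible node1.homework.local -b -m reboot
ansible node1.homework.local -a 'uname -r'   # confirm the new kernel
```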
-------------------------------------------------------------------------------- /hw13/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | 3 | remote_user = kakoka 4 | private_key_file = /Users/kakoka/.ssh/id_rsa 5 | 6 | host_key_checking = false 7 | inventory = hosts.txt 8 | 9 | # roles_path = roles -------------------------------------------------------------------------------- /hw13/hosts.txt: -------------------------------------------------------------------------------- 1 | [masters] 2 | master.homework.local 3 | 4 | [clients] 5 | node1.homework.local 6 | node2.homework.local 7 | -------------------------------------------------------------------------------- /hw13/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw13/pic/pic01.png -------------------------------------------------------------------------------- /hw13/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw13/pic/pic02.png -------------------------------------------------------------------------------- /hw13/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw13/pic/pic03.png -------------------------------------------------------------------------------- /hw13/roles/add_bacula/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Installing Bacula 9 (director and storage daemon). 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: add_bacula } 18 | ``` -------------------------------------------------------------------------------- /hw13/roles/add_bacula/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for add_bacula 3 | 4 | PG_BACULA_PASSWORD: bacula123 5 | 6 | bacula_packages: 7 | - bacula-director 8 | - bacula-storage 9 | - bacula-console 10 | 11 | bacula_folders: 12 | - /opt/backups 13 | - /opt/restore 14 | - /var/spool/bacula 15 | - /var/log/bacula 16 | 17 | conf_files: 18 | - bacula-sd.conf 19 | - bacula-dir.conf 20 | - bconsole.conf 21 | 22 | bacula_services: 23 | - bacula-sd 24 | - bacula-dir 25 | ... -------------------------------------------------------------------------------- /hw13/roles/add_bacula/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for add_bacula 3 | 4 | - name: Restart bacula 5 | systemd: 6 | name: "{{ item }}" 7 | state: restarted 8 | with_items: "{{ bacula_services }}" 9 | ...
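The tasks file for add_bacula is not included in this listing; a minimal sketch of what it would need to do, assuming only the variables from defaults/main.yml above (the ownership values are an assumption):

```
---
# tasks file for add_bacula (sketch)

- name: Install bacula packages
  yum:
    name: "{{ bacula_packages }}"
    state: present

- name: Create bacula folders
  file:
    path: "{{ item }}"
    state: directory
    owner: bacula    # assumed owner; adjust to the real role
    group: bacula
  with_items: "{{ bacula_folders }}"

- name: Deploy bacula config files
  template:
    src: "{{ item }}.j2"
    dest: "/etc/bacula/{{ item }}"
  with_items: "{{ conf_files }}"
  notify: Restart bacula

- name: Enable and start bacula services
  systemd:
    name: "{{ item }}"
    enabled: yes
    state: started
  with_items: "{{ bacula_services }}"
```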
-------------------------------------------------------------------------------- /hw13/roles/add_bacula/templates/bacula-sd.conf.j2: -------------------------------------------------------------------------------- 1 | Storage { 2 | Name = master-storage 3 | SDPort = 9103 4 | WorkingDirectory = "/var/spool/bacula" 5 | Pid Directory = "/var/run" 6 | Maximum Concurrent Jobs = 20 7 | } 8 | Director { 9 | Name = master-director 10 | Password = "bacula123" 11 | } 12 | Director { 13 | Name = master-director-mon 14 | Password = "bacula123" 15 | Monitor = yes 16 | } 17 | 18 | Device { 19 | Name = FileStorage 20 | Media Type = File 21 | Archive Device = /opt/backups 22 | Random Access = Yes; 23 | RemovableMedia = no; 24 | LabelMedia = yes; 25 | AutomaticMount = yes; 26 | AlwaysOpen = no; 27 | } 28 | Messages { 29 | Name = Standard 30 | director = master-director = all, !skipped 31 | } 32 | -------------------------------------------------------------------------------- /hw13/roles/add_bacula/templates/bconsole.conf.j2: -------------------------------------------------------------------------------- 1 | # 2 | # Bacula User Agent (or Console) Configuration File 3 | # 4 | 5 | Director { 6 | Name = master-director 7 | DIRport = 9101 8 | address = localhost 9 | Password = "bacula123" 10 | } 11 | -------------------------------------------------------------------------------- /hw13/roles/add_bacula/templates/pgpass.j2: -------------------------------------------------------------------------------- 1 | localhost:5432:bacula:bacula:{{ PG_BACULA_PASSWORD }} -------------------------------------------------------------------------------- /hw13/roles/add_bacula_client/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Installing the Bacula 9 client (file daemon). 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: add_bacula_client } 18 | ``` -------------------------------------------------------------------------------- /hw13/roles/add_bacula_client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for add_bacula_client 3 | 4 | bacula_packages: 5 | - epel-release 6 | - bacula-client 7 | - python-pip 8 | 9 | bacula_services: 10 | - bacula-fd 11 | ... -------------------------------------------------------------------------------- /hw13/roles/add_bacula_client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for add_bacula_client 3 | 4 | - name: Restart bacula 5 | systemd: 6 | name: "{{ item }}" 7 | state: restarted 8 | with_items: "{{ bacula_services }}" 9 | ...
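As with the server role, the client's tasks file is not shown in this dump; a sketch under the same assumptions (the defaults above plus the bacula-fd.conf.j2 template that follows):

```
---
# tasks file for add_bacula_client (sketch)

- name: Install bacula client packages
  yum:
    name: "{{ bacula_packages }}"
    state: present

- name: Deploy file daemon config
  template:
    src: bacula-fd.conf.j2
    dest: /etc/bacula/bacula-fd.conf
  notify: Restart bacula

- name: Enable and start bacula-fd
  systemd:
    name: "{{ item }}"
    enabled: yes
    state: started
  with_items: "{{ bacula_services }}"
```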
-------------------------------------------------------------------------------- /hw13/roles/add_bacula_client/templates/bacula-fd.conf.j2: -------------------------------------------------------------------------------- 1 | Director { 2 | Name = master-director 3 | Password = "bacula123" 4 | # Monitor = yes 5 | } 6 | 7 | FileDaemon { 8 | Name = bacula-fd 9 | FDport = 9102 10 | WorkingDirectory = /var/spool/bacula 11 | 12 | Pid Directory = /var/run 13 | Maximum Concurrent Jobs = 20 14 | Plugin Directory = /usr/lib64/bacula 15 | 16 | PKI Signatures = Yes 17 | PKI Encryption = Yes 18 | PKI Keypair = "/etc/bacula/node.pem" 19 | PKI Master Key = "/etc/bacula/master.crt" 20 | } 21 | 22 | # Send all messages except skipped files back to Director 23 | 24 | Messages { 25 | Name = Standard 26 | director = master-director = all, !skipped, !restored 27 | } 28 | -------------------------------------------------------------------------------- /hw13/roles/add_pgsql11/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Installing PostgreSQL 11. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: add_pgsql11 } 18 | ``` -------------------------------------------------------------------------------- /hw13/roles/add_pgsql11/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for add_pgsql11 3 | 4 | -------------------------------------------------------------------------------- /hw13/roles/add_pgsql11/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for add_pgsql11 3 | 4 | - name: Restart postgres 5 | service: 6 | name: postgresql-11 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /hw13/roles/add_repos/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Adding extra repositories for Bacula 9 and PostgreSQL 11. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: add_repos } 18 | ``` -------------------------------------------------------------------------------- /hw13/roles/add_repos/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for add_repos 3 | rpm_command: rpm -ihv 4 | ... 5 | -------------------------------------------------------------------------------- /hw13/roles/add_repos/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for add_repos 3 | 4 | - name: Yum clean metadata 5 | command: yum clean all 6 | args: 7 | warn: no 8 | 9 | - name: Add bacula repository 10 | command: > 11 | {{ rpm_command }} 12 | http://repo.backup-solutions.ru/pub/bacula-bs-7-1.el7.backupsolutions.noarch.rpm 13 | 14 | - name: Add postgresql repository 15 | command: > 16 | {{ rpm_command }} 17 | https://download.postgresql.org/pub/repos/yum/11/redhat/rhel-7-x86_64/pgdg-centos11-11-2.noarch.rpm 18 | ...
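Note that the rpm -ihv commands above are not idempotent: a second run of the play fails because the repo packages are already installed. The yum module accepts an rpm URL directly and skips the install when the package is already present, so an equivalent idempotent sketch (same URLs) would be:

```
- name: Add bacula repository
  yum:
    name: http://repo.backup-solutions.ru/pub/bacula-bs-7-1.el7.backupsolutions.noarch.rpm
    state: present

- name: Add postgresql repository
  yum:
    name: https://download.postgresql.org/pub/repos/yum/11/redhat/rhel-7-x86_64/pgdg-centos11-11-2.noarch.rpm
    state: present
```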
19 | -------------------------------------------------------------------------------- /hw13/setup-bacula-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Setup a vagrant host 4 | # 5 | 6 | - name: Setup new host 7 | hosts: node2.homework.local 8 | become: yes 9 | 10 | roles: 11 | - { role: add_repos } 12 | - { role: add_bacula_client } 13 | ... 14 | -------------------------------------------------------------------------------- /hw13/setup-bacula-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Setup a vagrant host 4 | # 5 | 6 | - name: Setup new host 7 | hosts: master.homework.local 8 | become: yes 9 | 10 | roles: 11 | - { role: add_repos } 12 | - { role: add_pgsql11 } 13 | - { role: add_bacula } 14 | ... 15 | -------------------------------------------------------------------------------- /hw14/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | 3 | remote_user = kakoka 4 | private_key_file = /Users/kakoka/.ssh/id_rsa 5 | 6 | host_key_checking = false 7 | inventory = hosts.txt 8 | 9 | # roles_path = roles -------------------------------------------------------------------------------- /hw14/hosts.txt: -------------------------------------------------------------------------------- 1 | [masters] 2 | master.homework.local 3 | 4 | [clients] 5 | node1.homework.local 6 | node2.homework.local 7 | -------------------------------------------------------------------------------- /hw14/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw14/pic/pic01.png -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Forwarding audit logs (auditd) to a central server without storing them locally, except when the queue overflows. [audit client -> audit server] 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: conf_audit_client } 18 | ``` -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for conf_audit_client 3 | 4 | audit_remote_server: master 5 | audit_remote_server_port: 60 6 | audit_rules: ['-a exit,always -F arch=b32 -S execve','-a exit,always -F arch=b64 -S execve','-a exit,always -S unlink -S rmdir','-a exit,always -S open -F loginuid=1001','-w /etc/group -p wa','-w /etc/passwd -p wa','-w /etc/shadow -p wa','-w /etc/sudoers -p wa','-w /etc -p r','-w /etc/nginx/nginx.conf -p wa'] 7 | ... -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for conf_audit_client 3 | 4 | - name: Restart auditd 5 | systemd: 6 | name: auditd 7 | state: restarted 8 | ...
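The tasks file for conf_audit_client is not part of this listing; judging by the templates that follow, it would render them into the stock CentOS 7 auditd and audisp locations (the destination paths here are that assumption) and notify the restart handler:

```
---
# tasks file for conf_audit_client (sketch)

- name: Deploy audit rules
  template:
    src: audit.rules.j2
    dest: /etc/audit/rules.d/audit.rules
  notify: Restart auditd

- name: Enable the audisp remote plugin
  template:
    src: au-remote.conf.j2
    dest: /etc/audisp/plugins.d/au-remote.conf
  notify: Restart auditd

- name: Configure the audit event dispatcher
  template:
    src: audispd.conf.j2
    dest: /etc/audisp/audispd.conf
  notify: Restart auditd
```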
-------------------------------------------------------------------------------- /hw14/roles/conf_audit_client/templates/au-remote.conf.j2: -------------------------------------------------------------------------------- 1 | # This file controls the audispd data path to the 2 | # remote event logger. This plugin will send events to 3 | # a remote machine (Central Logger). 4 | 5 | active = yes 6 | direction = out 7 | path = /sbin/audisp-remote 8 | type = always 9 | #args = 10 | format = string 11 | 12 | -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client/templates/audispd.conf.j2: -------------------------------------------------------------------------------- 1 | # 2 | # This file controls the configuration of the audit event 3 | # dispatcher daemon, audispd. 4 | # 5 | 6 | q_depth = 150 7 | overflow_action = SYSLOG 8 | priority_boost = 4 9 | max_restarts = 10 10 | name_format = HOSTNAME 11 | #name = mydomain 12 | #plugin_dir = /etc/audisp/plugins.d/ -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client/templates/audit.rules.j2: -------------------------------------------------------------------------------- 1 | ## First rule - delete all 2 | -D 3 | 4 | ## Increase the buffers to survive stress events. 5 | ## Make this bigger for busy systems 6 | -b 8192 7 | 8 | ## Set failure mode to syslog 9 | -f 1 10 | 11 | # Let's generate rules from list 12 | 13 | {% for rule in audit_rules %} 14 | {{ rule }} 15 | {% endfor %} 16 | 17 | # Different rules for different things :) 18 | 19 | #-a exit,always -F arch=b64 -S execve 20 | #-a exit,always -F arch=b32 -S execve 21 | #-a exit,always -S unlink -S rmdir 22 | #-a exit,always -S open -F loginuid=1001 23 | #-w /etc/nginx/nginx.conf -p wa 24 | #-w /etc/group -p wa 25 | #-w /etc/passwd -p wa 26 | #-w /etc/shadow -p wa 27 | #-w /etc/sudoers -p wa 28 | #-w /etc -p r -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client_syslog/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Forwarding audit logs (auditd) to a central server without storing them locally. [audit -> local syslog -> syslog server] 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: conf_audit_client_syslog } 18 | ``` -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client_syslog/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for conf_audit_client_syslog 3 | audit_rules: ['-a exit,always -F arch=b32 -S execve','-a exit,always -F arch=b64 -S execve','-a exit,always -S unlink -S rmdir','-a exit,always -S open -F loginuid=1001','-w /etc/group -p wa','-w /etc/passwd -p wa','-w /etc/shadow -p wa','-w /etc/sudoers -p wa','-w /etc -p r','-w /etc/nginx/nginx.conf -p wa -k nginx'] 4 | ...
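Because audit_rules is a plain list variable, the rule set can be overridden per inventory group without touching the role. A sketch of a group_vars entry (hypothetical file, not in the repo) matching the [clients] group from hosts.txt:

```
# group_vars/clients.yml (sketch)
audit_rules:
  - '-a exit,always -F arch=b64 -S execve'
  - '-w /etc/nginx/nginx.conf -p wa -k nginx'
```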
-------------------------------------------------------------------------------- /hw14/roles/conf_audit_client_syslog/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for conf_audit_client_syslog 3 | 4 | - name: Restart auditd 5 | systemd: 6 | name: auditd 7 | state: restarted 8 | 9 | - name: Restart rsyslog 10 | systemd: 11 | name: rsyslog 12 | state: restarted 13 | ... -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client_syslog/templates/audit.rules.j2: -------------------------------------------------------------------------------- 1 | ## First rule - delete all 2 | -D 3 | 4 | ## Increase the buffers to survive stress events. 5 | ## Make this bigger for busy systems 6 | -b 8192 7 | 8 | ## Set failure mode to syslog 9 | -f 1 10 | 11 | # Let's generate rules from list 12 | 13 | {% for rule in audit_rules %} 14 | {{ rule }} 15 | {% endfor %} 16 | 17 | # Different rules for different things :) 18 | 19 | #-a exit,always -F arch=b64 -S execve 20 | #-a exit,always -F arch=b32 -S execve 21 | #-a exit,always -S unlink -S rmdir 22 | #-a exit,always -S open -F loginuid=1001 23 | #-w /etc/nginx/nginx.conf -p wa 24 | #-w /etc/group -p wa 25 | #-w /etc/passwd -p wa 26 | #-w /etc/shadow -p wa 27 | #-w /etc/sudoers -p wa 28 | #-w /etc -p r -------------------------------------------------------------------------------- /hw14/roles/conf_audit_client_syslog/templates/syslog.plugin.conf.j2: -------------------------------------------------------------------------------- 1 | # vi /etc/audisp/plugins.d/syslog.conf 2 | 3 | # This file controls the configuration of the syslog plugin. 4 | # It simply takes events and writes them to syslog. The 5 | # arguments provided can be the default priority that you 6 | # want the events written with. And optionally, you can give 7 | # a second argument indicating the facility that you want events 8 | # logged to. Valid options are LOG_LOCAL0 through 7, LOG_AUTH, 9 | # LOG_AUTHPRIV, LOG_DAEMON, LOG_SYSLOG, and LOG_USER. 10 | 11 | active = yes 12 | direction = out 13 | path = builtin_syslog 14 | type = builtin 15 | args = LOG_LOCAL6 16 | # args = LOG_INFO 17 | format = string 18 | 19 | -------------------------------------------------------------------------------- /hw14/roles/conf_audit_server/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Configuring auditd to receive logs. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: conf_audit_server } 18 | ``` -------------------------------------------------------------------------------- /hw14/roles/conf_audit_server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for conf_audit_server 3 | ... -------------------------------------------------------------------------------- /hw14/roles/conf_audit_server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for conf_audit_server 3 | 4 | - name: Restart auditd 5 | systemd: 6 | name: auditd 7 | state: restarted 8 | ...
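The server-side auditd.conf.j2 is not reproduced in this dump; the setting that makes auditd accept remote events is tcp_listen_port, which has to match the client's audit_remote_server_port (60 above). For illustration only, a hypothetical task-level sketch of that one setting (the real role manages the whole file via its template):

```
- name: Enable auditd TCP listener (sketch)
  lineinfile:
    path: /etc/audit/auditd.conf
    regexp: '^#?tcp_listen_port'
    line: "tcp_listen_port = {{ audit_remote_server_port | default(60) }}"
  notify: Restart auditd
```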
-------------------------------------------------------------------------------- /hw14/roles/conf_audit_server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for conf_audit_server 3 | 4 | - name: Server audit daemon config file 5 | template: 6 | src: auditd.conf.j2 7 | dest: /etc/audit/auditd.conf 8 | notify: Restart auditd -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_client/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Forwarding nginx access and error logs to the central syslog server via rsyslog. [rsyslog client -> rsyslog server] 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: conf_rsyslog_client } 18 | ``` -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for conf_rsyslog_client -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for conf_rsyslog_client -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for conf_rsyslog_client -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_client/templates/client-include-nginx.conf.j2: -------------------------------------------------------------------------------- 1 | # Nginx error and access logs transfer to remote 2 | 3 | $ModLoad imfile 4 | 5 | $InputFileName /var/log/nginx/error.log 6 | $InputFileTag nginx: 7 | $InputFileStateFile stat-nginx-error 8 | $InputFileSeverity error 9 | $InputFileFacility local1 10 | $InputFilePollInterval 1 11 | $InputRunFileMonitor 12 | 13 | # access log 14 | $InputFileName /var/log/nginx/access.log 15 | $InputFileTag nginx: 16 | $InputFileStateFile stat-nginx-access 17 | $InputFileSeverity notice 18 | $InputFileFacility local2 19 | $InputFilePollInterval 1 20 | $InputRunFileMonitor 21 | 22 | $template error, "<163> %msg%" 23 | $template access, "<166> %msg%" 24 | 25 | local1.* @@master;error 26 | local2.* @@master;access -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_client/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_client/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - conf_rsyslog_client -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_client/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for conf_rsyslog_client -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_server/README.md:
-------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Configuring rsyslog on the central server to receive logs from clients. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: conf_rsyslog_server } 18 | ``` -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for conf_rsyslog_server -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for conf_rsyslog_server -------------------------------------------------------------------------------- /hw14/roles/conf_rsyslog_server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for conf_rsyslog_server -------------------------------------------------------------------------------- /hw14/roles/deploy_elk_syslog/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Deploying the ELK stack for centralized syslog. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: deploy_elk_syslog } 18 | ``` -------------------------------------------------------------------------------- /hw14/roles/deploy_elk_syslog/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for deploy_elk_syslog -------------------------------------------------------------------------------- /hw14/roles/deploy_elk_syslog/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for deploy_elk_syslog -------------------------------------------------------------------------------- /hw14/roles/deploy_elk_syslog/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for deploy_elk_syslog -------------------------------------------------------------------------------- /hw15/Riemann/dash/dash.rb: -------------------------------------------------------------------------------- 1 | set :bind, "0.0.0.0" 2 | config[:ws_config] = 'dash.json' 3 | -------------------------------------------------------------------------------- /hw15/Riemann/riemann.conf: -------------------------------------------------------------------------------- 1 | ; -*- mode: clojure; -*- 2 | ; vim: filetype=clojure 3 | 4 | (logging/init {:file "/var/log/riemann/riemann.log"}) 5 | 6 | ; Listen on the local interface over TCP (5555), UDP (5555), and websockets 7 | ; (5556) 8 | (let [host "0.0.0.0"] 9 | (tcp-server {:host host}) 10 | (udp-server {:host host}) 11 | (ws-server {:host host})) 12 | 13 | ; Expire old events from the index every 60 seconds. 14 | (periodically-expire 60) 15 | 16 | (let [index (index)] 17 | ; Inbound events will be passed to these streams: 18 | (streams 19 | 20 | (default :ttl 60 21 | ; Index all events immediately.
22 | index 23 | prn 24 | #(info %)) 25 | ) 26 | ) -------------------------------------------------------------------------------- /hw15/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | 3 | remote_user = kakoka 4 | private_key_file = /Users/kakoka/.ssh/id_rsa 5 | 6 | host_key_checking = false 7 | inventory = hosts.txt 8 | 9 | # roles_path = roles -------------------------------------------------------------------------------- /hw15/hosts.txt: -------------------------------------------------------------------------------- 1 | [masters] 2 | master.homework.local 3 | 4 | [clients] 5 | node1.homework.local 6 | node2.homework.local 7 | -------------------------------------------------------------------------------- /hw15/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw15/pic/pic01.png -------------------------------------------------------------------------------- /hw15/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw15/pic/pic02.png -------------------------------------------------------------------------------- /hw15/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw15/pic/pic03.png -------------------------------------------------------------------------------- /hw15/pic/pic04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw15/pic/pic04.png -------------------------------------------------------------------------------- /hw15/pic/pic05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw15/pic/pic05.png -------------------------------------------------------------------------------- /hw15/roles/add_grafana/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Deploying Grafana. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Role Variables 10 | 11 | - domain_name: `domain.name` 12 | 13 | #### 3. Example Playbook 14 | 15 | ``` 16 | --- 17 | - name: Do something 18 | hosts: servers 19 | 20 | roles: 21 | - { role: add_grafana } 22 | ``` -------------------------------------------------------------------------------- /hw15/roles/add_grafana/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for add_grafana 3 | domain_name: homework.local 4 | ... 5 | -------------------------------------------------------------------------------- /hw15/roles/add_grafana/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for add_grafana 3 | 4 | - name: Restart grafana 5 | service: 6 | name: grafana-server 7 | state: restarted 8 | ...
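The only tunable in add_grafana is domain_name; a usage sketch (hypothetical playbook, not in the repo) showing how it would be overridden for another environment:

```
- name: Deploy grafana with a custom domain
  hosts: masters
  become: yes

  roles:
    - { role: add_grafana, domain_name: example.org }
```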
-------------------------------------------------------------------------------- /hw15/roles/add_grafana/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for add_grafana 3 | 4 | - name: Install grafana 5 | yum: 6 | name: grafana 7 | state: latest 8 | 9 | - name: Enable grafana service 10 | systemd: 11 | name: grafana-server 12 | enabled: yes 13 | masked: no 14 | 15 | - name: Start grafana service 16 | systemd: 17 | name: grafana-server 18 | state: started 19 | 20 | - name: Configuration file 21 | template: 22 | src: grafana.ini.j2 23 | dest: /etc/grafana/grafana.ini 24 | notify: Restart grafana 25 | ... 26 | -------------------------------------------------------------------------------- /hw15/roles/add_nginx_server/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Installing nginx as a reverse proxy. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | - A local repository must be added before installing nginx. 9 | 10 | #### 2. Role Variables 11 | 12 | - nginx_port - server port, `80` 13 | - nginx_web_root - `/var/www/html` 14 | - nginx_conf_folder - `/etc/nginx` 15 | - nginx_conf_def_folder - `/etc/nginx/conf.d` 16 | 17 | #### 3. Example Playbook 18 | 19 | ``` 20 | --- 21 | - name: Nginx server deploy 22 | hosts: servers 23 | 24 | roles: 25 | - { role: add_nginx_server } 26 | ``` -------------------------------------------------------------------------------- /hw15/roles/add_nginx_server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for deploy_nginx_server 3 | nginx_port: 80 4 | nginx_web_root: /var/www/html/ 5 | nginx_conf_folder: /etc/nginx 6 | nginx_conf_def_folder: /etc/nginx/conf.d 7 | ... -------------------------------------------------------------------------------- /hw15/roles/add_nginx_server/files/nginx.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=A high performance web server and a reverse proxy server 3 | After=network.target 4 | 5 | [Service] 6 | Type=forking 7 | PIDFile=/var/run/nginx.pid 8 | ExecStartPre=/sbin/nginx -t -q -g 'daemon on; master_process on;' 9 | ExecStart=/sbin/nginx -g 'daemon on; master_process on;' 10 | ExecReload=/sbin/nginx -g 'daemon on; master_process on;' -s reload 11 | TimeoutStopSec=5 12 | KillMode=mixed 13 | 14 | [Install] 15 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /hw15/roles/add_nginx_server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for deploy_nginx_server 3 | 4 | - name: Restart nginx 5 | service: 6 | name: nginx 7 | state: restarted 8 | ... -------------------------------------------------------------------------------- /hw15/roles/add_nginx_server/templates/index.html.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Index of {{ ansible_fqdn }} server 5 | 6 | 7 | 8 | 9 |

This is the main page of server {{ ansible_fqdn }}<br>

10 |

Server name: {{ ansible_fqdn }}.

11 |

Server IP: {{ ansible_eth1.ipv4.address }}.<br>

12 | 13 | -------------------------------------------------------------------------------- /hw15/roles/add_node_exporter/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Deploying node_exporter. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Role Variables 10 | 11 | - node_exp_ver: 0.17.0 12 | - node_listen_address: "{{ ansible_eth0.ipv4.address }}:9100" 13 | - node_collector_dir: /var/lib/node-exporter/textfile_collector 14 | 15 | #### 3. Example Playbook 16 | 17 | ``` 18 | --- 19 | - name: Do something 20 | hosts: servers 21 | 22 | roles: 23 | - { role: add_node_exporter } 24 | ``` -------------------------------------------------------------------------------- /hw15/roles/add_node_exporter/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for add_node_exporter 3 | 4 | node_exp_ver: 0.17.0 5 | node_listen_address: "{{ ansible_eth0.ipv4.address }}:9100" 6 | node_collector_dir: /var/lib/node-exporter/textfile_collector 7 | ... 8 | -------------------------------------------------------------------------------- /hw15/roles/add_node_exporter/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for add_node_exporter 3 | - name: Restart node_exporter 4 | become: true 5 | systemd: 6 | daemon_reload: true 7 | name: node_exporter 8 | state: restarted 9 | ... 10 | -------------------------------------------------------------------------------- /hw15/roles/add_node_exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for add_node_exporter 3 | 4 | - include: install.yml 5 | become: true 6 | tags: 7 | - install 8 | 9 | - name: ensure node_exporter service is started and enabled 10 | become: true 11 | systemd: 12 | daemon_reload: true 13 | name: node_exporter 14 | state: started 15 | enabled: true 16 | tags: 17 | - run 18 | ... -------------------------------------------------------------------------------- /hw15/roles/add_node_exporter/templates/node_exporter.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Node-exporter 3 | Wants=network-online.target 4 | After=network-online.target 5 | 6 | [Service] 7 | User=prometheus 8 | Group=prometheus 9 | Type=simple 10 | ExecStart=/usr/sbin/node_exporter \ 11 | --collector.textfile.directory {{ node_collector_dir }} \ 12 | --web.listen-address={{ node_listen_address }} \ 13 | --web.telemetry-path=/metrics 14 | 15 | [Install] 16 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /hw15/roles/add_prometheus/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Deploying Prometheus. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Role Variables 10 | 11 | - prom_ver: 2.6.0 12 | - prom_user: prometheus 13 | - prom_group: prometheus 14 | - prom_conf_dir: /etc/prom 15 | - prom_db_dir: /var/lib/prom 16 | - prom_listen_address: "127.0.0.1:9090" 17 | - prom_ext_url: "{{ ansible_fqdn }}" 18 | - prom_retention: "14d" 19 | 20 | #### 3.
Example Playbook 21 | 22 | ``` 23 | --- 24 | - name: Do something 25 | hosts: servers 26 | 27 | roles: 28 | - { role: add_prometheus } 29 | ``` -------------------------------------------------------------------------------- /hw15/roles/add_prometheus/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for add_prometheus 3 | prom_ver: 2.6.0 4 | prom_user: prometheus 5 | prom_group: prometheus 6 | prom_conf_dir: /etc/prom 7 | prom_db_dir: /var/lib/prom 8 | prom_listen_address: "127.0.0.1:9090" 9 | prom_ext_url: "{{ ansible_fqdn }}" 10 | prom_retention: "14d" 11 | ... 12 | -------------------------------------------------------------------------------- /hw15/roles/add_prometheus/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for add_prometheus 3 | 4 | - name: Restart prometheus 5 | become: true 6 | systemd: 7 | daemon_reload: true 8 | name: prometheus 9 | state: restarted 10 | 11 | - name: Reload prometheus 12 | become: true 13 | systemd: 14 | name: prometheus 15 | state: reloaded 16 | ... 17 | -------------------------------------------------------------------------------- /hw15/roles/add_prometheus/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: alerting rules file 3 | template: 4 | src: "alert.rules.j2" 5 | dest: "{{ prom_conf_dir }}/alert.rules" 6 | owner: root 7 | group: prometheus 8 | mode: 0640 9 | # validate: "/usr/sbin/promtool check rules %s" 10 | notify: 11 | - Reload prometheus 12 | 13 | - name: configure prometheus 14 | template: 15 | src: "prometheus.yml.j2" 16 | dest: "{{ prom_conf_dir }}/prometheus.yml" 17 | force: true 18 | owner: root 19 | group: prometheus 20 | mode: 0640 21 | # validate: "/usr/sbin/promtool check config %s" 22 | notify: 23 | - Reload prometheus 24 | ... 25 | -------------------------------------------------------------------------------- /hw15/roles/add_prometheus/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for add_prometheus 3 | 4 | - include: install.yml 5 | become: true 6 | tags: 7 | - install 8 | 9 | - include: configure.yml 10 | become: true 11 | tags: 12 | - configure 13 | 14 | - name: ensure prometheus service is started and enabled 15 | become: true 16 | systemd: 17 | daemon_reload: true 18 | name: prometheus 19 | state: started 20 | enabled: true 21 | tags: 22 | - run 23 | ... 24 | -------------------------------------------------------------------------------- /hw15/roles/add_repos/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Adding extra repositories. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Role Variables 10 | 11 | The `repo` variable has the form 12 | 13 | ``` 14 | repo: 15 | - { name, key, url, file} 16 | ``` 17 | Example: 18 | 19 | ``` 20 | - { name: 'epel', key: 'https://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7', url: 'https://download.fedoraproject.org/pub/epel/$releasever/$basearch/', file: '/etc/yum.repos.d/epel.repo'} 21 | ``` 22 | 23 | #### 3.
Example Playbook 24 | 25 | ``` 26 | --- 27 | - name: Do something 28 | hosts: servers 29 | 30 | roles: 31 | - { role: add_repos } 32 | ``` -------------------------------------------------------------------------------- /hw15/roles/add_repos/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for add_repos 3 | 4 | repo: 5 | - { name: 'grafana', key: 'https://grafanarel.s3.amazonaws.com/RPM-GPG-KEY-grafana', url: 'https://packagecloud.io/grafana/stable/el/7/$basearch/', file: '/etc/yum.repos.d/grafana.repo'} 6 | - { name: 'nginx repo', key: 'https://nginx.org/keys/nginx_signing.key', url: 'https://nginx.org/packages/mainline/centos/7/$basearch', file: '/etc/yum.repos.d/nginx.repo'} 7 | ... -------------------------------------------------------------------------------- /hw15/setup-node-exp-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Setup node_exporter 4 | # 5 | 6 | - name: Setup node_exporter 7 | hosts: clients 8 | become: yes 9 | 10 | roles: 11 | - { role: add_node_exporter } 12 | ... 13 | -------------------------------------------------------------------------------- /hw15/setup-prom-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Setup a prometheus-grafana host 4 | # 5 | 6 | - name: Setup prometheus-grafana host 7 | hosts: masters 8 | become: yes 9 | 10 | roles: 11 | - { role: add_repos } 12 | - { role: add_prometheus } 13 | - { role: add_grafana } 14 | - { role: add_nginx_server } 15 | ... 16 | -------------------------------------------------------------------------------- /hw16/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Restart local network 4 | # 5 | 6 | - name: Restart network service 7 | hosts: all 8 | become: yes 9 | tasks: 10 | - name: Restart network 11 | service: 12 | name: network 13 | state: restarted 14 | ...
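Restarting the network service over SSH can drop the very connection Ansible is using, leaving the task hanging. A common workaround (sketch, not the repo's version, which evidently works on these lab VMs) is to fire the restart asynchronously and not wait for it:

```
- name: Restart network without waiting
  shell: sleep 2 && systemctl restart network
  async: 60   # let it run in the background for up to a minute
  poll: 0     # do not wait for the result
```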
15 | -------------------------------------------------------------------------------- /hw16/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic01.png -------------------------------------------------------------------------------- /hw16/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic02.png -------------------------------------------------------------------------------- /hw16/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic03.png -------------------------------------------------------------------------------- /hw16/pic/pic04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic04.png -------------------------------------------------------------------------------- /hw16/pic/pic05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic05.png -------------------------------------------------------------------------------- /hw16/pic/pic06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic06.png -------------------------------------------------------------------------------- /hw16/pic/pic07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic07.png -------------------------------------------------------------------------------- /hw16/pic/pic08.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic08.png -------------------------------------------------------------------------------- /hw16/pic/pic09.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic09.png -------------------------------------------------------------------------------- /hw16/pic/pic10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic10.png -------------------------------------------------------------------------------- /hw16/pic/pic11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic11.png -------------------------------------------------------------------------------- /hw16/pic/pic12.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw16/pic/pic12.png -------------------------------------------------------------------------------- /hw17/docker-compose/code/index.php: -------------------------------------------------------------------------------- 1 | /usr/share/nginx/html/index.html 13 | 14 | EXPOSE 8080 15 | CMD ["nginx", "-g", "daemon off;"] 16 | -------------------------------------------------------------------------------- /hw17/docker/nginx/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8080; 3 | server_name localhost; 4 | 5 | location / { 6 | root /usr/share/nginx/html; 7 | index index.html index.htm; 8 | } 9 | 10 | error_page 404 /404.html; 11 | 12 | # redirect server error pages to the static page /50x.html 13 | # 14 | error_page 500 502 503 504 /50x.html; 15 | location = /50x.html { 16 | root /usr/share/nginx/html; 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /hw17/docker/php/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:latest 2 | 3 | MAINTAINER Pavel Konotopov 4 | 5 | ENV PHP_VERSION=7 \ 6 | PHP_FPM_USER=www-data 7 | 8 | RUN apk update \ 9 | && apk upgrade \ 10 | && apk add php${PHP_VERSION}-fpm php${PHP_VERSION}-cli php${PHP_VERSION}-gd \ 11 | && mkdir -p /run/php/ \ 12 | && sed -i 's/^listen = .*/listen = 0.0.0.0:9000/' /etc/php${PHP_VERSION}/php-fpm.d/www.conf 13 | 14 | EXPOSE 9000 15 | 16 | CMD ["/usr/sbin/php-fpm7", "-F"] 17 | -------------------------------------------------------------------------------- /hw17/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw17/pic/pic01.png -------------------------------------------------------------------------------- /hw18/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Restart local network 4 | # 5 | 6 | - name: Restart network service 7 | hosts: all 8 | become: yes 9 | tasks: 10 | - name: Restart network 11 | service: 12 | name: network 13 | state: restarted 14 | sleep: 3 15 | ... 
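hw17 builds two images, an nginx front end listening on 8080 and a php-fpm backend exposing 9000, and the docker-compose/code directory suggests they are started together with a compose file that is not shown in this dump. A minimal hypothetical sketch of such a docker-compose.yml (service names and mount paths are assumptions):

```
version: '3'
services:
  php:
    build: ./docker/php
    volumes:
      - ./docker-compose/code:/code   # assumed mount point for index.php
  web:
    build: ./docker/nginx
    ports:
      - "8080:8080"
    depends_on:
      - php
```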
16 | -------------------------------------------------------------------------------- /hw18/pic/pic01.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw18/pic/pic01.gif -------------------------------------------------------------------------------- /hw18/pic/pic02.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw18/pic/pic02.gif -------------------------------------------------------------------------------- /hw19/etc/daemons: -------------------------------------------------------------------------------- 1 | zebra=yes 2 | ospfd=yes 3 | bgpd=no 4 | ospf6d=no 5 | ripd=no 6 | ripngd=no -------------------------------------------------------------------------------- /hw19/etc/hosts: -------------------------------------------------------------------------------- 1 | 192.168.10.1 router1 router1.area0 router1-eth11 2 | 192.168.10.10 router1-eth13 3 | 10.10.1.1 router1-eth2 4 | 192.168.10.2 router2 router2.area0 router2-eth11 5 | 192.168.10.5 router2-eth12 6 | 10.10.2.1 router2-eth2 7 | 192.168.10.6 router3 router3.area0 router3-eth12 8 | 192.168.10.9 router3-eth13 9 | 10.10.3.1 router3-eth2 10 | 10.10.1.2 client1.area1 11 | 10.10.3.2 client1.area3 12 | 172.16.10.100 router1 13 | 172.16.10.101 router2 14 | 172.16.10.102 router3 15 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 16 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 -------------------------------------------------------------------------------- /hw19/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Restart local network 4 | # 5 | 6 | - name: Restart network service 7 | hosts: all 8 | become: yes 9 | tasks: 10 | - name: Restart network 11 | service: 12 | name: network 13 | state: restarted 14 | sleep: 3 15 | ... 16 | -------------------------------------------------------------------------------- /hw19/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw19/pic/pic01.png -------------------------------------------------------------------------------- /hw19/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw19/pic/pic02.png -------------------------------------------------------------------------------- /hw19/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw19/pic/pic03.png -------------------------------------------------------------------------------- /hw19/router1/ospfd.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 11:38:03 4 | ! 5 | hostname Router1 6 | log file /var/log/quagga/ospfd.log 7 | ! 8 | ! 9 | ! 10 | interface 1.3 11 | ! 12 | interface eth0 13 | ! 14 | interface eth1 15 | ! 16 | interface eth1.1 17 | ! 18 | interface eth1.3 19 | ! 20 | interface eth2 21 | ip ospf cost 1 22 | ! 
23 | interface lo 24 | ip ospf cost 1000 25 | ! 26 | router ospf 27 | ospf router-id 172.16.10.100 28 | log-adjacency-changes 29 | redistribute connected 30 | network 10.10.1.0/24 area 0.0.0.1 31 | network 192.168.10.0/30 area 0.0.0.0 32 | network 192.168.10.8/30 area 0.0.0.0 33 | ! 34 | line vty 35 | ! -------------------------------------------------------------------------------- /hw19/router1/zebra.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 11:38:03 4 | ! 5 | hostname Router1 6 | log file /var/log/quagga/zebra.log 7 | ! 8 | interface 1.3 9 | ipv6 nd suppress-ra 10 | ! 11 | interface eth0 12 | ipv6 nd suppress-ra 13 | ! 14 | interface eth1 15 | ipv6 nd suppress-ra 16 | ! 17 | interface eth1.1 18 | ip address 192.168.10.1/30 19 | ipv6 nd suppress-ra 20 | ! 21 | interface eth1.3 22 | ip address 192.168.10.10/30 23 | ipv6 nd suppress-ra 24 | ! 25 | interface eth2 26 | ip address 10.10.1.1/24 27 | ipv6 nd suppress-ra 28 | ! 29 | interface lo 30 | ip address 172.16.10.100/32 31 | ! 32 | router-id 172.16.10.100 33 | ip forwarding 34 | ! 35 | ! 36 | line vty 37 | ! -------------------------------------------------------------------------------- /hw19/router2/ospfd.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 11:37:17 4 | ! 5 | hostname Router2 6 | log file /var/log/quagga/ospfd.log 7 | ! 8 | ! 9 | ! 10 | interface eth0 11 | ! 12 | interface eth1 13 | ! 14 | interface eth1.1 15 | ip ospf mtu-ignore 16 | ! 17 | interface eth1.2 18 | ip ospf mtu-ignore 19 | ! 20 | interface eth2 21 | ip ospf cost 1 22 | ! 23 | interface lo 24 | ip ospf cost 1000 25 | ! 26 | router ospf 27 | ospf router-id 172.16.10.101 28 | log-adjacency-changes 29 | redistribute connected 30 | network 10.10.2.0/24 area 0.0.0.2 31 | network 192.168.10.0/30 area 0.0.0.0 32 | network 192.168.10.4/30 area 0.0.0.0 33 | ! 34 | line vty 35 | ! -------------------------------------------------------------------------------- /hw19/router2/zebra.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 11:37:17 4 | ! 5 | hostname Router2 6 | log file /var/log/quagga/zebra.log 7 | ! 8 | interface eth0 9 | ipv6 nd suppress-ra 10 | ! 11 | interface eth1 12 | ipv6 nd suppress-ra 13 | ! 14 | interface eth1.1 15 | ip address 192.168.10.2/30 16 | ipv6 nd suppress-ra 17 | ! 18 | interface eth1.2 19 | ip address 192.168.10.5/30 20 | ipv6 nd suppress-ra 21 | ! 22 | interface eth2 23 | ip address 10.10.2.1/24 24 | ipv6 nd suppress-ra 25 | ! 26 | interface lo 27 | ip address 172.16.10.101/32 28 | ! 29 | router-id 172.16.10.101 30 | ip forwarding 31 | ! 32 | ! 33 | line vty 34 | ! -------------------------------------------------------------------------------- /hw19/router3/ospfd.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 13:28:32 4 | ! 5 | hostname Router3 6 | log file /var/log/quagga/ospfd.log 7 | ! 8 | ! 9 | ! 10 | interface eth0 11 | ! 12 | interface eth1 13 | ! 14 | interface eth1.2 15 | ! 16 | interface eth1.3 17 | ! 18 | interface eth2 19 | ip ospf cost 1 20 | ! 21 | interface lo 22 | ip ospf cost 1000 23 | ! 
24 | router ospf 25 | ospf router-id 172.16.10.102 26 | log-adjacency-changes 27 | redistribute connected 28 | network 10.10.3.0/24 area 0.0.0.3 29 | network 192.168.10.4/30 area 0.0.0.0 30 | network 192.168.10.8/30 area 0.0.0.0 31 | ! 32 | line vty 33 | ! -------------------------------------------------------------------------------- /hw19/router3/zebra.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 13:28:32 4 | ! 5 | hostname Router3 6 | log file /var/log/quagga/zebra.log 7 | ! 8 | interface eth0 9 | ipv6 nd suppress-ra 10 | ! 11 | interface eth1 12 | ipv6 nd suppress-ra 13 | ! 14 | interface eth1.2 15 | ip address 192.168.10.6/30 16 | ipv6 nd suppress-ra 17 | ! 18 | interface eth1.3 19 | ip address 192.168.10.9/30 20 | ipv6 nd suppress-ra 21 | ! 22 | interface eth2 23 | ip address 10.10.3.1/24 24 | ipv6 nd suppress-ra 25 | ! 26 | interface lo 27 | ip address 172.16.10.102/32 28 | ! 29 | router-id 172.16.10.102 30 | ip forwarding 31 | ! 32 | ! 33 | line vty 34 | ! -------------------------------------------------------------------------------- /hw20/ca/5in1: -------------------------------------------------------------------------------- 1 | c_conf=$(cat client.tmpl) 2 | c_cert=$(cat pki/issued/client.crt) 3 | c_key=$(cat pki/private/client.key) 4 | ca=$(cat pki/ca.crt) 5 | ta=$(cat static.key) 6 | 7 | 8 | cat <<EOF > client.conf 9 | $c_conf 10 | 11 | EOF 12 | 13 | cat <<EOF >> client.conf 14 | <ca> 15 | $ca 16 | </ca> 17 | EOF 18 | 19 | cat <<EOF >> client.conf 20 | <tls-auth> 21 | $ta 22 | </tls-auth> 23 | EOF 24 | 25 | cat <<EOF >> client.conf 26 | <cert> 27 | $c_cert 28 | </cert> 29 | EOF 30 | 31 | cat <<EOF >> client.conf 32 | <key> 33 | $c_key 34 | </key> 35 | 36 | EOF -------------------------------------------------------------------------------- /hw20/ca/client.tmpl: -------------------------------------------------------------------------------- 1 | tls-client 2 | proto udp 3 | dev tun 4 | topology subnet 5 | 6 | remote 172.16.10.101 7 | port 1194 8 | cd /etc/openvpn 9 | pull 10 | 11 | tls-client 12 | tls-auth ta.key 1 13 | keepalive 10 120 14 | comp-lzo 15 | persist-key 16 | persist-tun 17 | ping-timer-rem 18 | cipher AES-256-CBC 19 | 20 | user nobody 21 | group nobody 22 | 23 | status /var/run/openvpn-status.log 24 | log-append /var/log/openvpn/openvpn.log 25 | verb 3 26 | -------------------------------------------------------------------------------- /hw20/ca/vars: -------------------------------------------------------------------------------- 1 | set_var EASYRSA_REQ_COUNTRY "RU" 2 | set_var EASYRSA_REQ_PROVINCE "Msk" 3 | set_var EASYRSA_REQ_CITY "Msk" 4 | set_var EASYRSA_REQ_ORG "HomeworkLocal" 5 | set_var EASYRSA_REQ_EMAIL "adm@homework.local" 6 | set_var EASYRSA_REQ_OU "Servers" 7 | set_var EASYRSA_REQ_CN "HWL" 8 | set_var EASYRSA_KEY_SIZE 2048 9 | set_var EASYRSA_BATCH "Yes" -------------------------------------------------------------------------------- /hw20/etc/daemons: -------------------------------------------------------------------------------- 1 | zebra=yes 2 | ospfd=yes 3 | bgpd=no 4 | ospf6d=no 5 | ripd=no 6 | ripngd=no -------------------------------------------------------------------------------- /hw20/etc/hosts: -------------------------------------------------------------------------------- 1 | 192.168.10.1 router1 router1.area0 router1-eth11 2 | 192.168.10.10 router1-eth13 3 | 10.10.1.1 router1-eth2 4 | 192.168.10.2 router2 router2.area0 router2-eth11 5 | 192.168.10.5 router2-eth12 6 | 10.10.2.1 router2-eth2 7 | 192.168.10.6 router3
router3.area0 router3-eth12 8 | 192.168.10.9 router3-eth13 9 | 10.10.3.1 router3-eth2 10 | 10.10.1.2 client1.area1 11 | 10.10.3.2 client1.area3 12 | 172.16.10.100 router1 13 | 172.16.10.101 router2 14 | 172.16.10.102 router3 15 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 16 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 17 | -------------------------------------------------------------------------------- /hw20/fetch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Transfer OpenVPN cert from server to client 3 | hosts: all 4 | become: yes 5 | vars: 6 | temporary_local_path: /tmp 7 | project_local_path: /home/kakoka/git/otus-homework/hw20/ca 8 | tasks: 9 | - name: make temp dir 10 | file: 11 | path: "{{ temporary_local_path }}" 12 | state: directory 13 | delegate_to: 127.0.0.1 14 | - fetch: 15 | become: yes 16 | src: /etc/openvpn/server/client.conf 17 | dest: "{{ temporary_local_path }}/client.conf" 18 | flat: yes 19 | - name: client.conf file copy to project dir 20 | local_action: command cp "{{ temporary_local_path }}/client.conf" "{{ project_local_path }}" 21 | ... 22 | -------------------------------------------------------------------------------- /hw20/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw20/pic/pic01.png -------------------------------------------------------------------------------- /hw20/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw20/pic/pic02.png -------------------------------------------------------------------------------- /hw20/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw20/pic/pic03.png -------------------------------------------------------------------------------- /hw20/router1/ospfd.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 11:38:03 4 | ! 5 | hostname Router1 6 | log file /var/log/quagga/ospfd.log 7 | ! 8 | ! 9 | ! 10 | interface eth0 11 | ! 12 | interface eth1 13 | ! 14 | interface eth1.1 15 | ! 16 | interface eth1.3 17 | ! 18 | interface eth2 19 | ip ospf cost 1 20 | ! 21 | interface lo 22 | ip ospf cost 1000 23 | ! 24 | router ospf 25 | ospf router-id 172.16.10.100 26 | log-adjacency-changes 27 | redistribute connected 28 | network 10.10.1.0/24 area 0.0.0.1 29 | network 192.168.10.0/30 area 0.0.0.0 30 | network 192.168.10.8/30 area 0.0.0.0 31 | ! 32 | line vty 33 | ! -------------------------------------------------------------------------------- /hw20/router1/zebra.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 11:38:03 4 | ! 5 | hostname Router1 6 | log file /var/log/quagga/zebra.log 7 | ! 8 | interface eth0 9 | ipv6 nd suppress-ra 10 | ! 11 | interface eth1 12 | ipv6 nd suppress-ra 13 | ! 14 | interface eth1.1 15 | ip address 192.168.10.1/30 16 | ipv6 nd suppress-ra 17 | ! 18 | interface eth1.3 19 | ip address 192.168.10.10/30 20 | ipv6 nd suppress-ra 21 | ! 
22 | interface eth2 23 | ip address 10.10.1.1/24 24 | ipv6 nd suppress-ra 25 | ! 26 | interface lo 27 | ip address 172.16.10.100/32 28 | ! 29 | router-id 172.16.10.100 30 | ip forwarding 31 | ! 32 | ! 33 | line vty 34 | ! -------------------------------------------------------------------------------- /hw20/router2/ospfd.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 11:37:17 4 | ! 5 | hostname Router2 6 | log file /var/log/quagga/ospfd.log 7 | ! 8 | ! 9 | ! 10 | interface eth0 11 | ! 12 | interface eth1 13 | ! 14 | interface eth1.1 15 | ! 16 | interface eth1.2 17 | ! 18 | interface eth2 19 | ip ospf cost 1 20 | ! 21 | interface tun0 22 | ip ospf cost 1 23 | ! 24 | interface lo 25 | ip ospf cost 1000 26 | ! 27 | router ospf 28 | ospf router-id 172.16.10.101 29 | log-adjacency-changes 30 | redistribute connected 31 | network 10.10.8.0/24 area 0.0.0.4 32 | network 10.10.2.0/24 area 0.0.0.2 33 | network 192.168.10.0/30 area 0.0.0.0 34 | network 192.168.10.4/30 area 0.0.0.0 35 | ! 36 | line vty 37 | ! 38 | -------------------------------------------------------------------------------- /hw20/router2/zebra.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 11:37:17 4 | ! 5 | hostname Router2 6 | log file /var/log/quagga/zebra.log 7 | ! 8 | interface eth0 9 | ipv6 nd suppress-ra 10 | ! 11 | interface eth1 12 | ipv6 nd suppress-ra 13 | ! 14 | interface eth1.1 15 | ip address 192.168.10.2/30 16 | ipv6 nd suppress-ra 17 | ! 18 | interface eth1.2 19 | ip address 192.168.10.5/30 20 | ipv6 nd suppress-ra 21 | ! 22 | interface eth2 23 | ip address 10.10.2.1/24 24 | ipv6 nd suppress-ra 25 | ! 26 | interface tun0 27 | ip address 10.10.8.1/24 28 | ipv6 nd suppress-ra 29 | ! 30 | interface lo 31 | ip address 172.16.10.101/32 32 | ! 33 | router-id 172.16.10.101 34 | ip forwarding 35 | ! 36 | ! 37 | line vty 38 | ! 39 | -------------------------------------------------------------------------------- /hw20/router3/ospfd.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 13:28:32 4 | ! 5 | hostname Router3 6 | log file /var/log/quagga/ospfd.log 7 | ! 8 | ! 9 | ! 10 | interface eth0 11 | ! 12 | interface eth1 13 | ! 14 | interface eth1.2 15 | ! 16 | interface eth1.3 17 | ! 18 | interface eth2 19 | ip ospf cost 1 20 | ! 21 | interface lo 22 | ip ospf cost 1000 23 | ! 24 | router ospf 25 | ospf router-id 172.16.10.102 26 | log-adjacency-changes 27 | redistribute connected 28 | network 10.10.3.0/24 area 0.0.0.3 29 | network 192.168.10.4/30 area 0.0.0.0 30 | network 192.168.10.8/30 area 0.0.0.0 31 | ! 32 | line vty 33 | ! -------------------------------------------------------------------------------- /hw20/router3/zebra.conf: -------------------------------------------------------------------------------- 1 | ! 2 | ! Zebra configuration saved from vty 3 | ! 2019/01/21 13:28:32 4 | ! 5 | hostname Router3 6 | log file /var/log/quagga/zebra.log 7 | ! 8 | interface eth0 9 | ipv6 nd suppress-ra 10 | ! 11 | interface eth1 12 | ipv6 nd suppress-ra 13 | ! 14 | interface eth1.2 15 | ip address 192.168.10.6/30 16 | ipv6 nd suppress-ra 17 | ! 18 | interface eth1.3 19 | ip address 192.168.10.9/30 20 | ipv6 nd suppress-ra 21 | ! 22 | interface eth2 23 | ip address 10.10.3.1/24 24 | ipv6 nd suppress-ra 25 | ! 
26 | interface lo 27 | ip address 172.16.10.102/32 28 | ! 29 | router-id 172.16.10.102 30 | ip forwarding 31 | ! 32 | ! 33 | line vty 34 | ! -------------------------------------------------------------------------------- /hw20/secret/static.key: -------------------------------------------------------------------------------- 1 | # 2 | # 2048 bit OpenVPN static key 3 | # 4 | -----BEGIN OpenVPN Static key V1----- 5 | 9852d621fc1246b5d5e88ad31c6fa8e8 6 | a326927906d331f8a29c816f032e4b8d 7 | 9f0d752148f9f1b83f2faa74a929dbaf 8 | c1eb03947838ec32db0784ffa271b829 9 | 8a37e6a00c447330f92f4778da367b25 10 | 2486a46b95d22b8022f326eded908637 11 | e0361b3dad0e237f8fa48d2e02fe2942 12 | 4b5d5be177507be797d4e27a5925ef88 13 | e26e255beb2f126a36fc19e600980f0b 14 | b33c96cef6f7b676eff4d223c892e3ab 15 | 6f06661dad098f0324379a47f0b7e8b0 16 | a39ca932cee43b67a3e6a288a91a0058 17 | bd6a574af83710400efd26825eac3189 18 | f87089e427ff5969fb807fab47f15538 19 | abe7f8f2e3ab7a1dbf23ada3388404e9 20 | 2a579150e4f40001fd0430e31265668a 21 | -----END OpenVPN Static key V1----- 22 | -------------------------------------------------------------------------------- /hw20/tap/client.conf: -------------------------------------------------------------------------------- 1 | dev tap 2 | remote client1.area1 3 | ifconfig 10.10.10.2 255.255.255.0 4 | topology subnet 5 | secret static.key 6 | status /var/run/openvpn-status.log 7 | log /var/log/openvpn.log 8 | verb 3 9 | -------------------------------------------------------------------------------- /hw20/tap/server.conf: -------------------------------------------------------------------------------- 1 | dev tap 2 | ifconfig 10.10.10.1 255.255.255.0 3 | topology subnet 4 | secret static.key 5 | status /var/run/openvpn-status.log 6 | log /var/log/openvpn.log 7 | verb 3 8 | -------------------------------------------------------------------------------- /hw20/tun/client.conf: -------------------------------------------------------------------------------- 1 | dev tun 2 | remote client1.area1 3 | ifconfig 10.10.10.2 10.10.10.1 4 | secret static.key 5 | -------------------------------------------------------------------------------- /hw20/tun/server.conf: -------------------------------------------------------------------------------- 1 | dev tun 2 | ifconfig 10.10.10.1 10.10.10.2 3 | secret static.key -------------------------------------------------------------------------------- /hw21/provisioning/client-motd: -------------------------------------------------------------------------------- 1 | ### Welcome to the DNS lab! ### 2 | 3 | - Use this client to test the environment, with dig or nslookup. 4 | dig @192.168.50.10 ns01.dns.lab 5 | dig @192.168.50.11 -x 192.168.50.10 6 | 7 | - nsupdate is available in the ddns.lab zone. Ex: 8 | nsupdate -k /etc/named.zonetransfer.key 9 | server 192.168.50.10 10 | zone ddns.lab 11 | update add www.ddns.lab. 60 A 192.168.50.15 12 | send 13 | 14 | - rndc is also available to manage the servers 15 | rndc -c ~/rndc.conf reload 16 | 17 | Enjoy!
18 | -------------------------------------------------------------------------------- /hw21/provisioning/client-resolv.conf: -------------------------------------------------------------------------------- 1 | domain dns.lab 2 | search dns.lab 3 | nameserver 192.168.50.10 4 | nameserver 192.168.50.11 5 | -------------------------------------------------------------------------------- /hw21/provisioning/named.client.dns.lab: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN dns.lab. 3 | @ IN SOA ns01.dns.lab. root.dns.lab. ( 4 | 2901201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | 11 | IN NS ns01.dns.lab. 12 | IN NS ns02.dns.lab. 13 | 14 | ; DNS Servers 15 | ns01 IN A 192.168.50.10 16 | ns02 IN A 192.168.50.11 17 | web1 IN A 192.168.50.15 18 | -------------------------------------------------------------------------------- /hw21/provisioning/named.client.dns.lab.rev: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN 50.168.192.in-addr.arpa. 3 | 50.168.192.in-addr.arpa. IN SOA ns01.dns.lab. root.dns.lab. ( 4 | 2901201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | 11 | IN NS ns01.dns.lab. 12 | IN NS ns02.dns.lab. 13 | 14 | ; DNS Servers 15 | 10 IN PTR ns01.dns.lab. 16 | 11 IN PTR ns02.dns.lab. 17 | 15 IN PTR web1.dns.lab. 18 | -------------------------------------------------------------------------------- /hw21/provisioning/named.ddns.lab: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN ddns.lab. 3 | @ IN SOA ns01.dns.lab. root.dns.lab. ( 4 | 2711201407 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | 11 | IN NS ns01.dns.lab. 12 | IN NS ns02.dns.lab. 13 | 14 | ; DNS Servers 15 | ns01 IN A 192.168.50.10 16 | ns02 IN A 192.168.50.11 17 | -------------------------------------------------------------------------------- /hw21/provisioning/named.dns.lab: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN dns.lab. 3 | @ IN SOA ns01.dns.lab. root.dns.lab. ( 4 | 2711201408 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | 11 | IN NS ns01.dns.lab. 12 | IN NS ns02.dns.lab. 
13 | 14 | ; DNS Servers 15 | ns01 IN A 192.168.50.10 16 | ns02 IN A 192.168.50.11 17 | web1 IN A 192.168.50.15 18 | web2 IN A 192.168.50.16 19 | -------------------------------------------------------------------------------- /hw21/provisioning/named.zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /hw21/provisioning/rndc.conf: -------------------------------------------------------------------------------- 1 | key "rndc-key" { 2 | algorithm hmac-md5; 3 | secret "GrtiE9kz16GK+OKKU/qJvQ=="; 4 | }; 5 | 6 | options { 7 | default-key "rndc-key"; 8 | default-server 192.168.50.10; 9 | }; 10 | -------------------------------------------------------------------------------- /hw21/provisioning/servers-resolv.conf: -------------------------------------------------------------------------------- 1 | domain dns.lab 2 | search dns.lab 3 | nameserver 127.0.0.1 4 | -------------------------------------------------------------------------------- /hw21/provisioning/zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /hw22/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw22/pic/pic01.png -------------------------------------------------------------------------------- /hw22/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw22/pic/pic02.png -------------------------------------------------------------------------------- /hw22/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw22/pic/pic03.png -------------------------------------------------------------------------------- /hw23/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | index index.html index.htm; 5 | root /usr/share/nginx/html; 6 | access_log /var/log/nginx/host.access.log main; 7 | 8 | location / { 9 | if ($cookie_id != "123") { 10 | return 302 $scheme://$server_addr/get_cookie; 11 | } 12 | } 13 | location /get_cookie { 14 | add_header Set-Cookie "id=123"; 15 | # send the visitor back to the root; redirecting to $request_uri here would point at /get_cookie again and loop 16 | return 302 $scheme://$server_addr/; 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /hw23/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw23/pic/pic01.png -------------------------------------------------------------------------------- /hw23/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw23/pic/pic02.png
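A quick way to exercise the hw23/default.conf cookie gate above, assuming the nginx container is reachable on localhost:80 (host and port here are assumptions, not part of the homework files):

    # First request without the cookie: the root location answers 302 -> /get_cookie.
    curl -sI http://localhost/ | grep -E '^(HTTP|Location)'
    # /get_cookie hands out id=123 and bounces the visitor back to the root.
    curl -sI http://localhost/get_cookie | grep -E '^(HTTP|Location|Set-Cookie)'
    # With the cookie supplied, the content is served directly.
    curl -sI -b 'id=123' http://localhost/ | grep '^HTTP'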
-------------------------------------------------------------------------------- /hw24/Docker/.env: -------------------------------------------------------------------------------- 1 | MYSQL_ROOT_PASSWORD=swimming3 2 | MYSQL_ROOT_HOST=% -------------------------------------------------------------------------------- /hw24/Docker/js/cluster-setup.js: -------------------------------------------------------------------------------- 1 | dbPass = "swimming3"; 2 | try { 3 | 4 | shell.connect('root@node01:3306', dbPass); 5 | cluster = dba.createCluster("otuscluster"); 6 | cluster.addInstance({user: "root", host: "node02", password: dbPass}); 7 | cluster.addInstance({user: "root", host: "node03", password: dbPass}); 8 | 9 | } catch(e) { 10 | print('\nThe InnoDB cluster could not be created.\n\nError: ' + e.message + '\n'); 11 | } -------------------------------------------------------------------------------- /hw24/Docker/router.env: -------------------------------------------------------------------------------- 1 | MYSQL_USER=root 2 | MYSQL_PASSWORD=swimming3 3 | MYSQL_HOST=node01 4 | MYSQL_PORT=3306 5 | MYSQL_INNODB_NUM_MEMBERS=3 -------------------------------------------------------------------------------- /hw24/Swarm/playbook.retry: -------------------------------------------------------------------------------- 1 | master 2 | -------------------------------------------------------------------------------- /hw24/Swarm/worker_token.yml: -------------------------------------------------------------------------------- 1 | worker_token: 'SWMTKN-1-1klyiccq3b7gvoz6fm93m1ookizaaebjq7rqshmoe8q1y45xd6-ebzup45tgx5ymwb8t0qn6wedp' -------------------------------------------------------------------------------- /hw24/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw24/pic/pic01.png -------------------------------------------------------------------------------- /hw24/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw24/pic/pic02.png -------------------------------------------------------------------------------- /hw24/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw24/pic/pic03.png -------------------------------------------------------------------------------- /hw24/pic/pic04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw24/pic/pic04.png -------------------------------------------------------------------------------- /hw24/pic/pic05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw24/pic/pic05.png -------------------------------------------------------------------------------- /hw24/provisioning/etc/hosts: -------------------------------------------------------------------------------- 1 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 2 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 
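The cluster-setup.js script above runs under MySQL Shell; a minimal sketch of bootstrapping and then checking the InnoDB cluster, assuming mysqlsh is installed and the node01-node03 names resolve:

    # Create the otuscluster InnoDB cluster defined in js/cluster-setup.js.
    mysqlsh --js --file js/cluster-setup.js
    # Inspect cluster health afterwards (prompts for the root password).
    mysqlsh --uri root@node01:3306 --js -e 'print(dba.getCluster("otuscluster").status())'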
-------------------------------------------------------------------------------- /hw24/provisioning/etc/named.otus.test: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN otus.test. 3 | @ IN SOA ns.otus.test. root.otus.test. ( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | IN NS ns.otus.test. 11 | ; DNS Servers 12 | ns IN A 192.168.50.10 13 | ; Hosts 14 | router IN A 192.168.50.100 15 | node01 IN A 192.168.50.101 16 | node02 IN A 192.168.50.102 17 | node03 IN A 192.168.50.103 -------------------------------------------------------------------------------- /hw24/provisioning/etc/named.zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /hw24/provisioning/etc/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 192.168.50.10 2 | domain otus.test -------------------------------------------------------------------------------- /hw24/provisioning/mysql-server/.my.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | user=root 3 | password=playable8-Congenial0 -------------------------------------------------------------------------------- /hw24/provisioning/mysql-server/mysqld.te: -------------------------------------------------------------------------------- 1 | 2 | module mysqld 1.0; 3 | 4 | require { 5 | type ephemeral_port_t; 6 | type mysqld_t; 7 | class tcp_socket name_connect; 8 | } 9 | 10 | #============= mysqld_t ============== 11 | 12 | #!!!! This avc can be allowed using one of these booleans: 13 | # nis_enabled, mysql_connect_any 14 | allow mysqld_t ephemeral_port_t:tcp_socket name_connect; 15 | -------------------------------------------------------------------------------- /hw24/provisioning/playbook.retry: -------------------------------------------------------------------------------- 1 | router 2 | -------------------------------------------------------------------------------- /hw24/provisioning/selinux.yml: -------------------------------------------------------------------------------- 1 | - name: Copy SELinux type enforcement file 2 | copy: 3 | src: mysql-server/mysqld.te 4 | dest: /tmp 5 | - name: Compile SELinux module file 6 | command: checkmodule -M -m -o /tmp/mysqld.mod /tmp/mysqld.te 7 | - name: Build SELinux policy package 8 | command: semodule_package -o /tmp/mysqld.pp -m /tmp/mysqld.mod 9 | - name: Load SELinux policy package 10 | command: semodule -i /tmp/mysqld.pp 11 | - name: Remove temporary files 12 | file: 13 | path: "{{ item }}" 14 | state: absent 15 | loop: 16 | - /tmp/mysqld.te 17 | - /tmp/mysqld.mod 18 | - /tmp/mysqld.pp -------------------------------------------------------------------------------- /hw24/provisioning/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mysql_root_password: 'playable8-Congenial0' 3 | mysql_cluster_admin_user: 'cadmin' 4 | mysql_cluster_admin_password: 'convoy-Punk0' 5 | mysql_cluster_dbuser: cuser 6 | mysql_cluster_dbuser_password: Prods8-3Upstage 7 | mysql_log_file: '/var/log/mysqld.log' 8 | mysql_nodes: 9 | - node01 10 | - node02 11 | - node03 12 | ...
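The selinux.yml tasks above correspond to this manual sequence, handy when debugging the module on a single node (same file names as in the play):

    checkmodule -M -m -o /tmp/mysqld.mod /tmp/mysqld.te    # compile the type enforcement source
    semodule_package -o /tmp/mysqld.pp -m /tmp/mysqld.mod  # wrap it into a policy package
    semodule -i /tmp/mysqld.pp                             # load it into the active policy
    semodule -l | grep mysqld                              # confirm the module is installed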
-------------------------------------------------------------------------------- /hw25/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw25/pic/pic01.png -------------------------------------------------------------------------------- /hw25/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw25/pic/pic02.png -------------------------------------------------------------------------------- /hw25/provisioning/etc/NetworkManager.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | dns=none -------------------------------------------------------------------------------- /hw25/provisioning/etc/hosts: -------------------------------------------------------------------------------- 1 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 2 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 -------------------------------------------------------------------------------- /hw25/provisioning/etc/named.otus.test: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN otus.test. 3 | @ IN SOA ns.otus.test. root.otus.test. ( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | IN NS ns.otus.test. 11 | ; DNS Servers 12 | ns IN A 192.168.50.100 13 | ; Hosts 14 | master IN A 192.168.50.100 15 | slave IN A 192.168.50.101 16 | -------------------------------------------------------------------------------- /hw25/provisioning/etc/named.otus.test.rev: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN 50.168.192.in-addr.arpa. 3 | 50.168.192.in-addr.arpa. IN SOA ns.otus.test. root.otus.test. ( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | 11 | IN NS ns.otus.test. 12 | ; DNS Servers 13 | 100 IN PTR ns.otus.test. 14 | ; Hosts 15 | 100 IN PTR master.otus.test. 16 | 101 IN PTR slave.otus.test. 
17 | -------------------------------------------------------------------------------- /hw25/provisioning/etc/named.zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /hw25/provisioning/etc/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 192.168.50.100 2 | domain otus.test -------------------------------------------------------------------------------- /hw25/provisioning/mysql-server/.my.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | user=root 3 | password=swimming3 -------------------------------------------------------------------------------- /hw25/provisioning/mysql-server/master.my.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | 3 | binlog-checksum=crc32 4 | gtid-mode=on 5 | enforce-gtid-consistency=true 6 | log-slave-updates=true 7 | server-id = 1 8 | replicate-do-db=bet 9 | replicate-do-table=bet.bookmaker 10 | replicate-do-table=bet.competition 11 | replicate-do-table=bet.market 12 | replicate-do-table=bet.odds 13 | replicate-do-table=bet.outcome 14 | 15 | datadir=/var/lib/mysql 16 | socket=/var/lib/mysql/mysql.sock 17 | 18 | log-error=/var/log/mysqld.log 19 | pid-file=/var/run/mysqld/mysqld.pid -------------------------------------------------------------------------------- /hw25/provisioning/mysql-server/mysqld.te: -------------------------------------------------------------------------------- 1 | 2 | module mysqld 1.0; 3 | 4 | require { 5 | type ephemeral_port_t; 6 | type mysqld_t; 7 | class tcp_socket name_connect; 8 | } 9 | 10 | #============= mysqld_t ============== 11 | 12 | #!!!! 
This avc can be allowed using one of these booleans: 13 | # nis_enabled, mysql_connect_any 14 | allow mysqld_t ephemeral_port_t:tcp_socket name_connect; 15 | -------------------------------------------------------------------------------- /hw25/provisioning/mysql-server/slave.my.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | 3 | binlog-checksum=crc32 4 | gtid-mode=on 5 | enforce-gtid-consistency=true 6 | log-slave-updates=true 7 | server-id = 2 8 | replicate-do-db=bet 9 | replicate-do-table=bet.bookmaker 10 | replicate-do-table=bet.competition 11 | replicate-do-table=bet.market 12 | replicate-do-table=bet.odds 13 | replicate-do-table=bet.outcome 14 | 15 | datadir=/var/lib/mysql 16 | socket=/var/lib/mysql/mysql.sock 17 | 18 | log-error=/var/log/mysqld.log 19 | pid-file=/var/run/mysqld/mysqld.pid -------------------------------------------------------------------------------- /hw25/provisioning/playbook.retry: -------------------------------------------------------------------------------- 1 | slave 2 | -------------------------------------------------------------------------------- /hw25/provisioning/selinux.yml: -------------------------------------------------------------------------------- 1 | - name: Copy SELinux type enforcement file 2 | copy: 3 | src: mysql-server/mysqld.te 4 | dest: /tmp 5 | - name: Compile SELinux module file 6 | command: checkmodule -M -m -o /tmp/mysqld.mod /tmp/mysqld.te 7 | - name: Build SELinux policy package 8 | command: semodule_package -o /tmp/mysqld.pp -m /tmp/mysqld.mod 9 | - name: Load SELinux policy package 10 | command: semodule -i /tmp/mysqld.pp 11 | - name: Remove temporary files 12 | file: 13 | path: "{{ item }}" 14 | state: absent 15 | loop: 16 | - /tmp/mysqld.te 17 | - /tmp/mysqld.mod 18 | - /tmp/mysqld.pp -------------------------------------------------------------------------------- /hw25/provisioning/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mysql_root_password: 'Pool-swimming3' 3 | mysql_dbuser: user 4 | mysql_dbuser_password: 'Pool-swimming3' 5 | mysql_log_file: '/var/log/mysqld.log' 6 | ... -------------------------------------------------------------------------------- /hw26/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw26/pic/pic01.png -------------------------------------------------------------------------------- /hw26/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw26/pic/pic02.png -------------------------------------------------------------------------------- /hw26/provision/locale.conf: -------------------------------------------------------------------------------- 1 | LC_ALL=en_US.utf8 2 | LC_CTYPE=en_US.utf8 3 | LANG=en_US.utf8 -------------------------------------------------------------------------------- /hw26/provision/playbook.retry: -------------------------------------------------------------------------------- 1 | ns 2 | -------------------------------------------------------------------------------- /hw26/provision/roles/dns/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup DNS server.
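Once the hw26 DNS role below is provisioned, the zone data can be smoke-tested from the client, assuming the ns VM answers on 192.168.50.10 as in the role files:

    dig @192.168.50.10 client.otus.test +short   # expect 192.168.50.100
    dig @192.168.50.10 -x 192.168.50.100 +short  # expect client.otus.test.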
-------------------------------------------------------------------------------- /hw26/provision/roles/dns/files/NetworkManager.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | dns=none -------------------------------------------------------------------------------- /hw26/provision/roles/dns/files/named.otus.test: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN otus.test. 3 | @ IN SOA ns.otus.test. root.otus.test. ( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | IN NS ns.otus.test. 11 | ; DNS Servers 12 | ns IN A 192.168.50.10 13 | ; Hosts 14 | client IN A 192.168.50.100 -------------------------------------------------------------------------------- /hw26/provision/roles/dns/files/named.otus.test.rev: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN 50.168.192.in-addr.arpa. 3 | 50.168.192.in-addr.arpa. IN SOA ns.otus.test. root.otus.test. ( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | 11 | IN NS ns.otus.test. 12 | ; DNS Servers 13 | 10 IN PTR ns.otus.test. 14 | ; Hosts 15 | 100 IN PTR client.otus.test. 16 | -------------------------------------------------------------------------------- /hw26/provision/roles/dns/files/named.zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /hw26/provision/roles/dns/files/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 192.168.50.10 2 | domain otus.test -------------------------------------------------------------------------------- /hw26/provision/roles/firewall/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup Firewall. -------------------------------------------------------------------------------- /hw26/provision/roles/firewall/files/kerberos.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="utf-8"?> 2 | <service> 3 | <short>Kerberos</short> 4 | <description>Kerberos network authentication protocol server</description> 5 | <port protocol="tcp" port="88"/> 6 | <port protocol="udp" port="88"/> 7 | </service> 8 | -------------------------------------------------------------------------------- /hw26/provision/roles/kerberos-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup Kerberos client. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP and KDC services required.
10 | 11 | Role Variables 12 | -------------- 13 | 14 | kadmin_user: root/admin 15 | kadmin_pass: pass 16 | kerb_user: vagrant 17 | kerb_user_pass: vagrant 18 | 19 | Example Playbook 20 | ---------------- 21 | 22 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 23 | 24 | - hosts: servers 25 | roles: 26 | - { role: kerberos-client } 27 | -------------------------------------------------------------------------------- /hw26/provision/roles/kerberos-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kerberos-client 3 | kadmin_user: root/admin 4 | kadmin_pass: pass 5 | kerb_user: vagrant 6 | kerb_user_pass: vagrant 7 | ... -------------------------------------------------------------------------------- /hw26/provision/roles/kerberos/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup Kerberos KDC. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS and NTP services required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | db_pass: pass 15 | realm: OTUS.TEST 16 | kadmin_user: root/admin 17 | kadmin_pass: pass 18 | kuser_user: vagrant 19 | kuser_pass: vagrant 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: kerberos } 29 | -------------------------------------------------------------------------------- /hw26/provision/roles/kerberos/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kerberos 3 | db_pass: pass 4 | realm: OTUS.TEST 5 | kadmin_user: root/admin 6 | kadmin_pass: pass 7 | kuser_user: vagrant 8 | kuser_pass: vagrant -------------------------------------------------------------------------------- /hw26/provision/roles/kerberos/files/kadm.acl: -------------------------------------------------------------------------------- 1 | */admin@OTUS.TEST * -------------------------------------------------------------------------------- /hw26/provision/roles/kerberos/files/kdc.conf: -------------------------------------------------------------------------------- 1 | default_realm = OTUS.TEST 2 | 3 | [kdcdefaults] 4 | v4_mode = nopreauth 5 | kdc_ports = 0 6 | 7 | [realms] 8 | OTUS.TEST = { 9 | kdc_ports = 88 10 | admin_keytab = /etc/kadm5.keytab 11 | database_name = /var/kerberos/krb5kdc/principal 12 | acl_file = /var/kerberos/krb5kdc/kadm5.acl 13 | key_stash_file = /var/kerberos/krb5kdc/stash 14 | max_life = 10h 0m 0s 15 | max_renewable_life = 7d 0h 0m 0s 16 | master_key_type = des3-hmac-sha1 17 | supported_enctypes = arcfour-hmac:normal des3-hmac-sha1:normal des-cbc-crc:normal des:normal des:v4 des:norealm des:onlyrealm des:afs3 18 | default_principal_flags = +preauth 19 | } -------------------------------------------------------------------------------- /hw26/provision/roles/nfs4-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup NFS client with automount service. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP, KDC and NFS services required. 
10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 15 | 16 | - hosts: servers 17 | roles: 18 | - { role: nfs4-client } 19 | -------------------------------------------------------------------------------- /hw26/provision/roles/nfs4-client/files/auto.master: -------------------------------------------------------------------------------- 1 | /mnt /etc/auto.nfs --timeout=60 -------------------------------------------------------------------------------- /hw26/provision/roles/nfs4-client/files/auto.nfs: -------------------------------------------------------------------------------- 1 | upload -fstype=nfs4,rw,sec=krb5 ns.otus.test:/opt/upload -------------------------------------------------------------------------------- /hw26/provision/roles/nfs4-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for nfs-client 3 | - name: install nfs server packages 4 | yum: 5 | name: ['nfs-utils','autofs'] 6 | state: present 7 | 8 | - name: enable and start nfs client 9 | service: 10 | name: nfs-client.target 11 | enabled: true 12 | state: restarted 13 | 14 | - name: autofs config step 2 15 | copy: 16 | src: "{{ item }}" 17 | dest: /etc 18 | with_fileglob: 19 | - auto.nfs 20 | - auto.master 21 | 22 | - name: automount service enabled and started 23 | service: 24 | name: autofs 25 | enabled: true 26 | state: restarted 27 | 28 | # mount example via hands 29 | # - name: mount nfs share 30 | # shell: mount.nfs4 -o sec=krb5 ns.otus.test:/opt/share/nfs-test /mnt/nfs -v 31 | ... -------------------------------------------------------------------------------- /hw26/provision/roles/nfs4/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup NFS server. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP, KDC services required. 10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 15 | 16 | - hosts: servers 17 | roles: 18 | - { role: nfs4 } 19 | -------------------------------------------------------------------------------- /hw26/provision/roles/nfs4/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for nfs4 -------------------------------------------------------------------------------- /hw26/provision/roles/nfs4/files/exports: -------------------------------------------------------------------------------- 1 | /opt/upload *(rw,no_root_squash,sec=krb5) -------------------------------------------------------------------------------- /hw26/provision/roles/ntp-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as ntp-client. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP services required. 
10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | 16 | Example Playbook 17 | ---------------- 18 | 19 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 20 | 21 | - hosts: servers 22 | roles: 23 | - { role: ntp-client } 24 | -------------------------------------------------------------------------------- /hw26/provision/roles/ntp-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp-client 3 | ntp_timezone: Europe/Moscow -------------------------------------------------------------------------------- /hw26/provision/roles/ntp-client/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server ns.otus.test iburst 2 | driftfile /var/lib/chrony/drift 3 | logdir /var/log/chrony 4 | log measurements statistics tracking -------------------------------------------------------------------------------- /hw26/provision/roles/ntp-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0644 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... -------------------------------------------------------------------------------- /hw26/provision/roles/ntp/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as NTP server. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS service required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | ntp_server: 0.rhel.pool.ntp.org 16 | 17 | Example Playbook 18 | ---------------- 19 | 20 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 21 | 22 | - hosts: servers 23 | roles: 24 | - { role: ntp } 25 | -------------------------------------------------------------------------------- /hw26/provision/roles/ntp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp 3 | ntp_timezone: Europe/Moscow 4 | ntp_server: 0.rhel.pool.ntp.org -------------------------------------------------------------------------------- /hw26/provision/roles/ntp/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server 0.centos.pool.ntp.org iburst 2 | manual 3 | allow 192.168.0.0/16 4 | local stratum 8 -------------------------------------------------------------------------------- /hw26/provision/roles/ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0644 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... 
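A minimal check that the ntp and ntp-client roles above took effect, run on any provisioned host:

    timedatectl | grep 'Time zone'   # Europe/Moscow, from ntp_timezone
    chronyc sources -v               # ns.otus.test on clients, the public pool on the server
    chronyc tracking                 # current offset and stratum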
-------------------------------------------------------------------------------- /hw26/provision/roles/samba/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for samba -------------------------------------------------------------------------------- /hw26/provision/roles/samba/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for samba 3 | - name: install samba server packages 4 | yum: 5 | name: ['samba'] 6 | state: present 7 | - name: copy samba config 8 | copy: 9 | src: smb.conf 10 | dest: /etc/samba/smb.conf 11 | owner: root 12 | group: root 13 | mode: 0640 14 | - name: create share directory 15 | file: 16 | path: /opt/share/smb-test 17 | state: directory 18 | owner: vagrant 19 | mode: 0755 20 | -------------------------------------------------------------------------------- /hw27/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw27/pic/pic01.png -------------------------------------------------------------------------------- /hw27/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw27/pic/pic02.png -------------------------------------------------------------------------------- /hw27/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw27/pic/pic03.png -------------------------------------------------------------------------------- /hw27/pic/pic04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw27/pic/pic04.png -------------------------------------------------------------------------------- /hw27/provision/locale.conf: -------------------------------------------------------------------------------- 1 | LC_ALL=en_US.utf8 2 | LC_CTYPE=en_US.utf8 3 | LANG=en_US.utf8 -------------------------------------------------------------------------------- /hw27/provision/roles/dns/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup DNS server. -------------------------------------------------------------------------------- /hw27/provision/roles/dns/files/NetworkManager.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | dns=none -------------------------------------------------------------------------------- /hw27/provision/roles/dns/files/named.otus.test.rev: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN 50.168.192.in-addr.arpa. 3 | 50.168.192.in-addr.arpa. IN SOA ns.otus.test. root.otus.test. ( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | 11 | IN NS ns.otus.test. 12 | ; DNS Servers 13 | 10 IN PTR ns.otus.test. 14 | ; Hosts 15 | 100 IN PTR client.otus.test.
16 | -------------------------------------------------------------------------------- /hw27/provision/roles/dns/files/named.zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /hw27/provision/roles/dns/files/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 192.168.50.10 2 | domain otus.test -------------------------------------------------------------------------------- /hw27/provision/roles/dovecot/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup dovecot. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP, postfix services required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | users: a list of { name, password, uid, gid } entries rendered into a {PLAIN} passwd-file (see defaults/main.yml) 15 | Example Playbook 16 | ---------------- 17 | 18 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 19 | 20 | - hosts: servers 21 | roles: 22 | - { role: dovecot } 23 | -------------------------------------------------------------------------------- /hw27/provision/roles/dovecot/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for dovecot 3 | users: 4 | - { name: vagrant, password: vagrant, uid: 1000, gid: 1000 } 5 | - { name: dovecot, password: dovecot, uid: 1001, gid: 1000 } -------------------------------------------------------------------------------- /hw27/provision/roles/dovecot/templates/users.j2: -------------------------------------------------------------------------------- 1 | {% for user in users %} 2 | {{ user.name }}:{PLAIN}{{ user.password }}:{{ user.uid }}:{{ user.gid }}::/home/{{ user.name }} 3 | {% endfor %} -------------------------------------------------------------------------------- /hw27/provision/roles/ntp-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as ntp-client. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP services required.
10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | 16 | Example Playbook 17 | ---------------- 18 | 19 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 20 | 21 | - hosts: servers 22 | roles: 23 | - { role: ntp-client } 24 | -------------------------------------------------------------------------------- /hw27/provision/roles/ntp-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp-client 3 | ntp_timezone: Europe/Moscow -------------------------------------------------------------------------------- /hw27/provision/roles/ntp-client/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server ns.otus.test iburst 2 | driftfile /var/lib/chrony/drift 3 | logdir /var/log/chrony 4 | log measurements statistics tracking -------------------------------------------------------------------------------- /hw27/provision/roles/ntp-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0644 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... -------------------------------------------------------------------------------- /hw27/provision/roles/ntp/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as NTP server. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS service required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | ntp_server: 0.rhel.pool.ntp.org 16 | 17 | Example Playbook 18 | ---------------- 19 | 20 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 21 | 22 | - hosts: servers 23 | roles: 24 | - { role: ntp } 25 | -------------------------------------------------------------------------------- /hw27/provision/roles/ntp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp 3 | ntp_timezone: Europe/Moscow 4 | ntp_server: 0.rhel.pool.ntp.org -------------------------------------------------------------------------------- /hw27/provision/roles/ntp/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server 0.centos.pool.ntp.org iburst 2 | manual 3 | allow 192.168.0.0/16 4 | local stratum 8 -------------------------------------------------------------------------------- /hw27/provision/roles/ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0644 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... 
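For the dovecot role earlier in hw27, whose users.j2 template renders the users list into a {PLAIN} passwd-file, a quick credential check once the mail stack is up (user names come from the role defaults):

    doveadm auth test vagrant vagrant   # verify passwd-file authentication works
    doveadm user vagrant                # show the resolved user entry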
-------------------------------------------------------------------------------- /hw27/provision/roles/postfix/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup postfix. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP services required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | 15 | Example Playbook 16 | ---------------- 17 | 18 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 19 | 20 | - hosts: servers 21 | roles: 22 | - { role: postfix } 23 | -------------------------------------------------------------------------------- /hw27/provision/roles/postfix/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for postfix -------------------------------------------------------------------------------- /hw27/provision/roles/postfix/files/opendkim.conf: -------------------------------------------------------------------------------- 1 | Domain otus.test 2 | Socket inet:50055@localhost 3 | Syslog Yes 4 | SyslogSuccess Yes 5 | LogWhy Yes 6 | AutoRestart Yes 7 | AutoRestartRate 10/1h 8 | Umask 002 9 | Canonicalization relaxed/simple 10 | ExternalIgnoreList refile:/etc/opendkim/TrustedHosts 11 | InternalHosts refile:/etc/opendkim/TrustedHosts 12 | KeyTable refile:/etc/opendkim/KeyTable 13 | SigningTable refile:/etc/opendkim/SigningTable 14 | Mode sv 15 | PidFile /var/run/opendkim/opendkim.pid 16 | SignatureAlgorithm rsa-sha256 17 | UserID opendkim:opendkim -------------------------------------------------------------------------------- /hw27/provision/roles/postfix/files/opendkim/KeyTable: -------------------------------------------------------------------------------- 1 | ns._domainkey.otus.test otus.test:ns:/etc/opendkim/keys/otus.test.private -------------------------------------------------------------------------------- /hw27/provision/roles/postfix/files/opendkim/SigningTable: -------------------------------------------------------------------------------- 1 | *@otus.test ns._domainkey.otus.test -------------------------------------------------------------------------------- /hw27/provision/roles/postfix/files/opendkim/TrustedHosts: -------------------------------------------------------------------------------- 1 | 127.0.0.1 2 | ::1 3 | 192.168.50.0/24 4 | ns.otus.test -------------------------------------------------------------------------------- /hw28/pic/Thumbs.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw28/pic/Thumbs.db -------------------------------------------------------------------------------- /hw28/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw28/pic/pic01.png -------------------------------------------------------------------------------- /hw28/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw28/pic/pic02.png -------------------------------------------------------------------------------- /hw28/pic/pic03.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw28/pic/pic03.png -------------------------------------------------------------------------------- /hw28/pic/pic04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw28/pic/pic04.png -------------------------------------------------------------------------------- /hw28/pic/pic05-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw28/pic/pic05-1.png -------------------------------------------------------------------------------- /hw28/pic/pic05-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw28/pic/pic05-2.png -------------------------------------------------------------------------------- /hw28/pic/pic06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw28/pic/pic06.png -------------------------------------------------------------------------------- /hw28/provision/environment: -------------------------------------------------------------------------------- 1 | LANG=en_US.utf8 2 | LC_CTYPE=en_US.utf8 3 | LC_ALL=en_US.UTF-8 -------------------------------------------------------------------------------- /hw28/provision/locale.conf: -------------------------------------------------------------------------------- 1 | LC_ALL=en_US.utf8 2 | LC_CTYPE=en_US.utf8 3 | LANG=en_US.utf8 -------------------------------------------------------------------------------- /hw28/provision/playbook.retry: -------------------------------------------------------------------------------- 1 | primary 2 | -------------------------------------------------------------------------------- /hw28/provision/roles/dns/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup DNS server. -------------------------------------------------------------------------------- /hw28/provision/roles/dns/files/NetworkManager.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | dns=none -------------------------------------------------------------------------------- /hw28/provision/roles/dns/files/named.otus.test: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN otus.test. 3 | @ IN SOA primary.otus.test. root.otus.test. ( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | IN NS primary.otus.test. 11 | ; DNS Servers 12 | primary IN A 192.168.50.100 13 | ; Hosts 14 | standby IN A 192.168.50.101 -------------------------------------------------------------------------------- /hw28/provision/roles/dns/files/named.otus.test.rev: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN 50.168.192.in-addr.arpa. 3 | 50.168.192.in-addr.arpa. IN SOA primary.otus.test. root.otus.test. 
( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | 11 | IN NS primary.otus.test. 12 | ; DNS Servers 13 | ; Hosts 14 | 100 IN PTR primary.otus.test. 15 | 101 IN PTR standby.otus.test. 16 | 17 | -------------------------------------------------------------------------------- /hw28/provision/roles/dns/files/named.zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /hw28/provision/roles/dns/files/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 192.168.50.100 2 | domain otus.test -------------------------------------------------------------------------------- /hw28/provision/roles/kerberos-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup Kerberos client. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP and KDC services required.
10 | 11 | Role Variables 12 | -------------- 13 | 14 | db_pass: pass 15 | realm: OTUS.TEST 16 | kadmin_user: root/admin 17 | kadmin_pass: pass 18 | kuser_user: vagrant 19 | kuser_pass: vagrant 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: kerberos } 29 | -------------------------------------------------------------------------------- /hw28/provision/roles/kerberos/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kerberos 3 | db_pass: pass 4 | realm: OTUS.TEST 5 | kadmin_user: root/admin 6 | kadmin_pass: pass 7 | kuser_user: vagrant 8 | kuser_pass: vagrant -------------------------------------------------------------------------------- /hw28/provision/roles/kerberos/files/kadm.acl: -------------------------------------------------------------------------------- 1 | */admin@OTUS.TEST * -------------------------------------------------------------------------------- /hw28/provision/roles/kerberos/files/kdc.conf: -------------------------------------------------------------------------------- 1 | default_realm = OTUS.TEST 2 | 3 | [kdcdefaults] 4 | v4_mode = nopreauth 5 | kdc_ports = 0 6 | 7 | [realms] 8 | OTUS.TEST = { 9 | kdc_ports = 88 10 | admin_keytab = /etc/kadm5.keytab 11 | database_name = /var/kerberos/krb5kdc/principal 12 | acl_file = /var/kerberos/krb5kdc/kadm5.acl 13 | key_stash_file = /var/kerberos/krb5kdc/stash 14 | max_life = 10h 0m 0s 15 | max_renewable_life = 7d 0h 0m 0s 16 | master_key_type = des3-hmac-sha1 17 | supported_enctypes = arcfour-hmac:normal des3-hmac-sha1:normal des-cbc-crc:normal des:normal des:v4 des:norealm des:onlyrealm des:afs3 18 | default_principal_flags = +preauth 19 | } -------------------------------------------------------------------------------- /hw28/provision/roles/nfs4-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup NFS client with automount service. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP, KDC and NFS services required. 
10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 15 | 16 | - hosts: servers 17 | roles: 18 | - { role: nfs4-client } 19 | -------------------------------------------------------------------------------- /hw28/provision/roles/nfs4-client/files/auto.master: -------------------------------------------------------------------------------- 1 | /opt /etc/auto.nfs --timeout=60 -------------------------------------------------------------------------------- /hw28/provision/roles/nfs4-client/files/auto.nfs: -------------------------------------------------------------------------------- 1 | backup -fstype=nfs4,rw,sec=krb5 primary.otus.test:/opt/backup -------------------------------------------------------------------------------- /hw28/provision/roles/nfs4-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for nfs4-client 3 | - name: install nfs client packages 4 | yum: 5 | name: ['nfs-utils','autofs'] 6 | state: present 7 | 8 | - name: enable and start nfs client 9 | service: 10 | name: nfs-client.target 11 | enabled: true 12 | state: restarted 13 | 14 | - name: copy autofs config files 15 | copy: 16 | src: "{{ item }}" 17 | dest: /etc 18 | with_fileglob: 19 | - auto.nfs 20 | - auto.master 21 | 22 | - name: automount service enabled and started 23 | service: 24 | name: autofs 25 | enabled: true 26 | state: restarted 27 | 28 | # example: mounting the share by hand 29 | # - name: mount nfs share 30 | # shell: mount.nfs4 -o sec=krb5 ns.otus.test:/opt/share/nfs-test /mnt/nfs -v 31 | ... -------------------------------------------------------------------------------- /hw28/provision/roles/nfs4/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup NFS server. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP, KDC services required. 10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 15 | 16 | - hosts: servers 17 | roles: 18 | - { role: nfs4 } 19 | -------------------------------------------------------------------------------- /hw28/provision/roles/nfs4/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for nfs4 -------------------------------------------------------------------------------- /hw28/provision/roles/nfs4/files/exports: -------------------------------------------------------------------------------- 1 | /opt/backup *(rw,sync,no_root_squash,sec=krb5) -------------------------------------------------------------------------------- /hw28/provision/roles/ntp-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as ntp-client. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP services required.
10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | 16 | Example Playbook 17 | ---------------- 18 | 19 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 20 | 21 | - hosts: servers 22 | roles: 23 | - { role: ntp-client } 24 | -------------------------------------------------------------------------------- /hw28/provision/roles/ntp-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp-client 3 | ntp_timezone: Europe/Moscow -------------------------------------------------------------------------------- /hw28/provision/roles/ntp-client/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server primary.otus.test iburst 2 | driftfile /var/lib/chrony/drift 3 | logdir /var/log/chrony 4 | log measurements statistics tracking -------------------------------------------------------------------------------- /hw28/provision/roles/ntp-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0700 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... -------------------------------------------------------------------------------- /hw28/provision/roles/ntp/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as NTP server. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS service required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | ntp_server: 0.rhel.pool.ntp.org 16 | 17 | Example Playbook 18 | ---------------- 19 | 20 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 21 | 22 | - hosts: servers 23 | roles: 24 | - { role: ntp } 25 | -------------------------------------------------------------------------------- /hw28/provision/roles/ntp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp 3 | ntp_timezone: Europe/Moscow 4 | ntp_server: 0.rhel.pool.ntp.org -------------------------------------------------------------------------------- /hw28/provision/roles/ntp/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server 0.centos.pool.ntp.org iburst 2 | manual 3 | allow 192.168.0.0/16 4 | local stratum 8 -------------------------------------------------------------------------------- /hw28/provision/roles/ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0700 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... 
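Note: the ntp and ntp-client roles above restart chronyd unconditionally on every run. A handler-based variant (a sketch only, not part of the original roles, following the notify pattern the etcd role later in this repo uses) would restart the daemon only when chrony.conf actually changes:

```
---
# hypothetical rework of the ntp tasks: restart chronyd only on change
- name: config chrony daemon
  copy:
    src: chrony.conf
    dest: /etc/chrony.conf
    owner: root
    group: root
    mode: 0644
  notify: Restart chronyd

- name: chronyd service enabled and started
  service:
    name: chronyd
    state: started
    enabled: true

# with a matching handlers/main.yml entry:
# - name: Restart chronyd
#   service:
#     name: chronyd
#     state: restarted
```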
-------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-primary/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install PostgreSQL 11 (primary). 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: pgsql11-primary } 18 | ``` -------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-primary/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for pgsql11-primary 3 | 4 | -------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-primary/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for pgsql11-primary 3 | 4 | - name: Restart postgres 5 | service: 6 | name: postgresql-11 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-primary/templates/pgbackrest.conf.j2: -------------------------------------------------------------------------------- 1 | [global] 2 | repo1-path=/opt/backup 3 | repo1-retention-full=2 4 | process-max=2 5 | log-level-console=info 6 | log-level-file=debug 7 | 8 | [backup] 9 | pg1-path=/var/lib/pgsql/11/data -------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-standby/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install PostgreSQL 11 (standby). 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: pgsql11-standby } 18 | ``` -------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-standby/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for pgsql11-standby 3 | 4 | -------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-standby/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for pgsql11-standby 3 | 4 | - name: Restart postgres 5 | service: 6 | name: postgresql-11 7 | state: restarted 8 | ...
9 | -------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-standby/templates/pgbackrest.conf.j2: -------------------------------------------------------------------------------- 1 | [backup] 2 | pg1-host=primary.otus.test 3 | pg1-path=/var/lib/pgsql/11/data 4 | pg2-path=/var/lib/pgsql/11/data 5 | recovery-option=standby_mode=on 6 | recovery-option=primary_conninfo=host=primary.otus.test user=replication 7 | recovery-option=recovery_target_timeline=latest 8 | 9 | [global] 10 | repo1-path=/opt/backup 11 | repo1-retention-full=1 12 | process-max=2 13 | log-level-console=info 14 | log-level-file=debug 15 | backup-standby=y 16 | delta=y -------------------------------------------------------------------------------- /hw28/provision/roles/pgsql11-standby/templates/recovery.conf.j2: -------------------------------------------------------------------------------- 1 | standby_mode = on 2 | primary_conninfo = 'host=primary.otus.test port=5432 user=replication password=swimming3 application_name=standby' 3 | restore_command = 'pgbackrest --stanza=backup archive-get %f "%p"' 4 | trigger_file = '/tmp/postgresql.trigger.5432' 5 | recovery_target_timeline = 'latest' -------------------------------------------------------------------------------- /hw29/pic/pic01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw29/pic/pic01.png -------------------------------------------------------------------------------- /hw29/pic/pic02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw29/pic/pic02.png -------------------------------------------------------------------------------- /hw29/pic/pic03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/hw29/pic/pic03.png -------------------------------------------------------------------------------- /hw29/provision/environment: -------------------------------------------------------------------------------- 1 | LANG=en_US.utf8 2 | LC_CTYPE=en_US.utf8 3 | LC_ALL=en_US.UTF-8 -------------------------------------------------------------------------------- /hw29/provision/locale.conf: -------------------------------------------------------------------------------- 1 | LC_ALL=en_US.utf8 2 | LC_CTYPE=en_US.utf8 3 | LANG=en_US.utf8 -------------------------------------------------------------------------------- /hw29/provision/roles/dns/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup DNS server. -------------------------------------------------------------------------------- /hw29/provision/roles/dns/files/NetworkManager.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | dns=none -------------------------------------------------------------------------------- /hw29/provision/roles/dns/files/named.otus.test: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | $ORIGIN otus.test. 3 | @ IN SOA ns.otus.test. root.otus.test. 
( 4 | 1502201901 ; serial 5 | 3600 ; refresh (1 hour) 6 | 600 ; retry (10 minutes) 7 | 86400 ; expire (1 day) 8 | 600 ; minimum (10 minutes) 9 | ) 10 | IN NS ns.otus.test. 11 | ; DNS Servers 12 | ns IN A 192.168.50.10 13 | ; Hosts 14 | pg01 IN A 192.168.50.101 15 | pg02 IN A 192.168.50.102 16 | pg03 IN A 192.168.50.103 -------------------------------------------------------------------------------- /hw29/provision/roles/dns/files/named.zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /hw29/provision/roles/dns/files/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 192.168.50.10 2 | domain otus.test -------------------------------------------------------------------------------- /hw29/provision/roles/etcd/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install etcd. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: etcd } 18 | ``` -------------------------------------------------------------------------------- /hw29/provision/roles/etcd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for etcd 3 | etcd_hostname: 192.168.50.10 -------------------------------------------------------------------------------- /hw29/provision/roles/etcd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for etcd 3 | 4 | - name: Restart etcd 5 | service: 6 | name: etcd 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /hw29/provision/roles/etcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for etcd 3 | 4 | - name: etcd install 5 | yum: 6 | name: 7 | - etcd 8 | state: latest 9 | 10 | - name: Add etcd.conf 11 | template: 12 | src: etcd.conf.j2 13 | dest: /etc/etcd/etcd.conf 14 | owner: root 15 | group: root 16 | mode: 0644 17 | notify: Restart etcd 18 | 19 | - name: Set and start etcd as a service 20 | systemd: 21 | name: etcd 22 | enabled: yes 23 | state: started 24 | masked: no 25 | ... 26 | -------------------------------------------------------------------------------- /hw29/provision/roles/etcd/templates/etcd.conf.j2: -------------------------------------------------------------------------------- 1 | ETCD_DATA_DIR="/var/lib/etcd/default.etcd" 2 | ETCD_LISTEN_PEER_URLS="http://{{ etcd_hostname }}:2380" 3 | ETCD_LISTEN_CLIENT_URLS="http://localhost:2379,http://{{ etcd_hostname }}:2379" 4 | ETCD_NAME="etcd0" 5 | ETCD_INITIAL_ADVERTISE_PEER_URLS="http://{{ etcd_hostname }}:2380" 6 | ETCD_ADVERTISE_CLIENT_URLS="http://{{ etcd_hostname }}:2379" 7 | ETCD_INITIAL_CLUSTER="etcd0=http://{{ etcd_hostname }}:2380" 8 | ETCD_INITIAL_CLUSTER_TOKEN="otus" 9 | ETCD_INITIAL_CLUSTER_STATE="new" -------------------------------------------------------------------------------- /hw29/provision/roles/haproxy/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install HAproxy.
4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: haproxy } 18 | ``` -------------------------------------------------------------------------------- /hw29/provision/roles/haproxy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for haproxy 3 | 4 | pg_hosts: 5 | - { name: 'pg01.otus.test', status: 'rw' } 6 | - { name: 'pg02.otus.test', status: 'ro' } 7 | - { name: 'pg03.otus.test', status: 'ro' } 8 | -------------------------------------------------------------------------------- /hw29/provision/roles/haproxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for haproxy 3 | 4 | - name: Restart haproxy 5 | service: 6 | name: haproxy 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /hw29/provision/roles/haproxy/templates/haproxy.conf.j2: -------------------------------------------------------------------------------- 1 | # rsyslog snippet: receive haproxy's local2 messages over UDP and write them to a file 2 | $ModLoad imudp 3 | $UDPServerAddress 127.0.0.1 4 | $UDPServerRun 514 5 | local2.* /var/log/haproxy.log -------------------------------------------------------------------------------- /hw29/provision/roles/ntp-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as ntp-client. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP services required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | 16 | Example Playbook 17 | ---------------- 18 | 19 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 20 | 21 | - hosts: servers 22 | roles: 23 | - { role: ntp-client } 24 | -------------------------------------------------------------------------------- /hw29/provision/roles/ntp-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp-client 3 | ntp_timezone: Europe/Moscow -------------------------------------------------------------------------------- /hw29/provision/roles/ntp-client/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server ns.otus.test iburst 2 | driftfile /var/lib/chrony/drift 3 | logdir /var/log/chrony 4 | log measurements statistics tracking -------------------------------------------------------------------------------- /hw29/provision/roles/ntp-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0700 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... -------------------------------------------------------------------------------- /hw29/provision/roles/ntp/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as NTP server. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS service required.
10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | ntp_server: 0.rhel.pool.ntp.org 16 | 17 | Example Playbook 18 | ---------------- 19 | 20 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 21 | 22 | - hosts: servers 23 | roles: 24 | - { role: ntp } 25 | -------------------------------------------------------------------------------- /hw29/provision/roles/ntp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp 3 | ntp_timezone: Europe/Moscow 4 | ntp_server: 0.rhel.pool.ntp.org -------------------------------------------------------------------------------- /hw29/provision/roles/ntp/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server 0.centos.pool.ntp.org iburst 2 | manual 3 | allow 192.168.0.0/16 4 | local stratum 8 -------------------------------------------------------------------------------- /hw29/provision/roles/ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0700 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... -------------------------------------------------------------------------------- /hw29/provision/roles/patroni/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install patroni. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: patroni } 18 | ``` -------------------------------------------------------------------------------- /hw29/provision/roles/patroni/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for patroni 3 | etcd_hosts: 4 | - { name: 'ns.otus.test', ip: '192.168.50.10' } 5 | pg_hosts: 6 | - { name: 'pg01.otus.test', ip: '192.168.50.101', alias: 'pg01' } 7 | - { name: 'pg02.otus.test', ip: '192.168.50.102', alias: 'pg02' } 8 | - { name: 'pg03.otus.test', ip: '192.168.50.103', alias: 'pg03' } 9 | 10 | pg_bin_dir: /usr/pgsql-11/bin 11 | pg_database_dir: /var/lib/pgsql/11/data 12 | -------------------------------------------------------------------------------- /hw29/provision/roles/patroni/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for patroni 3 | 4 | - name: Restart haproxy 5 | service: 6 | name: haproxy 7 | state: restarted 8 | ...
9 | -------------------------------------------------------------------------------- /hw29/provision/roles/patroni/templates/patroni.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Runners to orchestrate a high-availability PostgreSQL 3 | After=syslog.target network.target 4 | 5 | [Service] 6 | Type=simple 7 | 8 | User=postgres 9 | Group=postgres 10 | 11 | ExecStart=/bin/patroni /etc/patroni.yml 12 | KillMode=process 13 | TimeoutSec=30 14 | Restart=no 15 | 16 | [Install] 17 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /hw29/provision/roles/pgsql11/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install PostgreSQL 11. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: pgsql11 } 18 | ``` -------------------------------------------------------------------------------- /hw29/provision/roles/pgsql11/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for pgsql11 -------------------------------------------------------------------------------- /hw29/provision/roles/pgsql11/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for pgsql11 3 | 4 | - name: Restart postgres 5 | service: 6 | name: postgresql-11 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /hw29/provision/roles/pgsql11/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for pgsql11 3 | - name: install postgres repository from url 4 | yum: 5 | name: https://download.postgresql.org/pub/repos/yum/11/redhat/rhel-7-x86_64/pgdg-centos11-11-2.noarch.rpm 6 | 7 | - name: PostgreSQL 11 server install 8 | yum: 9 | name: 10 | - postgresql11-server 11 | - postgresql11 12 | state: latest 13 | 14 | - name: Check if postgres database dir exists 15 | stat: 16 | path: /var/lib/pgsql/11/data 17 | register: pg_dir_exist 18 | 19 | - name: create datadir with correct rights 20 | file: 21 | path: /var/lib/pgsql/11/data 22 | state: directory 23 | mode: 0700 24 | when: not pg_dir_exist.stat.exists 25 | ...
26 | -------------------------------------------------------------------------------- /hw29/provision/roles/pgsql11/templates/pgbackrest.conf.j2: -------------------------------------------------------------------------------- 1 | [global] 2 | repo1-path=/opt/backup 3 | repo1-retention-full=2 4 | process-max=2 5 | log-level-console=info 6 | log-level-file=debug 7 | 8 | [backup] 9 | pg1-path=/var/lib/pgsql/11/data -------------------------------------------------------------------------------- /project/docs/Erik_Nordtrom_GrafanaCon_EU_2018.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/docs/Erik_Nordtrom_GrafanaCon_EU_2018.pdf -------------------------------------------------------------------------------- /project/docs/LizardFS.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/docs/LizardFS.pdf -------------------------------------------------------------------------------- /project/docs/whitepaper_lizard_v3.12_web.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/docs/whitepaper_lizard_v3.12_web.pdf -------------------------------------------------------------------------------- /project/pic/pic_cloud01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_cloud01.png -------------------------------------------------------------------------------- /project/pic/pic_db01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_db01.png -------------------------------------------------------------------------------- /project/pic/pic_grafana01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_grafana01.png -------------------------------------------------------------------------------- /project/pic/pic_grafana02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_grafana02.png -------------------------------------------------------------------------------- /project/pic/pic_lizard01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_lizard01.png -------------------------------------------------------------------------------- /project/pic/pic_lizard02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_lizard02.png -------------------------------------------------------------------------------- /project/pic/pic_lizard03.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_lizard03.png -------------------------------------------------------------------------------- /project/pic/pic_lizard04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_lizard04.png -------------------------------------------------------------------------------- /project/pic/pic_monitoring01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_monitoring01.png -------------------------------------------------------------------------------- /project/pic/pic_monitoring02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_monitoring02.png -------------------------------------------------------------------------------- /project/pic/pic_monitoring03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_monitoring03.png -------------------------------------------------------------------------------- /project/pic/pic_monitoring04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_monitoring04.jpg -------------------------------------------------------------------------------- /project/pic/pic_stand01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_stand01.png -------------------------------------------------------------------------------- /project/pic/pic_stand02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_stand02.png -------------------------------------------------------------------------------- /project/pic/pic_stand03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_stand03.png -------------------------------------------------------------------------------- /project/pic/pic_stand04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/pic/pic_stand04.png -------------------------------------------------------------------------------- /project/provision/environment: -------------------------------------------------------------------------------- 1 | LANG=en_US.utf8 2 | LC_CTYPE=en_US.utf8 3 | LC_ALL=en_US.UTF-8 -------------------------------------------------------------------------------- /project/provision/locale.conf: -------------------------------------------------------------------------------- 1 | LC_ALL=en_US.utf8 2 | LC_CTYPE=en_US.utf8 3 | LANG=en_US.utf8 
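The project provision directory ships the same environment and locale.conf files as hw28/hw29, but the task that installs them is not part of this dump. A minimal sketch of the copy step (destinations assumed from the standard CentOS 7 paths):

```
# hypothetical task: install the shipped locale files
- name: set system-wide locale
  copy:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    owner: root
    group: root
    mode: 0644
  loop:
    - { src: locale.conf, dest: /etc/locale.conf }
    - { src: environment, dest: /etc/environment }
```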
-------------------------------------------------------------------------------- /project/provision/roles/audit-client/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Forward audit (auditd) logs to a central server, without storing them locally except when the queue overflows. [audit client -> audit server] 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: audit-client } 18 | ``` -------------------------------------------------------------------------------- /project/provision/roles/audit-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for audit-client 3 | 4 | audit_remote_server: ns.otus.test 5 | audit_remote_server_port: 60 6 | audit_rules: 7 | - '-w /var/log/audit/ -k auditlog' 8 | - '-w /etc/audit/ -p wa -k auditconfig' 9 | - '-w /etc/libaudit.conf -p wa -k auditconfig' 10 | - '-w /etc/audisp/ -p wa -k audispconfig' 11 | - '-w /sbin/auditctl -p x -k audittools' 12 | - '-w /sbin/auditd -p x -k audittools' 13 | - '-a exit,always -F arch=b64 -S execve' 14 | - '-a exit,always -F arch=b32 -S execve' 15 | audit_domain: otus.test 16 | ... -------------------------------------------------------------------------------- /project/provision/roles/audit-client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for audit-client 3 | 4 | # systemctl refuses manual auditd restarts on CentOS 7, so the legacy service script is used 5 | - name: Restart auditd 6 | command: service auditd restart 7 | ... -------------------------------------------------------------------------------- /project/provision/roles/audit-client/templates/au-remote.conf.j2: -------------------------------------------------------------------------------- 1 | # This file controls the audispd data path to the 2 | # remote event logger. This plugin will send events to 3 | # a remote machine (Central Logger). 4 | 5 | active = yes 6 | direction = out 7 | path = /sbin/audisp-remote 8 | type = always 9 | #args = 10 | format = string 11 | 12 | -------------------------------------------------------------------------------- /project/provision/roles/audit-client/templates/audispd.conf.j2: -------------------------------------------------------------------------------- 1 | # 2 | # This file controls the configuration of the audit event 3 | # dispatcher daemon, audispd. 4 | # 5 | 6 | q_depth = 150 7 | overflow_action = SYSLOG 8 | priority_boost = 4 9 | max_restarts = 10 10 | name_format = HOSTNAME 11 | name = {{ audit_domain }} 12 | #plugin_dir = /etc/audisp/plugins.d/ -------------------------------------------------------------------------------- /project/provision/roles/audit-client/templates/audit.rules.j2: -------------------------------------------------------------------------------- 1 | # Remove any existing rules 2 | -D 3 | -b 8192 4 | -f 1 5 | -i 6 | {% for rule in audit_rules %} 7 | {{ rule }} 8 | {% endfor %} -------------------------------------------------------------------------------- /project/provision/roles/audit-server/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Configure auditd to receive logs. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2.
Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: audit-server } 18 | ``` -------------------------------------------------------------------------------- /project/provision/roles/audit-server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for audit-server 3 | ... -------------------------------------------------------------------------------- /project/provision/roles/audit-server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for audit-server 3 | 4 | - name: Restart auditd 5 | command: service auditd restart 6 | ... -------------------------------------------------------------------------------- /project/provision/roles/audit-server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for audit-server 3 | 4 | - name: Server audit daemon config file 5 | template: 6 | src: auditd.conf.j2 7 | dest: /etc/audit/auditd.conf 8 | 9 | - name: Restart auditd 10 | command: service auditd restart -------------------------------------------------------------------------------- /project/provision/roles/autofs/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup automount service. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, Lizard services required. 10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 15 | 16 | - hosts: servers 17 | roles: 18 | - { role: autofs } 19 | -------------------------------------------------------------------------------- /project/provision/roles/autofs/files/auto.data: -------------------------------------------------------------------------------- 1 | lizard -fstype=mfs,big_writes,nosuid,nodev,noatime,allow_other master:9421:/ -------------------------------------------------------------------------------- /project/provision/roles/autofs/files/auto.master: -------------------------------------------------------------------------------- 1 | /data /etc/auto.data -------------------------------------------------------------------------------- /project/provision/roles/autofs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for autofs 3 | - name: install autofs 4 | yum: 5 | name: ['autofs'] 6 | state: present 7 | 8 | - name: autofs config 9 | copy: 10 | src: "{{ item }}" 11 | dest: /etc 12 | with_fileglob: 13 | - auto.data 14 | - auto.master 15 | 16 | - name: automount service enabled and started 17 | service: 18 | name: autofs 19 | enabled: true 20 | state: restarted 21 | ... -------------------------------------------------------------------------------- /project/provision/roles/dns/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup DNS server.
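The dns role's tasks are not included in this dump. Judging by the files below (a NetworkManager.conf with dns=none, a zone-transfer key, and a static resolv.conf), a sketch of the client-facing steps might look like:

```
# hypothetical excerpt of the dns role's tasks
- name: stop NetworkManager from rewriting resolv.conf
  copy:
    src: NetworkManager.conf
    dest: /etc/NetworkManager/NetworkManager.conf

- name: point the resolver at the local named
  copy:
    src: resolv.conf
    dest: /etc/resolv.conf
```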
-------------------------------------------------------------------------------- /project/provision/roles/dns/files/NetworkManager.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | dns=none -------------------------------------------------------------------------------- /project/provision/roles/dns/files/named.zonetransfer.key: -------------------------------------------------------------------------------- 1 | key "zonetransfer.key" { 2 | algorithm hmac-md5; 3 | secret "SB4Db9pJomyKxTNynlAq/g=="; 4 | }; 5 | -------------------------------------------------------------------------------- /project/provision/roles/dns/files/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 192.168.50.10 2 | domain otus.test -------------------------------------------------------------------------------- /project/provision/roles/etcd/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install etcd. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: etcd } 18 | ``` -------------------------------------------------------------------------------- /project/provision/roles/etcd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for etcd 3 | etcd_hostname: 192.168.50.10 -------------------------------------------------------------------------------- /project/provision/roles/etcd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for etcd 3 | 4 | - name: Restart etcd 5 | service: 6 | name: etcd 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /project/provision/roles/etcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for etcd 3 | 4 | - name: etcd install 5 | yum: 6 | name: ['etcd'] 7 | state: latest 8 | 9 | - name: Add etcd.conf 10 | template: 11 | src: etcd.conf.j2 12 | dest: /etc/etcd/etcd.conf 13 | owner: root 14 | group: root 15 | mode: 0644 16 | notify: Restart etcd 17 | 18 | - name: Set and start etcd as a service 19 | systemd: 20 | name: etcd 21 | enabled: yes 22 | state: started 23 | masked: no 24 | ... 25 | -------------------------------------------------------------------------------- /project/provision/roles/etcd/templates/etcd.conf.j2: -------------------------------------------------------------------------------- 1 | ETCD_DATA_DIR="/var/lib/etcd/default.etcd" 2 | ETCD_LISTEN_PEER_URLS="http://{{ etcd_hostname }}:2380" 3 | ETCD_LISTEN_CLIENT_URLS="http://localhost:2379,http://{{ etcd_hostname }}:2379" 4 | ETCD_NAME="etcd0" 5 | ETCD_INITIAL_ADVERTISE_PEER_URLS="http://{{ etcd_hostname }}:2380" 6 | ETCD_ADVERTISE_CLIENT_URLS="http://{{ etcd_hostname }}:2379" 7 | ETCD_INITIAL_CLUSTER="etcd0=http://{{ etcd_hostname }}:2380" 8 | ETCD_INITIAL_CLUSTER_TOKEN="otus" 9 | ETCD_INITIAL_CLUSTER_STATE="new" -------------------------------------------------------------------------------- /project/provision/roles/grafana/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Deploy Grafana. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2.
Role Variables 10 | 11 | - domain_name: `domain.name` 12 | 13 | #### 3. Example Playbook 14 | 15 | ``` 16 | --- 17 | - name: Do something 18 | hosts: servers 19 | 20 | roles: 21 | - { role: grafana } 22 | ``` -------------------------------------------------------------------------------- /project/provision/roles/grafana/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for grafana 3 | domain_name: otus.test 4 | ... 5 | -------------------------------------------------------------------------------- /project/provision/roles/grafana/files/sample_querys.txt: -------------------------------------------------------------------------------- 1 | SELECT DISTINCT labels->'instance' FROM metrics_labels WHERE metric_name = 'netdata_disk_iotime_milliseconds_persec_average'; 2 | 3 | 4 | SELECT 5 | time_bucket ('1m', time) AS time, 6 | avg(value) as load_1m 7 | FROM 8 | metrics 9 | WHERE 10 | time BETWEEN $__timeFrom() AND $__timeTo() 11 | AND name = 'netdata_disk_iotime_milliseconds_persec_average' 12 | AND labels @> '{"instance": $instance}' 13 | GROUP BY 1 14 | ORDER BY 1 ASC; 15 | 16 | netdata_disk_io_KiB_persec_average 17 | 18 | 19 | http://10.10.10.105:8080/lizard/mfs.cgi?masterport=9421&masterhost=master&CCdata=cpu&sections=CS -------------------------------------------------------------------------------- /project/provision/roles/grafana/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for grafana 3 | 4 | - name: Restart grafana 5 | service: 6 | name: grafana-server 7 | state: restarted 8 | ... -------------------------------------------------------------------------------- /project/provision/roles/grafana/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for grafana 3 | 4 | - name: Install grafana 5 | yum: 6 | name: https://dl.grafana.com/oss/release/grafana-5.4.2-1.x86_64.rpm 7 | state: present 8 | 9 | - name: Ensure grafana is up to date 10 | yum: 11 | name: grafana 12 | state: latest 13 | 14 | - name: Configuration file 15 | template: 16 | src: grafana.ini.j2 17 | dest: /etc/grafana/grafana.ini 18 | 19 | - name: Enable grafana service 20 | systemd: 21 | name: grafana-server 22 | enabled: yes 23 | masked: no 24 | state: started 25 | 26 | - name: Start grafana service 27 | systemd: 28 | name: grafana-server 29 | state: started 30 | ... 31 | -------------------------------------------------------------------------------- /project/provision/roles/haproxy/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install HAproxy. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2.
Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: Do something 14 | hosts: servers 15 | 16 | roles: 17 | - { role: haproxy } 18 | ``` -------------------------------------------------------------------------------- /project/provision/roles/haproxy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for haproxy 3 | 4 | pg_hosts: 5 | - { name: 'master01.otus.test', status: 'rw' } 6 | - { name: 'master02.otus.test', status: 'ro' } 7 | - { name: 'master03.otus.test', status: 'ro' } 8 | -------------------------------------------------------------------------------- /project/provision/roles/haproxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for haproxy 3 | 4 | - name: Restart haproxy 5 | service: 6 | name: haproxy 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /project/provision/roles/haproxy/templates/haproxy.conf.j2: -------------------------------------------------------------------------------- 1 | # rsyslog snippet: receive haproxy's local2 messages over UDP and write them to a file 2 | $ModLoad imudp 3 | $UDPServerAddress 127.0.0.1 4 | $UDPServerRun 514 5 | local2.* /var/log/haproxy.log -------------------------------------------------------------------------------- /project/provision/roles/kerberos-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup Kerberos client. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP and KDC services required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | kadmin_user: root/admin 15 | kadmin_pass: pass 16 | kerb_user: vagrant 17 | kerb_user_pass: vagrant 18 | 19 | Example Playbook 20 | ---------------- 21 | 22 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 23 | 24 | - hosts: servers 25 | roles: 26 | - { role: kerberos-client } 27 | -------------------------------------------------------------------------------- /project/provision/roles/kerberos-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kerberos-client 3 | kadmin_user: root/admin 4 | kadmin_pass: pass 5 | kerb_user: vagrant 6 | kerb_user_pass: vagrant 7 | ... -------------------------------------------------------------------------------- /project/provision/roles/kerberos/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup Kerberos KDC. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS and NTP services required.
10 | 11 | Role Variables 12 | -------------- 13 | 14 | db_pass: pass 15 | realm: OTUS.TEST 16 | kadmin_user: root/admin 17 | kadmin_pass: pass 18 | kuser_user: vagrant 19 | kuser_pass: vagrant 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: kerberos } 29 | -------------------------------------------------------------------------------- /project/provision/roles/kerberos/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kerberos 3 | db_pass: pass 4 | realm: OTUS.TEST 5 | kadmin_user: root/admin 6 | kadmin_pass: pass 7 | kuser_user: vagrant 8 | kuser_pass: vagrant -------------------------------------------------------------------------------- /project/provision/roles/kerberos/files/kadm.acl: -------------------------------------------------------------------------------- 1 | */admin@OTUS.TEST * -------------------------------------------------------------------------------- /project/provision/roles/kerberos/files/kdc.conf: -------------------------------------------------------------------------------- 1 | default_realm = OTUS.TEST 2 | 3 | [kdcdefaults] 4 | v4_mode = nopreauth 5 | kdc_ports = 0 6 | 7 | [realms] 8 | OTUS.TEST = { 9 | kdc_ports = 88 10 | admin_keytab = /etc/kadm5.keytab 11 | database_name = /var/kerberos/krb5kdc/principal 12 | acl_file = /var/kerberos/krb5kdc/kadm5.acl 13 | key_stash_file = /var/kerberos/krb5kdc/stash 14 | max_life = 10h 0m 0s 15 | max_renewable_life = 7d 0h 0m 0s 16 | master_key_type = des3-hmac-sha1 17 | supported_enctypes = arcfour-hmac:normal des3-hmac-sha1:normal des-cbc-crc:normal des:normal des:v4 des:norealm des:onlyrealm des:afs3 18 | default_principal_flags = +preauth 19 | } -------------------------------------------------------------------------------- /project/provision/roles/lizard-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mfsmount: /data/lizard 3 | master_ip: 192.168.50.99 4 | master: master 5 | bundle: lizardfs-bundle-CentOS-7.5.1804 6 | ... -------------------------------------------------------------------------------- /project/provision/roles/lizard-client/files/mount.mfs: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # mount(8) helper: locate the -o argument and hand its value to mfsmount 3 | echo "LizardFS mount options:" 1>&2 4 | echo "$@" 1>&2 5 | TARGET=$1 6 | MNTPOINT=$2 7 | KEY=3 8 | while [ "${!KEY}" != "-o" ] 9 | do 10 | KEY=$(($KEY+1)) 11 | done 12 | echo "key=${!KEY}" 13 | KEY=$(($KEY+1)) 14 | OPTS=${!KEY} 15 | mfsmount "$MNTPOINT" -o "$OPTS" 1>&2 16 | exit $?
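For reference: the autofs map shown earlier (auto.data) is what normally drives this mount.mfs helper. A hypothetical static equivalent using Ansible's mount module, with the same source and options as the map entry, would be:

```
# hypothetical static mount matching the auto.data entry
- name: mount LizardFS permanently instead of via autofs
  mount:
    path: /data/lizard
    src: "master:9421:/"
    fstype: mfs
    opts: big_writes,nosuid,nodev,noatime,allow_other
    state: mounted
```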
-------------------------------------------------------------------------------- /project/provision/roles/lizard-client/templates/mfsmount.cfg.j2: -------------------------------------------------------------------------------- 1 | mfsmaster={{ master }},mfsport=9421 2 | {{ mfsmount }} 3 | cacheexpirationtime=500 4 | readaheadmaxwindowsize=4096 -------------------------------------------------------------------------------- /project/provision/roles/lizard-master/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | bundle: lizardfs-bundle-CentOS-7.5.1804 3 | masters: 4 | - { name: 'master01', ip: '192.168.50.21', port: '19427', id: '0' } 5 | - { name: 'master02', ip: '192.168.50.22', port: '19427', id: '1' } 6 | - { name: 'master03', ip: '192.168.50.23', port: '19427', id: '2' } 7 | float_ip: 192.168.50.99 8 | float_if: eth1 -------------------------------------------------------------------------------- /project/provision/roles/lizard-master/files/lizardfs-uraft.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LizardFS uraft high availability daemon 3 | Requires=lizardfs-ha-master.service 4 | After=network.target 5 | After=lizardfs-ha-master.service 6 | 7 | [Service] 8 | Type=simple 9 | PIDFile=/var/run/lizardfs-uraft.pid 10 | TimeoutSec=0 11 | ExecStart=/usr/sbin/lizardfs-uraft 12 | ExecStopPost=/usr/sbin/lizardfs-uraft-helper demote 13 | Restart=no 14 | User=root 15 | 16 | [Install] 17 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /project/provision/roles/lizard-master/files/metadata.nfs: -------------------------------------------------------------------------------- 1 | MFSM NEW -------------------------------------------------------------------------------- /project/provision/roles/lizard-node/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | bundle: lizardfs-bundle-CentOS-7.5.1804 3 | mfsmount: /mnt/data 4 | master_ip: 192.168.50.99 5 | master: master 6 | ... -------------------------------------------------------------------------------- /project/provision/roles/lizard-node/templates/mfshdd.cfg.j2: -------------------------------------------------------------------------------- 1 | {{ mfsmount }} -------------------------------------------------------------------------------- /project/provision/roles/lizard-node/templates/mfsmetalogger.cfg.j2: -------------------------------------------------------------------------------- 1 | MASTER_HOST = {{ master_ip }} -------------------------------------------------------------------------------- /project/provision/roles/lvm2/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup an LVM2 volume (volume group, logical volume, filesystem and mount point). 5 | 6 | Requirements 7 | ------------ 8 | 9 | A spare block device is required (`/dev/sdb` by default).
10 | 11 | Role Variables 12 | -------------- 13 | 14 | mountpoint: /mnt/data 15 | vgname: vg 16 | lvname: lv 17 | filesystem: xfs 18 | size: 100%FREE 19 | disk: /dev/sdb 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | An example of how to use the role, with variables passed in as parameters: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: lvm2 } 29 | -------------------------------------------------------------------------------- /project/provision/roles/lvm2/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mountpoint: /mnt/data 3 | vgname: vg 4 | lvname: lv 5 | filesystem: xfs 6 | size: 100%FREE 7 | disk: /dev/sdb 8 | ... -------------------------------------------------------------------------------- /project/provision/roles/netdata-central/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup the central netdata node that collects metrics streamed from the netdata clients. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP services required. 10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | An example of how to use the role: 15 | 16 | - hosts: servers 17 | roles: 18 | - { role: netdata-central } 19 | -------------------------------------------------------------------------------- /project/provision/roles/netdata-central/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for netdata 3 | # 4 | - name: reload systemd netdata 5 | systemd: 6 | name: "netdata" 7 | daemon_reload: true 8 | 9 | - name: restart netdata 10 | service: 11 | name: "netdata" 12 | state: "restarted" 13 | enabled: true 14 | sleep: 10 15 | 16 | - name: start netdata 17 | service: 18 | name: "netdata" 19 | state: "started" 20 | enabled: true 21 | 22 | - name: stop netdata 23 | service: 24 | name: "netdata" 25 | state: "stopped" 26 | enabled: true 27 |
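A minimal sketch of wiring this role and the client role below together in one playbook (the `monitoring` group name is an assumption, not taken from the project's inventory):

```
---
# central collector on the monitoring host, streaming agents everywhere else
- hosts: monitoring
  roles:
    - { role: netdata-central }

- hosts: all:!monitoring
  roles:
    - { role: netdata-client }
```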
-------------------------------------------------------------------------------- /project/provision/roles/netdata-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup the netdata agent and stream its metrics to the central netdata node. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP services required. 10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | An example of how to use the role: 15 | 16 | - hosts: servers 17 | roles: 18 | - { role: netdata-client } 19 | -------------------------------------------------------------------------------- /project/provision/roles/netdata-client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for netdata 3 | # 4 | - name: reload systemd netdata 5 | systemd: 6 | name: "netdata" 7 | daemon_reload: true 8 | 9 | - name: restart netdata 10 | service: 11 | name: "netdata" 12 | state: "restarted" 13 | enabled: true 14 | sleep: 10 15 | 16 | - name: start netdata 17 | service: 18 | name: "netdata" 19 | state: "started" 20 | enabled: true 21 | 22 | - name: stop netdata 23 | service: 24 | name: "netdata" 25 | state: "stopped" 26 | enabled: true 27 | -------------------------------------------------------------------------------- /project/provision/roles/nginx/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install nginx as a reverse proxy. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | - A local package repository must be added before nginx is installed. 9 | 10 | #### 2. Role Variables 11 | 12 | - nginx_port - server port, `80` 13 | - nginx_web_root - `/var/www/` 14 | - nginx_conf_folder - `/etc/nginx` 15 | - nginx_conf_def_folder - `/etc/nginx/conf.d` 16 | 17 | #### 3. Example Playbook 18 | 19 | ``` 20 | --- 21 | - name: Nginx server deploy 22 | hosts: servers 23 | 24 | roles: 25 | - { role: nginx } 26 | ``` -------------------------------------------------------------------------------- /project/provision/roles/nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for nginx 3 | server_ip: 192.168.50.10 4 | server_name: ns.otus.test 5 | nginx_port: 80 6 | nginx_web_root: /var/www/ 7 | nginx_conf_folder: /etc/nginx 8 | nginx_conf_def_folder: /etc/nginx/conf.d 9 | ... -------------------------------------------------------------------------------- /project/provision/roles/nginx/files/nginx.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=A high performance web server and a reverse proxy server 3 | After=network.target 4 | 5 | [Service] 6 | Type=forking 7 | PIDFile=/var/run/nginx.pid 8 | ExecStartPre=/sbin/nginx -t -q -g 'daemon on; master_process on;' 9 | ExecStart=/sbin/nginx -g 'daemon on; master_process on;' 10 | ExecReload=/sbin/nginx -g 'daemon on; master_process on;' -s reload 11 | TimeoutStopSec=5 12 | KillMode=mixed 13 | 14 | [Install] 15 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /project/provision/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for nginx 3 | 4 | - name: Restart nginx 5 | service: 6 | name: nginx 7 | state: restarted 8 | ...
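Any task that changes the nginx configuration should notify the handler above; a minimal sketch (the template name is an assumption, the variable comes from the role defaults):

```
- name: deploy nginx main config
  template:
    src: nginx.conf.j2
    dest: "{{ nginx_conf_folder }}/nginx.conf"
  notify:
    - Restart nginx
```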
-------------------------------------------------------------------------------- /project/provision/roles/nginx/templates/index.html.j2: -------------------------------------------------------------------------------- 1 | <html> 2 | <head> 3 | <title>Index of {{ server_name }} server</title> 4 | </head> 5 | <body> 6 | This is the main page of server {{ server_name }}<br> 7 | Server name: {{ server_name }}.<br> 8 | Server ip: {{ server_ip }}. 9 | </body> 10 | </html> -------------------------------------------------------------------------------- /project/provision/roles/ntp-client/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as an NTP client. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS, NTP services required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | 16 | Example Playbook 17 | ---------------- 18 | 19 | An example of how to use the role, with variables passed in as parameters: 20 | 21 | - hosts: servers 22 | roles: 23 | - { role: ntp-client } 24 | -------------------------------------------------------------------------------- /project/provision/roles/ntp-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp-client 3 | ntp_timezone: Europe/Moscow -------------------------------------------------------------------------------- /project/provision/roles/ntp-client/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server ns.otus.test iburst 2 | driftfile /var/lib/chrony/drift 3 | logdir /var/log/chrony 4 | log measurements statistics tracking -------------------------------------------------------------------------------- /project/provision/roles/ntp-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0700 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ...
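After the role runs, synchronisation against ns.otus.test can be verified with chronyc; a minimal ad-hoc check (not part of the role itself):

```
- name: check chrony sources
  command: chronyc sources -v
  register: chrony_sources
  changed_when: false
```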
-------------------------------------------------------------------------------- /project/provision/roles/ntp/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup chronyd as an NTP server. 5 | 6 | Requirements 7 | ------------ 8 | 9 | DNS service required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | ntp_timezone: Europe/Moscow 15 | ntp_server: 0.rhel.pool.ntp.org 16 | 17 | Example Playbook 18 | ---------------- 19 | 20 | An example of how to use the role, with variables passed in as parameters: 21 | 22 | - hosts: servers 23 | roles: 24 | - { role: ntp } 25 | -------------------------------------------------------------------------------- /project/provision/roles/ntp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ntp 3 | ntp_timezone: Europe/Moscow 4 | ntp_server: 0.rhel.pool.ntp.org -------------------------------------------------------------------------------- /project/provision/roles/ntp/files/chrony.conf: -------------------------------------------------------------------------------- 1 | server 0.centos.pool.ntp.org iburst 2 | manual 3 | allow 192.168.0.0/16 4 | local stratum 8 -------------------------------------------------------------------------------- /project/provision/roles/ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set timezone 3 | timezone: 4 | name: "{{ ntp_timezone }}" 5 | 6 | - name: config chrony daemon 7 | copy: 8 | src: chrony.conf 9 | dest: /etc/chrony.conf 10 | owner: root 11 | group: root 12 | mode: 0700 13 | 14 | - name: chronyd service restarted 15 | service: 16 | name: chronyd 17 | state: restarted 18 | enabled: true 19 | ... -------------------------------------------------------------------------------- /project/provision/roles/patroni/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install Patroni to orchestrate high-availability PostgreSQL. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Role Variables 10 | 11 | - etcd_hosts - etcd cluster members 12 | - pg_hosts - PostgreSQL cluster members 13 | - pg_bin_dir - `/usr/pgsql-11/bin` 14 | - pg_database_dir - `/var/lib/pgsql/11/data` 15 | 16 | #### 3. Example Playbook 17 | 18 | ``` 19 | --- 20 | - name: Patroni deploy 21 | hosts: servers 22 | 23 | roles: 24 | - { role: patroni } 25 | ``` -------------------------------------------------------------------------------- /project/provision/roles/patroni/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for patroni 3 | etcd_hosts: 4 | - { name: 'ns.otus.test', ip: '192.168.50.10' } 5 | pg_hosts: 6 | - { name: 'master01.otus.test', ip: '192.168.50.21', alias: 'master01' } 7 | - { name: 'master02.otus.test', ip: '192.168.50.22', alias: 'master02' } 8 | - { name: 'master03.otus.test', ip: '192.168.50.23', alias: 'master03' } 9 | 10 | pg_bin_dir: /usr/pgsql-11/bin 11 | pg_database_dir: /var/lib/pgsql/11/data 12 |
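The role renders /etc/patroni.yml from these defaults. The actual template is not shown here, so the following is only a hypothetical excerpt of what it could produce for master01 (key names follow the upstream Patroni config format; addresses come from `pg_hosts`/`etcd_hosts` above):

```
scope: postgres
name: master01
restapi:
  listen: 192.168.50.21:8008
  connect_address: 192.168.50.21:8008
etcd:
  host: 192.168.50.10:2379
postgresql:
  listen: 192.168.50.21:5432
  connect_address: 192.168.50.21:5432
  data_dir: /var/lib/pgsql/11/data      # pg_database_dir
  bin_dir: /usr/pgsql-11/bin            # pg_bin_dir
```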
-------------------------------------------------------------------------------- /project/provision/roles/patroni/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for patroni 3 | 4 | - name: Restart haproxy 5 | service: 6 | name: haproxy 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /project/provision/roles/patroni/templates/patroni.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Runners to orchestrate a high-availability PostgreSQL 3 | After=syslog.target network.target 4 | 5 | [Service] 6 | Type=simple 7 | 8 | User=postgres 9 | Group=postgres 10 | 11 | ExecStart=/bin/patroni /etc/patroni.yml 12 | KillMode=process 13 | TimeoutSec=30 14 | Restart=no 15 | 16 | [Install] 17 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /project/provision/roles/pgbackrest/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install pgBackRest. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: pgBackRest deploy 14 | hosts: servers 15 | 16 | roles: 17 | - { role: pgbackrest } 18 | ``` -------------------------------------------------------------------------------- /project/provision/roles/pgbackrest/templates/pgbackrest.conf.j2: -------------------------------------------------------------------------------- 1 | [global] 2 | repo1-path=/data/lizard/backup 3 | repo1-retention-full=2 4 | process-max=2 5 | log-level-console=info 6 | log-level-file=debug 7 | 8 | [backup] 9 | pg1-host=master01 10 | pg1-path=/var/lib/pgsql/11/data -------------------------------------------------------------------------------- /project/provision/roles/pgsql11/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Install PostgreSQL 11. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 3. Example Playbook 10 | 11 | ``` 12 | --- 13 | - name: PostgreSQL 11 deploy 14 | hosts: servers 15 | 16 | roles: 17 | - { role: pgsql11 } 18 | ``` -------------------------------------------------------------------------------- /project/provision/roles/pgsql11/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for pgsql11 -------------------------------------------------------------------------------- /project/provision/roles/pgsql11/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for pgsql11 3 | 4 | - name: Restart postgres 5 | service: 6 | name: postgresql-11 7 | state: restarted 8 | ... 9 | -------------------------------------------------------------------------------- /project/provision/roles/pgsql11/templates/pgbackrest.conf.j2: -------------------------------------------------------------------------------- 1 | [global] 2 | repo1-path=/opt/backup 3 | repo1-retention-full=2 4 | process-max=2 5 | log-level-console=info 6 | log-level-file=debug 7 | 8 | [backup] 9 | pg1-path=/var/lib/pgsql/11/data
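With a pgBackRest config like the one above in place, the stanza has to be initialised before the first full backup can be taken. A minimal sketch as ad-hoc tasks ("backup" is the stanza name from the [backup] section; running as postgres is an assumption):

```
- name: create pgbackrest stanza
  become: true
  become_user: postgres
  command: pgbackrest --stanza=backup stanza-create

- name: take a full backup
  become: true
  become_user: postgres
  command: pgbackrest --stanza=backup --type=full backup
```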
-------------------------------------------------------------------------------- /project/provision/roles/prometheus/README.md: -------------------------------------------------------------------------------- 1 | ### Role Name 2 | 3 | Deploy Prometheus. 4 | 5 | #### 1. Requirements 6 | 7 | - Centos/7 8 | 9 | #### 2. Role Variables 10 | 11 | - prom_ver: 2.9.2 12 | - prom_sql_ad_ver: 0.4.1 13 | - prom_user: prometheus 14 | - prom_group: prometheus 15 | - prom_conf_dir: /etc/prom 16 | - prom_db_dir: /var/lib/prom 17 | - prom_listen_address: "0.0.0.0:9090" 18 | - prom_ext_url: "http://localhost/prometheus/" 19 | - prom_retention: "14d" 20 | 21 | #### 3. Example Playbook 22 | 23 | ``` 24 | --- 25 | - name: Prometheus deploy 26 | hosts: servers 27 | 28 | roles: 29 | - { role: prometheus } 30 | ``` -------------------------------------------------------------------------------- /project/provision/roles/prometheus/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for prometheus 3 | prom_ver: 2.9.2 4 | prom_sql_ad_ver: 0.4.1 5 | prom_user: prometheus 6 | prom_group: prometheus 7 | prom_conf_dir: /etc/prom 8 | prom_db_dir: /var/lib/prom 9 | prom_listen_address: "0.0.0.0:9090" 10 | prom_ext_url: "http://localhost/prometheus/" 11 | prom_retention: "14d" 12 | nodes: 13 | - { name: 'ns', label: 'masters'} 14 | - { name: 'master01', label: 'masters'} 15 | - { name: 'master02', label: 'masters'} 16 | - { name: 'master03', label: 'masters'} 17 | - { name: 'node01', label: 'nodes'} 18 | - { name: 'node02', label: 'nodes'} 19 | - { name: 'node03', label: 'nodes'} 20 | ... -------------------------------------------------------------------------------- /project/provision/roles/prometheus/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for prometheus 3 | 4 | - name: Restart prometheus 5 | become: true 6 | systemd: 7 | daemon_reload: true 8 | name: prometheus 9 | state: restarted 10 | 11 | - name: Reload prometheus 12 | become: true 13 | systemd: 14 | name: prometheus 15 | state: reloaded 16 | ... 17 | -------------------------------------------------------------------------------- /project/provision/roles/prometheus/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: alerting rules file 3 | template: 4 | src: "alert.rules.j2" 5 | dest: "{{ prom_conf_dir }}/alert.rules" 6 | owner: root 7 | group: prometheus 8 | mode: 0640 9 | # validate: "/usr/sbin/promtool check rules %s" 10 | notify: 11 | - Reload prometheus 12 | 13 | - name: configure prometheus 14 | template: 15 | src: "prometheus.yml.j2" 16 | dest: "{{ prom_conf_dir }}/prometheus.yml" 17 | force: true 18 | owner: root 19 | group: prometheus 20 | mode: 0640 21 | # validate: "/usr/sbin/promtool check config %s" 22 | notify: 23 | - Reload prometheus 24 | ... 25 | -------------------------------------------------------------------------------- /project/provision/roles/prometheus/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for prometheus 3 | 4 | - include: install.yml 5 | become: true 6 | tags: 7 | - install 8 | 9 | - include: configure.yml 10 | become: true 11 | tags: 12 | - configure 13 | 14 | - name: ensure prometheus service is started and enabled 15 | become: true 16 | systemd: 17 | daemon_reload: true 18 | name: prometheus 19 | state: started 20 | enabled: true 21 | tags: 22 | - run 23 | ... 24 |
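For illustration, a hypothetical excerpt of what prometheus.yml.j2 could render from the `nodes` list in the defaults (the template itself is not shown here; node_exporter's default port 9100 is an assumption):

```
scrape_configs:
  - job_name: node
    static_configs:
      - targets: ['ns:9100', 'master01:9100', 'master02:9100', 'master03:9100']
        labels:
          group: masters
      - targets: ['node01:9100', 'node02:9100', 'node03:9100']
        labels:
          group: nodes
```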
-------------------------------------------------------------------------------- /project/provision/roles/prometheus/templates/prometheus-postgresql-adapter.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Prometheus postgresql adapter service unit 3 | Wants=network-online.target 4 | After=network-online.target 5 | 6 | [Service] 7 | Type=simple 8 | User={{ prom_user }} 9 | Group={{ prom_group }} 10 | ExecReload=/bin/kill -HUP $MAINPID 11 | ExecStart=/usr/sbin/prometheus-postgresql-adapter \ 12 | -pg.host ns.otus.test \ 13 | -pg.port 5432 \ 14 | -pg.user 'postgres' \ 15 | -pg.password 'postgres' \ 16 | -pg.database 'postgres' \ 17 | -log.level 'info' 18 | SyslogIdentifier=prometheus 19 | Restart=on-failure 20 | RestartSec=5s 21 | 22 | [Install] 23 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /project/provision/roles/timescaledb/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Setup TimescaleDB and the pg_prometheus extension for PostgreSQL 11. 5 | 6 | Requirements 7 | ------------ 8 | 9 | PostgreSQL 11 service required. 10 | 11 | Example Playbook 12 | ---------------- 13 | 14 | An example of how to use the role: 15 | 16 | - hosts: servers 17 | roles: 18 | - { role: timescaledb } 19 | -------------------------------------------------------------------------------- /project/provision/roles/timescaledb/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for timescaledb 3 | ntp_timezone: Europe/Moscow -------------------------------------------------------------------------------- /project/provision/roles/timescaledb/files/pg_prometheus.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kakoka/otus-homework/6d7e85da8fa1186525a026e66b6230e1da874673/project/provision/roles/timescaledb/files/pg_prometheus.tar.gz -------------------------------------------------------------------------------- /project/provision/roles/timescaledb/files/timescale_timescaledb.repo: -------------------------------------------------------------------------------- 1 | [timescale_timescaledb] 2 | name=timescale_timescaledb 3 | baseurl=https://packagecloud.io/timescale/timescaledb/el/7/$basearch 4 | repo_gpgcheck=1 5 | gpgcheck=0 6 | enabled=1 7 | gpgkey=https://packagecloud.io/timescale/timescaledb/gpgkey 8 | sslverify=1 9 | sslcacert=/etc/pki/tls/certs/ca-bundle.crt 10 | metadata_expire=300 --------------------------------------------------------------------------------
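A minimal sketch of installing TimescaleDB from the packagecloud repo above (the role's tasks are not shown here, and the package name for the PostgreSQL 11 build is an assumption):

```
- name: install timescaledb for postgresql 11
  yum:
    name: timescaledb-postgresql-11
    state: present
```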