├── manifests
│   ├── blank.pp
│   ├── sakila.pp
│   ├── test_user.pp
│   ├── tpcc.pp
│   ├── haproxy.pp
│   ├── test_imdb.pp
│   ├── maxscale.pp
│   ├── percona_access.pp
│   ├── training_user.pp
│   ├── haproxy-pxc.pp
│   ├── sysbench_build.pp
│   ├── percona_repository.pp
│   ├── mha_node.pp
│   ├── remove_insecure_key.pp
│   ├── base.pp
│   ├── docker_server.pp
│   ├── employees.pp
│   ├── percona_client.pp
│   ├── my_movies.pp
│   ├── myq_gadgets.pp
│   ├── mha_manager.pp
│   ├── percona_toolkit.pp
│   ├── sysbench_load.pp
│   ├── pxc_client.pp
│   ├── local_percona_repo.pp
│   ├── mysql_datadir.pp
│   ├── mount.pp
│   ├── training_imdb_nomysql_slave.pp
│   ├── client.pp
│   ├── mysql_repository.pp
│   ├── pxc_arbitrator.pp
│   ├── mysql_client.pp
│   ├── consul_client.pp
│   ├── pxc_slave.pp
│   ├── consul_server.pp
│   ├── sysbench.pp
│   ├── training_imdb_nomysql.pp
│   ├── crappy_server.pp
│   ├── pxc_playground.pp
│   ├── mysql_server.pp
│   ├── pxc_server.pp
│   └── percona_server.pp
├── modules
│   ├── haproxy
│   │   ├── manifests
│   │   │   ├── init.pp
│   │   │   └── server.pp
│   │   └── templates
│   │       └── haproxy.cfg.erb
│   ├── misc
│   │   ├── files
│   │   │   └── dbsake
│   │   └── manifests
│   │       ├── innotop.pp
│   │       ├── dbsake.pp
│   │       ├── remove_insecure_key.pp
│   │       ├── myq_gadgets.pp
│   │       ├── speedometer.pp
│   │       ├── softraid.pp
│   │       ├── myq_tools.pp
│   │       ├── vividcortex.pp
│   │       ├── sakila.pp
│   │       ├── employees.pp
│   │       ├── percona_access.pp
│   │       ├── mount.pp
│   │       └── local_percona_repo.pp
│   ├── percona
│   │   ├── manifests
│   │   │   ├── toolkit.pp
│   │   │   ├── tokudb_config.pp
│   │   │   ├── service.pp
│   │   │   ├── cluster
│   │   │   │   ├── xinetdclustercheck.pp
│   │   │   │   ├── config.pp
│   │   │   │   ├── clustercheckuser.pp
│   │   │   │   ├── sstuser.pp
│   │   │   │   ├── remove_server.pp
│   │   │   │   ├── service.pp
│   │   │   │   ├── client.pp
│   │   │   │   ├── garb.pp
│   │   │   │   └── server.pp
│   │   │   ├── sysbench.pp
│   │   │   ├── remove_anonymous_user.pp
│   │   │   ├── tokudb_enable.pp
│   │   │   ├── cluster.pp
│   │   │   ├── tokudb_install.pp
│   │   │   ├── server-password.pp
│   │   │   ├── config.pp
│   │   │   ├── agent.pp
│   │   │   ├── pxc-clustercheck.pp
│   │   │   ├── repository.pp
│   │   │   ├── client.pp
│   │   │   └── server.pp
│   │   ├── files
│   │   │   ├── percona-clustercheck-1.0-0.noarch.rpm
│   │   │   ├── percona-clustercheck-1.0-2.noarch.rpm
│   │   │   ├── percona-clustercheck-1.1-1.noarch.rpm
│   │   │   └── percona-agent-install.expect
│   │   └── templates
│   │       ├── my-tokudb.cnf.erb
│   │       ├── my.cnf.erb
│   │       └── my-cluster.cnf.erb
│   ├── mysql
│   │   ├── manifests
│   │   │   ├── service.pp
│   │   │   ├── client.pp
│   │   │   ├── server.pp
│   │   │   ├── config.pp
│   │   │   ├── backupdir.pp
│   │   │   ├── repository.pp
│   │   │   └── datadir.pp
│   │   └── templates
│   │       └── my.cnf.erb
│   ├── base
│   │   ├── manifests
│   │   │   ├── motd.pp
│   │   │   ├── hostname.pp
│   │   │   ├── sshd_rootenabled.pp
│   │   │   ├── swappiness.pp
│   │   │   ├── insecure.pp
│   │   │   └── packages.pp
│   │   └── files
│   │       └── sshd_config_rootenabled
│   ├── test
│   │   ├── files
│   │   │   ├── imdb
│   │   │   │   ├── my.indexes.sql
│   │   │   │   └── my.grants.sql
│   │   │   ├── my-movies.config.inc.php
│   │   │   ├── sysbench_custom_lua
│   │   │   │   ├── custom-oltp.lua
│   │   │   │   └── custom-common.lua
│   │   │   └── run_sysbench.sh
│   │   └── manifests
│   │       ├── imdb_ignore_indexes.pp
│   │       ├── sysbench_custom_lua.pp
│   │       ├── user.pp
│   │       ├── sysbench_pkg.pp
│   │       ├── sysbench_load.pp
│   │       ├── tpcc.pp
│   │       ├── sysbench_build.pp
│   │       ├── sysbench_test_script.pp
│   │       └── imdb.pp
│   ├── training
│   │   ├── files
│   │   │   ├── imdb_workload
│   │   │   │   ├── rc.local
│   │   │   │   ├── add_load.py
│   │   │   │   └── constant_workload.py
│   │   │   ├── imdb_optimization.sql
│   │   │   └── galeraWaitUntilEmptyRecvQueue.func.sql
│   │   ├── manifests
│   │   │   ├── imdb
│   │   │   │   ├── optimization.pp
│   │   │   │   ├── erase_perconaserverinstall.pp
│   │   │   │   └── workload.pp
│   │   │   ├── pxc_exercises.pp
│   │   │   ├── ssh_key.pp
│   │   │   └── helper_scripts.pp
│   │   └── templates
│   │       ├── last_node_to_dc2.sh.erb
│   │       ├── ssh_keygen_and_distribute.sh.erb
│   │       ├── reproduce_lcf.sh.erb
│   │       └── run_app.sh.erb
│   ├── docker
│   │   └── manifests
│   │       └── server.pp
│   ├── mariadb
│   │   └── manifests
│   │       ├── repository
│   │       │   └── maxscale.pp
│   │       ├── maxscale.pp
│   │       └── repository.pp
│   └── mha
│       ├── templates
│       │   └── mha.cnf.erb
│       ├── manifests
│       │   ├── manager.pp
│       │   └── node.pp
│       └── files
│           └── master_ip_failover
├── .gitignore
├── get_aws_ips.sh
├── .gitmodules
├── create-new-env.sh
├── Vagrantfile.mysql57.rb
├── Vagrantfile.ebs_custom.rb
├── Vagrantfile57_semi
├── Vagrantfile.ec2_provisioned_iops.rb
├── ms-setup.pl
├── Vagrantfile.ms.rb
├── Vagrantfile.perconaserver.rb
├── Vagrantfile.pxc-big.rb
├── Vagrantfile.pxc.rb
├── Vagrantfile.tokudb.rb
├── Vagrantfile.ps_sysbench.rb
├── Vagrantfile.consul.rb
├── Vagrantfile.pxc_playground.rb
├── lib
│   └── vagrant-common.rb
└── README.md

--------------------------------------------------------------------------------
/manifests/blank.pp:
--------------------------------------------------------------------------------
# Dummy!

--------------------------------------------------------------------------------
/modules/haproxy/manifests/init.pp:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/manifests/sakila.pp:
--------------------------------------------------------------------------------
include misc::sakila

--------------------------------------------------------------------------------
/manifests/test_user.pp:
--------------------------------------------------------------------------------
include test::user

--------------------------------------------------------------------------------
/manifests/tpcc.pp:
--------------------------------------------------------------------------------
include test::tpcc

--------------------------------------------------------------------------------
/manifests/haproxy.pp:
--------------------------------------------------------------------------------
include haproxy::server

--------------------------------------------------------------------------------
/manifests/test_imdb.pp:
--------------------------------------------------------------------------------
include test::imdb

--------------------------------------------------------------------------------
/manifests/maxscale.pp:
--------------------------------------------------------------------------------
include mariadb::maxscale

--------------------------------------------------------------------------------
/manifests/percona_access.pp:
--------------------------------------------------------------------------------
include misc::percona_access

--------------------------------------------------------------------------------
/manifests/training_user.pp:
--------------------------------------------------------------------------------
include training::ssh_key

--------------------------------------------------------------------------------
/manifests/haproxy-pxc.pp:
--------------------------------------------------------------------------------
include haproxy::server-pxc

--------------------------------------------------------------------------------
/manifests/sysbench_build.pp:
--------------------------------------------------------------------------------
include test::sysbench_build

--------------------------------------------------------------------------------
/manifests/percona_repository.pp:
--------------------------------------------------------------------------------
include percona::repository
--------------------------------------------------------------------------------
/manifests/mha_node.pp:
--------------------------------------------------------------------------------
include base::packages
include mha::node

--------------------------------------------------------------------------------
/manifests/remove_insecure_key.pp:
--------------------------------------------------------------------------------
include misc::remove_insecure_key

--------------------------------------------------------------------------------
/manifests/base.pp:
--------------------------------------------------------------------------------
include base::packages
include base::insecure

include base::hostname

--------------------------------------------------------------------------------
/manifests/docker_server.pp:
--------------------------------------------------------------------------------
class { 'docker::server':
  docker_device => $docker_device
}

--------------------------------------------------------------------------------
/modules/misc/files/dbsake:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jayjanssen/vagrant-percona-deprecated/HEAD/modules/misc/files/dbsake

--------------------------------------------------------------------------------
/modules/misc/manifests/innotop.pp:
--------------------------------------------------------------------------------
class misc::innotop {

  package {"innotop": ensure => installed}

}

--------------------------------------------------------------------------------
/manifests/employees.pp:
--------------------------------------------------------------------------------
include base::packages
include misc::employees

Class['base::packages'] -> Class['misc::employees']

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.DS_Store
.vagrant
packer/*.box
packer/packer_cache
packer/crash.log
Vagrantfile
sftp-config.json
.sync

--------------------------------------------------------------------------------
/modules/percona/manifests/toolkit.pp:
--------------------------------------------------------------------------------
class percona::toolkit {
  package {
    "percona-toolkit": ensure => installed;
  }
}

--------------------------------------------------------------------------------
/manifests/percona_client.pp:
--------------------------------------------------------------------------------
include percona::repository
include percona::client

Class['percona::repository'] -> Class['percona::client']

--------------------------------------------------------------------------------
/manifests/my_movies.pp:
--------------------------------------------------------------------------------
# Assumes mysql is already installed in some form
include test::imdb
include test::user

include mysql::service

--------------------------------------------------------------------------------
/manifests/myq_gadgets.pp:
--------------------------------------------------------------------------------
include base::packages
include misc::myq_gadgets

Class['base::packages'] -> Class['misc::myq_gadgets']
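--------------------------------------------------------------------------------
Note: composition pattern of the /manifests entry points
--------------------------------------------------------------------------------
The files under /manifests are thin entry points: each one declares module
classes with include and then orders them with chaining arrows, since Puppet
otherwise applies unrelated resources in no guaranteed order. A hypothetical
node manifest written in the same style (illustrative only; the repo's actual
percona_server.pp is not shown in this listing):

# Hypothetical sketch, not a file from this repo: base setup first, then the
# yum repository, then the server package that comes from that repository.
include base::packages
include base::insecure
include percona::repository
include percona::server

Class['base::packages'] -> Class['percona::repository']
Class['percona::repository'] -> Class['percona::server']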
--------------------------------------------------------------------------------
/manifests/mha_manager.pp:
--------------------------------------------------------------------------------
include base::packages

include mha::manager
include mha::node

Class['mha::node'] -> Class['mha::manager']

--------------------------------------------------------------------------------
/manifests/percona_toolkit.pp:
--------------------------------------------------------------------------------
include percona::repository
include percona::toolkit

Class['percona::repository'] -> Class['percona::toolkit']

--------------------------------------------------------------------------------
/modules/mysql/manifests/service.pp:
--------------------------------------------------------------------------------
class mysql::service {

  service {
    "mysqld":
      enable => true,
      ensure => 'running';
  }
}

--------------------------------------------------------------------------------
/manifests/sysbench_load.pp:
--------------------------------------------------------------------------------
class { 'test::sysbench_load':
  tables  => $tables,
  rows    => $rows,
  threads => $threads,
  schema  => $schema
}

--------------------------------------------------------------------------------
/manifests/pxc_client.pp:
--------------------------------------------------------------------------------
include percona::repository
include percona::cluster::client

Class['percona::repository'] -> Class['percona::cluster::client']

--------------------------------------------------------------------------------
/manifests/local_percona_repo.pp:
--------------------------------------------------------------------------------
include percona::repository
include misc::local_percona_repo

Class['percona::repository'] -> Class['misc::local_percona_repo']

--------------------------------------------------------------------------------
/modules/base/manifests/motd.pp:
--------------------------------------------------------------------------------
class base::motd {

  file {
    "/etc/motd":
      ensure => present,
      source => "puppet:///modules/base/motd"
  }

}

--------------------------------------------------------------------------------
/modules/test/files/imdb/my.indexes.sql:
--------------------------------------------------------------------------------
ALTER TABLE movie_info ADD INDEX movie_id (movie_id);
ALTER TABLE cast_info ADD INDEX movie_id (movie_id), ADD INDEX person_id (person_id);

--------------------------------------------------------------------------------
/modules/percona/files/percona-clustercheck-1.0-0.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jayjanssen/vagrant-percona-deprecated/HEAD/modules/percona/files/percona-clustercheck-1.0-0.noarch.rpm

--------------------------------------------------------------------------------
/modules/percona/files/percona-clustercheck-1.0-2.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jayjanssen/vagrant-percona-deprecated/HEAD/modules/percona/files/percona-clustercheck-1.0-2.noarch.rpm

--------------------------------------------------------------------------------
/modules/percona/files/percona-clustercheck-1.1-1.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jayjanssen/vagrant-percona-deprecated/HEAD/modules/percona/files/percona-clustercheck-1.1-1.noarch.rpm

--------------------------------------------------------------------------------
/modules/test/manifests/imdb_ignore_indexes.pp:
--------------------------------------------------------------------------------
class test::imdb_ignore_indexes {

  file {
    '/tmp/my.indexes.sql.done':
      ensure => present
  }

}

--------------------------------------------------------------------------------
/modules/misc/manifests/dbsake.pp:
--------------------------------------------------------------------------------
class misc::dbsake {
  file {
    '/usr/local/bin/dbsake':
      ensure => present,
      mode   => 0755,
      source => "puppet:///modules/misc/dbsake";
  }
}

--------------------------------------------------------------------------------
/modules/percona/manifests/tokudb_config.pp:
--------------------------------------------------------------------------------
class percona::tokudb_config {
  file {
    "/etc/mysql.d/my-tokudb.cnf":
      ensure  => present,
      content => template("percona/my-tokudb.cnf.erb")
  }
}

--------------------------------------------------------------------------------
/manifests/mysql_datadir.pp:
--------------------------------------------------------------------------------
class { 'mysql::datadir':
  datadir_dev           => $datadir_dev,
  datadir_dev_scheduler => $datadir_dev_scheduler,
  datadir_fs            => $datadir_fs,
  datadir_fs_opts       => $datadir_fs_opts,
  datadir_mkfs_opts     => $datadir_mkfs_opts
}

--------------------------------------------------------------------------------
/modules/misc/manifests/remove_insecure_key.pp:
--------------------------------------------------------------------------------
class misc::remove_insecure_key {
  exec {
    "remove_insecure_key":
      path    => "/bin:/usr/bin:/sbin:/usr/sbin",
      command => "sed -ie 's/.*vagrant insecure public key.*//' /root/.ssh/authorized_keys";
  }
}

--------------------------------------------------------------------------------
/modules/percona/manifests/service.pp:
--------------------------------------------------------------------------------
class percona::service {

  service {
    "mysql":
      enable    => true,
      ensure    => 'running',
      require   => [File['/etc/my.cnf'],Package['MySQL-server']],
      subscribe => File['/etc/my.cnf'];
  }
}

--------------------------------------------------------------------------------
/modules/percona/manifests/cluster/xinetdclustercheck.pp:
--------------------------------------------------------------------------------
class percona::cluster::xinetdclustercheck {
  include percona::cluster::clustercheckuser

  package {
    "xinetd":
      ensure => latest;
  }

  service {
    "xinetd":
      ensure => running;
  }
}

--------------------------------------------------------------------------------
/modules/mysql/manifests/client.pp:
--------------------------------------------------------------------------------
class mysql::client {
  package {
    'mysql-community-client':
      ensure => 'installed';
    'mysql-community-libs':
      alias  => "MySQL-shared",
      ensure => 'installed';
    'mysql-community-devel':
      alias  => 'MySQL-devel',
      ensure => 'installed';
  }

}
--------------------------------------------------------------------------------
/modules/test/files/my-movies.config.inc.php:
--------------------------------------------------------------------------------
1 | 15 |

--------------------------------------------------------------------------------
/modules/training/files/imdb_workload/rc.local:
--------------------------------------------------------------------------------
#!/bin/sh
#
# This script will be executed *after* all the other init scripts.
# You can put your own initialization stuff in here if you don't
# want to do the full Sys V style init stuff.

touch /var/lock/subsys/local
/home/percona/.bin/constant_workload.py >/dev/null 2>&1 &

--------------------------------------------------------------------------------
/modules/percona/manifests/sysbench.pp:
--------------------------------------------------------------------------------
class percona::sysbench {
  package {
    "sysbench":
      # Should be in percona yum repo now
      ensure => 'installed';
  }
  file {
    "/root/sysbench_tests":
      ensure  => link,
      target  => '/usr/share/doc/sysbench/tests',
      require => Package['sysbench'];
  }
}

--------------------------------------------------------------------------------
/modules/mysql/manifests/server.pp:
--------------------------------------------------------------------------------
class mysql::server {
  package {
    'mysql-community-server':
      ensure  => 'installed',
      require => Package['mariadb-libs'];
    'mysql-community-libs':
      ensure  => 'installed',
      require => Package['mariadb-libs'];
    'mariadb-libs':
      ensure => 'purged';
  }

}

--------------------------------------------------------------------------------
/modules/training/manifests/imdb/optimization.pp:
--------------------------------------------------------------------------------
class training::imdb::optimization {

  file {
    "/root/.data/":
      ensure => directory;
    "/root/.data/imdb_optimization.sql":
      ensure  => present,
      source  => "puppet:///modules/training/imdb_optimization.sql",
      require => File["/root/.data/"];
  }

}

--------------------------------------------------------------------------------
/manifests/mount.pp:
--------------------------------------------------------------------------------
class { 'misc::mount':
  mount_point         => $mount_point,
  mount_dev           => $mount_dev,
  mount_dev_scheduler => $mount_dev_scheduler,
  mount_fs            => $mount_fs,
  mount_fs_opts       => $mount_fs_opts,
  mount_mkfs_opts     => $mount_mkfs_opts,
  mount_owner         => $mount_owner,
  mount_group         => $mount_group,
  mount_mode          => $mount_mode
}

--------------------------------------------------------------------------------
/modules/percona/manifests/remove_anonymous_user.pp:
--------------------------------------------------------------------------------
class percona::remove_anonymous_user {

  exec {
    'remove_anonymous_user':
      command => "mysql -Ne \"select concat('DROP USER \\'', user, '\\'@\\'', host, '\\';') from mysql.user where user='';\" | mysql ",
      cwd     => "/root",
      path    => ['/usr/bin', '/bin'],
      require => [ Service['mysql'] ];
  }
}

--------------------------------------------------------------------------------
/manifests/training_imdb_nomysql_slave.pp:
--------------------------------------------------------------------------------
class { 'mysql::datadir':
  datadir_dev => $datadir_dev
}
class { 'mysql::backupdir':
  backupdir_dev => $backupdir_dev
}
Class['mysql::datadir'] -> Class['mysql::backupdir']

include base::packages
include percona::repository

include training::ssh_key

include percona::config

--------------------------------------------------------------------------------
/modules/percona/manifests/tokudb_enable.pp:
--------------------------------------------------------------------------------
class percona::tokudb_enable {
  exec {
    "MySQL-TokuDB-enable":
      command => "ps_tokudb_admin --enable && systemctl restart mysql.service",
      cwd     => "/tmp",
      path    => ['/bin','/usr/bin','/usr/local/bin'],
      creates => '/var/lib/mysql/tokudb.directory';
  }
}

--------------------------------------------------------------------------------
/modules/percona/manifests/cluster.pp:
--------------------------------------------------------------------------------
class percona::cluster {

  include percona::cluster::remove_server
  include percona::cluster::packages
  include percona::cluster::config
  include percona::cluster::service

  Class['percona::cluster::remove_server'] -> Class['percona::cluster::packages'] -> Class['percona::cluster::config'] -> Class['percona::cluster::service']

}

--------------------------------------------------------------------------------
/modules/training/manifests/pxc_exercises.pp:
--------------------------------------------------------------------------------
class training::pxc_exercises {

  file {
    "/root/bin/reproduce_lcf.sh":
      ensure  => present,
      content => template('training/reproduce_lcf.sh.erb'),
      mode    => 0755;
    "/root/bin/last_node_to_dc2.sh":
      ensure  => present,
      mode    => 0755,
      content => template('training/last_node_to_dc2.sh.erb');
  }

}

--------------------------------------------------------------------------------
/modules/training/manifests/imdb/erase_perconaserverinstall.pp:
--------------------------------------------------------------------------------
# The handsondba training requires the imdb data, but part of the training is
# also to install Percona Server, so we have to delete it first.

class training::imdb::erase_perconaserverinstall {

  exec{
    "remove-percona-server":
      command => "/usr/bin/yum remove -y Percona-Server-server-56 Percona-Server-client-56";
  }

}

--------------------------------------------------------------------------------
/modules/test/files/imdb/my.grants.sql:
--------------------------------------------------------------------------------
GRANT ALL ON imdb.* to 'app'@'%' IDENTIFIED BY 'pass';
GRANT ALL ON *.* to 'approot'@'%' IDENTIFIED BY 'pass';
GRANT FILE ON *.* to 'approot'@'%' IDENTIFIED BY 'pass';
GRANT FILE ON *.* to 'approot'@'%' IDENTIFIED BY 'pass';
GRANT ALL on *.* to cactiuser@"%" identified by "cactiuser";
GRANT REPLICATION CLIENT, REPLICATION SLAVE ON *.* to 'repl'@'%' IDENTIFIED BY 'pass';

--------------------------------------------------------------------------------
/modules/haproxy/manifests/server.pp:
--------------------------------------------------------------------------------
class haproxy::server {
  package {
    'haproxy':
      ensure => 'installed';
  }

  service {
    'haproxy':
      enable => true,
      ensure => 'running';
  }

  file {
    '/etc/haproxy/haproxy.cfg':
      ensure  => 'present',
      require => Package['haproxy'],
      content => template('haproxy/haproxy.cfg.erb');
  }

}

--------------------------------------------------------------------------------
/modules/percona/manifests/tokudb_install.pp:
--------------------------------------------------------------------------------
class percona::tokudb_install {
  # Install TokuDB package
  # currently only built for PS 5.6 and x86_64
  case $operatingsystem {
    centos: {
      package {
        "Percona-Server-tokudb-$percona_server_version.$hardwaremodel":
          alias  => "MySQL-TokuDB",
          ensure => latest;
      }
    }
  }
}

--------------------------------------------------------------------------------
/modules/percona/files/percona-agent-install.expect:
--------------------------------------------------------------------------------
#!/usr/bin/expect -f
set percona_agent_api_key [lindex $argv 0]
spawn ./install -api-key=$percona_agent_api_key
expect "Create MySQL user for agent? ('N' to use existing user) (Y): "
send "n\r"
expect "MySQL username (root): "
send "test\r"
expect "MySQL password: "
send "test\r"
expect "socket file (localhost): "
send "\r"
expect "OK"

sleep 5

--------------------------------------------------------------------------------
/modules/training/templates/last_node_to_dc2.sh.erb:
--------------------------------------------------------------------------------
#!/bin/bash

case $1 in
  enable)
    action='add'
    ;;
  disable)
    action='del'
    ;;
  *)
    echo "ERROR: either use enable or disable"
    exit 1;
esac

<%- last_node= @cluster_servers.split(',').last %>
echo "Last node is " <%= last_node %>

ssh <%= last_node %> tc qdisc $action dev eth1 root netem delay 200ms

--------------------------------------------------------------------------------
/manifests/client.pp:
--------------------------------------------------------------------------------
include percona::repository
include percona::toolkit

# Include percona server because tpcc-mysql coredumps without it!
include percona::server

include misc
include misc::sysbench
include misc::tpcc_mysql

Class['misc'] -> Class['percona::repository']

Class['percona::repository'] -> Class['percona::toolkit']
Class['percona::repository'] -> Class['percona::server']

--------------------------------------------------------------------------------
/manifests/mysql_repository.pp:
--------------------------------------------------------------------------------
# 5.6 is enabled by default. The caller is responsible for enabling only one
# of these and for disabling the default.
if( $enable_55 == undef ) {
  $enable_55 = 0
}
if( $enable_56 == undef ) {
  $enable_56 = 1
}
if( $enable_57 == undef ) {
  $enable_57 = 0
}


class { 'mysql::repository':
  55_enabled => $enable_55,
  56_enabled => $enable_56,
  57_enabled => $enable_57
}
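--------------------------------------------------------------------------------
Note: selecting a MySQL series with mysql_repository.pp
--------------------------------------------------------------------------------
mysql_repository.pp above defaults $enable_56 to 1 and leaves the caller
responsible for enabling exactly one series. A hypothetical caller pinning
MySQL 5.7 instead (illustrative only; in this repo these variables presumably
arrive as facts set by the Vagrant provisioner rather than being assigned
inline):

# Hypothetical sketch, mirroring the logic of mysql_repository.pp above.
$enable_55 = 0
$enable_56 = 0
$enable_57 = 1

class { 'mysql::repository':
  55_enabled => $enable_55,
  56_enabled => $enable_56,
  57_enabled => $enable_57
}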
--------------------------------------------------------------------------------
/modules/base/manifests/hostname.pp:
--------------------------------------------------------------------------------
class base::hostname {
  exec {
    "set_hostname":
      command => "hostname $vagrant_hostname",
      unless  => "test `hostname` = $vagrant_hostname",
      path    => ["/bin", "/usr/bin"];
    "remove_hostname_from_localhost_ip":
      command => "sed -ie 's/127.0.0.1.*/127.0.0.1\tlocalhost localhost.localdomain localhost4 localhost4.localdomain4/' /etc/hosts",
      path    => ["/bin", "/usr/bin"];
  }
}

--------------------------------------------------------------------------------
/modules/misc/manifests/myq_gadgets.pp:
--------------------------------------------------------------------------------
class misc::myq_gadgets {
  exec {
    "myq_gadgets":
      command => "wget -O myq_gadgets-latest.tgz https://github.com/jayjanssen/myq_gadgets/tarball/master && tar xvzf myq_gadgets-latest.tgz -C /usr/local/bin --strip-components=1",
      cwd     => "/tmp",
      creates => "/usr/local/bin/myq_status",
      path    => ['/bin','/usr/bin','/usr/local/bin'],
      require => Package['wget'];
  }
}

--------------------------------------------------------------------------------
/manifests/pxc_arbitrator.pp:
--------------------------------------------------------------------------------
# different tools which are used to set up lots of stuff for pxc and to test/train...

# all other defaults
include percona::repository
include percona::cluster::garb
Class['percona::repository'] -> Class['percona::cluster::garb']

include base::packages
include base::hostname
include base::insecure
include misc::myq_gadgets

Class['base::packages'] -> Class['misc::myq_gadgets']

--------------------------------------------------------------------------------
/modules/misc/manifests/speedometer.pp:
--------------------------------------------------------------------------------
class misc::speedometer {

  case $operatingsystem {
    centos: {
      package {
        'python-urwid': ensure => 'present';
      }

      file {
        '/root/bin/speedometer':
          ensure => present,
          mode   => 0755,
          source => "puppet:///modules/misc/speedometer.py"
      }
    }
    ubuntu: {
      package {
        "speedometer":
          ensure => installed;
      }
    }
  }
}

--------------------------------------------------------------------------------
/modules/percona/manifests/cluster/config.pp:
--------------------------------------------------------------------------------
class percona::cluster::config {

  # Use the default_interface's address unless wsrep_node_address is explicitly set
  if $wsrep_node_address == undef {
    if $default_interface != undef {
      $wsrep_node_address = getvar("ipaddress_${default_interface}")
    }
  }

  file {
    "/etc/my.cnf":
      ensure  => present,
      content => template("percona/my-cluster.cnf.erb");
  }
}

--------------------------------------------------------------------------------
/modules/misc/manifests/softraid.pp:
--------------------------------------------------------------------------------
class misc::softraid(
  $softraid_dev,
  $softraid_level,
  $softraid_devices,
  $softraid_dev_str
){

  package {
    'mdadm': ensure => 'present';
  }

  exec {
    "$softraid_dev":
      command => "mdadm --create --verbose --force --run $softraid_dev --level=$softraid_level --raid-devices=$softraid_devices $softraid_dev_str",
      require => Package['mdadm'],
      path    => "/usr/sbin",
      creates => "/dev/md0";
  }
}

--------------------------------------------------------------------------------
/modules/percona/manifests/cluster/clustercheckuser.pp:
--------------------------------------------------------------------------------
class percona::cluster::clustercheckuser {
  include percona::toolkit

  exec {
    'create_clustercheck_user':
      command => "mysql -e \"GRANT USAGE ON *.* TO 'clustercheckuser'@'localhost' IDENTIFIED BY 'clustercheckpassword!'\"",
      cwd     => '/root',
      unless  => "pt-show-grants | grep \"GRANT USAGE ON *.* TO 'clustercheckuser'@'localhost'\"",
      path    => ['/usr/bin', '/bin'],
      require => [ Package['percona-toolkit'], Service['mysql'] ];
  }
}

--------------------------------------------------------------------------------
/manifests/mysql_client.pp:
--------------------------------------------------------------------------------
# 5.6 is enabled by default. The caller is responsible for enabling only one
# of these and for disabling the default.
if( $enable_55 == undef ) {
  $enable_55 = 0
}
if( $enable_56 == undef ) {
  $enable_56 = 1
}
if( $enable_57 == undef ) {
  $enable_57 = 0
}

class { 'mysql::repository':
  55_enabled => $enable_55,
  56_enabled => $enable_56,
  57_enabled => $enable_57
}

include mysql::client

Class['mysql::repository'] -> Class['mysql::client']
--------------------------------------------------------------------------------
/modules/percona/manifests/cluster/sstuser.pp:
--------------------------------------------------------------------------------
class percona::cluster::sstuser {
  include percona::toolkit

  exec {
    'create_sst_user':
      command => "mysql -e \"GRANT LOCK TABLES, PROCESS, RELOAD, REPLICATION CLIENT ON *.* TO 'sst'@'localhost' IDENTIFIED BY 'secret'\"",
      cwd     => '/root',
      unless  => "pt-show-grants | grep \"GRANT LOCK TABLES, PROCESS, RELOAD, REPLICATION CLIENT ON *.* TO 'sst'@'localhost'\"",
      path    => ['/usr/bin', '/bin'],
      require => [ Package['percona-toolkit'], Service['mysql'] ];
  }
}

--------------------------------------------------------------------------------
/get_aws_ips.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# (bash, not sh: the script uses bash arrays below)

# curl -s http://169.254.169.254/latest/meta-data/public-hostname | sed -re 's/[ tab]$//'

node_list=($(vagrant status | grep running | grep -v "instance is running" | awk '{print $1}'))
node_ips=()

for (( i = 0 ; i < ${#node_list[@]} ; i++ ))
# for i in "${node_list[@]}"
do
  node=${node_list[$i]}
  get_ip_cmd="curl -s http://169.254.169.254/latest/meta-data/public-hostname"
  ip=`vagrant ssh $node -c "$get_ip_cmd" 2>/dev/null | grep -o '.*\.amazonaws\.com'`
  echo "$node: '$ip'"
done

--------------------------------------------------------------------------------
/modules/base/manifests/sshd_rootenabled.pp:
--------------------------------------------------------------------------------
class base::sshd_rootenabled {

  file {
    "/etc/ssh/sshd_config":
      mode   => 0644,
      owner  => root,
      group  => root,
      source => "puppet:///modules/base/sshd_config_rootenabled",
      notify => Service["sshd"]
  }

  exec {
    "changerootpassword":
      command => "/usr/bin/echo -n 'perconapassword' | /usr/bin/passwd root --stdin && touch /root/perconapassword.ok",
      creates => "/root/perconapassword.ok"
  }

  service{
    "sshd":
      ensure => running
  }
}

--------------------------------------------------------------------------------
/modules/misc/manifests/myq_tools.pp:
--------------------------------------------------------------------------------
class misc::myq_tools {
  exec {
    "myq_tools":
      command => "wget `curl -s https://api.github.com/repos/jayjanssen/myq-tools/releases | grep browser_download_url | head -n 1 | cut -d '\"' -f 4` && tar xvzf myq_tools.tgz -C /usr/local/bin --strip-components=1 && ln -sf /usr/local/bin/myq_status.linux-amd64 /usr/local/bin/myq_status",
      cwd     => "/tmp",
      creates => "/usr/local/bin/myq_status.linux-amd64",
      path    => ['/bin','/usr/bin','/usr/local/bin'],
      require => Package['wget'];
  }
}

--------------------------------------------------------------------------------
/modules/training/templates/ssh_keygen_and_distribute.sh.erb:
--------------------------------------------------------------------------------
#!/bin/bash
ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""
cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys

<%- @cluster_servers.split(',').each do |node| %>
ssh-keyscan -H <%= node %> >> ~/.ssh/known_hosts
sshpass -p perconapassword ssh-copy-id -i ~/.ssh/id_rsa.pub root@<%= node %>
scp -r ~/.ssh/{id_rsa*,authorized_keys} <%= node %>:.ssh/
<%- @cluster_servers.split(',').each do |node_2| %>
ssh <%= node %> "ssh-keyscan -H <%= node_2 %> >> ~/.ssh/known_hosts"
<%- end %>
<%- end %>

--------------------------------------------------------------------------------
/modules/test/manifests/sysbench_custom_lua.pp:
--------------------------------------------------------------------------------
class test::sysbench_custom_lua {

  file {
    "/root/sysbench_custom_lua":
      ensure => directory;
    "/root/sysbench_custom_lua/custom-oltp.lua":
      ensure  => present,
      source  => "puppet:///modules/test/sysbench_custom_lua/custom-oltp.lua",
      require => File["/root/sysbench_custom_lua"];
    "/root/sysbench_custom_lua/custom-common.lua":
      ensure  => present,
      source  => "puppet:///modules/test/sysbench_custom_lua/custom-common.lua",
      require => File["/root/sysbench_custom_lua"];
  }

}

--------------------------------------------------------------------------------
/modules/docker/manifests/server.pp:
--------------------------------------------------------------------------------
class docker::server (
  $docker_device = undef,
) {

  package {
    "docker": ensure => installed
  }

  if ( $docker_device ) {
    file {
      "/etc/sysconfig/docker-storage-setup":
        ensure  => present,
        content => "DEVS=$docker_device
VG=docker"
    }
  } else {
    file {
      "/etc/sysconfig/docker-storage-setup":
        ensure  => present,
        content => ""
    }
  }

  service {
    "docker": ensure => running, require => File["/etc/sysconfig/docker-storage-setup"];
  }
}

--------------------------------------------------------------------------------
/modules/mysql/templates/my.cnf.erb:
--------------------------------------------------------------------------------
[mysqld]
datadir = /var/lib/mysql
log_error = error.log

log-bin
server-id = <%= @server_id %>

query_cache_size = 0
query_cache_type = 0

innodb_buffer_pool_size = <%= @innodb_buffer_pool_size %>
innodb_log_file_size = <%= @innodb_log_file_size %>
innodb_flush_method = O_DIRECT
innodb_file_per_table
innodb_flush_log_at_trx_commit = <%= @innodb_flush_log_at_trx_commit %>

<%=@extra_mysqld_config %>

[mysql]
prompt = "<%=@vagrant_hostname %> mysql> "

[client]
user = root

--------------------------------------------------------------------------------
/modules/base/manifests/swappiness.pp:
--------------------------------------------------------------------------------
class base::swappiness( $swappiness = 1) {
  exec{
    'swappiness_sysctl_conf':
      command => "echo 'vm.swappiness = $swappiness' >> /etc/sysctl.conf",
      cwd     => '/root',
      unless  => "grep '^vm.swappiness = $swappiness' /etc/sysctl.conf",
      path    => ['/usr/bin', '/bin'];
    'apply_sysctl':
      # We use -w instead of -p to avoid unknown key errors
      command => "sysctl -w vm.swappiness=$swappiness",
      path    => ['/usr/sbin', '/usr/bin', '/sbin', '/bin'],
      unless  => "sysctl vm.swappiness | egrep '^vm.swappiness = $swappiness$'";
  }
}
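--------------------------------------------------------------------------------
Note: overriding the base::swappiness default
--------------------------------------------------------------------------------
base::swappiness above is parameterized with a default of 1; the sysctl -w
exec applies the value immediately while the sysctl.conf line persists it
across reboots, and both execs are guarded so repeated runs are no-ops. A
minimal hypothetical caller overriding the default:

# Hypothetical sketch, not a file from this repo.
class { 'base::swappiness':
  swappiness => 10,
}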
--------------------------------------------------------------------------------
/manifests/consul_client.pp:
--------------------------------------------------------------------------------
include stdlib

$config_hash = delete_undef_values( {
  'datacenter'  => $datacenter,
  'data_dir'    => '/opt/consul',
  'log_level'   => 'INFO',
  'node_name'   => $node_name ? {
    undef   => $vagrant_hostname,
    default => $node_name
  },
  'bind_addr'   => $default_interface ? {
    undef   => undef,
    default => getvar("ipaddress_${default_interface}")
  },
  'client_addr' => '0.0.0.0',
  'ui_dir'      => '/opt/consul/ui',
  'server'      => false,
  'retry_join'  => split($retry_join, ',')
})

class { 'consul':
  version     => '0.7.2',
  config_hash => $config_hash
}

--------------------------------------------------------------------------------
/modules/training/manifests/ssh_key.pp:
--------------------------------------------------------------------------------
class training::ssh_key {

  ssh_authorized_key {$title:
    ensure => present,
    name   => 'percona-training',
    user   => 'root',
    type   => ssh-rsa,
    key    => "AAAAB3NzaC1yc2EAAAADAQABAAABAQCPPDcD4/1gZY8EZaCuYZq7l4KdnPzkr2LIi94pa7GQ6vAg9l/o2MDXNrBT+P7sjfbzRurR633wD5NkERWx8TdRPIZRaZKYp6F4CbOf+LtEYw9dF3CwZVjFHKLqEsKEDMnbpwbaL33RycbjDh3cTHHN65WxiWKhX2yMIwxj3q+rGbx2CP+IUtP59hxc3iz/Fddm3JziB0N4bd0kPL3f8CtdXpmgz+rScL73+L7L0gmF453qXdCYc8wWRdNLhDyxC9nTBDheKEDasyYiprdeuT1D/Nj0eeN/jppU1GJfZ81rryfBRoXShu4yPc0TwDUgF9L9wQiY0lYdVSIh0wbano+B"
  }

}

--------------------------------------------------------------------------------
/modules/base/manifests/insecure.pp:
--------------------------------------------------------------------------------
class base::insecure {

  case $operatingsystem {
    centos: {
      if( $operatingsystemrelease =~ /^7/ ) {
        service {
          'firewalld': ensure => 'stopped', enable => false;
        }
      } else {
        service {
          'iptables': ensure => 'stopped', enable => false,
            status => 'iptables -L -v | grep REJECT' ;
        }
      }
    }
  }

  exec {
    "disable-selinux":
      path    => ["/usr/sbin","/bin","/usr/bin"],
      command => "setenforce Permissive",
      unless  => "getenforce | egrep 'Disabled|Permissive'";
  }

}

--------------------------------------------------------------------------------
/modules/mariadb/manifests/repository/maxscale.pp:
--------------------------------------------------------------------------------
class mariadb::repository::maxscale {
  # no 32bit builds are provided by mariadb, don't use this class on 32bit!

  # only centos supported at the moment
  case $operatingsystem {
    centos: {
      yumrepo {
        "MariaDB-MaxScale":
          baseurl  => $architecture ? {
            "x86_64" => "http://code.mariadb.com/mariadb-maxscale/latest/centos/7/x86_64/"
          },
          descr    => "MariaDB-MaxScale",
          enabled  => 1,
          gpgcheck => 0,
          gpgkey   => "https://yum.mariadb.org/RPM-GPG-KEY-MariaDB"
      }

    }

  }

}

--------------------------------------------------------------------------------
/modules/mysql/manifests/config.pp:
--------------------------------------------------------------------------------
class mysql::config {

  if( $server_id == undef ) {
    $server_id = 1
  }

  if( $innodb_buffer_pool_size == undef ) {
    $innodb_buffer_pool_size = '128M'
  }

  if( $innodb_log_file_size == undef ) {
    $innodb_log_file_size = '64M'
  }

  if( $innodb_flush_log_at_trx_commit == undef ) {
    $innodb_flush_log_at_trx_commit = '1'
  }

  if( $extra_mysqld_config == undef ) {
    $extra_mysqld_config = ''
  }

  file {
    "/etc/my.cnf":
      ensure  => present,
      content => template("mysql/my.cnf.erb"),
  }
}

--------------------------------------------------------------------------------
/modules/percona/templates/my-tokudb.cnf.erb:
--------------------------------------------------------------------------------
[mysqld]
<%= "tokudb_cache_size = " + @tokudb_cache_size + "\n" if defined?( @tokudb_cache_size ) -%>
<%= "tokudb_commit_sync = " + @tokudb_commit_sync + "\n" if defined?( @tokudb_commit_sync ) -%>
<%= "tokudb_directio = " + @tokudb_directio + "\n" if defined?( @tokudb_directio ) -%>
<%= "tokudb_loader_memory_size = " + @tokudb_loader_memory_size + "\n" if defined?( @tokudb_loader_memory_size ) -%>
<%= "tokudb_fsync_log_period = " + @tokudb_fsync_log_period + "\n" if defined?( @tokudb_fsync_log_period ) -%>

[mysqld_safe]
thp-setting=never
malloc-lib=/usr/lib64/libjemalloc.so.1
--------------------------------------------------------------------------------
/manifests/pxc_slave.pp:
--------------------------------------------------------------------------------
# different tools which are used to set up lots of stuff for pxc and to test/train...

include test::sysbench_custom_lua

# all other defaults
include percona::repository

include percona::toolkit

Class['percona::repository'] -> Class['percona::toolkit']

include base::packages
include base::insecure
include base::hostname
include misc::myq_gadgets

Class['base::packages'] -> Class['misc::myq_gadgets']

include haproxy::server-pxc

include mysql::datadir

if ( $percona_agent_enabled == true or $percona_agent_enabled == 'true' ) {
  include percona::agent
}

--------------------------------------------------------------------------------
/modules/percona/manifests/server-password.pp:
--------------------------------------------------------------------------------
# reset 57 password, requires validate-password=OFF in config

class percona::server-password {
  if $percona_server_version == "57" or $percona_server_version == "-57" {
    exec {"remove57randompassword":
      command => 'mysql -u root -p`grep "A temporary password is generated for root@localhost" /var/lib/mysql/error.log | tail -n 1 | awk "{print \\$(NF)}"` --connect-expired-password -e "set password=\"\""',
      path    => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin",
      unless  => "/usr/bin/mysqladmin ext",
      require => Service["mysql"]
    }
  }
}

--------------------------------------------------------------------------------
/modules/misc/manifests/vividcortex.pp:
--------------------------------------------------------------------------------
class misc::vividcortex(
  $api_key
) {
  exec {
    "get_vividcortex_installer":
      command => "wget -O vividcortex_installer https://download.vividcortex.com/install",
      cwd     => "/tmp",
      creates => "/tmp/vividcortex_installer",
      path    => ['/bin','/usr/bin','/usr/local/bin'],
      require => Package['wget'];
    "install_vividcortex":
      command => "sh /tmp/vividcortex_installer -t $api_key --autostart -s --no-proxy",
      cwd     => "/tmp",
      creates => "/usr/local/bin/vc-agent-007",
      path    => ['/bin','/usr/bin','/usr/local/bin'],
      require => Exec['get_vividcortex_installer'];
  }
}

--------------------------------------------------------------------------------
/modules/percona/manifests/config.pp:
--------------------------------------------------------------------------------
class percona::config {

  if( $server_id == undef ) {
    $server_id = 1
  }

  if( $innodb_buffer_pool_size == undef ) {
    $innodb_buffer_pool_size = '128M'
  }

  if( $innodb_log_file_size == undef ) {
    $innodb_log_file_size = '64M'
  }

  if( $innodb_flush_log_at_trx_commit == undef ) {
    $innodb_flush_log_at_trx_commit = '1'
  }

  if( $extra_mysqld_config == undef ) {
    $extra_mysqld_config = ''
  }

  file {
    "/etc/my.cnf":
      ensure  => file,
      content => template("percona/my.cnf.erb"),
      require => File['/etc/mysql.d'];
    "/etc/mysql.d":
      ensure => directory;
  }
}

--------------------------------------------------------------------------------
/modules/mha/templates/mha.cnf.erb:
--------------------------------------------------------------------------------
[server default]
# mysql user and password
user=mha
password=mha
ssh_user=mha

repl_user=repl
repl_password=repl

# Keep failing over even if a slave fails to failover
ignore_fail=1

log_level=debug

# working directory on the manager
manager_workdir=/var/log/masterha/app1
# working directory on MySQL servers
remote_workdir=/var/log/masterha/app1

# Failover scripts
master_ip_failover_script=<%=@master_ip_failover_script%>
master_ip_online_change_script=<%=@master_ip_online_change_script%>

<%- @mha_nodes.split(',').each do |node|
  kv = node.split(':')
%>
[server<%=kv[0]%>]
hostname=<%=kv[1]%>

<%- end %>
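--------------------------------------------------------------------------------
Note: example rendering of mha.cnf.erb
--------------------------------------------------------------------------------
The loop at the end of mha.cnf.erb splits @mha_nodes on commas and each entry
on a colon. With a hypothetical value of mha_nodes = "1:node1,2:node2"
(illustrative only), the per-server blocks would render as:

[server1]
hostname=node1

[server2]
hostname=node2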
--------------------------------------------------------------------------------
/modules/test/manifests/user.pp:
--------------------------------------------------------------------------------
class test::user {
  include percona::toolkit

  exec{
    'create_test_global_user':
      command => "mysql -e \"GRANT ALL PRIVILEGES ON *.* TO 'test'@'%' IDENTIFIED BY 'test'\"",
      cwd     => '/root',
      unless  => "pt-show-grants | grep \"GRANT ALL PRIVILEGES ON *.* TO 'test'@'%'\"",
      path    => ['/usr/bin', '/bin'],
      require => [ Package['percona-toolkit'] ];
    'create_test_localhost_user':
      command => "mysql -e \"GRANT ALL PRIVILEGES ON *.* TO 'test'@'localhost' IDENTIFIED BY 'test'\"",
      cwd     => '/root',
      unless  => "pt-show-grants | grep \"GRANT ALL PRIVILEGES ON *.* TO 'test'@'localhost'\"",
      path    => ['/usr/bin', '/bin'],
      require => [ Package['percona-toolkit'] ];
  }
}

--------------------------------------------------------------------------------
/modules/test/manifests/sysbench_pkg.pp:
--------------------------------------------------------------------------------
class test::sysbench_pkg {
  # this was used for a custom built sysbench (which is in ../files).
  # now the percona repo comes with the sysbench package
  # exec {
  #   "sysbench":
  #     command => "/usr/bin/yum localinstall -y /tmp/sysbench.rpm",
  #     cwd     => "/tmp",
  #     unless  => "/bin/rpm -q sysbench",
  #     require => [File['/tmp/sysbench.rpm']];
  # }
  # file {
  #   "/tmp/sysbench.rpm":
  #     source => "puppet:///modules/test/sysbench-0.5-4.el6_.x86_64.rpm",
  #     ensure => present;
  #   "/root/sysbench_tests":
  #     ensure  => link,
  #     target  => '/usr/share/doc/sysbench/tests',
  #     require => Exec['sysbench'];
  # }

  package {
    "sysbench":
      ensure => latest;
  }
}

--------------------------------------------------------------------------------
/modules/training/files/imdb_optimization.sql:
--------------------------------------------------------------------------------
# These are the indexes I created to optimize the top queries for the imdb database.

# Query 1 (6):
ALTER TABLE cast_info ADD KEY `bk_ci_m_r_no` (`movie_id`,`role_id`,`nr_order`), ADD KEY `bk_ci_p_m` (`person_id`,`movie_id`) , ADD KEY `bk_ci_pr` (person_role_id);

# Query 2 (10):
ALTER TABLE movie_info ADD KEY `bk_mi_m` (`movie_id`);

# Query 4 (1):
ALTER TABLE person_info ADD KEY `bk_pi_p` (person_id);

# Query 5 (2): RAND title
ALTER TABLE title ADD KEY `bk_t_k_i` (kind_id, id);

# Query 8 (5):
ALTER TABLE name ADD KEY `bk_n_n` (name(15));

# Query 20:
ALTER TABLE users ADD KEY `bk_u_l` (last_login_date);

# Query (14):
ALTER TABLE char_name ADD KEY `bk_cn_n` (name(15));

--------------------------------------------------------------------------------
/modules/misc/manifests/sakila.pp:
--------------------------------------------------------------------------------
class misc::sakila {
  exec {
    "wget http://downloads.mysql.com/docs/sakila-db.zip":
      cwd     => "/root",
      creates => "/root/sakila-db.zip",
      path    => ['/bin','/usr/bin','/usr/local/bin'],
      require => Package['wget'];
  }
}

class misc::sakila::install {

  exec {
    "sakila-unzip":
      cwd     => "/root",
      command => "unzip sakila-db.zip",
      creates => "/root/sakila-db",
      path    => ['/bin','/usr/bin/','/usr/local/bin'],
      require => Class['misc::sakila'];
    "sakila-load":
      cwd     => "/root/sakila-db/",
      command => "cat sakila-schema.sql sakila-data.sql | mysql",
      creates => "/var/lib/mysql/sakila/",
      path    => ['/bin','/usr/bin/','/usr/local/bin'],
      require => Exec['sakila-unzip'];
  }
}

--------------------------------------------------------------------------------
/manifests/consul_server.pp:
--------------------------------------------------------------------------------
include base::packages
include base::insecure
include stdlib

$config_hash = delete_undef_values( {
  'datacenter'       => $datacenter,
  'data_dir'         => '/opt/consul',
  'log_level'        => 'INFO',
  'node_name'        => $node_name ? {
    undef   => $vagrant_hostname,
    default => $node_name
  },
  'bind_addr'        => $default_interface ? {
    undef   => undef,
    default => getvar("ipaddress_${default_interface}")
  },
  'client_addr'      => '0.0.0.0',
  'ui_dir'           => '/opt/consul/ui',
  'server'           => true,
  'bootstrap_expect' => $bootstrap_expect,
  'retry_join'       => split($retry_join, ',')
})

class { 'consul':
  manage_service => true,
  version        => '0.7.2',
  config_hash    => $config_hash
}

Class['base::insecure'] -> Class['consul']

--------------------------------------------------------------------------------
/modules/percona/templates/my.cnf.erb:
--------------------------------------------------------------------------------
[mysqld]
datadir = /var/lib/mysql
log_error = error.log

socket = /var/lib/mysql/mysql.sock

log-bin
server-id = <%= @server_id %>

query_cache_size = 0
query_cache_type = 0

innodb_buffer_pool_size = <%= @innodb_buffer_pool_size %>
innodb_log_file_size = <%= @innodb_log_file_size %>
innodb_flush_method = O_DIRECT
innodb_file_per_table
innodb_flush_log_at_trx_commit = <%= @innodb_flush_log_at_trx_commit %>

performance-schema-consumer-events-statements-history = ON

<%=@extra_mysqld_config %>

loose-validate-password = OFF

[mysql]
prompt = "<%=@vagrant_hostname %> mysql> "

[client]
user = root
socket = /var/lib/mysql/mysql.sock

!includedir /etc/mysql.d

--------------------------------------------------------------------------------
/modules/mariadb/manifests/maxscale.pp:
--------------------------------------------------------------------------------
class mariadb::maxscale {

  if $architecture == 'i386' {
    package {
      "maxscale":
        ensure   => present,
        provider => 'rpm',
        source   => 'https://s3-eu-west-1.amazonaws.com/gryp/tutorial-pxc-advanced/maxscale-1.2.0-i686-.rpm'
    }
  } elsif $architecture == 'x86_64' {
    include mariadb::repository::maxscale

    package {
      "maxscale":
        ensure  => latest,
        require => Class["mariadb::repository::maxscale"]
    }
  }

  file {
    "/etc/maxscale.cnf":
      ensure  => present,
      content => template("mariadb/maxscale/maxscale.cnf.erb"),
      require => Package["maxscale"]
  }

  service {
    "maxscale":
      ensure  => 'running',
      require => [ Package["maxscale"], File["/etc/maxscale.cnf"] ]
  }

}

--------------------------------------------------------------------------------
/modules/base/manifests/packages.pp:
--------------------------------------------------------------------------------
class base::packages {
  package {
    'screen':     ensure => 'present';
    'telnet':     ensure => 'present';
    'unzip':      ensure => 'present';
    'lsof':       ensure => 'present';
    'ntp':        ensure => 'present';
    'ntpdate':    ensure => 'present';
    'wget':       ensure => 'present';
    'sysstat':    ensure => 'present';
    'bind-utils': ensure => 'present';
    'bzip2':      ensure => 'present';
    'nano':       ensure => 'present';
    'pigz':       ensure => 'present';
  }

  if( $operatingsystem == 'centos' and $operatingsystemrelease =~ /^7/ ) { #7.0.1406
    package {
      'psmisc': ensure => 'present';
    }
  }

  $ntpservice = $operatingsystem ? {
    ubuntu  => "ntp",
    default => "ntpd"
  }
  service {
    $ntpservice: ensure => 'running', enable => true, require => [Package['ntp']];
  }
}
{ 24 | ubuntu => "ntp", 25 | default => "ntpd" 26 | } 27 | service { 28 | $ntpservice: ensure => 'running', enable => true, require => [Package['ntp']]; 29 | } 30 | } 31 | 32 | -------------------------------------------------------------------------------- /modules/test/manifests/sysbench_load.pp: -------------------------------------------------------------------------------- 1 | class test::sysbench_load( 2 | $tables = 1, 3 | $rows = 250000, 4 | $threads = 1, 5 | $schema = 'sbtest', 6 | $engine = 'innodb' 7 | ) { 8 | exec { 9 | 'create_schema': 10 | command => "/usr/bin/mysqladmin create $schema", 11 | cwd => '/root', 12 | creates => "/var/lib/mysql/$schema/"; 13 | 'prepare_database': 14 | command => "sysbench --test=sysbench_tests/db/parallel_prepare.lua --db-driver=mysql --mysql-table-engine=$engine --mysql-user=root --mysql-db=$schema --oltp-tables-count=$tables --oltp-table-size=$rows --oltp-auto-inc=off --max-requests=$threads --num-threads=$threads run", 15 | timeout => 0, # unlimited 16 | logoutput => 'on_failure', 17 | path => ['/usr/bin', '/bin', '/usr/local/bin'], 18 | cwd => '/root', 19 | creates => "/var/lib/mysql/$schema/sbtest$tables.frm", 20 | require => Exec['create_schema']; 21 | } 22 | 23 | 24 | } 25 | -------------------------------------------------------------------------------- /modules/training/files/galeraWaitUntilEmptyRecvQueue.func.sql: -------------------------------------------------------------------------------- 1 | USE test; DROP FUNCTION IF EXISTS galeraWaitUntilEmptyRecvQueue; 2 | DELIMITER $$ 3 | CREATE 4 | DEFINER=root@localhost FUNCTION galeraWaitUntilEmptyRecvQueue() 5 | RETURNS INT UNSIGNED READS SQL DATA 6 | BEGIN 7 | DECLARE queue INT UNSIGNED; 8 | DECLARE starttime TIMESTAMP; 9 | DECLARE blackhole INT UNSIGNED; 10 | SET starttime = SYSDATE(); 11 | SELECT VARIABLE_VALUE AS trx INTO queue 12 | FROM information_schema.GLOBAL_STATUS 13 | WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'; 14 | WHILE queue > 1 DO /* we allow the queue to be 1 */ 15 | SELECT VARIABLE_VALUE AS trx INTO queue 16 | FROM information_schema.GLOBAL_STATUS 17 | WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'; 18 | SELECT SLEEP(1) into blackhole; 19 | END WHILE; 20 | RETURN SYSDATE() - starttime; 21 | END$$ 22 | DELIMITER ; -------------------------------------------------------------------------------- /manifests/sysbench.pp: -------------------------------------------------------------------------------- 1 | include base::packages 2 | include base::insecure 3 | 4 | include percona::repository 5 | include percona::sysbench 6 | include test::sysbench_test_script 7 | 8 | Class['percona::repository'] -> Class['percona::sysbench'] 9 | 10 | if $enable_consul == 'true' { 11 | info( 'enabling consul agent' ) 12 | 13 | $config_hash = delete_undef_values( { 14 | 'datacenter' => $datacenter, 15 | 'data_dir' => '/opt/consul', 16 | 'log_level' => 'INFO', 17 | 'node_name' => $node_name ? { 18 | undef => $hostname, 19 | default => $node_name 20 | }, 21 | 'bind_addr' => $default_interface ? 
{ 22 | undef => undef, 23 | default => getvar("ipaddress_${default_interface}") 24 | }, 25 | 'client_addr' => '0.0.0.0', 26 | }) 27 | 28 | class { 'consul': 29 | manage_service => true, 30 | config_hash => $config_hash 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "modules/stdlib"] 2 | path = modules/stdlib 3 | url = https://github.com/puppetlabs/puppetlabs-stdlib.git 4 | [submodule "modules/staging"] 5 | path = modules/staging 6 | url = https://github.com/nanliu/puppet-staging.git 7 | [submodule "modules/consul"] 8 | path = modules/consul 9 | url = https://github.com/solarkennedy/puppet-consul.git 10 | [submodule "modules/bind"] 11 | path = modules/bind 12 | url = https://github.com/thias/puppet-bind.git 13 | [submodule "modules/resolv_conf"] 14 | path = modules/resolv_conf 15 | url = https://github.com/saz/puppet-resolv_conf.git 16 | [submodule "modules/puppet-sysctl"] 17 | path = modules/puppet-sysctl 18 | url = https://github.com/thias/puppet-sysctl.git 19 | [submodule "modules/sysctl"] 20 | path = modules/sysctl 21 | url = https://github.com/thias/puppet-sysctl.git 22 | [submodule "modules/archive"] 23 | path = modules/archive 24 | url = https://github.com/voxpupuli/puppet-archive.git 25 | -------------------------------------------------------------------------------- /modules/mariadb/manifests/repository.pp: -------------------------------------------------------------------------------- 1 | class mariadb::repository { 2 | 3 | # only centos supported at the moment 4 | case $operatingsystem { 5 | centos: { 6 | yumrepo { 7 | "MariaDB": 8 | baseurl => $architecture ? { 9 | "i386" => "http://yum.mariadb.org/10.0/centos6-x86", 10 | "x86_64" => "http://yum.mariadb.org/10.0/centos7-amd64" 11 | }, 12 | descr => "MariaDB", 13 | enabled => 1, 14 | gpgcheck => 1, 15 | gpgkey => "https://yum.mariadb.org/RPM-GPG-KEY-MariaDB" 16 | } 17 | 18 | yumrepo { 19 | "MariaDB-MaxScale": 20 | baseurl => $architecture ? 
{ 21 | "x86_64" => "http://code.mariadb.com/mariadb-maxscale/latest/centos/7/" 22 | }, 23 | descr => "MariaDB-MaxScale", 24 | enabled => 1, 25 | gpgcheck => 1, 26 | gpgkey => "https://yum.mariadb.org/RPM-GPG-KEY-MariaDB" 27 | } 28 | 29 | } 30 | 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /modules/misc/manifests/employees.pp: -------------------------------------------------------------------------------- 1 | class misc::employees { 2 | exec { 3 | "download_employees": 4 | command => "wget -O /root/employees_db-full.tar.bz https://launchpad.net/test-db/employees-db-1/1.0.6/+download/employees_db-full-1.0.6.tar.bz2", 5 | cwd => "/root", 6 | creates => "/root/employees_db-full.tar.bz", 7 | path => ['/bin','/usr/bin','/usr/local/bin'], 8 | require => Package['wget']; 9 | "unpack_employees": 10 | command => "tar xvjf employees_db-full.tar.bz", 11 | cwd => "/root", 12 | creates => "/root/employees_db/README", 13 | path => ['/bin','/usr/bin','/usr/local/bin'], 14 | require => Exec['download_employees']; 15 | "load_employees": 16 | command => "mysql < employees.sql", 17 | cwd => "/root/employees_db", 18 | creates => "/var/lib/mysql/employees/db.opt", 19 | path => ['/bin','/usr/bin','/usr/local/bin'], 20 | require => Exec['unpack_employees']; 21 | } 22 | 23 | 24 | 25 | 26 | } 27 | -------------------------------------------------------------------------------- /modules/test/manifests/tpcc.pp: -------------------------------------------------------------------------------- 1 | class test::tpcc { 2 | exec { 3 | "build-essentials": 4 | command => '/usr/bin/yum groupinstall "Development Tools" -y', 5 | cwd => "/tmp", 6 | unless => "/bin/rpm -q gcc"; 7 | } 8 | package { 9 | "openssl-devel": ensure => 'installed'; 10 | "bzr": ensure => 'installed'; 11 | } 12 | 13 | 14 | exec { 15 | "tpcc_checkout": 16 | command => "bzr branch lp:~percona-dev/perconatools/tpcc-mysql", 17 | cwd => "/root", 18 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 19 | require => Package['bzr'], 20 | unless => "test -d /root/tpcc-mysql"; 21 | "tpcc_build": 22 | command => 'make all', 23 | cwd => "/root/tpcc-mysql/src", 24 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 25 | require => [ Package['openssl-devel'], Exec['build-essentials', 'tpcc_checkout']], 26 | unless => "test -f /root/tpcc-mysql/tpcc_load"; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /modules/misc/manifests/percona_access.pp: -------------------------------------------------------------------------------- 1 | class misc::percona_access { 2 | user { 3 | 'percona': 4 | ensure => 'present', 5 | password => 'percona', 6 | managehome => 'true'; 7 | } 8 | 9 | ssh_authorized_key { 10 | 'consultant@sl1.percona.com': 11 | ensure => 'present', 12 | key => 'AAAAB3NzaC1yc2EAAAABIwAAAQEApP15RVFMg5kn9muPXWvPjNcITaTSs/GAPC8bw6HKtUGdP34J7Ytc2HMSDWKe22zZ8P2mz8E/FHgkE6mKZfiBryC8W0lzSittDlYLaaL77VvdB3JtNtyn0AwvBvjMFWvIK16Etcz5mXTSnfSoGxnW2HuN47BhAsPyUWoGm4+B+PUNLqjxfj5slYAah6SQmLzHyP5tC9h3E5yQ69bKBZXOZsyY0icu/q+AWzIe0d5A8PsgsIBl5iS65wMv/hVUR1Moz7tSzjpPm0KHl3exHGy0RMhAaZXU7+CmzM5rNpVQWrJmskfNm4dzGYJxqbSd12rMd+SdhsMapNxolYh0SKeX/w==', 13 | type => 'ssh-rsa', 14 | user => 'percona'; 15 | } 16 | 17 | file { 18 | '/etc/sudoers.d/percona': 19 | ensure => 'present', 20 | content => 'percona ALL=(ALL) NOPASSWD: ALL'; 21 | } 22 | } 
-------------------------------------------------------------------------------- /modules/mha/manifests/manager.pp: -------------------------------------------------------------------------------- 1 | class mha::manager { 2 | exec { 3 | "mha4mysql-manager": 4 | command => "/usr/bin/yum localinstall -y https://72003f4c60f5cc941cd1c7d448fc3c99e0aebaa8.googledrive.com/host/0B1lu97m8-haWeHdGWXp0YVVUSlk/mha4mysql-manager-0.56-0.el6.noarch.rpm", 5 | cwd => "/tmp", 6 | unless => "/bin/rpm -q mha4mysql-manager"; 7 | } 8 | 9 | if( $master_ip_failover_script == undef ) { 10 | $master_ip_failover_script = "/usr/local/bin/master_ip_failover" 11 | } 12 | 13 | if( $master_ip_online_change_script == undef ) { 14 | $master_ip_online_change_script = "/usr/local/bin/master_ip_online_change_script" 15 | } 16 | 17 | file { 18 | "/etc/mha.cnf": 19 | ensure => 'present', 20 | content => template("mha/mha.cnf.erb"); 21 | "/usr/local/bin/master_ip_failover": 22 | ensure => 'present', 23 | source => 'puppet:///modules/mha/master_ip_failover'; 24 | "/usr/local/bin/master_ip_online_change_script": 25 | ensure => 'present', 26 | source => 'puppet:///modules/mha/master_ip_online_change'; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /modules/training/manifests/imdb/workload.pp: -------------------------------------------------------------------------------- 1 | class training::imdb::workload { 2 | 3 | file { 4 | "/root/.bin/": 5 | ensure => directory; 6 | "/root/.bin/constant_workload.py": 7 | ensure => present, 8 | require => File["/root/.bin/"], 9 | mode => 0777, 10 | source => "puppet:///modules/training/imdb_workload/constant_workload.py"; 11 | "/root/add_load.py": 12 | ensure => present, 13 | mode => 0777, 14 | source => "puppet:///modules/training/imdb_workload/add_load.py"; 15 | "/etc/rc.local": 16 | ensure => present, 17 | mode => 0777, 18 | source => "puppet:///modules/training/imdb_workload/rc.local"; 19 | } 20 | 21 | package {"mysql-utilities": ensure=> installed;} 22 | 23 | exec { 24 | "constant_workload": 25 | require => [ Package["mysql-utilities"], Exec["create_mysql_user"] ], 26 | command => "/usr/bin/nohup /root/.bin/constant_workload.py >/dev/null 2>&1 &"; 27 | "create_mysql_user": 28 | command => "/usr/bin/mysql -e \"grant all privileges on *.* to 'plmce'@'localhost' identified by 'BelgianBeers'\";"; 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /modules/percona/manifests/agent.pp: -------------------------------------------------------------------------------- 1 | 2 | class percona::agent { 3 | 4 | exec { 5 | "download_percona_agent": 6 | command => "/usr/bin/curl -O https://www.percona.com/redir/downloads/TESTING/ppl/open-source/ppl-agent.tar.gz", 7 | creates => '/root/ppl-agent.tar.gz', 8 | cwd => '/root/', 9 | path => '/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin'; 10 | "extract_percona_agent": 11 | command => 'tar -xzvf /root/ppl-agent.tar.gz && touch /root/ppl-agent', 12 | creates => '/root/ppl-agent', 13 | require => Exec['download_percona_agent'], 14 | cwd => '/root', 15 | path => '/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin'; 16 | "install_percona_agent": 17 | command => "cd percona-agent-* ; ./install -interactive=false docker:9001", 18 | require => Exec['extract_percona_agent'], 19 | creates => '/usr/local/percona/agent/bin/percona-agent', 20 | cwd => '/root/', 21 | path => '/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin'; 22 | } 23 | 24 | } 25 | 
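The percona::agent class above chains three execs (download, extract, install), each made idempotent with a creates guard. Since .gitmodules earlier in this repo already vendors voxpupuli/puppet-archive as a submodule, the first two steps could be collapsed into a single archive resource; a rough sketch under that assumption (the class name percona::agent_archive is hypothetical and untested here):

    class percona::agent_archive {
      # Download and unpack in one idempotent resource; the creates guard
      # points at the final installed binary so re-runs are no-ops once
      # the agent is in place.
      archive { '/root/ppl-agent.tar.gz':
        ensure       => present,
        source       => 'https://www.percona.com/redir/downloads/TESTING/ppl/open-source/ppl-agent.tar.gz',
        extract      => true,
        extract_path => '/root',
        creates      => '/usr/local/percona/agent/bin/percona-agent',
        cleanup      => false,
      }

      # Same installer invocation as the original manifest.
      exec { 'install_percona_agent':
        command => 'cd percona-agent-* ; ./install -interactive=false docker:9001',
        creates => '/usr/local/percona/agent/bin/percona-agent',
        cwd     => '/root/',
        path    => '/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin',
        require => Archive['/root/ppl-agent.tar.gz'],
      }
    }

The archive resource also retries and checksums more gracefully than a bare curl exec, which is the main reason to prefer it for download-and-unpack steps like this one.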
-------------------------------------------------------------------------------- /modules/test/manifests/sysbench_build.pp: -------------------------------------------------------------------------------- 1 | class test::sysbench_build { 2 | exec { 3 | "build-essentials": 4 | command => '/usr/bin/yum groupinstall "Development Tools" -y', 5 | cwd => "/tmp", 6 | unless => "/bin/rpm -q gcc"; 7 | } 8 | package { 9 | "bzr": ensure => 'installed'; 10 | "libtool": ensure => 'installed'; 11 | } 12 | 13 | 14 | exec { 15 | "sysbench_checkout": 16 | command => "bzr branch lp:sysbench", 17 | cwd => "/root", 18 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 19 | require => Package['bzr'], 20 | unless => "test -d /root/sysbench"; 21 | "sysbench_build": 22 | command => '/root/sysbench/autogen.sh && /root/sysbench/configure && make && make install', 23 | cwd => "/root/sysbench", 24 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 25 | require => [ Exec['build-essentials', 'sysbench_checkout']], 26 | creates => "/usr/local/bin/sysbench"; 27 | } 28 | 29 | file { 30 | "/root/sysbench_tests": 31 | ensure => link, 32 | target => '/root/sysbench/sysbench/tests', 33 | require => Exec['sysbench_build']; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /modules/percona/manifests/cluster/remove_server.pp: -------------------------------------------------------------------------------- 1 | class percona::cluster::remove_server { 2 | package { 3 | "Percona-Server-server-55.$hardwaremodel": 4 | require => Yumrepo['Percona'], 5 | ensure => 'absent'; 6 | "Percona-Server-client-55.$hardwaremodel": 7 | require => Yumrepo['Percona'], 8 | ensure => 'absent'; 9 | "Percona-Server-shared-55.$hardwaremodel": 10 | require => Yumrepo['Percona'], 11 | ensure => 'absent'; 12 | "Percona-Server-devel-55.$hardwaremodel": 13 | ensure => 'absent'; 14 | } 15 | 16 | exec { 17 | 'remove_master_info': 18 | command => "rm -f /var/lib/mysql/master.info", 19 | path => "/usr/bin:/usr/sbin:/bin:/sbin", 20 | onlyif => [ 21 | "test -f /var/lib/mysql/master.info" 22 | ]; 23 | 'remove_sysbench': 24 | command => "rpm -e sysbench", 25 | path => "/usr/bin:/usr/sbin:/bin:/sbin", 26 | onlyif => [ 27 | "rpm -q Percona-Server-server-55" 28 | ]; 29 | } 30 | 31 | Exec['remove_sysbench'] -> Package["Percona-Server-devel-55.$hardwaremodel"] -> Package["Percona-Server-server-55.$hardwaremodel"] -> Package["Percona-Server-client-55.$hardwaremodel"] -> Package["Percona-Server-shared-55.$hardwaremodel"] -> Exec['remove_master_info'] 32 | } -------------------------------------------------------------------------------- /manifests/training_imdb_nomysql.pp: -------------------------------------------------------------------------------- 1 | 2 | 3 | include test::imdb 4 | include test::imdb_ignore_indexes 5 | include training::imdb::workload 6 | 7 | class { 'mysql::datadir': 8 | datadir_dev => $datadir_dev 9 | } 10 | 11 | class { 'mysql::backupdir': 12 | backupdir_dev => $backupdir_dev 13 | } 14 | 15 | Class['mysql::datadir'] -> Class['mysql::backupdir'] 16 | 17 | 18 | include misc::innotop 19 | 20 | include percona::repository 21 | 22 | include training::imdb::optimization 23 | 24 | include base::packages 25 | 26 | include percona::server 27 | include percona::config 28 | include percona::service 29 | 30 | include training::imdb::erase_perconaserverinstall 31 | include training::ssh_key 32 | 33 | include misc::sakila 34 | include 
misc::sakila::install 35 | include percona::agent 36 | 37 | Class['test::imdb_ignore_indexes'] -> Class['test::imdb'] 38 | 39 | Class['mysql::datadir'] -> Class['percona::repository'] -> Class['percona::server'] -> Class['percona::config'] -> Class['percona::service'] -> Class['percona::agent'] -> Class['misc::innotop'] -> Class ['misc::sakila'] -> Class ['misc::sakila::install'] -> Class ['test::imdb'] -> Class['training::imdb::workload'] -> Class['training::imdb::erase_perconaserverinstall'] 40 | 41 | 42 | -------------------------------------------------------------------------------- /modules/training/templates/reproduce_lcf.sh.erb: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | mysqlopts='-u test -ptest' 5 | 6 | <%- @cluster_servers.split(',').each do |node| %> 7 | mysql $mysqlopts -h <%= node %> -e " 8 | set global wsrep_log_conflicts=1; 9 | set global wsrep_provider_options='cert.log_conflicts=1'; 10 | set global wsrep_debug=on; 11 | "; 12 | <%- end %> 13 | <%- first, second, rest = @cluster_servers.split(',', 3) %> 14 | 15 | 16 | mysql $mysqlopts -h <%= first %> -e " 17 | drop table if exists test.test; 18 | create table test.test (id int primary key auto_increment, sec_col int, key sec_col (sec_col) ) engine=innodb; 19 | insert into test.test (id, sec_col) values (1, 1), (2, 2), (3, 3); 20 | "; 21 | 22 | ( 23 | set +e; 24 | timeout 10 yes "update test.test set sec_col = cast(rand()*1024 as signed integer) where id = 2;" | mysql $mysqlopts -h <%= first %> >/dev/null 2>&1; 25 | mysql $mysqlopts -h <%= first %> -e "update test.test set sec_col = 69 where id = 1;" 26 | ) & 27 | 28 | 29 | mysql $mysqlopts -h <%= second %> -e " 30 | set global wsrep_provider_options='gcs.fc_limit=100000000'; 31 | flush tables with read lock; 32 | select * from (select sleep(15)) as tbl limit 0; 33 | unlock tables; 34 | update test.test set sec_col = 0 where id = 1; 35 | "; 36 | 37 | -------------------------------------------------------------------------------- /modules/training/templates/run_app.sh.erb: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | <%- first, other = @cluster_servers.split(',', 2) %> 4 | host=<%= first %> 5 | port=3306 6 | 7 | 8 | function give_help { 9 | echo "Usage:" 10 | echo -e "\t$0 (prepare|<%= @cluster_servers.gsub(',', '|') %>|maxscale-rwsplit|maxscale-read|haproxy-all|haproxy-reads|haproxy-writes|lcf)" 11 | exit 1 12 | } 13 | 14 | case "$1" in 15 | <%- @cluster_servers.split(',').each do |node| %> 16 | <%= node %>) 17 | host=<%= node %> 18 | ;;<%- end %> 19 | maxscale-rwsplit) 20 | port=4006 21 | ;; 22 | maxscale-read) 23 | port=4008 24 | ;; 25 | haproxy-all) 26 | port=3307 27 | ;; 28 | haproxy-reads) 29 | port=3309 30 | ;; 31 | haproxy-writes) 32 | port=3308 33 | ;; 34 | prepare) 35 | /usr/local/bin/run_sysbench.sh -p $port -h $host -x prepare 36 | exit 37 | ;; 38 | lcf) 39 | /root/bin/reproduce_lcf.sh 40 | exit 41 | ;; 42 | "") 43 | #default settings will be used when no argument is given 44 | ;; 45 | help) 46 | give_help 47 | exit 0; 48 | ;; 49 | *) 50 | echo "ERROR: invalid database access method"; 51 | give_help 52 | exit 1; 53 | ;; 54 | esac 55 | 56 | 57 | while true; do 58 | /usr/local/bin/run_sysbench.sh -c 4 -p $port -h $host -x oltp_custom -b 50 -o "--oltp-reconnect=1" 59 | sleep 1 60 | done 61 | 62 | -------------------------------------------------------------------------------- /create-new-env.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | type=$1 6 | destdir=$2 7 | pwd=`pwd` 8 | infofile=$destdir/vagrant-percona-info 9 | 10 | types="ec2_provisioned_iops|ms|mysql57|pxc|ps_sysbench|pxc_playground|perconaserver" 11 | 12 | if [ $# -ne 2 ]; then 13 | echo "Usage: $0 ($types) destination-directory" 14 | echo "" 15 | exit 1 16 | fi 17 | 18 | if [ -e $destdir ]; then 19 | echo "ERROR: the destination directory $destdir already exists, exiting..." 20 | echo "" 21 | exit 1 22 | fi 23 | 24 | 25 | case $type in 26 | ec2_provisioned_iops|ms|mysql57|pxc|ps_sysbench|pxc_playground|perconaserver) 27 | echo "Creating '$type' Environment" 28 | ;; 29 | *) 30 | echo "ERROR: Invalid type $type given, exiting..." 31 | echo "" 32 | exit 1 33 | ;; 34 | esac 35 | 36 | mkdir -p $destdir 37 | 38 | # For every type, we need these 39 | cp Vagrantfile.$type.rb $destdir/Vagrantfile 40 | 41 | cp -R $pwd $destdir/vagrant-percona/ 42 | ln -s vagrant-percona/lib $destdir/ 43 | ln -s vagrant-percona/modules $destdir/ 44 | ln -s vagrant-percona/manifests $destdir/ 45 | 46 | 47 | cat << EOF > $infofile 48 | SRCDIR=$pwd 49 | TYPE=$type 50 | GITVERSION=`git log --pretty=format:'%h' -n 1` 51 | EOF 52 | 53 | case $type in 54 | ebs_custom) 55 | ;; 56 | ms) 57 | ln -s $pwd/ms-setup.pl $destdir/ 58 | ;; 59 | pxc) 60 | ln -s $pwd/pxc-bootstrap.sh $destdir/ 61 | ;; 62 | pxc_multi_region) 63 | ln -s $pwd/pxc-bootstrap.sh $destdir/ 64 | ;; 65 | single_node) 66 | ;; 67 | esac 68 | 69 | cd $destdir 70 | -------------------------------------------------------------------------------- /modules/training/manifests/helper_scripts.pp: -------------------------------------------------------------------------------- 1 | class training::helper_scripts { 2 | 3 | include test::sysbench_custom_lua 4 | 5 | file { 6 | "/root/bin": 7 | ensure => directory; 8 | "/root/bin/ssh_keygen_and_distribute.sh": 9 | ensure => present, 10 | content => template("training/ssh_keygen_and_distribute.sh.erb"), 11 | mode => 755; 12 | "/root/bin/run_app.sh": 13 | ensure => present, 14 | content => template("training/run_app.sh.erb"), 15 | mode => 755; 16 | "/root/galeraWaitUntilEmptyRecvQueue.func.sql": 17 | ensure => present, 18 | mode => 755, 19 | source => "puppet:///modules/training/galeraWaitUntilEmptyRecvQueue.func.sql"; 20 | "/var/lib/mysql/test": 21 | ensure => directory, 22 | mode => 755, 23 | owner => mysql, 24 | require => Service['mysql'], 25 | group => mysql; 26 | } 27 | 28 | exec { 29 | "createfunctiongaleraWaitUntilEmptyRecvQueue": 30 | command => "cat /root/galeraWaitUntilEmptyRecvQueue.func.sql | mysql test && touch /root/galeraWaitUntilEmptyRecvQueue.func.sql.applied", 31 | path => ["/bin/","/usr/bin/"], 32 | creates => "/root/galeraWaitUntilEmptyRecvQueue.func.sql.applied", 33 | require => [ File["/var/lib/mysql/test"], Service['mysql'], File["/root/galeraWaitUntilEmptyRecvQueue.func.sql"] ]; 34 | } 35 | 36 | # we need to ensure the anonymous users are gone, or run_app won't work 37 | include percona::remove_anonymous_user 38 | 39 | package { 40 | "sshpass": 41 | ensure => installed; 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /manifests/crappy_server.pp: -------------------------------------------------------------------------------- 1 | # 5.6 is enabled by default. The client is responsible for enabling only one of these 2 | # versions and for disabling the default.
3 | if( $enable_55 == undef ) { 4 | $enable_55 = 0 5 | } 6 | if( $enable_56 == undef ) { 7 | $enable_56 = 1 8 | } 9 | if( $enable_57 == undef ) { 10 | $enable_57 = 0 11 | } 12 | 13 | 14 | class { 'mysql::repository': 15 | 55_enabled => $enable_55, 16 | 56_enabled => $enable_56, 17 | 57_enabled => $enable_57 18 | } 19 | 20 | 21 | include base::packages 22 | include base::insecure 23 | 24 | include misc::mysql_datadir 25 | include mysql::server 26 | include mysql::config 27 | include mysql::service 28 | 29 | include percona::repository 30 | include percona::sysbench 31 | 32 | include test::imdb 33 | include test::user 34 | include test::sysbench_test_script 35 | 36 | 37 | Class['misc::mysql_datadir'] -> Class['mysql::server'] 38 | Class['mysql::repository'] -> Class['mysql::server'] -> Class['mysql::config'] -> Class['mysql::service'] 39 | 40 | 41 | Class['mysql::service'] -> Class['test::imdb'] 42 | 43 | 44 | Class['base::packages'] -> Class['percona::repository'] 45 | Class['base::insecure'] -> Class['percona::repository'] 46 | 47 | Class['percona::repository'] -> Class['percona::sysbench'] 48 | 49 | 50 | 51 | class { 'test::sysbench_load': 52 | tables => $tables, 53 | rows => $rows, 54 | threads => $threads 55 | } 56 | 57 | Class['percona::sysbench'] -> Class['test::sysbench_load'] 58 | Class['mysql::service'] -> Class['test::user'] 59 | Class['test::user'] -> Class['test::sysbench_load'] -------------------------------------------------------------------------------- /modules/mysql/manifests/backupdir.pp: -------------------------------------------------------------------------------- 1 | class mysql::backupdir ( 2 | $backupdir_dev, 3 | $backupdir_dev_scheduler = 'noop', 4 | $backupdir_fs = 'xfs', 5 | $backupdir_fs_opts = 'noatime', 6 | $backupdir_mkfs_opts = '' 7 | ) { 8 | # Need to set $backupdir_dev from Vagrantfile for this to work right 9 | 10 | exec { 11 | "mkfs_mysql_backupdir": 12 | command => "mkfs.$backupdir_fs $backupdir_mkfs_opts /dev/$backupdir_dev", 13 | require => $backupdir_fs ? 
{ 14 | 'xfs' => Package['xfsprogs'], 15 | default => [] 16 | }, 17 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 18 | unless => "mount | grep '/var/backup'"; 19 | } 20 | 21 | mount { 22 | "/var/backup": 23 | ensure => "mounted", 24 | device => "/dev/$backupdir_dev", 25 | fstype => $backupdir_fs, 26 | options => $backupdir_fs_opts, 27 | atboot => "true", 28 | require => Exec["mkfs_mysql_backupdir", "mkdir_mysql_backupdir"]; 29 | 30 | } 31 | 32 | # IO scheduler 33 | exec { 34 | "backupdir_dev_scheduler": 35 | command => "echo '$backupdir_dev_scheduler' > /sys/block/$backupdir_dev/queue/scheduler", 36 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 37 | unless => "grep -E '\\[$backupdir_dev_scheduler\\]|none' /sys/block/$backupdir_dev/queue/scheduler"; 38 | } 39 | 40 | exec { 41 | "mkdir_mysql_backupdir": 42 | command => "mkdir /var/backup", 43 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 44 | unless => "test -d /var/backup"; 45 | 46 | } 47 | 48 | 49 | } 50 | -------------------------------------------------------------------------------- /Vagrantfile.mysql57.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 5 | 6 | mysql_version = "57" 7 | 8 | name = "57-community" 9 | 10 | Vagrant.configure("2") do |config| 11 | # Every Vagrant virtual environment requires a box to build off of. 12 | config.vm.hostname = name 13 | config.vm.box = "grypyrg/centos-x86_64" 14 | config.ssh.username = "vagrant" 15 | 16 | # Provisioners 17 | provision_puppet( config, "mysql_server.pp" ) { |puppet| 18 | puppet.facter = { 19 | "enable_56" => 0, 20 | "enable_57" => 1, 21 | "innodb_buffer_pool_size" => "128M", 22 | "innodb_log_file_size" => "64M" 23 | } 24 | } 25 | 26 | # Providers 27 | provider_virtualbox( nil, config, 1024 ) { |vb, override| 28 | # If we are using VirtualBox, override mysql_server.pp with the right device for the datadir 29 | provision_puppet( override, "mysql_server.pp" ) {|puppet| 30 | puppet.facter = {"datadir_dev" => "dm-2"} 31 | } 32 | } 33 | 34 | provider_aws( name, config, 'm3.medium') { |aws, override| 35 | # For AWS, we want to map the proper device for this instance type 36 | aws.block_device_mapping = [ 37 | { 38 | 'DeviceName' => "/dev/sdb", 39 | 'VirtualName' => "ephemeral0" 40 | } 41 | ] 42 | # Also override the mysql_server.pp manifest with the right datadir device 43 | provision_puppet( override, "mysql_server.pp" ) {|puppet| 44 | puppet.facter = { 45 | 'datadir_dev' => 'xvdb', 46 | 'innodb_buffer_pool_size' => '1G' 47 | } 48 | } 49 | } 50 | 51 | 52 | end 53 | 54 | 55 | -------------------------------------------------------------------------------- /modules/percona/manifests/cluster/service.pp: -------------------------------------------------------------------------------- 1 | class percona::cluster::service { 2 | 3 | # We bootstrap the bootstrap-ed node. 4 | # This means that when provisioning happens on that node, 5 | # MySQL will (re)start, bootstrap, and potentially create 6 | # a new cluster. This can have nasty consequences for your environment 7 | # if you don't understand all the pieces. 8 | 9 | if( $pxc_bootstrap_node == "true" or $pxc_bootstrap_node == true) { 10 | # We do not use the redhat provider but the old-fashioned init scripts 11 | # by using the 'base' provider;
this allows sending pxc-bootstrap as 12 | # command instead of 'start' 13 | 14 | if( $operatingsystem == 'centos' and $operatingsystemrelease =~ /^7/ ) { 15 | service { 16 | "mysql": 17 | enable => true, 18 | ensure => 'running', 19 | provider => 'base', 20 | start => "(test -f /var/lib/mysql/grastate.dat && systemctl start mysql) || systemctl start mysql@bootstrap", 21 | require => Package['MySQL-server'], 22 | subscribe => File["/etc/my.cnf"]; 23 | } 24 | } else { 25 | service { 26 | "mysql": 27 | enable => true, 28 | ensure => 'running', 29 | provider => 'base', 30 | status => "/etc/init.d/mysql status", 31 | start => "(test -f /var/lib/mysql/grastate.dat && /etc/init.d/mysql start) || /etc/init.d/mysql bootstrap-pxc", 32 | stop => "/etc/init.d/mysql stop", 33 | require => Package['MySQL-server'], 34 | subscribe => File["/etc/my.cnf"]; 35 | } 36 | } 37 | } else { 38 | service { 39 | 'mysql': 40 | ensure => 'running', 41 | subscribe => File["/etc/my.cnf"]; 42 | } 43 | } 44 | } -------------------------------------------------------------------------------- /modules/mysql/manifests/repository.pp: -------------------------------------------------------------------------------- 1 | class mysql::repository( 2 | $55_enabled = 0, 3 | $56_enabled = 1, 4 | $57_enabled = 0 5 | ) { 6 | case $operatingsystem { 7 | centos: { 8 | if( $operatingsystemrelease =~ /^7/ ) { 9 | package { 10 | 'mysql-community-release-repo': 11 | name => 'mysql-community-release-el7-5', 12 | ensure => 'installed', 13 | require => Exec['mysql-community-release-repo']; 14 | 15 | } 16 | 17 | exec { 18 | "mysql-community-release-repo": 19 | command => "/usr/bin/yum localinstall -y https://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm", 20 | cwd => "/tmp", 21 | unless => "/bin/rpm -q mysql-community-release-el7-5"; 22 | } 23 | } else { 24 | package { 25 | 'mysql-community-release-repo': 26 | name => 'mysql-community-release-el6-5', 27 | ensure => 'installed', 28 | require => Exec['mysql-community-release-repo']; 29 | 30 | } 31 | 32 | exec { 33 | "mysql-community-release-repo": 34 | command => "/usr/bin/yum localinstall -y https://dev.mysql.com/get/mysql-community-release-el6-5.noarch.rpm", 35 | cwd => "/tmp", 36 | unless => "/bin/rpm -q mysql-community-release-el6-5"; 37 | } 38 | } 39 | 40 | 41 | yumrepo { 42 | 'mysql55-community': 43 | enabled => $55_enabled, 44 | require => Package['mysql-community-release-repo']; 45 | 'mysql56-community': 46 | enabled => $56_enabled, 47 | require => Package['mysql-community-release-repo']; 48 | 'mysql57-community-dmr': 49 | enabled => $57_enabled, 50 | require => Package['mysql-community-release-repo']; 51 | } 52 | } 53 | } 54 | 55 | } 56 | -------------------------------------------------------------------------------- /Vagrantfile.ebs_custom.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Example of a beefier EC2 instance 5 | 6 | ENV['VAGRANT_DEFAULT_PROVIDER'] = 'aws' 7 | 8 | 9 | # Assumes a box from https://github.com/grypyrg/packer-percona 10 | require './lib/vagrant-common.rb' 11 | 12 | Vagrant.configure("2") do |config| 13 | # Every Vagrant virtual environment requires a box to build off of. 
14 | config.vm.box = "grypyrg/centos-x86_64" 15 | config.ssh.username = "vagrant" 16 | 17 | # We are assuming AWS; create a 'c4.2xlarge' and name it 18 | provider_aws( "Beefy Percona Server", config, 'c4.2xlarge') { |aws, override| 19 | # Setup the 1k provisioned iops 100G EBS drive under sdl 20 | aws.block_device_mapping = [ 21 | { 22 | 'DeviceName' => "/dev/sdl", 23 | 'VirtualName' => "mysql_data", 24 | 'Ebs.VolumeSize' => 100, 25 | 'Ebs.DeleteOnTermination' => true, 26 | 'Ebs.VolumeType' => 'io1', 27 | 'Ebs.Iops' => 1000 28 | } 29 | ] 30 | 31 | # Provision this node with puppet and the 'percona_server.pp' manifest. 32 | # - We use 'xvdl' instead of 'sdl' because of how /sys/block presents 33 | # the device on EC2; this is our EBS data drive above. 34 | # - We override the buffer pool here because a c4.2xlarge has 15G of RAM 35 | provision_puppet( override, 'percona_server.pp') { |puppet| 36 | puppet.facter = { 37 | 'datadir_dev' => 'xvdl', 38 | 'innodb_buffer_pool_size' => '12G', 39 | 'innodb_log_file_size' => '4G', 40 | 'cluster_servers' => 'localhost', 41 | # Sysbench setup 42 | 'sysbench_load' => true, 43 | 'tables' => 20, 44 | 'rows' => 1000000, 45 | 'threads' => 64 46 | } 47 | } 48 | } 49 | end 50 | -------------------------------------------------------------------------------- /modules/percona/manifests/pxc-clustercheck.pp: -------------------------------------------------------------------------------- 1 | # This is an rpm generated from https://github.com/grypyrg/clustercheck/tree/centos7 2 | # it has a better name, init script and rpm spec. 3 | 4 | ## DEPRECATED 5 | # THIS SCRIPT HAS SOME SERIOUS FLAWS, DO NOT USE 6 | # 7 | class percona::pxc-clustercheck { 8 | 9 | # the latest available version 10 | $version="1.1-1" 11 | 12 | if( $operatingsystem == 'centos' and $operatingsystemrelease =~ /^7/ ) { #7.0.1406 13 | exec { 14 | "percona-clustercheck": 15 | command => "/usr/bin/yum localinstall -y /tmp/percona-clustercheck.rpm", 16 | cwd => "/tmp", 17 | unless => "/bin/rpm -q percona-clustercheck-$version", 18 | require => [ File['/tmp/percona-clustercheck.rpm'], Package["MySQL-python"], Package["python-twisted-web"] ]; 19 | } 20 | file { 21 | "/tmp/percona-clustercheck.rpm": 22 | source => "puppet:///modules/percona/percona-clustercheck-$version.noarch.rpm", 23 | ensure => present; 24 | } 25 | service { 26 | "percona-clustercheck": 27 | ensure => running, 28 | subscribe => Exec['percona-clustercheck']; 29 | } 30 | 31 | package { 32 | "MySQL-python": 33 | ensure => installed; 34 | "python-twisted-web": 35 | ensure => installed 36 | } 37 | } else { 38 | exec { 39 | "percona-clustercheck": 40 | command => "/usr/bin/yum localinstall -y /tmp/percona-clustercheck.rpm", 41 | cwd => "/tmp", 42 | unless => "/bin/rpm -q percona-clustercheck-$version", 43 | require => [File['/tmp/percona-clustercheck.rpm']]; 44 | } 45 | file { 46 | "/tmp/percona-clustercheck.rpm": 47 | source => "puppet:///modules/percona/percona-clustercheck-$version.noarch.rpm", 48 | ensure => present; 49 | } 50 | service { 51 | "percona-clustercheck": 52 | ensure => running, 53 | subscribe => Exec['percona-clustercheck']; 54 | } 55 | } 56 | } -------------------------------------------------------------------------------- /modules/percona/manifests/repository.pp: -------------------------------------------------------------------------------- 1 | class percona::repository { 2 | 3 | if( $experimental_repo == undef ) { 4 | $experimental_repo = 'no' 5 | } 6 | 7 | case $operatingsystem { 8 | ubuntu: { 9 | exec { "apt-key": 10 |
command => "apt-key adv --keyserver keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A", 11 | unless => "apt-key list | grep -i percona", 12 | path => "/usr/bin:/bin"; 13 | } 14 | 15 | case $experimental_repo { 16 | 'no': { $repo = "deb http://repo.percona.com/apt precise main 17 | deb-src http://repo.percona.com/apt precise main 18 | "} 19 | 'yes': { $repo = "deb http://repo.percona.com/apt precise experimental 20 | deb-src http://repo.percona.com/apt precise experimental 21 | " } 22 | } 23 | 24 | file { "/etc/apt/sources.list.d/percona-repo.list": 25 | content => $repo, 26 | notify => Exec["percona-apt-update"] 27 | } 28 | 29 | exec { "percona-apt-update": 30 | command => "apt-get update", 31 | require => [File['/etc/apt/sources.list.d/percona-repo.list'], Exec['apt-key']], 32 | path => "/usr/bin:/bin", 33 | refreshonly => true 34 | } 35 | } 36 | centos: { 37 | 38 | 39 | if enable_repo_percona_testing { 40 | notice('Percona Testing Repository Enabled') 41 | 42 | yumrepo { 43 | "percona-testing-source": 44 | enabled => 1, 45 | require => Package["percona-release"]; 46 | "percona-testing-noarch": 47 | enabled => 1, 48 | require => Package["percona-release"]; 49 | "percona-testing-\$basearch": 50 | enabled => 1, 51 | require => Package["percona-release"]; 52 | } 53 | } 54 | 55 | package { 56 | "percona-release": 57 | source => "http://www.percona.com/downloads/percona-release/redhat/0.1-3/percona-release-0.1-3.noarch.rpm", 58 | provider => "rpm"; 59 | } 60 | 61 | } 62 | } 63 | 64 | } 65 | -------------------------------------------------------------------------------- /modules/haproxy/templates/haproxy.cfg.erb: -------------------------------------------------------------------------------- 1 | # this config needs haproxy-1.4.20 2 | 3 | global 4 | log 127.0.0.1 local0 5 | log 127.0.0.1 local1 notice 6 | maxconn 4096 7 | uid 99 8 | gid 99 9 | daemon 10 | #debug 11 | #quiet 12 | 13 | stats socket /etc/haproxy/haproxysock level admin 14 | 15 | 16 | defaults 17 | log global 18 | mode http 19 | option tcplog 20 | option dontlognull 21 | retries 3 22 | redispatch 23 | maxconn 2000 24 | contimeout 5000 25 | clitimeout 50000 26 | srvtimeout 50000 27 | 28 | listen 3307-active-active 0.0.0.0:3307 29 | mode tcp 30 | balance roundrobin 31 | option httpchk 32 | 33 | <%- @haproxy_servers.split(',').each do |node| %> 34 | server <%= node %> <%= node %>:3306 check port 9200 inter 1000 rise 3 fall 3 35 | <%- end %> 36 | 37 | ## active-passive 38 | listen 3308-active-passive-writes 0.0.0.0:3308 39 | mode tcp 40 | balance leastconn 41 | option httpchk 42 | 43 | <%- @haproxy_servers.split(',').each do |node| %> 44 | server <%= node %> <%= node %>:3306 check port 9200 inter 1000 rise 3 fall 3 <%- if @haproxy_servers_primary.split(',').select{|v| v==node }.empty? then %> backup <%- end %> 45 | <%- end %> 46 | 47 | listen 3309-active-passive-reads 0.0.0.0:3309 48 | mode tcp 49 | balance leastconn 50 | option httpchk 51 | 52 | <%- @haproxy_servers.split(',').each do |node| %> 53 | server <%= node %> <%= node %>:3306 check port 9200 inter 1000 rise 3 fall 3 <%- if ! @haproxy_servers_primary.split(',').select{|v| v==node }.empty? 
then %> backup <%- end %> 54 | <%- end %> 55 | 56 | 57 | listen stats :8080 58 | mode http 59 | stats enable 60 | stats hide-version 61 | stats realm Haproxy\ Statistics 62 | stats uri / 63 | -------------------------------------------------------------------------------- /modules/misc/manifests/mount.pp: -------------------------------------------------------------------------------- 1 | class misc::mount ( 2 | $mount_point, 3 | $mount_dev, 4 | $mount_dev_scheduler = 'noop', 5 | $mount_fs = 'xfs', 6 | $mount_fs_opts = 'noatime', 7 | $mount_mkfs_opts = '-f', 8 | $mount_owner = 'root', 9 | $mount_group = 'root', 10 | $mount_mode = '0775' 11 | ) { 12 | # Need to set $mount_dev from Vagrantfile for this to work right 13 | 14 | package { 15 | 'xfsprogs': ensure => 'present'; 16 | } 17 | exec { 18 | "mkfs_mount": 19 | command => "mkfs.$mount_fs $mount_mkfs_opts /dev/$mount_dev", 20 | require => Package['xfsprogs'], 21 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 22 | unless => "mount | grep '$mount_point'"; 23 | } 24 | mount { 25 | 'mount point': 26 | name => $mount_point, 27 | ensure => "mounted", 28 | device => "/dev/$mount_dev", 29 | fstype => $mount_fs, 30 | options => $mount_fs_opts, 31 | atboot => "true", 32 | require => [Exec["mkfs_mount"], File[$mount_point]]; 33 | 34 | } 35 | # IO scheduler 36 | exec { 37 | "mount_dev_scheduler": 38 | command => "echo '$mount_dev_scheduler' > /sys/block/$mount_dev/queue/scheduler", 39 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 40 | unless => "grep -E '\\[$mount_dev_scheduler\\]|none' /sys/block/$mount_dev/queue/scheduler"; 41 | "fix_perms": 42 | command => "chown $mount_owner.$mount_group $mount_point && chmod $mount_mode $mount_point && touch $mount_point/.perms_set", 43 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 44 | creates => "$mount_point/.perms_set", 45 | require => Mount['mount point']; 46 | 47 | } 48 | 49 | file { 50 | $mount_point: 51 | ensure => 'directory', 52 | owner => $mount_owner, 53 | group => $mount_group, 54 | mode => $mount_mode; 55 | 56 | } 57 | 58 | } -------------------------------------------------------------------------------- /Vagrantfile57_semi: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Assumes a box from https://github.com/grypyrg/packer-percona 5 | 6 | # This sets up 2 nodes for replication. Run 'ms-setup.pl' after these are provisioned.
7 | require './lib/vagrant-common.rb' 8 | 9 | # Our puppet config 10 | $puppet_config = { 11 | 'innodb_buffer_pool_size' => '2G', 12 | 'extra_mysqld_config' => ' 13 | log-slave-updates 14 | gtid_mode = ON 15 | enforce-gtid-consistency 16 | sync-binlog=1 17 | ' 18 | } 19 | 20 | def build_box( config, name, ip, server_id, region, extra_mysqld_conf ) 21 | config.vm.define name do |node_config| 22 | node_config.vm.hostname = name 23 | node_config.vm.network :private_network, ip: ip 24 | 25 | node_puppet_config = $puppet_config.merge({ 26 | :server_id => server_id 27 | }) 28 | 29 | node_puppet_config['extra_mysqld_config'] = $puppet_config['extra_mysqld_config'] + extra_mysqld_conf 30 | 31 | provider_aws( name, node_config, 'm3.medium', region, ['default','pxc']) { |aws, override| 32 | aws.block_device_mapping = [ 33 | { 34 | 'DeviceName' => "/dev/sdb", 35 | 'VirtualName' => "ephemeral0" 36 | } 37 | ] 38 | provision_puppet( override, 'mysql_server57.pp', 39 | node_puppet_config.merge( 'datadir_dev' => 'xvdb' ) 40 | ) 41 | } 42 | 43 | provider_virtualbox( nil, node_config, 256 ) { |vb, override| 44 | provision_puppet( override, 'mysql_server57.pp', 45 | node_puppet_config.merge('datadir_dev' => 'dm-2') 46 | ) 47 | 48 | } 49 | 50 | end 51 | end 52 | 53 | Vagrant.configure("2") do |config| 54 | config.vm.box = "grypyrg/centos-x86_64" 55 | config.ssh.username = "vagrant" 56 | 57 | build_box( config, 'master', '192.168.70.2', '1', 'us-east-1', ' 58 | plugin-load=rpl_semi_sync_master=semisync_master.so 59 | rpl_semi_sync_master_enabled = 1 60 | ' ) 61 | build_box( config, 'slave', '192.168.70.3', '2', 'us-west-1', ' 62 | plugin-load=rpl_semi_sync_slave=semisync_slave.so 63 | rpl_semi_sync_slave_enabled = 1 64 | ' ) 65 | end 66 | 67 | 68 | -------------------------------------------------------------------------------- /Vagrantfile.ec2_provisioned_iops.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 5 | 6 | mysql_version = "56" 7 | name = "beefy-percona-server" 8 | 9 | Vagrant.configure("2") do |config| 10 | # Every Vagrant virtual environment requires a box to build off of.
11 | config.vm.hostname = name 12 | config.vm.box = "grypyrg/centos-x86_64" 13 | config.ssh.username = "vagrant" 14 | 15 | # Provisioners 16 | provision_puppet( config, "base.pp" ) 17 | provision_puppet( config, "percona_server.pp" ) { |puppet| 18 | puppet.facter = { 19 | 'cluster_servers' => name, 20 | "percona_server_version" => mysql_version, 21 | 'innodb_buffer_pool_size' => '12G', 22 | 'innodb_log_file_size' => '4G' 23 | } 24 | } 25 | provision_puppet( config, "percona_client.pp" ) { |puppet| 26 | puppet.facter = { 27 | "percona_server_version" => mysql_version 28 | } 29 | } 30 | provision_puppet( config, "sysbench.pp" ) 31 | 32 | 33 | 34 | # Providers 35 | provider_virtualbox( nil, config, 256 ) { |vb, override| 36 | # If we are using Virtualbox, override percona_server.pp with the right device for the datadir 37 | provision_puppet( override, "percona_server.pp" ) {|puppet| 38 | puppet.facter = {"datadir_dev" => "dm-2"} 39 | } 40 | } 41 | 42 | provider_aws( name, config, 'm1.xlarge') { |aws, override| 43 | # For AWS, we want to map the proper device for this instance type 44 | aws.block_device_mapping = [ 45 | { 46 | 'DeviceName' => "/dev/sdl", 47 | 'VirtualName' => "mysql_data", 48 | 'Ebs.VolumeSize' => 100, 49 | 'Ebs.DeleteOnTermination' => true, 50 | 'Ebs.VolumeType' => 'io1', 51 | 'Ebs.Iops' => 1000 52 | } 53 | ] 54 | # Also override the percona_server.pp manifest with the right datadir device 55 | provision_puppet( override, "percona_server.pp" ) {|puppet| 56 | puppet.facter = {"datadir_dev" => "xvdl"} 57 | } 58 | } 59 | end 60 | -------------------------------------------------------------------------------- /modules/misc/manifests/local_percona_repo.pp: -------------------------------------------------------------------------------- 1 | class misc::local_percona_repo { 2 | # Enable local percona repo in /var/repo 3 | 4 | file { 5 | '/var/repo': 6 | ensure => 'directory'; 7 | } 8 | 9 | if( $operatingsystem == 'centos' and $operatingsystemrelease =~ /^7/ ) { 10 | # Download only seems to be built-in to yum in 7+ 11 | package { 12 | 'yum': ensure => 'installed', alias => 'yum-plugin-downloadonly'; 13 | } 14 | } else { 15 | package { 16 | 'yum-plugin-downloadonly': ensure => 'installed'; 17 | } 18 | } 19 | 20 | package { 21 | 'createrepo': ensure => 'installed'; 22 | 'yum-plugin-priorities': ensure => 'installed'; 23 | } 24 | 25 | exec { 26 | 'download_pkgs': 27 | command => "/usr/bin/yum install --downloadonly --downloaddir=/var/repo -y Percona-XtraDB-Cluster-56; 28 | /usr/bin/yum install --downloadonly --downloaddir=/var/repo -y Percona-XtraDB-Cluster-garbd-3 29 | /usr/bin/yum install --downloadonly --downloaddir=/var/repo -y Percona-Server-server-56; 30 | /usr/bin/yum install --downloadonly --downloaddir=/var/repo -y percona-xtrabackup; 31 | /usr/bin/yum install --downloadonly --downloaddir=/var/repo -y percona-nagios-plugins; 32 | /usr/bin/yum install --downloadonly --downloaddir=/var/repo -y Percona-Server-shared-51; 33 | /usr/bin/yum install --downloadonly --downloaddir=/var/repo -y haproxy xinetd keepalived; 34 | touch /tmp/repo_downloaded", 35 | creates => "/tmp/repo_downloaded", 36 | require => [File['/var/repo'], Package['yum-plugin-downloadonly']]; 37 | } 38 | 39 | exec { 40 | 'create_local_repo': 41 | command => "createrepo /var/repo", 42 | path => ['/bin','/usr/bin','/usr/local/bin'], 43 | creates => "/var/repo/repodata/repomd.xml", 44 | require => [Package['createrepo'], Exec['download_pkgs']], 45 | 46 | } 47 | 48 | case $operatingsystem { 49 | centos: { 50 | 
yumrepo{ 'local_percona_repo': 51 | name => "local", 52 | descr => "Local Repo", 53 | gpgcheck => "0", 54 | enabled => "1", 55 | baseurl => "file:///var/repo", 56 | priority => 1, 57 | require => [Exec['create_local_repo'], Package['yum-plugin-priorities']]; 58 | } 59 | } 60 | } 61 | } -------------------------------------------------------------------------------- /modules/percona/manifests/cluster/client.pp: -------------------------------------------------------------------------------- 1 | class percona::cluster::client { 2 | # Default PS version is 55 for now 3 | if( $percona_server_version == undef or $percona_server_version == 55 ) { 4 | $percona_server_version = '-55' 5 | } elsif( $percona_server_version == 56 ) { 6 | $percona_server_version = '-56' 7 | } elsif( $percona_server_version == 57 ) { 8 | $percona_server_version = '-57' 9 | } 10 | 11 | # ugly way of making sure the version we want to use doesn't conflict with the old one 12 | # (oh boy this whole thing might need refactoring) 13 | if $percona_server_version == '' or $percona_server_version == '-57' { 14 | $other_percona_server_version="-55" 15 | $other_percona_server_version2="-56" 16 | } elsif $percona_server_version == "-56" { 17 | $other_percona_server_version="-55" 18 | $other_percona_server_version2="-57" 19 | } elsif $percona_server_version == "-55" { 20 | $other_percona_server_version="-56" 21 | $other_percona_server_version2="-57" 22 | } 23 | 24 | 25 | case $operatingsystem { 26 | centos: { 27 | package { 28 | "Percona-XtraDB-Cluster-client$other_percona_server_version.$hardwaremodel": 29 | ensure => "absent"; 30 | "Percona-XtraDB-Cluster-shared$other_percona_server_version.$hardwaremodel": 31 | ensure => "absent"; 32 | "Percona-XtraDB-Cluster-devel$other_percona_server_version.$hardwaremodel": 33 | ensure => "absent"; 34 | "Percona-XtraDB-Cluster-client$other_percona_server_version2.$hardwaremodel": 35 | ensure => "absent"; 36 | "Percona-XtraDB-Cluster-shared$other_percona_server_version2.$hardwaremodel": 37 | ensure => "absent"; 38 | "Percona-XtraDB-Cluster-devel$other_percona_server_version2.$hardwaremodel": 39 | ensure => "absent"; 40 | 41 | "Percona-XtraDB-Cluster-client$percona_server_version.$hardwaremodel": 42 | alias => "MySQL-client", 43 | ensure => "latest"; 44 | "Percona-XtraDB-Cluster-shared$percona_server_version.$hardwaremodel": 45 | alias => "MySQL-shared", 46 | ensure => "latest"; 47 | } 48 | } 49 | ubuntu: { 50 | package { 51 | "percona-server-client": 52 | alias => "MySQL-client"; 53 | } 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /modules/mysql/manifests/datadir.pp: -------------------------------------------------------------------------------- 1 | class mysql::datadir ( 2 | $datadir_dev, 3 | $datadir_dev_scheduler = 'noop', 4 | $datadir_fs = 'xfs', 5 | $datadir_fs_opts = 'noatime', 6 | $datadir_mkfs_opts = '' 7 | ) { 8 | # Need to set $datadir_dev from Vagrantfile for this to work right 9 | 10 | if $datadir_fs == 'xfs' { 11 | package { 12 | 'xfsprogs': ensure => 'present'; 13 | } 14 | } 15 | 16 | exec { 17 | "mkfs_mysql_datadir": 18 | command => "mkfs.$datadir_fs $datadir_mkfs_opts /dev/$datadir_dev", 19 | require => $datadir_fs ? 
{ 20 | 'xfs' => Package['xfsprogs'], 21 | default => [] 22 | }, 23 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 24 | unless => "mount | grep '/var/lib/mysql'"; 25 | } 26 | 27 | mount { 28 | "/var/lib/mysql": 29 | ensure => "mounted", 30 | device => "/dev/$datadir_dev", 31 | fstype => $datadir_fs, 32 | options => $datadir_fs_opts, 33 | atboot => "true", 34 | require => Exec["mkfs_mysql_datadir", "mkdir_mysql_datadir"]; 35 | 36 | } 37 | 38 | # IO scheduler 39 | exec { 40 | "datadir_dev_scheduler": 41 | command => "echo '$datadir_dev_scheduler' > /sys/block/$datadir_dev/queue/scheduler", 42 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 43 | unless => "grep -E '\\[$datadir_dev_scheduler\\]|none' /sys/block/$datadir_dev/queue/scheduler"; 44 | } 45 | 46 | exec { 47 | "mkdir_mysql_datadir": 48 | command => "mkdir /var/lib/mysql", 49 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 50 | unless => "test -d /var/lib/mysql"; 51 | 52 | } 53 | 54 | 55 | 56 | # recreate the mysql datadir if it doesn't exist but mysql_install_db is present 57 | # If mysql hasn't been installed yet, this will not run and let the package install create the datadir 58 | exec { 59 | "mysql_install_db": 60 | command => "mysql_install_db --user=mysql --datadir=/var/lib/mysql", 61 | path => "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin", 62 | require => Mount["/var/lib/mysql"], 63 | onlyif => "test -f /var/lib/mysql/mysql/user.frm", 64 | unless => "which mysql_install_db"; 65 | } 66 | 67 | 68 | 69 | # mount 70 | 71 | 72 | } 73 | -------------------------------------------------------------------------------- /modules/percona/templates/my-cluster.cnf.erb: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | datadir = /var/lib/mysql 3 | log_error = error.log 4 | socket = /var/lib/mysql/mysql.sock 5 | 6 | 7 | query_cache_size=0 8 | query_cache_type=0 9 | 10 | binlog_format = ROW 11 | 12 | <%= "innodb_buffer_pool_size = " + @innodb_buffer_pool_size + "\n" if defined?( @innodb_buffer_pool_size ) -%> 13 | <%= "innodb_log_file_size = " + @innodb_log_file_size + "\n" if defined?( @innodb_log_file_size ) -%> 14 | innodb_flush_method = O_DIRECT 15 | innodb_file_per_table 16 | <%= "innodb_flush_log_at_trx_commit = " + @innodb_flush_log_at_trx_commit + "\n" if defined?( @innodb_flush_log_at_trx_commit ) -%> 17 | innodb_autoinc_lock_mode = 2 18 | # Deprecated: innodb_locks_unsafe_for_binlog = ON 19 | 20 | <%= "wsrep_cluster_address = " + @wsrep_cluster_address + "\n" if defined?( @wsrep_cluster_address ) -%> 21 | wsrep_cluster_name = mycluster 22 | 23 | wsrep_node_name = <%= @vagrant_hostname %> 24 | <%= "wsrep_node_address = " + @wsrep_node_address + "\n" if @wsrep_node_address -%> 25 | 26 | <% if @architecture == "i386" %> 27 | wsrep_provider = /usr/lib/libgalera_smm.so 28 | <% elsif @architecture == "x86_64" %> 29 | wsrep_provider = /usr/lib64/libgalera_smm.so 30 | <% end %> 31 | 32 | <%= "wsrep_provider_options = \"" + @wsrep_provider_options + "\"\n" if defined?( @wsrep_provider_options ) -%> 33 | 34 | <%= "wsrep_slave_threads = " + @wsrep_slave_threads + "\n" if defined?( @wsrep_slave_threads ) -%> 35 | <%= "wsrep_auto_increment_control = " + @wsrep_auto_increment_control + "\n" if defined?( @wsrep_auto_increment_control ) -%> 36 | wsrep_sst_method = xtrabackup-v2 37 | wsrep_sst_auth = sst:secret 38 | 39 | <%= @extra_mysqld_config + "\n\n" if
defined?( @extra_mysqld_config ) -%> 40 | 41 | loose-validate-password=OFF 42 | 43 | [mysql] 44 | prompt = "<%=@vagrant_hostname %> mysql> " 45 | 46 | [client] 47 | user = root 48 | socket = /var/lib/mysql/mysql.sock 49 | 50 | !include /etc/my-pxc.cnf 51 | !include /etc/my-pxc-extra.cnf 52 | 53 | -------------------------------------------------------------------------------- /modules/percona/manifests/cluster/garb.pp: -------------------------------------------------------------------------------- 1 | class percona::cluster::garb { 2 | # Default PS version is 56 3 | if( $percona_server_version == undef or $percona_server_version == 56 ) { 4 | $percona_server_version = '-56' 5 | } elsif( $percona_server_version == 55 ) { 6 | $percona_server_version = '-55' 7 | } 8 | 9 | # ugly way of making sure the version we want to use doesn't conflict with the old one 10 | # (oh boy this whole thing might need refactoring) 11 | if $percona_server_version == '' { 12 | $other_percona_server_version="-55" 13 | } elsif $percona_server_version == "-55" { 14 | $other_percona_server_version="-56" 15 | } 16 | 17 | # You can set the $galera_version to 2 or 3 for either 55 or 56, but if it is not set it defaults like this: 18 | if( $galera_version == undef ) { 19 | if( $percona_server_version == "-55" ) { 20 | $galera_version = '2' 21 | } elsif( $percona_server_version == "-56" ) { 22 | $galera_version = '3' 23 | } 24 | } 25 | 26 | if( $galera_version == '2' ) { 27 | $other_galera_version = '3' 28 | } elsif( $galera_version == '3' ) { 29 | $other_galera_version = '2' 30 | } 31 | 32 | 33 | # CentOS ONLY FOR NOW 34 | case $operatingsystem { 35 | centos: { 36 | package { 37 | "Percona-XtraDB-Cluster-garbd-$galera_version": 38 | alias => "galera", 39 | ensure => "installed"; 40 | } 41 | } 42 | } 43 | 44 | file { 45 | "/etc/sysconfig/garb": 46 | ensure => present, 47 | content => "# Copyright (C) 2012 Codership Oy 48 | # This config file is to be sourced by garb service script. 49 | 50 | # A space-separated list of node addresses (address[:port]) in the cluster 51 | GALERA_NODES='$arbitrator_nodes' 52 | 53 | # Galera cluster name, should be the same as on the rest of the nodes. 54 | GALERA_GROUP='$arbitrator_clustername' 55 | 56 | # Optional Galera internal options string (e.g. SSL settings) 57 | # see http://www.codership.com/wiki/doku.php?id=galera_parameters 58 | # GALERA_OPTIONS='' 59 | 60 | # Log file for garbd.
Optional, by default logs to syslog 61 | # Deprecated for CentOS7, use journalctl to query the log for garbd 62 | # LOG_FILE='' 63 | "; 64 | } 65 | 66 | service { 67 | "garb": 68 | ensure => running, 69 | require => [ File["/etc/sysconfig/garb"], Package["Percona-XtraDB-Cluster-garbd-$galera_version"] ]; 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /modules/percona/manifests/client.pp: -------------------------------------------------------------------------------- 1 | class percona::client { 2 | # Default PS version is 57 for now 3 | if( $percona_server_version == undef ) { 4 | $percona_server_version = '57' 5 | } 6 | 7 | # ugly way of making sure the version we want to use doesn't conflict with the old one 8 | # (oh boy this whole thing might need refactoring) 9 | if $percona_server_version == 55 { 10 | $other_percona_server_version="56" 11 | $other_percona_server_version2="57" 12 | } elsif $percona_server_version == 56 { 13 | $other_percona_server_version="55" 14 | $other_percona_server_version2="57" 15 | } elsif $percona_server_version == 57 { 16 | $other_percona_server_version="55" 17 | $other_percona_server_version2="56" 18 | } 19 | 20 | case $operatingsystem { 21 | centos: { 22 | package { 23 | "Percona-Server-client-$percona_server_version.$hardwaremodel": 24 | alias => "MySQL-client", 25 | ensure => latest; 26 | "Percona-Server-devel-$percona_server_version.$hardwaremodel": 27 | require => [ Package['MySQL-client'] ], 28 | alias => "MySQL-devel", 29 | ensure => latest; 30 | "Percona-Server-client-$other_percona_server_version.$hardwaremodel": 31 | before => Package["Percona-Server-client-$percona_server_version.$hardwaremodel"], 32 | ensure => absent; 33 | "Percona-Server-client-$other_percona_server_version2.$hardwaremodel": 34 | before => Package["Percona-Server-client-$percona_server_version.$hardwaremodel"], 35 | ensure => absent; 36 | "Percona-Server-devel-$other_percona_server_version.$hardwaremodel": 37 | before => Package["Percona-Server-devel-$percona_server_version.$hardwaremodel"], 38 | ensure => absent; 39 | "Percona-Server-devel-$other_percona_server_version2.$hardwaremodel": 40 | before => Package["Percona-Server-devel-$percona_server_version.$hardwaremodel"], 41 | ensure => absent; 42 | "Percona-Server-shared-$percona_server_version.$hardwaremodel": 43 | alias => "MySQL-shared", 44 | ensure => latest; 45 | "Percona-Server-shared-$other_percona_server_version.$hardwaremodel": 46 | ensure => absent; 47 | "Percona-Server-shared-$other_percona_server_version2.$hardwaremodel": 48 | ensure => absent; 49 | } 50 | } 51 | ubuntu: { 52 | package { 53 | "percona-server-client": 54 | alias => "MySQL-client"; 55 | } 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /ms-setup.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | 3 | print "Setup nodes with replication\n"; 4 | 5 | # setup all nodes in replication with the first node in `vagrant status` as the master.
6 | 7 | my @running_nodes_lines = `vagrant status | grep running`; 8 | my @running_nodes; 9 | foreach my $line( @running_nodes_lines ) { 10 | if( $line =~ m/^(\w+)\s+\w+\s+\((\w+)\)$/ ) { 11 | push( @running_nodes, { 12 | name => $1, 13 | provider => $2 14 | }); 15 | } 16 | } 17 | 18 | # Harvest node ips 19 | foreach my $node( @running_nodes ) { 20 | my $nic = 'eth1'; 21 | $nic = 'eth0' if $node->{provider} eq 'aws'; 22 | 23 | my $ip_str = `vagrant ssh $node->{name} -c "ip a l | grep $nic | grep inet"`; 24 | if( $ip_str =~ m/inet\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\// ) { 25 | $node->{ip} = $1; 26 | } 27 | } 28 | 29 | # use Data::Dumper qw( Dumper ); 30 | # print Dumper( @running_nodes ); 31 | 32 | my $master = shift @running_nodes; 33 | my $master_ip = $master->{ip}; 34 | 35 | print "Master node will be: $master->{name} ($master->{ip})\n"; 36 | 37 | # Get Master binlog file and position 38 | my $master_status =<<EOF; 39 | mysql -BN -e "SHOW MASTER STATUS" 40 | EOF 41 | my @master_status_str = `vagrant ssh $master->{name} -c \"$master_status\"`; 42 | my $master_log_file; 43 | my $master_log_pos = 0; 44 | if( $master_status_str[$#master_status_str] =~ m/^(.+)\s+(\d+)/ ) { 45 | $master_log_file = $1; 46 | $master_log_pos = $2; 47 | } else { 48 | die "Could not parse master log file and position!\n"; 49 | } 50 | 51 | # Report master status 52 | print <<EOF; 53 | Master binlog file: $master_log_file 54 | Master binlog position: $master_log_pos 55 | EOF 56 | 57 | # Create the replication grant on the master 58 | # NOTE: the original heredoc bodies were lost to encoding damage; the 'repl'/'repl' credentials below are reconstructed placeholders 59 | print "Creating replication grant on master $master->{name}\n"; 60 | my $grant =<<EOF; 61 | mysql -e "GRANT REPLICATION SLAVE ON *.* TO 'repl'\@'%' IDENTIFIED BY 'repl'" 62 | EOF 63 | system( "vagrant ssh $master->{name} -c \"$grant\""); 64 | 65 | 66 | # Configure the slaves 67 | my $change_master =<<EOF; 68 | mysql -e "CHANGE MASTER TO master_host='$master_ip', master_user='repl', master_password='repl', master_log_file='$master_log_file', master_log_pos=$master_log_pos; START SLAVE" 69 | EOF 70 | foreach my $slave( @running_nodes ) { 71 | print "Configuring replication on slave '$slave->{name}'\n"; 72 | system( "vagrant ssh $slave->{name} -c \"$change_master\""); 73 | } -------------------------------------------------------------------------------- /manifests/pxc_playground.pp: -------------------------------------------------------------------------------- 1 | # different tools used to set up lots of stuff for PXC and to test/train... 2 | 3 | include test::sysbench_custom_lua 4 | 5 | Class['percona::cluster::service'] -> Class['test::user'] 6 | include test::user 7 | 8 | 9 | # all other defaults 10 | include percona::repository 11 | include percona::cluster::client 12 | 13 | Class['percona::repository'] -> Class['percona::cluster::client'] 14 | 15 | 16 | Class['percona::cluster::service'] -> Class['test::sysbench_pkg'] -> Class['test::sysbench_test_script'] 17 | include test::sysbench_pkg 18 | include test::sysbench_test_script 19 | 20 | 21 | include percona::toolkit 22 | 23 | Class['percona::repository'] -> Class['percona::toolkit'] 24 | 25 | include base::packages 26 | include base::hostname 27 | include base::motd 28 | 29 | include misc::speedometer 30 | include misc::myq_gadgets 31 | include misc::dbsake 32 | 33 | Class['base::packages'] -> Class['misc::myq_gadgets'] 34 | 35 | notice ("haproxy disabled is $haproxy_disabled") 36 | if ( $haproxy_disabled == 'false' ) { 37 | include haproxy::server 38 | } 39 | 40 | notice ("maxscale disabled is $maxscale_disabled") 41 | if ( $maxscale_disabled == 'false' ) { 42 | include mariadb::maxscale 43 | } 44 | 45 | include percona::server-password 46 | include percona::cluster::server 47 | include percona::cluster::config 48 | include percona::cluster::service 49 | include percona::cluster::sstuser 50 | include percona::cluster::xinetdclustercheck 51 | 52 | class { 'mysql::datadir': 53 | datadir_dev => $datadir_dev, 54 | datadir_dev_scheduler => $datadir_dev_scheduler, 55 | datadir_fs => $datadir_fs, 56 | datadir_fs_opts => $datadir_fs_opts, 57 | datadir_mkfs_opts => $datadir_mkfs_opts 58 | } 59 | 60 | Class['mysql::datadir'] -> Class['percona::cluster::server'] 61 | 62 | Class['percona::repository'] -> Class['percona::cluster::server'] ->
Class['percona::cluster::config'] -> Class['percona::cluster::service'] -> Class['percona::server-password'] 63 | 64 | 65 | include base::packages 66 | include base::insecure 67 | include base::sshd_rootenabled 68 | 69 | Class['base::insecure'] -> Class['percona::cluster::service'] 70 | 71 | if ( $percona_agent_enabled == true or $percona_agent_enabled == 'true' ) { 72 | include percona::agent 73 | } 74 | 75 | include training::helper_scripts 76 | 77 | include training::pxc_exercises 78 | include base::sshd_rootenabled 79 | 80 | Class['base::sshd_rootenabled'] -> Class['training::helper_scripts'] -> Class['misc::speedometer'] 81 | -------------------------------------------------------------------------------- /Vagrantfile.ms.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # To create multiple slaves, read the instructions near the end 5 | # of this file. 6 | 7 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 8 | 9 | def build_box( config, name, ip, server_id ) 10 | mysql_version = "57" 11 | 12 | config.vm.define name do |node_config| 13 | node_config.vm.hostname = name 14 | node_config.vm.network :private_network, ip: ip, adaptor: 1, auto_config: false 15 | node_config.vm.provision :hostmanager 16 | 17 | # Provisioners 18 | provision_puppet( node_config, "base.pp" ) 19 | provision_puppet( node_config, "percona_server.pp" ) { |puppet| 20 | puppet.facter = { 21 | 'cluster_servers' => name, 22 | "percona_server_version" => mysql_version, 23 | "innodb_buffer_pool_size" => "128M", 24 | "innodb_log_file_size" => "64M", 25 | "server_id" => server_id 26 | } 27 | } 28 | provision_puppet( node_config, "percona_client.pp" ) { |puppet| 29 | puppet.facter = { 30 | "percona_server_version" => mysql_version 31 | } 32 | } 33 | 34 | # Providers 35 | provider_virtualbox( nil, node_config, 256 ) { |vb, override| 36 | vb.linked_clone = true 37 | provision_puppet( override, "percona_server.pp" ) {|puppet| 38 | puppet.facter = { 39 | "default_interface" => "eth1", 40 | "datadir_dev" => "dm-2" 41 | } 42 | } 43 | } 44 | 45 | provider_aws( name, node_config, 'm3.medium') { |aws, override| 46 | aws.block_device_mapping = [ 47 | { 48 | 'DeviceName' => "/dev/sdl", 49 | 'VirtualName' => "mysql_data", 50 | 'Ebs.VolumeSize' => 20, 51 | 'Ebs.DeleteOnTermination' => true, 52 | } 53 | ] 54 | provision_puppet( override, "percona_server.pp" ) {|puppet| 55 | puppet.facter = {"datadir_dev" => "xvdl"} 56 | } 57 | } 58 | end 59 | 60 | if block_given? 61 | yield 62 | end 63 | 64 | end 65 | 66 | 67 | Vagrant.configure("2") do |config| 68 | config.vm.box = "grypyrg/centos-x86_64" 69 | config.ssh.username = "vagrant" 70 | 71 | build_box( config, 'master', '192.168.70.2', '1' ) 72 | build_box( config, 'slave1', '192.168.70.3', '2' ) 73 | 74 | # Uncomment the line below to build a 3rd slave. You can add more 75 | # lines like this one to have more nodes. Be sure to adjust the 76 | # parameters to prevent duplicates. 77 | #build_box( config, 'slave2', '192.168.70.4', '3' ) 78 | end 79 | -------------------------------------------------------------------------------- /Vagrantfile.perconaserver.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Assumes a box from https://github.com/grypyrg/packer-percona 5 | 6 | # it also installs haproxy 7 | 8 | # HOW TO USE 9 | # You have to bring the machines up prior to provisioning. 
so run it in two steps: 10 | # 11 | # # vagrant up --no-provision --parallel 12 | # # vagrant provision --parallel 13 | 14 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 15 | 16 | mysql_version = "57" 17 | name = "perconaserver" 18 | aws_region = 'us-east-1' 19 | security_groups = "default" 20 | 21 | ip_address='192.168.8.70' 22 | if_adapter='vboxnet14' 23 | 24 | # should we use the public or private ips when using AWS 25 | hostmanager_aws_ips='public' 26 | 27 | Vagrant.configure("2") do |config| 28 | config.vm.box = "grypyrg/centos-x86_64" 29 | config.ssh.username = "vagrant" 30 | 31 | # hostmanager is disabled by default here; it runs during the provision phase 32 | config.hostmanager.enabled = false 33 | config.hostmanager.include_offline = true 34 | 35 | # Create the single Percona Server node 36 | config.vm.define name do |node_config| 37 | node_config.vm.hostname = name 38 | node_config.vm.network :private_network, ip: ip_address, adaptor: if_adapter 39 | 40 | # custom port forwarding 41 | node_config.vm.network "forwarded_port", guest: 8080, host: 8080, auto_correct: true 42 | node_config.vm.network "forwarded_port", guest: 3306, host: 3306, auto_correct: true 43 | 44 | # Provisioners 45 | node_config.vm.provision :hostmanager 46 | 47 | provision_puppet( node_config, "percona_server.pp" ) { |puppet| 48 | puppet.facter = { 49 | 'cluster_servers' => name, 50 | 'vagrant_hostname' => name, 51 | "percona_server_version" => mysql_version, 52 | "datadir_dev" => "dm-2", 53 | 'innodb_buffer_pool_size' => '128M', 54 | 'innodb_log_file_size' => '64M', 55 | 'innodb_flush_log_at_trx_commit' => '0', 56 | } 57 | } 58 | 59 | # Providers 60 | provider_virtualbox( nil, node_config, 512) { |vb, override| 61 | provision_puppet( override, "percona_server.pp" ) { |puppet| 62 | puppet.facter = {"datadir_dev" => "dm-2"} 63 | } 64 | } 65 | 66 | provider_aws( "Percona Server #{name}", node_config, 'm3.medium', aws_region, security_groups, hostmanager_aws_ips) { |aws, override| 67 | aws.block_device_mapping = [ 68 | { 69 | 'DeviceName' => "/dev/sdb", 70 | 'VirtualName' => "ephemeral0" 71 | } 72 | ] 73 | provision_puppet( override, "percona_server.pp" ) { |puppet| 74 | puppet.facter = {"datadir_dev" => "xvdb"} 75 | } 76 | 77 | } 78 | end 79 | end 80 | -------------------------------------------------------------------------------- /modules/percona/manifests/server.pp: -------------------------------------------------------------------------------- 1 | class percona::server { 2 | # Default PS version is 57 for now 3 | if( $percona_server_version == undef ) { 4 | $percona_server_version = '57' 5 | } 6 | 7 | # ugly way of making sure the version we want to use doesn't conflict with the old one 8 | # (oh boy this whole thing might need refactoring) 9 | if $percona_server_version == 55 { 10 | $other_percona_server_version="56" 11 | $other_percona_server_version2="57" 12 | } elsif $percona_server_version == 56 { 13 | $other_percona_server_version="55" 14 | $other_percona_server_version2="57" 15 | } elsif $percona_server_version == 57 { 16 | $other_percona_server_version="55" 17 | $other_percona_server_version2="56" 18 | } 19 | 20 | case $operatingsystem { 21 | centos: { 22 | package { 23 | "mariadb-libs": 24 | ensure => purged; 25 | "Percona-Server-client-$percona_server_version.$hardwaremodel": 26 | alias => "MySQL-client", 27 | ensure => latest; 28 | "Percona-Server-client-$other_percona_server_version.$hardwaremodel": 29 | before => Package["Percona-Server-client-$percona_server_version.$hardwaremodel"],
30 | require => Package["Percona-Server-server-$other_percona_server_version.$hardwaremodel"], 31 | ensure => absent; 32 | "Percona-Server-client-$other_percona_server_version2.$hardwaremodel": 33 | before => Package["Percona-Server-client-$percona_server_version.$hardwaremodel"], 34 | require => Package["Percona-Server-server-$other_percona_server_version2.$hardwaremodel"], 35 | ensure => absent; 36 | "Percona-Server-server-$percona_server_version.$hardwaremodel": 37 | alias => "MySQL-server", 38 | require => Package["MySQL-client"], 39 | ensure => latest; 40 | "Percona-Server-server-$other_percona_server_version.$hardwaremodel": 41 | before => Package["Percona-Server-server-$percona_server_version.$hardwaremodel"], 42 | ensure => absent; 43 | "Percona-Server-server-$other_percona_server_version2.$hardwaremodel": 44 | before => Package["Percona-Server-server-$percona_server_version.$hardwaremodel"], 45 | ensure => absent; 46 | "Percona-Server-shared-$percona_server_version.$hardwaremodel": 47 | alias => "MySQL-shared", 48 | ensure => latest; 49 | "Percona-Server-shared-$other_percona_server_version.$hardwaremodel": 50 | before => Package["MySQL-client"], 51 | ensure => absent; 52 | "Percona-Server-shared-$other_percona_server_version2.$hardwaremodel": 53 | before => Package["MySQL-client"], 54 | ensure => absent; 55 | } 56 | } 57 | 58 | ubuntu: { 59 | package { 60 | "percona-server-client": 61 | alias => "MySQL-client"; 62 | "percona-server-server": 63 | name => $percona_server_version ? { 64 | '55' => "percona-server-server-5.5", 65 | '56' => "percona-server-server-5.6", 66 | '57' => "percona-server-server-5.7" 67 | }, 68 | alias => "MySQL-server", 69 | ensure => latest; 70 | } 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /manifests/mysql_server.pp: -------------------------------------------------------------------------------- 1 | include stdlib 2 | 3 | include base::hostname 4 | include base::packages 5 | include base::insecure 6 | 7 | class {'base::swappiness': 8 | swappiness => $swappiness 9 | } 10 | 11 | 12 | # 5.6 enabled by default. The client is responsible for enabling only one of these 13 | # and for disabling the default.
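# For example (hypothetical facts; set them via a Vagrantfile's puppet.facter
# block or any other facter source): enable_56 => 0, enable_57 => 1 installs
# 5.7 instead of the default 5.6. Enable exactly one series at a time.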
14 | if( $enable_55 == undef ) { 15 | $enable_55 = 0 16 | } 17 | if( $enable_56 == undef ) { 18 | $enable_56 = 1 19 | } 20 | if( $enable_57 == undef ) { 21 | $enable_57 = 0 22 | } 23 | 24 | 25 | class { 'mysql::repository': 26 | 55_enabled => $enable_55, 27 | 56_enabled => $enable_56, 28 | 57_enabled => $enable_57 29 | } 30 | 31 | include mysql::server 32 | include mysql::config 33 | include mysql::service 34 | 35 | if $datadir_dev { 36 | class { 'mysql::datadir': 37 | datadir_dev => $datadir_dev, 38 | datadir_dev_scheduler => $datadir_dev_scheduler, 39 | datadir_fs => $datadir_fs, 40 | datadir_fs_opts => $datadir_fs_opts, 41 | datadir_mkfs_opts => $datadir_mkfs_opts 42 | } 43 | 44 | Class['mysql::datadir'] -> Class['mysql::server'] 45 | } 46 | 47 | Class['mysql::repository'] -> Class['mysql::server'] -> Class['mysql::config'] -> Class['mysql::service'] 48 | 49 | 50 | include misc::myq_gadgets 51 | include misc::myq_tools 52 | 53 | include test::user 54 | 55 | include percona::repository 56 | include percona::toolkit 57 | include percona::sysbench 58 | 59 | Class['base::packages'] -> Class['misc::myq_gadgets'] 60 | Class['base::packages'] -> Class['misc::myq_tools'] 61 | 62 | Class['base::packages'] -> Class['percona::repository'] 63 | Class['base::insecure'] -> Class['percona::repository'] 64 | 65 | Class['percona::repository'] -> Class['percona::toolkit'] 66 | Class['percona::repository'] -> Class['percona::sysbench'] 67 | 68 | Class['mysql::server'] -> Class['percona::toolkit'] 69 | Class['mysql::server'] -> Class['percona::sysbench'] 70 | 71 | Class['mysql::service'] -> Class['test::user'] 72 | 73 | 74 | if $sysbench_load == 'true' { 75 | class { 'test::sysbench_load': 76 | schema => $schema, 77 | tables => $tables, 78 | rows => $rows, 79 | threads => $threads, 80 | engine => $engine 81 | } 82 | 83 | Class['percona::sysbench'] -> Class['test::sysbench_load'] 84 | Class['test::user'] -> Class['test::sysbench_load'] 85 | } 86 | 87 | if $sysbench_skip_test_client != 'true' { 88 | include test::sysbench_test_script 89 | Class['percona::sysbench'] -> Class['test::sysbench_test_script'] 90 | 91 | } 92 | 93 | if $softraid == 'true' { 94 | class { 'misc::softraid': 95 | softraid_dev => $softraid_dev, 96 | softraid_level => $softraid_level, 97 | softraid_devices => $softraid_devices, 98 | softraid_dev_str => $softraid_dev_str 99 | } 100 | 101 | Class['misc::softraid'] -> Class['mysql::datadir'] 102 | } 103 | 104 | if ( $vividcortex_api_key ) { 105 | class { 'misc::vividcortex': 106 | api_key => $vividcortex_api_key 107 | } 108 | 109 | Class['percona::cluster::service'] -> Class['misc::vividcortex'] 110 | } 111 | -------------------------------------------------------------------------------- /modules/test/manifests/sysbench_test_script.pp: -------------------------------------------------------------------------------- 1 | class test::sysbench_test_script { 2 | if !$mysql_host { $mysql_host = 'localhost' } 3 | if !$mysql_port { $mysql_port = '3306' } 4 | if !$schema { $schema = 'sbtest' } 5 | if !$tables { $tables = 1 } 6 | if !$rows { $rows = 100000 } 7 | if !$threads { $threads = 1 } 8 | if !$tx_rate { $tx_rate = 0 } 9 | if !$engine { $engine = 'innodb' } 10 | 11 | file { 12 | '/usr/local/bin/run_sysbench_reload.sh': 13 | ensure => present, 14 | content => "sysbench --db-driver=mysql --test=/usr/share/doc/sysbench/tests/db/oltp.lua --mysql-table-engine=$engine --mysql-user=test --mysql-password=test --mysql-db=$schema --mysql-host=$mysql_host --mysql-port=$mysql_port 
--oltp-tables-count=$tables cleanup 15 | sysbench --db-driver=mysql --test=/usr/share/doc/sysbench/tests/db/parallel_prepare.lua --mysql-table-engine=$engine --mysql-user=test --mysql-password=test --mysql-db=$schema --mysql-host=$mysql_host --mysql-port=$mysql_port --oltp-tables-count=$tables --oltp-table-size=$rows --oltp-auto-inc=off --max-requests=1 run", 16 | mode => 0755; 17 | } 18 | 19 | file { 20 | '/usr/local/bin/run_sysbench_oltp.sh': 21 | ensure => present, 22 | content => "sysbench --db-driver=mysql --test=/usr/share/doc/sysbench/tests/db/oltp.lua --mysql-user=test --mysql-password=test --mysql-db=$schema --mysql-host=$mysql_host --mysql-port=$mysql_port --mysql-ignore-errors=all --oltp-tables-count=$tables --oltp-table-size=$rows --oltp-auto-inc=off --num-threads=$threads --report-interval=1 --max-requests=0 --tx-rate=$tx_rate run | grep tps", 23 | mode => 0755; 24 | } 25 | 26 | file { 27 | '/usr/local/bin/run_sysbench_update_index.sh': 28 | ensure => present, 29 | content => "sysbench --db-driver=mysql --test=/usr/share/doc/sysbench/tests/db/update_index.lua --mysql-user=test --mysql-password=test --mysql-db=$schema --mysql-host=$mysql_host --mysql-port=$mysql_port --mysql-ignore-errors=all --oltp-tables-count=$tables --oltp-table-size=$rows --oltp-auto-inc=off --num-threads=$threads --report-interval=1 --max-requests=0 --tx-rate=$tx_rate run | grep tps", 30 | mode => 0755; 31 | } 32 | 33 | file { 34 | '/usr/local/bin/run_sysbench.sh': 35 | ensure => present, 36 | source => "puppet:///modules/test/run_sysbench.sh", 37 | mode => 0755; 38 | '/var/lib/mysql/sbtest': 39 | ensure => directory, 40 | owner => 'mysql', 41 | group => 'mysql', 42 | mode => '0755', 43 | } 44 | 45 | if $enable_consul == 'true' { 46 | # Watch for a test in consul and trigger it when the appropriate key/value is set 47 | consul::watch { 48 | 'test': type => 'event', handler => 'wall test consul event'; 49 | 'sysbench_stop': type => 'event', handler => 'killall sysbench'; 50 | 'sysbench_oltp': type => 'event', handler => "pidof sysbench || /usr/local/bin/run_sysbench_oltp.sh"; 51 | 'sysbench_update_index': type => 'event', handler => "pidof sysbench || /usr/local/bin/run_sysbench_update_index.sh"; 52 | } 53 | 54 | consul::service { 55 | 'sysbench_running': checks => [{script => "killall -0 sysbench", interval => '10s'}]; 56 | 'sysbench_ready': checks => [{script => "which sysbench", interval => '1m'}]; 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /Vagrantfile.pxc-big.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Assumes a box from https://github.com/grypyrg/packer-percona 5 | 6 | # This sets up 3 nodes with a common PXC, but you need to run bootstrap.sh to connect them. 7 | 8 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 9 | 10 | pxc_version = "56" 11 | 12 | # Node group counts and aws security groups (if using aws provider) 13 | pxc_nodes = 3 14 | pxc_node_name_prefix = "node" 15 | 16 | # AWS configuration 17 | aws_region = "us-west-1" 18 | aws_ips='private' # Use 'public' for cross-region AWS. 
'private' otherwise (or commented out) 19 | pxc_security_groups = ['sg-b4438ad3'] 20 | 21 | cluster_address = 'gcomm://' + Array.new( pxc_nodes ){ |i| pxc_node_name_prefix + (i+1).to_s }.join(',') 22 | 23 | Vagrant.configure("2") do |config| 24 | config.vm.box = "grypyrg/centos-x86_64" 25 | config.ssh.username = "vagrant" 26 | 27 | # Create the PXC nodes 28 | (1..pxc_nodes).each do |i| 29 | name = pxc_node_name_prefix + i.to_s 30 | config.vm.define name do |node_config| 31 | node_config.vm.hostname = name 32 | node_config.vm.network :private_network, type: "dhcp" 33 | node_config.vm.provision :hostmanager 34 | 35 | # Provisioners 36 | provision_puppet( node_config, "pxc_server.pp" ) { |puppet| 37 | puppet.facter = { 38 | # PXC setup 39 | "percona_server_version" => pxc_version, 40 | 'innodb_buffer_pool_size' => '12G', 41 | 'innodb_log_file_size' => '1G', 42 | 'innodb_flush_log_at_trx_commit' => '0', 43 | 'pxc_bootstrap_node' => (i == 1 ? true : false ), 44 | 'wsrep_cluster_address' => cluster_address, 45 | 'wsrep_provider_options' => 'gcache.size=128M; gcs.fc_limit=1024; evs.user_send_window=512; evs.send_window=512', 46 | 'wsrep_slave_threads' => 8, 47 | 48 | # Sysbench setup on node 1 49 | 'sysbench_load' => (i == 1 ? true : false ), 50 | 'tables' => 20, 51 | 'rows' => 1000000, 52 | 'threads' => 8 53 | } 54 | } 55 | 56 | # Providers 57 | provider_virtualbox( nil, node_config, 18432 ) { |vb, override| 58 | provision_puppet( override, "pxc_server.pp" ) { |puppet| 59 | puppet.facter = { 60 | 'default_interface' => 'eth1', 61 | 'datadir_dev' => 'dm-2', 62 | } 63 | } 64 | } 65 | 66 | provider_vmware( name, node_config, 18432 ) { |vb, override| 67 | provision_puppet( override, "pxc_server.pp" ) { |puppet| 68 | puppet.facter = { 69 | 'default_interface' => 'eth1', 70 | 'datadir_dev' => 'dm-2', 71 | } 72 | } 73 | } 74 | 75 | provider_aws( "PXC #{name}", node_config, 'm3.xlarge', aws_region, pxc_security_groups, aws_ips) { |aws, override| 76 | aws.block_device_mapping = [ 77 | { 'DeviceName' => "/dev/sdb", 'VirtualName' => "ephemeral0" }, 78 | { 'DeviceName' => "/dev/sdc", 'VirtualName' => "ephemeral1" } 79 | ] 80 | provision_puppet( override, "pxc_server.pp" ) { |puppet| 81 | puppet.facter = { 82 | 'softraid' => true, 83 | 'softraid_dev' => '/dev/md0', 84 | 'softraid_level' => 'stripe', 85 | 'softraid_devices' => '2', 86 | 'softraid_dev_str' => '/dev/xvdb /dev/xvdc', 87 | 'datadir_dev' => 'md0' 88 | } 89 | } 90 | } 91 | end 92 | end 93 | end 94 | -------------------------------------------------------------------------------- /modules/training/files/imdb_workload/add_load.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import random 4 | import time 5 | from threading import Thread 6 | 7 | from mysql.utilities.common import (database, options, server, table) 8 | 9 | from subprocess import call 10 | from contextlib import contextmanager 11 | 12 | 13 | server_host = '127.0.0.1' 14 | server_port = '3306' 15 | server_user = 'plmce' 16 | server_password = 'BelgianBeers' 17 | 18 | server_connection = "%s:%s@%s:%s" % (server_user, server_password, 19 | server_host, server_port) 20 | 21 | queries = ( 22 | u'SELECT * FROM imdb.title WHERE `id` = %(id)s;', 23 | u'SELECT AVG(rating) avg FROM imdb.movie_ratings WHERE movie_id = %(id)s;', 24 | u'SELECT * FROM imdb.cast_info WHERE movie_id = %(id)s and role_id = 1 ORDER BY nr_order ASC;', 25 | u'SELECT * FROM imdb.name WHERE id = %(id)s;', 26 | u'SELECT * FROM imdb.char_name WHERE id = 
%(id)s;', 27 | u'SELECT * FROM imdb.comments ORDER BY id DESC limit 10;', 28 | u'SELECT * FROM imdb.comments WHERE type="actor" and type_id = %(id)s ORDER BY id DESC;', 29 | u'SELECT * FROM imdb.favorites WHERE user_id = %(id)s AND type="actor";', 30 | u'SELECT * FROM imdb.favorites WHERE user_id = %(id)s AND type="movie";', 31 | u'SELECT * FROM imdb.movie_info WHERE movie_id = %(id)s;', 32 | u'SELECT * FROM imdb.person_info WHERE person_id = %(id)s;', 33 | u'SELECT * FROM imdb.users WHERE last_login_date > NOW()-INTERVAL 10 MINUTE ORDER BY last_login_date DESC LIMIT 10;', 34 | u'SELECT MAX(id) as c FROM imdb.name;', 35 | u'SELECT MAX(id) as c FROM imdb.title;', 36 | u'SELECT MAX(id) as c FROM imdb.users;', 37 | u'SELECT user2 FROM imdb.user_friends WHERE user1 = %(id)s;', 38 | u'SELECT cast_info.* FROM imdb.cast_info INNER JOIN imdb.title on (cast_info.movie_id=title.id) WHERE cast_info.person_id = %(id)s AND title.kind_id = 1 ORDER BY title.production_year DESC, title.id DESC;' 39 | ) 40 | 41 | 42 | def memoize(func): 43 | cache = dict() 44 | 45 | def wrapper(*args, **kwargs): 46 | key = (func, args, frozenset(kwargs.items())) 47 | if key in cache: 48 | return cache.get(key) 49 | value = func(*args, **kwargs) 50 | cache[key] = value 51 | return value 52 | return wrapper 53 | 54 | 55 | class Movie(object): 56 | 57 | def __init__(self, 58 | server, 59 | database=u"imdb"): 60 | self.server = server 61 | self.database = database 62 | 63 | @property 64 | @memoize 65 | def movie_db(self): 66 | """ Connect and return the connection to MySQL """ 67 | return self.connect_db(self.database) 68 | 69 | @memoize 70 | def connect_db(self, db_name): 71 | """ Method to connect to MySQL """ 72 | db_options = {u'skip_grants': True} 73 | return database.Database(self.server, db_name, db_options) 74 | 75 | def rnd_queries(self): 76 | query = queries[random.randint(0,len(queries)-1)] 77 | id=random.randint(0,5000) 78 | print query % {u'id': id} 79 | t=Thread(target=self.server.exec_query, args=(query % {u'id': id},)) 80 | t.start() 81 | 82 | def main(): 83 | for x in range(1, random.randint(5,10)): 84 | movie = Movie( server.get_server(u'localhost', server_connection, False)) 85 | movie.rnd_queries() 86 | time.sleep(0.5) 87 | 88 | main() 89 | -------------------------------------------------------------------------------- /modules/mha/manifests/node.pp: -------------------------------------------------------------------------------- 1 | class mha::node { 2 | exec { 3 | "mha4mysql-node": 4 | command => "/usr/bin/yum localinstall -y https://72003f4c60f5cc941cd1c7d448fc3c99e0aebaa8.googledrive.com/host/0B1lu97m8-haWeHdGWXp0YVVUSlk/mha4mysql-node-0.56-0.el6.noarch.rpm", 5 | cwd => "/tmp", 6 | unless => "/bin/rpm -q mha4mysql-node"; 7 | } 8 | 9 | user { 10 | 'mha': 11 | ensure => present, 12 | groups => ['mysql'], 13 | home => "/home/mha", 14 | managehome => true; 15 | } 16 | 17 | file { 18 | '/etc/sudoers.d/mha_sudo': 19 | ensure => present, 20 | content => 'Cmnd_Alias VIP_MGMT = /sbin/ip, /usr/sbin/arping 21 | 22 | mha ALL=(root) NOPASSWD: VIP_MGMT 23 | '; 24 | } 25 | 26 | file { 27 | '/var/log/masterha': 28 | ensure => 'directory', 29 | owner => 'mha', 30 | group => 'mysql', 31 | mode => 0775; 32 | } 33 | 34 | file { 35 | '/home/mha/.ssh': 36 | ensure => 'directory', 37 | owner => 'mha', 38 | group => 'mysql', 39 | mode => 0700, 40 | require => User['mha']; 41 | '/home/mha/.ssh/id_dsa': 42 | ensure => 'present', 43 | owner => 'mha', 44 | group => 'mha', 45 | mode => 0400, 46 | content => '-----BEGIN DSA PRIVATE 
KEY----- 47 | MIIBuwIBAAKBgQDL+aCXasdNotUQBd31nBnhzUscLdKuRc2iZpTK/XixMd3PJXlC 48 | wyhfioz7iwf14QHOr2qg6ZkA14nvwqjLVXzF6NqAH4InCbZ1yC2u4DyrEkHozhVC 49 | td+JN5AkioAuojZPUGQAYkCYFKzvDkBoxudQ5jE6IgI3+4Ihi4D2kbyN0wIVAIGm 50 | 73q7GGXuHtCwO6P+sQLI5ntLAoGBAIahKvuV95ZFEPjGkoRqgIsqpjq2+bx63v0z 51 | UIyxA3shkCnd70QaujvLeRG6zYVIgaWyfzA1JN2r3mTSgBfAMnNqTdkJiTBkbbH3 52 | RC7ML2Ap3OpBdlYweAT1ABQDKHv7ryD8mkAlPE50afhdokElYTrViwyK3icfkoC+ 53 | gsy6n90wAoGADDBl8zD9UiJuFpFQgmMXKbZ5ttBpwx1A6UKeHZ0ipB77gFWpmEie 54 | J0FRnJtmMiNYp4SXbSfF1ERyGcfs5FxOOp+qxLUFGI/rSYe5QOVNwtnpUvdX9c7B 55 | B/SxVLJUbxpi3TWwbCFwKS3d/FxCmg9/Wkv1MFA4+qUWC2s693j/wKECFFYJrWUb 56 | ZlY7oK+DFJqr1jZNE5QA 57 | -----END DSA PRIVATE KEY----- 58 | ', 59 | require => File['/home/mha/.ssh']; 60 | } 61 | ssh_authorized_key { 62 | 'mha-ssh-key': 63 | ensure => 'present', 64 | key => 'AAAAB3NzaC1kc3MAAACBAMv5oJdqx02i1RAF3fWcGeHNSxwt0q5FzaJmlMr9eLEx3c8leULDKF+KjPuLB/XhAc6vaqDpmQDXie/CqMtVfMXo2oAfgicJtnXILa7gPKsSQejOFUK134k3kCSKgC6iNk9QZABiQJgUrO8OQGjG51DmMToiAjf7giGLgPaRvI3TAAAAFQCBpu96uxhl7h7QsDuj/rECyOZ7SwAAAIEAhqEq+5X3lkUQ+MaShGqAiyqmOrb5vHre/TNQjLEDeyGQKd3vRBq6O8t5EbrNhUiBpbJ/MDUk3aveZNKAF8Ayc2pN2QmJMGRtsfdELswvYCnc6kF2VjB4BPUAFAMoe/uvIPyaQCU8TnRp+F2iQSVhOtWLDIreJx+SgL6CzLqf3TAAAACADDBl8zD9UiJuFpFQgmMXKbZ5ttBpwx1A6UKeHZ0ipB77gFWpmEieJ0FRnJtmMiNYp4SXbSfF1ERyGcfs5FxOOp+qxLUFGI/rSYe5QOVNwtnpUvdX9c7BB/SxVLJUbxpi3TWwbCFwKS3d/FxCmg9/Wkv1MFA4+qUWC2s693j/wKE=', 65 | type => 'ssh-dss', 66 | user => 'mha'; 67 | } 68 | 69 | exec { 70 | 'create_mha_user_all': 71 | command => "mysql -e \"GRANT ALL ON *.* TO 'mha'@'%' IDENTIFIED BY 'mha'\"", 72 | cwd => '/root', 73 | unless => "pt-show-grants | grep \"GRANT ALL PRIVILEGES ON *.* TO 'mha'@'%'\"", 74 | path => ['/usr/bin', '/bin'], 75 | require => [ Package['percona-toolkit'], Service['mysql'] ]; 76 | 'create_mha_user_localhost': 77 | command => "mysql -e \"GRANT ALL ON *.* TO 'mha'@'localhost' IDENTIFIED BY 'mha'\"", 78 | cwd => '/root', 79 | unless => "pt-show-grants | grep \"GRANT ALL PRIVILEGES ON *.* TO 'mha'@'localhost'\"", 80 | path => ['/usr/bin', '/bin'], 81 | require => [ Package['percona-toolkit'], Service['mysql'] ]; 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /manifests/pxc_server.pp: -------------------------------------------------------------------------------- 1 | include stdlib 2 | 3 | include base::hostname 4 | include base::packages 5 | include base::insecure 6 | include base::swappiness 7 | 8 | include percona::repository 9 | include percona::toolkit 10 | include percona::sysbench 11 | 12 | include percona::cluster::client 13 | include percona::cluster::server 14 | include percona::cluster::config 15 | include percona::cluster::service 16 | include percona::cluster::sstuser 17 | include percona::cluster::clustercheckuser 18 | 19 | include misc::myq_gadgets 20 | include misc::myq_tools 21 | 22 | include test::user 23 | 24 | if $datadir_dev and $datadir_dev != '' { 25 | class { 'mysql::datadir': 26 | datadir_dev => $datadir_dev, 27 | datadir_dev_scheduler => $datadir_dev_scheduler, 28 | datadir_fs => $datadir_fs, 29 | datadir_fs_opts => $datadir_fs_opts, 30 | datadir_mkfs_opts => $datadir_mkfs_opts 31 | } 32 | 33 | Class['mysql::datadir'] -> Class['percona::cluster::server'] 34 | 35 | if $softraid == 'true' { 36 | class { 'misc::softraid': 37 | softraid_dev => $softraid_dev, 38 | softraid_level => $softraid_level, 39 | softraid_devices => $softraid_devices, 40 | softraid_dev_str => $softraid_dev_str 41 | } 42 | 43 | 
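# Ordering note: the chain below makes misc::softraid assemble the RAID device
# first, so mysql::datadir formats and mounts a device that already exists.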
Class['misc::softraid'] -> Class['mysql::datadir'] 44 | } 45 | } 46 | 47 | 48 | Class['percona::repository'] -> Class['percona::cluster::client'] -> Class['percona::cluster::server'] -> Class['percona::cluster::config'] -> Class['percona::cluster::service'] -> Class['percona::cluster::sstuser'] -> Class['percona::cluster::clustercheckuser'] 49 | 50 | Class['base::packages'] -> Class['misc::myq_gadgets'] 51 | Class['base::packages'] -> Class['misc::myq_tools'] 52 | 53 | Class['base::packages'] -> Class['percona::repository'] 54 | Class['base::insecure'] -> Class['percona::repository'] 55 | 56 | Class['percona::repository'] -> Class['percona::toolkit'] 57 | Class['percona::repository'] -> Class['percona::sysbench'] 58 | 59 | Class['percona::cluster::server'] -> Class['percona::sysbench'] 60 | 61 | Class['percona::cluster::client'] -> Class['percona::toolkit'] 62 | 63 | Class['percona::cluster::service'] -> Class['test::user'] 64 | 65 | if $sysbench_load == 'true' { 66 | class { 'test::sysbench_load': 67 | schema => $schema, 68 | tables => $tables, 69 | rows => $rows, 70 | threads => $threads, 71 | engine => $engine 72 | } 73 | 74 | Class['percona::cluster::client'] -> Class['percona::sysbench'] 75 | Class['percona::sysbench'] -> Class['test::sysbench_load'] 76 | Class['test::user'] -> Class['test::sysbench_load'] 77 | } 78 | 79 | if $enable_consul == 'true' { 80 | info( 'enabling consul agent' ) 81 | 82 | $config_hash = delete_undef_values( { 83 | 'datacenter' => $datacenter, 84 | 'data_dir' => '/opt/consul', 85 | 'log_level' => 'INFO', 86 | 'node_name' => $node_name ? { 87 | undef => $vagrant_hostname, 88 | default => $node_name 89 | }, 90 | 'bind_addr' => $default_interface ? { 91 | undef => undef, 92 | default => getvar("ipaddress_${default_interface}") 93 | }, 94 | 'client_addr' => '0.0.0.0', 95 | }) 96 | 97 | class { 'consul': 98 | config_hash => $config_hash 99 | } 100 | 101 | Class['consul'] -> Class['percona::cluster::config'] 102 | } 103 | 104 | if ( $vividcortex_api_key ) { 105 | class { 'misc::vividcortex': 106 | api_key => $vividcortex_api_key 107 | } 108 | 109 | Class['percona::cluster::service'] -> Class['misc::vividcortex'] 110 | } 111 | 112 | if $sysbench_skip_test_client != 'true' { 113 | include test::sysbench_test_script 114 | Class['percona::cluster::server'] -> Class['test::sysbench_test_script'] 115 | } 116 | 117 | -------------------------------------------------------------------------------- /Vagrantfile.pxc.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Assumes a box from https://github.com/grypyrg/packer-percona 5 | 6 | # This sets up 3 nodes with a common PXC, but you need to run bootstrap.sh to connect them. 7 | 8 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 9 | 10 | pxc_version = "56" 11 | 12 | # Node group counts and aws security groups (if using aws provider) 13 | pxc_nodes = 3 14 | pxc_node_name_prefix = "node" 15 | 16 | # AWS configuration 17 | aws_region = "us-east-1" 18 | aws_ips = 'private' # Use 'public' for cross-region AWS. 
'private' otherwise (or commented out) 19 | pxc_security_groups = [] 20 | 21 | cluster_address = 'gcomm://' + Array.new( pxc_nodes ){ |i| pxc_node_name_prefix + (i+1).to_s }.join(',') 22 | 23 | Vagrant.configure("2") do |config| 24 | config.vm.box = "grypyrg/centos-x86_64" 25 | config.ssh.username = "vagrant" 26 | 27 | # Create the PXC nodes 28 | (1..pxc_nodes).each do |i| 29 | name = pxc_node_name_prefix + i.to_s 30 | config.vm.define name do |node_config| 31 | node_config.vm.hostname = name 32 | node_config.vm.network :private_network, type: "dhcp" 33 | node_config.vm.provision :hostmanager 34 | 35 | # Provisioners 36 | provision_puppet( node_config, "pxc_server.pp" ) { |puppet| 37 | puppet.facter = { 38 | # PXC setup 39 | "percona_server_version" => pxc_version, 40 | 'innodb_buffer_pool_size' => '128M', 41 | 'innodb_log_file_size' => '64M', 42 | 'innodb_flush_log_at_trx_commit' => '0', 43 | 'pxc_bootstrap_node' => (i == 1 ? true : false ), 44 | 'wsrep_cluster_address' => cluster_address, 45 | 'wsrep_provider_options' => 'gcache.size=128M; gcs.fc_limit=128', 46 | 47 | # Sysbench setup on node 1 48 | 'sysbench_load' => (i == 1 ? true : false ), 49 | 'tables' => 1, 50 | 'rows' => 100000, 51 | 'threads' => 8 52 | } 53 | } 54 | 55 | # Providers 56 | provider_virtualbox( nil, node_config, 1024 ) { |vb, override| 57 | provision_puppet( override, "pxc_server.pp" ) { |puppet| 58 | puppet.facter = { 59 | 'default_interface' => 'eth1', 60 | 'datadir_dev' => 'dm-2', 61 | } 62 | } 63 | } 64 | 65 | provider_vmware( name, node_config, 1024 ) { |vb, override| 66 | provision_puppet( override, "pxc_server.pp" ) { |puppet| 67 | puppet.facter = { 68 | 'default_interface' => 'eth1', 69 | 'datadir_dev' => 'dm-2', 70 | } 71 | } 72 | } 73 | 74 | provider_aws( name, node_config, 'm3.medium', aws_region, pxc_security_groups, aws_ips) { |aws, override| 75 | aws.block_device_mapping = [ 76 | { 77 | 'DeviceName' => "/dev/sdl", 78 | 'VirtualName' => "mysql_data", 79 | 'Ebs.VolumeSize' => 20, 80 | 'Ebs.DeleteOnTermination' => true, 81 | } 82 | ] 83 | provision_puppet( override, "pxc_server.pp" ) { |puppet| 84 | puppet.facter = { 85 | 'datadir_dev' => 'xvdl' 86 | } 87 | } 88 | } 89 | 90 | # If you wish to use with OpenStack, you must previously have 91 | # an OpenStack installation up and running and the 92 | # vagrant-openstack plugin installed. Then, uncomment these lines. 93 | 94 | #provider_openstack( name, node_config, 'm1.xlarge', nil, 'cc7e31d8-a4aa-4544-8a74-86dfd06655d7' ) { |os, override| 95 | # os.disks = [ 96 | # { 97 | # "name" => "#{name}-data", 98 | # "size" => 100, 99 | # "description" => "MySQL Data" 100 | # } 101 | # ] 102 | # provision_puppet( override, "pxc_server.pp" ) { |puppet| 103 | # puppet.facter = {'datadir_dev' => 'vdb'} 104 | # } 105 | #} 106 | 107 | end 108 | end 109 | end 110 | -------------------------------------------------------------------------------- /Vagrantfile.tokudb.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 5 | 6 | # Number of servers 7 | ps_servers = 1 8 | 9 | # AWS configuration 10 | aws_region = "us-east-1" 11 | aws_ips='private' # Use 'public' for cross-region AWS. 
'private' otherwise (or commented out) 12 | security_groups = ["default"] 13 | 14 | 15 | serverlist="ps1"; 16 | (2..ps_servers).each { |i| 17 | serverlist=serverlist + ',ps' + i.to_s; 18 | } 19 | 20 | 21 | Vagrant.configure("2") do |config| 22 | config.vm.box = "grypyrg/centos-x86_64" 23 | config.vm.box_version = "~> 7" 24 | config.ssh.username = "vagrant" 25 | 26 | # Create the PS nodes 27 | (1..ps_servers).each do |i| 28 | name = "ps" + i.to_s 29 | config.vm.define name do |node_config| 30 | node_config.vm.hostname = name 31 | node_config.vm.network :private_network, type: "dhcp" 32 | node_config.vm.provision :hostmanager 33 | 34 | # Provisioners 35 | provision_puppet( node_config, "percona_server.pp" ) { |puppet| 36 | puppet.facter = { 37 | 'cluster_servers' => serverlist, 38 | 39 | # PXC setup 40 | "percona_server_version" => '56', 41 | 42 | # Sysbench setup 43 | 'sysbench_load' => (i == 1 ? true : false ), 44 | 'tables' => 20, 45 | 'rows' => 10000000, 46 | 'threads' => 8, 47 | 'engine' => 'tokudb', 48 | # 'tx_rate' => 10, 49 | 50 | # TokuDB setup 51 | 'tokudb_enable' => true, 52 | 'tokudb_cache_size' => '12G', 53 | 'tokudb_commit_sync' => 'OFF', 54 | 'tokudb_directio' => 'ON', 55 | 'tokudb_loader_memory_size' => '64M', 56 | 57 | # PCT setup 58 | 'percona_agent_api_key' => ENV['PERCONA_AGENT_API_KEY'] 59 | } 60 | } 61 | 62 | # Providers 63 | provider_virtualbox( nil, node_config, 1024 ) { |vb, override| 64 | provision_puppet( override, "percona_server.pp" ) {|puppet| 65 | puppet.facter = { 66 | 'default_interface' => 'eth1', 67 | 'datadir_dev' => 'dm-2', 68 | } 69 | } 70 | } 71 | 72 | provider_vmware( name, node_config, 1024 ) { |vb, override| 73 | provision_puppet( override, "percona_server.pp" ) {|puppet| 74 | puppet.facter = { 75 | 'default_interface' => 'eth1', 76 | 'datadir_dev' => 'dm-2', 77 | } 78 | } 79 | } 80 | 81 | provider_aws( "Percona Server #{name}", node_config, 'm3.medium', aws_region, security_groups, aws_ips) { |aws, override| 82 | 83 | aws.block_device_mapping = [ 84 | { 85 | 'DeviceName' => "/dev/sdl", 86 | 'VirtualName' => "mysql_data", 87 | 'Ebs.VolumeSize' => 20, 88 | 'Ebs.DeleteOnTermination' => true, 89 | } 90 | ] 91 | 92 | provision_puppet( override, "percona_server.pp" ) { |puppet| 93 | puppet.facter = {'datadir_dev' => 'xvdl'} 94 | } 95 | } 96 | 97 | provider_openstack( "Percona Server #{name}", node_config, 'm1.xlarge', nil, ['50285812-3a34-40c5-9e69-0f67fab0ae5c'], '10.60.23.208') { |os, override| 98 | os.disks = [ 99 | { "name" => "#{name}-data", "size" => 100, "description" => "MySQL Data"} 100 | ] 101 | provision_puppet( override, "percona_server.pp" ) { |puppet| 102 | puppet.facter = {'datadir_dev' => 'vdb'} 103 | } 104 | } 105 | 106 | end 107 | end 108 | 109 | end 110 | -------------------------------------------------------------------------------- /modules/test/files/sysbench_custom_lua/custom-oltp.lua: -------------------------------------------------------------------------------- 1 | pathtest = string.match(test, "(.*/)") or "" 2 | 3 | dofile(pathtest .. 
"custom-common.lua") 4 | 5 | package.path = package.path..";/usr/share/lua/5.1/?.lua" 6 | package.cpath = package.cpath..";/usr/lib64/lua/5.1/?.so" 7 | 8 | function thread_init(thread_id) 9 | set_vars() 10 | 11 | if oltp_reconnect then 12 | db_disconnect() 13 | end 14 | 15 | if (db_driver == "mysql" and mysql_table_engine == "myisam") then 16 | begin_query = "LOCK TABLES sbtest WRITE" 17 | commit_query = "UNLOCK TABLES" 18 | else 19 | begin_query = "BEGIN" 20 | commit_query = "COMMIT" 21 | end 22 | 23 | end 24 | 25 | function event(thread_id) 26 | local rs 27 | local i 28 | local table_name 29 | local range_start 30 | local c_val 31 | local pad_val 32 | local query 33 | 34 | 35 | 36 | if oltp_reconnect then 37 | db_connect() 38 | end 39 | 40 | table_name = "sbtest".. sb_rand_uniform(1, oltp_tables_count) 41 | if not oltp_skip_trx then 42 | db_query(begin_query) 43 | end 44 | 45 | for i=1, oltp_point_selects do 46 | rs = db_query("SELECT c FROM ".. table_name .." WHERE id=" .. sb_rand(1, oltp_table_size)) 47 | end 48 | 49 | for i=1, oltp_simple_ranges do 50 | range_start = sb_rand(1, oltp_table_size) 51 | rs = db_query("SELECT c FROM ".. table_name .." WHERE id BETWEEN " .. range_start .. " AND " .. range_start .. "+" .. oltp_range_size - 1) 52 | end 53 | 54 | for i=1, oltp_sum_ranges do 55 | range_start = sb_rand(1, oltp_table_size) 56 | rs = db_query("SELECT SUM(K) FROM ".. table_name .." WHERE id BETWEEN " .. range_start .. " AND " .. range_start .. "+" .. oltp_range_size - 1) 57 | end 58 | 59 | for i=1, oltp_order_ranges do 60 | range_start = sb_rand(1, oltp_table_size) 61 | rs = db_query("SELECT c FROM ".. table_name .." WHERE id BETWEEN " .. range_start .. " AND " .. range_start .. "+" .. oltp_range_size - 1 .. " ORDER BY c") 62 | end 63 | 64 | for i=1, oltp_distinct_ranges do 65 | range_start = sb_rand(1, oltp_table_size) 66 | rs = db_query("SELECT DISTINCT c FROM ".. table_name .." WHERE id BETWEEN " .. range_start .. " AND " .. range_start .. "+" .. oltp_range_size - 1 .. " ORDER BY c") 67 | end 68 | 69 | if not oltp_read_only then 70 | 71 | for i=1, oltp_index_updates do 72 | rs = db_query("UPDATE " .. table_name .. " SET k=k+1 WHERE id=" .. sb_rand(1, oltp_table_size)) 73 | end 74 | 75 | for i=1, oltp_non_index_updates do 76 | c_val = sb_rand_str("###########-###########-###########-###########-###########-###########-###########-###########-###########-###########") 77 | query = "UPDATE " .. table_name .. " SET c='" .. c_val .. "' WHERE id=" .. sb_rand(1, oltp_table_size) 78 | rs = db_query(query) 79 | if rs then 80 | print(query) 81 | end 82 | end 83 | 84 | i = sb_rand(1, oltp_table_size) 85 | 86 | rs = db_query("DELETE FROM " .. table_name .. " WHERE id=" .. i) 87 | 88 | c_val = sb_rand_str([[ 89 | ###########-###########-###########-###########-###########-###########-###########-###########-###########-###########]]) 90 | pad_val = sb_rand_str([[ 91 | ###########-###########-###########-###########-###########]]) 92 | 93 | rs = db_query("INSERT INTO " .. table_name .. " (id, k, c, pad) VALUES " .. 
string.format("(%d, %d, '%s', '%s')",i, sb_rand(1, oltp_table_size) , c_val, pad_val)) 94 | 95 | end -- oltp_read_only 96 | 97 | if not oltp_skip_trx then 98 | db_query(commit_query) 99 | end 100 | 101 | if oltp_reconnect then 102 | db_disconnect() 103 | end 104 | 105 | end 106 | 107 | -------------------------------------------------------------------------------- /modules/percona/manifests/cluster/server.pp: -------------------------------------------------------------------------------- 1 | class percona::cluster::server { 2 | # Default PS version is 57 3 | if( $percona_server_version == undef or $percona_server_version == 57 ) { 4 | $percona_server_version = '-57' 5 | } elsif( $percona_server_version == 55 ) { 6 | $percona_server_version = '-55' 7 | } elsif( $percona_server_version == 56 ) { 8 | $percona_server_version = '-56' 9 | } 10 | 11 | # ugly way of making sure the version we want to use doesn't conflict with the old one 12 | # (oh boy this whole thing might need refactoring) 13 | if ( $percona_server_version == '' or $percona_server_version == '-57' ) { 14 | $other_percona_server_version="-55" 15 | $other_percona_server_version2="-56" 16 | } elsif $percona_server_version == "-55" { 17 | $other_percona_server_version="-56" 18 | $other_percona_server_version2="-57" 19 | } elsif $percona_server_version == "-56" { 20 | $other_percona_server_version="-55" 21 | $other_percona_server_version2="-57" 22 | } 23 | 24 | # You can set the $galera_version to 2 or 3 for either 55 or 56, but if it is not set it defaults like this: 25 | if( $galera_version == undef ) { 26 | if( $percona_server_version == "-55" ) { 27 | $galera_version = '2' 28 | } elsif( $percona_server_version == "-56" or $percona_server_version == "-57" ) { 29 | $galera_version = '3' 30 | } 31 | } 32 | 33 | if( $galera_version == '2' ) { 34 | $other_galera_version = '3' 35 | } elsif( $galera_version == '3' ) { 36 | $other_galera_version = '2' 37 | } 38 | 39 | if ( $percona_server_version == "-57" ) { 40 | # temp fix for https://bugs.launchpad.net/percona-xtradb-cluster/+bug/1615089 41 | exec {"update_ld_so_conf_shared_path": 42 | command => "/usr/bin/echo '/usr/lib64/mysql' > /etc/ld.so.conf.d/percona-xtradb-cluster-shared-5.7.12-x86_64.conf ; /sbin/ldconfig", 43 | onlyif => "/usr/bin/md5sum /etc/ld.so.conf.d/percona-xtradb-cluster-shared-5.7.12-x86_64.conf | /usr/bin/grep b74fdfa1c279dd2f8ec5febc0588a538", 44 | require => Package["MySQL-server"]; 45 | } 46 | } 47 | 48 | case $operatingsystem { 49 | centos: { 50 | package { 51 | "Percona-XtraDB-Cluster-server$other_percona_server_version.$hardwaremodel": 52 | ensure => "absent"; 53 | "Percona-XtraDB-Cluster-server$other_percona_server_version2.$hardwaremodel": 54 | ensure => "absent"; 55 | "Percona-XtraDB-Cluster-galera-$other_galera_version": 56 | ensure => "absent"; 57 | } 58 | # 5.7 does not require the galera package anymore; it's built in.
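# Hence the split below: on 5.5/5.6 the separate
# Percona-XtraDB-Cluster-galera-$galera_version package is installed alongside
# the server, while on 5.7 it is ensured absent because the provider ships
# inside the server package itself.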
59 | if ( $percona_server_version != "-57" ) { 60 | package { 61 | "Percona-XtraDB-Cluster-server$percona_server_version.$hardwaremodel": 62 | require => [ Package['galera'], Package["MySQL-shared"] ], 63 | alias => "MySQL-server", 64 | ensure => "latest", 65 | notify => Service['mysql']; 66 | "Percona-XtraDB-Cluster-galera-$galera_version": 67 | alias => "galera", 68 | ensure => "latest", 69 | notify => Service['mysql']; 70 | } 71 | } else { 72 | package { 73 | "Percona-XtraDB-Cluster-galera-$galera_version": 74 | alias => "galera", 75 | before => Package["MySQL-server"], 76 | ensure => "absent"; 77 | "Percona-XtraDB-Cluster-server$percona_server_version.$hardwaremodel": 78 | require => [ Package["MySQL-shared"] ], 79 | alias => "MySQL-server", 80 | ensure => "latest", 81 | notify => Service['mysql']; 82 | } 83 | } 84 | } 85 | ubuntu: { 86 | package { 87 | "percona-xtradb-cluster-server-5.5": 88 | alias => "MySQL-server"; 89 | "percona-xtradb-cluster-client-5.5": 90 | alias => "MySQL-client"; 91 | } 92 | } 93 | } 94 | 95 | 96 | if $enable_consul == 'true' { 97 | consul::service {'pxc': checks => [{ 98 | port => 3306, 99 | script => '/usr/bin/clustercheck || (exit 2)', 100 | interval => '5s' 101 | }]; 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /Vagrantfile.ps_sysbench.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 5 | 6 | # Number of servers 7 | ps_servers = 1 8 | 9 | # AWS configuration 10 | aws_region = "us-east-1" 11 | aws_ips='private' # Use 'public' for cross-region AWS. 'private' otherwise (or commented out) 12 | security_groups = [] 13 | 14 | 15 | serverlist="ps1"; 16 | (2..ps_servers).each { |i| 17 | serverlist=serverlist + ',ps' + i.to_s; 18 | } 19 | 20 | 21 | Vagrant.configure("2") do |config| 22 | config.vm.box = "grypyrg/centos-x86_64" 23 | config.vm.box_version = "~> 7" 24 | config.ssh.username = "vagrant" 25 | 26 | # Create the PS nodes 27 | (1..ps_servers).each do |i| 28 | name = "ps" + i.to_s 29 | config.vm.define name do |node_config| 30 | node_config.vm.hostname = name 31 | node_config.vm.network :private_network, type: "dhcp" 32 | node_config.vm.provision :hostmanager 33 | 34 | # Provisioners 35 | provision_puppet( node_config, "percona_server.pp" ) { |puppet| 36 | puppet.facter = { 37 | 'cluster_servers' => serverlist, 38 | # PXC setup 39 | "percona_server_version" => '56', 40 | 'innodb_buffer_pool_size' => '128M', 41 | 'innodb_log_file_size' => '64M', 42 | 'innodb_flush_log_at_trx_commit' => '0', 43 | 44 | # Sysbench setup 45 | 'sysbench_load' => (i == 1 ? 
true : false ), 46 | 'tables' => 1, 47 | 'rows' => 1000000, 48 | 'threads' => 1, 49 | # 'tx_rate' => 10, 50 | 51 | # TokuDB setup 52 | 'tokudb_enable' => true, 53 | 'tokudb_directio' => 'ON', 54 | 'tokudb_loader_memory_size' => '64M', 55 | 'tokudb_fsync_log_period' => '0', 56 | 'tokudb_cache_size' => '128M', 57 | 58 | # Vividcortex setup 59 | 'vividcortex_api_key' => ENV['VIVIDCORTEX_API_KEY'], 60 | 61 | } 62 | } 63 | 64 | # Providers 65 | provider_virtualbox( nil, node_config, 1024 ) { |vb, override| 66 | provision_puppet( override, "percona_server.pp" ) {|puppet| 67 | puppet.facter = { 68 | 'default_interface' => 'eth1', 69 | 'datadir_dev' => 'dm-2', 70 | } 71 | } 72 | } 73 | 74 | provider_vmware( name, node_config, 1024 ) { |vb, override| 75 | provision_puppet( override, "percona_server.pp" ) {|puppet| 76 | puppet.facter = { 77 | 'default_interface' => 'eth1', 78 | 'datadir_dev' => 'dm-2', 79 | } 80 | } 81 | } 82 | 83 | provider_aws( "Percona Server #{name}", node_config, 'm3.medium', aws_region, security_groups, aws_ips) { |aws, override| 84 | 85 | aws.block_device_mapping = [ 86 | { 87 | 'DeviceName' => "/dev/sdl", 88 | 'VirtualName' => "mysql_data", 89 | 'Ebs.VolumeSize' => 20, 90 | 'Ebs.DeleteOnTermination' => true, 91 | } 92 | ] 93 | 94 | provision_puppet( override, "percona_server.pp" ) { |puppet| 95 | puppet.facter = {'datadir_dev' => 'xvdl'} 96 | } 97 | } 98 | 99 | provider_openstack( "Percona Server #{name}", node_config, 'm1.small', nil, ['cc7e31d8-a4aa-4544-8a74-86dfd06655d7'] ) { |os, override| 100 | os.disks = [ 101 | { "name" => "#{name}-data", "size" => 10, "description" => "MySQL Data"} 102 | ] 103 | provision_puppet( override, "percona_server.pp" ) { |puppet| 104 | puppet.facter = {'datadir_dev' => 'vdb'} 105 | } 106 | } 107 | 108 | provider_openstack( "Percona Server #{name}", node_config, 'm1.small', nil, 'cc7e31d8-a4aa-4544-8a74-86dfd06655d7' ) { |os, override| 109 | os.disks = [ 110 | { "name" => "#{name}-data", "size" => 100, "description" => "MySQL Data"} 111 | ] 112 | provision_puppet( override, "percona_server.pp" ) { |puppet| 113 | puppet.facter = {'datadir_dev' => 'vdb'} 114 | } 115 | } 116 | 117 | end 118 | end 119 | 120 | end 121 | -------------------------------------------------------------------------------- /manifests/percona_server.pp: -------------------------------------------------------------------------------- 1 | include stdlib 2 | 3 | include base::hostname 4 | include base::packages 5 | include base::insecure 6 | 7 | class {'base::swappiness': 8 | swappiness => $swappiness 9 | } 10 | 11 | include percona::repository 12 | include percona::toolkit 13 | include percona::sysbench 14 | 15 | include percona::server 16 | include percona::config 17 | include percona::service 18 | include percona::server-password 19 | 20 | include misc::myq_gadgets 21 | include misc::myq_tools 22 | 23 | include test::user 24 | 25 | if $datadir_dev { 26 | class { 'mysql::datadir': 27 | datadir_dev => $datadir_dev, 28 | datadir_dev_scheduler => $datadir_dev_scheduler, 29 | datadir_fs => $datadir_fs, 30 | datadir_fs_opts => $datadir_fs_opts, 31 | datadir_mkfs_opts => $datadir_mkfs_opts 32 | } 33 | 34 | Class['mysql::datadir'] -> Class['percona::server'] 35 | } 36 | 37 | Class['percona::repository'] -> Class['percona::server'] -> Class['percona::config'] -> Class['percona::service'] -> Class['percona::server-password'] -> Class['test::user'] 38 | 39 | Class['base::packages'] -> Class['misc::myq_gadgets'] 40 | Class['base::packages'] -> Class['misc::myq_tools'] 41 | 42 | 
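# The ordering chains here are explicit: base::packages and base::insecure run
# before the Percona repository is configured, and the repository is in place
# before the server, toolkit, and sysbench packages are installed from it.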
Class['base::packages'] -> Class['percona::repository'] 43 | Class['base::insecure'] -> Class['percona::repository'] 44 | 45 | Class['percona::repository'] -> Class['percona::toolkit'] 46 | Class['percona::repository'] -> Class['percona::sysbench'] 47 | 48 | Class['percona::server'] -> Class['percona::sysbench'] 49 | Class['percona::server'] -> Class['percona::toolkit'] 50 | 51 | Class['percona::service'] -> Class['test::user'] 52 | 53 | if $sysbench_load == 'true' { 54 | class { 'test::sysbench_load': 55 | schema => $schema, 56 | tables => $tables, 57 | rows => $rows, 58 | threads => $threads, 59 | engine => $engine 60 | } 61 | 62 | Class['percona::server'] -> Class['percona::sysbench'] 63 | Class['percona::sysbench'] -> Class['test::sysbench_load'] 64 | Class['test::user'] -> Class['test::sysbench_load'] 65 | } 66 | 67 | if $tokudb_enable == 'true' { 68 | include percona::tokudb_install 69 | include percona::tokudb_enable 70 | include percona::tokudb_config 71 | 72 | Class['percona::server'] -> Class['percona::tokudb_install'] 73 | Class['percona::tokudb_install'] -> Class['percona::tokudb_enable'] -> Class['percona::tokudb_config'] 74 | Class['percona::service'] -> Class['percona::tokudb_enable'] 75 | 76 | if $sysbench_load == 'true' { 77 | Class['percona::tokudb_enable'] -> Class['test::sysbench_load'] 78 | } 79 | } 80 | 81 | if $enable_consul == 'true' { 82 | info( 'enabling consul agent' ) 83 | 84 | $config_hash = delete_undef_values( { 85 | 'datacenter' => $datacenter, 86 | 'data_dir' => '/opt/consul', 87 | 'log_level' => 'INFO', 88 | 'node_name' => $node_name, 89 | 'bind_addr' => $default_interface ? { 90 | undef => undef, 91 | default => getvar("ipaddress_${default_interface}") 92 | }, 93 | 'client_addr' => '0.0.0.0', 94 | }) 95 | 96 | class { 'consul': 97 | config_hash => $config_hash 98 | } 99 | 100 | Class['percona::server'] ~> Class['consul'] 101 | Class['consul'] -> Class['percona::service'] 102 | } 103 | 104 | include training::helper_scripts 105 | 106 | if ( $percona_agent_api_key ) { 107 | include percona::agent 108 | Class['percona::service'] -> Class['percona::agent'] 109 | } 110 | 111 | if $sysbench_skip_test_client != 'true' { 112 | include test::sysbench_test_script 113 | Class['percona::service'] -> Class['test::sysbench_test_script'] 114 | } 115 | 116 | if $mha_node == 'true' or $mha_manager == 'true' { 117 | include mha::node 118 | Class['percona::server'] -> Class['mha::node'] 119 | 120 | if $mha_manager == 'true' { 121 | include mha::manager 122 | Class['mha::node'] -> Class['mha::manager'] 123 | } 124 | } 125 | 126 | if $softraid == 'true' { 127 | class { 'misc::softraid': 128 | softraid_dev => $softraid_dev, 129 | softraid_level => $softraid_level, 130 | softraid_devices => $softraid_devices, 131 | softraid_dev_str => $softraid_dev_str 132 | } 133 | 134 | Class['misc::softraid'] -> Class['mysql::datadir'] 135 | } 136 | 137 | if ( $vividcortex_api_key ) { 138 | class { 'misc::vividcortex': 139 | api_key => $vividcortex_api_key 140 | } 141 | 142 | Class['percona::service'] -> Class['misc::vividcortex'] 143 | } 144 | -------------------------------------------------------------------------------- /modules/training/files/imdb_workload/constant_workload.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import random 4 | import time 5 | import string 6 | 7 | from threading import Thread 8 | 9 | from mysql.utilities.common import (database, options, server, table) 10 | 11 | from subprocess import call 12 
| from contextlib import contextmanager 13 | 14 | 15 | server_host = '127.0.0.1' 16 | server_port = '3306' 17 | server_user = 'plmce' 18 | server_password = 'BelgianBeers' 19 | 20 | server_connection = "%s:%s@%s:%s" % (server_user, server_password, 21 | server_host, server_port) 22 | 23 | queries = ( 24 | u'SELECT * FROM imdb.title WHERE `id` = %(id)s;', 25 | u'SELECT * FROM imdb.name WHERE id = %(id)s;', 26 | u'SELECT * FROM imdb.char_name WHERE id = %(id)s;', 27 | u'SELECT * FROM imdb.comments ORDER BY id DESC limit 10;', 28 | u'SELECT * FROM imdb.favorites WHERE user_id = %(id)s AND type="actor";', 29 | u'SELECT * FROM imdb.favorites WHERE user_id = %(id)s AND type="movie";', 30 | u'SELECT * FROM imdb.movie_info WHERE movie_id = %(id)s;', 31 | u'SELECT * FROM imdb.person_info WHERE person_id = %(id)s;', 32 | u'SELECT user2 FROM imdb.user_friends WHERE user1 = %(id)s;', 33 | ) 34 | 35 | 36 | def memoize(func): 37 | cache = dict() 38 | 39 | def wrapper(*args, **kwargs): 40 | key = (func, args, frozenset(kwargs.items())) 41 | if key in cache: 42 | return cache.get(key) 43 | value = func(*args, **kwargs) 44 | cache[key] = value 45 | return value 46 | return wrapper 47 | 48 | 49 | class Movie(object): 50 | 51 | def __init__(self, 52 | server, 53 | database=u"imdb"): 54 | self.server = server 55 | self.database = database 56 | 57 | @property 58 | @memoize 59 | def movie_db(self): 60 | """ Connect and return the connection to MySQL """ 61 | return self.connect_db(self.database) 62 | 63 | @memoize 64 | def connect_db(self, db_name): 65 | """ Method to connect to MySQL """ 66 | db_options = {u'skip_grants': True} 67 | return database.Database(self.server, db_name, db_options) 68 | 69 | def rnd_queries(self): 70 | query = queries[random.randint(0,len(queries)-1)] 71 | id=random.randint(0,5000) 72 | print query % {u'id': id} 73 | t=Thread(target=self.server.exec_query, args=(query % {u'id': id},)) 74 | t.start() 75 | 76 | def rnd_user(self): 77 | query = u"INSERT INTO imdb.users (email_address, first_name, last_name) VALUES ('%(email)s','%(first_name)s','%(last_name)s');" 78 | f_name = self.genstring(3,9) 79 | l_name = self.genstring(4,12) 80 | email = "%(f_name)s.%(l_name)s@jaimail.com" % {u'f_name': f_name, u'l_name': l_name} 81 | print query % {u'email': email, u'first_name': f_name, u'last_name': l_name} 82 | t=Thread(target=self.server.exec_query, args=(query % {u'email': email, u'first_name': f_name, u'last_name': l_name},)) 83 | t.start() 84 | 85 | def genstring(self,lim_down=3,lim_up=9): 86 | alpha = random.randint(lim_down,lim_up) 87 | vowels = ['a','e','i','o','u'] 88 | consonants = [a for a in string.ascii_lowercase if a not in vowels] 89 | 90 | ####utility functions 91 | def a_part(slen): 92 | ret = '' 93 | for i in range(slen): 94 | if i%2 ==0: 95 | randid = random.randint(0,20) #number of consonants 96 | ret += consonants[randid] 97 | else: 98 | randid = random.randint(0,4) #number of vowels 99 | ret += vowels[randid] 100 | return ret 101 | 102 | def n_part(slen): 103 | ret = '' 104 | for i in range(slen): 105 | randid = random.randint(0,9) #number of digits 106 | ret += digits[randid] 107 | return ret 108 | 109 | #### 110 | fpl = alpha/2 111 | if alpha % 2 : 112 | fpl = int(alpha/2) + 1 113 | lpl = alpha - fpl 114 | 115 | start = a_part(fpl) 116 | end = a_part(lpl) 117 | 118 | return "%s%s" % (start.capitalize(),end) 119 | 120 | def main(): 121 | while True: 122 | try: 123 | movie = Movie( server.get_server(u'localhost', server_connection, False)) 124 | movie.rnd_queries() 125 | 
time.sleep(0.5) 126 | movie.rnd_user() 127 | time.sleep(1) 128 | except: 129 | time.sleep(10) 130 | 131 | main() 132 | -------------------------------------------------------------------------------- /Vagrantfile.consul.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Assumes a box from https://github.com/grypyrg/packer-percona 5 | 6 | # This sets up 3 consul server nodes and 1 consul client node 7 | 8 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 9 | 10 | # Node names and ips (for local VMs) 11 | # (Amazon) aws_region is where to bring up the node 12 | # (Amazon) Security groups are 'default' (22 open) and 'pxc' (3306, 4567-4568, 4444 open) for each respective region 13 | # Don't worry about amazon config if you are not using that provider. 14 | consul = { 15 | 'consul1' => { 16 | 'local_vm_ip' => '192.168.70.2', 17 | 'aws_region' => 'us-east-1', 18 | 'server_id' => 1, 19 | 'security_groups' => ['sg-c88d28ae', 'sg-126b9b6e'] 20 | }, 21 | 'consul2' => { 22 | 'local_vm_ip' => '192.168.70.3', 23 | 'aws_region' => 'us-east-1', 24 | 'server_id' => 2, 25 | 'security_groups' => ['sg-c88d28ae', 'sg-126b9b6e'] 26 | }, 27 | 'consul3' => { 28 | 'local_vm_ip' => '192.168.70.4', 29 | 'aws_region' => 'us-east-1', 30 | 'server_id' => 3, 31 | 'security_groups' => ['sg-c88d28ae', 'sg-126b9b6e'] 32 | } 33 | } 34 | client = { 35 | 'client1' => { 36 | 'local_vm_ip' => '192.168.70.10', 37 | 'aws_region' => 'us-east-1', 38 | 'security_groups' => ['sg-c88d28ae', 'sg-be6c9cc2'], 39 | 'type' => 'client' 40 | } 41 | } 42 | 43 | # Use 'public' for cross-region AWS. 'private' otherwise (or commented out) 44 | aws_ips = 'private' 45 | if_adapter='vboxnet1' 46 | 47 | Vagrant.configure("2") do |config| 48 | config.vm.box = "grypyrg/centos-x86_64" 49 | config.vm.box_version = "~> 7" 50 | config.ssh.username = "vagrant" 51 | 52 | config.hostmanager.enabled = false # Disable this for AWS 53 | config.hostmanager.include_offline = true 54 | 55 | # Create all three nodes identically except for name and ip 56 | consul.each_pair { |name, node_params| 57 | config.vm.define name do |node_config| 58 | node_config.vm.hostname = name 59 | node_config.vm.network :private_network, ip: node_params['local_vm_ip'], adaptor: if_adapter 60 | node_config.vm.provision :hostmanager 61 | 62 | # Forward Consul UI port 63 | node_config.vm.network "forwarded_port", guest: 8500, host: 8500 + node_params['server_id'], protocol: 'tcp' 64 | 65 | # Provisioners 66 | provision_puppet( node_config, "base.pp" ) 67 | provision_puppet( node_config, "consul_server.pp" ) { |puppet| 68 | puppet.facter = { 69 | 'datacenter' => 'dc1', 70 | 'bind_addr' => '0.0.0.0', 71 | 'node_name' => name, 72 | 'retry_join' => consul.keys.join(','), 73 | 'bootstrap_expect' => consul.length 74 | } 75 | } 76 | 77 | provider_virtualbox( nil, node_config, 256 ) { |vb, override| 78 | vb.linked_clone = true 79 | 80 | # Override the bind_addr on vbox to use the backend network 81 | provision_puppet( override, "consul_server.pp" ) { |puppet| 82 | puppet.facter = { 83 | 'bind_addr' => node_params['local_vm_ip'], 84 | 'default_interface' => 'eth1' 85 | } 86 | } 87 | } 88 | 89 | provider_aws( "consul #{name}", node_config, 'm3.medium', node_params['aws_region'], node_params['security_groups'], aws_ips) 90 | 91 | end 92 | } 93 | 94 | # Create clients 95 | client.each_pair { |name, node_params| 96 | config.vm.define name do |node_config| 97 | node_config.vm.hostname = name 98 | 
node_config.vm.network :private_network, ip: node_params['local_vm_ip'], adapter: if_adapter 99 | node_config.vm.provision :hostmanager 100 | 101 | # Provisioners 102 | provision_puppet( node_config, "base.pp" ) 103 | provision_puppet( node_config, "consul_client.pp" ) { |puppet| 104 | puppet.facter = { 105 | 'datacenter' => 'dc1', 106 | 'bind_addr' => '0.0.0.0', 107 | 'node_name' => name, 108 | 'retry_join' => consul.keys.join(',') 109 | } 110 | } 111 | 112 | provider_virtualbox( nil, node_config, 256 ) { |vb, override| 113 | vb.linked_clone = true 114 | 115 | # Override the bind_addr on vbox to use the backend network 116 | provision_puppet( override, "consul_client.pp" ) { |puppet| 117 | puppet.facter = { 118 | 'bind_addr' => node_params['local_vm_ip'], 119 | 'default_interface' => 'eth1' 120 | } 121 | } 122 | } 123 | 124 | provider_aws( "consul #{name}", node_config, 'm3.medium', node_params['aws_region'], node_params['security_groups'], aws_ips) 125 | 126 | end 127 | } 128 | end 129 | -------------------------------------------------------------------------------- /modules/test/files/sysbench_custom_lua/custom-common.lua: -------------------------------------------------------------------------------- 1 | -- Input parameters 2 | -- oltp-tables-count - number of tables to create 3 | -- oltp-secondary - use a secondary key instead of the PRIMARY key for the id column 4 | -- 5 | -- 6 | 7 | function create_insert(table_id) 8 | 9 | local index_name 10 | local i 11 | local j 12 | local query 13 | 14 | if (oltp_secondary) then 15 | index_name = "KEY xid" 16 | else 17 | index_name = "PRIMARY KEY" 18 | end 19 | 20 | i = table_id 21 | 22 | print("Creating table 'sbtest" .. i .. "'...") 23 | if (db_driver == "mysql") then 24 | query = [[ 25 | CREATE TABLE sbtest]] .. i .. [[ ( 26 | id INTEGER UNSIGNED NOT NULL ]] .. 27 | ((oltp_auto_inc and "AUTO_INCREMENT") or "") .. [[, 28 | k INTEGER UNSIGNED DEFAULT '0' NOT NULL, 29 | c CHAR(120) DEFAULT '' NOT NULL, 30 | pad CHAR(60) DEFAULT '' NOT NULL, 31 | ]] .. index_name .. [[ (id) 32 | ) /*! ENGINE = ]] .. mysql_table_engine .. 33 | " MAX_ROWS = " .. myisam_max_rows .. " */" 34 | 35 | elseif (db_driver == "pgsql") then 36 | query = [[ 37 | CREATE TABLE sbtest]] .. i .. [[ ( 38 | id SERIAL NOT NULL, 39 | k INTEGER DEFAULT '0' NOT NULL, 40 | c CHAR(120) DEFAULT '' NOT NULL, 41 | pad CHAR(60) DEFAULT '' NOT NULL, 42 | ]] .. index_name .. [[ (id) 43 | ) ]] 44 | 45 | elseif (db_driver == "drizzle") then -- include the table index in the name, as the other drivers do 46 | query = [[ 47 | CREATE TABLE sbtest]] .. i .. [[ ( 48 | id INTEGER NOT NULL ]] .. ((oltp_auto_inc and "AUTO_INCREMENT") or "") .. [[, 49 | k INTEGER DEFAULT '0' NOT NULL, 50 | c CHAR(120) DEFAULT '' NOT NULL, 51 | pad CHAR(60) DEFAULT '' NOT NULL, 52 | ]] .. index_name .. [[ (id) 53 | ) ]] 54 | else 55 | print("Unknown database driver: " .. db_driver) 56 | return 1 57 | end 58 | 59 | db_query(query) 60 | 61 | db_query("CREATE INDEX k_" .. i .. " on sbtest" .. i .. "(k)") 62 | 63 | print("Inserting " .. oltp_table_size .. " records into 'sbtest" .. i .. "'") 64 | 65 | if (oltp_auto_inc) then 66 | db_bulk_insert_init("INSERT INTO sbtest" .. i .. "(k, c, pad) VALUES") 67 | else 68 | db_bulk_insert_init("INSERT INTO sbtest" .. i ..
"(id, k, c, pad) VALUES") 69 | end 70 | 71 | local c_val 72 | local pad_val 73 | 74 | 75 | for j = 1,oltp_table_size do 76 | 77 | c_val = sb_rand_str([[ 78 | ###########-###########-###########-###########-###########-###########-###########-###########-###########-###########]]) 79 | pad_val = sb_rand_str([[ 80 | ###########-###########-###########-###########-###########]]) 81 | 82 | if (oltp_auto_inc) then 83 | db_bulk_insert_next("(" .. sb_rand(1, oltp_table_size) .. ", '".. c_val .."', '" .. pad_val .. "')") 84 | else 85 | db_bulk_insert_next("("..j.."," .. sb_rand(1, oltp_table_size) .. ",'".. c_val .."', '" .. pad_val .. "' )") 86 | end 87 | end 88 | 89 | db_bulk_insert_done() 90 | 91 | 92 | end 93 | 94 | 95 | function prepare() 96 | local query 97 | local i 98 | local j 99 | 100 | set_vars() 101 | 102 | db_connect() 103 | 104 | 105 | for i = 1,oltp_tables_count do 106 | create_insert(i) 107 | end 108 | 109 | return 0 110 | end 111 | 112 | function cleanup() 113 | local i 114 | 115 | set_vars() 116 | 117 | for i = 1,oltp_tables_count do 118 | print("Dropping table 'sbtest" .. i .. "'...") 119 | db_query("DROP TABLE sbtest".. i ) 120 | end 121 | end 122 | 123 | function set_vars() 124 | oltp_table_size = oltp_table_size or 10000 125 | oltp_range_size = oltp_range_size or 100 126 | oltp_tables_count = oltp_tables_count or 1 127 | oltp_point_selects = oltp_point_selects or 10 128 | oltp_simple_ranges = oltp_simple_ranges or 1 129 | oltp_sum_ranges = oltp_sum_ranges or 1 130 | oltp_order_ranges = oltp_order_ranges or 1 131 | oltp_distinct_ranges = oltp_distinct_ranges or 1 132 | oltp_index_updates = oltp_index_updates or 1 133 | oltp_non_index_updates = oltp_non_index_updates or 1 134 | oltp_reconnect = oltp_reconnect or false 135 | oltp_rand_delay_min = oltp_rand_delay_min or 0 136 | oltp_rand_delay_max = oltp_rand_delay_max or 0 137 | 138 | if (oltp_rand_delay_max ~= 0 or oltp_rand_delay_min ~=0 ) then 139 | oltp_rand_delay = true 140 | else 141 | oltp_rand_delay = false 142 | end 143 | 144 | if (oltp_auto_inc == 'off') then 145 | oltp_auto_inc = false 146 | else 147 | oltp_auto_inc = true 148 | end 149 | 150 | if (oltp_read_only == 'on') then 151 | oltp_read_only = true 152 | else 153 | oltp_read_only = false 154 | end 155 | 156 | if (oltp_skip_trx == 'on') then 157 | oltp_skip_trx = true 158 | else 159 | oltp_skip_trx = false 160 | end 161 | 162 | end 163 | -------------------------------------------------------------------------------- /modules/test/files/run_sysbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | engine=innodb 4 | schema=sbtest 5 | mysql_port=3306 6 | mysql_host=127.0.0.1 7 | tables=1 8 | rows=100000 9 | threads=1 10 | tx_rate=0 11 | max_requests=0 12 | max_time=0 13 | sysbench_args="" 14 | 15 | while getopts ":e:s:h:p:t:r:c:b:x:a:d:o:" opt; do 16 | case $opt in 17 | e) 18 | echo "Engine: $OPTARG" >&2 19 | engine=$OPTARG 20 | ;; 21 | s) 22 | echo "Schema: $OPTARG" >&2 23 | schema=$OPTARG 24 | ;; 25 | h) 26 | echo "MySQL Host: $OPTARG" >&2 27 | mysql_host=$OPTARG 28 | ;; 29 | p) 30 | echo "MySQL Port: $OPTARG" >&2 31 | mysql_port=$OPTARG 32 | ;; 33 | t) 34 | echo "Tables: $OPTARG" >&2 35 | tables=$OPTARG 36 | ;; 37 | r) 38 | echo "Rows: $OPTARG" >&2 39 | rows=$OPTARG 40 | ;; 41 | c) 42 | echo "Threads: $OPTARG" >&2 43 | threads=$OPTARG 44 | ;; 45 | b) 46 | echo "TX Rate: $OPTARG" >&2 47 | tx_rate=$OPTARG 48 | ;; 49 | x) 50 | echo "Task: $OPTARG" >&2 51 | task=$OPTARG 52 | ;; 53 | \?) 
54 | echo "Invalid option: -$OPTARG" >&2 55 | exit 1 56 | ;; 57 | a) 58 | echo "Amount of Requests: $OPTARG" >&2 59 | max_requests=$OPTARG 60 | ;; 61 | d) 62 | echo "Duration: $OPTARG" >&2 63 | max_time=$OPTARG 64 | ;; 65 | o) 66 | echo "Sysbench args: $OPTARG" >&2 67 | sysbench_args=$OPTARG 68 | ;; 69 | :) 70 | echo "Option -$OPTARG requires an argument." >&2 71 | exit 1 72 | ;; 73 | esac 74 | done 75 | 76 | 77 | 78 | function prepare { # drop any leftover sbtest tables, then (re)load the dataset in parallel 79 | 80 | sysbench \ 81 | --db-driver=mysql \ 82 | --test=/usr/share/doc/sysbench/tests/db/oltp.lua \ 83 | --mysql-table-engine=$engine \ 84 | --mysql-user=test \ 85 | --mysql-password=test \ 86 | --mysql-db=$schema \ 87 | --mysql-host=$mysql_host \ 88 | --mysql-port=$mysql_port \ 89 | --oltp-tables-count=$tables \ 90 | $sysbench_args \ 91 | cleanup 92 | 93 | sysbench \ 94 | --test=/usr/share/doc/sysbench/tests/db/parallel_prepare.lua \ 95 | --db-driver=mysql \ 96 | --mysql-user=test \ 97 | --mysql-password=test \ 98 | --mysql-db=$schema \ 99 | --mysql-host=$mysql_host \ 100 | --mysql-port=$mysql_port \ 101 | --oltp-tables-count=$tables \ 102 | --oltp-table-size=$rows \ 103 | --oltp-auto-inc=off \ 104 | --num-threads=$threads \ 105 | $sysbench_args \ 106 | run 107 | 108 | } 109 | 110 | 111 | 112 | function oltp { 113 | 114 | sysbench \ 115 | --db-driver=mysql \ 116 | --test=/usr/share/doc/sysbench/tests/db/oltp.lua \ 117 | --mysql-table-engine=$engine \ 118 | --mysql-user=test \ 119 | --mysql-password=test \ 120 | --mysql-db=$schema \ 121 | --mysql-host=$mysql_host \ 122 | --mysql-port=$mysql_port \ 123 | --oltp-tables-count=$tables \ 124 | --report-interval=1 \ 125 | --num-threads=$threads \ 126 | --max-requests=$max_requests \ 127 | --max-time=$max_time \ 128 | --tx-rate=$tx_rate \ 129 | $sysbench_args \ 130 | run | grep -v "queue length" 131 | } 132 | 133 | function update_index { 134 | 135 | sysbench \ 136 | --db-driver=mysql \ 137 | --test=/usr/share/doc/sysbench/tests/db/update_index.lua \ 138 | --mysql-table-engine=$engine \ 139 | --mysql-user=test \ 140 | --mysql-password=test \ 141 | --mysql-db=$schema \ 142 | --mysql-host=$mysql_host \ 143 | --mysql-port=$mysql_port \ 144 | --oltp-tables-count=$tables \ 145 | --report-interval=1 \ 146 | --num-threads=$threads \ 147 | --max-requests=$max_requests \ 148 | --max-time=$max_time \ 149 | --tx-rate=$tx_rate \ 150 | $sysbench_args \ 151 | run | grep -v "queue length" 152 | } 153 | 154 | 155 | 156 | function oltp_custom { 157 | 158 | sysbench \ 159 | --db-driver=mysql \ 160 | --test=/root/sysbench_custom_lua/custom-oltp.lua \ 161 | --mysql-table-engine=$engine \ 162 | --mysql-user=test \ 163 | --mysql-password=test \ 164 | --mysql-db=$schema \ 165 | --mysql-host=$mysql_host \ 166 | --mysql-port=$mysql_port \ 167 | --oltp-tables-count=$tables \ 168 | --report-interval=1 \ 169 | --num-threads=$threads \ 170 | --max-requests=$max_requests \ 171 | --max-time=$max_time \ 172 | --tx-rate=$tx_rate \ 173 | $sysbench_args \ 174 | run | grep -v "queue length" 175 | } 176 | 177 | case $task in 178 | prepare) 179 | prepare 180 | ;; 181 | oltp) 182 | oltp 183 | ;; 184 | update_index) 185 | update_index 186 | ;; 187 | oltp_custom) 188 | oltp_custom 189 | ;; 190 | *) 191 | echo "ERROR: no or unknown task (-x) given: $task" 192 | exit 1 193 | ;; 194 | esac 195 | 196 | -------------------------------------------------------------------------------- /modules/mha/files/master_ip_failover: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | 3 | # Copyright (C) 2011 DeNA
Co.,Ltd. 4 | # 5 | # This program is free software; you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation; either version 2 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with this program; if not, write to the Free Software 17 | # Foundation, Inc., 18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 19 | 20 | ## Note: This is a sample script and is not complete. Modify the script based on your environment. 21 | 22 | use strict; 23 | use warnings FATAL => 'all'; 24 | 25 | use Getopt::Long; 26 | use MHA::DBHelper; 27 | use MHA::ManagerUtil; 28 | 29 | 30 | my ( 31 | $command, $ssh_user, $orig_master_host, 32 | $orig_master_ip, $orig_master_port, $new_master_host, 33 | $new_master_ip, $new_master_port, $new_master_user, 34 | $new_master_password 35 | ); 36 | GetOptions( 37 | 'command=s' => \$command, 38 | 'ssh_user=s' => \$ssh_user, 39 | 'orig_master_host=s' => \$orig_master_host, 40 | 'orig_master_ip=s' => \$orig_master_ip, 41 | 'orig_master_port=i' => \$orig_master_port, 42 | 'new_master_host=s' => \$new_master_host, 43 | 'new_master_ip=s' => \$new_master_ip, 44 | 'new_master_port=i' => \$new_master_port, 45 | 'new_master_user=s' => \$new_master_user, 46 | 'new_master_password=s' => \$new_master_password, 47 | ); 48 | 49 | exit &main(); 50 | 51 | sub main { 52 | if ( $command eq "stop" || $command eq "stopssh" ) { 53 | 54 | # $orig_master_host, $orig_master_ip, $orig_master_port are passed. 55 | # If you manage master ip address at global catalog database, 56 | # invalidate orig_master_ip here. 57 | my $exit_code = 1; 58 | eval { 59 | # updating global catalog, etc 60 | MHA::ManagerUtil::exec_ssh_cmd( $orig_master_ip, '22', "sudo ip addr del 192.168.70.100/32 dev eth1", undef ); 61 | 62 | $exit_code = 0; 63 | }; 64 | if ($@) { 65 | warn "Got Error: $@\n"; 66 | exit $exit_code; 67 | } 68 | exit $exit_code; 69 | } 70 | elsif ( $command eq "start" ) { 71 | 72 | # all arguments are passed. 73 | # If you manage master ip address at global catalog database, 74 | # activate new_master_ip here. 75 | # You can also grant write access (create user, set read_only=0, etc) here. 
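# The eval below implements the VIP takeover used in this sample setup:
# binary logging is disabled locally so the changes are not replicated, writes
# are enabled on the new master (read_only=0), and then VIP 192.168.70.100 is
# added to eth1 with gratuitous ARP so peers refresh their ARP caches.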
76 | my $exit_code = 10; 77 | eval { 78 | my $new_master_handler = new MHA::DBHelper(); 79 | 80 | # args: hostname, port, user, password, raise_error_or_not 81 | $new_master_handler->connect( $new_master_ip, $new_master_port, 82 | $new_master_user, $new_master_password, 1 ); 83 | 84 | ## Set read_only=0 on the new master 85 | $new_master_handler->disable_log_bin_local(); 86 | print "Set read_only=0 on the new master.\n"; 87 | $new_master_handler->disable_read_only(); 88 | 89 | ## Creating an app user on the new master 90 | # print "Creating app user on the new master..\n"; 91 | # FIXME_xxx_create_user( $new_master_handler->{dbh} ); 92 | $new_master_handler->enable_log_bin_local(); 93 | $new_master_handler->disconnect(); 94 | 95 | ## Update master ip on the catalog database, etc 96 | MHA::ManagerUtil::exec_ssh_cmd( $new_master_ip, '22', "sudo ip addr add 192.168.70.100/32 dev eth1", undef ); 97 | MHA::ManagerUtil::exec_ssh_cmd( $new_master_ip, '22', "sudo arping -U -c 5 -I eth1 192.168.70.100", undef ); 98 | 99 | 100 | 101 | $exit_code = 0; 102 | }; 103 | if ($@) { 104 | warn $@; 105 | 106 | # If you want to continue failover, exit 10. 107 | exit $exit_code; 108 | } 109 | exit $exit_code; 110 | } 111 | elsif ( $command eq "status" ) { 112 | 113 | my @rc = MHA::ManagerUtil::exec_ssh_cmd( $orig_master_ip, '22', "sudo ip addr list | grep 192.168.70.100", undef ); 114 | 115 | if( $rc[0] == 0 ) { 116 | print "INFO: VIP 192.168.70.100 found on Master\n"; 117 | } else { 118 | print "CRITICAL: VIP 192.168.70.100 not found on Master!\n"; 119 | 120 | } 121 | 122 | exit 0; 123 | } 124 | else { 125 | &usage(); 126 | exit 1; 127 | } 128 | } 129 | 130 | sub usage { 131 | print 132 | "Usage: master_ip_failover --command=start|stop|stopssh|status --orig_master_host=host --orig_master_ip=ip --orig_master_port=port --new_master_host=host --new_master_ip=ip --new_master_port=port\n"; 133 | } 134 | 135 | -------------------------------------------------------------------------------- /modules/base/files/sshd_config_rootenabled: -------------------------------------------------------------------------------- 1 | # $OpenBSD: sshd_config,v 1.93 2014/01/10 05:59:19 djm Exp $ 2 | 3 | # This is the sshd server system-wide configuration file. See 4 | # sshd_config(5) for more information. 5 | 6 | # This sshd was compiled with PATH=/usr/local/bin:/usr/bin 7 | 8 | # The strategy used for options in the default sshd_config shipped with 9 | # OpenSSH is to specify options with their default value where 10 | # possible, but leave them commented. Uncommented options override the 11 | # default value. 12 | 13 | # If you want to change the port on a SELinux system, you have to tell 14 | # SELinux about this change. 
15 | # semanage port -a -t ssh_port_t -p tcp #PORTNUMBER 16 | # 17 | #Port 22 18 | #AddressFamily any 19 | #ListenAddress 0.0.0.0 20 | #ListenAddress :: 21 | 22 | # The default requires explicit activation of protocol 1 23 | #Protocol 2 24 | 25 | # HostKey for protocol version 1 26 | #HostKey /etc/ssh/ssh_host_key 27 | # HostKeys for protocol version 2 28 | HostKey /etc/ssh/ssh_host_rsa_key 29 | #HostKey /etc/ssh/ssh_host_dsa_key 30 | HostKey /etc/ssh/ssh_host_ecdsa_key 31 | HostKey /etc/ssh/ssh_host_ed25519_key 32 | 33 | # Lifetime and size of ephemeral version 1 server key 34 | #KeyRegenerationInterval 1h 35 | #ServerKeyBits 1024 36 | 37 | # Ciphers and keying 38 | #RekeyLimit default none 39 | 40 | # Logging 41 | # obsoletes QuietMode and FascistLogging 42 | #SyslogFacility AUTH 43 | SyslogFacility AUTHPRIV 44 | #LogLevel INFO 45 | 46 | # Authentication: 47 | 48 | #LoginGraceTime 2m 49 | PermitRootLogin yes 50 | #StrictModes yes 51 | #MaxAuthTries 6 52 | #MaxSessions 10 53 | 54 | #RSAAuthentication yes 55 | #PubkeyAuthentication yes 56 | 57 | # The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2 58 | # but this is overridden so installations will only check .ssh/authorized_keys 59 | AuthorizedKeysFile .ssh/authorized_keys 60 | 61 | #AuthorizedPrincipalsFile none 62 | 63 | #AuthorizedKeysCommand none 64 | #AuthorizedKeysCommandUser nobody 65 | 66 | # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts 67 | #RhostsRSAAuthentication no 68 | # similar for protocol version 2 69 | #HostbasedAuthentication no 70 | # Change to yes if you don't trust ~/.ssh/known_hosts for 71 | # RhostsRSAAuthentication and HostbasedAuthentication 72 | #IgnoreUserKnownHosts no 73 | # Don't read the user's ~/.rhosts and ~/.shosts files 74 | #IgnoreRhosts yes 75 | 76 | # To disable tunneled clear text passwords, change to no here! 77 | #PasswordAuthentication yes 78 | #PermitEmptyPasswords no 79 | PasswordAuthentication yes 80 | 81 | # Change to no to disable s/key passwords 82 | #ChallengeResponseAuthentication yes 83 | ChallengeResponseAuthentication no 84 | 85 | # Kerberos options 86 | #KerberosAuthentication no 87 | #KerberosOrLocalPasswd yes 88 | #KerberosTicketCleanup yes 89 | #KerberosGetAFSToken no 90 | #KerberosUseKuserok yes 91 | 92 | # GSSAPI options 93 | GSSAPIAuthentication yes 94 | GSSAPICleanupCredentials no 95 | #GSSAPIStrictAcceptorCheck yes 96 | #GSSAPIKeyExchange no 97 | #GSSAPIEnablek5users no 98 | 99 | # Set this to 'yes' to enable PAM authentication, account processing, 100 | # and session processing. If this is enabled, PAM authentication will 101 | # be allowed through the ChallengeResponseAuthentication and 102 | # PasswordAuthentication. Depending on your PAM configuration, 103 | # PAM authentication via ChallengeResponseAuthentication may bypass 104 | # the setting of "PermitRootLogin without-password". 105 | # If you just want the PAM account and session checks to run without 106 | # PAM authentication, then enable this but set PasswordAuthentication 107 | # and ChallengeResponseAuthentication to 'no'. 108 | # WARNING: 'UsePAM no' is not supported in Red Hat Enterprise Linux and may cause several 109 | # problems. 
110 | UsePAM yes 111 | 112 | #AllowAgentForwarding yes 113 | #AllowTcpForwarding yes 114 | #GatewayPorts no 115 | X11Forwarding yes 116 | #X11DisplayOffset 10 117 | #X11UseLocalhost yes 118 | #PermitTTY yes 119 | #PrintMotd yes 120 | #PrintLastLog yes 121 | #TCPKeepAlive yes 122 | #UseLogin no 123 | UsePrivilegeSeparation sandbox # Default for new installations. 124 | #PermitUserEnvironment no 125 | #Compression delayed 126 | #ClientAliveInterval 0 127 | #ClientAliveCountMax 3 128 | #ShowPatchLevel no 129 | UseDNS no 130 | #PidFile /var/run/sshd.pid 131 | #MaxStartups 10:30:100 132 | #PermitTunnel no 133 | #ChrootDirectory none 134 | #VersionAddendum none 135 | 136 | # no default banner path 137 | #Banner none 138 | 139 | # Accept locale-related environment variables 140 | AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES 141 | AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT 142 | AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE 143 | AcceptEnv XMODIFIERS 144 | 145 | # override default of no subsystems 146 | Subsystem sftp /usr/libexec/openssh/sftp-server 147 | 148 | # Example of overriding settings on a per-user basis 149 | #Match User anoncvs 150 | # X11Forwarding no 151 | # AllowTcpForwarding no 152 | # PermitTTY no 153 | # ForceCommand cvs server -------------------------------------------------------------------------------- /modules/test/manifests/imdb.pp: -------------------------------------------------------------------------------- 1 | class test::imdb ($type = 'ibd'){ 2 | 3 | 4 | 5 | 6 | # DB Stuff 7 | file { 8 | "/tmp/my.grants.sql": 9 | source => "puppet:///modules/test/imdb/my.grants.sql"; 10 | "/tmp/my.indexes.sql": 11 | source => "puppet:///modules/test/imdb/my.indexes.sql"; 12 | } 13 | 14 | if ( $type == 'sql' ) { 15 | file { 16 | "/tmp/imdb.sql.bz2": 17 | require => Exec["mysql-download-imdb"]; 18 | } 19 | 20 | exec { 21 | "mysql-download-imdb": 22 | command => "/usr/bin/wget -O /tmp/imdb.sql.bz2 https://s3.amazonaws.com/percona-training/imdb.sql.bz2 && touch /tmp/imdb.sql.bz2.downloaded", 23 | timeout => 0, 24 | creates => "/tmp/imdb.sql.bz2.downloaded"; 25 | "mysql-indexes-add": 26 | command => "/usr/bin/mysql -u root imdb < /tmp/my.indexes.sql && touch /tmp/my.indexes.sql.done", 27 | creates => "/tmp/my.indexes.sql.done", 28 | timeout => 0, 29 | require => [ File["/tmp/my.indexes.sql"], Exec["mysql-imdb-import"] ]; 30 | "mysql-imdb-import": 31 | command => "/usr/bin/bzcat /tmp/imdb.sql.bz2 | mysql -u root imdb && touch /tmp/imdb.sql.bz2.imported", 32 | creates => "/tmp/imdb.sql.bz2.imported", 33 | timeout => 0, 34 | require => [ Exec['mysql-create-schema'], File["/tmp/imdb.sql.bz2"] ]; 35 | } 36 | } elsif ( $type == 'ibd' ) { 37 | file { 38 | "/tmp/imdb-innodb-ibd.tar.gz.downloaded": 39 | require => Exec["mysql-download-imdb-tablespaces"]; 40 | "/var/lib/mysql/imdb_import": 41 | ensure => directory; 42 | "/tmp/imdb-innodb-ibd.import.sh": 43 | content => '#!/bin/bash -e 44 | for file in `ls /var/lib/mysql/imdb_import/*.cfg`; do 45 | table=`basename $file .cfg` 46 | mysql -e "ALTER TABLE $table DISCARD TABLESPACE;" imdb 47 | mv /var/lib/mysql/imdb_import/$table.{cfg,ibd} /var/lib/mysql/imdb/ 48 | mysql -e "ALTER TABLE $table IMPORT TABLESPACE;" imdb 49 | mysql -e "ANALYZE TABLE $table;" imdb 50 | rm /var/lib/mysql/imdb/$table.cfg 51 | done 52 | touch /tmp/imdb-innodb-ibd.import.sh.done 53 | rm -rf /var/lib/mysql/imdb_import 54 | ', 55 | mode => 755; 56 | 57 | } 58 | 59 | exec { 60 | "mysql-download-imdb-tablespaces": 61 | command 
=> "/usr/bin/wget -O /tmp/imdb-innodb-ibd.tar.gz https://s3.amazonaws.com/percona-training/imdb-innodb-ibd.tar.gz && touch /tmp/imdb-innodb-ibd.tar.gz.downloaded", 62 | timeout => 0, 63 | creates => "/tmp/imdb-innodb-ibd.tar.gz.downloaded"; 64 | "mysql-extract-imdb-tablespaces": 65 | command => "/usr/bin/tar -xzvf /tmp/imdb-innodb-ibd.tar.gz && touch /tmp/imdb-innodb-ibd.tar.gz.extracted", 66 | cwd => "/var/lib/mysql/imdb_import", 67 | creates => "/tmp/imdb-innodb-ibd.tar.gz.extracted", 68 | require => [ Exec["mysql-download-imdb-tablespaces"], File["/var/lib/mysql/imdb_import"] ]; 69 | "mysql-imdb-create-tables-tablespace": 70 | command => "/usr/bin/cat /var/lib/mysql/imdb_import/schema.sql | mysql -u root imdb", 71 | creates => "/var/lib/mysql/imdb/cast_info.frm", 72 | require => [ Exec["mysql-extract-imdb-tablespaces"], Exec["mysql-create-schema"] ]; 73 | "mysql-import-tablespaces": 74 | command => "/tmp/imdb-innodb-ibd.import.sh", 75 | timeout => 0, 76 | creates => "/tmp/imdb-innodb-ibd.import.sh.done", 77 | require => [ File["/tmp/imdb-innodb-ibd.import.sh"] , Exec["mysql-imdb-create-tables-tablespace"] ]; 78 | } 79 | 80 | } 81 | 82 | exec { 83 | "mysql-grants-apply": 84 | command => "/usr/bin/mysql -u root < /tmp/my.grants.sql && touch /tmp/my.grants.sql.done", 85 | creates => "/tmp/my.grants.sql.done", 86 | require => [ File["/tmp/my.grants.sql"] ]; 87 | "my-movies-get-branch": 88 | command => "/usr/bin/bzr branch lp:my-movies && touch /tmp/my-movies.downloaded", 89 | cwd => "/tmp", 90 | creates => "/tmp/my-movies.downloaded", 91 | require => [ Package["bzr"]]; 92 | "mysql-create-schema": 93 | command => "/usr/bin/mysqladmin -u root create imdb", 94 | creates => "/var/lib/mysql/imdb"; 95 | } 96 | 97 | 98 | # App stuff 99 | package { 100 | "httpd": 101 | ensure => latest; 102 | "php": 103 | ensure => latest; 104 | "php-mysql": 105 | ensure => latest; 106 | "bzr": 107 | ensure => latest; 108 | } 109 | 110 | service { 111 | "httpd": 112 | ensure => running, 113 | require => [ Package["httpd"], Package["php"], Package["php-mysql"] ]; 114 | } 115 | 116 | exec { 117 | "install-my-movies": 118 | command => "/usr/bin/bzr branch lp:my-movies && touch /var/www/html/my-movies.ok", 119 | cwd => "/var/www/html", 120 | creates => "/var/www/html/my-movies.ok", 121 | require => [ Package["httpd"], Package["bzr"] ]; 122 | } 123 | 124 | file { 125 | "/var/www/html/my-movies/lib/config.inc.php": 126 | source => "puppet:///modules/test/my-movies.config.inc.php", 127 | require => Exec["install-my-movies"]; 128 | } 129 | 130 | 131 | } 132 | -------------------------------------------------------------------------------- /Vagrantfile.pxc_playground.rb: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Assumes a box from https://github.com/grypyrg/packer-percona 5 | 6 | # This sets up 3 nodes with a common PXC 7 | # it also installs haproxy 8 | 9 | # HOW TO USE 10 | # You have to bring the machines up prior to provisioning. 
So run it in 2 steps: 11 | # 12 | # # vagrant up --no-provision --parallel 13 | # # vagrant provision --parallel 14 | 15 | prefix='cluster-' 16 | iprange='192.168.56' 17 | if_adapter='vboxnet0' 18 | 19 | enable_repo_percona_testing=true 20 | 21 | require File.dirname(__FILE__) + '/lib/vagrant-common.rb' 22 | 23 | mysql_version = "56" 24 | 25 | # Node names and ips (for local VMs) 26 | # (Amazon) aws_region is where to bring up the node 27 | # (Amazon) Security groups are 'default' (22 open) and 'pxc' (3306, 4567-4568, 4444 open) for each respective region 28 | # (Amazon) HAproxy also needs the 'haproxy' security group (3307-3309, 8080, 8000) for each respective region 29 | # Don't worry about amazon config if you are not using that provider. 30 | pxc_nodes = { 31 | prefix + 'pxc1' => { 32 | 'local_vm_ip' => iprange + '.2', 33 | 'aws_region' => 'us-east-1', 34 | 'security_groups' => ['default','pxc', 'haproxy'], 35 | 'haproxy_disabled' => 'false', 36 | 'maxscale_disabled' => 'false', 37 | 'haproxy_primary' => true, 38 | 'pxc_bootstrap_node' => true, 39 | 'server_id' => '1' 40 | }, 41 | prefix + 'pxc2' => { 42 | 'local_vm_ip' => iprange + '.3', 43 | 'aws_region' => 'us-east-1', 44 | 'security_groups' => ['default','pxc', 'haproxy'], 45 | 'pxc_bootstrap_node' => false, 46 | 'haproxy_disabled' => 'true', 47 | 'maxscale_disabled' => 'true', 48 | 'server_id' => '2' 49 | }, 50 | prefix + 'pxc3' => { 51 | 'local_vm_ip' => iprange + '.4', 52 | 'aws_region' => 'us-east-1', 53 | 'security_groups' => ['default','pxc', 'haproxy'], 54 | 'pxc_bootstrap_node' => false, 55 | 'haproxy_disabled' => 'true', 56 | 'maxscale_disabled' => 'true', 57 | 'server_id' => '3' 58 | } 59 | } 60 | 61 | 62 | # make a comma separated serverlist, which can be reused in puppet 63 | serverlist=pxc_nodes.map{|k,v| "#{k}"}.join(',') 64 | 65 | # should we use the public or private ips when using AWS 66 | hostmanager_aws_ips='private' 67 | 68 | # Support for cloud.percona.com through the percona-agent 69 | # please ensure to fill in the correct api key 70 | percona_agent_enabled=false 71 | percona_agent_api_key='----' 72 | 73 | Vagrant.configure("2") do |config| 74 | config.vm.box = "grypyrg/centos-x86_64" 75 | config.ssh.username = "vagrant" 76 | 77 | # hostmanager is disabled by default; it is run during the provision phase instead 78 | config.hostmanager.enabled = false 79 | config.hostmanager.include_offline = true 80 | 81 | # Create all three nodes identically except for name and ip 82 | pxc_nodes.each_pair { |name, node_params| 83 | config.vm.define name do |node_config| 84 | node_config.vm.hostname = name 85 | node_config.vm.network :private_network, ip: node_params['local_vm_ip'], adapter: if_adapter 86 | 87 | # this is to test haproxy PROXY support in PXC 88 | if name == prefix + 'pxc1' 89 | node_config.vm.network :private_network, ip: '172.0.0.1' 90 | end 91 | 92 | ssh_port = "882" + node_params["server_id"] 93 | haproxy_port = "888" + node_params["server_id"] 94 | 95 | node_config.vm.network "forwarded_port", guest: 22, host: ssh_port, auto_correct: false 96 | node_config.vm.network "forwarded_port", guest: 8080, host: haproxy_port, auto_correct: false 97 | 98 | # custom port forwarding 99 | node_config.vm.network "forwarded_port", guest: 8080, host: 8080, auto_correct: true 100 | 101 | # Provisioners 102 | node_config.vm.provision :hostmanager 103 | 104 | provision_puppet( node_config, "pxc_playground.pp" ) { |puppet| 105 | puppet.facter = { 106 | 'vagrant_hostname' => name, 107 | "percona_server_version" => mysql_version, 108 |
"haproxy_servers" => serverlist, 109 | "haproxy_disabled" => node_params['haproxy_disabled'], 110 | "maxscale_disabled" => node_params['maxscale_disabled'], 111 | "haproxy_servers_primary" => pxc_nodes.select{|k,v| ! v.select{|k2,v2| k2=="haproxy_primary" && v2==true}.empty? }.map{|k3,v3| "#{k3}"}.join(','), 112 | "maxscale_servers" => serverlist, 113 | "cluster_servers" => serverlist, 114 | "datadir_dev" => "dm-2", 115 | 'datadir_fs' => "xfs", 116 | 'percona_agent_enabled' => percona_agent_enabled, 117 | 'percona_agent_api_key' => percona_agent_api_key, 118 | 'innodb_buffer_pool_size' => '128M', 119 | 'innodb_log_file_size' => '64M', 120 | 'innodb_flush_log_at_trx_commit' => '0', 121 | 'pxc_bootstrap_node' => node_params['pxc_bootstrap_node'], 122 | 'extra_mysqld_config' => 123 | 'wsrep_cluster_address=gcomm://' + pxc_nodes.map{|k,v| "#{k}"}.join(',') + "\n" + 124 | "wsrep_sst_receive_address=" + name + "\n" + 125 | "wsrep_node_address=" + name + "\n" + 126 | "log_slave_updates\n" + 127 | "server_id=" + node_params['server_id'] + "\n" + 128 | "log_bin" + "\n" 129 | } 130 | } 131 | 132 | # Disable these options 133 | # 'wsrep_provider_options=ist.recv_addr="' + name + "\"\n" + 134 | # 'wsrep_sst_receive_address=' + name + "\n" + 135 | # 'wsrep_node_address=' + name + "\n" + 136 | 137 | # Providers 138 | provider_virtualbox( nil, node_config, 256) { |vb, override| 139 | provision_puppet( override, "pxc_playground.pp" ) {|puppet| 140 | puppet.facter = {"datadir_dev" => "dm-2"} 141 | } 142 | } 143 | 144 | provider_aws( "PXC #{name}", node_config, 'm3.medium', node_params['aws_region'], node_params['security_groups'], hostmanager_aws_ips) { |aws, override| 145 | aws.block_device_mapping = [ 146 | { 147 | 'DeviceName' => "/dev/xvdf", 148 | 'VirtualName' => "ephemeral0" 149 | } 150 | ] 151 | provision_puppet( override, "pxc_playground.pp" ) {|puppet| 152 | puppet.facter = {"datadir_dev" => "xvdf"} 153 | } 154 | 155 | } 156 | 157 | end 158 | } 159 | 160 | end 161 | 162 | -------------------------------------------------------------------------------- /lib/vagrant-common.rb: -------------------------------------------------------------------------------- 1 | # Configure this node for AWS 2 | # -- config: vm config from Vagrantfile 3 | # -- name: name for the node displayed on the aws console 4 | # -- instance_type: http://aws.amazon.com/ec2/instance-types/ 5 | # -- region: defaults to 'us-east-1' 6 | # -- hostmanager_aws_ips: when using hostmanager, should we use 'public' or 'private' ips? 
7 | 8 | $ip_cache = Hash.new 9 | def provider_aws( name, config, instance_type, region = nil, security_groups = nil, hostmanager_aws_ips = nil, subnet_id = nil ) 10 | require 'yaml' 11 | 12 | aws_secrets_file = File.join( Dir.home, '.aws_secrets' ) 13 | 14 | if( File.readable?( aws_secrets_file )) 15 | config.vm.provider "aws" do |aws, override| 16 | aws.instance_type = instance_type 17 | 18 | aws_config = YAML::load_file( aws_secrets_file ) 19 | aws.access_key_id = aws_config.fetch("access_key_id") 20 | aws.secret_access_key = aws_config.fetch("secret_access_key") 21 | 22 | aws.tags = { 23 | 'Name' => aws_config.fetch("instance_name_prefix", "") + " " + name 24 | } 25 | 26 | # Used_subnet_id can be overridden if it is nil 27 | used_subnet_id = subnet_id 28 | 29 | 30 | # workaround for https://github.com/mitchellh/vagrant-aws/issues/331 31 | override.vm.synced_folder ".", "/vagrant", type: "rsync" 32 | 33 | if region == nil 34 | aws.keypair_name = aws_config["keypair_name"] 35 | override.ssh.private_key_path = aws_config["keypair_path"] 36 | 37 | if used_subnet_id == nil 38 | used_subnet_id = aws_config.fetch("default_vpc_subnet_id") 39 | end 40 | elsif aws_config['regions'][region] != nil 41 | aws.region = region 42 | aws.keypair_name = aws_config['regions'][region]["keypair_name"] 43 | override.ssh.private_key_path = aws_config['regions'][region]["keypair_path"] 44 | 45 | if used_subnet_id == nil 46 | used_subnet_id = aws_config['regions'][region]["default_vpc_subnet_id"] 47 | end 48 | else 49 | puts "Warning: AWS region #{region} not defined in your ~/.aws_secrets file." 50 | end 51 | 52 | if used_subnet_id != nil 53 | # We assume if the vpc_subnet_id is set, then we should use it. 54 | aws.subnet_id = used_subnet_id 55 | aws.associate_public_ip = true 56 | end 57 | 58 | if security_groups != nil 59 | aws.security_groups = security_groups 60 | end 61 | 62 | if Vagrant.has_plugin?("vagrant-hostmanager") 63 | 64 | if hostmanager_aws_ips == "private" or hostmanager_aws_ips == nil 65 | awsrequest = "local-ipv4" 66 | elsif hostmanager_aws_ips == "public" 67 | awsrequest = "public-ipv4" 68 | end 69 | 70 | override.hostmanager.ip_resolver = proc do |vm| 71 | if $ip_cache[name] == nil 72 | vm.communicate.execute("curl -s http://169.254.169.254/latest/meta-data/" + awsrequest + " 2>&1") do |type,data| 73 | $ip_cache[name] = data if type == :stdout 74 | end 75 | end 76 | $ip_cache[name] 77 | end 78 | end 79 | 80 | if block_given? 81 | yield( aws, override ) 82 | end 83 | end 84 | else 85 | puts "Skipping AWS because of missing/non-readable #{aws_secrets_file} file. Read https://github.com/jayjanssen/vagrant-percona/blob/master/README.md#aws-setup for more information about setting up AWS." 
86 | end 87 | end 88 | 89 | # Configure this node for Virtualbox 90 | # -- config: vm config from Vagrantfile 91 | # -- ram: amount of RAM (in MB) 92 | def provider_virtualbox ( name, config, ram = 256, cpus = 1 ) 93 | config.vm.provider "virtualbox" do |vb, override| 94 | vb.name = name 95 | vb.cpus = cpus 96 | vb.memory = ram 97 | 98 | vb.customize ["modifyvm", :id, "--ioapic", "on" ] 99 | 100 | # fix for slow dns https://github.com/mitchellh/vagrant/issues/1172 101 | vb.customize ["modifyvm", :id, "--natdnsproxy1", "off"] 102 | vb.customize ["modifyvm", :id, "--natdnshostresolver1", "off"] 103 | 104 | # Custom ip resolver that works with DHCP or explicit addresses (and is fast) 105 | override.hostmanager.ip_resolver = proc do |vm, resolving_vm| 106 | if vm.id 107 | `VBoxManage guestproperty get #{vm.id} "/VirtualBox/GuestInfo/Net/1/V4/IP"`.split()[1] 108 | end 109 | end 110 | 111 | if block_given? 112 | yield( vb, override ) 113 | end 114 | end 115 | end 116 | 117 | # Configure this node for VMware 118 | # -- config: vm config from Vagrantfile 119 | # -- ram: amount of RAM (in MB) 120 | def provider_vmware ( name, config, ram = 256, cpus = 1 ) 121 | config.vm.provider "vmware_fusion" do |v, override| 122 | v.name = name 123 | v.vmx["memsize"] = ram 124 | v.vmx["numvcpus"] = cpus 125 | 126 | if block_given? 127 | yield( v, override ) 128 | end 129 | end 130 | end 131 | 132 | # Configure this node for Openstack 133 | def provider_openstack( name, config, flavor, security_groups = nil, network = nil, hostmanager_openstack_ips = nil ) 134 | require 'yaml' 135 | require 'vagrant-openstack-plugin' 136 | 137 | os_secrets_file = File.join( Dir.home, '.openstack_secrets' ) 138 | 139 | if( File.readable?( os_secrets_file )) 140 | config.vm.provider :openstack do |os, override| 141 | os.flavor = flavor 142 | 143 | os_config = YAML::load_file( os_secrets_file ) 144 | 145 | os.endpoint = os_config.fetch("endpoint") 146 | os.username = os_config.fetch("username") 147 | os.api_key = os_config.fetch("password") 148 | os.tenant= os_config.fetch("tenant") 149 | 150 | os.keypair_name = os_config.fetch("keypair_name") 151 | override.ssh.private_key_path = os_config.fetch("private_key_path") 152 | 153 | if security_groups != nil 154 | os.security_groups = security_groups 155 | end 156 | 157 | if network != nil 158 | os.network = network 159 | end 160 | 161 | os.floating_ip = :auto 162 | os.floating_ip_pool = "external-net" 163 | 164 | if Vagrant.has_plugin?("vagrant-hostmanager") 165 | if hostmanager_openstack_ips == "private" or hostmanager_openstack_ips == nil 166 | awsrequest = "local-ipv4" 167 | elsif hostmanager_openstack_ips == "public" 168 | awsrequest = "public-ipv4" 169 | end 170 | 171 | override.hostmanager.ip_resolver = proc do |vm| 172 | if $ip_cache[name] == nil 173 | vm.communicate.execute("curl -s http://169.254.169.254/latest/meta-data/" + awsrequest + " 2>&1") do |type,data| 174 | $ip_cache[name] = data if type == :stdout 175 | end 176 | end 177 | $ip_cache[name] 178 | end 179 | end 180 | 181 | if block_given? 182 | yield( os, override ) 183 | end 184 | end 185 | else 186 | puts "Skipping Openstack because of missing/non-readable #{os_secrets_file} file. Read https://github.com/jayjanssen/vagrant-percona/blob/master/README.md#os-setup for more information about setting up Openstack." 
187 | end 188 | end 189 | 190 | # Provision this node with Puppet 191 | # -- config: vm config from Vagrantfile 192 | # -- manifest_file: puppet manifest to use (under manifests/) 193 | def provision_puppet( config, manifest_file ) 194 | config.vm.provision manifest_file, type:"puppet", preserve_order: true do |puppet| 195 | puppet.manifest_file = manifest_file 196 | puppet.manifests_path = ["vm", "/vagrant/manifests"] 197 | puppet.options = "--verbose --modulepath /vagrant/modules" 198 | # puppet.options = "--verbose" 199 | if block_given? 200 | yield( puppet ) 201 | end 202 | 203 | # Check if the hostname is a proper string (won't be if config is an override config) 204 | # If string, then set the vagrant_hostname facter fact automatically so base::hostname works 205 | if config.vm.hostname.is_a?(String) 206 | puppet.facter["vagrant_hostname"] = config.vm.hostname 207 | end 208 | 209 | end 210 | end 211 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Vagrant + Percona 2 | 3 | ## Introduction 4 | 5 | This repository contains tools to build consistent environments for testing Percona software on a variety of platforms. This includes EC2 and VirtualBox for now, but more are possible going forward. 6 | 7 | Principles/goals of this environment: 8 | 9 | * Extremely reusable 10 | * Small manifests to be used by multiple vagrant providers to combine components for needed boxes 11 | * Vagrantfiles are very descriptive about the whole environment needed. Preference given to making modules configurable rather than custom. 12 | * Useful for: 13 | * Conference tutorial environments 14 | * Training classes 15 | * Experimentation 16 | * Benchmarking 17 | * Manifest install categories: 18 | * MySQL and variants 19 | * MySQL tools 20 | * Benchmarking tools 21 | * Sample databases 22 | * Misc: local repos for conference VMs, etc. 23 | 24 | ## Walkthrough 25 | 26 | This section should get you up and running. 27 | 28 | ### Software Requirements 29 | 30 | * Vagrant 1.6+: http://vagrantup.com 31 | * Vagrant AWS Plugin (optional): 32 | 33 | ``` 34 | vagrant plugin install vagrant-aws 35 | ``` 36 | 37 | * VirtualBox: https://www.virtualbox.org (optional) 38 | * VMware Fusion (not supported yet, but feasible) 39 | * Vagrant Host Manager Plugin 40 | 41 | ``` 42 | vagrant plugin install vagrant-hostmanager 43 | ``` 44 | 45 | ### Openstack Setup 46 | 47 | For this to run, you'll need a custom Vagrant box with an image to boot from on your Openstack cloud. I've created a CentOS one here: https://github.com/grypyrg/packer-percona, but it would need to be rebuilt for other clouds. 48 | 49 | Perconians can use a prebuilt image in our Openstack lab with this command: 50 | 51 | ``` 52 | vagrant box add grypyrg/centos-x86_64 --provider openstack 53 | ``` 54 | 55 | You'll also need your secrets set up in ~/.openstack_secrets: 56 | 57 | ```yaml 58 | --- 59 | endpoint: http://controller:5000/v2.0/tokens 60 | tenant: tenant_name 61 | username: your_user 62 | password: your_pw 63 | keypair_name: your_keypair_name 64 | private_key_path: the_path_to_your_pem_file 65 | ``` 66 | 67 | Finally, you'll need the vagrant-openstack-plugin: 68 | 69 | ``` 70 | vagrant plugin install vagrant-openstack-plugin 71 | ``` 72 | 73 | ### AWS Setup 74 | 75 | You can skip this section if you aren't planning on using AWS.
76 | 77 | In a nutshell, you need this: 78 | 79 | * AWS access key 80 | * AWS secret access key 81 | * A keypair name and path for each AWS region you intend to use 82 | * Whatever security groups you'll need for the environments you intend to launch. 83 | 84 | #### AWS Details 85 | 86 | You'll need an AWS account set up, with the following information in a file called ~/.aws_secrets: 87 | 88 | ```yaml 89 | access_key_id: YOUR_ACCESS_KEY 90 | secret_access_key: THE_ASSOCIATED_SECRET_KEY 91 | keypair_name: KEYPAIR_ID 92 | keypair_path: PATH_TO_KEYPAIR_PEM 93 | instance_name_prefix: SOME_NAME_PREFIX 94 | default_vpc_subnet_id: subnet-896602d0 95 | ``` 96 | 97 | #### Multi-region 98 | 99 | Multiple AWS regions are supported by adding a 'regions' hash to the .aws_secrets file: 100 | 101 | ```yaml 102 | access_key_id: YOUR_ACCESS_KEY 103 | secret_access_key: THE_ASSOCIATED_SECRET_KEY 104 | keypair_name: jay 105 | keypair_path: /Users/jayj/.ssh/jay-us-east-1.pem 106 | instance_name_prefix: Jay 107 | default_vpc_subnet_id: subnet-896602d0 108 | regions: 109 | us-east-1: 110 | keypair_name: jay 111 | keypair_path: /Users/jayj/.ssh/jay-us-east-1.pem 112 | default_vpc_subnet_id: subnet-896602d0 113 | us-west-1: 114 | keypair_name: jay 115 | keypair_path: /Users/jayj/.ssh/jay-us-west-1.pem 116 | eu-west-1: 117 | keypair_name: jay 118 | keypair_path: /Users/jayj/.ssh/jay-eu-west-1.pem 119 | ``` 120 | 121 | Note that the default 'keypair_name' and 'keypair_path' can still be used. The region will default to 'us-east-1' unless you specifically override it. 122 | 123 | #### Boxes and Multiple AWS Regions 124 | 125 | AMIs are region-specific. The AWS Vagrant boxes you use must include AMIs for each region in which you wish to deploy. 126 | 127 | For an example, see the regions listed here: https://vagrantcloud.com/grypyrg/centos-x86_64. 128 | 129 | Packer, which is used to build this box, can be configured to add more regions if desired, but that requires building a new box. 130 | 131 | #### AWS VPC Integration 132 | 133 | The latest versions of the grypyrg/centos-x86_64 boxes require a VPC, since AWS now requires a VPC for all instances. 134 | 135 | As shown in the example above, you must set the `default_vpc_subnet_id` in the ~/.aws_secrets file. You can override this on a per-region basis. 136 | 137 | You can also pass a `subnet_id` into the `provider_aws` method using an override in your Vagrantfile. 138 | 139 | ### Clone this repo 140 | 141 | ```bash 142 | git clone https://github.com/grypyrg/vagrant-percona.git 143 | cd vagrant-percona 144 | git submodule init 145 | git submodule update --recursive 146 | ``` 147 | 148 | ### Launch the box 149 | 150 | Launch your first box -- ps_sysbench is a good start. 151 | 152 | ```bash 153 | ln -sf Vagrantfile.ps_sysbench.rb Vagrantfile 154 | vagrant up 155 | vagrant ssh 156 | ``` 157 | 158 | ### Create Environments with create-new-env.sh 159 | 160 | When you create a lot of vagrant environments with vagrant-percona, creating and renaming those Vagrantfile files can quickly get messy. 161 | 162 | The repository contains a small script that creates a new environment: it builds a new directory with the proper Vagrantfile files and links to the puppet code. 163 | 164 | This allows you to have many Vagrant environments configured simultaneously.
165 | 166 | ```bash 167 | vagrant-percona$ ./create-new-env.sh single_node ~/vagrant/testing-issue-428 168 | Creating 'single_node' Environment 169 | 170 | vagrant-percona$ cd ~/vagrant/testing-issue-428 171 | ~/vagrant/testing-issue-428$ vagrant up --provider=aws 172 | ~/vagrant/testing-issue-428$ vagrant ssh 173 | ``` 174 | 175 | ## Master/Slave 176 | 177 | This Vagrantfile will launch 2 (or more; edit the file and uncomment the proper build line) MySQL servers in either VirtualBox or AWS. Running the ms-setup.pl script will set the first instance to be the master and all remaining nodes to be async slaves. 178 | 179 | ```bash 180 | ln -sf Vagrantfile.ms.rb Vagrantfile 181 | vagrant up --provider [aws|virtualbox] 182 | ./ms-setup.pl 183 | ``` 184 | 185 | ## PXC 186 | 187 | This Vagrantfile will launch 3 Percona XtraDB Cluster 5.7 nodes in either VirtualBox or AWS. The InnoDB Buffer Pool is set to 128MB. The first node is automatically bootstrapped; the remaining 2 nodes then join it to form the cluster. 188 | 189 | Each VirtualBox instance is launched with 256MB of memory. 190 | 191 | Each EC2 instance will use the `m3.medium` instance type, which has 3.75GB of RAM. 192 | 193 | ```bash 194 | ln -sf Vagrantfile.pxc.rb Vagrantfile 195 | vagrant up 196 | ``` 197 | 198 | __NOTE:__ Because Vagrant builds AWS instances in parallel, there is no guarantee "node 1" will bootstrap before the other 2. If this happens, node 2 and node 3 will be unable to join the cluster. It is therefore recommended that you launch node 1 manually first, then launch the remaining nodes. _(This is not an issue with VirtualBox, as parallel builds are not supported.)_ 199 | 200 | Example: 201 | 202 | ```bash 203 | vagrant up node1 && sleep 5 && vagrant up 204 | ``` 205 | 206 | ## PXC (Big) 207 | 208 | This Vagrantfile will launch 3 Percona XtraDB Cluster 5.7 nodes in either VirtualBox or AWS. The InnoDB Buffer Pool is set to _12GB_. 209 | 210 | __WARNING:__ This requires a virtual machine with 15GB of RAM. Most consumer laptops and desktops do not have enough RAM to run multiple nodes of this configuration. 211 | 212 | Each EC2 instance will use the `m3.xlarge` instance type, which has 15GB of RAM. 213 | 214 | ```bash 215 | ln -sf Vagrantfile.pxc-big.rb Vagrantfile 216 | vagrant up 217 | ``` 218 | 219 | __NOTE:__ Because Vagrant builds AWS instances in parallel, there is no guarantee "node 1" will bootstrap before the other 2. If this happens, node 2 and node 3 will be unable to join the cluster. It is therefore recommended that you launch node 1 manually first, then launch the remaining nodes. _(This is not an issue with VirtualBox, as parallel builds are not supported.)_ 220 | 221 | Example: 222 | 223 | ```bash 224 | vagrant up node1 && sleep 5 && vagrant up 225 | ``` 226 | 227 | ## Using this repo to create benchmarks 228 | 229 | I use a system where I define this repo as a submodule in a test-specific git repo and do all the customization for the test there. 230 | 231 | ```bash 232 | git init some-test 233 | cd some-test 234 | git submodule add git@github.com:grypyrg/vagrant-percona.git 235 | ln -s vagrant-percona/lib 236 | ln -s vagrant-percona/manifests 237 | ln -s vagrant-percona/modules 238 | cp vagrant-percona/Vagrantfile.of_your_choice Vagrantfile 239 | vi Vagrantfile # customize for your test 240 | vagrant up 241 | ...
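# run your benchmark here; when finished, destroy the VMs (see Cleanup below).
# A single machine can also be targeted, e.g.: vagrant destroy node1 -f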
242 | ``` 243 | 244 | ## Cleanup 245 | 246 | ### Shut down the vagrant instance(s) 247 | 248 | ``` 249 | vagrant destroy 250 | ``` 251 | --------------------------------------------------------------------------------