├── states ├── disks │ ├── init.sls │ └── README.md ├── ssd │ ├── README.md │ ├── files │ │ ├── 60-schedulers.rules │ │ ├── dofstrim.sh │ │ ├── data-format-disks-ext4.sh │ │ └── data-format-disks-xfs.sh │ └── init.sls ├── ipv6 │ ├── README.md │ └── disable.sls ├── local │ ├── README.md │ └── init.sls ├── users │ ├── files │ │ └── vimrc │ ├── README.md │ └── init.sls ├── chrony │ ├── README.md │ ├── client.sls │ ├── server.sls │ └── files │ │ ├── chrony.conf.client │ │ └── chrony.conf.server ├── cpuspeed │ └── init.sls ├── ntp │ ├── server.sls │ ├── client.sls │ ├── README.md │ └── files │ │ └── ntp.conf ├── openstack │ ├── neutron │ │ ├── files │ │ │ └── neutron-compute.sh │ │ ├── compute.sls │ │ └── controller.sls │ ├── nova │ │ ├── files │ │ │ ├── nova-compute.sh │ │ │ └── nova-controller.sh │ │ ├── compute.sls │ │ └── controller.sls │ ├── yumrepo │ │ └── init.sls │ ├── mysql │ │ ├── map.jinja │ │ └── init.sls │ ├── memcached │ │ └── init.sls │ ├── rabbitmq │ │ └── init.sls │ ├── clean-all.sh │ ├── glance │ │ ├── files │ │ │ └── glance.sh │ │ └── init.sls │ ├── keystone │ │ ├── files │ │ │ └── keystone.sh │ │ └── init.sls │ ├── auth │ │ └── init.sls │ ├── AddComputeNodes.md │ └── DeployWindowsCloudInstance.md ├── selinux │ ├── init.sls │ └── README.md ├── nfs │ └── README.md ├── kernel │ ├── init.sls │ └── README.md ├── packages │ └── init.sls ├── docker │ ├── init.sls │ ├── README.md │ └── registry.sls ├── ulimit │ ├── init.sls │ └── files │ │ └── 90-saltstack-base.conf ├── network │ ├── init.sls │ ├── bond.sls │ ├── wireshark.md │ ├── files │ │ └── set-irq-affinity.sh │ └── README.md ├── cpupower │ ├── init.sls │ └── README.md ├── grub │ └── init.sls ├── irqbalance │ └── init.sls ├── iptables │ ├── init.sls │ ├── files │ │ └── netfilter.conf │ └── README.md ├── pxeserver │ ├── files │ │ ├── pxelinux.cfg.default │ │ ├── tftp │ │ ├── dhcpd.conf │ │ ├── reposync.sh │ │ ├── ks6.cfg │ │ └── ks7.cfg │ ├── README.md │ └── init.sls ├── yumrepo │ ├── files │ │ └── local.repo │ ├── init.sls │ └── README.md ├── top.sls ├── tuned │ └── files │ │ └── tuned.conf └── pypiserver │ └── README.md ├── pillar ├── top.sls ├── docker-registry.sls └── superlab.sls ├── notes ├── 20150312_215209.jpg ├── 20180310_231645.jpg ├── vim.md ├── run-states.md ├── highstate.md ├── operations.md ├── README.md ├── memory.md ├── fpm_nodejs.md ├── multimedia.md ├── pxe-install.md ├── saltstack-base-setup.md ├── linux-tcp-tuning.md ├── network-setup.md ├── numa-cpu.md ├── grains.md ├── setup-salt.md ├── creating-bootable-media.md ├── centos-7-manual.md ├── ssl-howto.md ├── dell-powerconnect.md └── fpm_nginx_luajit.md ├── reactor └── sync_grains.sls ├── LICENSE └── README.md /states/disks/init.sls: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /states/ssd/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### ssd 3 | -------------------------------------------------------------------------------- /states/ipv6/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### ipv6 3 | -------------------------------------------------------------------------------- /states/local/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### local 3 | -------------------------------------------------------------------------------- /states/users/files/vimrc: 
-------------------------------------------------------------------------------- 1 | :set expandtab 2 | :set tabstop=4 3 | -------------------------------------------------------------------------------- /pillar/top.sls: -------------------------------------------------------------------------------- 1 | 2 | base: 3 | '*': 4 | - superlab 5 | 6 | -------------------------------------------------------------------------------- /states/chrony/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### Chrony Client and Server 3 | 4 | -------------------------------------------------------------------------------- /states/local/init.sls: -------------------------------------------------------------------------------- 1 | 2 | /etc/rc.d/rc.local: 3 | file.managed: 4 | - mode: 754 5 | 6 | 7 | -------------------------------------------------------------------------------- /notes/20150312_215209.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkilcy/saltstack-base/HEAD/notes/20150312_215209.jpg -------------------------------------------------------------------------------- /notes/20180310_231645.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dkilcy/saltstack-base/HEAD/notes/20180310_231645.jpg -------------------------------------------------------------------------------- /reactor/sync_grains.sls: -------------------------------------------------------------------------------- 1 | sync_grains: 2 | local.saltutil.sync_grains: 3 | - tgt: {{ data['id'] }} 4 | -------------------------------------------------------------------------------- /states/cpuspeed/init.sls: -------------------------------------------------------------------------------- 1 | 2 | cpuspeed: 3 | service.dead: 4 | - name: cpuspeed 5 | - enable: False 6 | 7 | -------------------------------------------------------------------------------- /states/ntp/server.sls: -------------------------------------------------------------------------------- 1 | 2 | ntp: 3 | pkg.installed: 4 | - name: ntp 5 | 6 | ntpd: 7 | service.running: 8 | - enable: True 9 | 10 | -------------------------------------------------------------------------------- /states/openstack/neutron/files/neutron-compute.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | openstack extension list --network 4 | 5 | openstack network agent list 6 | 7 | -------------------------------------------------------------------------------- /states/selinux/init.sls: -------------------------------------------------------------------------------- 1 | 2 | 3 | /etc/selinux/config: 4 | file.replace: 5 | - pattern: 'SELINUX=enforcing' 6 | - repl: 'SELINUX=disabled' 7 | 8 | -------------------------------------------------------------------------------- /states/nfs/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### NFS 3 | 4 | ### References 5 | - [http://forum.osmc.tv/showthread.php?tid=6825](http://forum.osmc.tv/showthread.php?tid=6825) 6 | -------------------------------------------------------------------------------- /states/kernel/init.sls: -------------------------------------------------------------------------------- 1 | 2 | {% for k,v in salt['pillar.get']('kernel:sysctl').items() %} 3 | {{ k }}: 4 | sysctl.present: 5 | - value: {{ v }} 6 | {% endfor %} 7 | 8 | 
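The kernel state above renders one `sysctl.present` entry for every key under the `kernel:sysctl` pillar. A minimal pillar sketch of that structure, assuming illustrative keys and values (none of these are taken from the repository's pillar):

```yaml
# Hypothetical pillar data consumed by states/kernel/init.sls
kernel:
  sysctl:
    vm.swappiness: 10
    net.core.somaxconn: 4096
```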
-------------------------------------------------------------------------------- /notes/vim.md: -------------------------------------------------------------------------------- 1 | 2 | ### vim 3 | 4 | ``` 5 | :set expandtab 6 | :set tabstop=4 7 | :set shiftwidth=4 8 | ``` 9 | 10 | http://vim.wikia.com/wiki/Shifting_blocks_visually 11 | 12 | 13 | -------------------------------------------------------------------------------- /pillar/docker-registry.sls: -------------------------------------------------------------------------------- 1 | docker-registry: 2 | image: 'registry:2' 3 | username: local 4 | password: local 5 | volume: /var/lib/registry 6 | host: ws2.lab.local 7 | port: 5000 8 | -------------------------------------------------------------------------------- /states/ssd/files/60-schedulers.rules: -------------------------------------------------------------------------------- 1 | # set deadline scheduler for non-rotating disks 2 | ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="noop" 3 | 4 | -------------------------------------------------------------------------------- /notes/run-states.md: -------------------------------------------------------------------------------- 1 | 2 | ### Run States 3 | 4 | 1. From the Salt master, run the highstate: `salt state.highstate` 5 | 2. Verify that you can ssh as the devops user to the new machine 6 | 7 | -------------------------------------------------------------------------------- /notes/highstate.md: -------------------------------------------------------------------------------- 1 | 2 | ### Run Initial Highstate 3 | 4 | ``` 5 | salt 'workstation1' --state-output=mixed state.highstate test=True 6 | salt 'workstation1' --state-output=mixed state.highstate 7 | ``` 8 | 9 | -------------------------------------------------------------------------------- /states/packages/init.sls: -------------------------------------------------------------------------------- 1 | 2 | recommended_packages: 3 | pkg.installed: 4 | - pkgs: 5 | {% for pkg in salt['pillar.get']('packages:recommended') %} 6 | - {{ pkg }} 7 | {% endfor %} 8 | 9 | 10 | -------------------------------------------------------------------------------- /notes/operations.md: -------------------------------------------------------------------------------- 1 | 2 | ### Operations 3 | 4 | 5 | Sync repo to USB stick: `rsync -avh /var/www/html/repo/ /mnt/repo` 6 | Sync repo to another workstation: `rsync -avhz --delete /var/www/html/repo/ workstation1:/var/www/html/repo` 7 | 8 | -------------------------------------------------------------------------------- /states/docker/init.sls: -------------------------------------------------------------------------------- 1 | 2 | docker-engine: 3 | pkg.installed: 4 | - version: 1.13.1-1.el7 5 | # - version: 1.10.3-1.el7 6 | 7 | docker_service: 8 | service.running: 9 | - name: docker 10 | - enable: True 11 | 12 | -------------------------------------------------------------------------------- /states/ulimit/init.sls: -------------------------------------------------------------------------------- 1 | /etc/security/limits.d/99-saltstack-base.conf 2 | file.managed: 3 | - name: /etc/security/limits.d/90-saltstack-base.conf 4 | - source: salt://ulimit/files/90-saltstack-base.conf 5 | - replace: True 6 | -------------------------------------------------------------------------------- /states/network/init.sls: -------------------------------------------------------------------------------- 1 | 2 | 
/usr/local/bin/set-irq-affinity.sh: 3 | file.managed: 4 | - name: /usr/local/bin/set-irq-affinity.sh 5 | - source: salt://network/files/set-irq-affinity.sh 6 | - replace: True 7 | - mode: 755 8 | 9 | -------------------------------------------------------------------------------- /states/cpupower/init.sls: -------------------------------------------------------------------------------- 1 | 2 | cpupower: 3 | cmd.run: 4 | - name: '/bin/cpupower frequency-set --governor performance' 5 | 6 | file.append: 7 | - name: /etc/rc.d/rc.local 8 | - text: 9 | - '/bin/cpupower frequency-set --governor performance' 10 | -------------------------------------------------------------------------------- /notes/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### Notes 3 | 4 | Red Hat Documentation: 5 | - [Red Hat Enterprise Linux 6 Documentation](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/index.html) 6 | - [Red Hat Enterprise Linux 7 Documentation](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/index.html) 7 | -------------------------------------------------------------------------------- /states/openstack/nova/files/nova-compute.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | openstack hypervisor list 4 | 5 | su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova 6 | 7 | openstack compute service list 8 | 9 | openstack catalog list 10 | 11 | openstack image list 12 | 13 | nova-status upgrade check 14 | 15 | 16 | -------------------------------------------------------------------------------- /states/grub/init.sls: -------------------------------------------------------------------------------- 1 | 2 | #/etc/default/grub: 3 | # file.replace: 4 | # - pattern: 'GRUB_CMDLINE_LINUX.*' 5 | # - repl: 'GRUB_CMDLINE_LINUX="elevator=deadline transparent_hugepage=never numa=off crashkernel=auto rhgb quiet"' 6 | 7 | #grub2-mkconfig: 8 | # cmd.run: 9 | # - name: 'grub2-mkconfig -o /boot/grub2/grub.cfg' 10 | 11 | -------------------------------------------------------------------------------- /states/irqbalance/init.sls: -------------------------------------------------------------------------------- 1 | 2 | 3 | /etc/sysconfig/irqbalance: 4 | file.replace: 5 | - pattern: '#IRQBALANCE_ONESHOT=' 6 | - repl: 'IRQBALANCE_ONESHOT=on' 7 | 8 | irqbalance_service: 9 | service.running: 10 | - name: irqbalance 11 | - enable: True 12 | - watch: 13 | - file: /etc/sysconfig/irqbalance 14 | 15 | 16 | -------------------------------------------------------------------------------- /states/iptables/init.sls: -------------------------------------------------------------------------------- 1 | 2 | # Important - iptables is required for OpenStack 3 | 4 | iptables: 5 | service.dead: 6 | - name: iptables 7 | - enable: False 8 | 9 | file.managed: 10 | - name: /etc/modprobe.d/netfilter.conf 11 | - source: salt://iptables/files/netfilter.conf 12 | - user: root 13 | - group: root 14 | - mode: 644 15 | 16 | -------------------------------------------------------------------------------- /states/ipv6/disable.sls: -------------------------------------------------------------------------------- 1 | 2 | base-/etc/sysctl.d/99-salt.conf: 3 | file.managed: 4 | - name: /etc/sysctl.d/99-salt.conf 5 | 6 | net.ipv6.conf.all.disable_ipv6: 7 | sysctl.present: 8 | - value: 1 9 | 10 | net.ipv6.conf.default.disable_ipv6: 11 | sysctl.present: 12 | - value: 1 13 | 14 | 
net.ipv6.conf.lo.disable_ipv6: 15 | sysctl.present: 16 | - value: 1 17 | -------------------------------------------------------------------------------- /states/ntp/client.sls: -------------------------------------------------------------------------------- 1 | ntp: 2 | pkg.installed: 3 | - name: ntp 4 | 5 | ntpd: 6 | service.running: 7 | - enable: True 8 | - watch: 9 | - file: /etc/ntp.conf 10 | 11 | file.managed: 12 | - name: /etc/ntp.conf 13 | - source: salt://ntp/files/ntp.conf 14 | - user: root 15 | - group: root 16 | - mode: 644 17 | - require: 18 | - pkg: ntp 19 | 20 | -------------------------------------------------------------------------------- /states/openstack/yumrepo/init.sls: -------------------------------------------------------------------------------- 1 | openstack-repo: 2 | pkgrepo.managed: 3 | - name: local-centos-openstack-ocata 4 | - humanname: local-centos-openstack-ocata 5 | - baseurl: {{ salt['pillar.get']('openstack:repo:baseurl') }} 6 | - gpgcheck: 0 7 | - enabled: True 8 | cmd.run: 9 | - name: yum -y clean all 10 | 11 | python-openstackclient: 12 | pkg.installed 13 | 14 | -------------------------------------------------------------------------------- /states/chrony/client.sls: -------------------------------------------------------------------------------- 1 | chrony: 2 | pkg.installed: 3 | - name: chrony 4 | 5 | chronyd: 6 | service.running: 7 | - enable: True 8 | - watch: 9 | - file: /etc/chrony.conf 10 | 11 | file.managed: 12 | - name: /etc/chrony.conf 13 | - source: salt://chrony/files/chrony.conf.client 14 | - user: root 15 | - group: root 16 | - mode: 644 17 | - require: 18 | - pkg: chrony 19 | 20 | -------------------------------------------------------------------------------- /states/chrony/server.sls: -------------------------------------------------------------------------------- 1 | chrony: 2 | pkg.installed: 3 | - name: chrony 4 | 5 | chronyd: 6 | service.running: 7 | - enable: True 8 | - watch: 9 | - file: /etc/chrony.conf 10 | 11 | file.managed: 12 | - name: /etc/chrony.conf 13 | - source: salt://chrony/files/chrony.conf.server 14 | - user: root 15 | - group: root 16 | - mode: 644 17 | - require: 18 | - pkg: chrony 19 | 20 | -------------------------------------------------------------------------------- /states/selinux/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### selinux 3 | 4 | 1. Disable SELinux: 5 | 6 | ```bash 7 | sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 8 | ``` 9 | 10 | 2. Reboot to implement the change: `reboot now` 11 | 12 | 3. Verify that SELinux and iptables are disabled. 
13 | ```bash 14 | [root@workstation1 ~]# sestatus 15 | SELinux status: disabled 16 | [root@workstation1 ~]# 17 | ``` 18 | -------------------------------------------------------------------------------- /states/iptables/files/netfilter.conf: -------------------------------------------------------------------------------- 1 | alias ip_tables off 2 | alias iptable off 3 | alias iptable_nat off 4 | alias iptable_filter off 5 | alias x_tables off 6 | alias nf_nat off 7 | alias nf_conntrack_ipv4 off 8 | alias nf_conntrack off 9 | 10 | alias ip6_tables off 11 | alias ip6table off 12 | alias ip6table_nat off 13 | alias ip6table_filter off 14 | alias nf_nat_ipv6 off 15 | alias nf_conntrack_ipv6 off 16 | alias nf_conntrack off 17 | -------------------------------------------------------------------------------- /states/openstack/mysql/map.jinja: -------------------------------------------------------------------------------- 1 | {% set mysql = salt['grains.filter_by']({ 2 | 'Debian': { 3 | 'server': 'mysql-server', 4 | 'client': 'mysql-client', 5 | 'service': 'mysql', 6 | 'config': '/etc/mysql/my.cnf', 7 | 'python': 'python-mysqldb', 8 | }, 9 | 'RedHat': { 10 | 'server': 'mariadb-server', 11 | 'client': 'mariadb', 12 | 'service': 'mariadb', 13 | 'config': '/etc/my.cnf.d/openstack.cnf', 14 | 'python': 'python2-PyMySQL', 15 | }, 16 | }, merge=salt['pillar.get']('mysql:lookup')) %} 17 | -------------------------------------------------------------------------------- /states/openstack/memcached/init.sls: -------------------------------------------------------------------------------- 1 | 2 | memcached-pkgs: 3 | pkg.installed: 4 | - pkgs: 5 | - memcached 6 | - python-memcached 7 | 8 | /etc/sysconfig/memcached: 9 | file.replace: 10 | - pattern: 'OPTIONS="-l 127.0.0.1,::1"' 11 | - repl: 'OPTIONS="-l 127.0.0.1,{{ salt['grains.get']('fqdn_ip4:0') }}"' 12 | 13 | memcached-service: 14 | service.running: 15 | - name: memcached 16 | - enable: True 17 | - watch: 18 | - file: /etc/sysconfig/memcached 19 | 20 | -------------------------------------------------------------------------------- /states/ulimit/files/90-saltstack-base.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Reference: https://docs.mongodb.com/manual/reference/ulimit/ 3 | # 4 | * soft nproc 65535 5 | * hard nproc 65535 6 | * soft nofile 65535 7 | * hard nofile 65535 8 | * soft fsize unlimited 9 | * hard fsize unlimited 10 | * soft cpu unlimited 11 | * hard cpu unlimited 12 | * soft as unlimited 13 | * hard as unlimited 14 | -------------------------------------------------------------------------------- /states/ssd/init.sls: -------------------------------------------------------------------------------- 1 | 2 | ssd_trim: 3 | file.managed: 4 | - name: /usr/local/bin/dofstrim.sh 5 | - source: salt://ssd/files/dofstrim.sh 6 | - user: root 7 | - group: root 8 | - mode: 755 9 | 10 | ssd-cron.daily: 11 | file.append: 12 | - name: /etc/crontab 13 | - text: | 14 | 0 2 * * * root /usr/local/bin/dofstrim.sh > /var/log/dofstrim.out 2>&1 15 | 16 | scheduler.rules: 17 | file.managed: 18 | - name: /etc/udev/rules.d/60-schedulers.rules 19 | - source: salt://ssd/files/60-schedulers.rules 20 | 21 | -------------------------------------------------------------------------------- /states/users/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### users 3 | 4 | Configures root user environment and users listed in the `users_list` pillar. 
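A minimal sketch of that pillar, assuming the `user_list` key and the fields (`name`, `group`, `shell`, `ssh_public_key`) that `states/users/init.sls` reads; every value below is a placeholder:

```yaml
user_list:
  - name: devops
    group: devops
    shell: /bin/bash
    ssh_public_key: 'ssh-rsa AAAA...placeholder-key... devops@workstation1'
```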
5 | 6 | - Creates a colorful prompt for root user 7 | - Installs .vimrc from `vim` state 8 | - For each user in the users_list pillar value: 9 | - creates the user and sets the default shell 10 | - adds the user to the sudoers list so it can execute a command as root without a password 11 | - creates the authorized_keys file using the public key defined in the pillar 12 | - Installs .vimrc from `vim` state 13 | - Creates a colorful prompt for the user 14 | 15 | 16 | -------------------------------------------------------------------------------- /states/pxeserver/files/pxelinux.cfg.default: -------------------------------------------------------------------------------- 1 | default menu.c32 2 | prompt 0 3 | timeout 300 4 | ONTIMEOUT local 5 | 6 | menu title ########## PXE Boot Menu ########## 7 | 8 | label 1 9 | menu label ^1) Install CentOS 7 - Kickstart 10 | kernel centos/7/vmlinuz 11 | append initrd=centos/7/initrd.img method=ftp://10.0.0.6/pub/centos/7 devfs=nomount ks=http://10.0.0.6/ks7.cfg 12 | 13 | label 2 14 | menu label ^2) Install CentOS 7 - Manual 15 | kernel centos/7/vmlinuz 16 | append initrd=centos/7/initrd.img method=ftp://10.0.0.6/pub/centos/7 devfs=nomount 17 | 18 | label 3 19 | menu label ^3) Boot from local drive 20 | -------------------------------------------------------------------------------- /states/kernel/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### Kernel 3 | 4 | - List open files: `lsof` 5 | - List kernel parameters: `sysctl -a` 6 | - Show kernel ring buffer: `dmesg | tail` 7 | 8 | #### Memory: 9 | 10 | - Memory in MB: `free -m` 11 | - Memory in GB: `free -g` 12 | - Virtual memory stats in MB: `vmstat -S M` 13 | - Swap space: `swapon -s` 14 | - Top batch-mode, run once: `top -b -n 1` 15 | - To free pagecache: `echo 1 > /proc/sys/vm/drop_caches` 16 | - To free dentries and inodes: `echo 2 > /proc/sys/vm/drop_caches` 17 | - To free pagecache, dentries and inodes: `echo 3 > /proc/sys/vm/drop_caches` 18 | 19 | #### References 20 | 21 | 22 | -------------------------------------------------------------------------------- /states/ssd/files/dofstrim.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "dofstrim started `date`" 4 | 5 | # this cronjob will discard unused blocks on the ssd mounted filesystems 6 | 7 | # get the locally mounted block devices - those starting with "/dev: 8 | # run df -k, pipe the result through grep and save the sixth field in 9 | # in the mountpoint array 10 | mountpoint=( $(df -k | grep ^/dev | grep -v '/dev/fuse' | awk '{print $6}') ) 11 | 12 | # loop through the array and run fstrim on every mountpoint 13 | for i in "${mountpoint[@]}" 14 | do 15 | echo "trimming $i" 16 | /usr/sbin/fstrim -v $i; 17 | done 18 | 19 | echo "dofstrim finished `date`" 20 | 21 | -------------------------------------------------------------------------------- /states/openstack/rabbitmq/init.sls: -------------------------------------------------------------------------------- 1 | 2 | rabbitmq-server: 3 | 4 | pkg.installed: 5 | - name: rabbitmq-server 6 | 7 | service.running: 8 | - name: rabbitmq-server 9 | - enable: True 10 | - reload: True 11 | - require: 12 | - pkg: rabbitmq-server 13 | 14 | rabbitmq_user.present: 15 | - name: {{ salt['pillar.get']('openstack:rabbitmq:user') }} 16 | - password: {{ salt['pillar.get']('openstack:rabbitmq:pass') }} 17 | - force: True 18 | - tags: 19 | - monitoring 20 | - user 21 | - perms: 22 | - '/': 23 | - '.*' 
24 | - '.*' 25 | - '.*' 26 | # - runas: rabbitmq 27 | 28 | -------------------------------------------------------------------------------- /states/ssd/files/data-format-disks-ext4.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #exit 4 | 5 | umount /data 6 | dd if=/dev/zero of=/dev/sdb bs=1M count=512 7 | sed -i "/data/d" /etc/fstab 8 | 9 | for DEV in /dev/sdb 10 | do 11 | echo $DEV 12 | parted ${DEV} mklabel gpt -s 13 | parted ${DEV} mkpart primary 2048s 100% -s 14 | parted ${DEV} align-check optimal 1 15 | mkfs.ext4 ${DEV}1 16 | tune2fs -m0 -c0 -C0 -i0 ${DEV}1 17 | parted ${DEV} print 18 | done 19 | 20 | UUID=`blkid | grep /dev/sdb1 | awk {'print $2'}`; echo "${UUID} /data ext4 noatime,data=ordered,barrier=1,discard 0 0" >> /etc/fstab 21 | mkdir -p /data 22 | mount /data 23 | -------------------------------------------------------------------------------- /states/yumrepo/files/local.repo: -------------------------------------------------------------------------------- 1 | [local-base] 2 | name=CentOS-$releasever - Base 3 | baseurl=http://yumrepo/repo/centos/$releasever/os/$basearch/ 4 | gpgcheck=0 5 | enabled=1 6 | [local-updates] 7 | name=CentOS-$releasever - Updates 8 | baseurl=http://yumrepo/repo/centos/$releasever/updates/$basearch/ 9 | gpgcheck=0 10 | enabled=1 11 | [local-extras] 12 | name=CentOS-$releasever - Extras 13 | baseurl=http://yumrepo/repo/centos/$releasever/extras/$basearch/ 14 | gpgcheck=0 15 | enabled=1 16 | [local-epel] 17 | name=Extra Packages for Enterprise Linux $releasever - $basearch 18 | baseurl=http://yumrepo/repo/centos/$releasever/epel/$basearch/ 19 | gpgcheck=0 20 | enabled=1 21 | -------------------------------------------------------------------------------- /states/ssd/files/data-format-disks-xfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #exit 4 | 5 | umount /data 6 | dd if=/dev/zero of=/dev/sdb bs=1M count=512 7 | sed -i "/data/d" /etc/fstab 8 | 9 | for DEV in /dev/sdb 10 | do 11 | echo $DEV 12 | parted ${DEV} mklabel gpt -s 13 | parted ${DEV} mkpart primary 2048s 100% -s 14 | parted ${DEV} align-check optimal 1 15 | mkfs.xfs -f -L /data -d agcount=64 -l size=128m,version=2 ${DEV}1 16 | parted ${DEV} print 17 | done 18 | 19 | UUID=`blkid | grep /dev/sdb1 | awk {'print $3'}`; echo "${UUID} /data xfs rw,noatime,logbufs=8,logbsize=256k,inode64 0 0" >> /etc/fstab 20 | mkdir -p /data 21 | mount /data 22 | 23 | -------------------------------------------------------------------------------- /states/yumrepo/init.sls: -------------------------------------------------------------------------------- 1 | 2 | #clean_yumrepo: 3 | # file.directory: 4 | # - name: /etc/yum.repos.d 5 | # - clean: True 6 | # - exclude_pat: "*local.repo*" 7 | 8 | /etc/yum.repos.d/CentOS-Base.repo: 9 | ini.options_present: 10 | - sections: 11 | base: 12 | enabled: 0 13 | updates: 14 | enabled: 0 15 | extras: 16 | enabled: 0 17 | 18 | /etc/yum.repos.d/local.repo: 19 | file.managed: 20 | - name: /etc/yum.repos.d/local.repo 21 | - source: salt://yumrepo/files/local.repo 22 | - replace: True 23 | 24 | yum_clean_all: 25 | cmd.run: 26 | - name: 'yum clean all' 27 | - require: 28 | - file: /etc/yum.repos.d/local.repo 29 | -------------------------------------------------------------------------------- /states/pxeserver/files/tftp: -------------------------------------------------------------------------------- 1 | # default: off 2 | # description: The tftp server serves 
files using the trivial file transfer \ 3 | # protocol. The tftp protocol is often used to boot diskless \ 4 | # workstations, download configuration files to network-aware printers, \ 5 | # and to start the installation process for some operating systems. 6 | service tftp 7 | { 8 | socket_type = dgram 9 | protocol = udp 10 | wait = yes 11 | user = root 12 | server = /usr/sbin/in.tftpd 13 | server_args = -s /var/lib/tftpboot 14 | disable = no 15 | per_source = 11 16 | cps = 100 2 17 | flags = IPv4 18 | } 19 | 20 | -------------------------------------------------------------------------------- /notes/memory.md: -------------------------------------------------------------------------------- 1 | 2 | ### Memory 3 | 4 | #### Clear RAM Memory Cache, Buffer and Swap Space on Linux 5 | 6 | - Clear PageCache only: `sync; echo 1 > /proc/sys/vm/drop_caches` 7 | - Clear dentries and inodes: `sync; echo 2 > /proc/sys/vm/drop_caches` 8 | - Clear PageCache, dentries and inodes: `sync; echo 3 > /proc/sys/vm/drop_caches` 9 | 10 | ``` 11 | ps -eo size,pid,user,command --sort -size | awk '{ hr=$1/1024 ; printf("%13.2f Mb ",hr) } { for ( x=4 ; x<=NF ; x++ ) { printf("%s ",$x) } print "" }' |cut -d "" -f2 | cut -d "-" -f1|head -30 12 | ``` 13 | 14 | ### References 15 | 16 | - [How to Clear RAM Memory Cache, Buffer and Swap Space on Linux](https://www.tecmint.com/clear-ram-memory-cache-buffer-and-swap-space-on-linux/) 17 | - [Linux Ate My RAM](https://www.linuxatemyram.com/) 18 | -------------------------------------------------------------------------------- /notes/fpm_nodejs.md: -------------------------------------------------------------------------------- 1 | 2 | ### Using FPM to create packages 3 | 4 | #### Setup FPM 5 | ``` 6 | yum groupinstall "Development Tools" 7 | yum install ruby-devel rubygems 8 | gem install fpm 9 | ``` 10 | 11 | #### Create NodeJS package 12 | ``` 13 | cd /usr/local/src 14 | 15 | wget http://nodejs.org/dist/node-v0.12.3.tar.gz 16 | tar zxvf node-latest.tar.gz 17 | cd node-v0.12.3 18 | ./configure --prefix=/usr/ 19 | make 20 | make install DESTDIR=/tmp/node-v0.12.3/ 21 | fpm -s dir -t rpm -n nodejs -v 0.12.3 -C /tmp/node-v0.12.3/ usr 22 | rpm -q -filesbypkg -p 23 | ``` 24 | 25 | 26 | #### References 27 | - [Effing Package Management](https://github.com/jordansissel/fpm) 28 | - [How do you install Node.JS on CentOS?](http://serverfault.com/questions/299288/how-do-you-install-node-js-on-centos) 29 | -------------------------------------------------------------------------------- /states/openstack/clean-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## exit 4 | 5 | echo "This is going to destroy the OpenStack cluster" 6 | echo "You have 10 seconds to abort...." 7 | sleep 10 8 | echo "Starting..." 9 | 10 | salt 'c*' cmd.run 'yum -y erase \*openstack\* ; yum -y erase libvirtd' 11 | salt 'c*' cmd.run 'rm -Rf /etc/nova ; rm -Rf /etc/neutron ; rm -Rf /etc/keystone ; rm -Rf /etc/glance ; rm -Rf /var/log/keystone ; rm -Rf /var/log/glance ; rm -Rf /var/log/nova ; rm -Rf /var/log/neutron' 12 | 13 | salt 'controller' cmd.run 'yum -y erase mod_wsgi httpd ; rm -Rf /etc/httpd ; rm -Rf /var/log/httpd' 14 | salt 'controller' cmd.run 'yum -y erase memcached rabbitmq-server' 15 | salt 'controller' cmd.run 'yum -y erase \*mariadb\* ; rm -Rf /var/lib/mysql ; rm -Rf /etc/my.cnf.db' 16 | 17 | echo "End of program." 
18 | -------------------------------------------------------------------------------- /states/yumrepo/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### yumrepo 3 | 4 | For configuring minions to use the local yum repository on the Salt master (aliased as `yumrepo`). 5 | 6 | Contents: 7 | - init.sls: 8 | - files: 9 | - `local.repo`: Points the base,updates,extras and epel repos to the local mirror aliased as `yumrepo` 10 | 11 | ### Notes 12 | 13 | - Install the EPEL on CentOS (requires CentOS-Extra repo): `yum install epel-release` 14 | - List all installed packages: `rpm -qa` 15 | - Show contents of an rpm: `rpm -q -filesbypkg -p gnome-applet-sensors-2.2.7-1.el6.rf.x86_64.rpm` 16 | - Show contents of a package: `repoquery --list openstack-selinux` 17 | - Download an rpm and all its dependencies: `repotrack salt-master` 18 | - List available repositories: `yum repolist` 19 | - Clean yum cache: `yum clean all` 20 | - Info about package: `yum info git` 21 | 22 | ### 23 | -------------------------------------------------------------------------------- /states/network/bond.sls: -------------------------------------------------------------------------------- 1 | 2 | /etc/sysconfig/network-scripts/ifcfg-bond0: 3 | file.managed: 4 | - name: /etc/sysconfig/network-scripts/ifcfg-bond0 5 | - contents: | 6 | DEVICE=bond0 7 | BOOTPROTO=static 8 | IPADDR={{ salt['grains.get']('fqdn_ip4:0') }} 9 | NETMASK=255.255.255.0 10 | ONBOOT=yes 11 | USERCTL=no 12 | BONDING_OPTS="miimon=100 mode=4 lacp_rate=1" 13 | MTU=9000 14 | 15 | {% set eth = 'enp0s20f' %} 16 | {% for num in range(0,4,1) %} 17 | 18 | /etc/sysconfig/network-scripts/ifcfg-{{ eth }}{{ num }}: 19 | file.managed: 20 | - name: /etc/sysconfig/network-scripts/ifcfg-{{ eth }}{{ num }} 21 | - contents: | 22 | DEVICE={{ eth }}{{ num }} 23 | NM_CONTROLLED=no 24 | ONBOOT=yes 25 | BOOTPROTO=none 26 | MASTER=bond0 27 | SLAVE=yes 28 | USERCTL=no 29 | {% endfor %} 30 | 31 | -------------------------------------------------------------------------------- /notes/multimedia.md: -------------------------------------------------------------------------------- 1 | ### Multimedia 2 | 3 | [http://wiki.centos.org/TipsAndTricks/MultimediaOnCentOS7](http://wiki.centos.org/TipsAndTricks/MultimediaOnCentOS7) 4 | 5 | - Audio: `aplay -l` 6 | - Test audio: `aplay -D plughw:0,0 /usr/share/sounds/alsa/Front_Center.wav` 7 | 8 | ``` 9 | # cat /etc/asound.conf 10 | # 11 | # Place your global alsa-lib configuration here... 
12 | # 13 | pcm.!default { 14 | type hw 15 | card 0 16 | device 0 17 | } 18 | 19 | ``` 20 | 21 | ###### Adobe Flash 22 | ``` 23 | rpm -ivh http://linuxdownload.adobe.com/adobe-release/adobe-release-x86_64-1.0-1.noarch.rpm 24 | rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-adobe-linux 25 | 26 | yum install flash-plugin nspluginwrapper alsa-plugins-pulseaudio libcurl 27 | ``` 28 | 29 | ### References: 30 | - https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Installation_Guide/sect-making-usb-media.html#sect-making-usb-media-linux 31 | - http://www.pantz.org/software/parted/ 32 | -------------------------------------------------------------------------------- /states/top.sls: -------------------------------------------------------------------------------- 1 | 2 | base: 3 | 4 | 'G@saltstack-base:role:master': 5 | - local 6 | - selinux 7 | - iptables 8 | - network 9 | - kernel 10 | - packages 11 | - ssd 12 | {% if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] == '7' %} 13 | - cpupower 14 | - chrony.server 15 | {% elif grains['os_family'] == 'RedHat' and grains['osmajorrelease'] == '6' %} 16 | - cpuspeed 17 | - ntp.server 18 | {% endif %} 19 | - users 20 | - irqbalance 21 | 22 | 'G@saltstack-base:role:minion': 23 | - local 24 | - selinux 25 | - iptables 26 | - network 27 | - network.bond 28 | - kernel 29 | - yumrepo 30 | - packages 31 | - ssd 32 | {% if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] == '7' %} 33 | - cpupower 34 | - chrony.client 35 | {% elif grains['os_family'] == 'RedHat' and grains['osmajorrelease'] == '6' %} 36 | - cpuspeed 37 | - ntp.client 38 | {% endif %} 39 | - users 40 | - irqbalance 41 | -------------------------------------------------------------------------------- /states/openstack/glance/files/glance.sh: -------------------------------------------------------------------------------- 1 | 2 | # Create glance user 3 | #openstack user create --domain default --password glance glance 4 | 5 | # Add the admin role to the glance user and service project: 6 | #openstack role add --project service --user glance admin 7 | 8 | # Create the glance service entity: 9 | #openstack service create --name glance --description "OpenStack Image" image 10 | 11 | # Create the Image service API endpoints 12 | #openstack endpoint create --region RegionOne image public http://controller:9292 13 | #openstack endpoint create --region RegionOne image internal http://controller:9292 14 | #openstack endpoint create --region RegionOne image admin http://controller:9292 15 | 16 | openstack user list 17 | openstack user show glance 18 | 19 | openstack service list 20 | openstack service show glance 21 | 22 | openstack service list 23 | openstack endpoint list 24 | 25 | # 26 | # 27 | # 28 | 29 | openstack image list 30 | 31 | openstack image create "cirros" \ 32 | --file ./images/cirros-0.3.4-x86_64-disk.img \ 33 | --disk-format qcow2 --container-format bare \ 34 | --public 35 | 36 | openstack image list 37 | 38 | -------------------------------------------------------------------------------- /notes/pxe-install.md: -------------------------------------------------------------------------------- 1 | 2 | ### Install Supermicros (or other MintBox2) via PXE Server 3 | 4 | #### Install Supermicro from the PXE Server 5 | 6 | 1. Hook up your crash cart and boot the machine 7 | 2. Hold the F12 key to activate the Network Boot 8 | 3. At the boot menu, select CentOS 7 - Kickstart 9 | 4. Wait for the automated install to complete 10 | 5. 
Login from the console to verify the installation. 11 | 6. From the Salt Master: 12 | - accept the key from the salt minion: `salt-key -a ` 13 | - Verify connectivity to the Salt minion: `salt test.ping` 14 | - Assign the node the minion role: `salt grains.setvals "{'saltstack-base':{'role':'minion'}}"` 15 | - Configure networking: `salt state.sls network.bond` 16 | - Reboot: `salt cmd.run 'reboot'` 17 | 18 | Proceed to [Run States](run-states.md) 19 | 20 | 21 | #### Notes 22 | 23 | ALT: Setup networking for CentOS 6 servers 24 | ```bash 25 | salt '' state.sls iptables saltenv=base 26 | salt '' state.sls selinux saltenv=base 27 | salt '' state.sls network.bond saltenv=base 28 | ``` 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014-2015 David Kilcy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /states/chrony/files/chrony.conf.client: -------------------------------------------------------------------------------- 1 | # Use public servers from the pool.ntp.org project. 2 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 3 | server ntp iburst 4 | 5 | # Record the rate at which the system clock gains/losses time. 6 | driftfile /var/lib/chrony/drift 7 | 8 | # Allow the system clock to be stepped in the first three updates 9 | # if its offset is larger than 1 second. 10 | makestep 1.0 3 11 | 12 | # Enable kernel synchronization of the real-time clock (RTC). 13 | rtcsync 14 | 15 | # Enable hardware timestamping on all interfaces that support it. 16 | #hwtimestamp * 17 | 18 | # Increase the minimum number of selectable sources required to adjust 19 | # the system clock. 20 | #minsources 2 21 | 22 | # Allow NTP client access from local network. 23 | #allow 192.168.0.0/16 24 | 25 | # Serve time even if not synchronized to a time source. 26 | #local stratum 10 27 | 28 | # Specify file containing keys for NTP authentication. 29 | #keyfile /etc/chrony.keys 30 | 31 | # Specify directory for log files. 32 | logdir /var/log/chrony 33 | 34 | # Select which information is logged. 
35 | #log measurements statistics tracking 36 | 37 | -------------------------------------------------------------------------------- /states/network/wireshark.md: -------------------------------------------------------------------------------- 1 | 2 | ### wireshark 3 | 4 | Installs wireshark's `tshark` command-line tool. 5 | 6 | #### Example Wireshark commands 7 | 8 | - Show version: `tshark -v` 9 | - Show captureable interfaces and exit: `tshark -D` 10 | - Capture a specific protocol and interface: `tshark -f "tcp port 22" -i enp0s20f1` 11 | - Filter: `tshark -R "ip.addr == 192.168.0.1"` 12 | - Filter NFS traffic: `tshark -R nfs` 13 | - Capture packet data to a file: `tshark -w /tmp/capture.pcap` 14 | - Auto-save Captures to multiple files: `tshark -b filesize:10 -a files:5 -w /tmp/temp.pcap` 15 | * -b is the ring buffer option 16 | * -a is the capture autostop condition 17 | - Read packet data from a file: `tshark -r /tmp/capture.pcap` 18 | 19 | - Set capture buffer size to 2MB: `tshark -B 2` 20 | 21 | #### References 22 | - [tshark man page](https://www.wireshark.org/docs/man-pages/tshark.html) 23 | - [How to Use Wireshark Tshark to Specify File, Time, Buffer Capture Limits](http://www.thegeekstuff.com/2014/05/wireshark-file-buffer-size/) 24 | - [Tshark examples: howto capture and dissect network traffic](http://www.codealias.info/technotes/the_tshark_capture_and_filter_example_page) 25 | -------------------------------------------------------------------------------- /notes/saltstack-base-setup.md: -------------------------------------------------------------------------------- 1 | 2 | ### Setup Git and saltstack-base repository 3 | 4 | 5 | 1. Install git as **root** user: `yum install git` 6 | 2. Configure GitHub and pull projects as **devops** user 7 | ```bash 8 | git config --global user.name "dkilcy" 9 | git config --global user.email "david@kilcyconsulting.com" 10 | git config --global core.editor "vim" 11 | ``` 12 | 13 | 3. Clone the repository 14 | ```bash 15 | mkdir ~/git 16 | cd ~/git 17 | git clone https://github.com/dkilcy/saltstack-base.git 18 | ``` 19 | 20 | 4. Create a YAML file to hold the customized Salt configuration. As **root** user, execute `vi /etc/salt/master.d/99-salt-envs.conf` and add the following to the new file: 21 | ```yaml 22 | file_roots: 23 | base: 24 | - /srv/salt/base/states 25 | pillar_roots: 26 | base: 27 | - /srv/salt/base/pillar 28 | ``` 29 | 30 | 5. Point Salt to the development environment as **root** user. 31 | ```bash 32 | mkdir -p /srv/salt 33 | ln -sf /home/devops/git/saltstack-base /srv/salt/base 34 | ``` 35 | 36 | 6. Copy the hosts file from /home/devops/git/saltstack-base/network/files/hosts to /etc/hosts 37 | ```bash 38 | cp /home/devops/git/saltstack-base/network/files/hosts /etc/ 39 | ``` 40 | -------------------------------------------------------------------------------- /states/docker/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### Docker Cheatsheet 3 | 4 | Images vs Containers: 5 | 6 | - An instance of an image is called a container. 7 | - If you start this image, you have a running container of this image. 8 | - You can have many running containers of the same image. 
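A quick illustration of that last point, using the `registry:2` image referenced elsewhere in this repo (container names are examples):

```bash
# Two independent containers created from the same image
docker run -d --name registry-a registry:2
docker run -d --name registry-b registry:2
docker ps    # both containers show up, backed by the single registry:2 image
```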
9 | 10 | Docker Command Summary: 11 | 12 | - Add user to docker group: `usermod -aG docker ironman` 13 | - Show running containers: `docker ps` 14 | - Show all containers: `docker ps -a` 15 | - Start bash session in container: `docker exec -it /bin/bash` 16 | - Run container: `docker run -d --name ` 17 | - Start container: `docker start ` 18 | - Inspect container: `docker inspect ` 19 | - View port mappings: `docker port ` 20 | - Show container logs: `docker logs ` 21 | - Get Container ID of the container you are in: 22 | ``` 23 | cat /proc/self/cgroup | grep -o -e "docker-.*.scope" | head -n 1 | sed "s/docker-\(.*\).scope/\\1/"` 24 | ``` 25 | - Stop container: `docker stop ` 26 | - Remove all docker containers: `docker rm $(docker ps -a -q)` 27 | - Remove all docker images: `docker rmi $(docker images -q)` 28 | 29 | ### References 30 | 31 | - [Engine reference](https://docs.docker.com/engine/reference/) 32 | -------------------------------------------------------------------------------- /notes/linux-tcp-tuning.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### Linux TCP Tuning 4 | 5 | - Check Ring Buffer Size 6 | 7 | ``` 8 | [root@HSEDL-CHF01-01V dkilcy]# ethtool -g eth7 9 | Ring parameters for eth7: 10 | Pre-set maximums: 11 | RX: 4096 12 | RX Mini: 0 13 | RX Jumbo: 4096 14 | TX: 4096 15 | Current hardware settings: 16 | RX: 256 17 | RX Mini: 0 18 | RX Jumbo: 128 19 | TX: 512 20 | ``` 21 | 22 | ``` 23 | 18-02-08T11:17:12 root@hsepl-srs3-01 ~]# ethtool -g em1 24 | Ring parameters for em1: 25 | Pre-set maximums: 26 | RX: 4078 27 | RX Mini: 0 28 | RX Jumbo: 0 29 | TX: 4078 30 | Current hardware settings: 31 | RX: 453 32 | RX Mini: 0 33 | RX Jumbo: 0 34 | TX: 4078 35 | 36 | 18-02-08T11:17:17 root@hsepl-srs3-01 ~]# ethtool -g em2 37 | Ring parameters for em2: 38 | Pre-set maximums: 39 | RX: 4078 40 | RX Mini: 0 41 | RX Jumbo: 0 42 | TX: 4078 43 | Current hardware settings: 44 | RX: 453 45 | RX Mini: 0 46 | RX Jumbo: 0 47 | TX: 4078 48 | 49 | ``` 50 | 51 | ### References 52 | 53 | - [https://www.cyberciti.biz/faq/linux-tcp-tuning/](https://www.cyberciti.biz/faq/linux-tcp-tuning/) 54 | - 55 | -------------------------------------------------------------------------------- /states/ntp/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### ntp 3 | 4 | 1. Setup ntpd server on the workstation to be an NTP time server; start and enable the service. 5 | 6 | ```bash 7 | yum install ntp 8 | systemctl start ntpd.service 9 | systemctl enable ntpd.service 10 | ``` 11 | 12 | 2. Verify the NTP installation 13 | 14 | ```bash 15 | [root@workstation1 ~]# ntpq -p 16 | remote refid st t when poll reach delay offset jitter 17 | ============================================================================== 18 | -y.ns.gin.ntt.ne 198.64.6.114 2 u 465 1024 375 38.859 -14.201 8.438 19 | *ntp.your.org .CDMA. 
1 u 853 1024 377 29.042 1.957 4.626 20 | +www.linas.org 129.250.35.250 3 u 470 1024 377 44.347 1.349 5.194 21 | +ntp3.junkemailf 149.20.64.28 2 u 675 1024 337 78.504 4.305 3.001 22 | 23 | [root@workstation1 ~]# ntpq -c assoc 24 | 25 | ind assid status conf reach auth condition last_event cnt 26 | =========================================================== 27 | 1 3548 933a yes yes none outlyer sys_peer 3 28 | 2 3549 963a yes yes none sys.peer sys_peer 3 29 | 3 3550 9424 yes yes none candidate reachable 2 30 | 4 3551 9424 yes yes none candidate reachable 2 31 | [root@workstation1 ~]# 32 | ``` 33 | -------------------------------------------------------------------------------- /states/chrony/files/chrony.conf.server: -------------------------------------------------------------------------------- 1 | # Use public servers from the pool.ntp.org project. 2 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 3 | server 0.centos.pool.ntp.org iburst 4 | server 1.centos.pool.ntp.org iburst 5 | server 2.centos.pool.ntp.org iburst 6 | server 3.centos.pool.ntp.org iburst 7 | 8 | # Record the rate at which the system clock gains/losses time. 9 | driftfile /var/lib/chrony/drift 10 | 11 | # Allow the system clock to be stepped in the first three updates 12 | # if its offset is larger than 1 second. 13 | makestep 1.0 3 14 | 15 | # Enable kernel synchronization of the real-time clock (RTC). 16 | rtcsync 17 | 18 | # Enable hardware timestamping on all interfaces that support it. 19 | #hwtimestamp * 20 | 21 | # Increase the minimum number of selectable sources required to adjust 22 | # the system clock. 23 | #minsources 2 24 | 25 | # Allow NTP client access from local network. 26 | #allow 192.168.0.0/16 27 | allow 10.0.0.0/24 28 | 29 | # Serve time even if not synchronized to a time source. 30 | #local stratum 10 31 | 32 | # Specify file containing keys for NTP authentication. 33 | #keyfile /etc/chrony.keys 34 | 35 | # Specify directory for log files. 36 | logdir /var/log/chrony 37 | 38 | # Select which information is logged. 
39 | #log measurements statistics tracking 40 | 41 | -------------------------------------------------------------------------------- /states/users/init.sls: -------------------------------------------------------------------------------- 1 | 2 | {% set user_list = salt['pillar.get']('user_list') %} 3 | 4 | /root/.bashrc: 5 | file.append: 6 | - text: 7 | - export PS1="\e[1;31m[\u@\h \W]$ \e[m" 8 | 9 | /root/.vimrc: 10 | file.managed: 11 | - name: /root/.vimrc 12 | - source: salt://users/files/vimrc 13 | - mode: 644 14 | - user: root 15 | - group: root 16 | 17 | {% for user in user_list %} 18 | 19 | {{ user.name }}: 20 | user.present: 21 | - name: {{ user.name }} 22 | - shell: {{ user.shell }} 23 | 24 | file.managed: 25 | - name: /etc/sudoers.d/{{ user.name }} 26 | - contents: | 27 | {{ user.name }} ALL=(ALL) NOPASSWD: ALL 28 | 29 | {{ user.name }}_authorized_keys: 30 | file.managed: 31 | - name: /home/{{ user.name }}/.ssh/authorized_keys 32 | - contents: {{ user.ssh_public_key }} 33 | - makedirs: True 34 | - dir_mode: 700 35 | - mode: 600 36 | - user: {{ user.name }} 37 | - group: {{ user.group }} 38 | 39 | {{ user.name }}_vimrc: 40 | file.managed: 41 | - name: /home/{{ user.name }}/.vimrc 42 | - source: salt://users/files/vimrc 43 | - mode: 644 44 | - user: {{ user.name }} 45 | - group: {{ user.group }} 46 | 47 | /home/{{ user.name }}/.bashrc: 48 | file.append: 49 | - text: 50 | - export PS1="\e[1;36m[\u@\h \W]$ \e[m" 51 | 52 | {% endfor %} 53 | -------------------------------------------------------------------------------- /notes/network-setup.md: -------------------------------------------------------------------------------- 1 | 2 | ### Network 3 | 4 | ### TP-Link Switch setup 5 | 6 | 1. Connect the USB console cable 7 | 2. Start minicom: `minicom -D /dev/ttyUSB0 -b 38400` 8 | 9 | ##### Set the external IP address 10 | ``` 11 | enable 12 | configure 13 | interface vlan 1 14 | ip address 10.0.0.240 255.255.255.0 15 | exit 16 | exit 17 | ``` 18 | 19 | On TL-SG3216: 20 | - System -> Device Description 21 | - Device Name: TL-SG<3216|3424>-<1..n> 22 | - Device Location: Lab 23 | 24 | VLAN -> 802.1Q VLAN 25 | From VLAN Config Tab: 26 | Create 3 VLANs 27 | - Create VLAN 100 28 | - Create VLAN 101 29 | - Create VLAN 102 30 | 31 | From Port Config Tab: 32 | - Select 15-16 as Link Type TRUNK, PVID 1, click Appy 33 | 34 | Other commands: 35 | ``` 36 | show vlan brief 37 | show vlan id 1 38 | 39 | show interface status 40 | show interface vlan 1 41 | 42 | show system-info 43 | show ip http secure-server 44 | ``` 45 | 46 | ### Hardware 47 | - picocom: `picocom -D /dev/ttyUSB0 -b 38400` 48 | - minicom: `minicom -D /dev/ttyUSB0 -b 38400` 49 | - Notes: 50 | - Make sure hardware flow control is OFF 51 | 52 | 53 | ##### References 54 | 55 | - [http://www.ccnpguide.com/end-to-end-vs-local-vlan-models/] 56 | - [http://www.informit.com/library/content.aspx?b=CCNP_Studies_Switching&seqNum=44] 57 | - [http://www.informit.com/library/content.aspx?b=CCNP_Studies_Switching&seqNum=18] 58 | -------------------------------------------------------------------------------- /states/openstack/nova/files/nova-controller.sh: -------------------------------------------------------------------------------- 1 | 2 | # Create the nova user 3 | #openstack user create --domain default --password nova nova 4 | 5 | # Add the admin role to the nova user 6 | #openstack role add --project service --user nova admin 7 | 8 | # Create the nova service entry 9 | #openstack service create --name nova --description "OpenStack Compute" 
compute 10 | 11 | # Create the Compute API service endpoints 12 | #openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1 13 | #openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1 14 | #openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1 15 | 16 | # Create a Placement service 17 | #openstack user create --domain default --password placement placement 18 | 19 | # Add the Placement user to the service project with the admin role 20 | #openstack role add --project service --user placement admin 21 | 22 | # Create the Placement API entry in the service catalog 23 | #openstack service create --name placement --description "Placement API" placement 24 | 25 | # Create the Placement API service endpoints 26 | #openstack endpoint create --region RegionOne placement public http://controller:8778 27 | #openstack endpoint create --region RegionOne placement internal http://controller:8778 28 | #openstack endpoint create --region RegionOne placement admin http://controller:8778 29 | 30 | # Verify novacello0 and cell1 are registered correctly 31 | nova-manage cell_v2 list_cells 32 | 33 | -------------------------------------------------------------------------------- /notes/numa-cpu.md: -------------------------------------------------------------------------------- 1 | #### Cacheline 2 | 3 | True Sharing/False Sharing: 4 | * True Sharing: 2 threads modifying the same memory location (ex: lock) 5 | * False Sharing: 2 threads accessing different location in same cache line 6 | 7 | True/False sharing really hurts performance when sharing is across NUMA nodes. 8 | 9 | How to diagnose: 10 | * numastat 11 | * perf 12 | * perf mem-loads (3.10 kernel) 13 | * c2c data-sharing (with perf in future) 14 | 15 | numastat -mczs 16 | numactl - can pin a process to a specific numa node 17 | 18 | At a high level do I have my CPUs and memory pinned OK? Try numastat. 19 | 20 | Checking the cpu-pinning: 21 | Where are the pids allowed to run? 22 | /proc//status file(s) 23 | 24 | Now can we determine what NUMA nodes each CPU belongs to? 25 | ``` 26 | numactl --hardware | grep cpus 27 | ``` 28 | We know know: 29 | - The majority of memory is pinned to the desired nodes 30 | - the CPUs are pinned to the nodes where the memory resides 31 | 32 | However we dont know: 33 | If any processes are accesing memory on a remote NUMA node 34 | If so, then: 35 | - Are they contending with other processes for any cachelines? 36 | - What CPUs are they running on 37 | - What are their PIDs and TIDs 38 | - What are the remote data addresses being accessed? 39 | - Where in the application those accesses are occurring (the instruction pointer)? 40 | - Are they reading or modifying the cachelines - and how often? 
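Pulling the high-level checks above together, a short sketch (the PID is a placeholder):

```bash
numastat -mczs                              # per-node memory usage summary
grep Cpus_allowed_list /proc/1234/status    # which CPUs may this pid run on?
numactl --hardware | grep cpus              # which CPUs belong to which NUMA node
```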
41 | 42 | #### References 43 | - [Processor Affinity](http://www.glennklockwood.com/comp/affinity.php) 44 | -------------------------------------------------------------------------------- /states/docker/registry.sls: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # 4 | 5 | {% set volume = salt['pillar.get']('docker-registry:volume') %} 6 | {% set port = salt['pillar.get']('docker-registry:port') %} 7 | 8 | # 9 | # Create docker volumes 10 | # 11 | {{ volume }}: 12 | file.directory: 13 | - makedirs: True 14 | {{ volume }}/data: 15 | file.directory 16 | {{ volume }}/auth: 17 | file.directory 18 | 19 | # Create user and password for registry 20 | # 21 | docker-registry-users: 22 | cmd.run: 23 | - name: htpasswd -Bbn {{ salt['pillar.get']('docker-registry:username') }} {{ salt['pillar.get']('docker-registry:password') }} > {{ volume }}/auth/htpasswd 24 | 25 | # 26 | # Pull the docker container if not present 27 | # 28 | {{ salt['pillar.get']('docker-registry:image') }}: 29 | dockerng.image_present 30 | 31 | # 32 | # Run the registry container 33 | # 34 | docker-registry-container: 35 | dockerng.running: 36 | - name: local-registry 37 | - image: '{{ salt['pillar.get']('docker-registry:image') }}' 38 | - detach: True 39 | - port_bindings: {{ port }}:{{ port }}/tcp 40 | - binds: 41 | - /etc/pki/docker:/certs:ro 42 | - {{ volume }}/data:/var/lib/registry:rw 43 | - {{ volume }}/auth:/auth:rw 44 | - environment: 45 | - REGISTRY_AUTH: htpasswd 46 | - REGISTRY_AUTH_HTPASSWD_REALM: "Registry Realm" 47 | - REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd 48 | - REGISTRY_HTTP_TLS_CERTIFICATE: /certs/registry.crt 49 | - REGISTRY_HTTP_TLS_KEY: /certs/registry.key 50 | - restart_policy: always 51 | 52 | -------------------------------------------------------------------------------- /states/openstack/keystone/files/keystone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # https://docs.openstack.org/ocata/install-guide-rdo/keystone-users.html 4 | # 5 | 6 | . auth-openrc.sh 7 | . admin-openrc.sh 8 | 9 | # Create Service project 10 | #openstack project create --domain default --description "Service Project" service 11 | 12 | # Create Demo project 13 | #openstack project create --domain default --description "Demo Project" demo 14 | 15 | # Create Demo user 16 | #openstack user create --domain default --password demo demo 17 | 18 | # Create User role 19 | #openstack role create user 20 | 21 | # Add user role to demo user of demo project 22 | #openstack role add --project demo --user demo user 23 | 24 | # 25 | # https://docs.openstack.org/ocata/install-guide-rdo/keystone-verify.html 26 | # 27 | 28 | openstack project list 29 | openstack project show service 30 | 31 | openstack role list 32 | 33 | openstack user list 34 | openstack user show demo 35 | 36 | openstack service list 37 | openstack endpoint list 38 | 39 | # 40 | # 41 | # 42 | 43 | unset OS_AUTH_URL OS_PASSWORD 44 | 45 | openstack --os-auth-url http://controller:35357/v3 \ 46 | --os-project-domain-name default --os-user-domain-name default \ 47 | --os-project-name admin --os-username admin --os-password ${ADMIN_PASS} token issue 48 | 49 | . demo-openrc.sh 50 | 51 | openstack --os-auth-url http://controller:5000/v3 \ 52 | --os-project-domain-name default --os-user-domain-name default \ 53 | --os-project-name demo --os-username demo token issue 54 | 55 | 56 | . 
admin-openrc.sh 57 | 58 | -------------------------------------------------------------------------------- /notes/grains.md: -------------------------------------------------------------------------------- 1 | 2 | ### Salt Grains 3 | 4 | - Return all grains: `salt 'store1' grains.items` 5 | - Get one grain: `salt 'store1' grains.get id` 6 | - Get one grain: `salt 'store1' grains.get selinux:enabled` 7 | - Get a list of grains: `salt 'store1' grains.get saltversioninfo` 8 | - Get a list of grains: `salt 'store1' grains.get ip_interfaces` 9 | - Get the first item in the list: `salt 'store1' grains.get saltversioninfo:0` ## doesnt work???? 10 | - Get the first item in the list: `salt 'store1' grains.get ip_interfaces:bond0:0` 11 | - Setting a grain: `salt 'store1' grains.setvals "{'saltstack-base:{'role':'minion'}}"` 12 | - Match all grains that have saltstack-base:role grain: `# salt -G 'saltstack-base:role:minion' test.ping` 13 | - Using a grain in a state file: `{% set id = salt['pillar.get']('id') %}` 14 | - Jinja templating: `salt '*' cmd.run template=jinja 'echo {{ grains.id }} {{ grains.fqdn_ip4[0] }}'` 15 | - Matching minions in a pillar file using a grain: 16 | ```yaml 17 | base: 18 | 'G@saltstack-base:role:master': 19 | - local 20 | ``` 21 | - Conditional based on grain: 22 | ``` 23 | {% if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] == '7' %} 24 | - cpupower 25 | {% elif grains['os_family'] == 'RedHat' and grains['osmajorrelease'] == '6' %} 26 | - cpuspeed 27 | {% endif %} 28 | ``` 29 | 30 | ##### References 31 | - [SaltStack Grains](http://docs.saltstack.com/en/latest/topics/targeting/grains.html) 32 | - [Using a grain in a configuration file](http://serverfault.com/questions/676796/how-to-use-saltstack-to-manage-different-config-file-for-different-minions) 33 | -------------------------------------------------------------------------------- /states/network/files/set-irq-affinity.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | if [ -z $1 ]; then 3 | echo "usage: $0 [2nd interface]" 4 | exit 1 5 | fi 6 | CORES=$((`cat /proc/cpuinfo | grep processor | tail -1 | awk '{print $3}'`+1)) 7 | hop=2 8 | 9 | if [ -z $2 ]; then 10 | limit_1=$((2**CORES)) 11 | echo "---------------------------------------" 12 | echo "Optimizing IRQs for Single port traffic" 13 | echo "---------------------------------------" 14 | else 15 | echo "-------------------------------------" 16 | echo "Optimizing IRQs for Dual port traffic" 17 | echo "-------------------------------------" 18 | limit_1=$(( 2**$((CORES/2)) )) 19 | limit_2=$((2**CORES)) 20 | IRQS_2=$(cat /proc/interrupts | grep $2 | awk '{print $1}' | sed 's/://') 21 | fi 22 | 23 | IRQS_1=$(cat /proc/interrupts | grep $1 | awk '{print $1}' | sed 's/://') 24 | 25 | if [ -z "$IRQS_1" ] ; then 26 | echo No IRQs found for $1. 27 | else 28 | echo Discovered irqs for $1: $IRQS_1 29 | mask=1 ; for IRQ in $IRQS_1 ; do echo Assign irq $IRQ mask 0x$(printf "%x" $mask) ; echo $(printf "%x" $mask) > /proc/irq/$IRQ/smp_affinity ; mask=$(( mask * $hop)) ; if [ $mask -ge $limit_1 ] ; then mask=1; fi ;done 30 | fi 31 | 32 | echo 33 | 34 | if [ "$2" != "" ]; then 35 | if [ -z "$IRQS_2" ]; then 36 | echo No IRQs found for $2. 
37 | else 38 | echo Discovered irqs for $2: $IRQS_2 39 | mask=$limit_1 ; for IRQ in $IRQS_2 ; do echo Assign irq $IRQ mask 0x$(printf "%x" $mask) ; echo $(printf "%x" $mask) > /proc/irq/$IRQ/smp_affinity ; mask=$(( mask * $hop)) ; if [ $mask -ge $limit_2 ] ; then mask=$limit_1 ; fi ;done 40 | fi 41 | fi 42 | echo 43 | echo done. 44 | 45 | -------------------------------------------------------------------------------- /states/iptables/README.md: -------------------------------------------------------------------------------- 1 | ### Disable and unload iptables from the CentOS kernel 2 | 3 | Systems still take a performance hit if iptables is stopped. It needs to be unloaded from the kernel to see a boost in network performance 4 | 5 | - CentOS 6: `service iptables stop ; chkconfig iptables off` 6 | - CentOS 7: `systemctl stop iptables.service ; systemctl disable iptables.service` 7 | 8 | ##### Prevent the kernel modules from loading at boot. 9 | 10 | ``` 11 | [root@workstation1 ~]# cat /etc/modprobe.d/netfilter.conf 12 | alias ip_tables off 13 | alias iptable off 14 | alias iptable_nat off 15 | alias iptable_filter off 16 | alias x_tables off 17 | alias nf_nat off 18 | alias nf_conntrack_ipv4 off 19 | alias nf_conntrack off 20 | 21 | alias ip6_tables off 22 | alias ip6table off 23 | alias ip6table_nat off 24 | alias ip6table_filter off 25 | alias nf_nat_ipv6 off 26 | alias nf_conntrack_ipv6 off 27 | alias nf_conntrack off 28 | ``` 29 | 30 | Reboot the machine. Verify that iptables kernel modules are not loaded. 31 | 32 | ``` 33 | [root@workstation1 ~]# iptables -L 34 | modprobe: ERROR: could not find module by name='off' 35 | modprobe: ERROR: could not insert 'off': Function not implemented 36 | iptables v1.4.21: can't initialize iptables table `filter': Table does not exist (do you need to insmod?) 37 | Perhaps iptables or your kernel needs to be upgraded. 38 | 39 | [root@workstation1 ~]# lsmod | grep ip 40 | [root@workstation1 ~]# 41 | ``` 42 | 43 | #### References 44 | - [http://www.pc-freak.net/blog/resolving-nf_conntrack-table-full-dropping-packet-flood-message-in-dmesg-linux-kernel-log/](http://www.pc-freak.net/blog/resolving-nf_conntrack-table-full-dropping-packet-flood-message-in-dmesg-linux-kernel-log/) 45 | - [http://nginx.com/blog/nginx-se-linux-changes-upgrading-rhel-6-6/](http://nginx.com/blog/nginx-se-linux-changes-upgrading-rhel-6-6/) 46 | - [http://www.cyberciti.biz/faq/redhat-centos-disable-ipv6-networking/](http://www.cyberciti.biz/faq/redhat-centos-disable-ipv6-networking/) 47 | -------------------------------------------------------------------------------- /notes/setup-salt.md: -------------------------------------------------------------------------------- 1 | 2 | ### Setup Salt Masters and Minions 3 | 4 | Perform all these steps on the workstation node as **root** user 5 | 6 | #### Overview 7 | 8 | 1. Setup the Salt master and minion on the workstation machines. 9 | 2. Setup the Salt minion on all other machines (including the workstation machines). 10 | 3. Post-Install Setup 11 | 12 | #### Setup the Salt master 13 | 14 | 1. Install the Salt master on all workstation (utility) nodes. 15 | 16 | ```bash 17 | yum install salt-master 18 | ``` 19 | 20 | 2. Verify the installation 21 | 22 | ```bash 23 | [root@workstation2 ~]# salt --version 24 | salt 2015.5.0 (Lithium) 25 | [root@workstation2 ~]# 26 | ``` 27 | 28 | 3. 
Start the Salt master 29 | 30 | ```bash 31 | systemctl start salt-master.service 32 | systemctl enable salt-master.service 33 | ``` 34 | 35 | #### Setup the Salt minion 36 | 37 | 4. Install the Salt minion 38 | 39 | ```bash 40 | yum install salt-minion 41 | ``` 42 | 43 | 5. Verify the installation 44 | 45 | ```bash 46 | [root@workstation2 ~]# salt-call --version 47 | salt-call 2015.5.0 (Lithium) 48 | [root@workstation2 ~]# 49 | ``` 50 | 51 | 6. Set the minion ID as the short hostname 52 | 53 | ```bash 54 | hostname -s > /etc/salt/minion_id 55 | ``` 56 | 57 | 7. Start the Salt minion 58 | 59 | ```bash 60 | systemctl start salt-minion.service 61 | systemctl enable salt-minion.service 62 | ``` 63 | 64 | 8. Add the local minion to the master. 65 | 66 | ```bash 67 | [root@workstation2 ~]# salt-key -L 68 | Accepted Keys: 69 | Unaccepted Keys: 70 | workstation2 71 | Rejected Keys: 72 | ``` 73 | 74 | ```bash 75 | [root@workstation2 ~]# salt-key -A 76 | The following keys are going to be accepted: 77 | Unaccepted Keys: 78 | workstation2 79 | Proceed? [n/Y] 80 | Key for minion workstation2 accepted. 81 | ``` 82 | 83 | 9. Test the installation 84 | 85 | ```bash 86 | salt '*' test.ping 87 | ``` 88 | 89 | 10. Repeat for all other machines in the network designated as Salt minions. 90 | 91 | 92 | 93 | 94 | -------------------------------------------------------------------------------- /states/ntp/files/ntp.conf: -------------------------------------------------------------------------------- 1 | # For more information about this file, see the man pages 2 | # ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). 3 | 4 | driftfile /var/lib/ntp/drift 5 | 6 | # Permit time synchronization with our time source, but do not 7 | # permit the source to query or modify the service on this system. 8 | restrict default kod nomodify notrap nopeer noquery 9 | restrict -6 default kod nomodify notrap nopeer noquery 10 | 11 | # Permit all access over the loopback interface. This could 12 | # be tightened as well, but to do so would affect some of 13 | # the administrative functions. 14 | restrict 127.0.0.1 15 | restrict -6 ::1 16 | restrict 10.0.0.0 mask 255.255.255.0 nomodify notrap 17 | 18 | # Hosts on local network are less restricted. 19 | #restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap 20 | 21 | # Use public servers from the pool.ntp.org project. 22 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 23 | server ntp 24 | 25 | #broadcast 192.168.1.255 autokey # broadcast server 26 | #broadcastclient # broadcast client 27 | #broadcast 224.0.1.1 autokey # multicast server 28 | #multicastclient 224.0.1.1 # multicast client 29 | #manycastserver 239.255.254.254 # manycast server 30 | #manycastclient 239.255.254.254 autokey # manycast client 31 | 32 | # Enable public key cryptography. 33 | #crypto 34 | 35 | includefile /etc/ntp/crypto/pw 36 | 37 | # Key file containing the keys and key identifiers used when operating 38 | # with symmetric key cryptography. 39 | keys /etc/ntp/keys 40 | 41 | # Specify the key identifiers which are trusted. 42 | #trustedkey 4 8 42 43 | 44 | # Specify the key identifier to use with the ntpdc utility. 45 | #requestkey 8 46 | 47 | # Specify the key identifier to use with the ntpq utility. 48 | #controlkey 8 49 | 50 | # Enable writing of statistics records.
51 | #statistics clockstats cryptostats loopstats peerstats 52 | 53 | # Disable the monitoring facility to prevent amplification attacks using ntpdc 54 | # monlist command when default restrict does not include the noquery flag. See 55 | # CVE-2013-5211 for more details. 56 | # Note: Monitoring will not be disabled with the limited restriction flag. 57 | disable monitor 58 | 59 | -------------------------------------------------------------------------------- /states/pxeserver/files/dhcpd.conf: -------------------------------------------------------------------------------- 1 | # 2 | # DHCP Server Configuration file. 3 | # see /usr/share/doc/dhcp*/dhcpd.conf.example 4 | # see dhcpd.conf(5) man page 5 | # 6 | 7 | ddns-update-style none; 8 | log-facility local7; 9 | authoritative; 10 | 11 | subnet 10.0.0.0 netmask 255.255.255.0 { 12 | range 10.0.0.100 10.0.0.116; 13 | default-lease-time 86400; 14 | max-lease-time 86400; 15 | option routers 10.0.0.1; 16 | option ip-forwarding off; 17 | option broadcast-address 10.0.0.255; 18 | option subnet-mask 255.255.255.0; 19 | option domain-name-servers 10.0.0.6; 20 | option domain-name-servers ws2.lab.local; 21 | option domain-name "lab.local"; 22 | 23 | allow booting; 24 | allow bootp; 25 | next-server 10.0.0.6; 26 | filename "/pxelinux.0"; 27 | 28 | host sys1 { 29 | hardware ethernet 0c:c4:7a:31:68:0c; 30 | fixed-address 10.0.0.31; 31 | option host-name "sys1"; 32 | } 33 | host sys2 { 34 | hardware ethernet 00:25:90:fc:99:10; 35 | fixed-address 10.0.0.32; 36 | option host-name "sys2"; 37 | } 38 | host sys3 { 39 | hardware ethernet 0C:C4:7A:31:60:0C; 40 | fixed-address 10.0.0.33; 41 | option host-name "sys3"; 42 | } 43 | host sys4 { 44 | hardware ethernet 0c:c4:7a:31:24:08; 45 | fixed-address 10.0.0.34; 46 | option host-name "sys4"; 47 | } 48 | host sys5 { 49 | hardware ethernet 0c:c4:7a:ab:2e:20; 50 | fixed-address 10.0.0.35; 51 | option host-name "sys5"; 52 | } 53 | 54 | host sys6 { 55 | hardware ethernet 00:25:90:f1:4f:96; 56 | fixed-address 10.0.0.41; 57 | option host-name "sys6"; 58 | } 59 | host sys7 { 60 | hardware ethernet 00:25:90:f1:0c:6c; 61 | fixed-address 10.0.0.42; 62 | option host-name "sys7"; 63 | } 64 | host sys8 { 65 | hardware ethernet 00:25:90:f1:0d:c0; 66 | fixed-address 10.0.0.43; 67 | option host-name "sys8"; 68 | } 69 | host sys9 { 70 | hardware ethernet 00:25:90:f1:0d:a8; 71 | fixed-address 10.0.0.44; 72 | option host-name "sys9"; 73 | } 74 | host sys10 { 75 | hardware ethernet 00:25:90:f1:53:82; 76 | fixed-address 10.0.0.45; 77 | option host-name "sys10"; 78 | } 79 | host sys11 { 80 | hardware ethernet 00:25:90:f1:0e:58; 81 | fixed-address 10.0.0.46; 82 | option host-name "sys11"; 83 | } 84 | } 85 | 86 | -------------------------------------------------------------------------------- /states/openstack/nova/compute.sls: -------------------------------------------------------------------------------- 1 | 2 | {% from "openstack/mysql/map.jinja" import mysql with context %} 3 | 4 | {% set rabbit_pass = salt['pillar.get']('openstack:auth:RABBIT_PASS') %} 5 | {% set nova_pass = salt['pillar.get']('openstack:auth:NOVA_PASS') %} 6 | {% set placement_pass = salt['pillar.get']('openstack:auth:PLACEMENT_PASS') %} 7 | 8 | {% set controller = salt['pillar.get']('openstack:controller:host') %} 9 | 10 | openstack-nova-compute-pkgs: 11 | pkg.installed: 12 | - pkgs: 13 | - openstack-nova-compute 14 | 15 | /etc/nova/nova.conf: 16 | ini.options_present: 17 | - sections: 18 | DEFAULT: 19 | enabled_apis: osapi_compute,metadata 20 | transport_url: 
rabbit://openstack:{{ rabbit_pass }}@{{ controller }} 21 | my_ip: {{ salt['grains.get']('fqdn_ip4:0') }} 22 | use_neutron: True 23 | firewall_driver: nova.virt.firewall.NoopFirewallDriver 24 | api: 25 | auth_strategy: keystone 26 | keystone_authtoken: 27 | auth_uri: 'http://{{ controller }}:5000' 28 | auth_url: 'http://{{ controller }}:35357' 29 | memcached_servers: '{{ controller }}:11211' 30 | auth_type: password 31 | project_domain_name: default 32 | user_domain_name: default 33 | project_name: service 34 | username: nova 35 | password: {{ nova_pass }} 36 | vnc: 37 | enabled: true 38 | vncserver_listen: 0.0.0.0 39 | vncserver_proxyclient_address: $my_ip 40 | novncproxy_base_url: http://{{ controller }}:6080/vnc_auto.html 41 | glance: 42 | api_servers: http://{{ controller }}:9292 43 | oslo_concurrency: 44 | lock_path: /var/lib/nova/tmp 45 | placement: 46 | os_region_name: RegionOne 47 | project_domain_name: default 48 | project_name: service 49 | auth_type: password 50 | user_domain_name: default 51 | auth_url: http://{{ controller }}:35357/v3 52 | username: placement 53 | password: {{ placement_pass }} 54 | 55 | libvirtd-service: 56 | service.running: 57 | - name: libvirtd 58 | - enable: True 59 | 60 | openstack-nova-compute-service: 61 | service.running: 62 | - name: openstack-nova-compute 63 | - enable: True 64 | - watch: 65 | - ini: /etc/nova/nova.conf 66 | 67 | -------------------------------------------------------------------------------- /notes/creating-bootable-media.md: -------------------------------------------------------------------------------- 1 | ### Creating Bootable Media 2 | 3 | #### Create USB stick with CentOS 7 Network Install image 4 | 5 | 1. Copy the contents of the ISO to a directory 6 | ``` 7 | mkdir centos-7-netinstall 8 | cd centos-7-netinstall/ 9 | cp /var/sw/images/CentOS-7.0-1406-x86_64-NetInstall.iso . 10 | mkdir in out 11 | mount -o loop CentOS-7.0-1406-x86_64-NetInstall.iso in 12 | cp -rT in/ out/ 13 | ``` 14 | 15 | 2. Modify the contents of the original ISO to load the kickstart configuration 16 | 17 | - Copy the kickstart file to ./out as ks.cfg 18 | - Edit the `isolinux/isolinux.cfg` file and do the following: 19 | - Add `inst.ks=hd:sda1:/ks.cfg` to the **append** entry for the `label linux` entry. 20 | - Remove the `quiet` option. 21 | - Update the menu label 22 | - Save the file 23 | 24 | 3. Create the new ISO image 25 | 26 | ``` 27 | chmod 664 isolinux/isolinux.bin 28 | ``` 29 | TODO: figure out correct mkisofs command for USB stick.... 30 | 31 | 4. Burn the image to USB stick 32 | 33 | ``` 34 | #dd if=/dev/zero of=/dev/sdb bs=4096 count=512 35 | #below works with DVD or NetInstall image, not any others... 36 | #dd if=CentOS-7.0-1406-x86_64-NetInstall-ks.iso of=/dev/sdb bs=4096 37 | ``` 38 | 39 | #### Create a bootable CentOS 7 minimal image with custom kickstart configuration 40 | 41 | 1. Copy the contents of the ISO to a directory 42 | ``` 43 | cd centos7-minimal 44 | cp /data/staging/CentOS-7.0-1406-x86_64-Minimal.iso . 45 | mkdir in out 46 | sudo mount -o loop CentOS-7.0-1406-x86_64-Minimal.iso in 47 | cp -rT in/ out/ 48 | ``` 49 | 50 | 2. Modify the contents of the original ISO to load the kickstart configuration 51 | 52 | - Change to the `out` directory: `cd out` 53 | - Create a directory called `ks` and copy the `ks.cfg` file to this directory 54 | 55 | - Edit the `isolinux/isolinux.cfg` file and do the following: 56 | - Add `inst.ks=cdrom:/dev/cdrom:/ks/ks.cfg` to the **append** entry for the `label linux` entry. 
57 | - Remove the `quiet` option. 58 | - Update the menu label 59 | - Save the file 60 | 61 | 3. Create the new ISO image 62 | 63 | ``` 64 | chmod 664 isolinux/isolinux.bin 65 | mkisofs -o ../CentOS-7.0-1406-x86_64-Minimal.2014-11-04-1.iso -b isolinux/isolinux.bin -c isolinux/boot.cat \ 66 | -no-emul-boot -V 'CentOS 7 x86_64' -boot-load-size 4 -boot-info-table -R -J -v -T . 67 | ``` 68 | 4. Burn the image to media and test 69 | 70 | #### References 71 | 72 | [http://smorgasbork.com/component/content/article/35-linux/151-building-a-custom-centos-7-kickstart-disc-part-1] 73 | -------------------------------------------------------------------------------- /states/tuned/files/tuned.conf: -------------------------------------------------------------------------------- 1 | # 2 | # tuned configuration 3 | # 4 | # 5 | 6 | [main] 7 | summary=saltstack-base profile. Copied from throughput-performance profile with edits. 8 | include=virtual-guest 9 | 10 | [vm] 11 | transparent_hugepages=never 12 | 13 | [cpu] 14 | governor=performance 15 | energy_perf_bias=performance 16 | min_perf_pct=100 17 | 18 | [disk] 19 | readahead=>4096 20 | #elevator=deadline 21 | elevator=noop 22 | 23 | [sysctl] 24 | # ktune sysctl settings for rhel6 servers, maximizing i/o throughput 25 | # 26 | # Minimal preemption granularity for CPU-bound tasks: 27 | # (default: 1 msec# (1 + ilog(ncpus)), units: nanoseconds) 28 | kernel.sched_min_granularity_ns = 10000000 29 | 30 | # SCHED_OTHER wake-up granularity. 31 | # (default: 1 msec# (1 + ilog(ncpus)), units: nanoseconds) 32 | # 33 | # This option delays the preemption effects of decoupled workloads 34 | # and reduces their over-scheduling. Synchronous workloads will still 35 | # have immediate wakeup/sleep latencies. 36 | kernel.sched_wakeup_granularity_ns = 15000000 37 | 38 | # If a workload mostly uses anonymous memory and it hits this limit, the entire 39 | # working set is buffered for I/O, and any more write buffering would require 40 | # swapping, so it's time to throttle writes until I/O can catch up. Workloads 41 | # that mostly use file mappings may be able to use even higher values. 42 | # 43 | # The generator of dirty data starts writeback at this percentage (system default 44 | # is 20%) 45 | vm.dirty_ratio = 40 46 | 47 | # Start background writeback (via writeback threads) at this percentage (system 48 | # default is 10%) 49 | vm.dirty_background_ratio = 10 50 | 51 | # PID allocation wrap value. When the kernel's next PID value 52 | # reaches this value, it wraps back to a minimum PID value. 53 | # PIDs of value pid_max or larger are not allocated. 54 | # 55 | # A suggested value for pid_max is 1024 * <# of cpu cores/threads in system> 56 | # e.g., a box with 32 cpus, the default of 32768 is reasonable, for 64 cpus, 57 | # 65536, for 4096 cpus, 4194304 (which is the upper limit possible). 58 | #kernel.pid_max = 65536 59 | 60 | # The swappiness parameter controls the tendency of the kernel to move 61 | # processes out of physical memory and onto the swap disk. 
62 | # 0 tells the kernel to avoid swapping processes out of physical memory 63 | # for as long as possible 64 | # 100 tells the kernel to aggressively swap processes out of physical memory 65 | # and move them to swap cache 66 | vm.swappiness=10 67 | 68 | # Turn off NUMA 69 | kernel.numa_balancing = 0 70 | -------------------------------------------------------------------------------- /states/openstack/neutron/compute.sls: -------------------------------------------------------------------------------- 1 | # 2 | # https://docs.openstack.org/ocata/install-guide-rdo/neutron-controller-install.html 3 | # 4 | 5 | {% from "openstack/mysql/map.jinja" import mysql with context %} 6 | 7 | {% set mysql_host = salt['pillar.get']('openstack:controller:host') %} 8 | 9 | {% set neutron_dbpass = salt['pillar.get']('openstack:auth:NEUTRON_DBPASS') %} 10 | {% set neutron_pass = salt['pillar.get']('openstack:auth:NEUTRON_PASS') %} 11 | {% set nova_pass = salt['pillar.get']('openstack:auth:NOVA_PASS') %} 12 | {% set rabbit_pass = salt['pillar.get']('openstack:auth:RABBIT_PASS') %} 13 | {% set metadata_secret = salt['pillar.get']('openstack:auth:METADATA_SECRET') %} 14 | {% set provider_interface_name = salt['pillar.get']('openstack:neutron:compute_provider_interface_name') %} 15 | {% set controller = salt['pillar.get']('openstack:controller:host') %} 16 | 17 | neutron-compute-packages: 18 | pkg.installed: 19 | - pkgs: 20 | - openstack-neutron-linuxbridge 21 | - ebtables 22 | - ipset 23 | 24 | /etc/neutron/neutron.conf: 25 | ini.options_present: 26 | - sections: 27 | DEFAULT: 28 | transport_url: rabbit://openstack:{{ rabbit_pass }}@controller 29 | auth_strategy: keystone 30 | # database: 31 | # connection: '' 32 | keystone_authtoken: 33 | auth_uri: http://{{ controller }}:5000 34 | auth_url: http://{{ controller }}:35357 35 | memcached_servers: {{ controller }}:11211 36 | auth_type: password 37 | project_domain_name: default 38 | user_domain_name: default 39 | project_name: service 40 | username: neutron 41 | password: {{ neutron_pass }} 42 | oslo_concurrency: 43 | lock_path: /var/lib/neutron/tmp 44 | 45 | /etc/neutron/plugins/ml2/linuxbridge_agent.ini: 46 | ini.options_present: 47 | - sections: 48 | linux_bridge: 49 | physical_interface_mappings: {{ provider_interface_name }} 50 | vxlan: 51 | enable_vxlan: false 52 | securitygroup: 53 | enable_security_group: true 54 | firewall_driver: neutron.agent.linux.iptables_firewall.IptablesFirewallDriver 55 | 56 | 57 | /etc/nova/nova.conf: 58 | ini.options_present: 59 | - sections: 60 | neutron: 61 | url: http://{{ controller }}:9696 62 | auth_url: http://{{ controller }}:35357 63 | auth_type: password 64 | project_domain_name: default 65 | user_domain_name: default 66 | region_name: RegionOne 67 | project_name: service 68 | username: neutron 69 | password: {{ neutron_pass }} 70 | # service_metadata_proxy: true 71 | # metadata_proxy_shared_secret: {{ metadata_secret }} 72 | 73 | restart-compute: 74 | cmd.run: 75 | - name: systemctl restart openstack-nova-compute 76 | 77 | neutron-linuxbridge-agent: 78 | service.running: 79 | - enable: True 80 | 81 | 82 | -------------------------------------------------------------------------------- /states/pxeserver/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### Setup PXE Server on MintBox2 3 | 4 | Create a PXE Server that can serve out both CentOS 6 and 7 images. 5 | 6 | 1. 
As root, create the directory /var/tmp/iso and download the required ISO images and boot files 7 | 8 | Using wget: 9 | 10 | ```bash 11 | mkdir -p /var/tmp/iso/centos/7 /var/tmp/iso/centos/6 12 | 13 | cd /var/tmp/iso/centos/7 14 | wget http://mirror.umd.edu/centos/7/isos/x86_64/CentOS-7-x86_64-Everything-1511.iso 15 | wget http://mirror.umd.edu/centos/7.2.1511/os/x86_64/isolinux/initrd.img 16 | wget http://mirror.umd.edu/centos/7.2.1511/os/x86_64/isolinux/vmlinuz 17 | 18 | cd /var/tmp/iso/centos/6 19 | wget http://mirror.umd.edu/centos/6/isos/x86_64/CentOS-6.7-x86_64-bin-DVD1.iso 20 | wget http://mirror.umd.edu/centos/6/os/x86_64/isolinux/initrd.img 21 | wget http://mirror.umd.edu/centos/6/os/x86_64/isolinux/vmlinuz 22 | ``` 23 | 24 | 25 | 2. Call the pxeserver state to set up the PXE server components 26 | 27 | ```bash 28 | salt 'workstation1' state.sls pxeserver 29 | ``` 30 | 31 | The state file does the following: 32 | - Installs dhcp, httpd, syslinux, tftp-server and vsftpd 33 | - Creates the /var/www/html/repo directory to host files with Apache (destination of reposync.sh) 34 | - Installs the kickstart files to /var/www/html/repo 35 | - Generates the /etc/dhcp/dhcpd.conf file 36 | - Recursively copies the bootloaders from /usr/share/syslinux/* to /var/lib/tftpboot 37 | - Installs the default boot menu to the /var/lib/tftpboot/pxelinux.cfg directory 38 | - Installs the bootstrap files from /var/tmp/iso to /var/lib/tftpboot/centos/7 and 6 39 | - Mounts the CentOS 7 and 6 ISOs in /var/ftp/pub/centos (if you reboot the server you need to run the state again) 40 | - Copies the reposync.sh script to /usr/local/bin 41 | - Starts the httpd, dhcpd, vsftpd and xinetd services 42 | 43 | 3. Run the reposync.sh script to populate the /var/www/html/repo directory. This can take a very long time on the first run. The script uses the University of Maryland mirror, which is fast for me; change it to whichever mirror is fastest for you. Add it to cron to run nightly so the local copy stays in sync with the mirror. Once it has finished, all the machines in the data center can update packages from your mirror. 44 | 45 | Using reposync.sh: 46 | ```bash 47 | /usr/local/bin/reposync.sh 48 | ``` 49 | 50 | ``` 51 | [root@workstation2 pillar]$ cat /etc/crontab 52 | SHELL=/bin/bash 53 | PATH=/sbin:/bin:/usr/sbin:/usr/bin 54 | MAILTO=root 55 | 56 | 0 4 * * * root /usr/local/bin/reposync.sh > /var/log/reposync.out 2>&1 57 | ``` 58 | 59 | 4.
Verify the installation 60 | 61 | - ftp is available at ftp://10.0.0.6/pub 62 | - repo is available at http://yumrepo/repo/ 63 | 64 | ##### References 65 | - [CentOS: Install PXE Server On CentOS 7](ravindrayadava.blogspot.com/2014/10/centos-install-pxe-server-on-centos-7.html) 66 | - [Setting up a ‘PXE Network Boot Server’ for Multiple Linux Distribution Installations in RHEL/CentOS 7](http://www.tecmint.com/install-pxe-network-boot-server-in-centos-7/) 67 | - [PXE Boot Linux Install CentOS 6 – Part 1](https://conradjonesit.wordpress.com/2013/07/07/pxe-boot-linux-install-centos-6/) 68 | 69 | -------------------------------------------------------------------------------- /states/openstack/auth/init.sls: -------------------------------------------------------------------------------- 1 | {% set controller = salt['pillar.get']('openstack:controller:host') %} 2 | 3 | dir_setup: 4 | file.directory: 5 | - name: {{ salt['pillar.get']('openstack:tools_dir') }} 6 | - user: {{ salt['pillar.get']('openstack:user') }} 7 | - group: {{ salt['pillar.get']('openstack:user') }} 8 | - mode: 775 9 | 10 | auth_setup: 11 | file.managed: 12 | - name: {{ salt['pillar.get']('openstack:tools_dir') }}/auth-openrc.sh 13 | - user: {{ salt['pillar.get']('openstack:user') }} 14 | - group: {{ salt['pillar.get']('openstack:user') }} 15 | - mode: 755 16 | - create: True 17 | - contents: | 18 | export ADMIN_PASS={{ salt['pillar.get']('openstack:auth:ADMIN_PASS') }} 19 | export CINDER_DBPASS={{ salt['pillar.get']('openstack:auth:CINDER_DBPASS') }} 20 | export CINDER_PASS={{ salt['pillar.get']('openstack:auth:CINDER_PASS') }} 21 | export DASH_DBPASS={{ salt['pillar.get']('openstack:auth:DASH_DBPASS') }} 22 | export DEMO_PASS={{ salt['pillar.get']('openstack:auth:DEMO_PASS') }} 23 | export GLANCE_DBPASS={{ salt['pillar.get']('openstack:auth:GLANCE_DBPASS') }} 24 | export GLANCE_PASS={{ salt['pillar.get']('openstack:auth:GLANCE_DBPASS') }} 25 | export KEYSTONE_DBPASS={{ salt['pillar.get']('openstack:auth:KEYSTONE_DBPASS') }} 26 | export METADATA_SECRET={{ salt['pillar.get']('openstack:auth:METADATA_SECRET') }} 27 | export NEUTRON_DBPASS={{ salt['pillar.get']('openstack:auth:NEUTRON_DBPASS') }} 28 | export NEUTRON_PASS={{ salt['pillar.get']('openstack:auth:NEUTRON_PASS') }} 29 | export NOVA_DBPASS={{ salt['pillar.get']('openstack:auth:NOVA_DBPASS') }} 30 | export NOVA_PASS={{ salt['pillar.get']('openstack:auth:NOVA_PASS') }} 31 | export PLACEMENT_PASS={{ salt['pillar.get']('openstack:auth:PLACEMENT_PASS') }} 32 | export RABBIT_PASS={{ salt['pillar.get']('openstack:auth:RABBIT_PASS') }} 33 | 34 | admin_setup: 35 | file.managed: 36 | - name: {{ salt['pillar.get']('openstack:tools_dir') }}/admin-openrc.sh 37 | - user: {{ salt['pillar.get']('openstack:user') }} 38 | - group: {{ salt['pillar.get']('openstack:user') }} 39 | - mode: 755 40 | - create: True 41 | - contents: | 42 | export OS_USERNAME=admin 43 | export OS_PASSWORD=$ADMIN_PASS 44 | export OS_PROJECT_NAME=admin 45 | export OS_USER_DOMAIN_NAME=Default 46 | export OS_PROJECT_DOMAIN_NAME=Default 47 | export OS_AUTH_URL=http://{{ controller }}:35357/v3 48 | export OS_IDENTITY_API_VERSION=3 49 | export OS_IMAGE_API_VERSION=2 50 | 51 | demo_setup: 52 | file.managed: 53 | - name: {{ salt['pillar.get']('openstack:tools_dir') }}/demo-openrc.sh 54 | - user: {{ salt['pillar.get']('openstack:user') }} 55 | - group: {{ salt['pillar.get']('openstack:user') }} 56 | - mode: 755 57 | - contents: | 58 | export OS_PROJECT_DOMAIN_NAME=Default 59 | export OS_USER_DOMAIN_NAME=Default 60 | 
export OS_PROJECT_NAME=demo 61 | export OS_USERNAME=demo 62 | export OS_PASSWORD=$DEMO_PASS 63 | export OS_AUTH_URL=http://{{ controller }}:5000/v3 64 | export OS_IDENTITY_API_VERSION=3 65 | export OS_IMAGE_API_VERSION=2 66 | 67 | 68 | -------------------------------------------------------------------------------- /states/pxeserver/files/reposync.sh: -------------------------------------------------------------------------------- 1 | 2 | ##exit 3 | 4 | #################################################################################### 5 | ### CentOS 6 6 | #################################################################################### 7 | 8 | mkdir -p /var/www/html/repo/centos/6/os 9 | mkdir -p /var/www/html/repo/centos/6/updates 10 | mkdir -p /var/www/html/repo/centos/6/extras 11 | 12 | cd /var/www/html/repo/centos/6 13 | 14 | rsync -avrt --delete --exclude 'i386*' --exclude 'debug' --exclude 'drpms' rsync://mirror.umd.edu/centos/6/os . 15 | rsync -avrt --delete --exclude 'i386*' --exclude 'debug' --exclude 'drpms' rsync://mirror.umd.edu/centos/6/updates . 16 | rsync -avrt --delete --exclude 'i386*' --exclude 'debug' --exclude 'drpms' rsync://mirror.umd.edu/centos/6/extras . 17 | 18 | #################################################################################### 19 | ### CentOS 6 - EPEL 20 | #################################################################################### 21 | 22 | mkdir -p /var/www/html/repo/centos/6/epel 23 | 24 | cd /var/www/html/repo/centos/6/epel 25 | 26 | #rsync -avrt rsync://mirror.pnl.gov/epel/6/i386 . 27 | rsync -avrt --delete --exclude 'debug' --exclude 'drpms' rsync://mirror.pnl.gov/epel/6/x86_64 . 28 | 29 | #################################################################################### 30 | ### CentOS 7 31 | #################################################################################### 32 | 33 | mkdir -p /var/www/html/repo/centos/7/os 34 | mkdir -p /var/www/html/repo/centos/7/updates 35 | mkdir -p /var/www/html/repo/centos/7/extras 36 | 37 | cd /var/www/html/repo/centos/7 38 | 39 | rsync -avrt --delete --exclude 'debug' --exclude 'drpms' rsync://mirror.umd.edu/centos/7/os . 40 | rsync -avrt --delete --exclude 'debug' --exclude 'drpms' rsync://mirror.umd.edu/centos/7/updates . 41 | rsync -avrt --delete --exclude 'debug' --exclude 'drpms' rsync://mirror.umd.edu/centos/7/extras . 42 | 43 | #################################################################################### 44 | ### CentOS 7 - EPEL 45 | #################################################################################### 46 | 47 | mkdir -p /var/www/html/repo/centos/7/epel 48 | cd /var/www/html/repo/centos/7/epel 49 | rsync -avrt --delete --exclude 'debug' --exclude 'drpms' rsync://mirror.pnl.gov/epel/7/x86_64 . 
50 | 51 | #################################################################################### 52 | ### Saltstack 53 | #################################################################################### 54 | 55 | reposync -p /var/www/html/repo/redhat/7/saltstack/latest --repoid=saltstack-repo --norepopath 56 | createrepo /var/www/html/repo/redhat/7/saltstack/latest 57 | 58 | #################################################################################### 59 | 60 | chown -R root.root /var/www/html/repo/* 61 | 62 | #################################################################################### 63 | # 64 | # rsync to USB stick: 65 | # 66 | # $ mount /dev/sdb1 /mnt 67 | # $ rsync -avt --delete /var/www/html/repo/ /mnt/repo 68 | # 69 | # rsync the to another system: 70 | # 71 | # $ rsync -avtz --delete /var/www/html/repo/ workstation1:/var/www/html/repo 72 | # 73 | #################################################################################### 74 | -------------------------------------------------------------------------------- /states/openstack/AddComputeNodes.md: -------------------------------------------------------------------------------- 1 | 2 | ## Add Compute Nodes 3 | 4 | 5 | 1. From the controller, add the compute service to the node 6 | 7 | ``` 8 | salt 'compute2' state.sls openstack.nova.compute 9 | ``` 10 | 11 | 2. Verify the changes 12 | 13 | ``` 14 | [root@controller ~]$ openstack hypervisor list 15 | +----+---------------------+-----------------+-----------+-------+ 16 | | ID | Hypervisor Hostname | Hypervisor Type | Host IP | State | 17 | +----+---------------------+-----------------+-----------+-------+ 18 | | 1 | compute1.lab.local | QEMU | 10.0.0.31 | up | 19 | | 2 | compute2.lab.local | QEMU | 10.0.0.32 | up | 20 | +----+---------------------+-----------------+-----------+-------+ 21 | [root@controller ~]$ openstack compute service list 22 | +----+------------------+----------------------+----------+---------+-------+----------------------------+ 23 | | ID | Binary | Host | Zone | Status | State | Updated At | 24 | +----+------------------+----------------------+----------+---------+-------+----------------------------+ 25 | | 3 | nova-consoleauth | controller.lab.local | internal | enabled | up | 2017-07-28T01:19:56.000000 | 26 | | 4 | nova-scheduler | controller.lab.local | internal | enabled | up | 2017-07-28T01:20:04.000000 | 27 | | 5 | nova-conductor | controller.lab.local | internal | enabled | up | 2017-07-28T01:20:05.000000 | 28 | | 6 | nova-compute | compute1.lab.local | nova | enabled | up | 2017-07-28T01:20:06.000000 | 29 | | 7 | nova-compute | compute2.lab.local | nova | enabled | up | 2017-07-28T01:20:00.000000 | 30 | +----+------------------+----------------------+----------+---------+-------+----------------------------+ 31 | [root@controller ~]$ 32 | ``` 33 | 34 | 3. From the controller, add the network service to the node 35 | 36 | ``` 37 | salt 'compute2' state.sls openstack.neutron.compute 38 | ``` 39 | 40 | 4. 
Verify the changes 41 | 42 | ``` 43 | [root@controller ~]$ openstack network agent list 44 | +--------------------------------------+--------------------+----------------------+-------------------+-------+-------+---------------------------+ 45 | | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | 46 | +--------------------------------------+--------------------+----------------------+-------------------+-------+-------+---------------------------+ 47 | | 3262b218-c296-4584-a100-4dafd125797c | Linux bridge agent | compute2.lab.local | None | True | UP | neutron-linuxbridge-agent | 48 | | 4ef9648a-14e4-4e7b-b250-42d3545017c3 | Metadata agent | controller.lab.local | None | True | UP | neutron-metadata-agent | 49 | | 52fa7337-d6af-44c5-a61e-374a3e35fc6f | Linux bridge agent | compute1.lab.local | None | True | UP | neutron-linuxbridge-agent | 50 | | 726787a4-7332-4cf5-91c8-6477415a5d73 | Linux bridge agent | controller.lab.local | None | True | UP | neutron-linuxbridge-agent | 51 | | 9ab9a94a-f7a6-46f9-ad4a-52bf0efd704d | DHCP agent | controller.lab.local | nova | True | UP | neutron-dhcp-agent | 52 | +--------------------------------------+--------------------+----------------------+-------------------+-------+-------+---------------------------+ 53 | ``` 54 | 55 | -------------------------------------------------------------------------------- /states/disks/README.md: -------------------------------------------------------------------------------- 1 | 2 | #### Disks 3 | - Disk partition info: 4 | - `fdisk -l /dev/sda` 5 | - `parted -l /dev/sda` 6 | - Disk info: `hdparm -iI /dev/sda` 7 | - Disk speed measurement: `hdparm -tT /dev/sda` 8 | - DD write (cache): `dd if=/dev/zero of=/tmp/output.dat bs=1MB count=100` 9 | - DD write (synchronous IO): `dd if=/dev/zero of=/tmp/output.dat bs=1MB count=100 conv=fsync` 10 | - DD read: `dd if=/tmp/output.dat of=/dev/null bs=4096k` 11 | - Linux md devices (aka RAID): `mdadm` 12 | - Destroy data: `dd if=/dev/zero of=/dev/sdb bs=4096 count=512` 13 | 14 | ##### Block devices 15 | ``` 16 | lsblk 17 | blkid 18 | ``` 19 | 20 | ##### parted 21 | 22 | ``` 23 | parted /dev/sdb mklabel gpt -s 24 | parted /dev/sdb mkpart primary 0% 100% -s 25 | parted /dev/sdb align-check optimal 1 26 | 27 | mke2fs -t ext4 -O ^has_journal /dev/sdb1 28 | tune2fs -O ^has_journal /dev/sdb1 29 | ``` 30 | 31 | ``` 32 | [root@workstation2 ~]# salt 'ring-a1' partition.mklabel /dev/sdb gpt 33 | ring-a1: 34 | [root@workstation2 ~]# salt 'ring-a1' partition.mkpart /dev/sdb primary fs_type=ext2 start=2048s end=100% 35 | ring-a1: 36 | [root@workstation2 ~]# salt 'ring-a1' extfs.mkfs /dev/sdb1 fs_type=ext4 37 | ``` 38 | 39 | ##### LVM: 40 | ``` 41 | pvdisplay 42 | lvdisplay 43 | lvmdiskscan 44 | ``` 45 | 46 | ### dd 47 | 48 | - Use fdatasync attribute to show the status rate only after the data is completely written to disk. 
49 | ``` 50 | [root@workstation2 mnt]# dd if=/dev/zero of=test1.dat bs=1MB count=100 51 | 100+0 records in 52 | 100+0 records out 53 | 100000000 bytes (100 MB) copied, 0.0998435 s, 1.0 GB/s 54 | [root@workstation2 mnt]# dd if=/dev/zero of=test2.dat bs=1MB count=100 conv=fdatasync 55 | 100+0 records in 56 | 100+0 records out 57 | 100000000 bytes (100 MB) copied, 11.1252 s, 9.0 MB/s 58 | [root@workstation2 mnt]# 59 | ``` 60 | 61 | ##### exFAT 62 | 63 | Mount a USB drive 64 | 65 | - Add support for exfat FS: `yum install fuse-exfat exfat-utils` 66 | - mount the partition as exfat: `mount -t exfat /dev/sdb1 /mnt` 67 | 68 | - Example: Using parted to create an exFAT filesystem on a microSD card for a GoPro Hero3 69 | ``` 70 | dd if=/dev/zero of=/dev/sdb bs=4096 count=512 71 | 72 | parted /dev/sdb mklabel msdos 73 | parted /dev/sdb mkpart primary fat32 0% 100% 74 | parted /dev/sdb print free 75 | parted /dev/sdb align-check opt 1 76 | /sbin/mkfs -t fat /dev/sdb1 77 | 78 | mount /dev/sdb1 /mnt 79 | 80 | ``` 81 | 82 | - Example: Using parted to create an ext4 filesystem on a USB stick: 83 | ``` 84 | parted -l 85 | dd if=/dev/zero of=/dev/sdb bs=4096 count=512 86 | 87 | parted /dev/sdb mklabel gpt 88 | parted /dev/sdb mkpart primary ext4 0% 100% 89 | parted /dev/sdb print free 90 | parted /dev/sdb align-check opt 1 91 | 92 | cat /proc/partitions 93 | 94 | /sbin/mkfs -t ext4 /dev/sdb1 95 | 96 | mount -t ext4 /dev/sdb1 /mnt 97 | 98 | ls -l /mnt 99 | df 100 | 101 | findmnt /dev/sdb1 102 | ``` 103 | 104 | Disk Performance: `dd if=/dev/zero of=test2.dat bs=1MB count=100 conv=fdatasync` 105 | 106 | | Device | Type | Speed | Interface | 107 | |--------|------|-------|-----------| 108 | | MicroSD c10 | Flash | 10 MB/s | USB 2.0| 109 | | Hitachi HCC54755 | HDD | 80 MB/s | SATA | 110 | | INTEL SSDSC2BB30 | SSD | 275 MB/s | SATA | 111 | 112 | #### References 113 | - [Disk Caching](http://www.linuxatemyram.com/index.html) 114 | - [http://blackbird.si/tips-for-optimizing-disk-performance-on-linux](http://blackbird.si/tips-for-optimizing-disk-performance-on-linux/) 115 | - [http://linuxmantra.com/2013/11/disk-read-ahead-in-linux.html](http://linuxmantra.com/2013/11/disk-read-ahead-in-linux.html) 116 | -------------------------------------------------------------------------------- /states/pxeserver/init.sls: -------------------------------------------------------------------------------- 1 | 2 | pxeserver_pkgs: 3 | pkg.installed: 4 | - pkgs: 5 | - dhcp 6 | - httpd 7 | - syslinux 8 | - tftp-server 9 | - vsftpd 10 | 11 | /var/www/html/repo: 12 | file.directory: 13 | - target: /var/www/html/repo 14 | 15 | /var/www/html/ks6.cfg: 16 | file.managed: 17 | - name: /var/www/html/ks6.cfg 18 | - source: salt://pxeserver/files/ks6.cfg 19 | 20 | /var/www/html/ks7.cfg: 21 | file.managed: 22 | - name: /var/www/html/ks7.cfg 23 | - source: salt://pxeserver/files/ks7.cfg 24 | 25 | /etc/dhcp/dhcpd.conf: 26 | file.managed: 27 | - name: /etc/dhcp/dhcpd.conf 28 | - source: salt://pxeserver/files/dhcpd.conf 29 | - template: jinja 30 | 31 | copy_bootloaders: 32 | cmd.run: 33 | - name: cp -r /usr/share/syslinux/* /var/lib/tftpboot/ 34 | 35 | pxelinux_default: 36 | file.managed: 37 | - name: /var/lib/tftpboot/pxelinux.cfg/default 38 | - source: salt://pxeserver/files/pxelinux.cfg.default 39 | - makedirs: True 40 | 41 | /var/lib/tftpboot/centos/7: 42 | file.directory: 43 | - name: /var/ftp/tftpboot/centos/7 44 | - makedirs: True 45 | 46 | /var/lib/tftpboot/centos/7/initrd.img: 47 | file.managed: 48 | - name: 
/var/ftp/tftpboot/centos/7/initrd.img 49 | - source: /var/www/html/repo/centos/7/os/x86_64/isolinux/initrd.img 50 | 51 | /var/lib/tftpboot/centos/7/vmlinuz: 52 | file.managed: 53 | - name: /var/ftp/tftpboot/centos/7/vmlinuz 54 | - source: /var/www/html/repo/centos/7/os/x86_64/isolinux/vmlinuz 55 | 56 | /var/ftp/pub/centos/7: 57 | file.directory: 58 | - name: /var/ftp/pub/centos/7 59 | - makedirs: True 60 | mount.mounted: 61 | - device: /var/tmp/iso/centos/7/CentOS-7-x86_64-Everything-1511.iso 62 | - opts: loop,ro 63 | - mkmnt: True 64 | - persist: True 65 | - fstype: iso9660 66 | 67 | #/var/lib/tftpboot/centos/6: 68 | # file.directory: 69 | # - name: /var/ftp/tftpboot/centos/6 70 | # - makedirs: True 71 | 72 | #/var/lib/tftpboot/centos/6/initrd.img: 73 | # file.managed: 74 | # - name: /var/ftp/tftpboot/centos/6/initrd.img 75 | # - source: /var/www/html/repo/centos/6/os/x86_64/isolinux/initrd.img 76 | 77 | #/var/lib/tftpboot/centos/6/vmlinuz: 78 | # file.managed: 79 | # - name: /var/ftp/tftpboot/centos/6/vmlinuz 80 | # - source: /var/www/html/repo/centos/6/os/x86_64/isolinux/vmlinuz 81 | 82 | #/var/ftp/pub/centos/6: 83 | # file.directory: 84 | # - name: /var/ftp/pub/centos/6 85 | # - makedirs: True 86 | # mount.mounted: 87 | # - device: /var/tmp/iso/centos/6/CentOS-6.8-x86_64-bin-DVD1.iso 88 | # - opts: loop,ro 89 | # - mkmnt: True 90 | # - persist: True 91 | # - fstype: iso9660 92 | 93 | reposync.sh: 94 | file.managed: 95 | - name: /usr/local/bin/reposync.sh 96 | - source: salt://pxeserver/files/reposync.sh 97 | - mode: 755 98 | 99 | pxeserver_httpd_service: 100 | service.running: 101 | - name: httpd 102 | - enable: True 103 | require: 104 | - file: /var/www/html/repo 105 | 106 | dhcp_service: 107 | service.running: 108 | - name: dhcpd 109 | - enable: True 110 | - reload: True 111 | - watch: 112 | - file: /etc/dhcp/dhcpd.conf 113 | 114 | vsftpd_service: 115 | service.running: 116 | - name: vsftpd 117 | - enable: True 118 | 119 | /etc/xinetd.d/tftp: 120 | file.managed: 121 | - name: /etc/xinetd.d/tftp 122 | - source: salt://pxeserver/files/tftp 123 | 124 | xinetd_service: 125 | service.running: 126 | - name: xinetd 127 | - enable: True 128 | - require: 129 | - file: /etc/xinetd.d/tftp 130 | 131 | -------------------------------------------------------------------------------- /states/pxeserver/files/ks6.cfg: -------------------------------------------------------------------------------- 1 | #version=RHEL6 2 | 3 | # the Setup Agent is not started the first time the system boots 4 | firstboot --disable 5 | 6 | # Keyboard layouts 7 | keyboard us 8 | # System language 9 | lang en_US.UTF-8 10 | # timezone 11 | timezone --utc UTC 12 | # text mode (no graphical mode) 13 | text 14 | # do not configure X 15 | skipx 16 | # non-interactive command line mode 17 | cmdline 18 | # install 19 | install 20 | 21 | ################################################################################ 22 | 23 | # root password 24 | rootpw --iscrypted $6$kwlYiwH6E7ns4Vre$qNbXf3oFF7YRhv9rLTSK81XHkzc2TmuVKZEZJ1s.UwXklNduDCTi9jUpdRp61ejwnxxn9GVMLcOVfhn6iKakT/ 25 | # System authorization information 26 | auth --enableshadow --passalgo=sha512 27 | 28 | user --groups=wheel --homedir=/home/devops --name=devops --password=$6$4vXFazWxYNlGkwXT$Ddbr5NO8SI0S9sO.SuBT6qzfNmAAauz1wgQFIIS4AQaYlrAOn.BQ.GKRWIGmbaG3ZlWZ2V0NYfL/4Z43aNf.w/ --iscrypted --gecos="devops" 29 | 30 | ################################################################################ 31 | 32 | selinux --disabled 33 | firewall --disabled 34 | 35 | 
################################################################################ 36 | 37 | network --onboot yes --device eth0 --bootproto dhcp 38 | 39 | ################################################################################ 40 | 41 | # installation path 42 | url --url=http://10.0.0.6/repo/centos/6/os/x86_64 43 | 44 | # repository 45 | repo --name="CentOS Base" --baseurl=http://10.0.0.6/repo/centos/6/os/x86_64 46 | repo --name="CentOS Update" --baseurl=http://10.0.0.6/repo/centos/6/updates/x86_64 47 | repo --name="EPEL" --baseurl=http://10.0.0.6/repo/centos/6/epel/x86_64 48 | 49 | ################################################################################ 50 | # Partitioning 51 | ################################################################################ 52 | 53 | zerombr 54 | clearpart --all --initlabel 55 | bootloader --location=mbr --driveorder=sda 56 | 57 | part / --fstype="ext4" --ondisk=sda --size=1 --grow --fsoptions=rw,noatime 58 | part /boot --fstype="ext4" --ondisk=sda --size=512 --fsoptions=rw,noatime 59 | part swap --fstype="swap" --ondisk=sda --size=4096 60 | 61 | #part /var --fstype="ext4" --ondisk=sdb --size=1 --grow --fsoptions=rw,noatime 62 | 63 | ################################################################################ 64 | # Preinstall 65 | ################################################################################ 66 | %pre 67 | ###/usr/sbin/parted -s /dev/sda mklabel gpt 68 | 69 | %end 70 | ################################################################################ 71 | # Packages 72 | ################################################################################ 73 | %packages --nobase --ignoremissing 74 | @core 75 | -aic94xx-firmware* 76 | -alsa-* 77 | -avahi 78 | -ivtv* 79 | -iwl*firmware 80 | -ModemManager* 81 | -NetworkManager* 82 | -wpa_supplicant 83 | -ipw* 84 | -zd1211-firmware* 85 | -atmel-firmware 86 | -rt61pci-firmware 87 | curl 88 | parted 89 | salt-minion 90 | %end 91 | ################################################################################ 92 | # Postinstall 93 | ################################################################################ 94 | %post 95 | 96 | #IP=`ip a show enp0s20f0 | grep 'inet ' | awk '{print $2}' | cut -f1 -d '/'` 97 | #echo $IP > /root/ip.out 98 | ip a > /root/ip.out 99 | 100 | curl http://10.0.0.6/hosts > /etc/hosts 101 | 102 | hostname -s > /etc/salt/minion_id 103 | systemctl enable salt-minion.service 104 | 105 | %end 106 | ################################################################################ 107 | 108 | # Reboot 109 | reboot 110 | 111 | -------------------------------------------------------------------------------- /states/pxeserver/files/ks7.cfg: -------------------------------------------------------------------------------- 1 | #version=RHEL7 2 | 3 | eula --agreed 4 | 5 | # the Setup Agent is not started the first time the system boots 6 | firstboot --disable 7 | 8 | # Keyboard layouts 9 | keyboard --vckeymap=us --xlayouts='us' 10 | # System language 11 | lang en_US.UTF-8 12 | # timezone 13 | timezone --utc UTC 14 | # text mode (no graphical mode) 15 | text 16 | # do not configure X 17 | skipx 18 | # non-interactive command line mode 19 | cmdline 20 | # install 21 | install 22 | 23 | ################################################################################ 24 | 25 | # root password 26 | rootpw password1 27 | # System authorization information 28 | auth --enableshadow --passalgo=sha512 29 | 30 | user --groups=wheel --homedir=/home/devops --name=devops 
--password=password1 --gecos="devops" 31 | 32 | ################################################################################ 33 | 34 | selinux --disabled 35 | firewall --disabled 36 | 37 | network --bootproto=dhcp --device=enp0s20f0 --noipv6 --onboot=yes --mtu=9000 --activate 38 | # network 39 | #network --bootproto=static --device=enp0s20f1 --noipv6 --onboot=no 40 | #network --bootproto=static --device=enp0s20f2 --noipv6 --onboot=no 41 | #network --bootproto=static --device=enp0s20f3 --noipv6 --onboot=no 42 | 43 | ################################################################################ 44 | 45 | # installation path 46 | url --url=http://10.0.0.6/repo/centos/7/os/x86_64 47 | 48 | # repository 49 | repo --name="CentOS Base" --baseurl=http://10.0.0.6/repo/centos/7/os/x86_64 50 | repo --name="CentOS Update" --baseurl=http://10.0.0.6/repo/centos/7/updates/x86_64 51 | repo --name="EPEL" --baseurl=http://10.0.0.6/repo/centos/7/epel/x86_64 52 | 53 | ################################################################################ 54 | # Partitioning 55 | ################################################################################ 56 | 57 | zerombr 58 | clearpart --all --initlabel 59 | bootloader --location=mbr --boot-drive=sda 60 | 61 | part /boot --fstype="xfs" --ondisk=sda --size=512 --fsoptions=rw,noatime 62 | part swap --asprimary --fstype="swap" --ondisk=sda --size=4096 63 | part / --fstype="xfs" --ondisk=sda --size=1 --grow --fsoptions=rw,noatime 64 | 65 | ################################################################################ 66 | # Preinstall 67 | ################################################################################ 68 | #%pre 69 | ###/usr/sbin/parted -s /dev/sda mklabel gpt 70 | #%end 71 | ################################################################################ 72 | # Packages 73 | ################################################################################ 74 | %packages --nobase --ignoremissing 75 | @core 76 | -aic94xx-firmware* 77 | -alsa-* 78 | -avahi 79 | -ivtv* 80 | -iwl*firmware 81 | -ModemManager* 82 | -NetworkManager* 83 | -wpa_supplicant 84 | curl 85 | dstat 86 | iperf3 87 | lshw 88 | lsof 89 | net-snmp 90 | net-snmp-utils 91 | ntp 92 | numactl 93 | openssh-clients 94 | parted 95 | rsync 96 | salt-minion 97 | screen 98 | sysstat 99 | #teamd 100 | wget 101 | %end 102 | ################################################################################ 103 | # Postinstall 104 | ################################################################################ 105 | %post 106 | 107 | #IP=`ip a show enp0s20f0 | grep 'inet ' | awk '{print $2}' | cut -f1 -d '/'` 108 | #echo ${IP} > /root/enp0s20f0.out 109 | #ip a > /root/ip.out 110 | 111 | curl http://10.0.0.6/hosts > /etc/hosts 112 | 113 | mkdir -p /etc/salt 114 | hostname -s > /etc/salt/minion_id 115 | systemctl enable salt-minion.service 116 | 117 | %end 118 | ################################################################################ 119 | 120 | # Reboot 121 | reboot 122 | 123 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Salt tools for bare-metal provisioning 2 | 3 | Contents: 4 | 1. [Introduction](#introduction) 5 | 2. [Lab Infrastructure](#lab-infrastructure) 6 | 3. [Lab Setup](#labsetup) 7 | 4. [Useful Commands](#useful-commands) 8 | 5. 
[References](#references) 9 | 10 | 11 | ### Introduction 12 | 13 | This is the reference setup I use in my home lab. 14 | 15 | SaltStack (Salt) is used in conjunction with PXE server/kickstart to install and provision multiple bare-metal machines running CentOS. The machines designated as the Salt masters have the OS installed installed manually, and the Salt minions are installed via PXE/kickstart. 16 | 17 | Tested against salt 2015.5.10 (Lithium) 18 | 19 | ### Lab Infrastructure 20 | 21 | - 3 [MintBox2](http://www.fit-pc.com/web/products/mintbox/mintbox-2/) 22 | - 6 [Supermicro SYS-5018A-TN4](http://www.newegg.com/Product/Product.aspx?Item=N82E16816101836) 23 | - 5 [Supermicro SYS-5108A-FTN4](http://www.newegg.com/Product/Product.aspx?Item=N82E16816101837) 24 | - 4 [Dell Powerconnect 6224 L3 Switches](http://www.dell.com/us/business/p/powerconnect-6200-series/pd) 25 | 26 | ![Lab](notes/20180310_231645.jpg) 27 | 28 | The MintBox2 machines are Salt masters running CentOS 7 with the MATE desktop. They are also referred to as the **workstation** machines. They are also the PXE servers, yum mirror, and NTP servers. 29 | 30 | The Supermicros are Salt minions running CentOS 7. 31 | 32 | #### Network infrastructure 33 | 34 | On the Supermicro 5018-series hardware all 4 interfaces are bonded as bond0 using 802.3ad and LACP mode 4 35 | 36 | View the pillar configuration here: [pillar/superlab.sls](pillar/superlab.sls) 37 | 38 | If you plan to fork the repository, unless you use the exact same hardware and IP addressing scheme you need to change the pillar and other network infrastructure files appropriately. There should be no other hardcoded dependency changes to make. 39 | 40 | ### Lab Setup 41 | 42 | 1. [Install CentOS 7 on MintBox2](notes/centos-7-manual.md) 43 | 1. [Setup Salt Master and Minion on MintBox2](notes/setup-salt.md) 44 | 1. [Setup Git and saltstack-base repository on MintBox2](notes/saltstack-base-setup.md) 45 | 1. [Run Initial Highstate](notes/highstate.md) 46 | 1. [Setup PXE Server on MintBox2](states/pxeserver/README.md) 47 | 1. [Install Supermicros (or other MintBox2) via PXE Server](notes/pxe-install.md) 48 | 1. 
[Run States on Minions](notes/run-states.md) 49 | 50 | #### Assigning Roles to Machines 51 | 52 | ```bash 53 | salt '' grains.setvals "{'saltstack-base':{'role':'master'}}" 54 | salt '' grains.setvals "{'saltstack-base':{'role':'minion'}}" 55 | ``` 56 | 57 | ### Useful Commands 58 | 59 | Debug and output options: 60 | - Local Version: `salt-run manage.versions` 61 | - Output data using pprint: `salt 'store1' grains.items --output=pprint` 62 | - Output data using json: `salt 'store1' grains.items --output=json` 63 | - Debug level: `salt 'store1' --log-level=debug --state-output=mixed state.highstate test=True` 64 | - Verbose: `salt -v --log-level=debug --state-output=mixed 'store1' state.highstate test=True` 65 | 66 | Common commands: 67 | - Run a command: `salt '*' cmd.run 'date'` 68 | - Service restart: `salt '*' service.restart ntp` 69 | - View a file: `salt '*' cp.get_file_str /etc/hosts` 70 | - Look for a package: `salt 'store1' pkg.list_pkgs --output=json | grep ntp` 71 | - Copy a file: `salt-cp '*' /local/file /remote/file` 72 | 73 | State Execution: 74 | - Force a pillar refresh: `salt '*' saltutil.refresh_pillar` 75 | - Sync all: `salt '*' saltutil.sync_all` 76 | - Calling Highstate: `salt '*' state.highstate` 77 | 78 | Jobs: 79 | - Lookup result of a job: `salt-run jobs.lookup_jid 20150627120734094928` 80 | 81 | Help: 82 | - Show module docstrings: `salt 'store1' sys.doc test.ping` 83 | 84 | [Salt Grains](notes/grains.md) 85 | 86 | ### References 87 | 88 | - [Salt Module Index](http://docs.saltstack.com/en/latest/salt-modindex.html) 89 | 90 | 91 | -------------------------------------------------------------------------------- /states/openstack/keystone/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # https://docs.openstack.org/ocata/install-guide-rdo/keystone-install.html 3 | # 4 | 5 | {% from "openstack/mysql/map.jinja" import mysql with context %} 6 | 7 | {% set mysql_host = salt['pillar.get']('openstack:controller:host') %} 8 | {% set controller = salt['pillar.get']('openstack:controller:host') %} 9 | 10 | {% set keystone_dbpass = salt['pillar.get']('openstack:auth:KEYSTONE_DBPASS') %} 11 | 12 | # 13 | # Install and configure components 14 | # 15 | 16 | # 1. install the packages 17 | keystone-pkgs: 18 | pkg.installed: 19 | - pkgs: 20 | - openstack-keystone 21 | - httpd 22 | - mod_wsgi 23 | 24 | # 2. Edit the /etc/keystone/keystone.conf file and complete the following actions: 25 | /etc/keystone/keystone.conf: 26 | ini.options_present: 27 | - sections: 28 | database: 29 | connection: 'mysql+pymysql://keystone:{{ keystone_dbpass }}@{{ mysql_host }}/keystone' 30 | token: 31 | provider: fernet 32 | 33 | 34 | # 3. Populate the Identity service database: 35 | keystone_db_sync: 36 | cmd.run: 37 | - name: keystone-manage db_sync 38 | - user: keystone 39 | - shell: /bin/sh 40 | 41 | # 4. Initialize Fernet key repositories 42 | fernet_setup: 43 | cmd.run: 44 | - name: 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' 45 | credential_setup: 46 | cmd.run: 47 | - name: 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' 48 | 49 | # 5. 
Bootstrap the Identity service 50 | keystone-bootstrap: 51 | cmd.run: 52 | - name: 'keystone-manage bootstrap --bootstrap-password {{ salt['pillar.get']('openstack:auth:ADMIN_PASS') }} --bootstrap-admin-url http://{{ controller }}:35357/v3/ --bootstrap-internal-url http://{{ controller }}:5000/v3/ --bootstrap-public-url http://{{ controller }}:5000/v3/ --bootstrap-region-id RegionOne' 53 | 54 | # 55 | # Configure Apache HTTP server 56 | # 57 | 58 | /etc/httpd/conf/httpd.conf: 59 | file.replace: 60 | - name: /etc/httpd/conf/httpd.conf 61 | - pattern: '#ServerName www.example.com:80' 62 | - repl: 'ServerName {{ controller }}' 63 | 64 | /etc/httpd/conf.d/wsgi-keystone.conf: 65 | file.symlink: 66 | - name: /etc/httpd/conf.d/wsgi-keystone.conf 67 | - target: /usr/share/keystone/wsgi-keystone.conf 68 | 69 | httpd-service: 70 | service.running: 71 | - name: httpd 72 | - enable: True 73 | - watch: 74 | - file: /etc/httpd/conf/httpd.conf 75 | - file: /etc/httpd/conf.d/wsgi-keystone.conf 76 | 77 | create-service-project: 78 | cmd.run: 79 | - name: openstack project create --domain default --description "Service Project" service 80 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 81 | - unless: 82 | - openstack project show service 83 | 84 | create-demo-project: 85 | cmd.run: 86 | - name: openstack project create --domain default --description "Demo Project" demo 87 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 88 | - unless: 89 | - openstack project show demo 90 | 91 | create-demo-user: 92 | cmd.run: 93 | - name: openstack user create --domain default --password {{ salt['pillar.get']('openstack:auth:DEMO_PASS') }} demo 94 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 95 | - unless: 96 | - openstack user show demo 97 | 98 | create-demo-user-role: 99 | cmd.run: 100 | - name: openstack role create user 101 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 102 | - unless: 103 | - openstack role show user 104 | 105 | add-user-role-to-demo: 106 | cmd.run: 107 | - name: openstack role add --project demo --user demo user 108 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 109 | 110 | keystone.sh: 111 | file.managed: 112 | - name: {{ salt['pillar.get']('openstack:tools_dir') }}/keystone.sh 113 | - source: salt://openstack/keystone/files/keystone.sh 114 | 115 | -------------------------------------------------------------------------------- /states/cpupower/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### Turn off CPU frequency governor on CentOS 7 4 | 5 | Change the frequency governor from **ondemand** to **performance** 6 | 7 | View current frequency info 8 | ``` 9 | $ cpupower frequency-info 10 | analyzing CPU 0: 11 | driver: acpi-cpufreq 12 | CPUs which run at the same hardware frequency: 0 13 | CPUs which need to have their frequency coordinated by software: 0 14 | maximum transition latency: 10.0 us. 15 | hardware limits: 1.20 GHz - 2.40 GHz 16 | available frequency steps: 2.40 GHz, 2.30 GHz, 2.20 GHz, 2.10 GHz, 2.00 GHz, 1.90 GHz, 1.80 GHz, 1.70 GHz, 1.60 GHz, 1.50 GHz, 1.40 GHz, 1.30 GHz, 1.20 GHz 17 | available cpufreq governors: conservative, userspace, powersave, ondemand, performance 18 | current policy: frequency should be within 1.20 GHz and 2.40 GHz. 19 | The governor "ondemand" may decide which speed to use 20 | within this range. 21 | current CPU frequency is 1.20 GHz (asserted by call to hardware). 
22 | boost state support: 23 | Supported: no 24 | Active: no 25 | ``` 26 | 27 | Show the current CPU speed for each core 28 | ``` 29 | $ grep -E '^model name|^cpu MHz' /proc/cpuinfo 30 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 31 | cpu MHz : 2400.000 32 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 33 | cpu MHz : 1200.000 34 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 35 | cpu MHz : 1200.000 36 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 37 | cpu MHz : 1200.000 38 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 39 | cpu MHz : 1200.000 40 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 41 | cpu MHz : 1200.000 42 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 43 | cpu MHz : 1200.000 44 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 45 | cpu MHz : 1200.000 46 | 47 | ``` 48 | 49 | Set governor policy to **performance** 50 | ``` 51 | $ cpupower frequency-set --governor performance 52 | Setting cpu: 0 53 | Setting cpu: 1 54 | Setting cpu: 2 55 | Setting cpu: 3 56 | Setting cpu: 4 57 | Setting cpu: 5 58 | Setting cpu: 6 59 | Setting cpu: 7 60 | ``` 61 | 62 | Verify the change 63 | ``` 64 | $ cpupower frequency-info 65 | analyzing CPU 0: 66 | driver: acpi-cpufreq 67 | CPUs which run at the same hardware frequency: 0 68 | CPUs which need to have their frequency coordinated by software: 0 69 | maximum transition latency: 10.0 us. 70 | hardware limits: 1.20 GHz - 2.40 GHz 71 | available frequency steps: 2.40 GHz, 2.30 GHz, 2.20 GHz, 2.10 GHz, 2.00 GHz, 1.90 GHz, 1.80 GHz, 1.70 GHz, 1.60 GHz, 1.50 GHz, 1.40 GHz, 1.30 GHz, 1.20 GHz 72 | available cpufreq governors: conservative, userspace, powersave, ondemand, performance 73 | current policy: frequency should be within 1.20 GHz and 2.40 GHz. 74 | The governor "performance" may decide which speed to use 75 | within this range. 76 | current CPU frequency is 2.40 GHz (asserted by call to hardware). 
77 | boost state support: 78 | Supported: no 79 | Active: no 80 | 81 | $ grep -E '^model name|^cpu MHz' /proc/cpuinfo 82 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 83 | cpu MHz : 2400.000 84 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 85 | cpu MHz : 2400.000 86 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 87 | cpu MHz : 2400.000 88 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 89 | cpu MHz : 2400.000 90 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 91 | cpu MHz : 2400.000 92 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 93 | cpu MHz : 2400.000 94 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 95 | cpu MHz : 2400.000 96 | model name : Intel(R) Atom(TM) CPU C2758 @ 2.40GHz 97 | cpu MHz : 2400.000 98 | ``` 99 | 100 | Persist the change to activate on reboot 101 | ``` 102 | echo "/bin/cpupower frequency-set --governor performance" >> /etc/rc.d/rc.local 103 | chmod +x /etc/rc.d/rc.local 104 | ``` 105 | 106 | ##### References 107 | 108 | - [http://unix.stackexchange.com/questions/77410/centos-conservative-governor-nice-error](http://unix.stackexchange.com/questions/77410/centos-conservative-governor-nice-error) 109 | - [http://www.servernoobs.com/avoiding-cpu-speed-scaling-in-modern-linux-distributions-running-cpu-at-full-speed-tips/](http://www.servernoobs.com/avoiding-cpu-speed-scaling-in-modern-linux-distributions-running-cpu-at-full-speed-tips/) 110 | 111 | -------------------------------------------------------------------------------- /pillar/superlab.sls: -------------------------------------------------------------------------------- 1 | 2 | docker: 3 | version: 1.13.1-1.el7 4 | 5 | openstack: 6 | repo: 7 | baseurl: http://yumrepo/repo/centos/7/centos-openstack-ocata 8 | controller: 9 | host: controller 10 | neutron: 11 | controller_provider_interface_name: 'provider:enp5s0' 12 | compute_provider_interface_name: 'provider:enp0s20f2' 13 | user: devops 14 | tools_dir: /home/devops/openstack 15 | auth: 16 | ADMIN_PASS: admin 17 | CINDER_DBPASS: cinder 18 | CINDER_PASS: cinder 19 | DASH_DBPASS: dash 20 | DEMO_PASS: demo 21 | GLANCE_DBPASS: glance 22 | GLANCE_PASS: glance 23 | KEYSTONE_DBPASS: keystone 24 | METADATA_SECRET: secret 25 | NEUTRON_DBPASS: neutron 26 | NEUTRON_PASS: neutron 27 | NOVA_DBPASS: nova 28 | NOVA_PASS: nova 29 | PLACEMENT_PASS: placement 30 | # below needs to be same as rabbitmq pass for openstack user. 
31 | RABBIT_PASS: openstack 32 | rabbitmq: 33 | user: openstack 34 | pass: openstack 35 | env: 36 | OS_USERNAME: admin 37 | OS_PASSWORD: admin 38 | OS_PROJECT_NAME: admin 39 | OS_USER_DOMAIN_NAME: Default 40 | OS_PROJECT_DOMAIN_NAME: Default 41 | OS_AUTH_URL: http://controller:35357/v3 42 | OS_IDENTITY_API_VERSION: 3 43 | OS_IMAGE_API_VERSION: 2 44 | 45 | mysql: 46 | root_pass: password 47 | 48 | #pypiserver: 49 | # host: pypiserver 50 | # nginx_ssl_frontend_port: 7009 51 | # pypi_port: 7010 52 | # packages_dir: /var/www/html/pypiserver/packages 53 | # certificate: /data/certs/local.crt 54 | # certificate_key: /data/certs/local.pem 55 | 56 | user_list: 57 | [{ 58 | 'name':'devops', 59 | 'shell':'/bin/bash', 60 | 'group':'devops', 61 | 'ssh_public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfMGfTjkyCn8puZ/1RxQFB7wnGjNacg/zWccGy+mzNi6g6xPsl4NDMRTkqWbnPQddsBd/4bTUi0TsUkw/bsVqCRXpE43Nb4KNxrPgmb9S0Z1hF4r/8uzVoUCCvQBNXHuqLAPWx16yx47LtTdDav1UiLLrggWWwSezboy6Z77UhUMVbavzmRKHyf1ssRHCun4VQKeddAEaZ2B9HKqbqEiBD/zYg5FacXGFrkfbm59G1wjer2cjNNeESnrJFsADdW2eLLZUWS9JwM6dsYSIOlwAZyvkejqPEj1K6YEafdaVuiNxyyyTHgXhN/vLwN7YCSwxk5ZT+MDqqYrnco9B5AasP devops@ws2.lab.local' 62 | 63 | }] 64 | 65 | kernel: 66 | sysctl: 67 | fs.file-max: 737975 68 | kernel.sem: 250 32000 32 256 69 | # net.core.somaxconn: 512 70 | # net.ipv4.conf.all.accept_redirects: 1 71 | # net.ipv4.conf.all.send_redirects: 1 72 | net.ipv4.ip_local_port_range: 20001 65535 73 | net.ipv4.tcp_fin_timeout: 15 74 | net.ipv4.tcp_timestamps: 1 75 | net.ipv4.tcp_window_scaling: 1 76 | net.ipv4.tcp_syncookies: 0 77 | vm.swappiness: 1 78 | # vm.min_free_kbytes: 2000000 79 | 80 | systems: 81 | {% if grains['id'] == 'ws1' %} 82 | dhcp: 83 | subnet: 10.0.0.0 84 | range: 10.0.0.0 10.0.0.12 85 | domain-name-servers: 86 | - 10.0.0.5 87 | - ws1.lab.local 88 | {% elif grains['id'] == 'ws2' %} 89 | dhcp: 90 | subnet: 10.0.0.0 91 | range: 10.0.0.13 10.0.0.24 92 | domain-name-servers: 93 | - 10.0.0.6 94 | - ws2.lab.local 95 | {% endif %} 96 | 97 | packages: 98 | recommended: 99 | {% if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] == '7' %} 100 | - iperf3 101 | - python2-pip 102 | - chrony 103 | {% elif grains['os_family'] == 'RedHat' and grains['osmajorrelease'] == '6' %} 104 | - python-pip 105 | - pdsh 106 | - ntp 107 | {% endif %} 108 | - bc 109 | - bind-utils 110 | - bonnie++ 111 | - chrony 112 | - createrepo 113 | - curl 114 | - dstat 115 | - e2fsprogs 116 | - fio 117 | - gcc 118 | - gdisk 119 | - hdparm 120 | - htop 121 | - iotop 122 | - iperf 123 | - irqbalance 124 | # - kernel-tools 125 | - libffi-devel 126 | - lshw 127 | - lsof 128 | - lvm2 129 | - net-snmp 130 | - net-snmp-perl 131 | - net-snmp-utils 132 | - ngrep 133 | - nmap 134 | - numactl 135 | - openldap-clients 136 | - openssh-clients 137 | - openssl-devel 138 | - parted 139 | - perf 140 | - pciutils 141 | - python-devel 142 | - python-virtualenv 143 | - rsync 144 | - screen 145 | - sdparm 146 | - smartmontools 147 | - strace 148 | - sysstat 149 | - tcpdump 150 | - telnet 151 | - traceroute 152 | - tuned 153 | - unzip 154 | # - util-linux-ng 155 | - vim-enhanced 156 | - wget 157 | - wireshark 158 | - yum-utils 159 | - zip 160 | -------------------------------------------------------------------------------- /states/openstack/glance/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # https://docs.openstack.org/ocata/install-guide-rdo/glance-install.html 3 | # 4 | 5 | {% from "openstack/mysql/map.jinja" import mysql with context %} 6 | 7 | 
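# Note (added for clarity): pillar data consumed below is defined in pillar/superlab.sls:
#   openstack:auth:GLANCE_DBPASS, openstack:auth:GLANCE_PASS,
#   openstack:controller:host, openstack:env, openstack:tools_dir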
{% set glance_dbpass = salt['pillar.get']('openstack:auth:GLANCE_DBPASS') %} 8 | {% set glance_pass = salt['pillar.get']('openstack:auth:GLANCE_PASS') %} 9 | 10 | {% set mysql_host = salt['pillar.get']('openstack:controller:host') %} 11 | {% set controller = salt['pillar.get']('openstack:controller:host') %} 12 | 13 | create-glance-user: 14 | cmd.run: 15 | - name: openstack user create --password {{ salt['pillar.get']('openstack:auth:GLANCE_PASS') }} glance 16 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 17 | - unless: 18 | - openstack user show glance 19 | 20 | # Add the admin role to the glance user and service project: 21 | add-admin-role-to-glance: 22 | cmd.run: 23 | - name: 'openstack role add --project service --user glance admin' 24 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 25 | 26 | # Create glance service entry 27 | create-glance-service: 28 | cmd.run: 29 | - name: 'openstack service create --name glance --description "OpenStack Image service" image' 30 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 31 | - unless: 32 | - openstack service show image 33 | 34 | 35 | # Create glance service API endpoints 36 | glance-public-service-endpoint: 37 | cmd.run: 38 | - name: 'openstack endpoint create --region RegionOne image public http://{{ controller }}:9292' 39 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 40 | # - unless: 41 | # - openstack endpoint list --service image --interface public 42 | 43 | glance-internal-service-endpoint: 44 | cmd.run: 45 | - name: 'openstack endpoint create --region RegionOne image internal http://{{ controller }}:9292' 46 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 47 | # - unless: 48 | # - openstack endpoint list --service image --interface internal 49 | 50 | glance-admin-service-endpoint: 51 | cmd.run: 52 | - name: 'openstack endpoint create --region RegionOne image admin http://{{ controller }}:9292' 53 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 54 | # - unless: 55 | # - openstack endpoint list --service image --interface admin 56 | 57 | # 58 | # 59 | # Install and configure components 60 | # 61 | 62 | # 1. install the packages 63 | glance-pkgs: 64 | pkg.installed: 65 | - pkgs: 66 | - openstack-glance 67 | 68 | # 2. Edit the /etc/glance/glance-api.conf file and complete the following actions: 69 | /etc/glance/glance-api.conf: 70 | ini.options_present: 71 | - sections: 72 | database: 73 | connection: 'mysql+pymysql://glance:{{ glance_dbpass }}@{{ mysql_host }}/glance' 74 | keystone_authtoken: 75 | auth_uri: http://{{ controller }}:5000 76 | auth_url: http://{{ controller }}:35357 77 | memcached_servers: {{ controller }}:11211 78 | auth_type: password 79 | project_domain_name: default 80 | user_domain_name: default 81 | project_name: service 82 | username: glance 83 | password: {{ glance_pass }} 84 | paste_deploy: 85 | flavor: keystone 86 | glance_store: 87 | stores: file,http 88 | default_store: file 89 | filesystem_store_datadir: /var/lib/glance/images/ 90 | 91 | # 3. 
92 | /etc/glance/glance-registry.conf: 93 | ini.options_present: 94 | - sections: 95 | database: 96 | connection: 'mysql+pymysql://glance:{{ glance_dbpass }}@{{ mysql_host }}/glance' 97 | keystone_authtoken: 98 | auth_uri: http://{{ controller }}:5000 99 | auth_url: http://{{ controller }}:35357 100 | memcached_servers: {{ controller }}:11211 101 | auth_type: password 102 | project_domain_name: default 103 | user_domain_name: default 104 | project_name: service 105 | username: glance 106 | password: {{ glance_pass }} 107 | paste_deploy: 108 | flavor: keystone 109 | 110 | # 4. Populate the Image service database: 111 | glance_db_sync: 112 | cmd.run: 113 | - name: glance-manage db_sync 114 | - user: glance 115 | - shell: /bin/sh 116 | 117 | # 118 | # Finalize Installation 119 | # 120 | 121 | glance-api-service: 122 | service.running: 123 | - name: openstack-glance-api 124 | - enable: True 125 | - watch: 126 | - ini: /etc/glance/glance-api.conf 127 | 128 | glance-registry-service: 129 | service.running: 130 | - name: openstack-glance-registry 131 | - enable: True 132 | - watch: 133 | - ini: /etc/glance/glance-registry.conf 134 | 135 | glance.sh: 136 | file.managed: 137 | - name: {{ salt['pillar.get']('openstack:tools_dir') }}/glance.sh 138 | - source: salt://openstack/glance/files/glance.sh 139 | 140 | -------------------------------------------------------------------------------- /notes/centos-7-manual.md: -------------------------------------------------------------------------------- 1 | 2 | ### Install CentOS 7 (Manual Install) 3 | 4 | #### Overview 5 | 6 | 1. [Install the CentOS 7 Operating System](#install-centos-7) 7 | 2. [Update the OS and install the EPEL](#update-the-os-and-install-the-epel) 8 | 3. [Disable SELinux and iptables](#disable-selinux-and-iptables) 9 | 4. [Install MATE Desktop](#install-mate-desktop) 10 | 5. [Setup devops user to sudo without a password](#setup-devops-user-to-sudo-without-password) 11 | 6. [Setup NTP Server](#setup-ntp-server) 12 | 7. [Verify the changes](#verify-the-changes) 13 | 14 | #### Install CentOS 7 15 | 16 | 1. Boot from media 17 | 2. Select Keyboard and Language settings. 18 | 2. Under Software, click Software Selection. The Software Selection page appears. 19 | 3. Select Server with GUI, click Done in the upper-left. 20 | 4. Under System, click Installation Destination. The Installation Destination page appears. 21 | 5. Under Partitioning, select **I will configure partitioning**, click **Done** 22 | 6. The Manual Partitioning page appears. 23 | 7. Under **New mount points will use the following partitioning scheme** select **Standard Partition** 24 | 8. Click +. The **Add a new mount point** dialog will appear. 25 | 9. For Mount Point, select `/boot`. For Desired Capacity enter `512`. Click **Add Mount Point** 26 | 10. Click +. The **Add a new mount point** dialog will appear. 27 | 11. For Mount Point, select `swap`. For Desired Capacity enter `4096`. Click **Add Mount Point** 28 | 12. Click +. The **Add a new mount point** dialog will appear. 29 | 13. For Mount Point, select `/`. Do not enter anything into Desired Capacity. Click **Add Mount Point** 30 | 14. Click Done in the upper left. The Summary of Changes window appears. Click **Accept Changes** 31 | 15. Under System, click **Network & Hostname** 32 | 16. Select the ethernet interface. Click the slider in the upper-right to On. Click **Configure** in the lower-right 33 | 17. Click General Tab. Select **Automatically connect to this network when it is available** 34 | 18. Click IPv4 Tab. Configure the network information.
Set the hostname to **workstation1** 35 | 19. Click Begin Installation 36 | 20. Add **devops** user 37 | 21. Set root password 38 | 22. Reboot 39 | 23. Accept the License agreement 40 | 24. Reboot 41 | 25. Login as **devops** user. Open a terminal window and `sudo su -` to root. 42 | 43 | #### Update the OS and install the EPEL 44 | 45 | ```bash 46 | yum update 47 | yum install epel-release 48 | ``` 49 | 50 | #### Disable SELinux and iptables 51 | 52 | ```bash 53 | sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 54 | systemctl stop iptables.service 55 | systemctl disable iptables.service 56 | ``` 57 | 58 | Reboot to implement the change 59 | 60 | #### Install MATE Desktop 61 | 62 | ```bash 63 | yum groupinstall "MATE Desktop" 64 | ``` 65 | 66 | #### Setup devops user to sudo without password 67 | 68 | Using `visudo` allow devops user to sudo without password. 69 | Add `devops ALL=(ALL) NOPASSWD: ALL` to the end of the file. 70 | 71 | #### Setup NTP Server 72 | 73 | 1. Setup ntpd on the workstation to be an NTP time server 74 | 75 | ```bash 76 | yum install ntp 77 | systemctl start ntpd.service 78 | systemctl enable ntpd.service 79 | ``` 80 | 81 | 2. Verify the NTP installation 82 | 83 | ```bash 84 | [root@workstation1 ~]# ntpq -p 85 | remote refid st t when poll reach delay offset jitter 86 | ============================================================================== 87 | -y.ns.gin.ntt.ne 198.64.6.114 2 u 465 1024 375 38.859 -14.201 8.438 88 | *ntp.your.org .CDMA. 1 u 853 1024 377 29.042 1.957 4.626 89 | +www.linas.org 129.250.35.250 3 u 470 1024 377 44.347 1.349 5.194 90 | +ntp3.junkemailf 149.20.64.28 2 u 675 1024 337 78.504 4.305 3.001 91 | 92 | [root@workstation1 ~]# ntpq -c assoc 93 | 94 | ind assid status conf reach auth condition last_event cnt 95 | =========================================================== 96 | 1 3548 933a yes yes none outlyer sys_peer 3 97 | 2 3549 963a yes yes none sys.peer sys_peer 3 98 | 3 3550 9424 yes yes none candidate reachable 2 99 | 4 3551 9424 yes yes none candidate reachable 2 100 | [root@workstation1 ~]# 101 | ``` 102 | 103 | #### Verify the changes 104 | 105 | 1. Reboot: `reboot` 106 | 2. Log back in using MATE as **devops** user. Open a terminal window. 107 | 2. Verify that SELinux and iptables are disabled. 108 | 109 | ```bash 110 | [devops@workstation1 ~]$ sudo su - 111 | Last login: Tue Feb 17 19:56:47 EST 2015 on pts/0 112 | [root@workstation1 ~]# sestatus 113 | SELinux status: disabled 114 | [root@workstation1 ~]# systemctl status iptables.service 115 | iptables.service - IPv4 firewall with iptables 116 | Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled) 117 | Active: inactive (dead) 118 | 119 | [root@workstation1 ~]# 120 | ``` 121 | 122 | #### Recommended 123 | 124 | 1. Test and burn-in the hardware using Prime95 125 | 126 | Download [Prime95](http://www.mersenne.org/ftp_root/gimps/p95v285.linux64.tar.gz) 127 | 128 | -------------------------------------------------------------------------------- /notes/ssl-howto.md: -------------------------------------------------------------------------------- 1 | 2 | ## SSL Certificates and Key Management 3 | 4 | 1. [Install Required Packages](#install-required-packages-on-all-hosts) 5 | 2. [Create Root CA Certificate and Private Key](#create-root-ca-certificate-and-private-key) 6 | 3. [Create Self-Signed Certificate with Wildcard CN](#create-self-signed-certificate-with-wildcard-cn) 7 | 4. [How to View Certificates](#how-to-view-certificates) 8 | 5. 
[Adding Certificates to the OS Trust Store](#adding-certificates-to-the-os-trust-store) 9 | 6. [Create Certificate with Subject Alt Names](#create-certificate-with-subject-alt-names) 10 | 7. [Reset CA Trust Store](#reset-ca-trust-store) 11 | 8. [References](#references) 12 | 13 | 14 | ### Install Required Packages on all hosts 15 | 16 | ```bash 17 | yum install ca-certificates openssl 18 | ``` 19 | 20 | ### Create Root CA Certificate and Private Key 21 | 22 | Create a Root CA Certificate and self-sign it. 23 | 24 | 1. Create the CA private key: 25 | ``` 26 | openssl genrsa -out local.key 2048 27 | ``` 28 | 29 | 2. Create the CA Certificate and sign it with the CA private key 30 | ``` 31 | openssl req -new -x509 \ 32 | -extensions v3_ca \ 33 | -key local.key \ 34 | -out local.crt \ 35 | -days 3650 \ 36 | -subj "/C=US/ST=VA/L=Reston/O=Local/OU=Lab/CN=Superlab CA" 37 | ``` 38 | **This command will return no output.** 39 | 40 | #### Create Self-Signed Certificate with Wildcard CN 41 | 42 | 1. Create the private key: 43 | ``` 44 | openssl genrsa -out s3.key 2048 45 | ``` 46 | 47 | 2. Create the Certificate Signing Request (CSR) using the private key. 48 | 49 | In this example, the wildcard CN covers all hosts in the `*.lab.local` domain. 50 | ``` 51 | openssl req -new -key s3.key -out s3.csr \ 52 | -subj "/C=US/ST=VA/L=Reston/O=Local/OU=Lab/CN=*.lab.local" 54 | ``` 55 | 56 | 3. Create the Certificate using the CSR and sign it with the CA Certificate and CA Private Key. 57 | ``` 58 | openssl x509 -req -in s3.csr \ 59 | -CA local.crt \ 60 | -CAkey local.key \ 61 | -CAcreateserial \ 62 | -out s3.crt \ 63 | -days 3650 \ 64 | -sha256 65 | ``` 66 | 67 | 4. Create the PEM File from the Certificate and Private Key 68 | ``` 69 | cat s3.crt s3.key > s3.pem 70 | ``` 71 | 72 | ### How to View Certificates 73 | 74 | - `openssl req -text -noout -in s3.csr` 75 | - `openssl x509 -text -noout -in s3.crt` 76 | - `openssl s_client -showcerts -cert s3.crt -key s3.pem -connect app1.lab.local:8000` 77 | - `rpm -Vv ca-certificates` 78 | 79 | 80 | ### Adding Certificates to the OS Trust Store 81 | 82 | Not all applications use the trust store. 83 | 84 | 1. Create the /etc/pki/local directory on all hosts 85 | 86 | ```bash 87 | salt '*' cmd.run 'mkdir -p /etc/pki/local' 88 | ``` 89 | 90 | 2. Copy the certificates to all the hosts 91 | 92 | ```bash 93 | salt-cp '*' * /etc/pki/local/ 94 | ``` 95 | 96 | 3. Set the appropriate permissions 97 | 98 | ```bash 99 | salt '*' cmd.run 'chmod 644 /etc/pki/local/*' 100 | ``` 101 | 102 | 4. Add CA Root Certificate to Trust Store 103 | 104 | ```bash 105 | salt '*' cmd.run 'update-ca-trust force-enable' 106 | salt '*' cmd.run 'ln -s /etc/pki/local/local.crt /etc/pki/ca-trust/source/anchors/local.crt' 107 | salt '*' cmd.run 'update-ca-trust extract' 108 | ``` 109 | 110 | ### Create Certificate with Subject Alt Names 111 | 112 | Create an openssl-san.cnf file: 113 | 114 | In this example, s3.lab.local is the LB/Endpoint and app1 through app5.lab.local are the S3 connectors.
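
Once the SAN certificate has been generated (steps below), it helps to confirm that every hostname actually landed in the subjectAltName extension. A minimal sketch, assuming the `s3.crt` filename used later in this section and the hostnames listed above:

```bash
# Show the SAN entries embedded in the issued certificate
openssl x509 -noout -text -in s3.crt | grep -A1 'Subject Alternative Name'

# Check each expected hostname individually
for host in app1 app2 app3 app4 app5 s3; do
  if openssl x509 -noout -text -in s3.crt | grep -q "DNS:${host}.lab.local"; then
    echo "OK   ${host}.lab.local"
  else
    echo "MISS ${host}.lab.local"
  fi
done
```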
115 | 116 | ``` 117 | [req] 118 | distinguished_name = req_distinguished_name 119 | req_extensions = v3_req 120 | 121 | [req_distinguished_name] 122 | countryName = Country Name (2 letter code) 123 | countryName_default = US 124 | stateOrProvinceName = State or Province Name (full name) 125 | stateOrProvinceName_default = VA 126 | localityName = Locality Name (eg, city) 127 | localityName_default = Reston 128 | organizationalUnitName = Organizational Unit Name (eg, section) 129 | organizationalUnitName_default = Local 130 | commonName = S3 SAN 131 | commonName_max = 64 132 | 133 | [ v3_req ] 134 | # Extensions to add to a certificate request 135 | basicConstraints = CA:FALSE 136 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 137 | subjectAltName = @alt_names 138 | 139 | [alt_names] 140 | DNS.1 = app1.lab.local 141 | DNS.2 = app2.lab.local 142 | DNS.3 = app3.lab.local 143 | DNS.4 = app4.lab.local 144 | DNS.5 = app5.lab.local 145 | DNS.6 = s3.lab.local 146 | ``` 147 | 148 | 1.Generate private key 149 | ``` 150 | openssl genrsa -out s3.key 2048 151 | ``` 152 | 153 | 2. Create the CSR file 154 | 155 | ``` 156 | openssl req -new -out s3.csr -key s3.key -config openssl-san.cnf 157 | ``` 158 | 159 | 3. Self-sign and create the certificate: 160 | 161 | ``` 162 | openssl x509 -req -days 3650 -in s3.csr -signkey s3.key \ 163 | -out s3.crt -extensions v3_req -extfile openssl-san.cnf \ 164 | -CA local.crt -CAkey local.key -CAcreateserial 165 | ``` 166 | 167 | 4. Create the PEM file from the Certificate and Private Key 168 | 169 | ``` 170 | cat s3.crt s3.key > s3.pem 171 | ``` 172 | 173 | ### Reset CA Trust Store 174 | 175 | 1. Move everything out of the Trust Store 176 | 177 | ``` 178 | rpm -Vv ca-certificates 179 | ``` 180 | 181 | 2. Re-install ca-certificates 182 | 183 | ``` 184 | yum check-update ca-certificates; (($?==100)) && yum update ca-certificates || yum reinstall ca-certificates` 185 | ``` 186 | 187 | 3. Reset the Trust Store 188 | 189 | ``` 190 | update-ca-trust extract 191 | ``` 192 | 193 | ### References 194 | 195 | - http://apetec.com/support/GenerateSAN-CSR.htm 196 | - https://www.sslshopper.com/article-most-common-openssl-commands.html 197 | - https://superuser.com/questions/462295/openssl-ca-and-non-ca-certificate 198 | - [How to reset the list of trusted CA certificates in RHEL 6 & RHEL 7 ](https://access.redhat.com/solutions/1549003) 199 | 200 | -------------------------------------------------------------------------------- /states/pypiserver/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## pypiserver Setup 3 | 4 | Implementation of pypiserver on CentOS 7 5 | 6 | Tested versions. 
7 | 8 | - CentOS Linux release 7.3.1611 (Core) 9 | 10 | 11 | ``` 12 | yum install python-pip libffi-devel gcc python-devel openssl-devel 13 | pip install --upgrade setuptools 14 | ``` 15 | 16 | ``` 17 | pip install --no-index --find-links /var/www/html/pypiserver/packages setuptools-36.0.1.zip 18 | ``` 19 | 20 | ``` 21 | yum install python-virtualenv 22 | mkdir -p ~/env ; cd ~/env 23 | virtualenv pypiserver 24 | source pypiserver/bin/activate 25 | ``` 26 | 27 | ``` 28 | pip install --no-index --find-links /var/www/html/pypiserver/packages pypiserver-1.2.0.zip 29 | ``` 30 | 31 | ``` 32 | pypi-server -p 8080 /var/www/html/pypiserver/packages 33 | ``` 34 | 35 | ``` 36 | deactivate 37 | ``` 38 | 39 | ### Reference Implementation 40 | 41 | Packages to be hosted by the pypiserver: 42 | 43 | ``` 44 | [root@ws2 ~]$ ls -l /var/www/html/pypiserver/packages/ 45 | total 22844 46 | -rw-rw-r-- 1 devops devops 2511062 Jun 11 13:01 ansible-2.2.1.0.tar.gz 47 | -rw-rw-r-- 1 devops devops 2512540 Jun 11 11:40 ansible-2.2.3.0.tar.gz 48 | -rw-rw-r-- 1 devops devops 5605 Jun 11 13:17 backports.ssl_match_hostname-3.5.0.1.tar.gz 49 | -rw-rw-r-- 1 devops devops 8317217 Jun 11 14:31 boto-2.45.0.tar.gz 50 | -rw-rw-r-- 1 devops devops 1460723 Jun 11 14:31 boto-2.47.0.tar.gz 51 | -rw-rw-r-- 1 devops devops 84129 Jun 11 13:11 docker-py-1.10.6.tar.gz 52 | -rw-rw-r-- 1 devops devops 71968 Jun 11 11:35 docker-py-1.8.1.tar.gz 53 | -rw-rw-r-- 1 devops devops 7555 Jun 11 13:16 docker-pycreds-0.2.1.tar.gz 54 | -rw-rw-r-- 1 devops devops 55579 Jun 11 13:07 ecdsa-0.13.tar.gz 55 | -rw-rw-r-- 1 devops devops 32475 Jun 11 13:16 ipaddress-1.0.18.tar.gz 56 | -rw-rw-r-- 1 devops devops 378300 Jun 11 13:39 Jinja2-2.7.2.tar.gz 57 | -rw-rw-r-- 1 devops devops 14356 Jun 11 13:32 MarkupSafe-1.0.tar.gz 58 | -rw-rw-r-- 1 devops devops 1362604 Jun 11 13:04 paramiko-1.16.1.tar.gz 59 | -rw-rw-r-- 1 devops devops 1139175 Jun 11 18:12 pip-8.1.1.tar.gz 60 | -rw-rw-r-- 1 devops devops 1197370 Jun 11 18:01 pip-9.0.1.tar.gz 61 | -rw-rw-r-- 1 devops devops 446240 Jun 11 13:42 pycrypto-2.6.1.tar.gz 62 | -rw-rw-r-- 1 devops devops 110437 Jun 11 18:18 pypiserver-1.2.0.zip 63 | -rw-rw-r-- 1 devops devops 253011 Jun 11 13:41 PyYAML-3.12.tar.gz 64 | -rw-rw-r-- 1 devops devops 545246 Jun 11 11:39 requests-2.12.1.tar.gz 65 | -rw-rw-r-- 1 devops devops 711296 Jun 11 13:13 setuptools-36.0.1.zip 66 | -rw-rw-r-- 1 devops devops 29630 Jun 11 14:16 six-1.10.0.tar.gz 67 | -rw-rw-r-- 1 devops devops 1863951 Jun 11 14:37 virtualenv-15.1.0.tar.gz 68 | -rw-rw-r-- 1 devops devops 196203 Jun 11 13:14 websocket_client-0.40.0.tar.gz 69 | -rw-rw-r-- 1 devops devops 37351 Jun 11 14:36 wsgiref-0.1.2.zip 70 | [root@ws2 ~]$ 71 | ``` 72 | 73 | #### Example Setup 74 | 75 | ``` 76 | [root@ws2 ~]$ yum install python-pip libffi-devel gcc python-devel openssl-devel 77 | Loaded plugins: fastestmirror, langpacks, priorities 78 | Loading mirror speeds from cached hostfile 79 | * epel: mirror.cs.princeton.edu 80 | Package python2-pip-8.1.2-5.el7.noarch already installed and latest version 81 | Package libffi-devel-3.0.13-18.el7.x86_64 already installed and latest version 82 | Package gcc-4.8.5-11.el7.x86_64 already installed and latest version 83 | Package python-devel-2.7.5-48.el7.x86_64 already installed and latest version 84 | Package 1:openssl-devel-1.0.1e-60.el7_3.1.x86_64 already installed and latest version 85 | Nothing to do 86 | [root@ws2 ~]$ pip install --upgrade setuptools 87 | Requirement already up-to-date: setuptools in /usr/lib/python2.7/site-packages 88 | You are using 
pip version 8.1.1, however version 9.0.1 is available. 89 | You should consider upgrading via the 'pip install --upgrade pip' command. 90 | [root@ws2 ~]$ yum install python-virtualenv 91 | Loaded plugins: fastestmirror, langpacks, priorities 92 | Loading mirror speeds from cached hostfile 93 | * epel: mirror.cs.princeton.edu 94 | Package python-virtualenv-1.10.1-3.el7.noarch already installed and latest version 95 | Nothing to do 96 | [root@ws2 ~]$ mkdir -p ~/env ; cd ~/env 97 | [root@ws2 env]$ virtualenv pypiserver 98 | New python executable in pypiserver/bin/python 99 | Installing setuptools, pip, wheel...done. 100 | [root@ws2 env]$ source pypiserver/bin/activate 101 | (pypiserver)[root@ws2 env]$ 102 | (pypiserver)[root@ws2 env]$ pip install --no-index --find-links /var/www/html/pypiserver/packages pypiserver 103 | Ignoring indexes: https://pypi.python.org/simple 104 | Collecting pypiserver 105 | Installing collected packages: pypiserver 106 | Successfully installed pypiserver-1.2.0 107 | (pypiserver)[root@ws2 env]$ pypi-server -p 8080 /var/www/html/pypiserver/packages 108 | 10.0.0.31 - - [11/Jun/2017 21:11:57] "GET /simple/pip/ HTTP/1.1" 200 397 109 | 10.0.0.31 - - [11/Jun/2017 21:11:57] "GET /packages/pip-8.1.1.tar.gz HTTP/1.1" 200 1139175 110 | ``` 111 | 112 | #### Test from client machine 113 | 114 | NOTE: `yumrepo` is a DNS alias for `ws2` machine used in the example above 115 | 116 | ``` 117 | [root@app1 tmp]$ grep yumrepo /etc/hosts 118 | 10.0.0.6 ws2.lab.local ws2 salt ntp yumrepo pxeserver 119 | [root@app1 tmp]$ 120 | [root@app1 tmp]$ cat ~/.pip/pip.conf 121 | [global] 122 | index-url = http://yumrepo:8080/simple/ 123 | trusted-host = yumrepo 124 | [root@app1 tmp]$ 125 | [root@app1 tmp]$ pip install pip==8.1.1 126 | Collecting pip==8.1.1 127 | Downloading http://yumrepo:8080/packages/pip-8.1.1.tar.gz (1.1MB) 128 | 100% |████████████████████████████████| 1.1MB 28.8MB/s 129 | Installing collected packages: pip 130 | Found existing installation: pip 9.0.1 131 | Uninstalling pip-9.0.1: 132 | Successfully uninstalled pip-9.0.1 133 | Running setup.py install for pip ... done 134 | Successfully installed pip-8.1.1 135 | [root@app1 tmp]$ 136 | ``` 137 | 138 | 139 | ### References 140 | 141 | - [https://pypiserver.readthedocs.io/en/latest/](https://pypiserver.readthedocs.io/en/latest/) 142 | - [https://pypi.python.org/pypi](https://pypi.python.org/pypi) 143 | -------------------------------------------------------------------------------- /notes/dell-powerconnect.md: -------------------------------------------------------------------------------- 1 | 2 | ### Dell PowerConnect 6224 Switch 3 | 4 | #### Connect to the console 5 | 6 | 1. Turn on the switch 7 | 2. Connect to the console: `minicom -D /dev/ttyUSB0 -b 9600` 8 | 3. Hit Enter. The `console>` prompt appears. 9 | 10 | - Superuser access: `enable` 11 | - Save the running config: `copy running-config startup-config` 12 | 13 | #### Set the IP address of the switch 14 | 15 | ``` 16 | enable 17 | configure 18 | ip address none 19 | ip address 10.0.0.226 255.255.255.0 20 | ip default-gateway 10.0.0.1 21 | exit 22 | show ip interface management 23 | ``` 24 | 25 | #### Update the Firmware from 2.x to 3.x 26 | 27 | 1. Verify switch status is OK: `show switch` 28 | 2. Disable persistent logging 29 | 30 | ``` 31 | config 32 | no logging file 33 | ``` 34 | 35 | 3. 
Shut down all ports except for 24 36 | 37 | ``` 38 | config 39 | interface range ethernet all 40 | shutdown 41 | exit 42 | interface ethernet 1/g24 43 | no shutdown 44 | exit 45 | exit 46 | ``` 47 | 48 | 4. Load the software onto the system 49 | 50 | ``` 51 | console#copy tftp://10.0.0.6/PC6200v3.3.14.2.stk image 52 | 53 | 54 | Mode........................................... TFTP 55 | Set TFTP Server IP............................. 10.0.0.6 56 | TFTP Path...................................... ./ 57 | TFTP Filename.................................. PC6200v3.3.14.2.stk 58 | Data Type...................................... Code 59 | Destination Filename........................... image 60 | 61 | Management access will be blocked for the duration of the transfer 62 | Are you sure you want to start? (y/n) y 63 | 64 | 65 | TFTP code transfer starting 66 | 67 | 9732180 bytes transferred 68 | 69 | Verifying CRC of file in Flash File System 70 | 71 | 72 | Unpacking the image file. 73 | 74 | TFTP download successful. All units updated code. 75 | 76 | 77 | File transfer operation completed successfully. 78 | 79 | console#show version 80 | 81 | Image Descriptions 82 | 83 | image1 : Factory Default 84 | image2 : V 2.2.0.3-14 85 | 86 | 87 | Images currently available on Flash 88 | 89 | -------------------------------------------------------------------- 90 | unit image1 image2 current-active next-active 91 | -------------------------------------------------------------------- 92 | 93 | 1 2.2.0.3 3.3.14.2 image1 image1 94 | 95 | console#boot system image2 96 | Activating image image2 .. 97 | 98 | console#show version 99 | 100 | Image Descriptions 101 | 102 | image1 : Factory Default 103 | image2 : V 2.2.0.3-14 104 | 105 | 106 | Images currently available on Flash 107 | 108 | -------------------------------------------------------------------- 109 | unit image1 image2 current-active next-active 110 | -------------------------------------------------------------------- 111 | 112 | 1 2.2.0.3 3.3.14.2 image1 image2 113 | 114 | 115 | console#reload 116 | 117 | Management switch has unsaved changes. 118 | Are you sure you want to continue? (y/n) y 119 | 120 | Configuration Not Saved! 121 | Are you sure you want to reload the stack? (y/n) y 122 | 123 | 124 | Reloading all switches. 125 | 126 | Boot Menu Version: 21 November 2008 127 | ... 128 | Boot Menu Version: 21 November 2008 129 | Select an option. If no selection in 10 seconds then 130 | operational code will start. 131 | 132 | 1 - Start operational code. 133 | 2 - Start Boot Menu. 134 | Select (1, 2):2 135 | 136 | 137 | 138 | Boot Menu 21 November 2008 139 | 140 | Options available 141 | 1 - Start operational code 142 | 2 - Change baud rate 143 | 3 - Retrieve event log using XMODEM 144 | 4 - Load new operational code using XMODEM 145 | 5 - Display operational code vital product data 146 | 6 - Reserved 147 | 7 - Update boot code 148 | 8 - Delete backup image 149 | 9 - Reset the system 150 | 10 - Restore configuration to factory defaults (delete config files) 151 | 11 - Activate Backup Image 152 | 12 - Password Recovery Procedure 153 | [Boot Menu] 7 154 | Do you wish to update Boot Code and reset the switch? 
(y/n) y 155 | 156 | ``` 157 | 158 | #### Set MTU to 9000 (9216) 159 | 160 | Set the MTU of 9000 on all ports: 161 | ``` 162 | enable 163 | configure 164 | interface range ethernet all 165 | mtu 9216 166 | exit 167 | ``` 168 | 169 | For LAG/LACP ports, must also do the following for each port channel (chX): 170 | ``` 171 | console(config)#interface port-channel 1 172 | console(config-if-ch1)#mtu 9216 173 | console(config-if-ch1)#exit 174 | ``` 175 | 176 | 177 | #### Change stack ports from "ethernet" to "stack" 178 | 179 | Stack ports need to be in "stack" mode in order for stacking to work. 180 | 181 | ``` 182 | config 183 | stack 184 | stack-port 1/xg1 stack 185 | stack-port 1/xg2 stack 186 | ``` 187 | 188 | - Reboot the switch 189 | 190 | #### References 191 | 192 | - [How to configure the optimal switch settings for an IP based SAN](http://en.community.dell.com/techcenter/enterprise-solutions/w/oracle_solutions/1422.how-to-configure-the-optimal-switch-settings-for-an-ip-based-san) 193 | - [PowerConnect Common Example Commands](http://en.community.dell.com/support-forums/network-switches/f/866/t/19445143) 194 | - [Firmware](http://www.dell.com/support/home/us/en/04/product-support/product/powerconnect-6224/drivers) 195 | - [Stacking switches](http://www.dell.com/downloads/global/products/pwcnt/en/pwcnt_stacking_switches.pdf) 196 | - [Factory Reset](http://dcomcomputers.blogspot.com/2013/09/how-to-factory-default-and-test-ports.html) 197 | - [MTU Oversize Packages](http://en.community.dell.com/techcenter/networking/f/4454/t/19415314) 198 | - [MTU and VLANs](http://en.community.dell.com/support-forums/network-switches/f/866/t/19602268) 199 | - [MTU and ESX](http://www.penguinpunk.net/blog/dell-powerconnect-and-jumbo-frames/) 200 | - [Configuring Stacking on Dell 6248 Switches](http://www.seanlabrie.com/2011/configuring-stacking-on-dell-6248-switches/) 201 | - [Set Jumbo Frame on LAG](https://forum.synology.com/enu/viewtopic.php?t=90788) 202 | -------------------------------------------------------------------------------- /states/openstack/neutron/controller.sls: -------------------------------------------------------------------------------- 1 | # 2 | # https://docs.openstack.org/ocata/install-guide-rdo/neutron-controller-install.html 3 | # 4 | 5 | {% from "openstack/mysql/map.jinja" import mysql with context %} 6 | 7 | {% set mysql_host = salt['pillar.get']('openstack:controller:host') %} 8 | 9 | {% set neutron_dbpass = salt['pillar.get']('openstack:auth:NEUTRON_DBPASS') %} 10 | {% set neutron_pass = salt['pillar.get']('openstack:auth:NEUTRON_PASS') %} 11 | {% set nova_pass = salt['pillar.get']('openstack:auth:NOVA_PASS') %} 12 | {% set rabbit_pass = salt['pillar.get']('openstack:auth:RABBIT_PASS') %} 13 | {% set metadata_secret = salt['pillar.get']('openstack:auth:METADATA_SECRET') %} 14 | {% set provider_interface_name = salt['pillar.get']('openstack:neutron:controller_provider_interface_name') %} 15 | {% set controller = salt['pillar.get']('openstack:controller:host') %} 16 | 17 | create-neutron-user: 18 | cmd.run: 19 | - name: openstack user create --domain default --password {{ salt['pillar.get']('openstack:auth:NEUTRON_PASS') }} neutron 20 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 21 | - unless: 22 | - openstack user show neutron 23 | 24 | add-admin-role-to-neutron: 25 | cmd.run: 26 | - name: openstack role add --project service --user neutron admin 27 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 28 | 29 | create-neutron-service: 30 | cmd.run: 31 | - name: 
openstack service create --name neutron --description "OpenStack Networking service" network 32 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 33 | - unless: 34 | - openstack service show network 35 | 36 | neutron-public-service-endpoint: 37 | cmd.run: 38 | - name: 'openstack endpoint create --region RegionOne network public http://{{ controller }}:9696' 39 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 40 | # - unless: 41 | # - openstack endpoint list --service network --interface public 42 | 43 | neutron-internal-service-endpoint: 44 | cmd.run: 45 | - name: 'openstack endpoint create --region RegionOne network internal http://{{ controller }}:9696' 46 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 47 | # - unless: 48 | # - openstack endpoint list --service network --interface internal 49 | 50 | neutron-admin-service-endpoint: 51 | cmd.run: 52 | - name: 'openstack endpoint create --region RegionOne network admin http://{{ controller }}:9696' 53 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 54 | # - unless: 55 | # - openstack endpoint list --service network --interface admin 56 | 57 | neutron-packages: 58 | pkg.installed: 59 | - pkgs: 60 | - openstack-neutron 61 | - openstack-neutron-ml2 62 | - openstack-neutron-linuxbridge 63 | - ebtables 64 | 65 | /etc/neutron/neutron.conf: 66 | ini.options_present: 67 | - sections: 68 | DEFAULT: 69 | core_plugin: ml2 70 | service_plugins: '' 71 | transport_url: rabbit://openstack:{{ rabbit_pass }}@controller 72 | auth_strategy: keystone 73 | notify_nova_on_port_status_changes: true 74 | notify_nova_on_port_data_changes: true 75 | database: 76 | connection: 'mysql+pymysql://neutron:{{ neutron_dbpass }}@{{ mysql_host }}/neutron' 77 | keystone_authtoken: 78 | auth_uri: http://{{ controller }}:5000 79 | auth_url: http://{{ controller }}:35357 80 | memcached_servers: {{ controller }}:11211 81 | auth_type: password 82 | project_domain_name: default 83 | user_domain_name: default 84 | project_name: service 85 | username: neutron 86 | password: {{ neutron_pass }} 87 | nova: 88 | auth_url: http://{{ controller }}:35357 89 | auth_type: password 90 | project_domain_name: default 91 | user_domain_name: default 92 | region_name: RegionOne 93 | project_name: service 94 | username: nova 95 | password: {{ nova_pass }} 96 | oslo_concurrency: 97 | lock_path: /var/lib/neutron/tmp 98 | 99 | /etc/neutron/plugins/ml2/ml2_conf.ini: 100 | ini.options_present: 101 | - sections: 102 | ml2: 103 | type_drivers: flat,vlan 104 | tenant_network_types: '' 105 | mechanism_drivers: linuxbridge 106 | extension_drivers: port_security 107 | ml2_type_flat: 108 | flat_networks: provider 109 | securitygroup: 110 | enable_ipset: true 111 | 112 | /etc/neutron/plugins/ml2/linuxbridge_agent.ini: 113 | ini.options_present: 114 | - sections: 115 | linux_bridge: 116 | physical_interface_mappings: {{ provider_interface_name }} 117 | vxlan: 118 | enable_vxlan: false 119 | securitygroup: 120 | enable_security_group: true 121 | firewall_driver: neutron.agent.linux.iptables_firewall.IptablesFirewallDriver 122 | 123 | /etc/neutron/dhcp_agent.ini: 124 | ini.options_present: 125 | - sections: 126 | DEFAULT: 127 | interface_driver: linuxbridge 128 | dhcp_driver: neutron.agent.linux.dhcp.Dnsmasq 129 | enable_isolated_metadata: true 130 | 131 | /etc/neutron/metadata_agent.ini: 132 | ini.options_present: 133 | - sections: 134 | DEFAULT: 135 | nova_metadata_ip: {{ controller }} 136 | metadata_proxy_shared_secret: {{ metadata_secret }} 137 | 138 | /etc/nova/nova.conf: 139 | 
ini.options_present: 140 | - sections: 141 | neutron: 142 | url: http://{{ controller }}:9696 143 | auth_url: http://{{ controller }}:35357 144 | auth_type: password 145 | project_domain_name: default 146 | user_domain_name: default 147 | region_name: RegionOne 148 | project_name: service 149 | username: neutron 150 | password: {{ neutron_pass }} 151 | service_metadata_proxy: true 152 | metadata_proxy_shared_secret: {{ metadata_secret }} 153 | 154 | /etc/neutron/plugin.ini: 155 | file.symlink: 156 | - name: /etc/neutron/plugin.ini 157 | - target: /etc/neutron/plugins/ml2/ml2_conf.ini 158 | 159 | neutron-db-manage: 160 | cmd.run: 161 | - name: "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" 162 | - user: neutron 163 | - shell: /bin/sh 164 | 165 | restart-compute: 166 | cmd.run: 167 | - name: systemctl restart openstack-nova-api 168 | 169 | neutron-server: 170 | service.running: 171 | - enable: True 172 | 173 | neutron-linuxbridge-agent: 174 | service.running: 175 | - enable: True 176 | - watch: 177 | - ini: /etc/neutron/plugins/ml2/linuxbridge_agent.ini 178 | 179 | neutron-dhcp-agent: 180 | service.running: 181 | - enable: True 182 | - watch: 183 | - ini: /etc/neutron/dhcp_agent.ini 184 | 185 | neutron-metadata-agent: 186 | service.running: 187 | - enable: True 188 | - watch: 189 | - ini: /etc/neutron/metadata_agent.ini 190 | 191 | 192 | -------------------------------------------------------------------------------- /states/openstack/nova/controller.sls: -------------------------------------------------------------------------------- 1 | # 2 | # https://docs.openstack.org/ocata/install-guide-rdo/nova-controller-install.html 3 | # 4 | 5 | {% from "openstack/mysql/map.jinja" import mysql with context %} 6 | 7 | {% set nova_dbpass = salt['pillar.get']('openstack:auth:NOVA_DBPASS') %} 8 | {% set nova_pass = salt['pillar.get']('openstack:auth:NOVA_PASS') %} 9 | {% set rabbit_pass = salt['pillar.get']('openstack:auth:RABBIT_PASS') %} 10 | {% set placement_pass = salt['pillar.get']('openstack:auth:PLACEMENT_PASS') %} 11 | 12 | {% set mysql_host = salt['pillar.get']('openstack:controller:host') %} 13 | {% set controller = salt['pillar.get']('openstack:controller:host') %} 14 | 15 | 16 | create-nova-user: 17 | cmd.run: 18 | - name: openstack user create --password {{ salt['pillar.get']('openstack:auth:NOVA_PASS') }} nova 19 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 20 | - unless: 21 | - openstack user show nova 22 | 23 | add-admin-role-to-nova: 24 | cmd.run: 25 | - name: openstack role add --project service --user nova admin 26 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 27 | 28 | create-nova-service: 29 | cmd.run: 30 | - name: openstack service create --name nova --description "OpenStack Compute service" compute 31 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 32 | - unless: 33 | - openstack service show compute 34 | 35 | nova-public-service-endpoint: 36 | cmd.run: 37 | - name: 'openstack endpoint create --region RegionOne compute public http://{{ controller }}:8774/v2.1' 38 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 39 | 40 | nova-internal-service-endpoint: 41 | cmd.run: 42 | - name: 'openstack endpoint create --region RegionOne compute internal http://{{ controller }}:8774/v2.1' 43 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 44 | 45 | nova-admin-service-endpoint: 46 | cmd.run: 47 | - name: 'openstack endpoint create --region RegionOne compute admin http://{{ 
controller }}:8774/v2.1' 48 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 49 | 50 | # Placement 51 | create-placement-user: 52 | cmd.run: 53 | - name: openstack user create --domain default --password {{ salt['pillar.get']('openstack:auth:PLACEMENT_PASS') }} placement 54 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 55 | - unless: 56 | - openstack user show placement 57 | 58 | add-admin-role-to-placement: 59 | cmd.run: 60 | - name: openstack role add --project service --user placement admin 61 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 62 | 63 | create-placement-service: 64 | cmd.run: 65 | - name: openstack service create --name placement --description "Placement API" placement 66 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 67 | - unless: 68 | - openstack service show placement 69 | 70 | placement-public-service-endpoint: 71 | cmd.run: 72 | - name: 'openstack endpoint create --region RegionOne placement public http://{{ controller }}:8778' 73 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 74 | 75 | placement-internal-service-endpoint: 76 | cmd.run: 77 | - name: 'openstack endpoint create --region RegionOne placement internal http://{{ controller }}:8778' 78 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 79 | 80 | placement-admin-service-endpoint: 81 | cmd.run: 82 | - name: 'openstack endpoint create --region RegionOne placement admin http://{{ controller }}:8778' 83 | - env: {{ salt['pillar.get']('openstack:env', {}) }} 84 | 85 | # 86 | # Install and configure components 87 | # 88 | 89 | # 1. install the packages 90 | nova-pkgs: 91 | pkg.installed: 92 | - pkgs: 93 | - openstack-nova-api 94 | - openstack-nova-conductor 95 | - openstack-nova-console 96 | - openstack-nova-novncproxy 97 | - openstack-nova-scheduler 98 | - openstack-nova-placement-api 99 | 100 | # This needs to go here for placement API endpoints to be created successfully ?? 101 | # packaging bug... 102 | /etc/httpd/conf.d/00-nova-placement-api.conf: 103 | file.append: 104 | - name: /etc/httpd/conf.d/00-nova-placement-api.conf 105 | - text: | 106 | <Directory /usr/bin> 107 | <IfVersion >= 2.4> 108 | Require all granted 109 | </IfVersion> 110 | <IfVersion < 2.4> 111 | Order allow,deny 112 | Allow from all 113 | </IfVersion> 114 | </Directory> 115 | service.running: 116 | - name: httpd 117 | - watch: 118 | - file: /etc/httpd/conf.d/00-nova-placement-api.conf 119 | 120 | # 2.
Edit the /etc/nova/nova-api.conf file and complete the following actions: 121 | /etc/nova/nova.conf: 122 | ini.options_present: 123 | - sections: 124 | DEFAULT: 125 | enabled_apis: osapi_compute,metadata 126 | transport_url: 'rabbit://openstack:{{ rabbit_pass }}@controller' 127 | my_ip: {{ salt['grains.get']('fqdn_ip4:0') }} 128 | use_neutron: True 129 | firewall_driver: nova.virt.firewall.NoopFirewallDriver 130 | database: 131 | connection: 'mysql+pymysql://nova:{{ nova_dbpass }}@{{ mysql_host }}/nova' 132 | api_database: 133 | connection: 'mysql+pymysql://nova:{{ nova_dbpass }}@{{ mysql_host }}/nova_api' 134 | keystone_authtoken: 135 | auth_uri: 'http://{{ controller }}:5000' 136 | auth_url: 'http://{{ controller }}:35357' 137 | memcached_servers: '{{ controller }}:11211' 138 | auth_type: password 139 | project_domain_name: default 140 | user_domain_name: default 141 | project_name: service 142 | username: nova 143 | password: {{ nova_pass }} 144 | vnc: 145 | enabled: true 146 | vncserver_listen: $my_ip 147 | vncserver_proxyclient_address: $my_ip 148 | glance: 149 | api_servers: 'http://{{ controller }}:9292' 150 | oslo_concurrency: 151 | lock_path: /var/lib/nova/tmp 152 | placement: 153 | os_region_name: RegionOne 154 | project_domain_name: default 155 | project_name: service 156 | auth_type: password 157 | user_domain_name: default 158 | auth_url: 'http://{{ controller }}:35357/v3' 159 | username: placement 160 | password: {{ placement_pass }} 161 | scheduler: 162 | discover_hosts_in_cells_interval: 300 163 | 164 | populate_nova_api_database: 165 | cmd.run: 166 | - name: nova-manage api_db sync 167 | - user: nova 168 | - shell: /bin/sh 169 | 170 | register_cell0_db: 171 | cmd.run: 172 | - name: nova-manage cell_v2 map_cell0 173 | - user: nova 174 | - shell: /bin/sh 175 | 176 | create_cell1_cell: 177 | cmd.run: 178 | - name: nova-manage cell_v2 create_cell --name=cell1 --verbose 179 | - user: nova 180 | - shell: /bin/sh 181 | 182 | populate_nova_database: 183 | cmd.run: 184 | - name: nova-manage db sync 185 | - user: nova 186 | - shell: /bin/sh 187 | 188 | 189 | openstack-nova-api.service: 190 | service.running: 191 | - enable: True 192 | - watch: 193 | - ini: /etc/nova/nova.conf 194 | 195 | openstack-nova-consoleauth.service: 196 | service.running: 197 | - enable: True 198 | - watch: 199 | - ini: /etc/nova/nova.conf 200 | 201 | openstack-nova-scheduler.service: 202 | service.running: 203 | - enable: True 204 | - watch: 205 | - ini: /etc/nova/nova.conf 206 | 207 | openstack-nova-conductor.service: 208 | service.running: 209 | - enable: True 210 | - watch: 211 | - ini: /etc/nova/nova.conf 212 | 213 | openstack-nova-novncproxy.service: 214 | service.running: 215 | - enable: True 216 | - watch: 217 | - ini: /etc/nova/nova.conf 218 | 219 | -------------------------------------------------------------------------------- /notes/fpm_nginx_luajit.md: -------------------------------------------------------------------------------- 1 | 2 | ### Build NGINX with LuaJIT support on CentOS 6.6 3 | 4 | ``` 5 | yum -y groupinstall "Development Tools" 6 | cd /usr/local/src 7 | ``` 8 | 9 | ``` 10 | wget http://luajit.org/download/LuaJIT-2.0.4.tar.gz 11 | wget http://tengine.taobao.org/download/tengine-2.1.0.tar.gz 12 | for i in `ls`; do tar zxvf $i; done 13 | ``` 14 | 15 | ``` 16 | cd LuaJIT-2.0.4 17 | make PREFIX=/usr 18 | rm -Rf /tmp/LuaJIT-2.0.4 19 | make 20 | make install PREFIX=/usr DESTDIR=/tmp/LuaJIT-2.0.4 21 | make install PREFIX=/usr 22 | ``` 23 | 24 | ``` 25 | yum install pcre-devel 
openssl-devel 26 | 27 | cd tengine-2.1.0 28 | ./configure --with-http_lua_module \ 29 | --prefix=/usr \ 30 | --conf-path=/etc/nginx/nginx.conf \ 31 | --with-luajit-inc=/usr/include/luajit-2.0 \ 32 | --with-luajit-lib=/usr/lib \ 33 | --error-log-path=/var/log/nginx/error.log \ 34 | --http-log-path=/var/log/nginx/access.log \ 35 | --pid-path=/var/run/nginx.pid \ 36 | --lock-path=/var/run/nginx.lock 37 | make 38 | make install PREFIX=/usr DESTDIR=/tmp/tengine-2.1.0 39 | make install PREFIX=/usr 40 | ``` 41 | 42 | ``` 43 | [root@conn1 sbin]$ which nginx 44 | /usr/sbin/nginx 45 | ``` 46 | 47 | ``` 48 | [root@conn1 sbin]$ nginx -V 49 | Tengine version: Tengine/2.1.0 (nginx/1.6.2) 50 | built by gcc 4.4.7 20120313 (Red Hat 4.4.7-11) (GCC) 51 | TLS SNI support enabled 52 | configure arguments: --with-http_lua_module --prefix=/usr --conf-path=/etc/nginx/nginx.conf --with-luajit-inc=/usr/include/luajit-2.0 --with-luajit-lib=/usr/lib --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.lock 53 | loaded modules: 54 | ngx_core_module (static) 55 | ngx_errlog_module (static) 56 | ngx_conf_module (static) 57 | ngx_dso_module (static) 58 | ngx_syslog_module (static) 59 | ngx_events_module (static) 60 | ngx_event_core_module (static) 61 | ngx_epoll_module (static) 62 | ngx_procs_module (static) 63 | ngx_proc_core_module (static) 64 | ngx_openssl_module (static) 65 | ngx_regex_module (static) 66 | ngx_http_module (static) 67 | ngx_http_core_module (static) 68 | ngx_http_log_module (static) 69 | ngx_http_upstream_module (static) 70 | ngx_http_static_module (static) 71 | ngx_http_autoindex_module (static) 72 | ngx_http_index_module (static) 73 | ngx_http_auth_basic_module (static) 74 | ngx_http_access_module (static) 75 | ngx_http_limit_conn_module (static) 76 | ngx_http_limit_req_module (static) 77 | ngx_http_geo_module (static) 78 | ngx_http_map_module (static) 79 | ngx_http_split_clients_module (static) 80 | ngx_http_referer_module (static) 81 | ngx_http_rewrite_module (static) 82 | ngx_http_ssl_module (static) 83 | ngx_http_proxy_module (static) 84 | ngx_http_fastcgi_module (static) 85 | ngx_http_uwsgi_module (static) 86 | ngx_http_scgi_module (static) 87 | ngx_http_memcached_module (static) 88 | ngx_http_empty_gif_module (static) 89 | ngx_http_browser_module (static) 90 | ngx_http_user_agent_module (static) 91 | ngx_http_upstream_ip_hash_module (static) 92 | ngx_http_upstream_consistent_hash_module (static) 93 | ngx_http_upstream_check_module (static) 94 | ngx_http_upstream_least_conn_module (static) 95 | ngx_http_reqstat_module (static) 96 | ngx_http_upstream_keepalive_module (static) 97 | ngx_http_upstream_dynamic_module (static) 98 | ngx_http_stub_status_module (static) 99 | ngx_http_write_filter_module (static) 100 | ngx_http_header_filter_module (static) 101 | ngx_http_chunked_filter_module (static) 102 | ngx_http_range_header_filter_module (static) 103 | ngx_http_gzip_filter_module (static) 104 | ngx_http_postpone_filter_module (static) 105 | ngx_http_ssi_filter_module (static) 106 | ngx_http_charset_filter_module (static) 107 | ngx_http_userid_filter_module (static) 108 | ngx_http_footer_filter_module (static) 109 | ngx_http_trim_filter_module (static) 110 | ngx_http_headers_filter_module (static) 111 | ngx_http_lua_module (static) 112 | ngx_http_upstream_session_sticky_module (static) 113 | ngx_http_copy_filter_module (static) 114 | ngx_http_range_body_filter_module (static) 115 | 
ngx_http_not_modified_filter_module (static) 116 | ``` 117 | ``` 118 | [root@conn1 nginx]$ nginx 119 | [root@conn1 nginx]$ ps -ef | grep nginx 120 | root 11012 1 0 01:53 ? 00:00:00 nginx: master process nginx 121 | nobody 11013 11012 0 01:53 ? 00:00:00 nginx: worker process 122 | root 11015 1806 0 01:53 pts/0 00:00:00 grep nginx 123 | [root@conn1 nginx]$ curl localhost 124 | 125 | 126 | 127 | Welcome to tengine! 128 | 135 | 136 | 137 |

Welcome to tengine!
138 | If you see this page, the tengine web server is successfully installed and
139 | working. Further configuration is required.
140 | 
141 | For online documentation and support please refer to
142 | tengine.taobao.org.
143 | 
144 | Thank you for using tengine.
145 | 146 | 147 | ``` 148 | 149 | https://rubygems.org/gems/ 150 | 151 | ``` 152 | [root@conn1 fpm-1.3.3]$ pwd 153 | /usr/local/src/fpm-1.3.3 154 | [root@conn1 fpm-1.3.3]$ ls -l 155 | total 1484 156 | -rw-r--r-- 1 root root 15360 Jun 16 02:07 arr-pm-0.0.10.gem 157 | -rw-r--r-- 1 root root 89600 Jun 16 02:07 backports-3.6.4.gem 158 | -rw-r--r-- 1 root root 20992 Jun 16 02:07 cabin-0.7.1.gem 159 | -rw-r--r-- 1 root root 28672 Jun 16 02:07 childprocess-0.5.6.gem 160 | -rw-r--r-- 1 root root 24576 Jun 16 02:07 clamp-0.6.5.gem 161 | -rw-r--r-- 1 root root 881152 Jun 16 02:07 ffi-1.9.8.gem 162 | -rw-r--r-- 1 root root 114176 Jun 16 02:07 fpm-1.3.3.gem 163 | -rw-r--r-- 1 root root 9728 Jun 16 02:07 insist-1.0.0.gem 164 | -rw-r--r-- 1 root root 152064 Jun 16 02:07 json-1.8.3.gem 165 | -rw-r--r-- 1 root root 135680 Jun 16 02:07 pry-0.10.1.gem 166 | -rw-r--r-- 1 root root 10240 Jun 16 02:07 rspec-3.3.0.gem 167 | -rw-r--r-- 1 root root 14336 Jun 16 02:07 stud-0.0.19.gem 168 | [root@conn1 fpm-1.3.3]$ 169 | ``` 170 | 171 | ``` 172 | rm: remove regular file `clamp-1.0.0.gem'? y 173 | [root@conn1 fpm-1.3.3]$ gem install fpm 174 | ERROR: http://rubygems.org/ does not appear to be a repository 175 | Successfully installed clamp-0.6.5 176 | Successfully installed fpm-1.3.3 177 | 2 gems installed 178 | Installing ri documentation for clamp-0.6.5... 179 | Installing ri documentation for fpm-1.3.3... 180 | Installing RDoc documentation for clamp-0.6.5... 181 | Installing RDoc documentation for fpm-1.3.3... 182 | ``` 183 | 184 | ``` 185 | [root@conn1 fpm-1.3.3]$ fpm 186 | Missing required -s flag. What package source did you want? {:level=>:warn} 187 | Missing required -t flag. What package output did you want? {:level=>:warn} 188 | No parameters given. You need to pass additional command arguments so that I know what you want to build packages from. For example, for '-s dir' you would pass a list of files and directories. For '-s gem' you would pass a one or more gems to package from. As a full example, this will make an rpm of the 'json' rubygem: `fpm -s gem -t rpm json` {:level=>:warn} 189 | Fix the above problems, and you'll be rolling packages in no time! {:level=>:fatal} 190 | ``` 191 | 192 | ``` 193 | mkdir -p /tmp/tengine-2.1.0/etc/init.d/ 194 | cp /etc/init.d/nginx /tmp/tengine-2.1.0/etc/init.d/ 195 | 196 | cd /usr/local/src/tengine-2.1.0 197 | fpm -f -s dir -t rpm -n tengine -v 2.1.0 -p tengine-2.1.0-1.el6.x86_64.rpm -d LuaJIT -C /tmp/tengine-2.1.0 usr etc 198 | rpm -q -filesbypkg -p tengine-2.1.0-1.el6.x86_64.rpm 199 | ``` 200 | 201 | ``` 202 | fpm -f -s dir -t rpm -n LuaJIT -v 2.0.4 -p LuaJIT-2.0.4-1.el6.x86_64.rpm -C /tmp/LuaJIT-2.0.4 usr 203 | rpm -q -filesbypkg -p LuaJIT-2.0.4-1.el6.x86_64.rpm 204 | ``` 205 | 206 | 207 | -------------------------------------------------------------------------------- /states/network/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Network 4 | 5 | 1. [General](#general) 6 | 2. [CentOS 7 Teaming](#centos-7-teaming) 7 | 3. [CentOS 7 Static IP](#centos-7-static-ip) 8 | 4. [CentOS 6 Bonding](#centos-6-bonding) 9 | 5. 
[References](#references) 10 | 11 | ### General 12 | 13 | - Stats: `ss` 14 | - Add default route `ip route add default via 192.168.1.1 dev enp5s0` 15 | - IP: `ip addr` `ip route` 16 | - Show which process is using a port: `netstat -anp | grep 8775` 17 | - Set MTU to 9000: `ip link set enp0s20f2 mtu 9000` 18 | - Test Jumbo Frames: `ping -M do -s 8972 192.168.1.6` 19 | The reason for the 8972 is that the ICMP/ping implementation doesn’t encapsulate the 28 byte ICMP (8) + IP (20) (ping + standard internet protocol packet) header – thus we must take the 9000 and subtract 28 = 8972. 20 | - Disable NetworkManager 21 | ```bash 22 | service NetworkManager stop 23 | chkconfig NetworkManager off 24 | service network start 25 | chkconfig network on 26 | ``` 27 | - TCP Memory Stack 28 | ``` 29 | # cat /proc/sys/net/ipv4/tcp_mem 30 | 93231 124311 186462 31 | # cat /proc/net/sockstat 32 | sockets: used 609 33 | TCP: inuse 5 orphan 0 tw 0 alloc 6 mem 2 34 | UDP: inuse 3 mem 2 35 | UDPLITE: inuse 0 36 | RAW: inuse 0 37 | FRAG: inuse 0 memory 0 38 | ``` 39 | 40 | 41 | #### Multi-queue hashing algoritms 42 | 43 | ``` 44 | [root@store1 ~]$ ethtool -n enp0s20f0 rx-flow-hash udp4 45 | UDP over IPV4 flows use these fields for computing Hash flow key: 46 | IP SA 47 | IP DA 48 | 49 | [root@store1 ~]$ ethtool -N enp0s20f0 rx-flow-hash udp4 sdfn 50 | [root@store1 ~]$ ethtool -n enp0s20f0 rx-flow-hash udp4 51 | UDP over IPV4 flows use these fields for computing Hash flow key: 52 | IP SA 53 | IP DA 54 | L4 bytes 0 & 1 [TCP/UDP src port] 55 | L4 bytes 2 & 3 [TCP/UDP dst port] 56 | 57 | [root@store1 ~]$ 58 | ``` 59 | 60 | ### CentOS 7 Teaming 61 | 62 | Teaming replaces bonding in CentOS 7 release. 63 | 64 | ``` 65 | [root@ring-a6 ~]$ cat /etc/sysconfig/network-scripts/ifcfg-team0 66 | DEVICE="team0" 67 | DEVICETYPE="Team" 68 | ONBOOT="yes" 69 | BOOTPROTO="none" 70 | IPADDR=10.0.0.66 71 | NETMASK=255.255.255.0 72 | #TEAM_CONFIG='{"runnner":{"name":"roundrobin"}}' 73 | TEAM_CONFIG='{"runnner":{"name":"lacp"}}' 74 | MTU=9000 75 | [root@ring-a6 ~]$ cat /etc/sysconfig/network-scripts/ifcfg-enp0s20f0 76 | DEVICE="enp0s20f0" 77 | DEVICETYPE="TeamPort" 78 | ONBOOT="yes" 79 | BOOTPROTO="none" 80 | TEAM_MASTER="team0" 81 | IPV6INIT="no" 82 | [root@ring-a6 ~]$ cat /etc/sysconfig/network-scripts/ifcfg-enp0s20f1 83 | DEVICE="enp0s20f1" 84 | DEVICETYPE="TeamPort" 85 | ONBOOT="yes" 86 | BOOTPROTO="none" 87 | TEAM_MASTER="team0" 88 | IPV6INIT="no" 89 | ``` 90 | 91 | #### Verify setup 92 | 93 | ``` 94 | [root@ring-a6 ~]$ teamdctl team0 state view 95 | setup: 96 | runner: roundrobin 97 | ports: 98 | enp0s20f0 99 | link watches: 100 | link summary: up 101 | instance[link_watch_0]: 102 | name: ethtool 103 | link: up 104 | enp0s20f1 105 | link watches: 106 | link summary: up 107 | instance[link_watch_0]: 108 | name: ethtool 109 | link: up 110 | [root@ring-a6 ~]$ teamnl team0 ports 111 | 3: enp0s20f1: up 0Mbit FD 112 | 2: enp0s20f0: up 0Mbit FD 113 | [root@ring-a6 ~]$ 114 | ``` 115 | 116 | ``` 117 | [root@store1 ~]$ teamnl team0 ports 118 | 3: enp0s20f1: up 1000Mbit FD 119 | 2: enp0s20f0: up 1000Mbit FD 120 | [root@store1 ~]$ 121 | ``` 122 | 123 | #### IP address information 124 | ``` 125 | [root@ring-a6 ~]$ ip a 126 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN 127 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 128 | inet 127.0.0.1/8 scope host lo 129 | valid_lft forever preferred_lft forever 130 | 2: enp0s20f0: mtu 9000 qdisc mq master team0 state UP qlen 1000 131 | link/ether 00:25:90:f1:0e:58 brd ff:ff:ff:ff:ff:ff 132 | 3: 
enp0s20f1: mtu 9000 qdisc mq master team0 state UP qlen 1000 133 | link/ether 00:25:90:f1:0e:58 brd ff:ff:ff:ff:ff:ff 134 | 4: enp0s20f2: mtu 9000 qdisc mq master team1 state UP qlen 1000 135 | link/ether 00:25:90:f1:0e:5a brd ff:ff:ff:ff:ff:ff 136 | 5: enp0s20f3: mtu 9000 qdisc mq master team1 state UP qlen 1000 137 | link/ether 00:25:90:f1:0e:5a brd ff:ff:ff:ff:ff:ff 138 | 6: team0: mtu 9000 qdisc noqueue state UP 139 | link/ether 00:25:90:f1:0e:58 brd ff:ff:ff:ff:ff:ff 140 | inet 10.0.0.66/24 brd 10.0.0.255 scope global team0 141 | valid_lft forever preferred_lft forever 142 | 7: team1: mtu 9000 qdisc noqueue state UP 143 | link/ether 00:25:90:f1:0e:5a brd ff:ff:ff:ff:ff:ff 144 | inet 10.0.1.66/24 brd 10.0.1.255 scope global team1 145 | valid_lft forever preferred_lft forever 146 | [root@ring-a6 ~]$ 147 | 148 | ``` 149 | 150 | ### CentOS 7 Static IP 151 | 152 | Edit /etc/sysconfig/network 153 | ``` 154 | NETWORKING=yes 155 | HOSTNAME=store8.mgmt 156 | GATEWAY=10.241.127.1 157 | ``` 158 | 159 | Edit /etc/sysconfig/network-scripts/ifcfg- 160 | ``` 161 | BOOTPROTO=static 162 | IPADDR=10.241.127.115 163 | ``` 164 | 165 | Disable NetworkManager and enable networking 166 | ``` 167 | systemctl disable NetworkManager.service 168 | systemctl enable network.service 169 | reboot 170 | ``` 171 | 172 | ### CentOS 6 Bonding 173 | 174 | Bonding runs in user and kernel space. 175 | 176 | #### Example: eth1/eth2 bonded to bond0 177 | 178 | Create /etc/modprobe.d/bonding.conf to load the kernel module 179 | ``` 180 | alias bond0 bonding 181 | ``` 182 | 183 | Create /etc/sysconfig/network-scripts/ifcfg-bond0 184 | ``` 185 | DEVICE=bond0 186 | BOOTPROTO=static 187 | IPADDR=10.0.1.21 188 | NETMASK=255.255.255.0 189 | ONBOOT=yes 190 | USERCTL=no 191 | BONDING_OPTS="miimon=100 mode=1" 192 | ``` 193 | Bonding options: 194 | - LACP: `BONDING_OPTS="miimon=100 mode=4 lacp_rate=1 xmit_hash_policy=layer3+4"` 195 | 196 | 197 | Edit /etc/sysconfig/network-scripts/ifcfg-eth1 198 | ``` 199 | DEVICE=eth1 200 | HWADDR="00:25:90:F1:0D:A8" 201 | TYPE=Ethernet 202 | UUID=d4df31a8-0549-4568-953c-2a9db136d53c 203 | ONBOOT=yes 204 | NM_CONTROLLED=no 205 | BOOTPROTO=none 206 | USERCTL=no 207 | MASTER=bond0 208 | SLAVE=yes 209 | ``` 210 | 211 | Edit /etc/sysconfig/network-scripts/ifcfg-eth2 212 | ``` 213 | DEVICE=eth2 214 | HWADDR="00:25:90:F1:0D:A9" 215 | TYPE=Ethernet 216 | UUID="0334725a-f9bd-4e89-a2f5-bf1590547af3" 217 | ONBOOT=yes 218 | NM_CONTROLLED=no 219 | BOOTPROTO=none 220 | USERCTL=no 221 | MASTER=bond0 222 | SLAVE=yes 223 | ``` 224 | 225 | Restart networking 226 | ``` 227 | $ service network restart 228 | ``` 229 | 230 | Check IP info 231 | ``` 232 | $ route 233 | Kernel IP routing table 234 | Destination Gateway Genmask Flags Metric Ref Use Iface 235 | 10.0.0.0 * 255.255.255.0 U 0 0 0 eth0 236 | 10.0.1.0 * 255.255.255.0 U 0 0 0 bond0 237 | link-local * 255.255.0.0 U 1006 0 0 eth0 238 | link-local * 255.255.0.0 U 1014 0 0 bond0 239 | 240 | $ ifconfig -a 241 | bond0 Link encap:Ethernet HWaddr 00:25:90:F1:0D:A8 242 | inet addr:10.0.1.21 Bcast:10.0.1.255 Mask:255.255.255.0 243 | UP BROADCAST RUNNING MASTER MULTICAST MTU:1500 Metric:1 244 | RX packets:228450 errors:0 dropped:0 overruns:0 frame:0 245 | TX packets:2 errors:0 dropped:0 overruns:0 carrier:0 246 | collisions:0 txqueuelen:0 247 | RX bytes:124810606 (119.0 MiB) TX bytes:84 (84.0 b) 248 | ... 
249 | eth1 Link encap:Ethernet HWaddr 00:25:90:F1:0D:A8 250 | UP BROADCAST RUNNING SLAVE MULTICAST MTU:1500 Metric:1 251 | RX packets:109939 errors:0 dropped:0 overruns:0 frame:0 252 | TX packets:0 errors:0 dropped:0 overruns:0 carrier:0 253 | collisions:0 txqueuelen:1000 254 | RX bytes:62007840 (59.1 MiB) TX bytes:0 (0.0 b) 255 | Memory:df360000-df380000 256 | 257 | eth2 Link encap:Ethernet HWaddr 00:25:90:F1:0D:A8 258 | UP BROADCAST RUNNING SLAVE MULTICAST MTU:1500 Metric:1 259 | RX packets:118511 errors:0 dropped:0 overruns:0 frame:0 260 | TX packets:2 errors:0 dropped:0 overruns:0 carrier:0 261 | collisions:0 txqueuelen:1000 262 | RX bytes:62802766 (59.8 MiB) TX bytes:84 (84.0 b) 263 | Memory:df340000-df360000 264 | ``` 265 | 266 | Check bonding driver 267 | ``` 268 | $ cat /proc/net/bonding/bond0 269 | Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009) 270 | 271 | Bonding Mode: fault-tolerance (active-backup) 272 | Primary Slave: None 273 | Currently Active Slave: eth2 274 | MII Status: up 275 | MII Polling Interval (ms): 100 276 | Up Delay (ms): 0 277 | Down Delay (ms): 0 278 | 279 | Slave Interface: eth1 280 | MII Status: up 281 | Speed: 1000 Mbps 282 | Duplex: full 283 | Link Failure Count: 0 284 | Permanent HW addr: 00:25:90:f1:0d:a8 285 | Slave queue ID: 0 286 | 287 | Slave Interface: eth2 288 | MII Status: up 289 | Speed: 1000 Mbps 290 | Duplex: full 291 | Link Failure Count: 0 292 | Permanent HW addr: 00:25:90:f1:0d:a9 293 | Slave queue ID: 0 294 | ``` 295 | 296 | ### References: 297 | - [RHEL: Linux Bond / Team Multiple Network Interfaces (NIC) Into a Single Interface](http://www.cyberciti.biz/tips/linux-bond-or-team-multiple-network-interfaces-nic-into-single-interface.html) 298 | - [CentOS Teaming with VLANS](http://techgnat.blogspot.com/2014/12/centos-teaming-with-vlans.html) 299 | - [http://dak1n1.com/blog/7-performance-tuning-intel-10gbe](http://dak1n1.com/blog/7-performance-tuning-intel-10gbe) 300 | - [https://www.unixmen.com/configure-static-ip-centos-7/](https://www.unixmen.com/configure-static-ip-centos-7/) 301 | - [https://www.mylesgray.com/hardware/test-jumbo-frames-working/](https://www.mylesgray.com/hardware/test-jumbo-frames-working/) 302 | - [https://blog.cloudflare.com/how-to-receive-a-million-packets/](https://blog.cloudflare.com/how-to-receive-a-million-packets/) 303 | -------------------------------------------------------------------------------- /states/openstack/mysql/init.sls: -------------------------------------------------------------------------------- 1 | {% from "openstack/mysql/map.jinja" import mysql with context %} 2 | 3 | {% set mysql_host = salt['pillar.get']('openstack:controller:host') %} 4 | {% set mysql_root_password = salt['pillar.get']('mysql:root_pass') %} 5 | {% set controller = salt['pillar.get']('openstack:controller:host') %} 6 | 7 | {% set keystone_dbpass = salt['pillar.get']('openstack:auth:KEYSTONE_DBPASS') %} 8 | {% set glance_dbpass = salt['pillar.get']('openstack:auth:GLANCE_DBPASS') %} 9 | {% set nova_dbpass = salt['pillar.get']('openstack:auth:NOVA_DBPASS') %} 10 | {% set neutron_dbpass = salt['pillar.get']('openstack:auth:NEUTRON_DBPASS') %} 11 | 12 | 13 | 14 | mysql-server: 15 | 16 | pkg.installed: 17 | - pkgs: 18 | - {{ mysql.client }} 19 | - {{ mysql.server }} 20 | - {{ mysql.python }} 21 | 22 | file.managed: 23 | - name: {{ mysql.config }} 24 | - mode: 644 25 | - user: root 26 | - group: root 27 | # - require: 28 | # - pkg: {{ mysql.server }} 29 | - contents: | 30 | [mysqld] 31 | bind-address = 0.0.0.0 32 | 
default-storage-engine = innodb 33 | innodb_file_per_table = on 34 | max_connections = 4096 35 | collation-server = utf8_general_ci 36 | character-set-server = utf8 37 | 38 | service.running: 39 | - name: {{ mysql.service }} 40 | - enable: True 41 | # - require: 42 | # - pkg: {{ mysql.server }} 43 | 44 | mysql_user.present: 45 | - name: root 46 | - host: localhost 47 | - password: {{ salt['pillar.get']('mysql:root_pass') }} 48 | - password_hash: '*' 49 | - require: 50 | - service: {{ mysql.service }} 51 | 52 | # 53 | # Identity Service 54 | # 55 | 56 | keystone_db: 57 | mysql_database.present: 58 | - name: keystone 59 | - host: {{ mysql_host }} 60 | - connection_user: root 61 | - connection_pass: '{{ mysql_root_password }}' 62 | - connection_charset: utf8 63 | - require: 64 | - service: {{ mysql.service }} 65 | 66 | keystone_grant_localhost: 67 | mysql_user.present: 68 | - name: keystone 69 | - host: localhost 70 | - password: {{ keystone_dbpass }} 71 | - connection_user: root 72 | - connection_pass: '{{ mysql_root_password }}' 73 | - connection_charset: utf8 74 | - require: 75 | - service: {{ mysql.service }} 76 | 77 | mysql_grants.present: 78 | - grant: all privileges 79 | - database: keystone.* 80 | - user: keystone 81 | - host: localhost 82 | - connection_user: root 83 | - connection_pass: '{{ mysql_root_password }}' 84 | - connection_charset: utf8 85 | - require: 86 | - service: {{ mysql.service }} 87 | 88 | keystone_grant_all: 89 | mysql_user.present: 90 | - name: keystone 91 | - host: '%' 92 | - password: {{ keystone_dbpass }} 93 | - connection_user: root 94 | - connection_pass: '{{ mysql_root_password }}' 95 | - connection_charset: utf8 96 | - require: 97 | - service: {{ mysql.service }} 98 | 99 | mysql_grants.present: 100 | - grant: all privileges 101 | - database: keystone.* 102 | - user: keystone 103 | - host: '%' 104 | - connection_user: root 105 | - connection_pass: '{{ mysql_root_password }}' 106 | - connection_charset: utf8 107 | - require: 108 | - service: {{ mysql.service }} 109 | 110 | keystone_grant_controller: 111 | mysql_user.present: 112 | - name: keystone 113 | - host: '{{ salt['grains.get']('nodename') }}' 114 | - password: {{ keystone_dbpass }} 115 | - connection_user: root 116 | - connection_pass: '{{ mysql_root_password }}' 117 | - connection_charset: utf8 118 | - require: 119 | - service: {{ mysql.service }} 120 | 121 | mysql_grants.present: 122 | - grant: all privileges 123 | - database: keystone.* 124 | - user: keystone 125 | - host: '{{ salt['grains.get']('nodename') }}' 126 | - connection_user: root 127 | - connection_pass: '{{ mysql_root_password }}' 128 | - connection_charset: utf8 129 | - require: 130 | - service: {{ mysql.service }} 131 | 132 | # 133 | # Image Service 134 | # 135 | 136 | glance_db: 137 | mysql_database.present: 138 | - name: glance 139 | - host: {{ mysql_host }} 140 | - connection_user: root 141 | - connection_pass: '{{ mysql_root_password }}' 142 | - connection_charset: utf8 143 | - require: 144 | - service: {{ mysql.service }} 145 | # - pkg: {{ mysql.python }} 146 | 147 | # 148 | # Grant proper access to the glance database: 149 | # 150 | 151 | glance_grant_localhost: 152 | mysql_user.present: 153 | - name: glance 154 | - host: localhost 155 | - password: {{ glance_dbpass }} 156 | - connection_user: root 157 | - connection_pass: '{{ mysql_root_password }}' 158 | - connection_charset: utf8 159 | - require: 160 | - service: {{ mysql.service }} 161 | 162 | mysql_grants.present: 163 | - grant: all privileges 164 | - database: glance.* 165 
| - user: glance 166 | - host: localhost 167 | - connection_user: root 168 | - connection_pass: '{{ mysql_root_password }}' 169 | - connection_charset: utf8 170 | - require: 171 | - service: {{ mysql.service }} 172 | 173 | glance_grant_all: 174 | mysql_user.present: 175 | - name: glance 176 | - host: '%' 177 | - password: {{ glance_dbpass }} 178 | - connection_user: root 179 | - connection_pass: '{{ mysql_root_password }}' 180 | - connection_charset: utf8 181 | - require: 182 | - service: {{ mysql.service }} 183 | 184 | mysql_grants.present: 185 | - grant: all privileges 186 | - database: glance.* 187 | - user: glance 188 | - host: '%' 189 | - connection_user: root 190 | - connection_pass: '{{ mysql_root_password }}' 191 | - connection_charset: utf8 192 | - require: 193 | - service: {{ mysql.service }} 194 | 195 | glance_grant_controller: 196 | mysql_user.present: 197 | - name: glance 198 | - host: '{{ salt['grains.get']('nodename') }}' 199 | - password: {{ glance_dbpass }} 200 | - connection_user: root 201 | - connection_pass: '{{ mysql_root_password }}' 202 | - connection_charset: utf8 203 | - require: 204 | - service: {{ mysql.service }} 205 | 206 | mysql_grants.present: 207 | - grant: all privileges 208 | - database: glance.* 209 | - user: glance 210 | - host: '{{ salt['grains.get']('nodename') }}' 211 | - connection_user: root 212 | - connection_pass: '{{ mysql_root_password }}' 213 | - connection_charset: utf8 214 | - require: 215 | - service: {{ mysql.service }} 216 | 217 | # 218 | # Nova Controller 219 | # 220 | 221 | {% for db in ['nova_api','nova','nova_cell0'] %} 222 | 223 | {{ db }}_db: 224 | mysql_database.present: 225 | - name: {{ db }} 226 | - host: {{ mysql_host }} 227 | - connection_user: root 228 | - connection_pass: '{{ mysql_root_password }}' 229 | - connection_charset: utf8 230 | - require: 231 | - service: {{ mysql.service }} 232 | 233 | # 234 | # Grant proper access to the nova database: 235 | # 236 | 237 | {{ db }}_grant_localhost: 238 | mysql_user.present: 239 | - name: nova 240 | - host: localhost 241 | - password: {{ nova_dbpass }} 242 | - connection_user: root 243 | - connection_pass: '{{ mysql_root_password }}' 244 | - connection_charset: utf8 245 | - require: 246 | - service: {{ mysql.service }} 247 | 248 | mysql_grants.present: 249 | - grant: all privileges 250 | - database: {{ db }}.* 251 | - user: nova 252 | - host: localhost 253 | - connection_user: root 254 | - connection_pass: '{{ mysql_root_password }}' 255 | - connection_charset: utf8 256 | - require: 257 | - service: {{ mysql.service }} 258 | 259 | {{ db }}_grant_all: 260 | mysql_user.present: 261 | - name: nova 262 | - host: '%' 263 | - password: {{ nova_dbpass }} 264 | - connection_user: root 265 | - connection_pass: '{{ mysql_root_password }}' 266 | - connection_charset: utf8 267 | - require: 268 | - service: {{ mysql.service }} 269 | 270 | mysql_grants.present: 271 | - grant: all privileges 272 | - database: {{ db }}.* 273 | - user: nova 274 | - host: '%' 275 | - connection_user: root 276 | - connection_pass: '{{ mysql_root_password }}' 277 | - connection_charset: utf8 278 | - require: 279 | - service: {{ mysql.service }} 280 | 281 | {{ db }}_grant_controller: 282 | mysql_user.present: 283 | - name: nova 284 | - host: '{{ salt['grains.get']('nodename') }}' 285 | - password: {{ nova_dbpass }} 286 | - connection_user: root 287 | - connection_pass: '{{ mysql_root_password }}' 288 | - connection_charset: utf8 289 | - require: 290 | - service: {{ mysql.service }} 291 | 292 | mysql_grants.present: 293 | 
- grant: all privileges 294 | - database: {{ db }}.* 295 | - user: nova 296 | - host: '{{ salt['grains.get']('nodename') }}' 297 | - connection_user: root 298 | - connection_pass: '{{ mysql_root_password }}' 299 | - connection_charset: utf8 300 | - require: 301 | - service: {{ mysql.service }} 302 | 303 | {% endfor %} 304 | 305 | # 306 | # Neutron 307 | # 308 | 309 | neutron_db: 310 | mysql_database.present: 311 | - name: neutron 312 | - host: {{ mysql_host }} 313 | - connection_user: root 314 | - connection_pass: '{{ mysql_root_password }}' 315 | - connection_charset: utf8 316 | - require: 317 | - service: {{ mysql.service }} 318 | 319 | neutron_grant_localhost: 320 | mysql_user.present: 321 | - name: neutron 322 | - host: localhost 323 | - password: {{ neutron_dbpass }} 324 | - connection_user: root 325 | - connection_pass: '{{ mysql_root_password }}' 326 | - connection_charset: utf8 327 | - require: 328 | - service: {{ mysql.service }} 329 | 330 | mysql_grants.present: 331 | - grant: all privileges 332 | - database: neutron.* 333 | - user: neutron 334 | - host: localhost 335 | - connection_user: root 336 | - connection_pass: '{{ mysql_root_password }}' 337 | - connection_charset: utf8 338 | - require: 339 | - service: {{ mysql.service }} 340 | 341 | neutron_grant_all: 342 | mysql_user.present: 343 | - name: neutron 344 | - host: '%' 345 | - password: {{ neutron_dbpass }} 346 | - connection_user: root 347 | - connection_pass: '{{ mysql_root_password }}' 348 | - connection_charset: utf8 349 | - require: 350 | - service: {{ mysql.service }} 351 | 352 | mysql_grants.present: 353 | - grant: all privileges 354 | - database: neutron.* 355 | - user: neutron 356 | - host: '%' 357 | - connection_user: root 358 | - connection_pass: '{{ mysql_root_password }}' 359 | - connection_charset: utf8 360 | - require: 361 | - service: {{ mysql.service }} 362 | 363 | neutron_grant_controller: 364 | mysql_user.present: 365 | - name: neutron 366 | - host: '{{ salt['grains.get']('nodename') }}' 367 | - password: {{ neutron_dbpass }} 368 | - connection_user: root 369 | - connection_pass: '{{ mysql_root_password }}' 370 | - connection_charset: utf8 371 | - require: 372 | - service: {{ mysql.service }} 373 | 374 | mysql_grants.present: 375 | - grant: all privileges 376 | - database: neutron.* 377 | - user: neutron 378 | - host: '{{ salt['grains.get']('nodename') }}' 379 | - connection_user: root 380 | - connection_pass: '{{ mysql_root_password }}' 381 | - connection_charset: utf8 382 | - require: 383 | - service: {{ mysql.service }} 384 | 385 | -------------------------------------------------------------------------------- /states/openstack/DeployWindowsCloudInstance.md: -------------------------------------------------------------------------------- 1 | 2 | ## Deploy Windows Cloud Instance on OpenStack Ocata 3 | 4 | 1. Download the evaulation image for KVM from https://cloudbase.it/windows-cloud-images/. Decompress the image and save it to the /home/devops/openstack/images directory on the controller. 5 | 6 | 2. Deploy the downloaded image to Glance 7 | 8 | Perform the following steps on the **controller** with the **admin** environment 9 | 10 | ``` 11 | [root@controller openstack]$ . 
admin-openrc.sh 12 | [root@controller openstack]$ cd images/ 13 | [root@controller images]$ openstack image create "win2012-r2-std-eval-20170321" \ 14 | --file windows_server_2012_r2_standard_eval_kvm_20170321.qcow2 \ 15 | --disk-format qcow2 --container-format bare \ 16 | --property hypervisor_type=QEMU --property os_type=windows \ 17 | --public 18 | +------------------+------------------------------------------------------+ 19 | | Field | Value | 20 | +------------------+------------------------------------------------------+ 21 | | checksum | a05ead3a04ae663da77eee5d2cb2fa73 | 22 | | container_format | bare | 23 | | created_at | 2017-07-29T17:28:07Z | 24 | | disk_format | qcow2 | 25 | | file | /v2/images/74cb96cd-1aef-4770-8f87-c6694a372a3b/file | 26 | | id | 74cb96cd-1aef-4770-8f87-c6694a372a3b | 27 | | min_disk | 0 | 28 | | min_ram | 0 | 29 | | name | win2012-r2-std-eval-20170321 | 30 | | owner | 049bc1d6c4924390840e3d94ecdff939 | 31 | | properties | hypervisor_type='QEMU', os_type='windows' | 32 | | protected | False | 33 | | schema | /v2/schemas/image | 34 | | size | 12001017856 | 35 | | status | active | 36 | | tags | | 37 | | updated_at | 2017-07-29T17:29:42Z | 38 | | virtual_size | None | 39 | | visibility | public | 40 | +------------------+------------------------------------------------------+ 41 | ``` 42 | 43 | #### Launch an instance (DHCP) 44 | 45 | Perform the following steps on the **controller** with the **admin** environment 46 | 47 | ``` 48 | [root@controller openstack]$ . admin-openrc.sh 49 | [root@controller openstack]$ openstack server create --flavor m1.medium --image "win2012-r2-std-eval-20170321" \ 50 | --nic net-id=a275e07f-6e11-4ce1-92c1-40c32e764428 \ 51 | --security-group windows-default --key-name devops-key \ 52 | win2012r2-test-1 53 | +-------------------------------------+---------------------------------------------------------------------+ 54 | | Field | Value | 55 | +-------------------------------------+---------------------------------------------------------------------+ 56 | | OS-DCF:diskConfig | MANUAL | 57 | | OS-EXT-AZ:availability_zone | | 58 | | OS-EXT-SRV-ATTR:host | None | 59 | | OS-EXT-SRV-ATTR:hypervisor_hostname | None | 60 | | OS-EXT-SRV-ATTR:instance_name | | 61 | | OS-EXT-STS:power_state | NOSTATE | 62 | | OS-EXT-STS:task_state | scheduling | 63 | | OS-EXT-STS:vm_state | building | 64 | | OS-SRV-USG:launched_at | None | 65 | | OS-SRV-USG:terminated_at | None | 66 | | accessIPv4 | | 67 | | accessIPv6 | | 68 | | addresses | | 69 | | adminPass | sExxFrDB346Q | 70 | | config_drive | | 71 | | created | 2017-07-29T17:30:35Z | 72 | | flavor | m1.medium (3) | 73 | | hostId | | 74 | | id | cc70ac8d-d2f8-4c1f-9202-d396b9e54cbd | 75 | | image | win2012-r2-std-eval-20170321 (74cb96cd-1aef-4770-8f87-c6694a372a3b) | 76 | | key_name | devops-key | 77 | | name | win2012r2-test-1 | 78 | | progress | 0 | 79 | | project_id | 049bc1d6c4924390840e3d94ecdff939 | 80 | | properties | | 81 | | security_groups | name='windows-default' | 82 | | status | BUILD | 83 | | updated | 2017-07-29T17:30:35Z | 84 | | user_id | a3c712f29e7e4101ba7b7eb1bbb57a28 | 85 | | volumes_attached | | 86 | +-------------------------------------+---------------------------------------------------------------------+ 87 | ``` 88 | 89 | Wait for the status to go to ACTIVE 90 | ``` 91 | [root@controller openstack]$ openstack server list 92 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 93 | | ID | Name | Status | 
Networks | Image Name | 94 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 95 | | cc70ac8d-d2f8-4c1f-9202-d396b9e54cbd | win2012r2-test-1 | BUILD | provider=10.0.0.204 | win2012-r2-std-eval-20170321 | 96 | | c08726b5-52c4-4c02-a092-e9e15a83343c | test-instance | ACTIVE | provider=10.0.0.207 | cirros-0.3.4 | 97 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 98 | [root@controller openstack]$ openstack server list 99 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 100 | | ID | Name | Status | Networks | Image Name | 101 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 102 | | cc70ac8d-d2f8-4c1f-9202-d396b9e54cbd | win2012r2-test-1 | ACTIVE | provider=10.0.0.204 | win2012-r2-std-eval-20170321 | 103 | | c08726b5-52c4-4c02-a092-e9e15a83343c | test-instance | ACTIVE | provider=10.0.0.207 | cirros-0.3.4 | 104 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 105 | ``` 106 | ** NOTE: The instance tan take some time to fully spin up after entering ACTIVE status** 107 | 108 | 6. Connect to the console and get the Administrator password 109 | 110 | ``` 111 | [root@controller openstack]$ openstack server list 112 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 113 | | ID | Name | Status | Networks | Image Name | 114 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 115 | | cc70ac8d-d2f8-4c1f-9202-d396b9e54cbd | win2012r2-test-1 | ACTIVE | provider=10.0.0.204 | win2012-r2-std-eval-20170321 | 116 | | c08726b5-52c4-4c02-a092-e9e15a83343c | test-instance | ACTIVE | provider=10.0.0.207 | cirros-0.3.4 | 117 | +--------------------------------------+------------------+--------+---------------------+------------------------------+ 118 | [root@controller openstack]$ openstack console url show win2012r2-test-1 119 | +-------+---------------------------------------------------------------------------------+ 120 | | Field | Value | 121 | +-------+---------------------------------------------------------------------------------+ 122 | | type | novnc | 123 | | url | http://controller:6080/vnc_auto.html?token=2bed2a3e-c43d-4003-8383-67f0d4221c66 | 124 | +-------+---------------------------------------------------------------------------------+ 125 | [root@controller openstack]$ nova get-password win2012r2-test-1 /home/devops/.ssh/id_rsa 126 | fo0RMXmcfeOQwFsJvojd 127 | ``` 128 | 129 | ### Using FreeRDP to connect to the Instance 130 | 131 | 1. Connect to the instance via RDP 132 | 133 | ``` 134 | yum install freerdp 135 | xfreerdp -u Admin 10.0.0.204 136 | ``` 137 | 138 | ### Launch an instance with a fixed IP 139 | 140 | A dynamic IP is still configured in Internet Protocol Version 4 (TCP/IPv4) Properties as the static IP is managed on the Openstack level and not on the VM level. 141 | 142 | 1. 
Create a port 143 | 144 | ``` 145 | [root@controller openstack]$ openstack port create --network provider \ 146 | --fixed-ip subnet=2a20fb19-9150-4878-8f74-6ab44317ad56,ip-address=10.0.0.216 \ 147 | --security-group f25811d8-8d41-4976-a9ea-9bea8e0bad15 \ 148 | res216 149 | +-----------------------+---------------------------------------------------------------------------+ 150 | | Field | Value | 151 | +-----------------------+---------------------------------------------------------------------------+ 152 | | admin_state_up | UP | 153 | | allowed_address_pairs | | 154 | | binding_host_id | | 155 | | binding_profile | | 156 | | binding_vif_details | | 157 | | binding_vif_type | unbound | 158 | | binding_vnic_type | normal | 159 | | created_at | 2017-07-29T20:45:17Z | 160 | | description | | 161 | | device_id | | 162 | | device_owner | | 163 | | dns_assignment | None | 164 | | dns_name | None | 165 | | extra_dhcp_opts | | 166 | | fixed_ips | ip_address='10.0.0.216', subnet_id='2a20fb19-9150-4878-8f74-6ab44317ad56' | 167 | | id | 72871cef-f511-4414-b731-70afb2ff65ae | 168 | | ip_address | None | 169 | | mac_address | fa:16:3e:00:59:87 | 170 | | name | res216 | 171 | | network_id | a275e07f-6e11-4ce1-92c1-40c32e764428 | 172 | | option_name | None | 173 | | option_value | None | 174 | | port_security_enabled | True | 175 | | project_id | 049bc1d6c4924390840e3d94ecdff939 | 176 | | qos_policy_id | None | 177 | | revision_number | 5 | 178 | | security_groups | fff0ae97-9395-4601-ad9e-199767996ff5 | 179 | | status | DOWN | 180 | | subnet_id | None | 181 | | updated_at | 2017-07-29T20:45:17Z | 182 | +-----------------------+---------------------------------------------------------------------------+ 183 | ``` 184 | 185 | 2. Launch instance 186 | 187 | ``` 188 | [root@controller openstack]$ openstack server create --flavor m1.large --image "win2012-r2-std-eval-20170321" \ 189 | --nic port-id=72871cef-f511-4414-b731-70afb2ff65ae \ 190 | --security-group windows-default --key-name devops-key \ 191 | win2012r2-s1 192 | ``` 193 | 194 | ### Configure Active Directory and DNS Server 195 | 196 | 1. Login as Administrator. 197 | 2. Configure the server for static IP address and set the FQDN. Then reestart the computer. 198 | 3. From the Server Manager Dashboard, set the Active Directory and DNS Roles 199 | * a. Click Add roles and features. The Add Roles and Features Wizard appears. 200 | * b. Click Next at Before you begin screen. 201 | * c. The Installation Type page appears. Make sure Roles-based or feature-based installation is checked. Click Next. 202 | * d. The Server Selection page appears. MAke sure Select a server from the server pool is clicked, and the server is highlighted. Click Next. 203 | * e. The Server Roles dialog appears. Check: 204 | - Active Directory Domain Services 205 | - DNS Server 206 | * d. Take the defaults for the remaining steps and click Next 207 | * e. Click Install 208 | 209 | 4. You should see AD DS and DNS added to the list column in the Server Manager Dashboard 210 | 5. Promote the server to Domain Controller 211 | * a. Select notification icon in Dashboard and click Promote this server to a domain controller 212 | * b. For Select the deployment operation choose Add a new forest 213 | - Root domain name: **lab.local** 214 | * c. Type the Directory Services Restore Mode (DSRM) password: **DSRMpassword1** 215 | * e. Ignore the authoritative parent zone warning - click Next 216 | * f. Verify NetBIOS name: **LAB** 217 | * g. Review your selections and click Install 218 | * h. 
OS restarts 219 | 6. Log back into the OS and set the correct time zone (EST) 220 | 7. Create a **testuser1@lab.local** user in Active Directory Users and Computers as a test 221 | 222 | 223 | ### References 224 | 225 | - [OpenStack Windows Server 2012 R2 Evaluation Image](https://cloudbase.it/openstack-windows-server-2012-r2-evalution-images/ 226 | ) 227 | - [How to Assign a Private Static IP to an Azure VM](https://social.technet.microsoft.com/wiki/contents/articles/23447.how-to-assign-a-private-static-ip-to-an-azure-vm.aspx) 228 | - [Creating an Instance with a Specific Fixed IP](http://ibm-blue-box-help.github.io/help-documentation/openstack/userdocs/Creating-an-Instance-with-a-Specific-Fixed-IP/) 229 | - [How to Create An Instance With Static IP](http://ibm-blue-box-help.github.io/help-documentation/openstack/userdocs/Creating_Instances_With_Static_IP/) 230 | - [https://social.technet.microsoft.com/Forums/en-US/024cce0f-f2f1-4714-abc9-1a4ecf40638a/what-difference-between-primary-dns-suffix-and-connectionspecific-dns-suffix?forum=winserverNIS](https://social.technet.microsoft.com/Forums/en-US/024cce0f-f2f1-4714-abc9-1a4ecf40638a/what-difference-between-primary-dns-suffix-and-connectionspecific-dns-suffix?forum=winserverNIS) 231 | 232 | 233 | 234 | 235 | --------------------------------------------------------------------------------
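
The fixed-IP launch above combines several manual steps from this walkthrough: reserve a port, boot the instance on that port, wait for it to reach ACTIVE, then pull the Admin password. Below is a minimal sketch of how those same CLI calls could be chained in one script. The network, subnet ID, security group, image, flavor and key names are the placeholder values used in this walkthrough (adjust them for your environment), the `win2012r2-s2` / `10.0.0.217` values are hypothetical, and the polling loop is just one way to wait for the build, not part of the upstream OpenStack tooling.

```
#!/bin/bash
# Sketch only: wraps the port-reserve + boot + wait + get-password steps above.
# All names below are placeholders taken from this walkthrough -- adjust for your cloud.
set -e

. admin-openrc.sh    # admin credentials file used throughout this document

NAME=win2012r2-s2                                  # hypothetical instance name
FIXED_IP=10.0.0.217                                # hypothetical fixed address
SUBNET=2a20fb19-9150-4878-8f74-6ab44317ad56        # provider subnet ID from above
SECGROUP=windows-default
IMAGE="win2012-r2-std-eval-20170321"
FLAVOR=m1.large
KEY=devops-key

# Reserve the address so Neutron always hands this instance the same IP.
openstack port create --network provider \
  --fixed-ip subnet=${SUBNET},ip-address=${FIXED_IP} \
  --security-group ${SECGROUP} ${NAME}-port

PORT_ID=$(openstack port show ${NAME}-port -f value -c id)

# Boot the instance on the reserved port.
openstack server create --flavor ${FLAVOR} --image "${IMAGE}" \
  --nic port-id=${PORT_ID} \
  --security-group ${SECGROUP} --key-name ${KEY} \
  ${NAME}

# Poll until the build finishes (a stuck ERROR state would loop forever here;
# a real script should bail out after a timeout).
until [ "$(openstack server show ${NAME} -f value -c status)" = "ACTIVE" ]; do
  sleep 15
done

# Cloudbase-Init may need a few more minutes after ACTIVE before the
# password is available, so retry this command if it comes back empty.
nova get-password ${NAME} /home/devops/.ssh/id_rsa
```

Run it from the directory containing `admin-openrc.sh` on the controller; naming the port `${NAME}-port` is only a convention so the port and server are easy to match up later.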