├── exports
├── ansible
│   ├── roles
│   │   ├── prepare_host
│   │   │   ├── files
│   │   │   │   ├── dhcpd.conf
│   │   │   │   └── ganesha.conf
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── common
│   │   │   ├── vars
│   │   │   │   ├── files.yml
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── dhclient.j2
│   │   │   │   ├── oracle-nfs-mount.j2
│   │   │   │   ├── 99-asm-disks.rules.j2
│   │   │   │   └── rac-node-networks.j2
│   │   │   └── tasks
│   │   │       └── start_rac_node.yml
│   │   ├── add_diskgroup
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── add_database
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── add_rac_nodes
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── create_first_rac_node
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   └── create_oracle_image
│   │       └── tasks
│   │           └── main.yml
│   ├── add_database.yml
│   ├── prepare_host.yml
│   ├── create_first_rac_node.yml
│   ├── create_oracle_image.yml
│   ├── add_NDATA_diskgroup.yml
│   ├── create_second_rac_node.yml
│   └── test
│       ├── print_var.yml
│       └── test.yml
├── Dockerfile-racnode
│   ├── dbus_contexts
│   ├── grid_security_limits.conf
│   └── Dockerfile
├── coreos-bootstrap.yml
├── rac_and_docker_miller.pptx
├── Dockerfile-bind
│   ├── keys.conf
│   ├── db.10.10.10
│   ├── db.11.11.11
│   ├── named.conf.custom-zones
│   ├── db.example.com
│   ├── named.conf
│   └── Dockerfile
├── TODO.md
├── oraclenfs.mount
├── tools_config.rsp
├── 99-asm-disks.rules
├── dhclient-rac1-eth-pub.service
├── dhclient-rac2-eth-pub.service
├── dhclient-rac1-eth-priv.service
├── dhclient-rac2-eth-priv.service
├── viewcap.sh
├── Dockerfile-nfs
│   ├── entrypoint.sh
│   └── Dockerfile
├── dhcpd.conf
├── ganesha.conf
├── networks-rac1.sh
├── networks-rac2.sh
├── fixssh.sh
├── ANSIBLE_SETUP.md
├── cloud-config
├── ANSIBLE.md
├── COREOS.md
├── README.md
└── docker.py
/exports:
--------------------------------------------------------------------------------
1 | /oraclenfs (rw)
2 |
--------------------------------------------------------------------------------
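
The export above is consumed by the RAC node containers through the oraclenfs.mount unit shown later in this repository. As a sanity check, the same share can be mounted by hand with matching options (illustrative only; 10.10.10.12 is the NFS container's address used by oraclenfs.mount):

    sudo mkdir -p /oraclenfs
    sudo mount -t nfs -o rw,bg,hard,nointr,rsize=32768,wsize=32768,tcp,actimeo=0,vers=3,timeo=600 \
        10.10.10.12:/oraclenfs /oraclenfs
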
/ansible/roles/prepare_host/files/dhcpd.conf:
--------------------------------------------------------------------------------
1 | ../../../../dhcpd.conf
--------------------------------------------------------------------------------
/ansible/roles/prepare_host/files/ganesha.conf:
--------------------------------------------------------------------------------
1 | ../../../../ganesha.conf
--------------------------------------------------------------------------------
/Dockerfile-racnode/dbus_contexts:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/coreos-bootstrap.yml:
--------------------------------------------------------------------------------
1 | - hosts: all
2 | gather_facts: False
3 | roles:
4 | - defunctzombie.coreos-bootstrap
5 |
--------------------------------------------------------------------------------
/rac_and_docker_miller.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Seth-Miller/12c-rac-docker/HEAD/rac_and_docker_miller.pptx
--------------------------------------------------------------------------------
/Dockerfile-bind/keys.conf:
--------------------------------------------------------------------------------
1 | key dnsupdate. {
2 | algorithm HMAC-MD5;
3 | secret "REPLACE_WITH_SECRET_KEY";
4 | };
5 |
--------------------------------------------------------------------------------
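
The placeholder secret is replaced at image build time (see Dockerfile-bind/Dockerfile below). Once the real key exists, the same TSIG key can drive manual dynamic DNS updates, for example (sketch only; the test hostname and address are made up):

    SECRET=$(grep '^Key:' /etc/keys/Kdnsupdate.*.private | awk '{print $2}')
    printf 'server 10.10.10.10\nupdate add testhost.example.com. 86400 A 10.10.10.99\nsend\n' | \
        nsupdate -y hmac-md5:dnsupdate:${SECRET?}
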
/ansible/roles/common/vars/files.yml:
--------------------------------------------------------------------------------
1 |
2 | file_locations:
3 | file1:
4 | file2:
5 | file3:
6 | file4:
7 | file5:
8 | file6:
9 | file7:
10 |
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
1 | * Check for the existence of the disk devices early.
2 | * Make the add_database role generic for any database by using a variable.
3 | * Add inventory directory creation into Ansible.
4 |
--------------------------------------------------------------------------------
/ansible/add_database.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - hosts: all
4 | any_errors_fatal: True
5 | vars_files:
6 | - roles/common/vars/files.yml
7 | roles:
8 | - common
9 | - add_database
10 |
--------------------------------------------------------------------------------
/ansible/prepare_host.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - hosts: all
4 | any_errors_fatal: True
5 | vars_files:
6 | - roles/common/vars/files.yml
7 | roles:
8 | - common
9 | - prepare_host
10 |
--------------------------------------------------------------------------------
/ansible/create_first_rac_node.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - hosts: all
4 | any_errors_fatal: True
5 | vars_files:
6 | - roles/common/vars/files.yml
7 | roles:
8 | - common
9 | - create_first_rac_node
10 |
--------------------------------------------------------------------------------
/ansible/create_oracle_image.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - hosts: all
4 | any_errors_fatal: True
5 | vars_files:
6 | - roles/common/vars/files.yml
7 | roles:
8 | - common
9 | - create_oracle_image
10 |
--------------------------------------------------------------------------------
/oraclenfs.mount:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Mount oraclenfs NFS volume to /oraclenfs
3 |
4 | [Mount]
5 | What=10.10.10.12:/oraclenfs
6 | Where=/oraclenfs
7 | Type=nfs
8 | Options=rw,bg,hard,nointr,rsize=32768,wsize=32768,tcp,actimeo=0,vers=3,timeo=600
9 |
--------------------------------------------------------------------------------
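
The unit is named after its mount point, as systemd requires for mount units. Installed by hand inside a RAC node container it would be started like this (sketch; the Ansible roles normally handle this through the oracle-nfs-mount.j2 template):

    docker cp oraclenfs.mount rac1:/etc/systemd/system/oraclenfs.mount
    docker exec rac1 systemctl daemon-reload
    docker exec rac1 systemctl start oraclenfs.mount
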
/ansible/add_NDATA_diskgroup.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - hosts: all
4 | any_errors_fatal: True
5 | vars_files:
6 | - roles/common/vars/files.yml
7 | roles:
8 | - common
9 | - { role: add_diskgroup, this_disk_group: NDATA }
10 |
--------------------------------------------------------------------------------
/ansible/create_second_rac_node.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - hosts: all
4 | any_errors_fatal: True
5 | vars_files:
6 | - roles/common/vars/files.yml
7 | roles:
8 | - common
9 | - { role: add_rac_nodes, this_rac_node: rac2 }
10 |
--------------------------------------------------------------------------------
/tools_config.rsp:
--------------------------------------------------------------------------------
1 | oracle.assistants.asm|S_ASMPASSWORD=oracle_4U
2 | oracle.assistants.asm|S_ASMMONITORPASSWORD=oracle_4U
3 | oracle.crs|oracle_install_crs_ConfigureMgmtDB=FALSE
4 | oracle.crs|oracle_install_crs_MgmtDB_CDB=FALSE
5 | oracle.crs|oracle_install_crs_MgmtDB_Std=FALSE
6 |
--------------------------------------------------------------------------------
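
This response file supplies the ASM passwords and disables the management database for the grid infrastructure configuration tools. A hedged example of how a file like this is typically consumed after a software-only 12.1 grid installation, assuming the /u01/app/12.1.0/grid home used elsewhere in this repository and that the file has been copied to /home/grid inside the container:

    docker exec rac1 su - grid -c \
        "/u01/app/12.1.0/grid/cfgtoollogs/configToolAllCommands RESPONSE_FILE=/home/grid/tools_config.rsp"
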
/99-asm-disks.rules:
--------------------------------------------------------------------------------
1 | KERNEL=="sdd", SYMLINK+="asmdisks/asm-clu-121-DATA-disk1", OWNER="54421", GROUP="54422"
2 | KERNEL=="sde", SYMLINK+="asmdisks/asm-clu-121-DATA-disk2", OWNER="54421", GROUP="54422"
3 | KERNEL=="sdf", SYMLINK+="asmdisks/asm-clu-121-DATA-disk3", OWNER="54421", GROUP="54422"
4 |
--------------------------------------------------------------------------------
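
These rules live on the Docker host (the 99-asm-disks.rules.j2 template later in this repository generates the same content from variables). After copying them into place, reload udev so the asmdisks symlinks and ownership take effect:

    sudo cp 99-asm-disks.rules /etc/udev/rules.d/
    sudo udevadm control --reload-rules
    sudo udevadm trigger --subsystem-match=block
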
/Dockerfile-racnode/grid_security_limits.conf:
--------------------------------------------------------------------------------
1 | grid soft nofile 1024
2 | grid hard nofile 65536
3 | grid soft nproc 16384
4 | grid hard nproc 16384
5 | grid soft stack 10240
6 | grid hard stack 32768
7 | grid hard memlock 134217728
8 | grid soft memlock 134217728
9 |
--------------------------------------------------------------------------------
/dhclient-rac1-eth-pub.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Start DHCP client for eth-pub
3 | ConditionPathExists=/sys/class/net/eth-pub
4 |
5 | [Service]
6 | ExecStart=/usr/sbin/dhclient -d -H rac1 -pf /var/run/dhclient-eth-pub.pid eth-pub
7 | ExecStop=/usr/sbin/dhclient -x eth-pub
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
/dhclient-rac2-eth-pub.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Start DHCP client for eth-pub
3 | ConditionPathExists=/sys/class/net/eth-pub
4 |
5 | [Service]
6 | ExecStart=/usr/sbin/dhclient -d -H rac2 -pf /var/run/dhclient-eth-pub.pid eth-pub
7 | ExecStop=/usr/sbin/dhclient -x eth-pub
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
/dhclient-rac1-eth-priv.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Start DHCP client for eth-priv
3 | ConditionPathExists=/sys/class/net/eth-priv
4 |
5 | [Service]
6 | ExecStart=/usr/sbin/dhclient -d -H rac1-priv -pf /var/run/dhclient-eth-priv.pid eth-priv
7 | ExecStop=/usr/sbin/dhclient -x eth-priv
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
/dhclient-rac2-eth-priv.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Start DHCP client for eth-priv
3 | ConditionPathExists=/sys/class/net/eth-priv
4 |
5 | [Service]
6 | ExecStart=/usr/sbin/dhclient -d -H rac2-priv -pf /var/run/dhclient-eth-priv.pid eth-priv
7 | ExecStop=/usr/sbin/dhclient -x eth-priv
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
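
These four units differ only in node and interface names; the dhclient.j2 template below generates them. Inside a container they are linked and started by the networks-rac*.sh scripts; done manually for rac1's public interface it would look like this (sketch):

    docker exec rac1 ln -sf /usr/lib/custom_services/dhclient-rac1-eth-pub.service \
        /etc/systemd/system/dhclient-rac1-eth-pub.service
    docker exec rac1 systemctl daemon-reload
    docker exec rac1 systemctl start dhclient-rac1-eth-pub.service
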
/Dockerfile-bind/db.10.10.10:
--------------------------------------------------------------------------------
1 | $TTL 86400
2 | @ IN SOA example.com. root.example.com. (
3 | 1 ; Serial
4 | 604800 ; Refresh
5 | 86400 ; Retry
6 | 2419200 ; Expire
7 | 86400 ) ; Negative Cache TTL
8 | ;
9 | @ IN NS localhost.
10 |
--------------------------------------------------------------------------------
/Dockerfile-bind/db.11.11.11:
--------------------------------------------------------------------------------
1 | $TTL 86400
2 | @ IN SOA example.com. root.example.com. (
3 | 1 ; Serial
4 | 604800 ; Refresh
5 | 86400 ; Retry
6 | 2419200 ; Expire
7 | 86400 ) ; Negative Cache TTL
8 | ;
9 | @ IN NS localhost.
10 |
--------------------------------------------------------------------------------
/ansible/test/print_var.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - hosts: all
4 | gather_facts: False
5 | vars_files:
6 | - ../roles/common/vars/main.yml
7 | - ../roles/common/vars/files.yml
8 |
9 | tasks:
10 |
11 | - debug:
12 | msg: >
13 | {{
14 | installation_files |
15 | selectattr('type', 'equalto', 'database') |
16 | map(attribute='installer_parameters') | first
17 | }}
18 |
--------------------------------------------------------------------------------
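
The test playbooks are run the same way as the main playbooks, from the ansible directory (the relative vars_files paths resolve against the playbook location):

    ansible-playbook test/print_var.yml
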
/Dockerfile-bind/named.conf.custom-zones:
--------------------------------------------------------------------------------
1 | zone "example.com." {
2 | type master;
3 | allow-update { key dnsupdate.; };
4 | file "/etc/bind/db.example.com";
5 | };
6 |
7 | zone "10.10.10.in-addr.arpa." {
8 | type master;
9 | allow-update { key dnsupdate.; };
10 | file "/etc/bind/db.10.10.10";
11 | };
12 |
13 | zone "11.11.11.in-addr.arpa." {
14 | type master;
15 | allow-update { key dnsupdate.; };
16 | file "/etc/bind/db.11.11.11";
17 | };
18 |
--------------------------------------------------------------------------------
/ansible/roles/common/templates/dhclient.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Start DHCP client for {{ item.1.internal_network_name }}
3 | ConditionPathExists=/sys/class/net/{{ item.1.internal_network_name }}
4 |
5 | [Service]
6 | ExecStart=/usr/sbin/dhclient -d -H {{ item.1.dhcp_hostname }} -pf {{ item.1.dhclient_pid }} {{ item.1.internal_network_name }}
7 | ExecStop=/usr/sbin/dhclient -x {{ item.1.internal_network_name }}
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
/ansible/roles/common/templates/oracle-nfs-mount.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Mount oraclenfs NFS volume to /oraclenfs
3 |
4 | [Mount]
5 | {# Lookup IP address of NFS server #}
6 | What={{
7 | docker.containers |
8 | selectattr('type', 'equalto', 'nfs') |
9 | map(attribute='networks') | first |
10 | selectattr('name', 'equalto', 'pub') | list |
11 | map(attribute='ipv4_address') | first
12 | }}:/oraclenfs
13 | Where=/oraclenfs
14 | Type=nfs
15 | Options=rw,bg,hard,nointr,rsize=32768,wsize=32768,tcp,actimeo=0,vers=3,timeo=600
16 |
--------------------------------------------------------------------------------
/viewcap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 |
4 | CONTAINER=$1
5 |
6 | pidlist ()
7 | {
8 | local thispid=$1;
9 | local fulllist=;
10 | local childlist=;
11 | childlist=$(ps --ppid $thispid -o pid h);
12 | for pid in $childlist;
13 | do
14 | fulllist="$(pidlist $pid) $fulllist";
15 | done;
16 | echo "$thispid $fulllist"
17 | }
18 |
19 | pscap | awk '$2 ~ /'$(pidlist \
20 | $(docker inspect --format {{.State.Pid}} $CONTAINER) | \
21 | sed -e 's/^\s*\|\s*$//g' -e 's/\s\+/|/g')'/' | \
22 | sort -n -k 2
23 |
--------------------------------------------------------------------------------
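
Usage sketch: pass a container name to list the Linux capabilities of every process running inside it (pscap comes from the libcap-ng utilities and must be available on the Docker host):

    ./viewcap.sh rac1
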
/Dockerfile-bind/db.example.com:
--------------------------------------------------------------------------------
1 | $TTL 86400
2 | @ IN SOA example.com. root.example.com. (
3 | 1 ; Serial
4 | 604800 ; Refresh
5 | 86400 ; Retry
6 | 2419200 ; Expire
7 | 86400 ) ; Negative Cache TTL
8 | ;
9 | @ IN NS localhost.
10 | clu-121-gns A 10.10.10.20
11 | $ORIGIN clu-121.example.com.
12 | @ NS clu-121-gns.example.com.
13 |
--------------------------------------------------------------------------------
/Dockerfile-nfs/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | function init_rpc {
5 | echo "Starting rpcbind"
6 | rpcbind || return 0
7 | rpc.statd -L || return 0
8 | rpc.idmapd || return 0
9 | sleep 1
10 | }
11 |
12 | function init_dbus {
13 | echo "Starting dbus"
14 | #rm -f /var/run/dbus/system_bus_socket
15 | rm -f /var/run/messagebus.pid
16 | dbus-uuidgen --ensure
17 | dbus-daemon --system --fork
18 | sleep 1
19 | }
20 |
21 |
22 | init_rpc
23 | init_dbus
24 |
25 | echo "Starting Ganesha NFS"
26 |
27 | exec /usr/bin/ganesha.nfsd -F -L /dev/stdout -N NIV_EVENT
28 |
--------------------------------------------------------------------------------
/ansible/roles/common/templates/99-asm-disks.rules.j2:
--------------------------------------------------------------------------------
1 | {% for diskgroup in asm_disk_groups %}
2 | {% for disk in diskgroup.disks %}
3 | {% if disk.type == 'block' %}
4 | KERNEL=="{{ disk.rawpath | replace('/dev/', '', 1) }}", SYMLINK+="{{ disk.udevpath | replace('/dev/', '', 1) }}", OWNER="{{ operating_system.grid_infrastructure.users | selectattr('title', 'equalto', 'owner') | map(attribute='uid') | first }}", GROUP="{{ operating_system.grid_infrastructure.groups | selectattr('title', 'equalto', 'osasm') | map(attribute='gid') | first }}"
5 | {% endif %}
6 | {% endfor %}
7 | {% endfor %}
8 |
--------------------------------------------------------------------------------
/Dockerfile-bind/named.conf:
--------------------------------------------------------------------------------
1 | // This is the primary configuration file for the BIND DNS server named.
2 | //
3 | // Please read /usr/share/doc/bind9/README.Debian.gz for information on the
4 | // structure of BIND configuration files in Debian, *BEFORE* you customize
5 | // this configuration file.
6 | //
7 | // If you are just adding zones, please do that in /etc/bind/named.conf.local
8 |
9 | include "/etc/bind/named.conf.options";
10 | include "/etc/bind/named.conf.local";
11 | include "/etc/bind/named.conf.default-zones";
12 | include "/etc/bind/keys.conf";
13 | include "/etc/bind/named.conf.custom-zones";
14 |
--------------------------------------------------------------------------------
/dhcpd.conf:
--------------------------------------------------------------------------------
1 | # Global options
2 | ddns-update-style interim;
3 | update-static-leases on;
4 | update-conflict-detection false;
5 | authoritative;
6 | option subnet-mask 255.255.255.0;
7 | option domain-name "example.com";
8 | option domain-name-servers 10.10.10.10;
9 | default-lease-time 86400;
10 |
11 | include "/keys/keys.conf";
12 |
13 | zone example.com. {
14 | primary 10.10.10.10;
15 | key dnsupdate.;
16 | }
17 |
18 | zone 10.10.10.in-addr.arpa. {
19 | primary 10.10.10.10;
20 | key dnsupdate.;
21 | }
22 |
23 | zone 11.11.11.in-addr.arpa. {
24 | primary 10.10.10.10;
25 | key dnsupdate.;
26 | }
27 |
28 | # Subnet definition
29 | subnet 10.10.10.0 netmask 255.255.255.0 {
30 | range 10.10.10.100 10.10.10.150;
31 | }
32 |
33 | subnet 11.11.11.0 netmask 255.255.255.0 {
34 | range 11.11.11.100 11.11.11.200;
35 | }
36 |
--------------------------------------------------------------------------------
/ganesha.conf:
--------------------------------------------------------------------------------
1 | ###################################################
2 | #
3 | # EXPORT
4 | #
5 | # To function, all that is required is an EXPORT
6 | #
7 | # Define the absolute minimal export
8 | #
9 | ###################################################
10 |
11 | EXPORT
12 | {
13 | # Export Id (mandatory, each EXPORT must have a unique Export_Id)
14 | Export_Id = 77;
15 |
16 | # Exported path (mandatory)
17 | Path = /oraclenfs;
18 |
19 | # Pseudo Path (required for NFS v4)
20 | Pseudo = /oraclenfs;
21 |
22 | # Required for access (default is None)
23 | # Could use CLIENT blocks instead
24 | Access_Type = RW;
25 |
26 | # Root squash
27 | Squash = No_Root_Squash;
28 |
29 | # SecType = "sys";
30 |
31 | # Exporting FSAL
32 | FSAL {
33 | Name = VFS;
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/Dockerfile-nfs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM oraclelinux:7.2
2 | MAINTAINER sethmiller.sm@gmail.com
3 |
4 |
5 | # Update the operating system
6 | RUN ["yum", "-y", "update"]
7 |
8 |
9 | # Add the YUM repositories
10 | ADD ["http://public-yum.oracle.com/public-yum-ol7.repo", "/etc/yum.repos.d/"]
11 | RUN ["rpm", "--install", "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"]
12 |
13 |
14 | # Download and import the gpg key
15 | ADD ["http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol7", "/etc/yum.repos.d/"]
16 | RUN ["rpm", "--import", "/etc/yum.repos.d/RPM-GPG-KEY-oracle-ol7"]
17 |
18 |
19 | # Install necessary packages
20 | RUN ["yum", "-y", "install", "nfs-ganesha-vfs"]
21 |
22 |
23 | # Clean the yum cache
24 | RUN ["yum", "clean", "all"]
25 |
26 |
27 | # Create dbus directory
28 | RUN ["mkdir", "-p", "/var/run/dbus"]
29 |
30 |
31 | # Add the entrypoint script
32 | ADD ["entrypoint.sh", "/"]
33 |
34 |
35 | # Start the entrypoint script
36 | ENTRYPOINT ["/entrypoint.sh"]
37 |
--------------------------------------------------------------------------------
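
A build-and-run sketch for this image; the container needs the exported path and a ganesha.conf visible inside it, and the tag, mounts, and privileges shown here are illustrative rather than the exact commands used by the Ansible roles:

    docker build -t nfs Dockerfile-nfs/
    docker run --detach --privileged --name nfs \
        -v /oraclenfs:/oraclenfs \
        -v /srv/docker/nfs/ganesha.conf:/etc/ganesha/ganesha.conf \
        nfs
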
/ansible/test/test.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - hosts: all
4 | gather_facts: False
5 |
6 |
7 | tasks:
8 | - include_vars: main.yml
9 |
10 | - docker_network:
11 | name: "{{ item.name }}"
12 | appends: True
13 | state: present
14 | ipam_options:
15 | subnet: "{{ item.subnet }}/{{ item.cidr }}"
16 | with_items:
17 | - "{{ docker.networks }}"
18 | register: network_stuff
19 |
20 | - name: Create container
21 | docker_container:
22 | name: bind
23 | hostname: bind
24 | image: sethmiller/bind
25 | state: started
26 | restart: True
27 | interactive: True
28 | tty: True
29 | published_ports:
30 | - 53:53/tcp
31 | - 53:53/udp
32 | volumes:
33 | - /srv/docker/bind:/data
34 | env:
35 | WEBMIN_ENABLED: "false"
36 | command: "-4"
37 | networks:
38 | - name: "{{ docker.networks | selectattr('type', 'equalto', 'public') | map(attribute='name') | first }}"
39 | with_sequence: count=2
40 |
--------------------------------------------------------------------------------
/Dockerfile-bind/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM sethmiller/bind:latest
2 | MAINTAINER sethmiller.sm@gmail.com
3 |
4 |
5 | RUN ["mkdir", "-p", "/etc/keys"]
6 |
7 |
8 | # Generate keys for BIND utilities like nsupdate and rndc
9 | RUN ["dnssec-keygen", \
10 | "-K", "/etc/keys", \
11 | "-a", "HMAC-MD5", \
12 | "-b", "512", \
13 | "-n", "USER", \
14 | "-r", "/dev/urandom", \
15 | "dnsupdate."]
16 |
17 |
18 | # Config file for keys
19 | COPY ["keys.conf", "/etc/bind/"]
20 |
21 |
22 | # Add the key generated above to /etc/bind/keys.conf
23 | RUN SECRET_KEY=$(grep '^Key:' /etc/keys/Kdnsupdate.*.private | awk '{print $2}') \
24 | && sed -i 's!REPLACE_WITH_SECRET_KEY!'${SECRET_KEY?}'!' /etc/bind/keys.conf
25 |
26 |
27 | # Config file for zones
28 | COPY ["named.conf.custom-zones", "/etc/bind/"]
29 |
30 |
31 | # Database files
32 | COPY ["db.example.com", "/etc/bind/"]
33 | COPY ["db.10.10.10", "/etc/bind/"]
34 | COPY ["db.11.11.11", "/etc/bind/"]
35 |
36 |
37 | # Overwrite named.conf to include keys.conf and named.conf.custom-zones
38 | COPY ["named.conf", "/etc/bind/"]
39 |
40 |
41 | # Copy the key files
42 | RUN cp /etc/keys/Kdnsupdate.* /etc/bind/
43 |
--------------------------------------------------------------------------------
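
Build sketch: this image layers the zone files and the generated TSIG key on top of the sethmiller/bind base image (the tag below is illustrative):

    docker build -t bind-custom Dockerfile-bind/
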
/ansible/roles/add_diskgroup/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Create the disk group
3 | # docker exec rac1 su - grid -c "ORACLE_SID=+ASM1 /u01/app/12.1.0/grid/bin/asmca \
4 | # -silent -createDiskGroup \
5 | # -diskGroupName NDATA \
6 | # -redundancy EXTERNAL \
7 | # -disk '/oraclenfs/asm-clu-121-NDATA-disk1' \
8 | # -disk '/oraclenfs/asm-clu-121-NDATA-disk2' \
9 | # -disk '/oraclenfs/asm-clu-121-NDATA-disk3'"
10 | command: >-
11 | /usr/bin/docker exec {{ first_rac_node }}
12 | /usr/bin/su -
13 | {{
14 | operating_system.grid_infrastructure.users |
15 | selectattr('title', 'equalto', 'owner') |
16 | map(attribute='name') | first
17 | }} -c "ORACLE_SID=+ASM1
18 | {{
19 | oracle_binaries |
20 | selectattr('type', 'equalto', 'grid') |
21 | selectattr('version', 'equalto', '12.1.0.2') |
22 | map(attribute='oracle_home') | first
23 | }}/bin/asmca -silent -createDiskGroup
24 | -diskGroupName {{ this_disk_group }}
25 | -redundancy EXTERNAL
26 | {% for disk in asm_disk_groups |
27 | selectattr('group', 'equalto', this_disk_group) |
28 | list | map(attribute='disks') | first %}
29 | -disk \"{{ disk.path }}\"
30 | {% endfor %}"
31 | tags:
32 | - create_disk_group
33 |
--------------------------------------------------------------------------------
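
The role takes the target disk group name through the `this_disk_group` variable, so it is driven from a wrapper playbook such as add_NDATA_diskgroup.yml shown earlier:

    ansible-playbook add_NDATA_diskgroup.yml
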
/ansible/roles/common/templates/rac-node-networks.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | NSPID=$(/usr/bin/docker inspect --format={% raw %}'{{ .State.Pid }}'{% endraw %} {{ item.name }})
5 |
6 | /usr/bin/rm -rf "/var/run/netns/${NSPID?}"
7 | /usr/bin/mkdir -p "/var/run/netns"
8 | /usr/bin/ln -s "/proc/${NSPID?}/ns/net" "/var/run/netns/${NSPID?}"
9 |
10 | {% for network in item.networks %}
11 | BRIDGE=$(/usr/bin/docker network ls -q -f NAME={{ network.name }})
12 | /usr/bin/ip link del dev {{ network.external_network_name }} 2>/dev/null
13 | /usr/bin/ip link add name {{ network.external_network_name }} mtu 1500 type veth peer name {{ network.internal_network_name }} mtu 1500
14 | /usr/bin/sleep 5
15 | /usr/bin/ip link set {{ network.external_network_name }} master br-${BRIDGE?}
16 | /usr/bin/ip link set {{ network.external_network_name }} up
17 | /usr/bin/ip link set {{ network.internal_network_name }} netns ${NSPID?}
18 | /usr/bin/ip netns exec ${NSPID?} /usr/bin/ip link set {{ network.internal_network_name }} up
19 |
20 | /usr/bin/docker exec {{ item.name }} \
21 | /usr/bin/rm -f /etc/systemd/system/dhclient-{{ item.name }}-{{ network.internal_network_name }}.service
22 |
23 | /usr/bin/docker exec {{ item.name }} \
24 | /usr/bin/ln -s \
25 | /usr/lib/custom_services/dhclient-{{ item.name }}-{{ network.internal_network_name }}.service \
26 | /etc/systemd/system/dhclient-{{ item.name }}-{{ network.internal_network_name }}.service
27 |
28 | /usr/bin/docker exec {{ item.name }} \
29 | /usr/bin/systemctl stop dhclient-{{ item.name }}-{{ network.internal_network_name }}.service
30 |
31 | /usr/bin/docker exec {{ item.name }} \
32 | /usr/bin/systemctl daemon-reload
33 |
34 | /usr/bin/docker exec {{ item.name }} \
35 | /usr/bin/systemctl start dhclient-{{ item.name }}-{{ network.internal_network_name }}.service
36 |
37 | {% endfor %}
38 |
39 | /usr/bin/rm -rf "/var/run/netns/${NSPID?}"
40 |
--------------------------------------------------------------------------------
/ansible/roles/add_database/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
2 | - block:
3 | - name: Create database
4 | # docker exec rac1 su - oracle -c "/u01/app/oracle/product/12.1.0/dbhome_1/bin/dbca \
5 | # -createDatabase -silent \
6 | # -templateName General_Purpose.dbc \
7 | # -gdbName orcl \
8 | # -sysPassword oracle_4U \
9 | # -systemPassword oracle_4U \
10 | # -storageType ASM \
11 | # -diskGroupName DATA \
12 | # -recoveryGroupName DATA \
13 | # -characterSet AL32UTF8 \
14 | # -nationalCharacterSet UTF8 \
15 | # -totalMemory 1024 \
16 | # -emConfiguration none \
17 | # -nodelist rac1,rac2 \
18 | # -createAsContainerDatabase True"
19 | command: >
20 | /usr/bin/docker exec {{ first_rac_node }} su -
21 | {{
22 | operating_system.database.users |
23 | selectattr('title', 'equalto', 'owner') |
24 | map(attribute='name') | first
25 | }} -c "{{
26 | oracle_binaries |
27 | selectattr('type', 'equalto', 'database') |
28 | selectattr('version', 'equalto', '12.1.0.2') |
29 | map(attribute='oracle_home') | first
30 | }}/bin/dbca
31 | {{
32 | databases |
33 | selectattr('version', 'equalto', '12.1.0.2') |
34 | map(attribute='parameters') |
35 | first | join(' ')
36 | }}"
37 | register: create_database_result
38 | changed_when: (create_database_result.rc == 0 ) or (create_database_result.rc == 6 )
39 | failed_when: (create_database_result.rc != 0) and (create_database_result.rc != 6 )
40 | always:
41 | - name: Print readable previous command output
42 | debug:
43 | var: create_database_result.stdout_lines
44 | tags:
45 | - create_database
46 |
--------------------------------------------------------------------------------
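
The changed_when/failed_when conditions accept dbca exit codes 0 and 6, so a run that exits with 6 is not treated as a failure. The role is driven by the add_database.yml playbook shown earlier:

    ansible-playbook add_database.yml
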
/networks-rac1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | NSPID=$(/usr/bin/docker inspect --format='{{ .State.Pid }}' rac1)
5 |
6 | /usr/bin/rm -rf "/var/run/netns/${NSPID?}"
7 | /usr/bin/mkdir -p "/var/run/netns"
8 | /usr/bin/ln -s "/proc/${NSPID?}/ns/net" "/var/run/netns/${NSPID?}"
9 |
10 | BRIDGE=$(/usr/bin/docker network ls -q -f NAME=pub)
11 | /usr/bin/ip link del dev rac1-pub 2>/dev/null
12 | /usr/bin/ip link add name rac1-pub mtu 1500 type veth peer name eth-pub mtu 1500
13 | /usr/bin/sleep 5
14 | /usr/bin/ip link set rac1-pub master br-${BRIDGE?}
15 | /usr/bin/ip link set rac1-pub up
16 | /usr/bin/ip link set eth-pub netns ${NSPID?}
17 | /usr/bin/ip netns exec ${NSPID?} /usr/bin/ip link set eth-pub up
18 |
19 | /usr/bin/docker exec rac1 \
20 | /usr/bin/rm -f /etc/systemd/system/dhclient-rac1-eth-pub.service
21 |
22 | /usr/bin/docker exec rac1 \
23 | /usr/bin/ln -s \
24 | /usr/lib/custom_services/dhclient-rac1-eth-pub.service \
25 | /etc/systemd/system/dhclient-rac1-eth-pub.service
26 |
27 | /usr/bin/docker exec rac1 \
28 | /usr/bin/systemctl stop dhclient-rac1-eth-pub.service
29 |
30 | /usr/bin/docker exec rac1 \
31 | /usr/bin/systemctl daemon-reload
32 |
33 | /usr/bin/docker exec rac1 \
34 | /usr/bin/systemctl start dhclient-rac1-eth-pub.service
35 |
36 | BRIDGE=$(/usr/bin/docker network ls -q -f NAME=priv)
37 | /usr/bin/ip link del dev rac1-priv 2>/dev/null
38 | /usr/bin/ip link add name rac1-priv mtu 1500 type veth peer name eth-priv mtu 1500
39 | /usr/bin/sleep 5
40 | /usr/bin/ip link set rac1-priv master br-${BRIDGE?}
41 | /usr/bin/ip link set rac1-priv up
42 | /usr/bin/ip link set eth-priv netns ${NSPID?}
43 | /usr/bin/ip netns exec ${NSPID?} /usr/bin/ip link set eth-priv up
44 |
45 | /usr/bin/docker exec rac1 \
46 | /usr/bin/rm -f /etc/systemd/system/dhclient-rac1-eth-priv.service
47 |
48 | /usr/bin/docker exec rac1 \
49 | /usr/bin/ln -s \
50 | /usr/lib/custom_services/dhclient-rac1-eth-priv.service \
51 | /etc/systemd/system/dhclient-rac1-eth-priv.service
52 |
53 | /usr/bin/docker exec rac1 \
54 | /usr/bin/systemctl stop dhclient-rac1-eth-priv.service
55 |
56 | /usr/bin/docker exec rac1 \
57 | /usr/bin/systemctl daemon-reload
58 |
59 | /usr/bin/docker exec rac1 \
60 | /usr/bin/systemctl start dhclient-rac1-eth-priv.service
61 |
62 |
63 | /usr/bin/rm -rf "/var/run/netns/${NSPID?}"
64 |
--------------------------------------------------------------------------------
/networks-rac2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | NSPID=$(/usr/bin/docker inspect --format='{{ .State.Pid }}' rac2)
5 |
6 | /usr/bin/rm -rf "/var/run/netns/${NSPID?}"
7 | /usr/bin/mkdir -p "/var/run/netns"
8 | /usr/bin/ln -s "/proc/${NSPID?}/ns/net" "/var/run/netns/${NSPID?}"
9 |
10 | BRIDGE=$(/usr/bin/docker network ls -q -f NAME=pub)
11 | /usr/bin/ip link del dev rac2-pub 2>/dev/null
12 | /usr/bin/ip link add name rac2-pub mtu 1500 type veth peer name eth-pub mtu 1500
13 | /usr/bin/sleep 5
14 | /usr/bin/ip link set rac2-pub master br-${BRIDGE?}
15 | /usr/bin/ip link set rac2-pub up
16 | /usr/bin/ip link set eth-pub netns ${NSPID?}
17 | /usr/bin/ip netns exec ${NSPID?} /usr/bin/ip link set eth-pub up
18 |
19 | /usr/bin/docker exec rac2 \
20 | /usr/bin/rm -f /etc/systemd/system/dhclient-rac2-eth-pub.service
21 |
22 | /usr/bin/docker exec rac2 \
23 | /usr/bin/ln -s \
24 | /usr/lib/custom_services/dhclient-rac2-eth-pub.service \
25 | /etc/systemd/system/dhclient-rac2-eth-pub.service
26 |
27 | /usr/bin/docker exec rac2 \
28 | /usr/bin/systemctl stop dhclient-rac2-eth-pub.service
29 |
30 | /usr/bin/docker exec rac2 \
31 | /usr/bin/systemctl daemon-reload
32 |
33 | /usr/bin/docker exec rac2 \
34 | /usr/bin/systemctl start dhclient-rac2-eth-pub.service
35 |
36 | BRIDGE=$(/usr/bin/docker network ls -q -f NAME=priv)
37 | /usr/bin/ip link del dev rac2-priv 2>/dev/null
38 | /usr/bin/ip link add name rac2-priv mtu 1500 type veth peer name eth-priv mtu 1500
39 | /usr/bin/sleep 5
40 | /usr/bin/ip link set rac2-priv master br-${BRIDGE?}
41 | /usr/bin/ip link set rac2-priv up
42 | /usr/bin/ip link set eth-priv netns ${NSPID?}
43 | /usr/bin/ip netns exec ${NSPID?} /usr/bin/ip link set eth-priv up
44 |
45 | /usr/bin/docker exec rac2 \
46 | /usr/bin/rm -f /etc/systemd/system/dhclient-rac2-eth-priv.service
47 |
48 | /usr/bin/docker exec rac2 \
49 | /usr/bin/ln -s \
50 | /usr/lib/custom_services/dhclient-rac2-eth-priv.service \
51 | /etc/systemd/system/dhclient-rac2-eth-priv.service
52 |
53 | /usr/bin/docker exec rac2 \
54 | /usr/bin/systemctl stop dhclient-rac2-eth-priv.service
55 |
56 | /usr/bin/docker exec rac2 \
57 | /usr/bin/systemctl daemon-reload
58 |
59 | /usr/bin/docker exec rac2 \
60 | /usr/bin/systemctl start dhclient-rac2-eth-priv.service
61 |
62 |
63 | /usr/bin/rm -rf "/var/run/netns/${NSPID?}"
64 |
--------------------------------------------------------------------------------
/fixssh.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 |
5 | FIXHOSTS=$@
6 | ROOTKEYS=()
7 | GRIDKEYS=()
8 | ORACLEKEYS=()
9 | MYLOOP=0
10 |
11 |
12 | for MYHOST in ${FIXHOSTS?}; do
13 |
14 | docker exec ${MYHOST?} sh -c "touch /etc/ssh/ssh_known_hosts && chmod 644 /etc/ssh/ssh_known_hosts" || \
15 | echo "Unable to generate ssh_known_hosts on ${MYHOST}"
16 |
17 | docker exec ${MYHOST?} sh -c "ssh-keyscan -t ecdsa ${FIXHOSTS?} >> /etc/ssh/ssh_known_hosts 2> /dev/null" || \
18 | echo "Unable to scan for known_hosts keys on ${MYHOST}"
19 |
20 | docker exec ${MYHOST?} sh -c "[[ -f ~/.ssh/id_rsa ]] || ssh-keygen -q -N '' -f ~/.ssh/id_rsa" || \
21 | echo "Unable to generate root SSH key on ${MYHOST}"
22 |
23 | ROOTKEYS[${MYLOOP?}]=$(docker exec ${MYHOST?} sh -c "cat ~/.ssh/id_rsa.pub") || \
24 | echo "Unable to get root key from ${MYHOST}"
25 |
26 | docker exec --user grid ${MYHOST?} sh -c "[[ -f ~/.ssh/id_rsa ]] || ssh-keygen -q -N '' -f ~/.ssh/id_rsa" || \
27 | echo "Unable to generate grid SSH key on ${MYHOST}"
28 |
29 | GRIDKEYS[${MYLOOP?}]=$(docker exec --user grid ${MYHOST?} sh -c "cat ~/.ssh/id_rsa.pub") || \
30 | echo "Unable to get grid key from ${MYHOST}"
31 |
32 | docker exec --user oracle ${MYHOST?} sh -c "[[ -f ~/.ssh/id_rsa ]] || ssh-keygen -q -N '' -f ~/.ssh/id_rsa" || \
33 | echo "Unable to generate oracle SSH key on ${MYHOST}"
34 |
35 | ORACLEKEYS[${MYLOOP?}]=$(docker exec --user oracle ${MYHOST?} sh -c "cat ~/.ssh/id_rsa.pub") || \
36 | echo "Unable to get oracle key from ${MYHOST}"
37 |
38 | let MYLOOP++
39 |
40 | done
41 |
42 |
43 | for MYHOST in ${FIXHOSTS?}; do
44 |
45 | for MYKEY in $(seq 1 ${#ROOTKEYS[@]}); do
46 |
47 | let MYKEY--
48 |
49 | docker exec ${MYHOST?} sh -c "echo ${ROOTKEYS[${MYKEY?}]} >> ~/.ssh/authorized_keys"
50 |
51 | done || echo "Unable to add root public SSH keys on ${MYHOST}"
52 |
53 |
54 | for MYKEY in $(seq 1 ${#GRIDKEYS[@]}); do
55 |
56 | let MYKEY--
57 |
58 | docker exec --user grid ${MYHOST?} sh -c "echo ${GRIDKEYS[${MYKEY?}]} >> ~/.ssh/authorized_keys"
59 |
60 | done || echo "Unable to add grid public SSH keys on ${MYHOST}"
61 |
62 |
63 | for MYKEY in $(seq 1 ${#ORACLEKEYS[@]}); do
64 |
65 | let MYKEY--
66 |
67 | docker exec --user oracle ${MYHOST?} sh -c "echo ${ORACLEKEYS[${MYKEY?}]} >> ~/.ssh/authorized_keys"
68 |
69 | done || echo "Unable to add oracle public SSH keys on ${MYHOST}"
70 |
71 |
72 | docker exec ${MYHOST?} sh -c "chmod 600 ~/.ssh/authorized_keys"
73 |
74 | docker exec --user grid ${MYHOST?} sh -c "chmod 600 ~/.ssh/authorized_keys"
75 |
76 | docker exec --user oracle ${MYHOST?} sh -c "chmod 600 ~/.ssh/authorized_keys"
77 |
78 | done
79 |
80 |
81 |
82 |
--------------------------------------------------------------------------------
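
Usage sketch: pass the names of the running RAC node containers; the script exchanges root, grid, and oracle SSH keys and known_hosts entries between them:

    ./fixssh.sh rac1 rac2
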
/ANSIBLE_SETUP.md:
--------------------------------------------------------------------------------
1 | # Ansible
2 | Ansible is used to automate the setup and configuration of the project. Ansible runs from your workstation or from a remote server with SSH access to the host running the containers. These instructions assume the host OS is already running and configured correctly.
3 |
4 | ### Download Ansible
5 | The requirements and instructions for Ansible can be found [here](http://docs.ansible.com/ansible/intro_installation.html).
6 |
7 |
8 | ## SSH
9 | Establish SSH shared key authentication between the Ansible host and the Docker host.
10 |
11 | Generate an SSH key pair on the Ansible host if necessary.
12 | ```
13 | ssh-keygen
14 | ```
15 |
16 | Copy the SSH public key to the Docker host.
17 | ```
18 | ssh-copy-id core@
19 | ```
20 |
21 | If the Docker host is CoreOS, you'll probably need to add the public key manually since password authentication is disabled by default. Add the public key to your cloud-config file under `ssh_authorized_keys:` and reboot the Docker host, or add it directly with `update-ssh-keys`.
22 | ```
23 | update-ssh-keys -a ansible << EOF
24 |
25 | EOF
26 | ```
27 |
28 | Establish an SSH connection between the Ansible host and the Docker host to populate the known_hosts file.
29 | ```
30 | ssh core@
31 | ```
32 |
33 |
34 | ## Ansible Inventory
35 | Add the Docker host IP to the Ansible inventory. If you're using CoreOS, a couple of variables need to be set as part of the host definition as well.
36 | ```
37 | ansible_ssh_user=core ansible_python_interpreter=/home/core/bin/python
38 | ```
39 |
40 |
41 | ## CoreOS Bootstrap
42 | CoreOS is an intentionally lean OS and doesn't include an installation of Python, which Ansible relies on heavily. If you're using CoreOS, you'll need to bootstrap the OS with a minimal Python installation called PyPy. Fortunately, there is an easy method to make this work. More info can be found [here](https://github.com/defunctzombie/ansible-coreos-bootstrap).
43 |
44 | ### Install the CoreOS bootstrap
45 | ```
46 | ansible-galaxy install defunctzombie.coreos-bootstrap
47 | ```
48 |
49 | ### Run the bootstrap
50 | Update the `hosts: ` line of the coreos-bootstrap.yml file to reflect the CoreOS hosts you want to bootstrap. If your Ansible hosts file contains only your CoreOS Docker host, use the file as-is and every host in the hosts file will be bootstrapped.
51 | ```
52 | ansible-playbook coreos-bootstrap.yml
53 | ```
54 |
55 | ### Install docker-py
56 | ```
57 | ansible all -m pip -a "executable=/home/core/bin/pip name=docker-py"
58 | ```
59 |
60 | Verify Ansible is working with these commands.
61 | ```
62 | ansible all -m setup
63 | ansible all -m ping
64 | ```
65 |
--------------------------------------------------------------------------------
/ansible/roles/add_rac_nodes/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Used to provision an additional rac node container after rac1 has been created
2 | # 'this_rac_node' and 'this_image' variables must be set
3 | # All jinja2 templates are in the common role templates folder
4 |
5 |
6 | ##########################################################################################
7 | ############# Create an additional rac node container
8 | ##########################################################################################
9 |
10 | - name: Create additional rac node container
11 | # call the start_rac_node.yml common task, the 'this_rac_node' variable was set by the calling
12 | # playbook, the 'this_image' variable is set to the rac node image which was created by the
13 | # 'create_oracle_image' role
14 | # ../../common/tasks/start_rac_node.yml this_image=giinstalled
15 | include: ../../common/tasks/start_rac_node.yml this_image=giinstalled
16 | tags:
17 | - create_additional_container
18 |
19 |
20 | ##########################################################################################
21 | ############# Modify the container to join the existing cluster
22 | ##########################################################################################
23 |
24 | - block:
25 | - name: Add new rac node container into cluster
26 | # docker exec rac1 su - grid -c '/u01/app/12.1.0/grid/addnode/addnode.sh \
27 | # "CLUSTER_NEW_NODES={rac2}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac2-vip}" \
28 | # -waitforcompletion -silent -ignoreSysPrereqs -force -noCopy'
29 | command: >
30 | /usr/bin/docker exec {{ first_rac_node }} su -
31 | {{
32 | operating_system.grid_infrastructure.users |
33 | selectattr('title', 'equalto', 'owner') |
34 | map(attribute='name') | first
35 | }} -c '{{
36 | oracle_binaries |
37 | selectattr('type', 'equalto', 'grid') |
38 | selectattr('version', 'equalto', '12.1.0.2') |
39 | map(attribute='oracle_home') | first
40 | }}/addnode/addnode.sh
41 | "CLUSTER_NEW_NODES={% raw %}{{% endraw %}{{ this_rac_node }}{% raw %}}{% endraw %}"
42 | "CLUSTER_NEW_VIRTUAL_HOSTNAMES={% raw %}{{% endraw %}{{ this_rac_node }}-vip{% raw %}}{% endraw %}"
43 | -waitforcompletion -silent -ignoreSysPrereqs -force -noCopy'
44 | register: add_new_node_result
45 | changed_when: (add_new_node_result.rc == 0 ) or (add_new_node_result.rc == 6 )
46 | failed_when: (add_new_node_result.rc != 0) and (add_new_node_result.rc != 6 )
47 |
48 | always:
49 | - name: Print readable previous command output
50 | debug:
51 | var: add_new_node_result.stdout_lines
52 | tags:
53 | - add_new_node_to_cluster
54 |
55 |
56 | - block:
57 | - name: Modify the root script to show output
58 | # when the binaries are installed silently, the root script does not show output by default,
59 | # this step modifies the root script to show the output
60 | # docker exec rac2 sed -i '/rootmacro.sh/s/$/ -stdout/' /u01/app/12.1.0/grid/root.sh
61 | command: >
62 | /usr/bin/docker exec {{ this_rac_node }}
63 | sed -i '/rootmacro.sh/s/$/ -stdout/'
64 | {{
65 | oracle_binaries |
66 | selectattr('type', 'equalto', 'grid') |
67 | selectattr('version', 'equalto', '12.1.0.2') |
68 | map(attribute='oracle_home') | first
69 | }}/root.sh
70 |
71 |
72 | - name: Run grid infrastructure root scripts
73 | # docker exec rac2 /u01/app/12.1.0/grid/root.sh
74 | command: >
75 | /usr/bin/docker exec {{ this_rac_node }}
76 | {{
77 | oracle_binaries |
78 | selectattr('type', 'equalto', 'grid') |
79 | selectattr('version', 'equalto', '12.1.0.2') |
80 | map(attribute='oracle_home') | first
81 | }}/root.sh
82 | tags:
83 | - add_new_node_root
84 |
85 | - name: Relink database binaries for RAC
86 | # the database binaries are installed when there is only one node so they are not enabled for RAC,
87 | # this step relinks the 'oracle' executable for RAC
88 | # docker exec rac2 su - oracle -c 'export ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1 && \
89 | # make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk rac_on && \
90 | # make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk ioracle'
91 | command: >
92 | /usr/bin/docker exec {{ this_rac_node }} su -
93 | {{
94 | operating_system.database.users |
95 | selectattr('title', 'equalto', 'owner') |
96 | map(attribute='name') | first
97 | }} -c
98 | "export ORACLE_HOME={{
99 | oracle_binaries |
100 | selectattr('type', 'equalto', 'database') |
101 | selectattr('version', 'equalto', '12.1.0.2') |
102 | map(attribute='oracle_home') | first
103 | }}
104 | && make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk rac_on
105 | && make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk ioracle"
106 | tags:
107 | - relink_for_rac
108 |
--------------------------------------------------------------------------------
/cloud-config:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 |
4 | # Hostname of your CoreOS VM
5 | hostname: mycoreos
6 |
7 |
8 | # Stores the public key for SSH shared-key authentication, update with your SSH RSA public key
9 | ssh_authorized_keys:
10 | - ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAA...
11 |
12 |
13 | coreos:
14 | units:
15 |
16 | # Creates an LVM thinpool for Docker storage, will only run if the logical volume does not exist
17 | # Change /dev/sdb to the disk you want to use for Docker storage
18 | - name: create-docker-lvm-thinpool.service
19 | command: start
20 | content: |
21 | [Unit]
22 | After=lvm2-monitor.service
23 | Requires=lvm2-monitor.service
24 | ConditionPathExists=!/dev/mapper/docker-thinpool
25 | [Service]
26 | Type=oneshot
27 | ExecStart=/usr/sbin/pvcreate /dev/sdb
28 | ExecStart=/usr/sbin/vgcreate docker /dev/sdb
29 | ExecStart=/usr/sbin/lvcreate --wipesignatures y -n thinpool docker -l 95%VG
30 | ExecStart=/usr/sbin/lvcreate --wipesignatures y -n thinpoolmeta docker -l 1%VG
31 | ExecStart=/usr/sbin/lvconvert -y --zero n -c 512K --thinpool docker/thinpool --poolmetadata docker/thinpoolmeta
32 | ExecStart=/usr/sbin/lvchange --metadataprofile docker-thinpool docker/thinpool
33 |
34 |
35 | # Adds a TCP socket for Docker so docker can be managed remotely
36 | - name: docker-tcp.socket
37 | command: start
38 | enable: true
39 | content: |
40 | [Unit]
41 | Description=Docker Socket for the API
42 |
43 | [Socket]
44 | ListenStream=2375
45 | BindIPv6Only=both
46 | Service=docker.service
47 |
48 | [Install]
49 | WantedBy=sockets.target
50 |
51 |
52 | # Updates the systemd Docker service to use Direct LVM storage and changes the container size to 25GB
53 | # Requires the LVM thinpool from the previous unit exists
54 | - name: docker.service
55 | drop-ins:
56 | - name: 10.docker_opts.conf
57 | content: |
58 | [Service]
59 | Environment="DOCKER_OPTS=--storage-driver=devicemapper --storage-opt=dm.thinpooldev=/dev/mapper/docker-thinpool --storage-opt=dm.use_deferred_removal=true --storage-opt=dm.basesize=25G"
60 |
61 |
62 | # Creates a new LVM volume group for custom logical volumes, will only run if the volume group does not exist
63 | # Change /dev/sdc to the disk you want to use for the data volume group
64 | - name: create-data-volume-group.service
65 | command: start
66 | content: |
67 | [Unit]
68 | Description=Create data volume group
69 | After=lvm2-activation.service
70 | Requires=lvm2-activation.service
71 | ConditionPathExists=/etc/check_vg.sh
72 | [Service]
73 | Type=oneshot
74 | ExecStart=/bin/sh /etc/check_vg.sh data /dev/sdc
75 |
76 |
77 | # Creates a new LVM volume and file system for storing installation files, will only run if the logical volume does not exist
78 | - name: create-oracledata-volume.service
79 | command: start
80 | content: |
81 | [Unit]
82 | Description=Create oracledata logical volume and create an ext4 filesystem
83 | After=create-data-volume-group.service
84 | Requires=create-data-volume-group.service
85 | ConditionPathExists=!/dev/mapper/data-oracledata
86 | [Service]
87 | Type=oneshot
88 | ExecStart=/usr/sbin/lvcreate -y -n oracledata data -l 30%VG
89 | ExecStart=/usr/sbin/mkfs.ext4 /dev/mapper/data-oracledata
90 |
91 |
92 | # Mounts the file system for storing installation files from previous unit
93 | - name: oracledata.mount
94 | command: start
95 | content: |
96 | [Unit]
97 | Description=Mount oracledata volume to /oracledata
98 | Requires=dev-mapper-data\x2doracledata.device
99 | After=dev-mapper-data\x2doracledata.device
100 | [Mount]
101 | What=/dev/mapper/data-oracledata
102 | Where=/oracledata
103 | Type=ext4
104 |
105 |
106 | # Creates a new LVM volume and file system for using NFS files for an ASM disk group, will only run if the logical volume does not exist
107 | - name: create-oraclenfs-volume.service
108 | command: start
109 | content: |
110 | [Unit]
111 | Description=Create oraclenfs logical volume and create an ext4 filesystem
112 | After=create-data-volume-group.service
113 | Requires=create-data-volume-group.service
114 | ConditionPathExists=!/dev/mapper/data-oraclenfs
115 | [Service]
116 | Type=oneshot
117 | ExecStart=/usr/sbin/lvcreate -y -n oraclenfs data -l 30%VG
118 | ExecStart=/usr/sbin/mkfs.ext4 /dev/mapper/data-oraclenfs
119 |
120 |
121 | # Mounts the file system for using NFS files for an ASM disk group from previous unit
122 | - name: oraclenfs.mount
123 | command: start
124 | content: |
125 | [Unit]
126 | Description=Mount oraclenfs volume to /oraclenfs
127 | Requires=dev-mapper-data\x2doraclenfs.device
128 | After=dev-mapper-data\x2doraclenfs.device
129 | [Mount]
130 | What=/dev/mapper/data-oraclenfs
131 | Where=/oraclenfs
132 | Type=ext4
133 |
134 |
135 | write_files:
136 |
137 | # Required for the volume group check
138 | - path: /etc/check_vg.sh
139 | owner: root
140 | content: |
141 | if ! /usr/sbin/vgs $1 >/dev/null 2>&1; then
142 | /usr/sbin/pvcreate $2
143 | /usr/sbin/vgcreate $1 $2
144 | /usr/sbin/vgs $1 >/dev/null 2>&1
145 | fi
146 |
147 | # Required for the Docker storage LVM thinpool
148 | - path: /etc/lvm/profile/docker-thinpool.profile
149 | permissions: 0644
150 | owner: root
151 | content: |
152 | activation {
153 | thin_pool_autoextend_threshold=80
154 | thin_pool_autoextend_percent=20
155 | }
156 |
157 |
158 | # Add bash profile preferences
159 | - path: /etc/profile.env
160 | content: |
161 | alias ll='ls -l --color=auto'
162 |
--------------------------------------------------------------------------------
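
Once a CoreOS VM boots with this cloud-config, the storage units can be sanity-checked from an SSH session (sketch):

    systemctl status create-docker-lvm-thinpool.service oracledata.mount oraclenfs.mount
    sudo lvs docker data
    docker info | grep -i 'storage driver'
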
/Dockerfile-racnode/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM oraclelinux:7.2
2 | MAINTAINER sethmiller.sm@gmail.com
3 |
4 |
5 | # Passwords for grid and oracle users
6 | ENV GRID_PASSWORD="oracle_4U"
7 | ENV ORACLE_PASSWORD="oracle_4U"
8 |
9 |
10 | ###################################################################################
11 | ## System Updates
12 | ###################################################################################
13 |
14 | # Update the operating system
15 | RUN ["yum", "-y", "update"]
16 |
17 |
18 | # Add the oracle YUM public repositories
19 | ADD ["http://public-yum.oracle.com/public-yum-ol7.repo", "/etc/yum.repos.d/"]
20 |
21 |
22 | # Download and import the gpg key
23 | ADD ["http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol7", "/etc/yum.repos.d/"]
24 | RUN ["rpm", "--import", "/etc/yum.repos.d/RPM-GPG-KEY-oracle-ol7"]
25 |
26 |
27 | # Install necessary packages
28 | RUN ["yum", "-y", "install", \
29 | "oracle-rdbms-server-12cR1-preinstall", \
30 | "vim", \
31 | "net-tools", \
32 | "tigervnc-server", \
33 | "xterm", \
34 | "iscsi-initiator-utils", \
35 | "elfutils-libelf-devel", \
36 | "motif", \
37 | "lshw", \
38 | "python-pip", \
39 | "tar"]
40 |
41 |
42 | # Clean the yum cache
43 | RUN ["yum", "clean", "all"]
44 |
45 |
46 | ###################################################################################
47 | ## Users and Groups
48 | ###################################################################################
49 |
50 | # Add groups for grid infrastructure
51 | RUN ["groupadd", "--force", "--gid", "54321", "oinstall"]
52 | RUN ["groupmod", "--gid", "54321", "oinstall"]
53 | RUN ["groupadd", "--gid", "54421", "asmdba"]
54 | RUN ["groupadd", "--gid", "54422", "asmadmin"]
55 | RUN ["groupadd", "--gid", "54423", "asmoper"]
56 |
57 |
58 | # Add groups for database
59 | RUN ["groupadd", "--force", "--gid", "54322", "dba"]
60 | RUN ["groupmod", "--gid", "54322", "dba"]
61 | RUN ["groupadd", "--gid", "54323", "oper"]
62 | RUN ["groupadd", "--gid", "54324", "backupdba"]
63 | RUN ["groupadd", "--gid", "54325", "dgdba"]
64 | RUN ["groupadd", "--gid", "54326", "kmdba"]
65 | RUN ["groupadd", "--gid", "54327", "racdba"]
66 |
67 |
68 | # Add grid infrastructure owner
69 | RUN useradd --create-home --uid 54421 --gid oinstall --groups dba,asmdba,asmadmin,asmoper grid || \
70 | (RES=$? && ( [ $RES -eq 9 ] && exit 0 || exit $RES))
71 | RUN ["usermod", "--uid", "54421", "--gid", "oinstall", "--groups", "dba,asmdba,asmadmin,asmoper", "grid"]
72 |
73 |
74 | # Add database owner
75 | RUN useradd --create-home --uid 54321 --gid oinstall --groups dba,asmdba,oper,backupdba,dgdba,kmdba,racdba oracle || \
76 | (RES=$? && ( [ $RES -eq 9 ] && exit 0 || exit $RES))
77 | RUN ["usermod", "--uid", "54321", "--gid", "oinstall", "--groups", "dba,asmdba,oper,backupdba,dgdba,kmdba,racdba", "oracle"]
78 |
79 |
80 | # Give grid and oracle users passwords
81 | RUN echo "grid:${GRID_PASSWORD}" | chpasswd
82 | RUN echo "oracle:${ORACLE_PASSWORD}" | chpasswd
83 |
84 |
85 | # Add ulimits configuration file for grid user
86 | # oracle user ulimits configuration file already added by oracle-rdbms-server-12cR1-preinstall
87 | ADD ["grid_security_limits.conf", "/etc/security/limits.d/"]
88 |
89 |
90 | ###################################################################################
91 | ## SSH Shared Keys
92 | ###################################################################################
93 |
94 | # Create SSH shared key directory for the oracle user
95 | RUN ["mkdir", "-p", "-m", "0700", "/home/oracle/.ssh/"]
96 |
97 |
98 | # Generate SSH shared keys for the oracle user
99 | RUN ssh-keygen -q -C '' -N '' -f /home/oracle/.ssh/id_rsa
100 |
101 |
102 | # Create the authorized_keys file for the oracle user
103 | RUN cat /home/oracle/.ssh/id_rsa.pub > /home/oracle/.ssh/authorized_keys
104 |
105 |
106 | # Change ownership of the SSH shared key files for the oracle user
107 | RUN chown -R oracle:oinstall /home/oracle/.ssh
108 |
109 |
110 | # Change permissions of the authorized_keys file for the oracle user
111 | RUN ["chmod", "0640", "/home/oracle/.ssh/authorized_keys"]
112 |
113 |
114 | # Create SSH shared key directory for the grid user
115 | RUN ["mkdir", "-p", "-m", "0700", "/home/grid/.ssh/"]
116 |
117 |
118 | # Generate SSH shared keys for the grid user
119 | RUN ssh-keygen -q -C '' -N '' -f /home/grid/.ssh/id_rsa
120 |
121 |
122 | # Create the authorized_keys file for the grid user
123 | RUN cat /home/grid/.ssh/id_rsa.pub > /home/grid/.ssh/authorized_keys
124 |
125 |
126 | # Change ownership of the SSH shared key files for the grid user
127 | RUN chown -R grid:oinstall /home/grid/.ssh
128 |
129 |
130 | # Change permissions of the authorized_keys file for the grid user
131 | RUN ["chmod", "0640", "/home/grid/.ssh/authorized_keys"]
132 |
133 |
134 | # Generate SSH host ECDSA shared keys
135 | RUN ssh-keygen -q -C '' -N '' -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key
136 |
137 |
138 | # Create the ssh_known_hosts file
139 | RUN for NODE in rac1 rac2; do (echo -n "$NODE " && cat /etc/ssh/ssh_host_ecdsa_key.pub) >> /etc/ssh/ssh_known_hosts; done
140 |
141 |
142 | ###################################################################################
143 | ## Files and Directories
144 | ###################################################################################
145 |
146 | # Create installation root directory
147 | RUN ["mkdir", "-p", "/u01"]
148 | RUN ["chgrp", "oinstall", "/u01"]
149 | RUN ["chmod", "0775", "/u01"]
150 |
151 |
152 | ###################################################################################
153 | ## Misc
154 | ###################################################################################
155 |
156 | # Allow non-privileged users the ability to execute the ping command
157 | RUN ["chmod", "4755", "/bin/ping"]
158 |
159 |
160 | # SELinux bug fix
161 | RUN ["mkdir", "-p", "/etc/selinux/targeted/contexts/"]
162 | ADD ["dbus_contexts", "/etc/selinux/targeted/contexts/"]
163 |
164 |
165 | # Hide/disable the ttyS0 serial console service
166 | RUN ["systemctl", "mask", "serial-getty@ttyS0.service"]
167 |
--------------------------------------------------------------------------------
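
Build sketch for the RAC node base image (the tag is illustrative; the Ansible roles drive the real build and then layer the Oracle installations on top to produce the giinstalled image):

    docker build -t racnode Dockerfile-racnode/
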
/ANSIBLE.md:
--------------------------------------------------------------------------------
1 | # Ansible orchestration for 12c RAC in Docker Containers
2 | Multiple node Oracle RAC cluster running in Docker containers.
3 |
4 |
5 | ## Setup
6 | If you're running CoreOS for your Docker host, some setup is required before proceeding. Instructions can be found in [ANSIBLE_SETUP.md](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ANSIBLE_SETUP.md).
7 |
8 |
9 | ## Ansible
10 | All of the Ansible scripts are in the [ansible](https://github.com/Seth-Miller/12c-rac-docker/tree/master/ansible) directory of this repository. Throughout these instructions it is assumed you are working from the ansible directory.
11 |
12 | In addition to the instructions found here, the YAML files are heavily commented with information and examples.
13 |
14 | The tasks in the Ansible roles closely follow the instructions found in the [README.md](https://github.com/Seth-Miller/12c-rac-docker/blob/master/README.md).
15 |
16 |
17 | ## Common Variables
18 | The variables for all of the roles are contained in the [roles/common/vars](https://github.com/Seth-Miller/12c-rac-docker/tree/master/ansible/roles/common/vars) directory. All of the roles reference variables in the [main.yml](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ansible/roles/common/vars/main.yml) file.
19 |
20 | A second file, [files.yml](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ansible/roles/common/vars/files.yml), holds the locations of the grid infrastructure and database installation files. It is intentionally left blank so you can fill in the file locations for your environment.
21 |
22 | It is important that all of the playbooks reference the common role as well as the files.yml file.
23 | ```
24 | vars_files:
25 | - roles/common/vars/files.yml
26 | roles:
27 | - common
28 | ```
29 |
30 |
31 | ## Prepare the Docker host
32 | The [prepare_host.yml](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ansible/prepare_host.yml) file starts the prepare_host role. These tasks not only prepare the Docker host but also build the containers that support the Oracle RAC cluster, including the DNS/BIND, DHCPD, and NFS server containers.
33 |
34 | Run the prepare_host playbook.
35 | ```
36 | ansible-playbook prepare_host.yml
37 | ```
38 |
39 | Tags can be used to limit which tasks are executed in each playbook. If you want to only prepare the ASM file and block devices, add the `asm` tag.
40 | ```
41 | ansible-playbook prepare_host.yml --tags=asm
42 | ```
43 |
44 | Here is a list of tags and their descriptions for the prepare_host tasks.
45 |
46 | Tag | Description
47 | ------------- | --------------------------------------
48 | asm | Manage the ASM block and file devices
49 | create_docker_networks | Creates public and private Docker networks
50 | create_directory | Creates directories for container configuration files
51 | config_files | Copies config files to DHCPD and NFS containers
52 | create_container | Creates the BIND, DHCPD, and NFS containers
53 | installation_files | Downloads and unzips the Oracle installation files
54 |
55 |
56 | ## Create the RAC node image
57 | The [create_oracle_image.yml](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ansible/create_oracle_image.yml) file starts the create_oracle_image role. These tasks create the RAC node image which will be used by all RAC node containers. The image preparation consists of installing the grid infrastructure software, the database software, and patches for both. The image will be committed locally on the Docker host and called `giinstalled`.
58 |
59 | Once the image has been created, it will not need to change until new binaries or new patches need to be applied.
60 |
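Run the create_oracle_image playbook from the ansible directory, adding `--tags` if you only want a subset of the tasks.
```
ansible-playbook create_oracle_image.yml
```
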
61 | Here is a list of tags and their descriptions for the create_oracle_image tasks.
62 |
63 | Tag | Description
64 | ------------- | --------------------------------------
65 | create_rac1_container | Creates the rac1 container
66 | install_grid | Installs the grid infrastructure binaries
67 | install_database | Installs the database binaries
68 | opatch | Updates opatch in both grid infrastructure and database homes
69 | apply_patch | Applies the bundle and one-off patches to the grid infrastructure and database homes
70 | commit_rac1 | Commits the prepared RAC node container to the giinstalled image
71 |
72 |
73 | ## Create the first RAC node container
74 | The [create_first_rac_node.yml](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ansible/create_first_rac_node.yml) file starts the create_first_rac_node role. These tasks create the first RAC node container (rac1) from the giinstalled image created in the previous step, configure the grid infrastructure for a cluster, and start the cluster processes. They also relink the `oracle` executable for RAC.
75 |
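Run the create_first_rac_node playbook.
```
ansible-playbook create_first_rac_node.yml
```
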
76 | Here is a list of tags and their descriptions for the create_first_rac_node tasks.
77 |
78 | Tag | Description
79 | ------------- | --------------------------------------
80 | create_rac1_container | Creates the rac1 container
81 | configure_grid | Configures the grid infrastructure for the cluster
82 | configure_grid_root | Executes the grid infrastructure root scripts that start the cluster processes
83 | configure_grid_tools | Finishes configuring the grid infrastructure
84 | relink_for_rac | Relinks the oracle executable for RAC
85 |
86 |
87 | ## Create the second RAC node container
88 | The [create_second_rac_node.yml](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ansible/create_second_rac_node.yml) file starts the add_rac_nodes role, setting the `this_rac_node` variable to rac2. These tasks create the second RAC node container (rac2) from the giinstalled image. They differ from the first RAC node container creation in that they add the rac2 container to the existing cluster running on rac1. They also relink the `oracle` executable for RAC.
89 |
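Run the create_second_rac_node playbook.
```
ansible-playbook create_second_rac_node.yml
```
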
90 | Here is a list of tags and their descriptions for the add_rac_nodes tasks.
91 |
92 | Tag | Description
93 | ------------- | --------------------------------------
94 | create_additional_container | Creates the rac2 container
95 | add_new_node_to_cluster | Runs addnode.sh on rac1 to add rac2 to the existing cluster
96 | add_new_node_root | Executes the grid infrastructure root scripts that start the cluster processes
97 | relink_for_rac | Relinks the oracle executable for RAC
98 |
99 |
100 | ## Add orcl RAC database
101 | The [add_database.yml](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ansible/add_database.yml) file starts the add_database role. These tasks create the orcl Oracle database in the cluster using the `dbca` utility.
102 |
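Run the add_database playbook.
```
ansible-playbook add_database.yml
```
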
103 | Here is a list of tags and their descriptions for the add_database tasks.
104 |
105 | Tag | Description
106 | ------------- | --------------------------------------
107 | create_database | Creates the orcl database
108 |
109 |
110 | ## Add NDATA ASM disk group
111 | The [add_NDATA_diskgroup.yml](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ansible/add_NDATA_diskgroup.yml) file starts the add_diskgroup role, setting the `this_disk_group` variable to NDATA. These tasks create the NDATA ASM disk group.
112 |
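Run the add_NDATA_diskgroup playbook.
```
ansible-playbook add_NDATA_diskgroup.yml
```
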
113 | Here is a list of tags and their descriptions for the add_diskgroup tasks.
114 |
115 | Tag | Description
116 | ------------- | --------------------------------------
117 | create_disk_group | Creates the NDATA ASM disk group
118 |
--------------------------------------------------------------------------------
/ansible/roles/common/tasks/start_rac_node.yml:
--------------------------------------------------------------------------------
1 | # Used to provision a rac node container
2 | # 'this_rac_node' and 'this_image' variables must be set
3 | # All jinja2 templates are in the common role templates folder
4 |
5 |
6 | ##########################################################################################
7 | ############# Create directories and scripts for the rac node container
8 | ##########################################################################################
9 |
10 | - name: Create rac_nodes custom directories
11 | # this directory holds all of the scripts shared by the rac node containers
12 | # /srv/docker/rac_nodes/custom_services
13 | file:
14 | state: directory
15 | path: "{{ operating_system.rac_node_directory }}"
16 | mode: 0777
17 | become: True
18 | tags:
19 | - create_rac_nodes_custom_dirs
20 |
21 |
22 | - name: Create dhclient service files for rac node container
23 | # used by systemd in each of the rac node containers to start dhclient processes for the
24 | # public and private networks
25 | # /srv/docker/rac_nodes/custom_services/dhclient-rac1-eth-pub.service
26 | # /srv/docker/rac_nodes/custom_services/dhclient-rac1-eth-priv.service
27 | template:
28 | src: ../../common/templates/dhclient.j2
29 | dest: "{{ operating_system.rac_node_directory }}/dhclient-{{ item.0.name }}-{{ item.1.internal_network_name }}.service"
30 | owner: root
31 | group: root
32 | mode: 0644
33 | become: True
34 | with_subelements:
35 | - "{{ docker.containers | selectattr('name', 'equalto', this_rac_node) | list }}"
36 | - networks
37 | tags:
38 | - add_dhclient_services
39 |
40 |
41 | - name: Create nfs mount files for rac node container
42 | # used by systemd in each of the rac node containers to mount the NFS shares from the
43 | # ganesha NFS container
44 | # /srv/docker/rac_nodes/custom_services/oraclenfs.mount
45 | template:
46 | src: ../../common/templates/oracle-nfs-mount.j2
47 | dest: "{{ operating_system.rac_node_directory }}/oraclenfs.mount"
48 | owner: root
49 | group: root
50 | mode: 0744
51 | become: True
52 | tags:
53 | - add_oracle_nfs_scripts
54 |
55 |
56 | - name: Create network management scripts for rac node container
57 | # scripts created on the docker host to manage the public and private networks in the
58 | # rac node containers, scripts can be executed manually to recreate the networks on
59 | # each rac node container
60 | # /srv/docker/scripts/networks-rac1.sh
61 | template:
62 | src: ../../common/templates/rac-node-networks.j2
63 | dest: "{{ operating_system.scripts_directory }}/networks-{{ item.name }}.sh"
64 | owner: root
65 | group: root
66 | mode: 0744
67 | become: True
68 | with_items:
69 | - "{{ docker.containers | selectattr('name', 'equalto', this_rac_node) | list }}"
70 | tags:
71 | - add_network_scripts
72 |
73 |
74 | - name: Create asm disk udev configuration file
75 |   # used by udev in each of the rac node containers to create symlinks as well as to
76 |   # change the ownership and permissions of the ASM block devices
77 | # /srv/docker/rac_nodes/custom_services/99-asm-disk.rules
78 | template:
79 | src: ../../common/templates/99-asm-disks.rules.j2
80 | dest: "{{ operating_system.rac_node_directory }}/99-asm-disk.rules"
81 | owner: root
82 | group: root
83 | mode: 0644
84 | become: True
85 | tags:
86 | - create_asm_disk_udev_file
87 |
88 |
89 | ##########################################################################################
90 | ############# Create the rac node docker container
91 | ##########################################################################################
92 |
93 | - name: Create rac node container
94 | # docker run --detach --privileged --name rac1 --hostname rac1 \
95 | # --volume /srv/docker/rac_nodes/custom_services:/usr/lib/custom_services \
96 | # --volume /oracledata/stage:/stage --volume /sys/fs/cgroup:/sys/fs/cgroup:ro \
97 | # --shm-size 2048m --dns 10.10.10.10 sethmiller/giready \
98 | # /usr/lib/systemd/systemd --system --unit=multi-user.target
99 | docker_container:
100 | name: "{{ item.name }}"
101 | hostname: "{{ item.hostname }}"
102 | image: "{{ this_image }}"
103 | state: started
104 | privileged: True
105 | volumes:
106 | "{{ item.volumes }}"
107 | command: "{{ item.command }}"
108 | shm_size: "{{ item.shm_size }}"
109 | dns_servers: "{{ item.dns }}"
110 | with_items:
111 | - "{{ docker.containers | selectattr('name', 'equalto', this_rac_node) | list }}"
112 | tags:
113 | - create_rac_node_container
114 |
115 |
116 | ##########################################################################################
117 | ############# Start rac node container services and networks
118 | ##########################################################################################
119 |
120 | - name: Enable asm disk udev configuration
121 | # link the udev configuration files to the udev rules.d directory
122 | # docker exec rac1 ln -s /usr/lib/custom_services/99-asm-disk.rules /etc/udev/rules.d/
123 | command: >
124 | /usr/bin/docker exec {{ this_rac_node }}
125 | /usr/bin/ln -s /usr/lib/custom_services/99-asm-disk.rules /etc/udev/rules.d/
126 | become: True
127 | register: command_result
128 | changed_when: (command_result.rc == 0 ) and ('File exists' not in command_result.stderr)
129 | failed_when: (command_result.rc != 0) and ('File exists' not in command_result.stderr)
130 | tags:
131 | - enable_udev
132 |
133 |
134 | - name: Reload udev rules
135 | # reload udev rules to make ASM disk changes take effect
136 | # docker exec rac1 udevadm control --reload-rules
137 | # docker exec rac1 udevadm trigger
138 | command: "/usr/bin/docker exec {{ this_rac_node }} {{ item }}"
139 | become: True
140 | with_items:
141 | - /usr/sbin/udevadm control --reload-rules
142 | - /usr/sbin/udevadm trigger
143 | tags:
144 | - reload_udev
145 |
146 |
147 | - name: Add networks to rac node
148 | # execute the network scripts created by the add_network_scripts task above
149 | # /srv/docker/scripts/networks-rac1.sh
150 | command: "{{ operating_system.scripts_directory }}/networks-{{ item.name }}.sh"
151 | become: True
152 | with_items:
153 | - "{{ docker.containers | selectattr('name', 'equalto', this_rac_node) | list }}"
154 | tags:
155 | - add_rac_node_networks
156 |
157 |
158 | - name: Enable nfs mount
159 | # link the nfs mount systemd configuration files to the systemd directory
160 | # docker exec rac1 ln -s /usr/lib/custom_services/oraclenfs.mount /etc/systemd/system/
161 | command: >
162 | /usr/bin/docker exec {{ this_rac_node }}
163 | /usr/bin/ln -s /usr/lib/custom_services/oraclenfs.mount /etc/systemd/system/
164 | become: True
165 | register: command_result
166 | changed_when: (command_result.rc == 0 ) and ('File exists' not in command_result.stderr)
167 | failed_when: (command_result.rc != 0) and ('File exists' not in command_result.stderr)
168 | tags:
169 | - enable_nfs_mount
170 |
171 |
172 | - name: Reload systemd
173 | # reload systemd to enable the nfs mount service
174 | # docker exec rac1 systemctl daemon-reload
175 | command: >
176 | /usr/bin/docker exec {{ this_rac_node }}
177 | /usr/bin/systemctl daemon-reload
178 | become: True
179 | tags:
180 | - reload_systemd_for_nfs
181 |
182 |
183 | - name: Start nfs mount
184 | # start the systemd nfs mount service
185 | # docker exec rac1 systemctl start oraclenfs.mount
186 | command: >
187 | /usr/bin/docker exec {{ this_rac_node }}
188 | /usr/bin/systemctl start oraclenfs.mount
189 | become: True
190 | tags:
191 | - start_nfs_mount
192 |
--------------------------------------------------------------------------------
/COREOS.md:
--------------------------------------------------------------------------------
1 | # CoreOS
2 | This repository was created and built on CoreOS. The intention of this project is not only to run Oracle RAC in Docker containers but to have all of the supporting elements running in Docker containers as well. Other than Docker itself and the shared storage, there is nothing on the host OS that cannot easily be recreated.
3 |
4 | https://coreos.com/docs/
5 |
6 | Once the VM is running, the IP address you will use to connect with an SSH client is displayed on the VM console. If you are following the instructions below and using port forwarding, you will connect to 127.0.0.1 on port 2222. Connect to the VM over SSH as the `core` user using shared key authentication.
7 |
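For example, if you are using the VirtualBox port forwarding rule described below and your private key is available to your SSH client:
```
ssh -p 2222 core@127.0.0.1
```
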
8 | ## Cloud Config
9 | The configuration file for CoreOS needs to be mounted in the VM as an ISO, which means you will need a utility to create an ISO. The recommendation is to use mkisofs, which is part of the open source cdrtools package. It is available in most Linux repositories and can be added to Cygwin. If you are on Windows, it can be downloaded from [here](http://sourceforge.net/projects/tumagcc/files/schily-cdrtools-3.02a05.7z/download).
10 |
11 | The cloud config file must be named `user_data` and the path to the user_data file on the ISO must be `/openstack/latest/user_data`. Download the cloud-config file from this repository and modify the ssh_authorized_keys to reflect your SSH public key.
12 |
13 | Execute these steps to create the ISO on Windows.
14 | ```
15 | mkdir coreos\cloud-config\openstack\latest\
16 | copy cloud-config coreos\cloud-config\openstack\latest\user_data
17 | mkisofs.exe -R -V config-2 -o mycoreos.iso coreos/cloud-config
18 | ```
19 |
20 | Execute these steps to create the ISO on Linux or Cygwin.
21 | ```
22 | mkdir -p coreos/cloud-config/openstack/latest/
23 | cp cloud-config coreos/cloud-config/openstack/latest/user_data
24 | mkisofs -R -V config-2 -o mycoreos.iso coreos/cloud-config
25 | ```
26 |
27 |
28 | ## CoreOS VM
29 | CoreOS can be deployed on a number of different platforms. At a minimum, this project requires a VM with the following specifications.
30 | - One network interface accessible from your SSH client
31 | - 8 GB of memory
32 | - Two additional 100 GB thin provisioned hard disks
33 | - Three additional 8 GB thin provisioned hard disks
34 |
35 |
36 | ## VirtualBox
37 | Download the latest stable release of the virtual disk image.
38 | https://stable.release.core-os.net/amd64-usr/current/coreos_production_image.bin.bz2
39 |
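One way to fetch it from the command line, assuming curl is available in your environment:
```
curl -L -O https://stable.release.core-os.net/amd64-usr/current/coreos_production_image.bin.bz2
```
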
40 | Set environment variables in the Windows CLI.
41 | ```
42 | REM Set the VBOXMANAGE variable to the full path of VBoxManage.exe
43 | set VBOXMANAGE="C:\Program Files\Oracle\VirtualBox\VBoxManage.exe"
44 |
45 | REM Set the MACHINE_FOLDER variable to the VirtualBox Machine Folder setting
46 | REM By default the machine folder setting is "%HOMEDRIVE%%HOMEPATH%\VirtualBox VMs"
47 | set MACHINE_FOLDER=%HOMEDRIVE%%HOMEPATH%\VirtualBox VMs
48 |
49 | REM Set the COREOS_VM_NAME variable to the name of the VM you are going to create
50 | set COREOS_VM_NAME=mycoreos
51 |
52 | REM Set the CLOUD_CONFIG variable to the full path of the cloud-config ISO
53 | set CLOUD_CONFIG=C:\coreos\mycoreos.iso
54 | ```
55 |
56 | Unzip the downloaded file using the open source bzip2 utility. 7zip does not work for this. If you are using Windows, a precompiled bzip2 binary can be downloaded from [here](http://gnuwin32.sourceforge.net/downlinks/bzip2-bin-zip.php).
57 | ```
58 | bunzip2 coreos_production_image.bin.bz2
59 | ```
60 |
61 | Convert the bin to a virtual disk image (VDI) using VBoxManage.
62 | ```
63 | %VBOXMANAGE% convertfromraw coreos_production_image.bin coreos_production_image.vdi
64 | ```
65 |
66 | Create the VM.
67 | ```
68 | %VBOXMANAGE% createvm --name %COREOS_VM_NAME% --register --ostype "Linux26_64"
69 | ```
70 |
71 | Clone the downloaded CoreOS image into the VM folder.
72 | ```
73 | %VBOXMANAGE% clonemedium coreos_production_image.vdi "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_production_image.vdi"
74 | ```
75 |
76 | Optionally resize the disk to 10 GB. This will leave room to add modifications to the OS.
77 | ```
78 | %VBOXMANAGE% modifymedium "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_production_image.vdi" --resize 10240
79 | ```
80 |
81 | Create an additional thin provisioned disk for Docker storage.
82 | ```
83 | %VBOXMANAGE% createmedium disk --filename "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_docker.vdi" --size 102400
84 | ```
85 |
86 | Create an additional thin provisioned disk for Oracle installation file storage.
87 | ```
88 | %VBOXMANAGE% createmedium disk --filename "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_oracledata.vdi" --size 102400
89 | ```
90 |
91 | Create additional thin provisioned disks for ASM disk devices.
92 | ```
93 | %VBOXMANAGE% createmedium disk --filename "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_ASM_DATA1.vdi" --size 8192
94 | %VBOXMANAGE% createmedium disk --filename "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_ASM_DATA2.vdi" --size 8192
95 | %VBOXMANAGE% createmedium disk --filename "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_ASM_DATA3.vdi" --size 8192
96 | ```
97 |
98 | Add a storage controller to the VM.
99 | ```
100 | %VBOXMANAGE% storagectl %COREOS_VM_NAME% --name "SATA" --add sata
101 | ```
102 |
103 | Add an additional IDE storage controller to the VM.
104 | ```
105 | %VBOXMANAGE% storagectl %COREOS_VM_NAME% --name "IDE" --add ide
106 | ```
107 |
108 | Attach the disks to the SATA storage controller.
109 | ```
110 | %VBOXMANAGE% storageattach %COREOS_VM_NAME% --storagectl "SATA" --type hdd --port 0 --medium "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_production_image.vdi"
111 | %VBOXMANAGE% storageattach %COREOS_VM_NAME% --storagectl "SATA" --type hdd --port 1 --medium "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_docker.vdi"
112 | %VBOXMANAGE% storageattach %COREOS_VM_NAME% --storagectl "SATA" --type hdd --port 2 --medium "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_oracledata.vdi"
113 | %VBOXMANAGE% storageattach %COREOS_VM_NAME% --storagectl "SATA" --type hdd --port 3 --medium "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_ASM_DATA1.vdi"
114 | %VBOXMANAGE% storageattach %COREOS_VM_NAME% --storagectl "SATA" --type hdd --port 4 --medium "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_ASM_DATA2.vdi"
115 | %VBOXMANAGE% storageattach %COREOS_VM_NAME% --storagectl "SATA" --type hdd --port 5 --medium "%MACHINE_FOLDER%\%COREOS_VM_NAME%\%COREOS_VM_NAME%_ASM_DATA3.vdi"
116 | ```
117 |
118 | Attach the cloud-config ISO to the IDE storage controller.
119 | ```
120 | %VBOXMANAGE% storageattach %COREOS_VM_NAME% --storagectl "IDE" --type dvddrive --port 0 --medium %CLOUD_CONFIG% --device 0
121 | ```
122 |
123 | Change the VM memory to a minimum of 8 GB.
124 | ```
125 | %VBOXMANAGE% modifyvm %COREOS_VM_NAME% --memory 8192
126 | ```
127 |
128 | Create a port forwarding rule to connect a local SSH client to the NAT network.
129 | ```
130 | %VBOXMANAGE% modifyvm %COREOS_VM_NAME% --natpf1 "guestssh,tcp,127.0.0.1,2222,,22"
131 |
132 | ```
133 |
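The VM is now ready to boot. One way to start it is headless with VBoxManage; you can also start it from the VirtualBox GUI.
```
%VBOXMANAGE% startvm %COREOS_VM_NAME% --type headless
```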
134 |
135 | ## Container Linux Configuration / Ignition
136 | ### This section is not yet complete
137 | An alternative to using cloud-config for the configuration of CoreOS is to use Container Linux Configuration and Ignition. Ignition allows for more flexibility in most cases because the configuration is done earlier in the boot process and has deeper hooks into the operating system.
138 |
139 | The CoreOS toolbox creates a container and namespace in which tools that are not installed on CoreOS can be installed and used. Use the CoreOS toolbox to build the config transpiler (ct).
140 | ```
141 | toolbox yum --nogpgcheck -y install go git
142 | toolbox git clone https://github.com/coreos/container-linux-config-transpiler.git
143 | toolbox --chdir=/container-linux-config-transpiler ./build
144 | ```
145 |
146 | The `ct` utility is now available in the CoreOS toolbox. It reads the configuration from stdin and prints the Ignition config in JSON format on stdout. Here is an example with a config that contains only the SSH authorized keys for the core user.
147 | ```yaml
148 | # ct.config
149 | passwd:
150 | users:
151 | - name: core
152 | ssh_authorized_keys:
153 | - ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEA4giEY9NfIhEd16jBxAYSDAx+Drc
154 | ```
155 | ```bash
156 | core ~ $ cat ct.config | toolbox /container-linux-config-transpiler/bin/ct
157 | Spawning container core-fedora-latest on /var/lib/toolbox/core-fedora-latest.
158 | Press ^] three times within 1s to kill container.
159 |
160 |
161 | {"ignition":{"version":"2.0.0","config":{}},"storage":{},"systemd":{},"networkd":{},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEA4giEY9NfIhEd16jBxAYSDAx+Drc"]}]}}
162 | Container core-fedora-latest exited successfully.
163 | ```
164 |
165 | For a VMware vApp, we need the base64-encoded version of the Ignition file.
166 | ```
167 | cat ct.config | toolbox /container-linux-config-transpiler/bin/ct 2>/dev/null | base64 -w0 && echo
168 | ```
169 |
170 | ***
171 |
172 |
173 | # TODO
174 |
175 | ## VMware
176 | CoreOS provides pre-built OVA templates for VMware, which makes it easy to both deploy and configure. Refer to the CoreOS documentation for additional details: `https://coreos.com/os/docs/latest/booting-on-vmware.html`.
177 |
178 | Download the latest stable release of the OVA.
179 | https://stable.release.core-os.net/amd64-usr/current/coreos_production_vmware_ova.ova
180 |
181 | ### ESXi
182 | Use the vSphere Client to deploy the VM as follows:
183 | 1. In the menu, click `File` > `Deploy OVF Template...`
184 | 2. In the wizard, specify the location of the OVA file downloaded earlier
185 | 3. Name your VM
186 | 4. Choose "thin provision" for the disk format
187 | 5. Choose your network settings
188 | 6. Confirm the settings, then click "Finish"
189 |
190 |
191 |
--------------------------------------------------------------------------------
/ansible/roles/create_first_rac_node/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Used to provision the first rac node container
2 | # 'this_rac_node' and 'this_image' variables must be set
3 | # All jinja2 templates are in the common role templates folder
4 |
5 |
6 | ##########################################################################################
7 | ############# Create the first rac node container
8 | ##########################################################################################
9 |
10 | - name: Create rac1 container
11 | # call the start_rac_node.yml common task, the 'this_image' variable is set to the
12 | # rac node image which was created by the 'create_oracle_image' role
13 | # ../../common/tasks/start_rac_node.yml this_rac_node=rac1 this_image=giinstalled
14 | include: ../../common/tasks/start_rac_node.yml this_rac_node="{{ first_rac_node }}" this_image=giinstalled
15 | tags:
16 | - create_rac1_container
17 |
18 |
19 | ##########################################################################################
20 | ############# Configure the grid infrastructure binaries
21 | ##########################################################################################
22 |
23 | - block:
24 | - name: Configure grid infrastructure binaries
25 | # the grid infrastructure binaries have already been installed as part of the image,
26 | # this step configures them
27 | # docker exec rac1 su - grid -c ' /u01/app/12.1.0/grid/crs/config/config.sh \
28 | # -waitforcompletion -ignoreSysPrereqs -silent \
29 | # "INVENTORY_LOCATION=/u01/app/oraInventory" \
30 | # "oracle.install.option=CRS_CONFIG" \
31 | # "ORACLE_BASE=/u01/app/grid" \
32 | # "ORACLE_HOME=/u01/app/12.1.0/grid" \
33 | # "oracle.install.asm.OSDBA=asmdba" \
34 | # "oracle.install.asm.OSOPER=asmoper" \
35 | # "oracle.install.asm.OSASM=asmadmin" \
36 | # "oracle.install.crs.config.gpnp.scanName=clu-121-scan.clu-121.example.com" \
37 | # "oracle.install.crs.config.gpnp.scanPort=1521 " \
38 | # "oracle.install.crs.config.ClusterType=STANDARD" \
39 | # "oracle.install.crs.config.clusterName=clu-121" \
40 | # "oracle.install.crs.config.gpnp.configureGNS=true" \
41 | # "oracle.install.crs.config.autoConfigureClusterNodeVIP=true" \
42 | # "oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS" \
43 | # "oracle.install.crs.config.gpnp.gnsSubDomain=clu-121.example.com" \
44 | # "oracle.install.crs.config.gpnp.gnsVIPAddress=clu-121-gns.example.com" \
45 | # "oracle.install.crs.config.clusterNodes=rac1:AUTO" \
46 | # "oracle.install.crs.config.networkInterfaceList=eth-pub:10.10.10.0:1,eth-priv:11.11.11.0:2" \
47 | # "oracle.install.crs.config.storageOption=LOCAL_ASM_STORAGE" \
48 | # "oracle.install.crs.config.useIPMI=false" \
49 | # "oracle.install.asm.SYSASMPassword=oracle_4U" \
50 | # "oracle.install.asm.monitorPassword=oracle_4U" \
51 | # "oracle.install.asm.diskGroup.name=DATA" \
52 | # "oracle.install.asm.diskGroup.redundancy=EXTERNAL" \
53 | # "oracle.install.asm.diskGroup.disks=/dev/asmdisks/asm-clu-121-DATA-disk1,/dev/asmdisks/asm-clu-121-DATA-disk2,/dev/asmdisks/asm-clu-121-DATA-disk3" \
54 | # "oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asmdisks/*" \
55 | # "oracle.install.asm.useExistingDiskGroup=false"'
56 | command: >
57 | /usr/bin/docker exec {{ first_rac_node }} su -
58 | {{
59 | operating_system.grid_infrastructure.users |
60 | selectattr('title', 'equalto', 'owner') |
61 | map(attribute='name') | first
62 | }} -c "{{
63 | oracle_binaries |
64 | selectattr('type', 'equalto', 'grid') |
65 | selectattr('version', 'equalto', '12.1.0.2') |
66 | map(attribute='oracle_home') | first
67 | }}/crs/config/config.sh
68 | {{
69 | installation_files |
70 | selectattr('type', 'equalto', 'grid') |
71 | map(attribute='configuration_parameters') |
72 | first | join(' ')
73 | }}"
74 | register: configure_grid_binaries_result
75 | changed_when: (configure_grid_binaries_result.rc == 0 ) or (configure_grid_binaries_result.rc == 6 )
76 | failed_when: (configure_grid_binaries_result.rc != 0) and (configure_grid_binaries_result.rc != 6 )
77 | always:
78 | - name: Print readable previous command output
79 | debug:
80 | var: configure_grid_binaries_result.stdout_lines
81 | tags:
82 | - configure_grid
83 |
84 |
85 | - block:
86 | - name: Modify the root script to show output
87 | # when the binaries are installed silently, the root script does not show output by default,
88 | # this step modifies the root script to show the output
89 |   # docker exec rac1 sed -i '/rootmacro.sh/s/$/ -stdout/' /u01/app/12.1.0/grid/root.sh
90 | command: >
91 | /usr/bin/docker exec {{ first_rac_node }}
92 | sed -i '/rootmacro.sh/s/$/ -stdout/'
93 | {{
94 | oracle_binaries |
95 | selectattr('type', 'equalto', 'grid') |
96 | selectattr('version', 'equalto', '12.1.0.2') |
97 | map(attribute='oracle_home') | first
98 | }}/root.sh
99 |
100 |
101 | - name: Run grid infrastructure root scripts
102 | # docker exec rac1 /u01/app/12.1.0/grid/root.sh
103 | command: >
104 | /usr/bin/docker exec {{ first_rac_node }}
105 | {{
106 | oracle_binaries |
107 | selectattr('type', 'equalto', 'grid') |
108 | selectattr('version', 'equalto', '12.1.0.2') |
109 | map(attribute='oracle_home') | first
110 | }}/root.sh
111 | register: configure_grid_root_result
112 | tags:
113 | - configure_grid_root
114 |
115 |
116 | - block:
117 | - name: Create grid tools config response file
118 | # when installing the grid infrastructure in silent mode, the extra step of running the grid
119 | # tools configuration script is necessary, this step creates the response file for the script
120 | # /srv/docker/rac_nodes/custom_services/tools_config.rsp
121 | # oracle.assistants.asm|S_ASMPASSWORD=
122 | # oracle.assistants.asm|S_ASMMONITORPASSWORD=
123 | # oracle.crs|oracle_install_crs_ConfigureMgmtDB=FALSE
124 | # oracle.crs|oracle_install_crs_MgmtDB_CDB=FALSE
125 | # oracle.crs|oracle_install_crs_MgmtDB_Std=FALSE
126 | lineinfile:
127 | dest: "{{ operating_system.rac_node_directory }}/tools_config.rsp"
128 | state: present
129 | create: yes
130 | line: "{{ item }}"
131 | mode: 0644
132 | become: True
133 | with_items:
134 | - "{{ installation_files | selectattr('type', 'equalto', 'grid') | map(attribute='tools_configuration_parameters') | first | list }}"
135 |
136 |
137 | - name: Configure grid infrastructure tools
138 | # run the tools configuration script with the response file created from the last task
139 | # docker exec rac1 su - grid -c "/u01/app/12.1.0/grid/cfgtoollogs/configToolAllCommands \
140 | # RESPONSE_FILE=/usr/lib/custom_services/tools_config.rsp"
141 | command: >
142 | /usr/bin/docker exec {{ first_rac_node }} su -
143 | {{
144 | operating_system.grid_infrastructure.users |
145 | selectattr('title', 'equalto', 'owner') |
146 | map(attribute='name') | first
147 | }} -c "{{
148 | oracle_binaries |
149 | selectattr('type', 'equalto', 'grid') |
150 | selectattr('version', 'equalto', '12.1.0.2') |
151 | map(attribute='oracle_home') | first
152 | }}/cfgtoollogs/configToolAllCommands
153 | RESPONSE_FILE=/usr/lib/custom_services/tools_config.rsp"
154 | register: configure_grid_tools_result
155 | always:
156 | - name: Print readable previous command output
157 | debug:
158 | var: configure_grid_tools_result.stdout_lines
159 |
160 |
161 | - name: Remove grid tools config response file
162 | # rm -f /srv/docker/rac_nodes/custom_services/tools_config.rsp
163 | file:
164 | path: "{{ operating_system.rac_node_directory }}/tools_config.rsp"
165 | state: absent
166 | become: True
167 | tags:
168 | - configure_grid_tools
169 |
170 |
171 | ##########################################################################################
172 | ############# Enable the database binaries for RAC
173 | ##########################################################################################
174 |
175 | - name: Relink database binaries for RAC
176 | # the database binaries are installed when there is only one node so they are not enabled for RAC,
177 | # this step relinks the 'oracle' executable for RAC
178 | # docker exec rac1 su - oracle -c 'export ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1 && \
179 | # make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk rac_on && \
180 | # make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk ioracle'
181 | command: >
182 | /usr/bin/docker exec {{ first_rac_node }} su -
183 | {{
184 | operating_system.database.users |
185 | selectattr('title', 'equalto', 'owner') |
186 | map(attribute='name') | first
187 | }} -c
188 | "export ORACLE_HOME={{
189 | oracle_binaries |
190 | selectattr('type', 'equalto', 'database') |
191 | selectattr('version', 'equalto', '12.1.0.2') |
192 | map(attribute='oracle_home') | first
193 | }}
194 | && make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk rac_on
195 | && make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk ioracle"
196 | tags:
197 | - relink_for_rac
198 |
--------------------------------------------------------------------------------
/ansible/roles/prepare_host/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Prepare docker host and create supporting containers (bind, dhcpd, nfs)
2 |
3 |
4 | ##########################################################################################
5 | ############# Prepare ASM block and file devices
6 | ##########################################################################################
7 |
8 | - block:
9 | - name: Create ASM file devices, skips any block devices
10 | file:
11 | state: touch
12 | path: "{{ item.1.path }}"
13 | become: True
14 | with_subelements:
15 | - "{{ asm_disk_groups }}"
16 | - disks
17 | when: item.1.type == "file"
18 | register: asm_disk_files
19 | tags:
20 | - create_asm_files
21 |
22 |
23 | - name: Inflate ASM file devices, only if they have not already been inflated
24 | # dd if=/dev/zero of=/oraclenfs/asm-clu-121-NDATA-disk1 bs=1024k count=2000
25 | # dd if=/dev/zero of=/oraclenfs/asm-clu-121-NDATA-disk2 bs=1024k count=2000
26 | # dd if=/dev/zero of=/oraclenfs/asm-clu-121-NDATA-disk3 bs=1024k count=2000
27 | command: "dd if=/dev/zero of={{ item.dest }} bs=1024k count=2000"
28 | become: True
29 | with_items:
30 | - "{{ asm_disk_files.results }}"
31 | when: ( item.changed == true ) and ( item.state == 'file' ) and ( item.size == 0 )
32 | tags:
33 | - inflate_asm_files
34 | tags:
35 | - asm_files
36 | - asm
37 |
38 |
39 | - name: Set permissions and ownership on ASM file devices
40 | # chown 54421 /oraclenfs/asm*
41 | # chgrp 54422 /oraclenfs/asm*
42 | # chmod g+w /oraclenfs/asm*
43 | file:
44 | state: file
45 | path: "{{ item.1.path }}"
46 | owner: "{{ operating_system.grid_infrastructure.users | selectattr('title', 'equalto', 'owner') | map(attribute='uid') | first }}"
47 | group: "{{ operating_system.grid_infrastructure.groups | selectattr('title', 'equalto', 'osasm') | map(attribute='gid') | first }}"
48 | mode: 0660
49 | become: True
50 | with_subelements:
51 | - "{{ asm_disk_groups }}"
52 | - disks
53 | when: item.1.type == "file"
54 | tags:
55 | - check_asm_files
56 | - asm_files
57 | - asm
58 |
59 |
60 | - name: Check that ASM block devices exist, skips any file devices
61 | stat:
62 | path: "{{ item.1.rawpath }}"
63 | with_subelements:
64 | - "{{ asm_disk_groups }}"
65 | - disks
66 | when: item.1.type == "block"
67 | tags:
68 | - check_asm_blocks
69 | - asm_blocks
70 | - asm
71 |
72 |
73 | ##########################################################################################
74 | ############# Create docker networks
75 | ##########################################################################################
76 |
77 | - name: Create Docker networks
78 | # docker network create --subnet=10.10.10.0/24 pub
79 | # docker network create --subnet=11.11.11.0/24 priv
80 | docker_network:
81 | name: "{{ item.name }}"
82 | appends: True
83 | state: present
84 | ipam_options:
85 | subnet: "{{ item.subnet }}/{{ item.cidr }}"
86 | with_items:
87 | - "{{ docker.networks }}"
88 | tags:
89 | - create_docker_networks
90 |
91 |
92 | ##########################################################################################
93 | ############# Create host configuration directories
94 | ##########################################################################################
95 |
96 | - name: Create DHCPD container configuration directory
97 | # mkdir -p /srv/docker/dhcpd
98 | # chmod 0777 /srv/docker/dhcpd
99 | file:
100 | state: directory
101 | path: "{{ item.config_directory }}"
102 | mode: 0777
103 | become: True
104 | with_items:
105 | - "{{ docker.containers | selectattr('name', 'equalto', 'dhcpd') | list }}"
106 | tags:
107 | - create_dhcpd_config_dir
108 | - create_directory
109 |
110 |
111 | - name: Create rac node containers custom directories
112 | # mkdir -p /srv/docker/rac_nodes/custom_services
113 | # chmod 0777 /srv/docker/rac_nodes/custom_services
114 | file:
115 | state: directory
116 | path: "{{ operating_system.rac_node_directory }}"
117 | mode: 0777
118 | become: True
119 | tags:
120 | - create_rac_nodes_custom_dirs
121 | - create_directory
122 |
123 |
124 | - name: Create scripts directories
125 | # mkdir -p /srv/docker/scripts
126 | # chmod 0777 /srv/docker/scripts
127 | file:
128 | state: directory
129 | path: "{{ operating_system.scripts_directory }}"
130 | mode: 0777
131 | become: True
132 | tags:
133 | - create_scripts_dir
134 | - create_directory
135 |
136 |
137 | - name: Create NFS container configuration directory
138 | # mkdir -p /srv/docker/nfs
139 | # chmod 0777 /srv/docker/nfs
140 | file:
141 | state: directory
142 | path: "{{ item.config_directory }}"
143 | mode: 0777
144 | become: True
145 | with_items:
146 | - "{{ docker.containers | selectattr('name', 'equalto', 'nfs') | list }}"
147 | tags:
148 | - create_nfs_config_dir
149 | - create_directory
150 |
151 |
152 | - name: Create installation file directories
153 | # mkdir -p /oracledata/stage/12.1.0
154 | # chmod 0777 /oracledata/stage/12.1.0
155 | file:
156 | state: directory
157 | path: "/oracledata/stage/{{ item.version }}"
158 | mode: 0777
159 | become: True
160 | with_items:
161 | - "{{ installation_files }}"
162 | tags:
163 | - create_installation_file_dirs
164 | - installation_files
165 |
166 |
167 | ##########################################################################################
168 | ############# Copy container configuration files
169 | ##########################################################################################
170 |
171 | - name: Copy DHCPD container configuration file
172 | # cp dhcpd.conf /srv/docker/dhcpd/
173 | copy:
174 | src: "{{ item.config_file }}"
175 | dest: "{{ item.config_directory }}/"
176 | with_items:
177 | - "{{ docker.containers | selectattr('name', 'equalto', 'dhcpd') | list }}"
178 | tags:
179 | - copy_dhcpd_config_file
180 | - config_files
181 |
182 |
183 | - name: Copy NFS container configuration file
184 | # cp ganesha.conf /srv/docker/nfs/
185 | copy:
186 | src: "{{ item.config_file }}"
187 | dest: "{{ item.config_directory }}/"
188 | with_items:
189 | - "{{ docker.containers | selectattr('name', 'equalto', 'nfs') | list }}"
190 | tags:
191 | - copy_nfs_config_file
192 | - config_files
193 |
194 |
195 | ##########################################################################################
196 | ############# Create supporting containers
197 | ##########################################################################################
198 |
199 | - name: Create BIND container
200 | # docker create --name bind --hostname bind --publish 53:53/tcp --publish 53:53/udp \
201 | # --volume /srv/docker/bind:/data --env WEBMIN_ENABLED=false sethmiller/bind -4
202 | # docker network connect --ip 10.10.10.10 pub bind
203 | # docker start bind
204 | docker_container:
205 | name: bind
206 | hostname: "{{ item.hostname }}"
207 | image: "{{ item.image }}"
208 | state: started
209 | published_ports:
210 | "{{ item.ports }}"
211 | volumes:
212 | "{{ item.volumes }}"
213 | networks:
214 | "{{ item.networks }}"
215 | env:
216 | "{{ item.env }}"
217 | command: "{{ item.command }}"
218 | with_items:
219 | - "{{ docker.containers | selectattr('name', 'equalto', 'bind') | list }}"
220 | tags:
221 | - create_bind_container
222 | - create_container
223 |
224 |
225 | - name: Create DHCPD container
226 | # docker create --name dhcpd --hostname dhcpd --volume /srv/docker/dhcpd:/data \
227 | # --volume /srv/docker/bind/bind/etc:/keys --dns 10.10.10.10 networkboot/dhcpd
228 | # docker network connect --ip 10.10.10.11 pub dhcpd
229 | # docker network connect --ip 11.11.11.11 priv dhcpd
230 | # docker start dhcpd
231 | docker_container:
232 | name: dhcpd
233 | hostname: "{{ item.hostname }}"
234 | image: "{{ item.image }}"
235 | state: started
236 | volumes:
237 | "{{ item.volumes }}"
238 | networks:
239 | "{{ item.networks }}"
240 | dns_servers:
241 | "{{ item.dns }}"
242 | with_items:
243 | - "{{ docker.containers | selectattr('name', 'equalto', 'dhcpd') | list }}"
244 | tags:
245 | - create_dhcpd_container
246 | - create_container
247 |
248 |
249 | - name: Create NFS container
250 | # docker run --detach --privileged --name nfs --hostname nfs --volume /srv/docker/nfs:/etc/ganesha \
251 | # --volume /oraclenfs:/oraclenfs --dns 10.10.10.10 sethmiller/nf
252 | # docker network connect --ip 10.10.10.12 pub nfs
253 | docker_container:
254 | name: nfs
255 | hostname: "{{ item.hostname }}"
256 | image: "{{ item.image }}"
257 | state: started
258 | privileged: True
259 | volumes:
260 | "{{ item.volumes }}"
261 | networks:
262 | "{{ item.networks }}"
263 | dns_servers:
264 | "{{ item.dns }}"
265 | with_items:
266 | - "{{ docker.containers | selectattr('name', 'equalto', 'nfs') | list }}"
267 | tags:
268 | - create_nfs_container
269 | - create_container
270 |
271 |
272 | ##########################################################################################
273 | ############# Prepare Oracle installation files
274 | ##########################################################################################
275 |
276 | - name: Download installation files only if they are not already present
277 | get_url:
278 | url: "{{ item.1.name }}"
279 | dest: "/oracledata/stage/{{ item.1.name | basename }}"
280 | become: True
281 | with_subelements:
282 | - "{{ installation_files }}"
283 | - files
284 | tags:
285 | - download_installation_files
286 | - installation_files
287 |
288 |
289 | - name: Unzip installation files only if the 'creates' file does not already exist
290 | unarchive:
291 | src: "/oracledata/stage/{{ item.1.name | basename }}"
292 | dest: "/oracledata/stage/{{ item.0.version }}/"
293 | creates: "/oracledata/stage/{{ item.0.version }}/{{ item.1.creates }}"
294 | remote_src: True
295 | owner: "{{ operating_system.grid_infrastructure.users | selectattr('title', 'equalto', 'owner') | map(attribute='uid') | first }}"
296 | group: "{{ operating_system.grid_infrastructure.groups | selectattr('title', 'equalto', 'inventory') | map(attribute='gid') | first }}"
297 | become: True
298 | with_subelements:
299 | - "{{ installation_files }}"
300 | - files
301 | tags:
302 | - unzip_installation_files
303 | - installation_files
304 |
--------------------------------------------------------------------------------
/ansible/roles/create_oracle_image/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Create the docker image that will be used to create the rac node containers
2 |
3 |
4 | ##########################################################################################
5 | ############# Create rac1 container
6 | ##########################################################################################
7 |
8 | - name: Create rac1 container
9 | # Call the start_rac_node.yml common task
10 | # ../../common/tasks/start_rac_node.yml this_rac_node=rac1 this_image=sethmiller/giready
11 | include: >
12 | ../../common/tasks/start_rac_node.yml
13 | this_rac_node="{{ first_rac_node }}"
14 | this_image="{{
15 | docker.containers |
16 | selectattr('name', 'equalto', first_rac_node) |
17 | map(attribute='image') | first
18 | }}"
19 | tags:
20 | - create_rac1_container
21 |
22 |
23 | ##########################################################################################
24 | ############# Install the grid infrastructure and database binaries
25 | ##########################################################################################
26 |
27 | - block:
28 | - name: Install grid infrastructure binaries
29 | # docker exec rac1 su - grid -c '/stage/12.1.0.2/grid/runInstaller -waitforcompletion \
30 | # -ignoreSysPrereqs -silent -force \
31 | # "INVENTORY_LOCATION=/u01/app/oraInventory" \
32 | # "UNIX_GROUP_NAME=oinstall" \
33 | # "ORACLE_HOME=/u01/app/12.1.0/grid" \
34 | # "ORACLE_BASE=/u01/app/grid" \
35 | # "oracle.install.option=CRS_SWONLY" \
36 | # "oracle.install.asm.OSDBA=asmdba" \
37 | # "oracle.install.asm.OSOPER=asmoper" \
38 | # "oracle.install.asm.OSASM=asmadmin"'
39 | command: >
40 | /usr/bin/docker exec {{ first_rac_node }} su -
41 | {{
42 | operating_system.grid_infrastructure.users |
43 | selectattr('title', 'equalto', 'owner') |
44 | map(attribute='name') | first
45 | }} -c "/stage/{{
46 | installation_files |
47 | selectattr('type', 'equalto', 'grid') |
48 | map(attribute='version') | first
49 | }}/grid/runInstaller
50 | {{
51 | installation_files |
52 | selectattr('type', 'equalto', 'grid') |
53 | map(attribute='installer_parameters') |
54 | first | join(' ')
55 | }}"
56 | register: install_grid_binaries_result
57 | changed_when: (install_grid_binaries_result.rc == 0 ) or (install_grid_binaries_result.rc == 6 )
58 | failed_when: (install_grid_binaries_result.rc != 0) and (install_grid_binaries_result.rc != 6 )
59 |
60 |
61 | - name: Run grid infrastructure root scripts
62 | # docker exec rac1 /u01/app/oraInventory/orainstRoot.sh
63 | # docker exec rac1 /u01/app/oraInventory/root.sh
64 | command: "/usr/bin/docker exec {{ first_rac_node }} {{ item }}"
65 | with_items:
66 | - >
67 | {{ oracle_binaries |
68 | selectattr('type', 'equalto', 'all') |
69 | map(attribute='oracle_inventory') | first }}/orainstRoot.sh
70 | - >
71 | {{ oracle_binaries |
72 | selectattr('type', 'equalto', 'grid') |
73 | selectattr('version', 'equalto', '12.1.0.2') |
74 | map(attribute='oracle_home') | first }}/root.sh
75 | always:
76 | - name: Print readable previous command output
77 | debug:
78 | var: install_grid_binaries_result.stdout_lines
79 | tags:
80 | - install_grid
81 |
82 |
83 | - block:
84 | - name: Install database binaries
85 | # docker exec rac1 su - oracle -c '/stage/12.1.0.2/database/runInstaller -waitforcompletion \
86 | # -ignoreSysPrereqs -silent -force \
87 | # "oracle.install.option=INSTALL_DB_SWONLY" \
88 | # "INVENTORY_LOCATION=/u01/app/oraInventory" \
89 | # "UNIX_GROUP_NAME=oinstall" \
90 | # "ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1" \
91 | # "ORACLE_BASE=/u01/app/oracle" \
92 | # "oracle.install.db.InstallEdition=EE" \
93 | # "oracle.install.db.DBA_GROUP=dba" \
94 | # "oracle.install.db.OPER_GROUP=oper" \
95 | # "oracle.install.db.BACKUPDBA_GROUP=backupdba" \
96 | # "oracle.install.db.DGDBA_GROUP=dgdba" \
97 | # "oracle.install.db.KMDBA_GROUP=kmdba" \
98 | # "DECLINE_SECURITY_UPDATES=true"'
99 | command: >
100 | /usr/bin/docker exec {{ first_rac_node }} su -
101 | {{
102 | operating_system.database.users |
103 | selectattr('title', 'equalto', 'owner') |
104 | map(attribute='name') | first
105 | }} -c "/stage/{{
106 | installation_files |
107 | selectattr('type', 'equalto', 'database') |
108 | map(attribute='version') | first
109 | }}/database/runInstaller
110 | {{
111 | installation_files |
112 | selectattr('type', 'equalto', 'database') |
113 | map(attribute='installer_parameters') |
114 | first | join(' ')
115 | }}"
116 | register: install_database_binaries_result
117 | changed_when: (install_database_binaries_result.rc == 0 ) or (install_database_binaries_result.rc == 6 )
118 | failed_when: (install_database_binaries_result.rc != 0) and (install_database_binaries_result.rc != 6 )
119 |
120 |
121 | - name: Run database installer root scripts
122 | # docker exec rac1 /u01/app/oracle/product/12.1.0/dbhome_1/root.sh
123 | command: "/usr/bin/docker exec {{ first_rac_node }} {{ item }}"
124 | with_items:
125 | - >
126 | {{ oracle_binaries |
127 | selectattr('type', 'equalto', 'database') |
128 | selectattr('version', 'equalto', '12.1.0.2') |
129 | map(attribute='oracle_home') | first }}/root.sh
130 | always:
131 | - name: Print readable previous command output
132 | debug:
133 | var: install_database_binaries_result.stdout_lines
134 | tags:
135 | - install_database
136 |
137 |
138 | ##########################################################################################
139 | ############# Update OPatch
140 | ##########################################################################################
141 |
142 | - name: Update OPatch for grid infrastructure
143 | # docker exec rac1 su - grid -c "cp -a /stage/12.1.0.2/OPatch/* /u01/app/12.1.0/grid/OPatch/"
144 | command: >
145 | /usr/bin/docker exec {{ first_rac_node }} su -
146 | {{
147 | operating_system.grid_infrastructure.users |
148 | selectattr('title', 'equalto', 'owner') |
149 | map(attribute='name') | first
150 | }} -c "cp -a /stage/{{
151 | installation_files |
152 | selectattr('type', 'equalto', 'opatch') |
153 | map(attribute='version') | first
154 | }}/OPatch/*
155 | {{
156 | oracle_binaries |
157 | selectattr('type', 'equalto', 'grid') |
158 | selectattr('version', 'equalto', '12.1.0.2') |
159 | map(attribute='oracle_home') | first
160 | }}/OPatch/"
161 | tags:
162 | - update_opatch_for_grid
163 | - opatch
164 |
165 |
166 | - name: Update OPatch for database
167 | # docker exec rac1 su - oracle -c "cp -a /stage/12.1.0.2/OPatch/* /u01/app/oracle/product/12.1.0/dbhome_1/OPatch/"
168 | command: >
169 | /usr/bin/docker exec {{ first_rac_node }} su -
170 | {{
171 | operating_system.database.users |
172 | selectattr('title', 'equalto', 'owner') |
173 | map(attribute='name') | first
174 | }} -c "cp -a /stage/{{
175 | installation_files |
176 | selectattr('type', 'equalto', 'opatch') |
177 | map(attribute='version') | first
178 | }}/OPatch/*
179 | {{
180 | oracle_binaries |
181 | selectattr('type', 'equalto', 'database') |
182 | selectattr('version', 'equalto', '12.1.0.2') |
183 | map(attribute='oracle_home') | first
184 | }}/OPatch/"
185 | tags:
186 | - update_opatch_for_database
187 | - opatch
188 |
189 |
190 | ##########################################################################################
191 | ############# Apply bundle patch
192 | ##########################################################################################
193 |
194 | - name: Apply bundle patch to grid infrastructure
195 | # docker exec rac1 su - grid -c "/u01/app/12.1.0/grid/OPatch/opatch apply \
196 | # /stage/12.1.0.2/23615334/21436941 -oh /u01/app/12.1.0/grid -silent"
197 | command: >
198 | /usr/bin/docker exec {{ first_rac_node }} su -
199 | {{
200 | operating_system.grid_infrastructure.users |
201 | selectattr('title', 'equalto', 'owner') |
202 | map(attribute='name') | first
203 | }} -c "
204 | {{
205 | oracle_binaries |
206 | selectattr('type', 'equalto', 'grid') |
207 | selectattr('version', 'equalto', '12.1.0.2') |
208 | map(attribute='oracle_home') | first
209 | }}/OPatch/opatch apply /stage/{{
210 | installation_files |
211 | selectattr('type', 'equalto', 'bundle') |
212 | map(attribute='version') | first
213 | }}/{{ item }} -oh
214 | {{
215 | oracle_binaries |
216 | selectattr('type', 'equalto', 'grid') |
217 | selectattr('version', 'equalto', '12.1.0.2') |
218 | map(attribute='oracle_home') | first
219 | }} -silent"
220 | with_items:
221 | - "{{ installation_files | selectattr('type', 'equalto', 'bundle') | map(attribute='patch_numbers') | first }}"
222 | tags:
223 | - apply_bundle_patch_to_grid
224 | - apply_patch
225 |
226 |
227 | - name: Clean up .patch_storage in grid infrastructure home
228 | # docker exec rac1 su - grid -c "find /u01/app/12.1.0/grid/.patch_storage -mindepth 1 -type d -exec rm -rf {} +"
229 | command: >
230 | /usr/bin/docker exec {{ first_rac_node }} su -
231 | {{
232 | operating_system.grid_infrastructure.users |
233 | selectattr('title', 'equalto', 'owner') |
234 | map(attribute='name') | first
235 | }} -c "find
236 | {{
237 | oracle_binaries |
238 | selectattr('type', 'equalto', 'grid') |
239 | selectattr('version', 'equalto', '12.1.0.2') |
240 | map(attribute='oracle_home') | first
241 | }}/.patch_storage/ -mindepth 1 -type d -exec rm -rf {} +"
242 | tags:
243 | - cleanup_opatch_storage_for_grid
244 | - apply_patch
245 |
246 |
247 | - name: Apply bundle patch to database
248 | # docker exec rac1 su - oracle -c "/u01/app/oracle/product/12.1.0/dbhome_1/OPatch/opatch apply \
249 | # /stage/12.1.0.2/23615334/21436941 -oh /u01/app/oracle/product/12.1.0/dbhome_1 -silent"
250 | command: >
251 | /usr/bin/docker exec {{ first_rac_node }} su -
252 | {{
253 | operating_system.database.users |
254 | selectattr('title', 'equalto', 'owner') |
255 | map(attribute='name') | first
256 | }} -c "
257 | {{
258 | oracle_binaries |
259 | selectattr('type', 'equalto', 'database') |
260 | selectattr('version', 'equalto', '12.1.0.2') |
261 | map(attribute='oracle_home') | first
262 | }}/OPatch/opatch apply /stage/{{
263 | installation_files |
264 | selectattr('type', 'equalto', 'bundle') |
265 | map(attribute='version') | first
266 | }}/{{ item }} -oh
267 | {{
268 | oracle_binaries |
269 | selectattr('type', 'equalto', 'database') |
270 | selectattr('version', 'equalto', '12.1.0.2') |
271 | map(attribute='oracle_home') | first
272 | }} -silent"
273 | with_items:
274 | - "{{ installation_files | selectattr('type', 'equalto', 'bundle') | map(attribute='patch_numbers') | first }}"
275 | tags:
276 | - apply_bundle_patch_to_database
277 | - apply_patch
278 |
279 |
280 | ##########################################################################################
281 | ############# Apply one-off patches
282 | ##########################################################################################
283 |
284 | - name: Apply one-off patches to database
285 | # docker exec rac1 su - oracle -c "/u01/app/oracle/product/12.1.0/dbhome_1/OPatch/opatch apply \
286 | # /stage/12.1.0.2/19404068 -oh /u01/app/oracle/product/12.1.0/dbhome_1 -silent"
287 | command: >
288 | /usr/bin/docker exec {{ first_rac_node }} su -
289 | {{
290 | operating_system.database.users |
291 | selectattr('title', 'equalto', 'owner') |
292 | map(attribute='name') | first
293 | }} -c "
294 | {{
295 | oracle_binaries |
296 | selectattr('type', 'equalto', 'database') |
297 | selectattr('version', 'equalto', '12.1.0.2') |
298 | map(attribute='oracle_home') | first
299 | }}/OPatch/opatch apply /stage/{{
300 | installation_files |
301 | selectattr('type', 'equalto', 'oneoff') |
302 | map(attribute='version') | first
303 | }}/{{ item }} -oh
304 | {{
305 | oracle_binaries |
306 | selectattr('type', 'equalto', 'database') |
307 | selectattr('version', 'equalto', '12.1.0.2') |
308 | map(attribute='oracle_home') | first
309 | }} -silent"
310 | with_items:
311 | - "{{ installation_files | selectattr('type', 'equalto', 'oneoff') | map(attribute='patch_numbers') | first }}"
312 | tags:
313 | - apply_oneoff_patches_to_database
314 | - apply_patch
315 |
316 |
317 | - name: Clean up .patch_storage in database home
318 |   # docker exec rac1 su - oracle -c "find /u01/app/oracle/product/12.1.0/dbhome_1/.patch_storage -mindepth 1 -type d -exec rm -rf {} +"
319 | command: >
320 | /usr/bin/docker exec {{ first_rac_node }} su -
321 | {{
322 | operating_system.database.users |
323 | selectattr('title', 'equalto', 'owner') |
324 | map(attribute='name') | first
325 | }} -c "find
326 | {{
327 | oracle_binaries |
328 | selectattr('type', 'equalto', 'database') |
329 | selectattr('version', 'equalto', '12.1.0.2') |
330 | map(attribute='oracle_home') | first
331 | }}/.patch_storage/ -mindepth 1 -type d -exec rm -rf {} +"
332 | tags:
333 | - cleanup_opatch_storage_for_database
334 | - apply_patch
335 |
336 |
337 | ##########################################################################################
338 | ############# Commit rac1 container to an image
339 | ##########################################################################################
340 |
341 | - name: Commit rac1 container to an image
342 | # docker commit rac1 giinstalled
343 | command: "/usr/bin/docker commit {{ first_rac_node }} giinstalled"
344 | tags:
345 | - commit_rac1
346 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 12c-rac-docker
2 | Multiple node Oracle RAC cluster running in Docker containers.
3 |
4 |
5 | ## How to use
6 | This setup uses block devices for the ASM DATA disk group, which the grid infrastructure requires during installation. The recommendation is to use three disks that are at least 4 GB each.
7 |
8 | When creating the BIND and DHCPD containers, it is important that the BIND container is created first. A key is created as part of the BIND image build that DHCPD uses for dynamic DNS updates, and that key must exist when the DHCPD container is created.
9 |
10 | The passwords for the non-privileged user accounts are all set to `oracle_4U`.
11 |
12 | This project was built using CoreOS. See the [COREOS.md](https://github.com/Seth-Miller/12c-rac-docker/blob/master/COREOS.md) file for instructions on how to use CoreOS for this project.
13 |
14 | ### Ansible
15 | This project has been automated using Ansible. Instructions for using Ansible can be found in [ANSIBLE.md](https://github.com/Seth-Miller/12c-rac-docker/blob/master/ANSIBLE.md). If you want to go through the manual process, proceed with this document.
16 |
17 |
18 | ## Oracle installation files
19 | Download the Oracle 12c Grid Infrastructure and Database installation files and unzip them in a directory on the host. The directory will be mounted as a volume in the RAC node containers for installation. The host directory used in this example is `/oracledata/stage/12.1.0.2`. Once unzipped, there should be a `grid` and `database` folder in `/oracledata/stage/12.1.0.2`.
20 |
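For example, assuming the installation zips have already been downloaded (the zip file names below are typical Oracle media names and may differ from yours):
```
mkdir -p /oracledata/stage/12.1.0.2
cd /oracledata/stage/12.1.0.2

# File names are examples; unzip whichever grid and database media you downloaded.
unzip -q /path/to/linuxamd64_12102_grid_1of2.zip
unzip -q /path/to/linuxamd64_12102_grid_2of2.zip
unzip -q /path/to/linuxamd64_12102_database_1of2.zip
unzip -q /path/to/linuxamd64_12102_database_2of2.zip
```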
21 |
22 | ## ASM
23 | Udev is used in the RAC node containers to give the ASM block devices the correct permissions and friendly names. ASMLib could also be used, but I stopped using it a couple of years ago because it appears it will eventually go away in favor of ASM Filter Driver (AFD).
24 |
25 | Modify the `99-asm-disks.rules` file to reflect the devices on the host system that you have designated as ASM disks. For example, I have designated /dev/sdd, /dev/sde, and /dev/sdf as the three disks that will be used in my DATA ASM disk group.
26 | ```
27 | KERNEL=="sdd", SYMLINK+="asmdisks/asm-clu-121-DATA-disk1", OWNER="54421", GROUP="54422"
28 | KERNEL=="sde", SYMLINK+="asmdisks/asm-clu-121-DATA-disk2", OWNER="54421", GROUP="54422"
29 | KERNEL=="sdf", SYMLINK+="asmdisks/asm-clu-121-DATA-disk3", OWNER="54421", GROUP="54422"
30 | ```
31 |
32 | NFS is used in the RAC node containers for the NDATA ASM disk group, which uses file devices served over NFS. The directory on the host OS that will be shared across the RAC node containers is `/oraclenfs`. Create three files on the host OS using `dd`.
33 | ```
34 | sudo dd if=/dev/zero of=/oraclenfs/asm-clu-121-NDATA-disk1 bs=1024k count=2000
35 | sudo dd if=/dev/zero of=/oraclenfs/asm-clu-121-NDATA-disk2 bs=1024k count=2000
36 | sudo dd if=/dev/zero of=/oraclenfs/asm-clu-121-NDATA-disk3 bs=1024k count=2000
37 |
38 | sudo chown 54421 /oraclenfs/asm*
39 | sudo chgrp 54422 /oraclenfs/asm*
40 | sudo chmod g+w /oraclenfs/asm*
41 | ```
42 |
43 |
44 | ## Networks
45 |
46 | The BIND, DHCPD, and RAC containers communicate over a 10.10.10.0/24 network. This is known within the cluster as the public network.
47 |
48 | Create the public virtual network.
49 | ```
50 | docker network create --subnet=10.10.10.0/24 pub
51 | ```
52 |
53 | The 11.11.11.0/24 network is known within the cluster as the private network. This will be used as the cluster interconnect. DHCPD will also serve IP addresses on this network.
54 |
55 | Create the private virtual network.
56 | ```
57 | docker network create --subnet=11.11.11.0/24 priv
58 | ```
59 |
60 |
61 | ## BIND
62 | The BIND container will be used for DNS for the cluster.
63 |
64 | Create the BIND container but don't start it until the networks have been added. Unless you need the administration GUI, disable it with `--env WEBMIN_ENABLED=false`. The `-4` option prevents the named/bind process from listening on IPv6 networks.
65 | ```
66 | docker create \
67 | --name bind \
68 | --hostname bind \
69 | --publish 53:53/tcp \
70 | --publish 53:53/udp \
71 | --volume /srv/docker/bind:/data \
72 | --env WEBMIN_ENABLED=false \
73 | sethmiller/bind \
74 | -4
75 | ```
76 |
77 | Connect the 10.10.10.0/24 network to the BIND container.
78 | ```
79 | docker network connect --ip 10.10.10.10 pub bind
80 | ```
81 |
82 | Start the BIND container.
83 | ```
84 | docker start bind
85 | ```
86 |
87 |
88 | ## DHCPD
89 | The DHCPD container will be used for assigning IP addresses to the cluster nodes. It is also responsible for updating DNS with hostname/IP pairs.
90 |
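DHCPD performs those dynamic DNS updates using the key generated during the BIND image build. For reference, a dynamic DNS configuration in `dhcpd.conf` generally has the following shape (an illustrative fragment with placeholder key and zone names, not this repository's actual `dhcpd.conf`):
```
# Illustrative only -- placeholder key and zone names.
ddns-update-style interim;

key ddns-key {
  algorithm hmac-md5;
  secret "<base64 secret shared with BIND>";
}

zone example.com. {
  primary 10.10.10.10;
  key ddns-key;
}
```
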
91 | Create the configuration directory.
92 | ```
93 | sudo mkdir -p /srv/docker/dhcpd
94 | sudo chmod 777 /srv/docker/dhcpd
95 | ```
96 |
97 | Copy the dhcpd.conf file to the configuration directory.
98 | ```
99 | cp dhcpd.conf /srv/docker/dhcpd/
100 | ```
101 |
102 | Create the DHCPD container but don't start it until the networks have been added.
103 | ```
104 | docker create \
105 | --name dhcpd \
106 | --hostname dhcpd \
107 | --volume /srv/docker/dhcpd:/data \
108 | --volume /srv/docker/bind/bind/etc:/keys \
109 | --dns 10.10.10.10 \
110 | networkboot/dhcpd
111 | ```
112 |
113 | Connect the pub and priv docker networks to the DHCPD container.
114 | ```
115 | docker network connect --ip 10.10.10.11 pub dhcpd
116 | docker network connect --ip 11.11.11.11 priv dhcpd
117 | ```
118 |
119 | Start the DHCPD container.
120 | ```
121 | docker start dhcpd
122 | ```
123 |
124 |
125 | ## NFS
126 | The NFS server will share a host OS directory with the RAC node containers over NFS.
127 |
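For reference, an NFS-Ganesha export of a host directory generally looks like the following (an illustrative fragment, not this repository's actual `ganesha.conf`):
```
# Illustrative only -- not this project's ganesha.conf.
EXPORT
{
    Export_Id = 1;
    Path = /oraclenfs;
    Pseudo = /oraclenfs;
    Access_Type = RW;
    Squash = No_Root_Squash;
    Protocols = 3,4;
    FSAL {
        Name = VFS;
    }
}
```
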
128 | Create the configuration directory.
129 | ```
130 | sudo mkdir -p /srv/docker/nfs
131 | sudo chmod 777 /srv/docker/nfs
132 | ```
133 |
134 | Copy the ganesha.conf file to the configuration directory.
135 | ```
136 | cp ganesha.conf /srv/docker/nfs/
137 | ```
138 |
139 | Create the NFS container.
140 | ```
141 | docker run \
142 | --detach \
143 | --privileged \
144 | --name nfs \
145 | --hostname nfs \
146 | --volume /srv/docker/nfs:/etc/ganesha \
147 | --volume /oraclenfs:/oraclenfs \
148 | --dns 10.10.10.10 \
149 | sethmiller/nfs
150 | ```
151 |
152 | Connect the pub docker network to the NFS container.
153 | ```
154 | docker network connect --ip 10.10.10.12 pub nfs
155 | ```
156 |
157 |
158 | ## RAC Node Image
159 | The RAC node container will be used for the grid infrastructure and database software. This process can be duplicated to create as many nodes as you want in your cluster.
160 |
161 | Create a custom service and a scripts directory.
162 | ```
163 | sudo mkdir -p /srv/docker/rac_nodes/custom_services
164 | sudo mkdir -p /srv/docker/scripts
165 |
166 | sudo chmod 777 /srv/docker/rac_nodes/custom_services
167 | sudo chmod 777 /srv/docker/scripts
168 | ```
169 |
170 | Copy the dhclient and network scripts from the repository to the custom service and scripts directories respectively.
171 | ```
172 | cp dhclient-rac1-eth-pub.service /srv/docker/rac_nodes/custom_services/
173 | cp dhclient-rac1-eth-priv.service /srv/docker/rac_nodes/custom_services/
174 |
175 | cp networks-rac1.sh /srv/docker/scripts/
176 | ```
177 |
178 | Create the RAC node container. The `/srv/docker/rac_nodes/custom_services` directory holds configuration files shared among all of the RAC node containers. The `/oracledata/stage` directory holds the Oracle installation files. The `/sys/fs/cgroup` directory is necessary for systemd to run in the containers. The grid installation will fail without at least 1.5GB of shared memory.
179 | ```
180 | docker run \
181 | --detach \
182 | --privileged \
183 | --name rac1 \
184 | --hostname rac1 \
185 | --volume /srv/docker/rac_nodes/custom_services:/usr/lib/custom_services \
186 | --volume /oracledata/stage:/stage \
187 | --volume /sys/fs/cgroup:/sys/fs/cgroup:ro \
188 | --shm-size 2048m \
189 | --dns 10.10.10.10 \
190 | sethmiller/giready \
191 | /usr/lib/systemd/systemd --system --unit=multi-user.target
192 | ```
193 |
194 | Add the two custom networks to the RAC node container. I initially tried to use the `docker network connect` commands that were used for the DHCPD container, but the network adapter name must be consistent across all of the RAC node containers and `docker network connect` does not allow you to specify an adapter name. I used to use a script called pipework, but the results were inconsistent, so I found the network namespace commands it was using and put them into individual scripts.
195 |
196 | Unlike the native docker network functions, the virtual adapters are not deleted automatically when the container is removed. Recreating the RAC containers over and over again without deleting the virtual adapters can cause problems, so `ip link delete` commands were added to the scripts to delete any previously existing virtual adapters before creating the new ones needed by the RAC node container. A sketch of the kind of plumbing these scripts perform follows the command below.
197 | ```
198 | sudo /srv/docker/scripts/networks-rac1.sh
199 | ```
200 |
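For reference, here is a minimal sketch of the kind of network namespace plumbing such a script performs for the public adapter. It is illustrative only and is not the actual contents of `networks-rac1.sh`; attaching the host end to a bridge and starting the dhclient services are omitted.
```
# Illustrative sketch only -- not the contents of networks-rac1.sh.
# Remove any leftover host-side adapter from a previous run.
sudo ip link delete rac1-pub 2>/dev/null || true

# Create a veth pair: rac1-pub stays on the host, eth-pub goes to the container.
sudo ip link add rac1-pub type veth peer name eth-pub

# Expose the container's network namespace to the ip command.
pid=$(docker inspect --format '{{.State.Pid}}' rac1)
sudo mkdir -p /var/run/netns
sudo ln -sf /proc/$pid/ns/net /var/run/netns/rac1

# Move the container end into the namespace and bring both ends up.
sudo ip link set eth-pub netns rac1
sudo ip netns exec rac1 ip link set eth-pub up
sudo ip link set rac1-pub up
```
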
201 | Copy the udev configuration file from the repository for the ASM disks into the custom services directory.
202 | ```
203 | cp 99-asm-disks.rules /srv/docker/rac_nodes/custom_services/
204 | ```
205 |
206 | Link the udev configuration file to the udev rules.d directory in the RAC node container.
207 | ```
208 | docker exec rac1 ln -s /usr/lib/custom_services/99-asm-disks.rules /etc/udev/rules.d/
209 | ```
210 |
211 | Tell udev to read the new rules configuration.
212 | ```
213 | docker exec rac1 udevadm control --reload-rules
214 | docker exec rac1 udevadm trigger
215 | ```
216 |
217 | Now my ASM disk devices look like this in the RAC node container.
218 | ```
219 | $ docker exec rac1 ls -l /dev/sd[d-f]
220 | brw-rw----. 1 grid asmadmin 8, 48 Oct 17 16:49 /dev/sdd
221 | brw-rw----. 1 grid asmadmin 8, 64 Oct 17 16:49 /dev/sde
222 | brw-rw----. 1 grid asmadmin 8, 80 Oct 17 16:49 /dev/sdf
223 | $ docker exec rac1 ls -ld /dev/asmdisks/
224 | drwxr-xr-x. 2 root root 100 Oct 17 16:49 /dev/asmdisks/
225 | $ docker exec rac1 ls -l /dev/asmdisks/
226 | total 0
227 | lrwxrwxrwx. 1 root root 6 Oct 17 16:49 asm-clu-121-DATA-disk1 -> ../sdd
228 | lrwxrwxrwx. 1 root root 6 Oct 17 16:49 asm-clu-121-DATA-disk2 -> ../sde
229 | lrwxrwxrwx. 1 root root 6 Oct 17 16:49 asm-clu-121-DATA-disk3 -> ../sdf
230 | ```
231 |
232 | Connect to the RAC node container and execute the grid infrastructure installer. This will install the grid software only.
233 |
234 | During the installation, you will see the message `Some of the optional prerequisites are not met`. This is normal and a consequence of running in a container.
235 | ```
236 | docker exec rac1 su - grid -c ' \
237 | /stage/12.1.0.2/grid/runInstaller -waitforcompletion \
238 | -ignoreSysPrereqs -silent -force \
239 | "INVENTORY_LOCATION=/u01/app/oraInventory" \
240 | "UNIX_GROUP_NAME=oinstall" \
241 | "ORACLE_HOME=/u01/app/12.1.0/grid" \
242 | "ORACLE_BASE=/u01/app/grid" \
243 | "oracle.install.option=CRS_SWONLY" \
244 | "oracle.install.asm.OSDBA=asmdba" \
245 | "oracle.install.asm.OSOPER=asmoper" \
246 | "oracle.install.asm.OSASM=asmadmin"'
247 | ```
248 |
249 | Run the two root scripts as root in the RAC node container.
250 | ```
251 | docker exec rac1 /u01/app/oraInventory/orainstRoot.sh
252 | docker exec rac1 /u01/app/12.1.0/grid/root.sh
253 | ```
254 |
255 | Connect to the RAC node container and execute the database installer. This will install the database software only.
256 |
257 | During the installation, you will see the message `Some of the optional prerequisites are not met`. This is normal and a consequence of running in a container.
258 | ```
259 | docker exec rac1 su - oracle -c ' \
260 | /stage/12.1.0.2/database/runInstaller -waitforcompletion \
261 | -ignoreSysPrereqs -silent -force \
262 | "oracle.install.option=INSTALL_DB_SWONLY" \
263 | "INVENTORY_LOCATION=/u01/app/oraInventory" \
264 | "UNIX_GROUP_NAME=oinstall" \
265 | "ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1" \
266 | "ORACLE_BASE=/u01/app/oracle" \
267 | "oracle.install.db.InstallEdition=EE" \
268 | "oracle.install.db.DBA_GROUP=dba" \
269 | "oracle.install.db.OPER_GROUP=oper" \
270 | "oracle.install.db.BACKUPDBA_GROUP=backupdba" \
271 | "oracle.install.db.DGDBA_GROUP=dgdba" \
272 | "oracle.install.db.KMDBA_GROUP=kmdba" \
273 | "DECLINE_SECURITY_UPDATES=true"'
274 | ```
275 |
276 | Run the root script as root in the RAC node container.
277 | ```
278 | docker exec rac1 /u01/app/oracle/product/12.1.0/dbhome_1/root.sh
279 | ```
280 |
281 | Exit the RAC node container and create a new image which will be used as the base of any additional RAC node containers.
282 | ```
283 | docker commit rac1 giinstalled
284 | ```
285 |
286 | ## First RAC Node Container (rac1)
287 | Create a new RAC node container from the image you just created.
288 | ```
289 | docker rm -f rac1
290 |
291 | docker run \
292 | --detach \
293 | --privileged \
294 | --name rac1 \
295 | --hostname rac1 \
296 | --volume /srv/docker/rac_nodes/custom_services:/usr/lib/custom_services \
297 | --volume /oracledata/stage:/stage \
298 | --volume /sys/fs/cgroup:/sys/fs/cgroup:ro \
299 | --shm-size 2048m \
300 | --dns 10.10.10.10 \
301 | giinstalled \
302 | /usr/lib/systemd/systemd --system --unit=multi-user.target
303 | ```
304 |
305 | Start the networks in the RAC node container as was done previously.
306 | ```
307 | sudo /srv/docker/scripts/networks-rac1.sh
308 | ```
309 |
310 | Configure the installed grid infrastructure.
311 |
312 | During the configuration, you will see the message `Some of the optional prerequisites are not met`. This is normal and a consequence of running in a container.
313 | ```
314 | docker exec rac1 su - grid -c ' \
315 | /u01/app/12.1.0/grid/crs/config/config.sh -waitforcompletion \
316 | -ignoreSysPrereqs -silent \
317 | "INVENTORY_LOCATION=/u01/app/oraInventory" \
318 | "oracle.install.option=CRS_CONFIG" \
319 | "ORACLE_BASE=/u01/app/grid" \
320 | "ORACLE_HOME=/u01/app/12.1.0/grid" \
321 | "oracle.install.asm.OSDBA=asmdba" \
322 | "oracle.install.asm.OSOPER=asmoper" \
323 | "oracle.install.asm.OSASM=asmadmin" \
324 | "oracle.install.crs.config.gpnp.scanName=clu-121-scan.clu-121.example.com" \
325 | "oracle.install.crs.config.gpnp.scanPort=1521 " \
326 | "oracle.install.crs.config.ClusterType=STANDARD" \
327 | "oracle.install.crs.config.clusterName=clu-121" \
328 | "oracle.install.crs.config.gpnp.configureGNS=true" \
329 | "oracle.install.crs.config.autoConfigureClusterNodeVIP=true" \
330 | "oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS" \
331 | "oracle.install.crs.config.gpnp.gnsSubDomain=clu-121.example.com" \
332 | "oracle.install.crs.config.gpnp.gnsVIPAddress=clu-121-gns.example.com" \
333 | "oracle.install.crs.config.clusterNodes=rac1:AUTO" \
334 | "oracle.install.crs.config.networkInterfaceList=eth-pub:10.10.10.0:1,eth-priv:11.11.11.0:2" \
335 | "oracle.install.crs.config.storageOption=LOCAL_ASM_STORAGE" \
336 | "oracle.install.crs.config.useIPMI=false" \
337 | "oracle.install.asm.SYSASMPassword=oracle_4U" \
338 | "oracle.install.asm.monitorPassword=oracle_4U" \
339 | "oracle.install.asm.diskGroup.name=DATA" \
340 | "oracle.install.asm.diskGroup.redundancy=EXTERNAL" \
341 | "oracle.install.asm.diskGroup.disks=/dev/asmdisks/asm-clu-121-DATA-disk1,/dev/asmdisks/asm-clu-121-DATA-disk2,/dev/asmdisks/asm-clu-121-DATA-disk3" \
342 | "oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asmdisks/*,/oraclenfs/asm*" \
343 | "oracle.install.asm.useExistingDiskGroup=false"'
344 | ```
345 |
346 | Run the root script as the root user.
347 | ```
348 | docker exec rac1 /u01/app/12.1.0/grid/root.sh
349 | ```
350 |
351 | Copy the tools configuration assistant response file from the repository to the custom services directory. Change the passwords in the file if necessary before copying. To save on resources and time, the response file is configured to not install the management database (GIMR). If you want to install the GIMR, remove the last three lines of the response file.
352 | ```
353 | cp tools_config.rsp /srv/docker/rac_nodes/custom_services/
354 | ```
355 |
356 | Run the tools configuration assistant.
357 | ```
358 | docker exec rac1 su - grid -c '/u01/app/12.1.0/grid/cfgtoollogs/configToolAllCommands \
359 | RESPONSE_FILE=/usr/lib/custom_services/tools_config.rsp'
360 | ```
361 |
362 | Delete the tools configuration assistant response file.
363 | ```
364 | rm -f /srv/docker/rac_nodes/custom_services/tools_config.rsp
365 | ```
366 |
367 | Since the cluster was not active when the database binaries were installed, the database installation was not enabled for RAC. This step recompiles the `oracle` executable for RAC.
368 | ```
369 | docker exec rac1 su - oracle -c 'export ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1 && \
370 | make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk rac_on && \
371 | make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk ioracle'
372 | ```
373 |
374 |
375 | ## Second RAC Node Container (rac2)
376 | Create a second RAC node container.
377 | ```
378 | docker run \
379 | --detach \
380 | --privileged \
381 | --name rac2 \
382 | --hostname rac2 \
383 | --volume /srv/docker/rac_nodes/custom_services:/usr/lib/custom_services \
384 | --volume /oracledata/stage:/stage \
385 | --volume /sys/fs/cgroup:/sys/fs/cgroup:ro \
386 | --shm-size 2048m \
387 | --dns 10.10.10.10 \
388 | giinstalled \
389 | /usr/lib/systemd/systemd --system --unit=multi-user.target
390 | ```
391 |
392 | Copy the dhclient and network scripts from the repository to the custom service and scripts directories respectively.
393 | ```
394 | cp dhclient-rac2-eth-pub.service /srv/docker/rac_nodes/custom_services/
395 | cp dhclient-rac2-eth-priv.service /srv/docker/rac_nodes/custom_services/
396 |
397 | cp networks-rac2.sh /srv/docker/scripts/
398 | ```
399 |
400 | Start the networks in the RAC node container.
401 | ```
402 | sudo /srv/docker/scripts/networks-rac2.sh
403 | ```
404 |
405 | Configure the grid infrastructure installation to join the existing cluster. Keep in mind that these commands must be executed on a node already part of the cluster (rac1).
406 | ```
407 | docker exec rac1 su - grid -c '/u01/app/12.1.0/grid/addnode/addnode.sh \
408 | "CLUSTER_NEW_NODES={rac2}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac2-vip}" \
409 | -waitforcompletion -silent -ignoreSysPrereqs -force -noCopy'
410 | ```
411 |
412 | Run the root script as the root user.
413 | ```
414 | docker exec rac2 /u01/app/12.1.0/grid/root.sh
415 | ```
416 |
417 | Recompile the `oracle` executable for RAC.
418 | ```
419 | docker exec rac2 su - oracle -c 'export ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1 && \
420 | make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk rac_on && \
421 | make -f $ORACLE_HOME/rdbms/lib/ins_rdbms.mk ioracle'
422 | ```
423 |
424 |
425 | ## Optional Tasks
426 |
427 | Create a database.
428 | ```
429 | docker exec rac1 su - oracle -c ' \
430 | /u01/app/oracle/product/12.1.0/dbhome_1/bin/dbca -createDatabase -silent \
431 | -templateName General_Purpose.dbc \
432 | -gdbName orcl \
433 | -sysPassword oracle_4U \
434 | -systemPassword oracle_4U \
435 | -storageType ASM \
436 | -diskGroupName DATA \
437 | -recoveryGroupName DATA \
438 | -characterSet AL32UTF8 \
439 | -nationalCharacterSet UTF8 \
440 | -totalMemory 1024 \
441 | -emConfiguration none \
442 | -nodelist rac1,rac2 \
443 | -createAsContainerDatabase True'
444 | ```
445 |
446 | Create the NDATA ASM disk group.
447 | ```
448 | cp oraclenfs.mount /srv/docker/rac_nodes/custom_services/
449 |
450 | docker exec rac1 ln -s /usr/lib/custom_services/oraclenfs.mount /etc/systemd/system/
451 | docker exec rac2 ln -s /usr/lib/custom_services/oraclenfs.mount /etc/systemd/system/
452 |
453 | docker exec rac1 systemctl daemon-reload
454 | docker exec rac2 systemctl daemon-reload
455 |
456 | docker exec rac1 systemctl start oraclenfs.mount
457 | docker exec rac2 systemctl start oraclenfs.mount
458 |
459 | docker exec rac1 su - grid -c "ORACLE_SID=+ASM1 /u01/app/12.1.0/grid/bin/asmca \
460 | -silent -createDiskGroup \
461 | -diskGroupName NDATA \
462 | -redundancy EXTERNAL \
463 | -disk '/oraclenfs/asm-clu-121-NDATA-disk1' \
464 | -disk '/oraclenfs/asm-clu-121-NDATA-disk2' \
465 | -disk '/oraclenfs/asm-clu-121-NDATA-disk3'"
472 | ```
473 |
474 | Confirm the clusterware resources are running.
475 | ```
476 | docker exec rac1 /u01/app/12.1.0/grid/bin/crsctl status resource -t
477 | ```
478 |
479 | ***
480 | If the ASM disks have existing headers that you want to clear, use dd to wipe out the headers.
481 | !!!WARNING!!! This will destroy these disks and anything on them. Make sure you are clearing the right disks.
482 | ```
483 | for i in sdd sde sdf; do
484 | sudo dd if=/dev/zero of=/dev/$i bs=100M count=1
485 | done
486 | ```
487 |
--------------------------------------------------------------------------------
/ansible/roles/common/vars/main.yml:
--------------------------------------------------------------------------------
1 | # Main variables file
2 | # Installation file locations are sourced from the files.yml in this same directory
3 |
4 |
5 | # ASM disk groups can be composed of block devices, file devices or both
6 | asm_disk_groups:
7 | - group: DATA
8 | disks:
9 | - rawpath: /dev/sdd
10 | udevpath: /dev/asmdisks/asm-clu-121-DATA-disk1
11 | type: block
12 | - rawpath: /dev/sde
13 | udevpath: /dev/asmdisks/asm-clu-121-DATA-disk2
14 | type: block
15 | - rawpath: /dev/sdf
16 | udevpath: /dev/asmdisks/asm-clu-121-DATA-disk3
17 | type: block
18 | - group: NDATA
19 | disks:
20 | - path: /oraclenfs/asm-clu-121-NDATA-disk1
21 | type: file
22 | - path: /oraclenfs/asm-clu-121-NDATA-disk2
23 | type: file
24 | - path: /oraclenfs/asm-clu-121-NDATA-disk3
25 | type: file
26 |
27 |
28 | # All OS users and groups should already exist
29 | operating_system:
30 | rac_node_directory: /srv/docker/rac_nodes/custom_services
31 | scripts_directory: /srv/docker/scripts
32 |
33 | grid_infrastructure:
34 | users:
35 | - name: grid
36 | title: owner
37 | uid: 54421
38 | groups:
39 | - name: oinstall
40 | title: inventory
41 | gid: 54321
42 | - name: asmdba
43 | title: osdba
44 | gid: 54421
45 | - name: asmadmin
46 | title: osasm
47 | gid: 54422
48 | - name: asmoper
49 | title: osoper
50 | gid: 54423
51 |
52 | database:
53 | users:
54 | - name: oracle
55 | title: owner
56 | uid: 54321
57 | groups:
58 | - name: dba
59 | title: osdba
60 | gid: 54322
61 | - name: oper
62 | title: osoper
63 | gid: 54323
64 | - name: backupdba
65 | title: osbackupdba
66 | gid: 54324
67 | - name: dgdba
68 | title: osdgdba
69 | gid: 54325
70 | - name: kmdba
71 | title: oskmdba
72 | gid: 54326
73 | - name: racdba
74 | title: osracdba
75 | gid: 54327
76 |
77 |
78 | oracle_binaries:
79 | - type: all
80 | oracle_inventory: /u01/app/oraInventory
81 |
82 | - type: grid
83 | version: 12.1.0.2
84 | oracle_base: /u01/app/grid
85 | oracle_home: /u01/app/12.1.0/grid
86 | cluster_name: clu-121
87 | scan_name: clu-121-scan.clu-121.example.com
88 | scan_port: 1521
89 | cluster_type: STANDARD
90 | cluster_storage: LOCAL_ASM_STORAGE
91 | cluster_asm_dg_name: DATA
92 | cluster_asm_dg_redundancy: EXTERNAL
93 | asm_discovery_string: /dev/asmdisks/*,/oraclenfs/asm*
94 | gns_vip: clu-121-gns.example.com # Needs to already exist in DNS
95 | gns_sub_domain: clu-121.example.com # Needs to already exist in DNS
96 | install_management_database: False
97 | asm_passwords:
98 | sysasm: oracle_4U
99 | asmsnmp: oracle_4U
100 |
101 | - type: database
102 | version: 12.1.0.2
103 | oracle_base: /u01/app/oracle
104 | oracle_home: /u01/app/oracle/product/12.1.0/dbhome_1
105 | database_passwords:
106 | sys: oracle_4U
107 | system: oracle_4U
108 |
109 |
110 | # Find 'file_locations' variables in the files.yml in this same directory
111 | installation_files:
112 | - type: opatch
113 | version: 12.1.0.2
114 | files:
115 | - name: "{{ file_locations.file1 }}"
116 | creates: OPatch/opatch
117 |
118 | # Should include all patch subdirectories in bundle patch
119 | # opatch will automatically skip patches that don't apply to the home that is being patched
120 | - type: bundle
121 | version: 12.1.0.2
122 | patch_numbers:
123 | - 23615334/21436941
124 | - 23615334/23177536
125 | - 23615334/23144544
126 | - 23615334/23054341
127 | - 23615334/23520664
128 | files:
129 | - name: "{{ file_locations.file2 }}"
130 | # Unzip each file and pick one of the files or directories it creates
131 | creates: 23615334/23144544/23144544/files/rdbms/lib
132 |
133 | - type: oneoff
134 | version: 12.1.0.2
135 | patch_numbers:
136 | - 19404068
137 | files:
138 | - name: "{{ file_locations.file3 }}"
139 | creates: 19404068/files/rdbms/lib
140 |
141 | - type: grid
142 | version: 12.1.0.2
143 | files:
144 | - name: "{{ file_locations.file4 }}"
145 | creates: grid/stage/Components/oracle.oraolap.dbscripts/12.1.0.2.0
146 | - name: "{{ file_locations.file5 }}"
147 | creates: grid/stage/Components/oracle.has.crs/12.1.0.2.0
148 | installer_parameters:
149 | - -waitforcompletion
150 | - -ignoreSysPrereqs
151 | - -silent
152 | - -force
153 | - oracle.install.option=CRS_SWONLY
154 | - "INVENTORY_LOCATION={{ oracle_binaries |
155 | selectattr('type', 'equalto', 'all') |
156 | map(attribute='oracle_inventory') | first }}"
157 | - "UNIX_GROUP_NAME={{ operating_system.grid_infrastructure.groups |
158 | selectattr('title', 'equalto', 'inventory') |
159 | map(attribute='name') | first }}"
160 | - "ORACLE_HOME={{ oracle_binaries |
161 | selectattr('type', 'equalto', 'grid') |
162 | selectattr('version', 'equalto', '12.1.0.2') |
163 | map(attribute='oracle_home') | first }}"
164 | - "ORACLE_BASE={{ oracle_binaries |
165 | selectattr('type', 'equalto', 'grid') |
166 | selectattr('version', 'equalto', '12.1.0.2') |
167 | map(attribute='oracle_base') | first }}"
168 | - "oracle.install.asm.OSDBA={{ operating_system.grid_infrastructure.groups |
169 | selectattr('title', 'equalto', 'osdba') |
170 | map(attribute='name') | first }}"
171 | - "oracle.install.asm.OSOPER={{ operating_system.grid_infrastructure.groups |
172 | selectattr('title', 'equalto', 'osoper') |
173 | map(attribute='name') | first }}"
174 | - "oracle.install.asm.OSASM={{ operating_system.grid_infrastructure.groups |
175 | selectattr('title', 'equalto', 'osasm') |
176 | map(attribute='name') | first }}"
177 | configuration_parameters:
178 | - -waitforcompletion
179 | - -ignoreSysPrereqs
180 | - -silent
181 | - -force
182 | - oracle.install.option=CRS_CONFIG
183 | - oracle.install.crs.config.gpnp.configureGNS=true
184 | - oracle.install.crs.config.autoConfigureClusterNodeVIP=true
185 | - oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS
186 | - oracle.install.crs.config.useIPMI=false
187 | - oracle.install.asm.useExistingDiskGroup=false
188 | - "INVENTORY_LOCATION={{ oracle_binaries |
189 | selectattr('type', 'equalto', 'all') |
190 | map(attribute='oracle_inventory') | first }}"
191 | - "UNIX_GROUP_NAME={{ operating_system.grid_infrastructure.groups |
192 | selectattr('title', 'equalto', 'inventory') |
193 | map(attribute='name') | first }}"
194 | - "ORACLE_HOME={{ oracle_binaries |
195 | selectattr('type', 'equalto', 'grid') |
196 | selectattr('version', 'equalto', '12.1.0.2') |
197 | map(attribute='oracle_home') | first }}"
198 | - "ORACLE_BASE={{ oracle_binaries |
199 | selectattr('type', 'equalto', 'grid') |
200 | selectattr('version', 'equalto', '12.1.0.2') |
201 | map(attribute='oracle_base') | first }}"
202 | - "oracle.install.asm.OSDBA={{ operating_system.grid_infrastructure.groups |
203 | selectattr('title', 'equalto', 'osdba') |
204 | map(attribute='name') | first }}"
205 | - "oracle.install.asm.OSOPER={{ operating_system.grid_infrastructure.groups |
206 | selectattr('title', 'equalto', 'osoper') |
207 | map(attribute='name') | first }}"
208 | - "oracle.install.asm.OSASM={{ operating_system.grid_infrastructure.groups |
209 | selectattr('title', 'equalto', 'osasm') |
210 | map(attribute='name') | first }}"
211 | - "oracle.install.crs.config.gpnp.scanName={{ oracle_binaries |
212 | selectattr('type', 'equalto', 'grid') |
213 | selectattr('version', 'equalto', '12.1.0.2') |
214 | map(attribute='scan_name') | first }}"
215 | - "oracle.install.crs.config.gpnp.scanPort={{ oracle_binaries |
216 | selectattr('type', 'equalto', 'grid') |
217 | selectattr('version', 'equalto', '12.1.0.2') |
218 | map(attribute='scan_port') | first }}"
219 | - "oracle.install.crs.config.ClusterType={{ oracle_binaries |
220 | selectattr('type', 'equalto', 'grid') |
221 | selectattr('version', 'equalto', '12.1.0.2') |
222 | map(attribute='cluster_type') | first }}"
223 | - "oracle.install.crs.config.clusterName={{ oracle_binaries |
224 | selectattr('type', 'equalto', 'grid') |
225 | selectattr('version', 'equalto', '12.1.0.2') |
226 | map(attribute='cluster_name') | first }}"
227 | - "oracle.install.crs.config.gpnp.gnsSubDomain={{ oracle_binaries |
228 | selectattr('type', 'equalto', 'grid') |
229 | selectattr('version', 'equalto', '12.1.0.2') |
230 | map(attribute='gns_sub_domain') | first }}"
231 | - "oracle.install.crs.config.gpnp.gnsVIPAddress={{ oracle_binaries |
232 | selectattr('type', 'equalto', 'grid') |
233 | selectattr('version', 'equalto', '12.1.0.2') |
234 | map(attribute='gns_vip') | first }}"
235 | - "oracle.install.crs.config.clusterNodes={{ first_rac_node }}:AUTO"
236 | - "oracle.install.crs.config.networkInterfaceList={{
237 | docker.networks |
238 | selectattr('type', 'equalto', 'public') |
239 | map(attribute='network_name') | first
240 | }}:{{
241 | docker.networks |
242 | selectattr('type', 'equalto', 'public') |
243 | map(attribute='subnet') | first
244 | }}:1,{{
245 | docker.networks |
246 | selectattr('type', 'equalto', 'private') |
247 | map(attribute='network_name') | first
248 | }}:{{
249 | docker.networks |
250 | selectattr('type', 'equalto', 'private') |
251 | map(attribute='subnet') | first
252 | }}:2"
253 | - "oracle.install.crs.config.storageOption={{ oracle_binaries |
254 | selectattr('type', 'equalto', 'grid') |
255 | selectattr('version', 'equalto', '12.1.0.2') |
256 | map(attribute='cluster_storage') | first }}"
257 | - "oracle.install.asm.SYSASMPassword={{ oracle_binaries |
258 | selectattr('type', 'equalto', 'grid') |
259 | selectattr('version', 'equalto', '12.1.0.2') |
260 | map(attribute='asm_passwords.sysasm') | first }}"
261 | - "oracle.install.asm.monitorPassword={{ oracle_binaries |
262 | selectattr('type', 'equalto', 'grid') |
263 | selectattr('version', 'equalto', '12.1.0.2') |
264 | map(attribute='asm_passwords.asmsnmp') | first }}"
265 | - "oracle.install.asm.diskGroup.name={{ oracle_binaries |
266 | selectattr('type', 'equalto', 'grid') |
267 | selectattr('version', 'equalto', '12.1.0.2') |
268 | map(attribute='cluster_asm_dg_name') | first }}"
269 | - "oracle.install.asm.diskGroup.redundancy={{ oracle_binaries |
270 | selectattr('type', 'equalto', 'grid') |
271 | selectattr('version', 'equalto', '12.1.0.2') |
272 | map(attribute='cluster_asm_dg_redundancy') | first }}"
273 | - "oracle.install.asm.diskGroup.disks=
274 | {%- set comma = joiner(',') -%}
275 | {% for disk in asm_disk_groups |
276 | selectattr('group', 'equalto',
277 | oracle_binaries |
278 | selectattr('type', 'equalto', 'grid') |
279 | selectattr('version', 'equalto', '12.1.0.2') |
280 | map(attribute='cluster_asm_dg_name') | first) |
281 | map(attribute='disks') -%}
282 | {% for path in disk | map(attribute='udevpath') -%}
283 | {{ comma() }}{{ path }}
284 | {%- endfor %}
285 | {%- endfor %}"
286 | - "oracle.install.asm.diskGroup.diskDiscoveryString={{ oracle_binaries |
287 | selectattr('type', 'equalto', 'grid') |
288 | selectattr('version', 'equalto', '12.1.0.2') |
289 | map(attribute='asm_discovery_string') | first }}"
290 | tools_configuration_parameters:
291 | - "oracle.assistants.asm|S_ASMPASSWORD={{ oracle_binaries |
292 | selectattr('type', 'equalto', 'grid') |
293 | selectattr('version', 'equalto', '12.1.0.2') |
294 | map(attribute='asm_passwords.sysasm') | first }}"
295 | - "oracle.assistants.asm|S_ASMMONITORPASSWORD={{ oracle_binaries |
296 | selectattr('type', 'equalto', 'grid') |
297 | selectattr('version', 'equalto', '12.1.0.2') |
298 | map(attribute='asm_passwords.asmsnmp') | first }}"
299 | - "oracle.crs|oracle_install_crs_ConfigureMgmtDB={{ oracle_binaries |
300 | selectattr('type', 'equalto', 'grid') |
301 | selectattr('version', 'equalto', '12.1.0.2') |
302 | map(attribute='install_management_database') | first }}"
303 | - "oracle.crs|oracle_install_crs_MgmtDB_CDB={{ oracle_binaries |
304 | selectattr('type', 'equalto', 'grid') |
305 | selectattr('version', 'equalto', '12.1.0.2') |
306 | map(attribute='install_management_database') | first }}"
307 | - "oracle.crs|oracle_install_crs_MgmtDB_Std={{ oracle_binaries |
308 | selectattr('type', 'equalto', 'grid') |
309 | selectattr('version', 'equalto', '12.1.0.2') |
310 | map(attribute='install_management_database') | first }}"
311 |
312 | - type: database
313 | version: 12.1.0.2
314 | files:
315 | - name: "{{ file_locations.file6 }}"
316 | creates: database/stage/Components/oracle.oraolap.dbscripts/12.1.0.2.0
317 | - name: "{{ file_locations.file7 }}"
318 | creates: database/stage/Components/oracle.rdbms/12.1.0.2.0
319 | installer_parameters:
320 | - -waitforcompletion
321 | - -ignoreSysPrereqs
322 | - -silent
323 | - -force
324 | - oracle.install.option=INSTALL_DB_SWONLY
325 | - oracle.install.db.InstallEdition=EE
326 | - DECLINE_SECURITY_UPDATES=true
327 | - "INVENTORY_LOCATION={{ oracle_binaries |
328 | selectattr('type', 'equalto', 'all') |
329 | map(attribute='oracle_inventory') | first }}"
330 | - "UNIX_GROUP_NAME={{ operating_system.grid_infrastructure.groups |
331 | selectattr('title', 'equalto', 'inventory') |
332 | map(attribute='name') | first }}"
333 | - "ORACLE_HOME={{ oracle_binaries |
334 | selectattr('type', 'equalto', 'database') |
335 | selectattr('version', 'equalto', '12.1.0.2') |
336 | map(attribute='oracle_home') | first }}"
337 | - "ORACLE_BASE={{ oracle_binaries |
338 | selectattr('type', 'equalto', 'database') |
339 | selectattr('version', 'equalto', '12.1.0.2') |
340 | map(attribute='oracle_base') | first }}"
341 | - "oracle.install.db.DBA_GROUP={{ operating_system.database.groups |
342 | selectattr('title', 'equalto', 'osdba') |
343 | map(attribute='name') | first }}"
344 | - "oracle.install.db.OPER_GROUP={{ operating_system.database.groups |
345 | selectattr('title', 'equalto', 'osoper') |
346 | map(attribute='name') | first }}"
347 | - "oracle.install.db.BACKUPDBA_GROUP={{ operating_system.database.groups |
348 | selectattr('title', 'equalto', 'osbackupdba') |
349 | map(attribute='name') | first }}"
350 | - "oracle.install.db.DGDBA_GROUP={{ operating_system.database.groups |
351 | selectattr('title', 'equalto', 'osdgdba') |
352 | map(attribute='name') | first }}"
353 | - "oracle.install.db.KMDBA_GROUP={{ operating_system.database.groups |
354 | selectattr('title', 'equalto', 'oskmdba') |
355 | map(attribute='name') | first }}"
356 |
357 | databases:
358 | - version: 12.1.0.2
359 | parameters:
360 | - -createDatabase
361 | - -silent
362 | - -templateName General_Purpose.dbc
363 | - -gdbName orcl
364 | - "-sysPassword {{ oracle_binaries |
365 | selectattr('type', 'equalto', 'database') |
366 | selectattr('version', 'equalto', '12.1.0.2') |
367 | map(attribute='database_passwords.sys') | first }}"
368 | - "-systemPassword {{ oracle_binaries |
369 | selectattr('type', 'equalto', 'database') |
370 | selectattr('version', 'equalto', '12.1.0.2') |
371 | map(attribute='database_passwords.system') | first }}"
372 | - -storageType ASM
373 | - -diskGroupName DATA
374 | - -recoveryGroupName DATA
375 | - -characterSet AL32UTF8
376 | - -nationalCharacterSet UTF8
377 | - -totalMemory 1024
378 | - -emConfiguration none
379 | - -nodelist rac1,rac2
380 | - -createAsContainerDatabase True
381 |
382 | docker:
383 | networks:
384 | - type: public
385 | name: pub
386 | subnet: 10.10.10.0
387 | cidr: 24
388 | network_name: eth-pub
389 | - type: private
390 | name: priv
391 | subnet: 11.11.11.0
392 | cidr: 24
393 | network_name: eth-priv
394 |
395 | containers:
396 | - name: bind
397 | hostname: bind
398 | type: dns
399 | image: sethmiller/bind
400 | ports:
401 | - "53:53/tcp"
402 | - "53:53/udp"
403 | volumes:
404 | - "/srv/docker/bind:/data"
405 | networks:
406 | - name: "pub"
407 | ipv4_address: "10.10.10.10"
408 | command: "-4"
409 | env:
410 | WEBMIN_ENABLED: "false"
411 |
412 | - name: dhcpd
413 | hostname: dhcpd
414 | type: dhcp
415 | image: networkboot/dhcpd
416 | config_directory: /srv/docker/dhcpd
417 | config_file: dhcpd.conf
418 | volumes:
419 | - "/srv/docker/dhcpd:/data"
420 | - "/srv/docker/bind/bind/etc:/keys"
421 | networks:
422 | - name: "pub"
423 | ipv4_address: "10.10.10.11"
424 | - name: "priv"
425 | ipv4_address: "11.11.11.11"
426 | dns: 10.10.10.10
427 |
428 | - name: nfs
429 | hostname: nfs
430 | type: nfs
431 | image: sethmiller/nfs
432 | config_directory: /srv/docker/nfs
433 | config_file: ganesha.conf
434 | volumes:
435 | - "/srv/docker/nfs:/etc/ganesha"
436 | - "/oraclenfs:/oraclenfs"
437 | networks:
438 | - name: "pub"
439 | ipv4_address: "10.10.10.12"
440 | dns: 10.10.10.10
441 |
442 | - name: rac1
443 | hostname: rac1
444 | type: rac_node
445 | designation: first
446 | image: sethmiller/giready
447 | volumes:
448 | - "/srv/docker/rac_nodes/custom_services:/usr/lib/custom_services"
449 | - "/oracledata/stage:/stage"
450 | - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
451 | networks:
452 | - name: pub
453 | dhcp_hostname: rac1
454 | dhclient_pid: /var/run/dhclient-eth-pub.pid
455 | ipv4_address: 0.0.0.0
456 | cidr: 24
457 | internal_network_name: eth-pub
458 | external_network_name: rac1-pub
459 | - name: priv
460 | dhcp_hostname: rac1-priv
461 | dhclient_pid: /var/run/dhclient-eth-priv.pid
462 | ipv4_address: 0.0.0.0
463 | cidr: 24
464 | internal_network_name: eth-priv
465 | external_network_name: rac1-priv
466 | dns: 10.10.10.10
467 | shm_size: 2048m
468 | command: "/usr/lib/systemd/systemd --system --unit=multi-user.target"
469 |
470 | - name: rac2
471 | hostname: rac2
472 | type: rac_node
473 | designation: additional
474 | image: giinstalled
475 | volumes:
476 | - "/srv/docker/rac_nodes/custom_services:/usr/lib/custom_services"
477 | - "/oracledata/stage:/stage"
478 | - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
479 | networks:
480 | - name: pub
481 | dhcp_hostname: rac2
482 | dhclient_pid: /var/run/dhclient-eth-pub.pid
483 | ipv4_address: 0.0.0.0
484 | cidr: 24
485 | internal_network_name: eth-pub
486 | external_network_name: rac2-pub
487 | - name: priv
488 | dhcp_hostname: rac2-priv
489 | dhclient_pid: /var/run/dhclient-eth-priv.pid
490 | ipv4_address: 0.0.0.0
491 | cidr: 24
492 | internal_network_name: eth-priv
493 | external_network_name: rac2-priv
494 | dns: 10.10.10.10
495 | shm_size: 2048m
496 | command: "/usr/lib/systemd/systemd --system --unit=multi-user.target"
497 |
498 |
499 | first_rac_node: >-
500 | {{ docker.containers |
501 | selectattr('designation', 'defined') |
502 | selectattr('type', 'equalto', 'rac_node') |
503 | selectattr('designation', 'equalto', 'first') |
504 | map(attribute='name') | first }}
505 |
--------------------------------------------------------------------------------
/docker.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # (c) 2016 Paul Durivage
4 | # Chris Houseknecht
5 | # James Tanner
6 | #
7 | # This file is part of Ansible.
8 | #
9 | # Ansible is free software: you can redistribute it and/or modify
10 | # it under the terms of the GNU General Public License as published by
11 | # the Free Software Foundation, either version 3 of the License, or
12 | # (at your option) any later version.
13 | #
14 | # Ansible is distributed in the hope that it will be useful,
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 | # GNU General Public License for more details.
18 | #
19 | # You should have received a copy of the GNU General Public License
20 | # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
21 | #
22 |
23 | DOCUMENTATION = '''
24 |
25 | Docker Inventory Script
26 | =======================
27 | The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
28 | because the inventory is generated at run-time rather than being read from a static file. The script generates the
29 | inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
30 | script contacts can be defined using environment variables or a configuration file.
31 |
32 | Requirements
33 | ------------
34 |
35 | Using the docker modules requires having docker-py
36 | installed on the host running Ansible. To install docker-py:
37 |
38 | pip install docker-py
39 |
40 |
41 | Run for Specific Host
42 | ---------------------
43 | When run for a specific container using the --host option this script returns the following hostvars:
44 |
45 | {
46 | "ansible_ssh_host": "",
47 | "ansible_ssh_port": 0,
48 | "docker_apparmorprofile": "",
49 | "docker_args": [],
50 | "docker_config": {
51 | "AttachStderr": false,
52 | "AttachStdin": false,
53 | "AttachStdout": false,
54 | "Cmd": [
55 | "/hello"
56 | ],
57 | "Domainname": "",
58 | "Entrypoint": null,
59 | "Env": null,
60 | "Hostname": "9f2f80b0a702",
61 | "Image": "hello-world",
62 | "Labels": {},
63 | "OnBuild": null,
64 | "OpenStdin": false,
65 | "StdinOnce": false,
66 | "Tty": false,
67 | "User": "",
68 | "Volumes": null,
69 | "WorkingDir": ""
70 | },
71 | "docker_created": "2016-04-18T02:05:59.659599249Z",
72 | "docker_driver": "aufs",
73 | "docker_execdriver": "native-0.2",
74 | "docker_execids": null,
75 | "docker_graphdriver": {
76 | "Data": null,
77 | "Name": "aufs"
78 | },
79 | "docker_hostconfig": {
80 | "Binds": null,
81 | "BlkioWeight": 0,
82 | "CapAdd": null,
83 | "CapDrop": null,
84 | "CgroupParent": "",
85 | "ConsoleSize": [
86 | 0,
87 | 0
88 | ],
89 | "ContainerIDFile": "",
90 | "CpuPeriod": 0,
91 | "CpuQuota": 0,
92 | "CpuShares": 0,
93 | "CpusetCpus": "",
94 | "CpusetMems": "",
95 | "Devices": null,
96 | "Dns": null,
97 | "DnsOptions": null,
98 | "DnsSearch": null,
99 | "ExtraHosts": null,
100 | "GroupAdd": null,
101 | "IpcMode": "",
102 | "KernelMemory": 0,
103 | "Links": null,
104 | "LogConfig": {
105 | "Config": {},
106 | "Type": "json-file"
107 | },
108 | "LxcConf": null,
109 | "Memory": 0,
110 | "MemoryReservation": 0,
111 | "MemorySwap": 0,
112 | "MemorySwappiness": null,
113 | "NetworkMode": "default",
114 | "OomKillDisable": false,
115 | "PidMode": "host",
116 | "PortBindings": null,
117 | "Privileged": false,
118 | "PublishAllPorts": false,
119 | "ReadonlyRootfs": false,
120 | "RestartPolicy": {
121 | "MaximumRetryCount": 0,
122 | "Name": ""
123 | },
124 | "SecurityOpt": [
125 | "label:disable"
126 | ],
127 | "UTSMode": "",
128 | "Ulimits": null,
129 | "VolumeDriver": "",
130 | "VolumesFrom": null
131 | },
132 | "docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname",
133 | "docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts",
134 | "docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14",
135 | "docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7",
136 | "docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14-json.log",
137 | "docker_mountlabel": "",
138 | "docker_mounts": [],
139 | "docker_name": "/hello-world",
140 | "docker_networksettings": {
141 | "Bridge": "",
142 | "EndpointID": "",
143 | "Gateway": "",
144 | "GlobalIPv6Address": "",
145 | "GlobalIPv6PrefixLen": 0,
146 | "HairpinMode": false,
147 | "IPAddress": "",
148 | "IPPrefixLen": 0,
149 | "IPv6Gateway": "",
150 | "LinkLocalIPv6Address": "",
151 | "LinkLocalIPv6PrefixLen": 0,
152 | "MacAddress": "",
153 | "Networks": {
154 | "bridge": {
155 | "EndpointID": "",
156 | "Gateway": "",
157 | "GlobalIPv6Address": "",
158 | "GlobalIPv6PrefixLen": 0,
159 | "IPAddress": "",
160 | "IPPrefixLen": 0,
161 | "IPv6Gateway": "",
162 | "MacAddress": ""
163 | }
164 | },
165 | "Ports": null,
166 | "SandboxID": "",
167 | "SandboxKey": "",
168 | "SecondaryIPAddresses": null,
169 | "SecondaryIPv6Addresses": null
170 | },
171 | "docker_path": "/hello",
172 | "docker_processlabel": "",
173 | "docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf",
174 | "docker_restartcount": 0,
175 | "docker_short_id": "9f2f80b0a7023",
176 | "docker_state": {
177 | "Dead": false,
178 | "Error": "",
179 | "ExitCode": 0,
180 | "FinishedAt": "2016-04-18T02:06:00.296619369Z",
181 | "OOMKilled": false,
182 | "Paused": false,
183 | "Pid": 0,
184 | "Restarting": false,
185 | "Running": false,
186 | "StartedAt": "2016-04-18T02:06:00.272065041Z",
187 | "Status": "exited"
188 | }
189 | }
190 |
191 | Groups
192 | ------
193 | When run in --list mode (the default), container instances are grouped by:
194 |
195 | - container id
196 | - container name
197 | - container short id
198 | - image_name (image_<image name>)
199 | - docker_host
200 | - running
201 | - stopped
202 |
203 |
204 | Configuration:
205 | --------------
206 | You can control the behavior of the inventory script by passing arguments, defining environment variables, or
207 | creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence
208 | is command line args, then the docker.yml file and finally environment variables.
209 |
210 | Environment variables:
211 | ......................
212 |
213 | To connect to a single Docker API the following variables can be defined in the environment to control the connection
214 | options. These are the same environment variables used by the Docker modules.
215 |
216 | DOCKER_HOST
217 | The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
218 |
219 | DOCKER_API_VERSION:
220 | The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
221 | by docker-py.
222 |
223 | DOCKER_TIMEOUT:
224 |     The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
225 |
226 | DOCKER_TLS:
227 | Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
228 | Defaults to False.
229 |
230 | DOCKER_TLS_VERIFY:
231 | Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
232 | Default is False
233 |
234 | DOCKER_TLS_HOSTNAME:
235 | When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
236 | to localhost.
237 |
238 | DOCKER_CERT_PATH:
239 | Path to the directory containing the client certificate, client key and CA certificate.
240 |
241 | DOCKER_SSL_VERSION:
242 | Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
243 | was 1.0
244 |
245 | In addition to the connection variables there are a couple variables used to control the execution and output of the
246 | script:
247 |
248 | DOCKER_CONFIG_FILE
249 | Path to the configuration file. Defaults to ./docker.yml.
250 |
251 | DOCKER_PRIVATE_SSH_PORT:
252 | The private port (container port) on which SSH is listening for connections. Defaults to 22.
253 |
254 | DOCKER_DEFAULT_IP:
255 | The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
256 |
257 |
258 | Configuration File
259 | ..................
260 |
261 | Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
262 |
263 | The default name of the file is derived from the name of the inventory script. By default, the script will look for
264 | the basename of the script (i.e. docker) with an extension of '.yml'.
265 |
266 | You can also override the default name of the script by defining DOCKER_CONFIG_FILE in the environment.
267 |
268 | Here's what you can define in docker.yml:
269 |
270 | defaults
271 | Defines a default connection. Defaults will be taken from this and applied to any values not provided
272 | for a host defined in the hosts list.
273 |
274 | hosts
275 | If you wish to get inventory from more than one Docker host, define a hosts list.
276 |
277 | For the default host and each host in the hosts list define the following attributes:
278 |
279 | host:
280 | description: The URL or Unix socket path used to connect to the Docker API.
281 | required: yes
282 |
283 | tls:
284 | description: Connect using TLS without verifying the authenticity of the Docker host server.
285 | default: false
286 | required: false
287 |
288 | tls_verify:
289 |     description: Connect using TLS and verify the authenticity of the Docker host server.
290 | default: false
291 | required: false
292 |
293 | cert_path:
294 | description: Path to the client's TLS certificate file.
295 | default: null
296 | required: false
297 |
298 | cacert_path:
299 | description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
300 | default: null
301 | required: false
302 |
303 | key_path:
304 | description: Path to the client's TLS key file.
305 | default: null
306 | required: false
307 |
308 | version:
309 | description: The Docker API version.
310 | required: false
311 | default: will be supplied by the docker-py module.
312 |
313 | timeout:
314 | description: The amount of time in seconds to wait on an API response.
315 | required: false
316 | default: 60
317 |
318 | default_ip:
319 | description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
320 | '0.0.0.0'.
321 | required: false
322 | default: 127.0.0.1
323 |
324 | private_ssh_port:
325 | description: The port containers use for SSH
326 | required: false
327 | default: 22
328 |
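A minimal docker.yml using these settings might look like the following (the values
shown are illustrative):

    defaults:
      host: unix://var/run/docker.sock
      default_ip: 127.0.0.1
      private_ssh_port: 22

    hosts:
      - host: tcp://10.10.10.1:2376
        tls_verify: true
        cert_path: /path/to/cert.pem
        cacert_path: /path/to/ca.pem
        key_path: /path/to/key.pem
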
329 | Examples
330 | --------
331 |
332 | # Connect to the Docker API on localhost port 4243 and format the JSON output
333 | DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
334 |
335 | # Any container's ssh port exposed on 0.0.0.0 will be mapped to
336 | # another IP address (where Ansible will attempt to connect via SSH)
337 | DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
338 |
339 | # Run as input to a playbook:
340 | ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
341 |
342 | # Simple playbook to invoke with the above example:
343 |
344 | - name: Test docker_inventory
345 | hosts: all
346 | connection: local
347 | gather_facts: no
348 | tasks:
349 | - debug: msg="Container - {{ inventory_hostname }}"
350 |
351 | '''
352 |
353 | import os
354 | import sys
355 | import json
356 | import argparse
357 | import re
358 | import yaml
359 |
360 | from collections import defaultdict
361 | # Manipulation of the path is needed because the docker-py
362 | # module is imported by the name docker, and because this file
363 | # is also named docker
364 | for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
365 | try:
366 | del sys.path[sys.path.index(path)]
367 | except:
368 | pass
369 |
370 | HAS_DOCKER_PY = True
371 | HAS_DOCKER_ERROR = False
372 |
373 | try:
374 | from docker import Client
375 | from docker.errors import APIError, TLSParameterError
376 | from docker.tls import TLSConfig
377 | from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
378 | except ImportError as exc:
379 | HAS_DOCKER_ERROR = str(exc)
380 | HAS_DOCKER_PY = False
381 |
382 | DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
383 | DEFAULT_TLS = False
384 | DEFAULT_TLS_VERIFY = False
385 | DEFAULT_IP = '127.0.0.1'
386 | DEFAULT_SSH_PORT = '22'
387 |
388 | BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
389 | BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
390 |
391 |
392 | DOCKER_ENV_ARGS = dict(
393 | config_file='DOCKER_CONFIG_FILE',
394 | docker_host='DOCKER_HOST',
395 | api_version='DOCKER_API_VERSION',
396 | cert_path='DOCKER_CERT_PATH',
397 | ssl_version='DOCKER_SSL_VERSION',
398 | tls='DOCKER_TLS',
399 | tls_verify='DOCKER_TLS_VERIFY',
400 | timeout='DOCKER_TIMEOUT',
401 | private_ssh_port='DOCKER_DEFAULT_SSH_PORT',
402 | default_ip='DOCKER_DEFAULT_IP',
403 | )
404 |
405 |
406 | def fail(msg):
407 | sys.stderr.write("%s\n" % msg)
408 | sys.exit(1)
409 |
410 |
411 | def log(msg, pretty_print=False):
412 | if pretty_print:
413 | print(json.dumps(msg, sort_keys=True, indent=2))
414 | else:
415 | print(msg + u'\n')
416 |
417 |
418 | class AnsibleDockerClient(Client):
419 | def __init__(self, auth_params, debug):
420 |
421 | self.auth_params = auth_params
422 | self.debug = debug
423 | self._connect_params = self._get_connect_params()
424 |
425 | try:
426 | super(AnsibleDockerClient, self).__init__(**self._connect_params)
427 | except APIError as exc:
428 | self.fail("Docker API error: %s" % exc)
429 | except Exception as exc:
430 | self.fail("Error connecting: %s" % exc)
431 |
432 | def fail(self, msg):
433 | fail(msg)
434 |
435 | def log(self, msg, pretty_print=False):
436 | if self.debug:
437 | log(msg, pretty_print)
438 |
439 | def _get_tls_config(self, **kwargs):
440 | self.log("get_tls_config:")
441 | for key in kwargs:
442 | self.log(" %s: %s" % (key, kwargs[key]))
443 | try:
444 | tls_config = TLSConfig(**kwargs)
445 | return tls_config
446 | except TLSParameterError as exc:
447 | self.fail("TLS config error: %s" % exc)
448 |
449 | def _get_connect_params(self):
450 | auth = self.auth_params
451 |
452 | self.log("auth params:")
453 | for key in auth:
454 | self.log(" %s: %s" % (key, auth[key]))
455 |
456 | if auth['tls'] or auth['tls_verify']:
457 | auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
458 |
459 | if auth['tls'] and auth['cert_path'] and auth['key_path']:
460 | # TLS with certs and no host verification
461 | tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
462 | verify=False,
463 | ssl_version=auth['ssl_version'])
464 | return dict(base_url=auth['docker_host'],
465 | tls=tls_config,
466 | version=auth['api_version'],
467 | timeout=auth['timeout'])
468 |
469 | if auth['tls']:
470 |             # TLS with no certs and no host verification
471 | tls_config = self._get_tls_config(verify=False,
472 | ssl_version=auth['ssl_version'])
473 | return dict(base_url=auth['docker_host'],
474 | tls=tls_config,
475 | version=auth['api_version'],
476 | timeout=auth['timeout'])
477 |
478 | if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
479 | # TLS with certs and host verification
480 | if auth['cacert_path']:
481 | tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
482 | ca_cert=auth['cacert_path'],
483 | verify=True,
484 | assert_hostname=auth['tls_hostname'],
485 | ssl_version=auth['ssl_version'])
486 | else:
487 | tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
488 | verify=True,
489 | assert_hostname=auth['tls_hostname'],
490 | ssl_version=auth['ssl_version'])
491 |
492 | return dict(base_url=auth['docker_host'],
493 | tls=tls_config,
494 | version=auth['api_version'],
495 | timeout=auth['timeout'])
496 |
497 | if auth['tls_verify'] and auth['cacert_path']:
498 | # TLS with cacert only
499 | tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
500 | assert_hostname=auth['tls_hostname'],
501 | verify=True,
502 | ssl_version=auth['ssl_version'])
503 | return dict(base_url=auth['docker_host'],
504 | tls=tls_config,
505 | version=auth['api_version'],
506 | timeout=auth['timeout'])
507 |
508 | if auth['tls_verify']:
509 | # TLS with verify and no certs
510 | tls_config = self._get_tls_config(verify=True,
511 | assert_hostname=auth['tls_hostname'],
512 | ssl_version=auth['ssl_version'])
513 | return dict(base_url=auth['docker_host'],
514 | tls=tls_config,
515 | version=auth['api_version'],
516 | timeout=auth['timeout'])
517 | # No TLS
518 | return dict(base_url=auth['docker_host'],
519 | version=auth['api_version'],
520 | timeout=auth['timeout'])
521 |
522 | def _handle_ssl_error(self, error):
523 | match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
524 | if match:
525 | msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
526 | "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
527 | "You may also use TLS without verification by setting the tls parameter to true." \
528 |                   % (self.auth_params['tls_hostname'], match.group(1), match.group(1))
529 | self.fail(msg)
530 | self.fail("SSL Exception: %s" % (error))
531 |
532 |
533 | class EnvArgs(object):
534 | def __init__(self):
535 | self.config_file = None
536 | self.docker_host = None
537 | self.api_version = None
538 | self.cert_path = None
539 | self.ssl_version = None
540 | self.tls = None
541 | self.tls_verify = None
542 | self.tls_hostname = None
543 | self.timeout = None
544 | self.default_ssh_port = None
545 | self.default_ip = None
546 |
547 |
548 | class DockerInventory(object):
549 |
550 | def __init__(self):
551 | self._args = self._parse_cli_args()
552 | self._env_args = self._parse_env_args()
553 | self.groups = defaultdict(list)
554 | self.hostvars = defaultdict(dict)
555 |
556 | def run(self):
557 | config_from_file = self._parse_config_file()
558 | if not config_from_file:
559 | config_from_file = dict()
560 | docker_hosts = self.get_hosts(config_from_file)
561 |
562 | for host in docker_hosts:
563 | client = AnsibleDockerClient(host, self._args.debug)
564 | self.get_inventory(client, host)
565 |
566 | if not self._args.host:
567 | self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
568 | self.groups['_meta'] = dict(
569 | hostvars=self.hostvars
570 | )
571 | print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
572 | else:
573 | print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))
574 |
575 | sys.exit(0)
576 |
577 | def get_inventory(self, client, host):
578 |
579 | ssh_port = host.get('default_ssh_port')
580 | default_ip = host.get('default_ip')
581 | hostname = host.get('docker_host')
582 |
583 | try:
584 | containers = client.containers(all=True)
585 | except Exception as exc:
586 | self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))
587 |
588 | for container in containers:
589 | id = container.get('Id')
590 | short_id = id[:13]
591 |
592 | try:
593 | name = container.get('Names', list()).pop(0).lstrip('/')
594 | except IndexError:
595 | name = short_id
596 |
597 | if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
598 | try:
599 | inspect = client.inspect_container(id)
600 | except Exception as exc:
601 | self.fail("Error inspecting container %s - %s" % (name, str(exc)))
602 |
603 | running = inspect.get('State', dict()).get('Running')
604 |
605 | # Add container to groups
606 | image_name = inspect.get('Config', dict()).get('Image')
607 | if image_name:
608 | self.groups["image_%s" % (image_name)].append(name)
609 |
610 | self.groups[id].append(name)
611 | self.groups[name].append(name)
612 | if short_id not in self.groups.keys():
613 | self.groups[short_id].append(name)
614 | self.groups[hostname].append(name)
615 |
616 | if running is True:
617 | self.groups['running'].append(name)
618 | else:
619 | self.groups['stopped'].append(name)
620 |
621 | # Figure out the SSH IP and port
622 | try:
623 | # Look up the public-facing port NAT'ed to the SSH port.
624 | port = client.port(container, ssh_port)[0]
625 | except (IndexError, AttributeError, TypeError):
626 | port = dict()
627 |
628 | try:
629 | ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
630 | except KeyError:
631 | ip = ''
632 |
633 | facts = dict(
634 | ansible_ssh_host=ip,
635 | ansible_ssh_port=port.get('HostPort', int()),
636 | docker_name=name,
637 | docker_short_id=short_id
638 | )
639 |
640 | for key in inspect:
641 | fact_key = self._slugify(key)
642 | facts[fact_key] = inspect.get(key)
643 |
644 | self.hostvars[name].update(facts)
645 |
646 | def _slugify(self, value):
647 | return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
648 |
649 | def get_hosts(self, config):
650 | '''
651 | Determine the list of docker hosts we need to talk to.
652 |
653 | :param config: dictionary read from the config file. Can be empty.
654 | :return: list of connection dictionaries
655 | '''
656 | hosts = list()
657 |
658 | hosts_list = config.get('hosts')
659 | defaults = config.get('defaults', dict())
660 | self.log('defaults:')
661 | self.log(defaults, pretty_print=True)
662 | def_host = defaults.get('host')
663 | def_tls = defaults.get('tls')
664 | def_tls_verify = defaults.get('tls_verify')
665 | def_tls_hostname = defaults.get('tls_hostname')
666 | def_ssl_version = defaults.get('ssl_version')
667 | def_cert_path = defaults.get('cert_path')
668 | def_cacert_path = defaults.get('cacert_path')
669 | def_key_path = defaults.get('key_path')
670 | def_version = defaults.get('version')
671 | def_timeout = defaults.get('timeout')
672 | def_ip = defaults.get('default_ip')
673 | def_ssh_port = defaults.get('private_ssh_port')
674 |
675 | if hosts_list:
676 | # use hosts from config file
677 | for host in hosts_list:
678 | docker_host = host.get('host') or def_host or self._args.docker_host or \
679 | self._env_args.docker_host or DEFAULT_DOCKER_HOST
680 | api_version = host.get('version') or def_version or self._args.api_version or \
681 | self._env_args.api_version or DEFAULT_DOCKER_API_VERSION
682 | tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \
683 | self._env_args.tls_hostname
684 | tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \
685 | self._env_args.tls_verify or DEFAULT_TLS_VERIFY
686 | tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
687 | ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \
688 | self._env_args.ssl_version
689 |
690 | cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \
691 | self._env_args.cert_path
692 | if cert_path and cert_path == self._env_args.cert_path:
693 | cert_path = os.path.join(cert_path, 'cert.pem')
694 |
695 | cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \
696 | self._env_args.cert_path
697 | if cacert_path and cacert_path == self._env_args.cert_path:
698 | cacert_path = os.path.join(cacert_path, 'ca.pem')
699 |
700 | key_path = host.get('key_path') or def_key_path or self._args.key_path or \
701 | self._env_args.cert_path
702 | if key_path and key_path == self._env_args.cert_path:
703 | key_path = os.path.join(key_path, 'key.pem')
704 |
705 | timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \
706 | DEFAULT_TIMEOUT_SECONDS
707 | default_ip = host.get('default_ip') or def_ip or self._args.default_ip_address or \
708 | DEFAULT_IP
709 | default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \
710 | DEFAULT_SSH_PORT
711 | host_dict = dict(
712 | docker_host=docker_host,
713 | api_version=api_version,
714 | tls=tls,
715 | tls_verify=tls_verify,
716 | tls_hostname=tls_hostname,
717 | cert_path=cert_path,
718 | cacert_path=cacert_path,
719 | key_path=key_path,
720 | ssl_version=ssl_version,
721 | timeout=timeout,
722 | default_ip=default_ip,
723 | default_ssh_port=default_ssh_port,
724 | )
725 | hosts.append(host_dict)
726 | else:
727 | # use default definition
728 | docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST
729 | api_version = def_version or self._args.api_version or self._env_args.api_version or \
730 | DEFAULT_DOCKER_API_VERSION
731 | tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname
732 | tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY
733 | tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
734 | ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version
735 |
736 | cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path
737 | if cert_path and cert_path == self._env_args.cert_path:
738 | cert_path = os.path.join(cert_path, 'cert.pem')
739 |
740 | cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path
741 | if cacert_path and cacert_path == self._env_args.cert_path:
742 | cacert_path = os.path.join(cacert_path, 'ca.pem')
743 |
744 | key_path = def_key_path or self._args.key_path or self._env_args.cert_path
745 | if key_path and key_path == self._env_args.cert_path:
746 | key_path = os.path.join(key_path, 'key.pem')
747 |
748 | timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS
749 | default_ip = def_ip or self._args.default_ip_address or DEFAULT_IP
750 | default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT
751 | host_dict = dict(
752 | docker_host=docker_host,
753 | api_version=api_version,
754 | tls=tls,
755 | tls_verify=tls_verify,
756 | tls_hostname=tls_hostname,
757 | cert_path=cert_path,
758 | cacert_path=cacert_path,
759 | key_path=key_path,
760 | ssl_version=ssl_version,
761 | timeout=timeout,
762 | default_ip=default_ip,
763 | default_ssh_port=default_ssh_port,
764 | )
765 | hosts.append(host_dict)
766 | self.log("hosts: ")
767 | self.log(hosts, pretty_print=True)
768 | return hosts
769 |
770 | def _parse_config_file(self):
771 | config = dict()
772 | config_path = None
773 |
774 | if self._args.config_file:
775 | config_path = self._args.config_file
776 | elif self._env_args.config_file:
777 | config_path = self._env_args.config_file
778 |
779 | if config_path:
780 | try:
781 | config_file = os.path.abspath(config_path)
782 | except Exception:
783 | config_file = None
784 |
785 | if config_file and os.path.exists(config_file):
786 | with open(config_file) as f:
787 | try:
788 | config = yaml.safe_load(f.read())
789 | except Exception as exc:
790 | self.fail("Error: parsing %s - %s" % (config_path, str(exc)))
791 | return config
792 |
793 | def log(self, msg, pretty_print=False):
794 | if self._args.debug:
795 | log(msg, pretty_print)
796 |
797 | def fail(self, msg):
798 | fail(msg)
799 |
800 | def _parse_env_args(self):
801 | args = EnvArgs()
802 | for key, value in DOCKER_ENV_ARGS.items():
803 | if os.environ.get(value):
804 | val = os.environ.get(value)
805 | if val in BOOLEANS_TRUE:
806 | val = True
807 | if val in BOOLEANS_FALSE:
808 | val = False
809 | setattr(args, key, val)
810 | return args
811 |
812 | def _parse_cli_args(self):
813 | # Parse command line arguments
814 |
815 | basename = os.path.splitext(os.path.basename(__file__))[0]
816 | default_config = basename + '.yml'
817 |
818 | parser = argparse.ArgumentParser(
819 | description='Return Ansible inventory for one or more Docker hosts.')
820 | parser.add_argument('--list', action='store_true', default=True,
821 | help='List all containers (default: True)')
822 | parser.add_argument('--debug', action='store_true', default=False,
823 | help='Send debug messages to STDOUT')
824 | parser.add_argument('--host', action='store',
825 | help='Only get information for a specific container.')
826 | parser.add_argument('--pretty', action='store_true', default=False,
827 | help='Pretty print JSON output (default: False)')
828 | parser.add_argument('--config-file', action='store', default=default_config,
829 | help="Name of the config file to use. Default is %s" % (default_config))
830 | parser.add_argument('--docker-host', action='store', default=None,
831 | help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s"
832 | % (DEFAULT_DOCKER_HOST))
833 | parser.add_argument('--tls-hostname', action='store', default='localhost',
834 | help="Host name to expect in TLS certs. Defaults to 'localhost'")
835 | parser.add_argument('--api-version', action='store', default=None,
836 | help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
837 | parser.add_argument('--timeout', action='store', default=None,
838 | help="Docker connection timeout in seconds. Defaults to %s"
839 | % (DEFAULT_TIMEOUT_SECONDS))
840 | parser.add_argument('--cacert-path', action='store', default=None,
841 | help="Path to the TLS certificate authority pem file.")
842 | parser.add_argument('--cert-path', action='store', default=None,
843 | help="Path to the TLS certificate pem file.")
844 | parser.add_argument('--key-path', action='store', default=None,
845 | help="Path to the TLS encryption key pem file.")
846 | parser.add_argument('--ssl-version', action='store', default=None,
847 | help="TLS version number")
848 | parser.add_argument('--tls', action='store_true', default=None,
849 | help="Use TLS. Defaults to %s" % (DEFAULT_TLS))
850 | parser.add_argument('--tls-verify', action='store_true', default=None,
851 | help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY))
852 | parser.add_argument('--private-ssh-port', action='store', default=None,
853 | help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT))
854 | parser.add_argument('--default-ip-address', action='store', default=None,
855 | help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP))
856 | return parser.parse_args()
857 |
858 | def _json_format_dict(self, data, pretty_print=False):
859 | # format inventory data for output
860 | if pretty_print:
861 | return json.dumps(data, sort_keys=True, indent=4)
862 | else:
863 | return json.dumps(data)
864 |
865 |
866 | def main():
867 |
868 | if not HAS_DOCKER_PY:
869 | fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR))
870 |
871 | DockerInventory().run()
872 |
873 | main()
874 |
--------------------------------------------------------------------------------
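The inventory script above looks for a YAML config file named after itself (docker.yml, unless --config-file or the matching environment variable points elsewhere) and merges it with CLI flags, environment settings, and built-in defaults inside get_hosts(). The following is a minimal sketch of such a config, using only keys that get_hosts() actually reads; the daemon addresses and certificate paths are placeholders, not values from this repository:

    # docker.yml -- hypothetical example config for docker.py
    defaults:
      host: unix:///var/run/docker.sock   # fallback Docker daemon for all entries
      private_ssh_port: 22                # container-side SSH port to map
      default_ip: 127.0.0.1               # IP reported when the port binds to 0.0.0.0
    hosts:
      - host: tcp://192.168.99.100:2376   # placeholder remote daemon
        tls: true
        tls_verify: true
        tls_hostname: 192.168.99.100
        cert_path: /path/to/cert.pem
        cacert_path: /path/to/ca.pem
        key_path: /path/to/key.pem
        timeout: 60

With a config like this in place, `./docker.py --list --pretty` prints the grouped inventory (including the _meta hostvars block), and `./docker.py --host <container-name> --pretty` prints the variables for a single container.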