├── LICENSE ├── README.md ├── demos ├── HA │ ├── README.md │ ├── Windows.md │ ├── build-8GB.sh │ ├── build.sh │ ├── cluster-failover.md │ ├── cluster-recovery.md │ └── manual-barclamps.md ├── README.md ├── heat-wordpress │ ├── .gitignore │ ├── README.md │ ├── build.sh │ ├── heat-template-wordpress.json.tmpl │ ├── prep-wordpress-project.sh │ └── suse_techtalk_orchestrating_service_deployment_in_suse_cloud.pdf └── lib │ └── common.sh ├── docs ├── FAQ.md ├── HOWTO.md ├── debugging.md ├── prerequisites.md └── vagrant-libvirt.md ├── kiwi ├── .gitignore ├── README.md ├── build-lib.sh ├── cloud-admin │ ├── README.md │ ├── build-image.sh │ ├── mount-repos.sh │ ├── source │ │ ├── .gitignore │ │ ├── bootsplash.tar │ │ ├── config.sh │ │ ├── config.xml.tmpl │ │ └── root │ │ │ ├── etc │ │ │ ├── YaST2 │ │ │ │ ├── control.xml.live │ │ │ │ ├── firstboot.xml │ │ │ │ ├── firstboot │ │ │ │ │ ├── congratulate.txt │ │ │ │ │ └── welcome.txt │ │ │ │ └── licenses │ │ │ │ │ ├── license-cloud-beta.txt │ │ │ │ │ ├── license-ses.txt │ │ │ │ │ ├── license-sleha.txt │ │ │ │ │ ├── license-sles.txt │ │ │ │ │ └── license.txt │ │ │ ├── hostname │ │ │ ├── hosts │ │ │ ├── issue │ │ │ ├── issue.live │ │ │ ├── issue.vagrant │ │ │ ├── motd │ │ │ ├── profile.d │ │ │ │ └── EULA.sh │ │ │ ├── resolv.conf │ │ │ ├── sudoers.d │ │ │ │ └── vagrant │ │ │ ├── sysconfig │ │ │ │ └── network │ │ │ │ │ ├── dhcp │ │ │ │ │ ├── ifcfg-eth0.dhcp │ │ │ │ │ ├── ifcfg-eth0.static │ │ │ │ │ └── routes │ │ │ └── systemd │ │ │ │ └── system │ │ │ │ └── appliance-firstboot.service │ │ │ ├── home │ │ │ └── vagrant │ │ │ │ └── .ssh │ │ │ │ └── authorized_keys │ │ │ ├── opt │ │ │ └── dell │ │ │ │ └── crowbar_framework │ │ │ │ └── config │ │ │ │ └── repos-cloud.yml │ │ │ ├── patches │ │ │ ├── 0001-Disable-calamari-role-in-Crowbar-for-appliance.patch │ │ │ ├── 0001-crowbar_register-Add-entry-to-etc-hosts-for-resolvin.patch │ │ │ └── apply-patches │ │ │ ├── root │ │ │ ├── DRBD.yaml │ │ │ ├── NFS.yaml │ │ │ ├── bin │ │ │ │ ├── node-sh-vars │ │ │ │ └── setup-node-aliases.sh │ │ │ └── simple-cloud.yaml │ │ │ └── usr │ │ │ ├── bin │ │ │ └── appliance-firstboot │ │ │ ├── lib │ │ │ └── firstboot │ │ │ │ └── wait-for-crowbar-init │ │ │ └── share │ │ │ ├── YaST2 │ │ │ └── clients │ │ │ │ ├── firstboot_license1.ycp │ │ │ │ ├── firstboot_license2.ycp │ │ │ │ ├── firstboot_license3.ycp │ │ │ │ └── firstboot_license4.ycp │ │ │ └── firstboot │ │ │ ├── licenses │ │ │ ├── 1 │ │ │ │ └── license.txt │ │ │ ├── 2 │ │ │ │ └── license.txt │ │ │ ├── 3 │ │ │ │ └── license.txt │ │ │ └── 4 │ │ │ │ └── license.txt │ │ │ └── scripts │ │ │ └── cloud-appliance.sh │ └── umount-repos.sh └── sles12-sp2 │ ├── README.md │ ├── build-image.sh │ └── source │ ├── .gitignore │ ├── bootsplash.tar │ ├── config.sh │ ├── config.xml.tmpl │ └── root │ ├── etc │ ├── YaST2 │ │ └── licenses │ │ │ ├── license-sles.txt │ │ │ └── license.txt │ ├── hostname │ ├── issue │ ├── motd │ ├── profile.d │ │ └── EULA.sh │ ├── sudoers.d │ │ └── vagrant │ ├── sysconfig │ │ └── network │ │ │ ├── dhcp │ │ │ ├── ifcfg-eth0 │ │ │ └── ifcfg-eth1 │ └── systemd │ │ └── system │ │ └── appliance-firstboot.service │ ├── home │ └── vagrant │ │ └── .ssh │ │ └── authorized_keys │ └── usr │ ├── bin │ └── appliance-firstboot │ └── share │ └── firstboot │ └── scripts │ └── cloud-appliance.sh └── vagrant ├── .gitignore ├── Gemfile ├── Gemfile.lock ├── README.md ├── Vagrantfile ├── building-boxes ├── README.md ├── box.make ├── cloud-admin │ ├── .gitignore │ ├── Makefile │ ├── README.md │ ├── Vagrantfile │ ├── box.ovf │ ├── cloud-admin.json 
│ └── metadata.json ├── generic-box-Vagrantfile ├── pxe │ ├── Makefile │ ├── Vagrantfile │ ├── metadata-libvirt.json │ ├── metadata-virtualbox.json │ ├── pxe-16GB-libvirt.box │ └── pxe-16GB.qcow2 └── sles12-sp2 │ ├── .gitignore │ ├── Makefile │ ├── README.md │ ├── Vagrantfile │ ├── box.ovf │ ├── metadata.json │ └── sles12-sp2.json ├── configs ├── 1-controller-1-compute.yaml ├── 2-controllers-0-compute.yaml ├── 2-controllers-1-compute.yaml └── 2-controllers-2-computes.yaml └── provisioning ├── admin ├── HA-cloud-no-compute.yaml ├── HA-cloud.yaml ├── HA-compute-cloud-demo.yaml ├── HA-compute-cloud.yaml ├── apply-fix-lp#1691831.patch ├── barclamp-network-ignore-eth0.patch ├── increase-SBD-timeout-30s.patch ├── install-suse-cloud.sh ├── network.json ├── prep-admin.sh ├── provision-root-files.sh ├── setup-node-aliases.sh ├── simple-cloud.yaml ├── switch-admin-ip.sh └── switch-vdisks.sh ├── controller ├── provision-root-files.sh ├── start-testvm └── upload-cirros └── non-admin ├── deps-release ├── register-with-suse-cloud ├── store-vagrant-name.sh └── update-motd /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2014 SUSE LINUX Products GmbH, Nuernberg, Germany 2 | Copyright 2014 Florian Haas, Hastexo 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Vagrant resources for SUSE OpenStack Cloud 2 | 3 | This repository contains resources for rapid virtualized deployment of 4 | [SUSE OpenStack Cloud](https://www.suse.com/products/suse-cloud/) via 5 | [Vagrant](http://www.vagrantup.com/). 6 | 7 | ## Contents 8 | 9 | * Resources for [automatically preparing and presenting demos](demos/) 10 | of functionality within SUSE OpenStack Cloud - these essentially 11 | reduce the task of setup to a one-line command. 12 | * A [HOWTO guide](docs/HOWTO.md) documenting how to use Vagrant and 13 | the provided [`Vagrantfile`](vagrant/Vagrantfile) to deploy a SUSE 14 | Cloud environment of (by default) 4 VMs via a single `vagrant up` 15 | command. Use this instead of one of the [demos](demos/) if you 16 | want more manual control over the setup of Crowbar barclamps and 17 | OpenStack. 18 | 19 | ## Support, bugs, development etc. 20 | 21 | If you experience a bug or other issue, or want to check the list 22 | of known issues and other ongoing development, please refer to the 23 | [github issue tracker](https://github.com/SUSE-Cloud/suse-cloud-vagrant/issues/). 24 | 25 | ## History 26 | 27 | These resources were originally built for 28 | [an OpenStack HA workshop session given on 2014/05/15 at the OpenStack summit in Atlanta](http://openstacksummitmay2014atlanta.sched.org/event/d3db2188dfed4459f8fbd03f5b405b81#.U4C6NXWx1Qo). 29 | Video, slides, and other material from that workshop are available 30 | [here](https://github.com/aspiers/openstacksummit2014-atlanta). 
31 | 32 | They were further enhanced for 33 | [a further workshop on 2014/11/03 at the OpenStack summit in Paris](https://openstacksummitnovember2014paris.sched.org/event/70cf22bce26516e9d6ae4ae45e966954). 34 | -------------------------------------------------------------------------------- /demos/HA/README.md: -------------------------------------------------------------------------------- 1 | # Automatically deploying a highly available OpenStack cloud 2 | 3 | This demo shows how easy it is to use the Pacemaker barclamp to deploy 4 | an OpenStack cloud which has highly available services. 5 | 6 | ## Prerequisites 7 | 8 | First ensure you have 9 | [the described prerequisites](../../docs/prerequisites.md). 10 | 11 | You'll need at least 16GB RAM on the host for a full HA cloud, 12 | since this demo defaults to using 13 | [`vagrant/configs/2-controllers-1-compute.yaml`](../../vagrant/configs/2-controllers-1-compute.yaml) 14 | in order to determine the number, size, and shape of the VMs 15 | the [`Vagrantfile`](../../vagrant/Vagrantfile) will boot, 16 | and 17 | [`vagrant/provisioning/admin/HA-cloud.yaml`](../../vagrant/provisioning/admin/HA-cloud.yaml) 18 | in order to determine how the Crowbar barclamps are applied. 19 | 20 | However you may be able to get away with 8GB RAM by only setting up 21 | the two clustered controller nodes running OpenStack services, and 22 | skipping the compute node and deployment of Nova and Heat. Obviously 23 | this will prevent you from booting instances in the OpenStack cloud 24 | via Nova, but you should still be able to test the HA functionality. 25 | 26 | So if your host only has 8GB RAM, when following the instructions 27 | below, **either** first copy and paste this into your terminal: 28 | 29 | export VAGRANT_CONFIG_FILE=configs/2-controllers-0-compute.yaml 30 | export PROPOSALS_YAML=/root/HA-cloud-no-compute.yaml 31 | 32 | **or** type `source build-8GB.sh` which does exactly the same but is 33 | easier to type. This will configure `build.sh` below to use the 34 | alternative profile which uses less RAM. 35 | 36 | N.B. The value for `VAGRANT_CONFIG_FILE` should either be an absolute 37 | path, or relative to 38 | [the directory containing `Vagrantfile`](../../vagrant), whereas the 39 | value for `PROPOSALS_YAML` points to a path *inside* the admin server 40 | VM, so should start with `/root/...`. 41 | 42 | Whichever files you use, you can optionally tune the number, size, and 43 | shape of the VMs being booted, by editing whichever file 44 | `$VAGRANT_CONFIG_FILE` points to, and you can tune the barclamp 45 | proposal parameters by editing whichever file `$PROPOSALS_YAML` points 46 | to. 47 | 48 | ## Preparing the demo 49 | 50 | **Read this whole section before running anything!** 51 | 52 | If you are using Windows, please see [this page](Windows.md). 53 | 54 | Then depending on your preferred hypervisor, simply run: 55 | 56 | ./build.sh virtualbox 57 | 58 | or 59 | 60 | ./build.sh kvm 61 | 62 | This will perform the following steps: 63 | 64 | * Use Vagrant to build one Crowbar admin node, two controller nodes, 65 | and a compute node, including an extra DRBD disk on each controller 66 | and a shared SBD disk. 67 | * Run [`/root/bin/setup-node-aliases.sh`](../../vagrant/provisioning/admin/setup-node-aliases.sh) 68 | to set aliases in Crowbar for the controller and compute nodes 69 | * Create and apply a standard set of Crowbar proposals as described 70 | in detail below. 
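For example, on a host with only 8GB of RAM the whole preparation boils down to two commands run from this directory (this just combines the 8GB instructions above; substitute `kvm` for `virtualbox` if that is your hypervisor):

    # Use the reduced two-controller, no-compute profile (only needed on 8GB hosts)
    source ./build-8GB.sh

    # Boot the VMs via Vagrant and apply the Crowbar proposals
    ./build.sh virtualbox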
71 | 72 | If you prefer to perform any of these steps manually as part of the 73 | demo (e.g. creating the proposals and/or preparing the cloud for the 74 | demo), you can easily comment those steps out of `build.sh`. 75 | 76 | N.B. All steps run by `./build.sh` are idempotent, so you can safely 77 | run it as many times as you need. 78 | 79 | ## Deployment of a highly available OpenStack cloud 80 | 81 | This section describes how to manually set up the barclamp proposals. 82 | By default `./build.sh` will automatically do this for you, but if you 83 | prefer to do it manually, simply comment out the lines which call 84 | `crowbar batch` near the end of the script, and then follow the 85 | instructions in this page: 86 | 87 | * [Guide to manual application of barclamp proposals for an HA cloud](manual-barclamps.md) 88 | 89 | If you want, you can even mix'n'match the manual and automatic 90 | approaches, by adding `--include` / `--exclude` options to the 91 | invocation of `crowbar batch` filtering which proposals get applied, 92 | and/or by editing 93 | [`/root/HA-cloud.yaml`](../../vagrant/provisioning/admin/HA-cloud.yaml) 94 | on the Crowbar admin node, and commenting out certain proposals. 95 | However, you should be aware that the proposals need to be applied in 96 | the order given, regardless of whether they are applied manually or 97 | automatically. 98 | 99 | ### Watching the cluster being built 100 | 101 | #### Crowbar web UI 102 | 103 | At any time whilst `build.sh` is running and has reached the point 104 | where Crowbar is up and running, you can browse 105 | [the Crowbar web UI](http://192.168.124.10:3000/) (username and 106 | password are both `crowbar` by default) to see the current state of 107 | your cloud infrastructure. 108 | 109 | #### Hawk web UI 110 | 111 | As soon as the Pacemaker barclamp's `cluster1` proposal has been 112 | applied (i.e. showing a green bubble icon in the Crowbar web UI), you 113 | can connect to the [Hawk web UI](http://clusterlabs.org/wiki/Hawk) via: 114 | 115 | * `controller1`: [https://192.168.124.81:7630](https://192.168.124.81:7630) 116 | or [https://localhost:7630](https://localhost:7630) 117 | * `controller2`: [https://192.168.124.82:7630](https://192.168.124.82:7630) 118 | or [https://localhost:7631](https://localhost:7631) 119 | 120 | Then log in with username `hacluster` and password `crowbar`, and 121 | watch as Chef automatically adds new resources to the cluster. 122 | 123 | #### Chef log files 124 | 125 | If you're interested in a more internal glimpse of how Crowbar is 126 | orchestrating Chef behind the scenes to configure resources across the 127 | nodes, 128 | [connect to the `admin` VM](../../docs/HOWTO.md#connecting-to-the-vms) 129 | and then type: 130 | 131 | tail -f /var/log/crowbar/chef-client/*.log 132 | 133 | ## Playing with High Availability 134 | 135 | Please see the following pages: 136 | 137 | * [testing failover](cluster-failover.md) - how to do nasty 138 | things to your OpenStack infrastructure cluster! 139 | * [cluster recovery](cluster-recovery.md) - a quick guide 140 | for how to recover your cluster to a healthy state after 141 | doing nasty things to it :-) 142 | 143 | ## Performing Vagrant operations 144 | 145 | If you want to use `vagrant` to control the VMs, e.g. 
`vagrant halt` / 146 | `destroy`, then first `cd` to the `vagrant/` subdirectory of the git 147 | repository: 148 | 149 | cd ../../vagrant 150 | 151 | If you are using `libvirt`, you will probably need to prefix `vagrant` 152 | with `bundle exec` every time you run it, e.g.: 153 | 154 | bundle exec vagrant status 155 | bundle exec vagrant halt compute1 156 | 157 | See [the `vagrant-libvirt` page](../../docs/vagrant-libvirt.md) for 158 | more information. 159 | -------------------------------------------------------------------------------- /demos/HA/Windows.md: -------------------------------------------------------------------------------- 1 | # Running the HA demo on Windows 2 | 3 | Firstly make sure you have all 4 | [the prerequisites](../../docs/prerequisites.md). In particular, 5 | ideally you will have installed 6 | [Git for Windows](http://msysgit.github.io/) which provides a 7 | [GNU-like `bash` environment](http://msysgit.github.io/#bash) from 8 | which you can run the standard [`build.sh` script provided](build.sh). 9 | However this page explains how to run the demo both with and without 10 | it installed. 11 | 12 | ## Running via Git Bash 13 | 14 | If you have Git for Windows installed, simply 15 | launch [Git BASH](http://msysgit.github.io/#bash) from the Start menu, 16 | make sure you are in the right directory and then run `build.sh`: 17 | 18 | cd path/to/this/git/repository 19 | cd demos/HA 20 | 21 | and then continue following [the normal README](README.md). 22 | 23 | ## Running without Git Bash 24 | 25 | If you don't have Git for Windows installed, you will have to type 26 | some commands manually, as follows. 27 | 28 | If you have less than 16GB of RAM (but at least 8GB), type this: 29 | 30 | set VAGRANT_CONFIG_FILE=configs/2-controllers-0-compute.yaml 31 | 32 | Make sure you are in the right directory: 33 | 34 | cd path\to\this\git\repository 35 | cd vagrant 36 | 37 | Your shell should now be in the directory which contains 38 | `Vagrantfile`, `configs/` etc. If not then the following will not work! 39 | 40 | vagrant up 41 | 42 | This will take some time to boot all the nodes. Once it's finished, 43 | log into the admin node either via: 44 | 45 | vagrant ssh admin -- -l root 46 | 47 | or simply via the console in the VirtualBox GUI. The password is 48 | `vagrant`. 
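Optionally, once you are logged in you can check that Crowbar has finished initialising before continuing; a minimal sketch which simply polls the Crowbar web UI address used throughout these docs:

    # On the admin node: prints 200 once the Crowbar web UI is answering
    curl -s -o /dev/null -w '%{http_code}\n' http://192.168.124.10:3000/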
49 | 50 | Now type the following: 51 | 52 | setup-node-aliases.sh 53 | 54 | If you have 16GB of RAM, type: 55 | 56 | crowbar batch build /root/HA-cloud.yaml 57 | 58 | If you have less (but at least 8GB), instead type: 59 | 60 | crowbar batch build /root/HA-cloud-no-compute.yaml 61 | -------------------------------------------------------------------------------- /demos/HA/build-8GB.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # If you only have 8GB of RAM, source this file before running build.sh: 4 | # 5 | # source ./build-8GB 6 | # ./build.sh virtualbox 7 | 8 | export VAGRANT_CONFIG_FILE=configs/2-controllers-0-compute.yaml 9 | export PROPOSALS_YAML=/root/HA-cloud-no-compute.yaml 10 | -------------------------------------------------------------------------------- /demos/HA/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | : ${VAGRANT_CONFIG_FILE:=configs/2-controllers-1-compute.yaml} 4 | export VAGRANT_CONFIG_FILE 5 | : ${PROPOSALS_YAML:=/root/HA-cloud.yaml} 6 | 7 | here=$(cd `dirname $0` && pwd) 8 | source $here/../lib/common.sh 9 | vagrant_dir=$(cd $here/../../vagrant && pwd) 10 | 11 | usage () { 12 | # Call as: usage [EXITCODE] [USAGE MESSAGE] 13 | exit_code=1 14 | if [[ "$1" == [0-9] ]]; then 15 | exit_code="$1" 16 | shift 17 | fi 18 | if [ -n "$1" ]; then 19 | echo >&2 "$*" 20 | echo 21 | fi 22 | 23 | me=`basename $0` 24 | 25 | cat <&2 26 | Usage: $me [options] HYPERVISOR 27 | Options: 28 | -h, --help Show this help and exit 29 | 30 | Hypervisor can be 'kvm' or 'virtualbox'. 31 | EOF 32 | exit "$exit_code" 33 | } 34 | 35 | parse_args () { 36 | if [ ${#ARGV[@]} != 1 ]; then 37 | usage 38 | fi 39 | 40 | hypervisor="${ARGV[0]}" 41 | } 42 | 43 | main () { 44 | parse_opts "$@" 45 | parse_args 46 | 47 | check_vagrant_config 48 | check_hypervisor 49 | use_bundler_with_kvm 50 | 51 | if ! vagrant up --no-parallel; then 52 | die "vagrant up failed; aborting" 53 | fi 54 | 55 | vagrant_ssh_config 56 | setup_node_aliases 57 | setup_node_sh_vars 58 | switch_to_kvm_if_required 59 | batch_build_proposals "$PROPOSALS_YAML" 60 | 61 | cat <<'EOF' 62 | 63 | Success! 64 | 65 | A highly-available OpenStack cloud has been built. You can now test failover. 66 | EOF 67 | } 68 | 69 | main "$@" 70 | -------------------------------------------------------------------------------- /demos/HA/cluster-failover.md: -------------------------------------------------------------------------------- 1 | # Testing failover of the OpenStack infrastructure cluster 2 | 3 | Firstly please note that this virtualized Vagrant environment is not a 4 | fair reflection of a production cluster! Whilst it should be 5 | sufficiently robust for most testing and demonstration purposes, 6 | there are some limitations. This is particularly true if your host 7 | machine does not have a lot of spare RAM after all VMs are fully up 8 | and running OpenStack services. 9 | 10 | ## Introduction 11 | 12 | * [Connect to the `controller1` and `controller2` VMs](../../docs/HOWTO.md#connecting-to-the-vms) 13 | * On `controller1` and `controller2`, run `crm_mon` command. 14 | * You can also monitor the cluster from the 15 | [Hawk web interface](README.md#hawk-web-ui) running on either 16 | controller (choose the one which you don't plan to kill during 17 | failover testing!) 18 | * On `controller1` run: 19 | 20 | source .openrc 21 | keystone user-list 22 | keystone service-list 23 | neutron net-list 24 | nova list # (N.B. 
in 8GB environments you won't have nova) 25 | 26 | ## Failover scenarios for services 27 | 28 | * On `controller1` or `controller2` try to kill OpenStack services 29 | using commands like: 30 | 31 | pkill keystone-all 32 | pkill glance-api 33 | pkill nova-api 34 | 35 | * Watch `crm_mon` and/or Hawk to see how all services are kept running 36 | by Pacemaker. 37 | 38 | ## Failover scenarios for nodes 39 | 40 | * Kill the `controller2` VM by powering it off via your hypervisor. 41 | * Watch `crm_mon` and/or Hawk (on `controller1`!) to see how the 42 | active/passive services are failed over by Pacemaker. This 43 | process will take longer in this Vagrant environment than it would 44 | in a production environment, due to timeouts which have been 45 | increased to match the lower performance of this virtualized 46 | setup. 47 | 48 | At this point you should probably read 49 | [how to recover a degraded cluster](cluster-recovery.md). 50 | -------------------------------------------------------------------------------- /demos/HA/cluster-recovery.md: -------------------------------------------------------------------------------- 1 | # Recovering a SUSE OpenStack Cloud controller cluster to a healthy state 2 | 3 | [Abusing a test cluster to see how it handles failures](cluster-failover.md) 4 | is fun :-) However depending on the kind of abuse, you might find that 5 | one of your nodes refuses to rejoin the cluster. 6 | 7 | This is most likely because it was not cleanly shut down - either 8 | because you deliberately killed it, or because it got 9 | [fenced](http://en.wikipedia.org/wiki/Fencing_(computing)), 10 | i.e. automatically killed by the 11 | [STONITH](http://clusterlabs.org/doc/crm_fencing.html) mechanism. 12 | This is the correct behaviour - it is required in order to protect 13 | your cluster against data loss or corruption. 14 | 15 | However, recovering a degraded (but *still functioning*) cluster to 16 | full strength requires some manual intervention. Again, this is 17 | intentional design in order to protect the cluster. 18 | 19 | This document explains how to spot fencing, and what to do if it 20 | happens. 21 | 22 | ## Symptoms of a degraded cluster 23 | 24 | Here are some of the symptoms you may see: 25 | 26 | * A VM rebooted without you asking it to. 27 | * The Crowbar web UI may show a red bubble icon next to 28 | a controller node. 29 | * The Hawk web UI stops responding on one of the controller 30 | nodes. (You should still be able to use the other one.) 31 | * Your `ssh` connection to a controller node freezes. 32 | * OpenStack services will stop responding for a short while. 33 | 34 | ## Recovering from a degraded cluster 35 | 36 | Ensure that the node which got fenced is booted up again. It will not 37 | automatically rejoin the cluster, because we only have a 2-node 38 | cluster, so quorum is impossible therefore we have to defend against 39 | fencing loops. (This is the "Do not start corosync on boot after 40 | fencing" setting in the 41 | [Pacemaker barclamp proposal](http://192.168.124.10:3000/crowbar/pacemaker/1.0/proposals/cluster1).) 42 | 43 | ### Recovering the Pacemaker cluster 44 | 45 | Therefore to tell the node it can safely rejoin the cluster, 46 | [connect to the node](../../docs/HOWTO.md#connecting-to-the-vms) 47 | and type: 48 | 49 | rm /var/spool/corosync/block_automatic_start 50 | 51 | Now you should be able to start the cluster again, e.g. 
52 | 53 | service openais start 54 | 55 | ### Recovering Crowbar and Chef 56 | 57 | However, this is not sufficient, because all nodes (including the 58 | Crowbar admin server node) need to be aware that this node is back 59 | online. Whilst Pacemaker takes care of that to a large extent, 60 | Crowbar and Chef still need to do one or two things. So you need 61 | to ensure that the node is (re-)registered with Crowbar: 62 | 63 | service crowbar_join start 64 | 65 | and then trigger a Chef run on the other controller node by connecting 66 | to it and running: 67 | 68 | chef-client 69 | 70 | ## Failcounts 71 | 72 | In some circumstances, some cluster resources may have exceeded their 73 | [maximum failcount](http://clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Pacemaker_Explained/s-failure-migration.html), 74 | in which case they will need manually "cleaning up" before they start 75 | again. This Pacemaker feature is provided in order to prevent broken 76 | resources from "flapping" in an infinite loop. Clean-up of all 77 | stopped resources can be done with a single command: 78 | 79 | crm_resource -o | \ 80 | awk '/\tStopped |Timed Out/ { print $1 }' | \ 81 | xargs -n1 crm resource cleanup 82 | 83 | You can also clean up services individually via the 84 | [Hawk web interface](README.md#hawk-web-ui). 85 | 86 | ### Maintenance mode 87 | 88 | Another complication which can occasionally crop up during recovery is 89 | related to Pacemaker's "maintenance mode". During normal operation, 90 | `chef-client` sometimes needs to place a node into 91 | [maintenance mode](http://crmsh.nongnu.org/crm.8.html#cmdhelp_node_standby) 92 | (e.g. so that it can safely restart a service after a configuration 93 | file has been updated). Rather than risk "flapping" the maintenance 94 | mode status multiple times per run, it keeps the node in standby until 95 | the `chef-client` run finishes. However if the run fails, the node 96 | can be left in maintenance mode, requiring manual remediation. You 97 | can tell if this happens because for all resources on that node, 98 | `crm_mon` etc. will show `(unmanaged)`, and Hawk will display a little 99 | wrench icon. 100 | 101 | You can disable maintenance mode very easily from the node itself: 102 | 103 | crm node ready 104 | 105 | ## Summary 106 | 107 | Currently, cluster recovery can be a bit complicated, especially in 108 | the 2 node case. We have plans to make this process more 109 | user-friendly in the near future! 110 | 111 | ## Debugging 112 | 113 | Please see [this debugging page](../../docs/debugging.md). 114 | -------------------------------------------------------------------------------- /demos/HA/manual-barclamps.md: -------------------------------------------------------------------------------- 1 | ## Manual deployment of a highly available OpenStack cloud 2 | 3 | The [`build.sh`](build.sh) script offered will automatically use the 4 | `crowbar batch build` tool to deploy all the barclamps necessary in 5 | order to build a highly available cloud. However, as described in 6 | [the demo's README](README.md), it is also possible to deploy these 7 | manually from [the Crowbar web interface](http://192.168.124.10:3000), 8 | and this document provides the exact manual steps which achieve the 9 | same effect. 
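For reference, the automatic path that these manual steps replace is a single `crowbar batch` invocation, which `build.sh` runs on the admin node via `demos/lib/common.sh`:

    # Run as root on the Crowbar admin node (this is what build.sh does for you)
    crowbar batch --timeout 1200 build /root/HA-cloud.yaml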
10 | 11 | ### Deploy a Pacemaker cluster 12 | 13 | * Go to tab *Barclamps* → *OpenStack* and click on *Edit* for *Pacemaker* 14 | * Change `proposal_1` to `cluster1` and click *Create* 15 | * Scroll down to *Deployment*, and drag the `controller1` and `controller2` 16 | nodes to both roles (**pacemaker-cluster-member** and **hawk-server**) 17 | * Scroll back up and change the following options: 18 | * *STONITH* section: 19 | * Change *Configuration mode for STONITH* to 20 | **STONITH - Configured with STONITH Block Devices (SBD)** 21 | * Enter `/dev/sdc` for both nodes under **Block devices for node** 22 | (or `/dev/vdc` if you are running on KVM / libvirt) 23 | * *DRBD* section: 24 | * Change *Prepare cluster for DRBD* to `true` 25 | (`controller1` and `controller2` should have free disk to claim) 26 | * *Pacemaker GUI* section: 27 | * Change *Setup non-web GUI (hb_gui)* to `true` 28 | * Click on *Apply* to deploy Pacemaker 29 | 30 | **Hint:** You can follow the deployment by typing 31 | `tail -f /var/log/crowbar/chef_client/*` on the admin node. 32 | 33 | ### Deploy Barclamps / OpenStack / Database 34 | 35 | * Go to tab *Barclamps* → *OpenStack* and click on *Create* for Database 36 | * Under *Deployment*, remove the node that is assigned to the 37 | **database-server** role, and drag `cluster1` to the **database-server** role 38 | * Change the following options: 39 | * *High Availability* section: 40 | * Change *Storage mode* to **DRBD** 41 | * Change *Size to Allocate for DRBD Device* to **1** 42 | * Click on *Apply* to deploy the database 43 | 44 | ### Deploy Barclamps / OpenStack / RabbitMQ 45 | 46 | * Go to tab *Barclamps* → *OpenStack* and click on *Create* for RabbitMQ 47 | * Under *Deployment*, remove the node that is assigned to the 48 | **rabbitmq-server** role, and drag `cluster1` to the 49 | **rabbitmq-server** role 50 | * Change the following options: 51 | * *High Availability* section: 52 | * Change *Storage mode* to **DRBD** 53 | * Change *Size to Allocate for DRBD Device* to **1** 54 | * Click on *Apply* to deploy RabbitMQ 55 | 56 | ### Deploy Barclamps / OpenStack / Keystone 57 | 58 | * Go to tab *Barclamps* → *OpenStack* and click on *Create* for Keystone 59 | * Do not change any option 60 | * Under *Deployment*, remove the node that is assigned to the 61 | **keystone-server** role, and drag `cluster1` to the **keystone-server** role 62 | * Click on *Apply* to deploy Keystone 63 | 64 | ### Deploy Barclamps / OpenStack / Glance 65 | 66 | **N.B.!** To simplify the HA setup of Glance for the workshop, a NFS 67 | export has been automatically setup on the admin node, and mounted on 68 | /var/lib/glance on both controller nodes. Reliable shared storage is 69 | highly recommended for production; also note that alternatives exist 70 | (for instance, using the swift or ceph backends). 71 | 72 | * Go to tab *Barclamps* → *OpenStack* and click on *Create* for Glance 73 | * Do not change any option 74 | * Under *Deployment*, remove the node that is assigned to the **glance-server** role, and drag `cluster1` to the **glance-server** role 75 | * Click on *Apply* to deploy Glance 76 | 77 | ### Deploy Barclamps / OpenStack / Cinder 78 | 79 | **N.B.!** The cinder volumes will be stored on the compute node. The 80 | controller nodes are not used to allow easy testing of failover. On a 81 | real setup, using a SAN to store the volumes would be recommended. 
82 | 83 | * Go to tab *Barclamps* → *OpenStack* and click on *Create* for Cinder 84 | * Remove the `default` backend 85 | * Add a new backend with the following options: 86 | * Change *Type of Volume* to **Local File** 87 | * Change *Name for Backend* to **local** 88 | * Under *Deployment*, remove the node that is assigned to the **cinder-controller** role, and drag `cluster1` to the **cinder-controller** role 89 | * Under *Deployment*, remove the node that is assigned to the **cinder-volume** role, and drag **compute1** to the **cinder-volume** role 90 | * Click on *Apply* to deploy Cinder 91 | 92 | ### Deploy Barclamps / OpenStack / Neutron 93 | 94 | * Go to tab *Barclamps* → *OpenStack* and click on *Create* for Neutron 95 | * Do not change any option 96 | * Change the following options: 97 | * Change *Modular Layer 2 mechanism drivers* to **linuxbridge** 98 | * Under *Deployment*, remove the node that is assigned to the **neutron-server** role, and drag `cluster1` to the **neutron-server** role 99 | * remove the node that is assigned to the **neutron-network** role, and drag and drop `cluster1` to **neutron-network** role 100 | * Click on *Apply* to deploy Neutron 101 | 102 | ### Deploy Barclamps / OpenStack / Nova 103 | 104 | * Go to tab *Barclamps* → *OpenStack* and click on *Create* for Nova 105 | * Do not change any option 106 | * Under *Deployment*: 107 | * remove all nodes which are assigned to roles such as **nova-multi-controller** and **nova-multi-compute-xen** 108 | * drag `cluster1` to the **nova-multi-controller** role 109 | * drag **compute1** to the **nova-multi-compute-qemu** role 110 | * Click on *Apply* to deploy Nova 111 | 112 | ### Deploy Barclamps / OpenStack / Horizon 113 | 114 | * Go to tab *Barclamps* → *OpenStack* and click on *Create* for Horizon 115 | * Do not change any option 116 | * Under *Deployment*, remove the node that is assigned to the **nova_dashboard-server** role, and drag `cluster1` to the **nova_dashboard-server** role 117 | * Click on *Apply* to deploy Horizon 118 | 119 | ## Playing with Cloud 120 | 121 | ### Introduction 122 | 123 | * To log into the OpenStack Dashboard (Horizon): 124 | * In the Crowbar web UI, click on *Nodes* 125 | * Click on `controller1` 126 | * Click on the **OpenStack Dashboard (admin)** link 127 | * login: `admin`, password: `crowbar` 128 | * Choose the `Project` tab and for *Current Project* select `openstack` 129 | 130 | ### Upload image 131 | 132 | * Go to Images & Snapshots from Manage Compute section 133 | 134 | * Click on **Create Image** button and provide the following data: 135 | * *Name* - image name 136 | * *Image Source* - Image File 137 | * *Image File* - click on Browse button to choose image file 138 | * Format - QCOW2 - QEMU Emulator 139 | * Minimum Disk GB - 0 (no minimum) 140 | * Minimum RAM MB - 0 (no minimum) 141 | * Public - check this option 142 | * Protected - leave unchecked 143 | * Click on **Create Image** button to proceed and upload image 144 | 145 | ### Launching VM instance 146 | 147 | * Go to Instances from Manage Compute section 148 | * Click on Launch Instance button 149 | * In the *Details* tab set up 150 | * Availability Zone - nova 151 | * Instance Name - name of new VM 152 | * Flavor - `m1.tiny` 153 | * Instance Count - 1 154 | * Instance Boot Source - Boot from image 155 | * Image Name - choose uploaded image file 156 | * in Networking tab set up 157 | * drag and drop fixed network from Available Networks to Selected Networks 158 | * click on Launch button and wait until new VM 
instance will be ready 159 | -------------------------------------------------------------------------------- /demos/README.md: -------------------------------------------------------------------------------- 1 | # Pre-canned demos of SUSE OpenStack Cloud functionality 2 | 3 | This subdirectory contains resources for automatically 4 | preparing and presenting demos of SUSE OpenStack Cloud functionality 5 | from within a Vagrant environment. 6 | 7 | ## Demos 8 | 9 | Currently there are two demos available: 10 | 11 | * [Automatically deploying a Wordpress stack on OpenStack](heat-wordpress/) using 12 | [OpenStack Orchestration (a.k.a. Heat)](https://wiki.openstack.org/wiki/Heat) 13 | * [Automatically deploying highly available OpenStack services](HA/) using 14 | the Pacemaker barclamp 15 | 16 | You are warmly encouraged to 17 | [send pull requests](https://help.github.com/articles/using-pull-requests/) 18 | adding more demos! You can use the existing ones as examples of how 19 | to build new ones. 20 | -------------------------------------------------------------------------------- /demos/heat-wordpress/.gitignore: -------------------------------------------------------------------------------- 1 | MySQL*.qcow2 2 | Wordpress*.qcow2 3 | heat-template-wordpress.json 4 | suseuser.pem 5 | -------------------------------------------------------------------------------- /demos/heat-wordpress/README.md: -------------------------------------------------------------------------------- 1 | # Deploying Wordpress on OpenStack via Heat 2 | 3 | This demo shows how easy it is to use 4 | [OpenStack Orchestration (a.k.a. Heat)](https://wiki.openstack.org/wiki/Heat) 5 | to deploy a Wordpress blogging application stack on OpenStack. 6 | 7 | It uses a Heat template file which installs two instances: one running 8 | a WordPress deployment, and the other using a local MySQL database to 9 | store the data. 10 | 11 | The demo is based on 12 | [a SUSE TechTalk by Rick Ashford](suse_techtalk_orchestrating_service_deployment_in_suse_cloud.pdf); 13 | see the slides for more information. 14 | 15 | ## Preparing the demo 16 | 17 | First ensure you have 18 | [the described prerequisites](../../docs/prerequisites.md). 19 | 20 | Next, read this whole section before running anything :-) 21 | 22 | Then depending on your preferred hypervisor, simply run: 23 | 24 | ./build.sh virtualbox 25 | 26 | or 27 | 28 | ./build.sh kvm 29 | 30 | This will perform the following steps: 31 | 32 | * Build an admin node, a controller node, and a compute node via Vagrant 33 | * Run [`/root/bin/setup-node-aliases.sh`](../../vagrant/provisioning/admin/setup-node-aliases.sh) 34 | to set aliases in Crowbar for the controller and compute nodes 35 | * Create and apply a standard set of Crowbar proposals 36 | * Download the Wordpress and MySQL VM appliances 37 | * Prepare the OpenStack cloud for the demo: 38 | * Create a new `Wordpress` project 39 | * Create a new `suseuser` with password `suseuser` 40 | * Associate the user with the project 41 | * Create `MySQLSecGroup` and `WWWSecGroup` security groups 42 | and populate with the necessary firewall rules. 43 | * Upload the VM appliance images to Glance. 44 | * Generate a key pair for the `suseuser` 45 | * Subtitute the correct ids into the heat template 46 | * Copy the `suseuser.pem` private key and the generated 47 | `heat-template-wordpress.json` back into this directory 48 | so that they can be used from the demo host. 
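Once `build.sh` has finished, you can sanity-check the prepared project from `controller1`; a short sketch reusing the same `openstack` CLI calls and credential exports as `prep-wordpress-project.sh` (the image and security group names are the ones that script creates):

    # On controller1, as root
    source ~/.openrc
    export OS_USERNAME=suseuser OS_PASSWORD=suseuser OS_TENANT_NAME=Wordpress
    openstack image list            # should list MySQL-0.0.3-kvm and Wordpress-0.0.8-kvm
    openstack security group list   # should include MySQLSecGroup and WWWSecGroup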
49 | 50 | You can also optionally tune the number, size, and shape of the VMs 51 | being booted, by editing 52 | [`vagrant/configs/1-controller-1-compute.yaml`](../../vagrant/configs/1-controller-1-compute.yaml), 53 | and tune the barclamp proposal parameters by editing 54 | [`vagrant/provisioning/admin/HA-cloud.yaml`](../../vagrant/provisioning/admin/HA-cloud.yaml). 55 | 56 | N.B. All steps run by `./build.sh` are idempotent, so you can safely 57 | run it as many times as you need. 58 | 59 | If you prefer to perform any of these steps manually as part of the 60 | demo (e.g. creating the proposals and/or preparing the cloud for the 61 | demo), you can easily comment those steps out of `build.sh` or 62 | `prep-wordpress-project.sh`. 63 | 64 | For example, you could comment out the lines which call 65 | `crowbar batch` near the end of the script, and then apply 66 | the proposals manually. 67 | 68 | If you want, you can even mix'n'match the manual and automatic 69 | approaches, by adding `--include` / `--exclude` options to the 70 | invocation of `crowbar batch` filtering which proposals get applied, 71 | and/or by editing 72 | [`/root/simple-cloud.yaml`](../../vagrant/provisioning/admin/simple-cloud.yaml) 73 | on the Crowbar admin node, and commenting out certain proposals. 74 | However, you should be aware that the proposals need to be applied in 75 | the order given, regardless of whether they are applied manually or 76 | automatically. 77 | 78 | ## Crowbar web UI 79 | 80 | At any time whilst `build.sh` is running and has reached the point 81 | where Crowbar is up and running, you can browse 82 | [the Crowbar web UI](http://192.168.124.10:3000/) (username and 83 | password are both `crowbar` by default) to see the current state of 84 | your cloud infrastructure. 85 | 86 | ## Showing the demo 87 | 88 | **FIXME - need some more detail here** 89 | 90 | * Log in to the OpenStack dashboard as `suseuser` 91 | * Click on *Orchestration* 92 | * Launch a new stack 93 | * Upload the `.json` file 94 | * Give the name `Wordpress` to the new stack 95 | * Give a password `suseuser` for lifecycle operations 96 | * Click the launch button 97 | * Watch the stack boot up in the various views 98 | * Log in to the Wordpress web UI and go through the installation 99 | * Use the IP of the MySQL instance on the newly created private network 100 | * The database name, user, and password are all `wordpress` 101 | 102 | ## Performing Vagrant operations 103 | 104 | If you want to use `vagrant` to control the VMs, e.g. `vagrant halt` / 105 | `destroy`, then first `cd` to the `vagrant/` subdirectory of the git 106 | repository: 107 | 108 | cd ../../vagrant 109 | 110 | If you are using `libvirt`, you will probably need to prefix `vagrant` 111 | with `bundle exec` every time you run it, e.g.: 112 | 113 | bundle exec vagrant status 114 | bundle exec vagrant halt compute1 115 | 116 | See [the `vagrant-libvirt` page](../../docs/vagrant-libvirt.md) for 117 | more information. 
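If you prefer the command line to the dashboard walkthrough in *Showing the demo* above, the same stack can be launched from `controller1` with the Heat client; a hedged sketch (the exact client syntax depends on the OpenStack release in the appliance, and the credential exports mirror those in `prep-wordpress-project.sh`):

    # On controller1 (sketch only -- adjust to the installed heat client)
    source ~/.openrc                 # provides OS_AUTH_URL and friends
    export OS_USERNAME=suseuser OS_PASSWORD=suseuser OS_TENANT_NAME=Wordpress
    heat stack-create Wordpress -f heat-template-wordpress.json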
118 | -------------------------------------------------------------------------------- /demos/heat-wordpress/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | : ${VAGRANT_CONFIG_FILE:=configs/1-controller-1-compute.yaml} 4 | export VAGRANT_CONFIG_FILE 5 | : ${PROPOSALS_YAML:=/root/simple-cloud.yaml} 6 | 7 | here=$(cd `dirname $0` && pwd) 8 | source $here/../lib/common.sh 9 | vagrant_dir=$(cd $here/../../vagrant && pwd) 10 | 11 | usage () { 12 | # Call as: usage [EXITCODE] [USAGE MESSAGE] 13 | exit_code=1 14 | if [[ "$1" == [0-9] ]]; then 15 | exit_code="$1" 16 | shift 17 | fi 18 | if [ -n "$1" ]; then 19 | echo >&2 "$*" 20 | echo 21 | fi 22 | 23 | me=`basename $0` 24 | 25 | cat <&2 26 | Usage: $me [options] HYPERVISOR 27 | Options: 28 | -h, --help Show this help and exit 29 | 30 | Hypervisor can be 'kvm' or 'virtualbox'. 31 | EOF 32 | exit "$exit_code" 33 | } 34 | 35 | parse_args () { 36 | if [ ${#ARGV[@]} != 1 ]; then 37 | usage 38 | fi 39 | 40 | hypervisor="${ARGV[0]}" 41 | } 42 | 43 | main () { 44 | parse_opts "$@" 45 | parse_args 46 | 47 | check_vagrant_config 48 | check_hypervisor 49 | use_bundler_with_kvm 50 | 51 | if ! vagrant up --no-parallel; then 52 | die "vagrant up failed; aborting" 53 | fi 54 | 55 | vagrant_ssh_config 56 | setup_node_aliases 57 | setup_node_sh_vars 58 | switch_to_kvm_if_required 59 | batch_build_proposals "$PROPOSALS_YAML" 60 | 61 | cd $here 62 | echo "Downloading appliances to $here ..." 63 | wget -c https://www.dropbox.com/s/7334ic3d86aypsq/MySQL.x86_64-0.0.3.qcow2 64 | wget -c https://www.dropbox.com/s/xtsk9lbcqludu72/Wordpress.x86_64-0.0.8.qcow2 65 | 66 | echo "Copying appliances to controller1 ..." 67 | vrsync -avLP \ 68 | *.qcow2 prep-wordpress-project.sh \ 69 | heat-template-wordpress.json.tmpl \ 70 | controller1: 71 | 72 | echo "Preparing Wordpress project in OpenStack cloud ..." 73 | if ! vssh controller1 sudo ./prep-wordpress-project.sh; then 74 | die "Failed to prepare Wordpress project in OpenStack; aborting" 75 | fi 76 | 77 | for f in heat-template-wordpress.json suseuser.pem; do 78 | if ! vscp controller1:$f .; then 79 | die "Failed to scp $f back to host; aborting" 80 | fi 81 | done 82 | 83 | cat <<'EOF' 84 | 85 | Success! 86 | 87 | The Wordpress project has been prepared. You can now create and 88 | launch a new Wordpress stack using the heat-template-wordpress.json 89 | file in this directory. 90 | EOF 91 | } 92 | 93 | main "$@" 94 | -------------------------------------------------------------------------------- /demos/heat-wordpress/heat-template-wordpress.json.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "AWS CloudFormation Sample Template WordPress_Multi_Instance: WordPress is web software you can use to create a beautiful website or blog. 
This template installs two instances: one running a WordPress deployment and the other using a local MySQL database to store the data.", 5 | 6 | "Parameters" : { 7 | 8 | "FloatingNetwork" : { 9 | "Description" : "ID of the Nova floating network", 10 | "Type" : "String", 11 | "Default" : "@@FloatingNetworkId@@" 12 | }, 13 | 14 | "FixedNetwork" : { 15 | "Description" : "ID of the Nova fixed network", 16 | "Type" : "String", 17 | "Default" : "@@FixedNetworkId@@" 18 | }, 19 | 20 | "KeyName" : { 21 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the instances", 22 | "Type" : "String", 23 | "Default" : "suseuser" 24 | }, 25 | 26 | "InstanceType" : { 27 | "Description" : "WebServer EC2 instance type", 28 | "Type" : "String", 29 | "Default" : "m1.tiny", 30 | "AllowedValues" : [ "m1.tiny", "m1.small", "m1.medium", "m1.large", "m1.xlarge" ], 31 | "ConstraintDescription" : "must be a valid EC2 instance type." 32 | }, 33 | 34 | "WordpressVolumeSize" : { 35 | "Description" : "Database Volume size", 36 | "Type" : "Number", 37 | "Default" : "1", 38 | "MinValue" : "1", 39 | "MaxValue" : "1024", 40 | "ConstraintDescription" : "must be between 1 and 1024 Gb." 41 | }, 42 | 43 | "MySQLVolumeSize" : { 44 | "Description" : "Database Volume size", 45 | "Type" : "Number", 46 | "Default" : "1", 47 | "MinValue" : "1", 48 | "MaxValue" : "1024", 49 | "ConstraintDescription" : "must be between 1 and 1024 Gb." 50 | }, 51 | 52 | "WordpressVersion": { 53 | "Default": "0.0.8", 54 | "Description" : "Wordpress Image Version", 55 | "Type": "String", 56 | "AllowedValues" : [ "0.0.8" ] 57 | }, 58 | 59 | "MySQLVersion": { 60 | "Default": "0.0.3", 61 | "Description" : "MySQL Image Version", 62 | "Type": "String", 63 | "AllowedValues" : [ "0.0.3" ] 64 | }, 65 | 66 | "WWWSecGroup": { 67 | "Description" : "UUID of project www security group", 68 | "Type": "String", 69 | "Default": "@@WWWSecGroupId@@" 70 | }, 71 | 72 | "MySQLSecGroup": { 73 | "Description" : "UUID of project mysql security group", 74 | "Type": "String", 75 | "Default": "@@MySQLSecGroupId@@" 76 | }, 77 | 78 | "DefaultSecGroup": { 79 | "Description" : "UUID of project default security group", 80 | "Type": "String", 81 | "Default": "@@DefaultSecGroupId@@" 82 | } 83 | 84 | }, 85 | 86 | "Mappings" : { 87 | "WordpressVersionMap": { 88 | "0.0.8": {"kvm": "Wordpress-0.0.8-kvm"} 89 | }, 90 | "MySQLVersionMap": { 91 | "0.0.3": {"kvm": "MySQL-0.0.3-kvm"} 92 | } 93 | }, 94 | 95 | "Resources" : { 96 | 97 | "data": { 98 | "Type" : "OS::Quantum::Net", 99 | "Properties" : { 100 | "name" : "data", 101 | "admin_state_up": "true" 102 | } 103 | }, 104 | 105 | "subnet": { 106 | "Type": "OS::Quantum::Subnet", 107 | "Properties": { 108 | "name" : "data", 109 | "network_id" : { "Ref" : "data"}, 110 | "ip_version": 4, 111 | "cidr": "172.16.0.0/24", 112 | "allocation_pools" : [{"start": "172.16.0.2", "end": "172.16.0.150"}] 113 | } 114 | }, 115 | 116 | "MySQLPort": { 117 | "Type": "OS::Quantum::Port", 118 | "Properties": { 119 | "network_id": { "Ref" : "data" }, 120 | "security_groups" : [ { "Ref" : "DefaultSecGroup" }, { "Ref" : "MySQLSecGroup" } ], 121 | "admin_state_up": "true" 122 | } 123 | }, 124 | 125 | "WordpressPort1": { 126 | "Type": "OS::Quantum::Port", 127 | "Properties": { 128 | "network_id": { "Ref" : "FixedNetwork" }, 129 | "security_groups" : [ { "Ref" : "DefaultSecGroup" }, { "Ref" : "WWWSecGroup" } ], 130 | "admin_state_up": "true" 131 | } 132 | }, 133 | 134 | "WordpressPort2": { 135 | "Type": "OS::Quantum::Port", 136 | "Properties": { 137 | 
"network_id": { "Ref" : "data" }, 138 | "security_groups" : [ { "Ref" : "DefaultSecGroup" }, { "Ref" : "WWWSecGroup" } ], 139 | "admin_state_up": "true" 140 | } 141 | }, 142 | 143 | "Floating_IP": { 144 | "Type": "OS::Quantum::FloatingIP", 145 | "Properties": { 146 | "floating_network_id": { "Ref" : "FloatingNetwork" } 147 | } 148 | }, 149 | 150 | "floating_ip_assoc": { 151 | "Type": "OS::Quantum::FloatingIPAssociation", 152 | "Properties": { 153 | "floatingip_id": { "Ref" : "Floating_IP" }, 154 | "port_id": { "Ref" : "WordpressPort1" } 155 | } 156 | }, 157 | 158 | "MySQLServer": { 159 | "Type": "AWS::EC2::Instance", 160 | "Properties": { 161 | "ImageId" : {"Fn::FindInMap": ["MySQLVersionMap",{"Ref": "MySQLVersion"},"kvm"]}, 162 | "InstanceType" : { "Ref" : "InstanceType" }, 163 | "KeyName" : { "Ref" : "KeyName" }, 164 | "AvailabilityZone" : "nova", 165 | "NetworkInterfaces" : [ { "Ref" : "MySQLPort" } ] 166 | } 167 | }, 168 | 169 | "WordpressServer": { 170 | "Type": "AWS::EC2::Instance", 171 | "DependsOn": "MySQLServer", 172 | "Properties": { 173 | "ImageId" : {"Fn::FindInMap": ["WordpressVersionMap",{"Ref": "WordpressVersion"},"kvm"]}, 174 | "InstanceType" : { "Ref" : "InstanceType" }, 175 | "KeyName" : { "Ref" : "KeyName" }, 176 | "AvailabilityZone" : "nova", 177 | "NetworkInterfaces" : [ { "Ref" : "WordpressPort1" }, { "Ref" : "WordpressPort2" } ] 178 | } 179 | }, 180 | 181 | "WordpressDataVolume" : { 182 | "Type" : "AWS::EC2::Volume", 183 | "Properties" : { 184 | "Size" : { "Ref" : "WordpressVolumeSize" }, 185 | "AvailabilityZone" : { "Fn::GetAtt" : [ "WordpressServer", "AvailabilityZone" ]}, 186 | "Tags" : [{ "Key" : "Usage", "Value" : "Application Volume" }] 187 | } 188 | }, 189 | 190 | "MySQLDataVolume" : { 191 | "Type" : "AWS::EC2::Volume", 192 | "Properties" : { 193 | "Size" : { "Ref" : "MySQLVolumeSize" }, 194 | "AvailabilityZone" : { "Fn::GetAtt" : [ "MySQLServer", "AvailabilityZone" ]}, 195 | "Tags" : [{ "Key" : "Usage", "Value" : "Database Volume" }] 196 | } 197 | }, 198 | 199 | "WordpressMountPoint" : { 200 | "Type" : "AWS::EC2::VolumeAttachment", 201 | "Properties" : { 202 | "InstanceId" : { "Ref" : "WordpressServer" }, 203 | "VolumeId" : { "Ref" : "WordpressDataVolume" }, 204 | "Device" : "/dev/vdc" 205 | } 206 | }, 207 | 208 | "MySQLMountPoint" : { 209 | "Type" : "AWS::EC2::VolumeAttachment", 210 | "Properties" : { 211 | "InstanceId" : { "Ref" : "MySQLServer" }, 212 | "VolumeId" : { "Ref" : "MySQLDataVolume" }, 213 | "Device" : "/dev/vdc" 214 | } 215 | } 216 | }, 217 | 218 | "Outputs" : { 219 | "WebsiteURL" : { 220 | "Value" : { "Fn::Join" : ["", ["http://", { "Fn::GetAtt" : [ "WordpressServer", "PublicIp" ]} ]] }, 221 | "Description" : "URL for Wordpress blog" 222 | } 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /demos/heat-wordpress/prep-wordpress-project.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | DEMO_PROJECT=Wordpress 6 | DEMO_USER=suseuser 7 | DEMO_PASSWORD=$DEMO_USER # security is our top priority 8 | MYSQL_IMAGE_FILE=MySQL.x86_64-0.0.3.qcow2 9 | WORDPRESS_IMAGE_FILE=Wordpress.x86_64-0.0.8.qcow2 10 | MYSQL_IMAGE_NAME=MySQL-0.0.3-kvm 11 | WORDPRESS_IMAGE_NAME=Wordpress-0.0.8-kvm 12 | 13 | source ~/.openrc 14 | 15 | # FIXME: would be nicer to use Chef for all this. 16 | 17 | exists () { 18 | object="$1" name="$2" 19 | shift 2 20 | if ! 
list=$( openstack $object list "$@" -c Name -f csv ); then 21 | echo "openstack $object list failed; aborting" >&2 22 | exit 1 23 | fi 24 | if echo "$list" | grep -q "^\"${name}\""; then 25 | echo "'$name' $object already exists" 26 | return 0 27 | else 28 | return $? 29 | fi 30 | } 31 | 32 | if ! exists project $DEMO_PROJECT; then 33 | echo "Creating $DEMO_PROJECT project ..." 34 | openstack project create $DEMO_PROJECT 35 | fi 36 | 37 | if ! exists user $DEMO_USER; then 38 | echo "Creating $DEMO_USER user ..." 39 | openstack user create $DEMO_USER --password $DEMO_USER --project $DEMO_PROJECT 40 | fi 41 | 42 | # User already has _member_ role from --project above 43 | for role in admin; do 44 | if ! exists 'user role' $role --project $DEMO_PROJECT $DEMO_USER; then 45 | echo "Giving $DEMO_USER $role role in $DEMO_PROJECT project ..." 46 | # Cloud 4's new openstack CLI didn't support adding roles to users: 47 | #openstack user role add $role --user $DEMO_USER --project $DEMO_PROJECT 48 | # FIXME: worth revisiting with Cloud 5 49 | keystone user-role-add --user $DEMO_USER --role $role --tenant $DEMO_PROJECT 50 | fi 51 | done 52 | 53 | export OS_USERNAME="$DEMO_USER" 54 | export OS_PASSWORD="$DEMO_PASSWORD" 55 | export OS_TENANT_NAME="$DEMO_PROJECT" 56 | 57 | ensure_tcp_rule () { 58 | group="$1" dst_port="$2" 59 | if openstack security group rule list $group -f csv | 60 | grep -q '"tcp","0.0.0.0/0",'"\"$dst_port:$dst_port\""; then 61 | #if nova secgroup-list-rules $group | egrep -q " tcp +\| +\| $group "; then 62 | echo "TCP port $dst_port already allowed for $group" 63 | else 64 | echo "Allowing TCP port $dst_port for $group ..." 65 | openstack security group rule create --proto tcp --dst-port $dst_port $group 66 | #nova secgroup-add-rule $group 67 | fi 68 | } 69 | 70 | for group in MySQLSecGroup WWWSecGroup; do 71 | if ! exists 'security group' $group; then 72 | echo "Creating security group $group ..." 73 | openstack security group create $group 74 | fi 75 | 76 | if openstack security group rule list $group -f csv | grep -q '"icmp"'; then 77 | #if nova secgroup-list-rules $group | grep -q " icmp .* | $group "; then 78 | echo "ICMP already allowed for $group" 79 | else 80 | echo "Allowing ICMP for $group ..." 81 | openstack security group rule create --proto icmp --dst-port -1 $group 82 | fi 83 | 84 | ensure_tcp_rule $group 22 85 | done 86 | 87 | ensure_tcp_rule MySQLSecGroup 3306 88 | ensure_tcp_rule WWWSecGroup 80 89 | 90 | # Need to get security group ids to substitute into heat template. 91 | # Wanted to do: 92 | # 93 | # eval `openstack security group show $group -f shell --variable id` 94 | # 95 | # but it fails (only) for default group - bizarre. So we take a 96 | # different approach: 97 | 98 | eval $( 99 | openstack security group list -f csv -c ID -c Name | grep -v '^"ID"' | \ 100 | sed 's/^"\(.\+\)","\(.\+\)".*/\2_id=\1/' 101 | ) 102 | 103 | network_id () { 104 | network="$1" 105 | eval `neutron net-show $network -f shell -F id` 106 | echo "$id" 107 | } 108 | 109 | fixed_network_id=`network_id fixed` 110 | floating_network_id=`network_id floating` 111 | 112 | echo "Writing heat-template-wordpress.json from template ..." 113 | sed "s/@@DefaultSecGroupId@@/$default_id/; 114 | s/@@WWWSecGroupId@@/$WWWSecGroup_id/; 115 | s/@@MySQLSecGroupId@@/$MySQLSecGroup_id/; 116 | 117 | s/@@FloatingNetworkId@@/$floating_network_id/; 118 | s/@@FixedNetworkId@@/$fixed_network_id/; 119 | " heat-template-wordpress.json.tmpl \ 120 | > heat-template-wordpress.json 121 | 122 | if ! 
exists image $MYSQL_IMAGE_NAME; then 123 | echo "Uploading $MYSQL_IMAGE_FILE as $MYSQL_IMAGE_NAME ..." 124 | openstack image create \ 125 | --file $MYSQL_IMAGE_FILE \ 126 | --owner $DEMO_PROJECT \ 127 | $MYSQL_IMAGE_NAME 128 | fi 129 | 130 | if ! exists image $WORDPRESS_IMAGE_NAME; then 131 | echo "Uploading $WORDPRESS_IMAGE_FILE as $WORDPRESS_IMAGE_NAME ..." 132 | openstack image create \ 133 | --file $WORDPRESS_IMAGE_FILE \ 134 | --owner $DEMO_PROJECT \ 135 | $WORDPRESS_IMAGE_NAME 136 | fi 137 | 138 | if ! exists keypair $DEMO_USER; then 139 | openstack keypair create $DEMO_USER > $DEMO_USER.pem 140 | fi 141 | -------------------------------------------------------------------------------- /demos/heat-wordpress/suse_techtalk_orchestrating_service_deployment_in_suse_cloud.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE-Cloud/suse-cloud-appliances/ab2eb83db8687a6d535968ba4514ffffed7fb8ea/demos/heat-wordpress/suse_techtalk_orchestrating_service_deployment_in_suse_cloud.pdf -------------------------------------------------------------------------------- /demos/lib/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | VAGRANT_SSH_CONFIG=/tmp/ssh-config.vagrant 4 | 5 | die () { 6 | echo >&2 "$*" 7 | exit 1 8 | } 9 | 10 | parse_opts () { 11 | while [ $# != 0 ]; do 12 | case "$1" in 13 | -h|--help) 14 | usage 0 15 | ;; 16 | -*) 17 | usage "Unrecognised option: $1" 18 | ;; 19 | *) 20 | break 21 | ;; 22 | esac 23 | done 24 | 25 | ARGV=( "$@" ) 26 | } 27 | 28 | vagrant () { 29 | cd $vagrant_dir 30 | 31 | if [ -n "$VAGRANT_USE_BUNDLER" ]; then 32 | VAGRANT_I_KNOW_WHAT_IM_DOING_PLEASE_BE_QUIET=1 \ 33 | bundle exec vagrant "$@" 34 | else 35 | command vagrant "$@" 36 | fi 37 | } 38 | 39 | read_libvirt_docs () { 40 | cat <&2 41 | 42 | Make sure you have read docs/vagrant-libvirt.md, which is available 43 | online here: 44 | 45 | https://github.com/SUSE-Cloud/suse-cloud-vagrant/blob/master/docs/vagrant-libvirt.md 46 | EOF 47 | exit 1 48 | } 49 | 50 | init_bundler () { 51 | if [ -z "$VAGRANT_USE_BUNDLER" ]; then 52 | return 53 | fi 54 | 55 | if ! which bundle >/dev/null 2>&1; then 56 | echo "Bundler is required! Please install first." >&2 57 | read_libvirt_docs 58 | fi 59 | 60 | cd $vagrant_dir 61 | if ! bundle install --path vendor/bundle; then 62 | echo "bundle install failed; cannot proceed." >&2 63 | read_libvirt_docs 64 | fi 65 | } 66 | 67 | check_hypervisor () { 68 | case "$hypervisor" in 69 | kvm) 70 | check_nested_kvm 71 | check_ksm 72 | export VAGRANT_DEFAULT_PROVIDER=libvirt 73 | ;; 74 | virtualbox) 75 | check_virtualbox_version 76 | check_virtualbox_env 77 | export VAGRANT_DEFAULT_PROVIDER=virtualbox 78 | ;; 79 | *) 80 | usage "Unrecognised hypervisor '$hypervisor'" 81 | ;; 82 | esac 83 | } 84 | 85 | use_bundler_with_kvm () { 86 | if [ $hypervisor = kvm ]; then 87 | # Required so vagrant-libvirt can deal with shared disks. 88 | export VAGRANT_USE_BUNDLER=yes 89 | init_bundler 90 | fi 91 | } 92 | 93 | on_linux () { 94 | [ "`uname -s`" = Linux ] 95 | } 96 | 97 | check_nested_kvm () { 98 | if ! on_linux; then 99 | return 100 | fi 101 | 102 | if grep -q Intel /proc/cpuinfo; then 103 | cpu=intel 104 | elif grep -q AMD /proc/cpuinfo; then 105 | cpu=amd 106 | else 107 | echo "WARNING: couldn't detect either Intel or AMD CPU; skipping nested KVM check" >&2 108 | return 109 | fi 110 | 111 | kmod=kvm-$cpu 112 | nested=/sys/module/kvm_$cpu/parameters/nested 113 | if ! 
[ -e $nested ]; then 114 | cat <&2 115 | Your host's kvm_$cpu kernel module is not loaded! 116 | 117 | Make sure its nested parameter is enabled and then load it, e.g. by 118 | running these commands: 119 | 120 | # Needs root access, so su or sudo first! 121 | echo "options $kmod nested=1" > /etc/modprobe.d/90-nested-kvm.conf 122 | 123 | # Load the kernel module 124 | modprobe $kmod 125 | 126 | Then re-run this script. 127 | EOF 128 | exit 1 129 | fi 130 | 131 | if ! grep -q Y $nested; then 132 | cat <&2 133 | Your host's kvm_$cpu kernel module needs the nested parameter enabled. 134 | To enable it, run these commands: 135 | 136 | # Needs root access, so su or sudo first! 137 | echo "options $kmod nested=1" > /etc/modprobe.d/90-nested-kvm.conf 138 | # Reload the kernel module (shutdown any running VMs first): 139 | rmmod $kmod 140 | modprobe $kmod 141 | 142 | Then re-run this script. 143 | EOF 144 | exit 1 145 | fi 146 | } 147 | 148 | check_ksm () { 149 | if ! on_linux; then 150 | return 151 | fi 152 | 153 | if ! grep -q 1 /sys/kernel/mm/ksm/run; then 154 | cat <<'EOF' 155 | You don't have Kernel SamePage Merging (KSM) enabled! 156 | This could reduce your memory usage quite a bit. 157 | To enable it, hit Control-C and then run this command as 158 | root: 159 | 160 | echo 1 > /sys/kernel/mm/ksm/run 161 | 162 | Alternatively, press Enter to proceed regardless ... 163 | EOF 164 | read 165 | fi 166 | } 167 | 168 | check_virtualbox_version () { 169 | if ! version=$( VBoxManage --help | head -n1 | awk '{print $NF}' ); then 170 | echo "WARNING: Couldn't determine VirtualBox version; carrying on anyway." >&2 171 | return 172 | fi 173 | 174 | case "$version" in 175 | [1-3].*|4.[012].*) 176 | die "Your VirtualBox is old ($version); please upgrade to the most recent version!" 177 | ;; 178 | 4.3.[0-9]) 179 | echo "WARNING: Your VirtualBox is old-ish. Please consider upgrading." >&2 180 | ;; 181 | [4-9].*) 182 | # New enough! 183 | ;; 184 | *) 185 | die "Unrecognised VirtualBox version '$version'" 186 | ;; 187 | esac 188 | } 189 | 190 | check_virtualbox_env () { 191 | if ! on_linux; then 192 | return 193 | fi 194 | 195 | if ! groups | grep -q vboxusers; then 196 | die "The current user does not have access to the 'vboxusers' group. This is necessary for VirtualBox to function correctly." 197 | fi 198 | 199 | if ! which lsmod >/dev/null 2>&1; then 200 | die "BUG: on Linux but no lsmod found?! Huh?" 201 | fi 202 | 203 | if ! lsmod | grep -q vboxdrv; then 204 | die "Your system doesn't have the vboxdrv kernel module loaded. This is necessary for VirtualBox to function correctly." 205 | fi 206 | } 207 | 208 | check_vagrant_config () { 209 | cd $vagrant_dir 210 | if [ -n "$VAGRANT_CONFIG_FILE" ]; then 211 | if [ ! -e "$VAGRANT_CONFIG_FILE" ]; then 212 | echo "VAGRANT_CONFIG_FILE points to non-existent file $VAGRANT_CONFIG_FILE" >&2 213 | die "It should be an absolute path or relative to $vagrant_dir." 214 | fi 215 | if ! grep -q '^vms: *$' "$VAGRANT_CONFIG_FILE"; then 216 | die "$VAGRANT_CONFIG_FILE is not a valid config for Vagrantfile" 217 | fi 218 | fi 219 | } 220 | 221 | vagrant_ssh_config () { 222 | vagrant ssh-config > $VAGRANT_SSH_CONFIG 223 | } 224 | 225 | setup_node_aliases () { 226 | if ! 
vssh admin sudo /root/bin/setup-node-aliases.sh; then 227 | die "Failed to set up node aliases; aborting" 228 | fi 229 | } 230 | 231 | setup_node_sh_vars () { 232 | if vssh admin sudo \ 233 | sh -c '/root/bin/node-sh-vars > /tmp/.crowbar-nodes-roles.cache' 234 | then 235 | echo "Wrote node/role shell variables to /tmp/.crowbar-nodes-roles.cache" 236 | else 237 | die "Failed to set up node shell variables; aborting" 238 | fi 239 | } 240 | 241 | switch_to_kvm_if_required () { 242 | if [ $hypervisor != kvm ]; then 243 | return 244 | fi 245 | 246 | echo "Can do nested hardware virtualization; switching to KVM ..." 247 | # I tried and failed to do this with a glob and a single call to 248 | # sudo - saw some inexplicable interaction between ssh/sudo/sh. 249 | # If anyone can show me how I'd be grateful! 250 | if ! vssh admin \ 251 | 'sudo find /root -maxdepth 1 -name *.yaml | 252 | xargs sudo sed -i \ 253 | "s/nova-multi-compute-.*:/nova-multi-compute-kvm:/"' 254 | then 255 | die "Failed to switch YAML files to use KVM" 256 | fi 257 | } 258 | 259 | batch_build_proposals () { 260 | yaml="$1" 261 | 262 | if ! vssh admin sudo stdbuf -oL \ 263 | crowbar batch --timeout 1200 build $yaml 264 | then 265 | die "Failed to set up proposals; aborting" 266 | fi 267 | } 268 | 269 | vssh () { 270 | ssh -F $VAGRANT_SSH_CONFIG "$@" 271 | } 272 | 273 | vscp () { 274 | scp -F $VAGRANT_SSH_CONFIG "$@" 275 | } 276 | 277 | vrsync () { 278 | rsync -e "ssh -F $VAGRANT_SSH_CONFIG" "$@" 279 | } 280 | -------------------------------------------------------------------------------- /docs/FAQ.md: -------------------------------------------------------------------------------- 1 | # FAQ 2 | 3 | Here are some commonly encountered issues, and suggested solutions. 4 | If none of them help, try this [guide to debugging](debugging.md). 5 | 6 | ## Vagrant tells me to run `vagrant init` 7 | 8 | You're running `vagrant` from the wrong directory. You should be 9 | in the [`vagrant/` subdirectory of the git repository](../vagrant/), 10 | which contains the `Vagrantfile`, the `demos/` subdirectory etc. 11 | 12 | ## I have issues with Vagrant and libvirt 13 | 14 | Please check [this `vagrant-libvirt` page](vagrant-libvirt.md). 15 | 16 | ## One of my Crowbar controller nodes won't rejoin the HA cluster 17 | 18 | Please see [this guide to cluster recovery](../demos/HA/cluster-recovery.md). 19 | 20 | ## I get a GRUB error on boot of a VM 21 | 22 | Most likely your box download got corrupted or truncated. 23 | 24 | ## Vagrant fails during the creation of my controllers 25 | 26 | If the Vagrant build fails with the following error: 27 | 28 | Defined interface to use (eth1) does not seem to be on the admin network. 29 | Is DHCP used for it? 30 | 31 | Then VirtualBox has a host-only network conflicting with the one from 32 | Vagrant. Please refer to the **Vagrant `virtualbox` provider** section 33 | in [the prerequisites](prerequisites.md) to fix this, and then restart 34 | the Vagrant build. 
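If you want to inspect or remove the conflicting host-only network directly, the VirtualBox CLI can help. A small sketch (the interface name `vboxnet0` below is only an example; check the output of the first command for whichever interface actually overlaps 192.168.124.0/24):

    # List existing host-only interfaces and their IP ranges
    VBoxManage list hostonlyifs

    # Remove the conflicting, unused interface, e.g.:
    VBoxManage hostonlyif remove vboxnet0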
35 | 36 | ## Vagrant fails to create a host-only network on VirtualBox 37 | 38 | Progress state: NS_ERROR_FAILURE 39 | VBoxManage: error: Failed to create the host-only adapter 40 | VBoxManage: error: VBoxNetAdpCtl: Error while adding new interface: VBoxNetAdpCtl: ioctl failed for /dev/vboxnetctl: Inappropriate ioctl for devic 41 | VBoxManage: error: Details: code NS_ERROR_FAILURE (0x80004005), component HostNetworkInterface, interface IHostNetworkInterface 42 | VBoxManage: error: Context: "int handleCreate(HandlerArg*, int, int*)" at line 66 of file VBoxManageHostonly.cpp 43 | 44 | Firstly make sure you don't have another network (e.g. bridge 45 | interface) configured to use 192.168.124.0/24. If not, try 46 | restarting VirtualBox services. On MacOS X: 47 | 48 | sudo /Library/StartupItems/VirtualBox/VirtualBox restart 49 | 50 | On SUSE / Red Hat: 51 | 52 | service vboxdrv restart 53 | 54 | ## `crowbar batch` fails on start 55 | 56 | If `crowbar batch` fails with an error like: 57 | 58 | /opt/dell/bin/barclamp_lib.rb:536:in `eval': (): found character that cannot start any token while scanning for the next token at line 19 column 11 (Psych::SyntaxError) 59 | from /usr/lib64/ruby/2.1.0/psych.rb:370:in `parse_stream' 60 | from /usr/lib64/ruby/2.1.0/psych.rb:318:in `parse' 61 | from /usr/lib64/ruby/2.1.0/psych.rb:245:in `load' 62 | from /opt/dell/bin/crowbar_batch:100:in `build' 63 | from (eval):1:in `run_sub_command' 64 | from /opt/dell/bin/barclamp_lib.rb:536:in `eval' 65 | from /opt/dell/bin/barclamp_lib.rb:536:in `run_sub_command' 66 | from /opt/dell/bin/barclamp_lib.rb:540:in `run_command' 67 | from /opt/dell/bin/crowbar_batch:553:in `main' 68 | from /opt/dell/bin/crowbar_batch:556:in `
' 69 | 70 | This is most likely happening because the aliases for the nodes were not 71 | created. Simply run the `setup-node-aliases.sh` command on the node 72 | `admin` and then run `crowbar batch` again. 73 | -------------------------------------------------------------------------------- /docs/HOWTO.md: -------------------------------------------------------------------------------- 1 | # How to automatically deploy SUSE OpenStack Cloud via Vagrant 2 | 3 | ## Prerequisites 4 | 5 | Please see the [prerequisites page](prerequisites.md) for information 6 | on hardware requirements and how to set up Vagrant to work with your 7 | hypervisor. 8 | 9 | ## SUSE OpenStack Cloud installation 10 | 11 | N.B. The following steps describe semi-automatic booting of the cloud 12 | infrastructure via Vagrant. Another more fully automated option is 13 | to use one of the [pre-canned demos](../demos/README.md). 14 | 15 | * Tell Vagrant which provider you want to use, e.g. 16 | 17 | export VAGRANT_DEFAULT_PROVIDER=libvirt 18 | 19 | With Vagrant 1.6.x, VirtualBox is the default, although future 20 | versions (i.e. 1.7.x) will instead automatically detect the best 21 | default provider. 22 | * Depending on what cloud configuration you desire, either use Vagrant 23 | to sequentially provision all four VMs from the default configuration 24 | (1 admin + 2 controllers + 1 compute node) in one go: 25 | 26 | cd vagrant 27 | vagrant up 28 | 29 | or keep reading to find out how to choose which VMs to 30 | provision. 31 | 32 | There is always exactly one admin server node. The quantity, shape, 33 | and size of all other nodes are determined by a YAML config file. The 34 | default is 35 | [`2-controllers-1-compute.yaml`](../vagrant/configs/2-controllers-1-compute.yaml) 36 | but there are other examples in 37 | [the same directory](../vagrant/configs/). 38 | 39 | You can change the number of controller nodes and compute nodes from 40 | the defaults of 2 and 1 respectively by editing this file or by 41 | pointing the `Vagrantfile` at an alternative config file: 42 | 43 | export VAGRANT_CONFIG_FILE=/path/to/other/vagrant.yaml 44 | 45 | `vagrant up --no-parallel` will cause all the VMs to be provisioned in 46 | the order listed in the YAML config file. Typically this is: 47 | 48 | 1. `admin` - the Crowbar admin server node. After boot-up, 49 | `install-suse-cloud` will automatically run. This takes quite a 50 | few minutes to complete, since it has to start several services. 51 | Once you see the next VM start to boot, you know it has completed 52 | installation, at which point you can visit 53 | [the Crowbar web UI](http://192.168.124.10:3000/) (username and 54 | password are both `crowbar` by default) and watch the other nodes 55 | come online one by one. 56 | 2. The controller node(s) in numerical order: `controller1`, then 57 | `controller2` etc. These will run the OpenStack infrastructure 58 | services, typically within a Pacemaker cluster. 59 | 3. The compute nodes in numerical order: `compute1`, then `compute2` 60 | etc. 61 | 62 | It will take a few minutes to provision each VM, since not only does 63 | Vagrant need to copy a fresh virtual disk from the box for each VM, 64 | but also on first boot the VMs will register against Crowbar and then 65 | perform some orchestrated setup via Chef. 66 | 67 | Alternatively, you can provision each VM individually, e.g. 68 | 69 | vagrant up admin 70 | vagrant up controller1 71 | 72 | and so on.
Similarly, you can rebuild an individual VM from scratch 73 | in the normal Vagrant way, e.g. 74 | 75 | vagrant destroy compute1 76 | vagrant up compute1 77 | 78 | or the entire cloud in one go: 79 | 80 | vagrant destroy 81 | vagrant up --no-parallel 82 | 83 | **CAUTION!** If you are using libvirt, please see [this page about 84 | using `vagrant-libvirt`](vagrant-libvirt.md) for known pitfalls. 85 | 86 | ## Connecting to the VMs 87 | 88 | Of course you can log in on the VM consoles, via the hypervisor's 89 | GUI. 90 | 91 | You can also ssh via `vagrant`, e.g.: 92 | 93 | vagrant ssh admin 94 | vagrant ssh controller1 -- -l root 95 | 96 | or directly to the admin node: 97 | 98 | ssh root@192.168.124.10 99 | 100 | The root password is `vagrant`, as per 101 | [convention](https://docs.vagrantup.com/v2/boxes/base.html). 102 | 103 | ## Setting up node aliases 104 | 105 | By default, Crowbar and Chef name nodes according to the MAC address 106 | of their primary interface. This is not very human-friendly, so 107 | Crowbar offers the option of assigning aliases (e.g. `controller1`, 108 | `compute1` etc.) to nodes. This Vagrant environment provides a simple 109 | script to automate that: once you have booted your controller and 110 | compute nodes, simply `ssh` to the admin server as per above, and run: 111 | 112 | setup-node-aliases.sh 113 | 114 | After you have done this, the admin server's DNS tables will update, 115 | and you will be able to `ssh` conveniently from the admin node to 116 | other nodes, e.g. 117 | 118 | ssh controller1 119 | 120 | etc. 121 | 122 | N.B. The `./build.sh` demo scripts take care of this automatically. 123 | 124 | ## Setting up node/role shell variables (OPTIONAL) 125 | 126 | If you want to poke around behind the scenes and see how Crowbar uses 127 | Chef, you can use the `knife` command on the admin server. However it 128 | can quickly get tedious figuring out the correct node or role name 129 | corresponding to, say, `controller2`. Therefore the `Vagrantfile` 130 | automatically installs a handy 131 | [`node-sh-vars` script](../vagrant/provisioning/admin/node-sh-vars) 132 | which can set convenient shell variables for the node and role names. 133 | Once you have booted your controller and compute nodes, simply `ssh` 134 | to the admin server as per above, and run: 135 | 136 | node-sh-vars > /tmp/.crowbar-nodes-roles.cache 137 | source /tmp/.crowbar-nodes-roles.cache 138 | 139 | Now you can easily look at Chef nodes via `knife`, e.g. 140 | 141 | knife node show $controller1 142 | knife node list 143 | 144 | and similarly for roles: 145 | 146 | knife role show $controller1r 147 | knife role list 148 | 149 | The `Vagrantfile` also sets up `.bash_profile` so that 150 | `/tmp/.crowbar-nodes-roles.cache` is automatically `source`d 151 | on login. 152 | 153 | N.B. The `./build.sh` demo scripts take care of this automatically. 154 | 155 | ## Trying out SUSE OpenStack Cloud 156 | 157 | * Browse around [the Crowbar web UI](http://192.168.124.10:3000/) 158 | (username and password are given above) 159 | * See the provided resources for 160 | [automatically preparing and presenting demos](../demos/README.md) of 161 | functionality within SUSE OpenStack Cloud. 162 | * Read the 163 | [official SUSE OpenStack Cloud product documentation](https://www.suse.com/documentation/suse-cloud-5/). 
164 | -------------------------------------------------------------------------------- /docs/debugging.md: -------------------------------------------------------------------------------- 1 | # A crash course in debugging SUSE OpenStack Cloud issues 2 | 3 | **Please first check [the FAQ](FAQ.md).** 4 | 5 | This is a very minimal guide to debugging SUSE OpenStack Cloud issues. 6 | You can also consult the 7 | [official product documentation](https://www.suse.com/documentation/suse-cloud-5/book_cloud_deploy/data/cha_depl_trouble.html). 8 | 9 | ## Architecture 10 | 11 | Understanding 12 | [SUSE OpenStack Cloud's architecture](https://www.suse.com/documentation/suse-cloud-5/book_cloud_deploy/data/cha_depl_arch.html) 13 | is an essential part of being able to effectively debug issues. 14 | 15 | ## Debugging Chef 16 | 17 | There are three places where you might need to look for Chef 18 | client failures: 19 | 20 | 1. Applying Crowbar barclamp proposals will cause `chef-client` to 21 | immediately run on all nodes which that proposal affects. 22 | If something goes wrong, the following log files on the admin 23 | server node should prove informative: 24 | 25 | * `/var/log/crowbar/chef-client/*.log` (there is one per node) 26 | 27 | 2. Crowbar causes `chef-client` to run on each node on boot up, when 28 | the node registers against Crowbar via the `crowbar_join` service. 29 | The logs for this are found on the node itself, under 30 | 31 | * `/var/log/crowbar/crowbar_join/` 32 | 33 | 3. Chef will also automatically run every 15 minutes on each node, 34 | and log to `/var/log/chef/client.log` on the node itself. 35 | 36 | ## Debugging Crowbar 37 | 38 | This is the main log for the Crowbar server: 39 | 40 | * `/var/log/crowbar/production.log` 41 | 42 | ## Debugging Pacemaker 43 | 44 | Pacemaker logs to `/var/log/messages` on each node in the cluster. 45 | [Hawk](../demos/HA/README.md#hawk-web-ui) has some very useful history 46 | exploration functionality which makes it easier to get a 47 | chronologically sorted, cluster-wide view of events. 48 | 49 | Please also see [this guide to cluster recovery](../demos/HA/cluster-recovery.md). 50 | 51 | ## Other log files 52 | 53 | See the official SUSE OpenStack Cloud documentation for 54 | [a full list of log files](https://www.suse.com/documentation/suse-cloud-5/book_cloud_deploy/data/cha_deploy_logs.html). 
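For example, to watch Chef and Crowbar activity in real time while a proposal is being applied, you can follow the logs listed above from the admin server (a minimal sketch using only the paths documented here):

    # On the admin node, as root
    tail -f /var/log/crowbar/chef-client/*.log /var/log/crowbar/production.log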
55 | -------------------------------------------------------------------------------- /docs/vagrant-libvirt.md: -------------------------------------------------------------------------------- 1 | # Using Vagrant with libvirt 2 | 3 | There is a nice 4 | [`vagrant-libvirt` plugin](https://github.com/pradels/vagrant-libvirt) 5 | available, which you could install via: 6 | 7 | vagrant plugin install vagrant-libvirt 8 | 9 | **However** there are several known bugs with the plugin for which the 10 | fixes are not yet released, including: 11 | 12 | * [#252](https://github.com/pradels/vagrant-libvirt/pull/252) and 13 | [#256](https://github.com/pradels/vagrant-libvirt/pull/256) 14 | -- required if sharing disks between VMs (in particular, **the 15 | [HA demo](../demos/HA/) requires a shared SBD disk for cluster 16 | fencing**) 17 | * [#255](https://github.com/pradels/vagrant-libvirt/pull/255) 18 | -- required if using the plugin from git 19 | * [#261](https://github.com/pradels/vagrant-libvirt/pull/261) 20 | -- required if using a pre-release version of Vagrant from its 21 | [git `master` branch](https://github.com/mitchellh/vagrant) 22 | * [#262](https://github.com/pradels/vagrant-libvirt/pull/262) 23 | -- required if using ruby-libvirt >= 0.5 24 | * [#263](https://github.com/pradels/vagrant-libvirt/pull/263) 25 | -- required to allow setting cache mode on additional disks 26 | 27 | ## Addressing known bugs 28 | 29 | If you are using one of the [pre-canned demos](../demos/) then these 30 | steps are taken care of automatically by `build.sh`. 31 | 32 | Otherwise, if any of these bugs affect you (especially if you are 33 | planning to try [the HA demo](../demos/HA/)), you have two options. 34 | One is to simply use VirtualBox instead. This will provide an easier 35 | installation, although there are some minor drawbacks 36 | [already mentioned in the prerequisites document](prerequisites.md#hypervisor). 37 | 38 | The other option is to use specially patched unofficial versions of 39 | `vagrant-libvirt` and associated gems. This is more complicated, 40 | although we have made it easier by automating the installation via 41 | Ruby's *bundler* utility and 42 | [the provided `Gemfile` / `Gemfile.lock`](../vagrant/Gemfile). 43 | 44 | ### Installing patched gems via `bundle install` 45 | 46 | Firstly you will need some development packages, since some of the 47 | gems need to be compiled against pre-installed libraries. 48 | 49 | On openSUSE: 50 | 51 | sudo zypper install {ruby,libvirt,libxml2,libxslt}-devel rubygem-bundler 52 | 53 | On Fedora: 54 | 55 | sudo yum install {ruby,libvirt,libxml2,libxslt}-devel rubygem-bundler 56 | 57 | On Ubuntu: 58 | 59 | sudo apt-get install libxslt-dev ruby-dev libxml2-dev ruby-bundler 60 | 61 | Then install the gems described in 62 | [the provided `Gemfile` / `Gemfile.lock`](../vagrant/Gemfile): 63 | 64 | # First cd to the directory where you cloned the git repository! 65 | cd vagrant 66 | bundle install --path vendor/bundle 67 | 68 | If during installation, you get errors associated with `nokogiri`, 69 | try: 70 | 71 | bundle config build.nokogiri --use-system-libraries 72 | 73 | and then re-run the installation. 74 | 75 | Once bundler has finished installing, every time you want to run 76 | vagrant, you **must** be in this same directory which contains 77 | `Gemfile.lock`, and you **must** prefix **every** `vagrant` command 78 | with `bundle exec`, e.g.: 79 | 80 | bundle exec vagrant up admin 81 | bundle exec vagrant status 82 | 83 | etc. 
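If typing the `bundle exec` prefix gets tedious, one optional convenience (purely an illustration, not something this repository requires) is a shell alias, which is only meaningful while your current directory is the one containing `Gemfile.lock`:

    alias vagrant='bundle exec vagrant'
    vagrant status    # now expands to: bundle exec vagrant status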
84 | 85 | #### Updating an existing box 86 | 87 | **CAUTION!** If you are using libvirt, it is not sufficient to do 88 | `vagrant box update` or `vagrant box add --force`, or even `vagrant box 89 | remove`; you will also have to manually remove the old image from 90 | `/var/lib/libvirt/images` and then do: 91 | 92 | virsh pool-refresh default 93 | 94 | before adding the new version, due to 95 | [this bug](https://github.com/pradels/vagrant-libvirt/issues/85#issuecomment-55419054). 96 | 97 | ## Trouble-shooting other problems with `vagrant-libvirt` 98 | 99 | * **`vagrant up` results in an error `Call to virStorageVolGetInfo 100 | failed: cannot stat file`.** 101 | 102 | This is https://github.com/pradels/vagrant-libvirt/issues/51 and 103 | your libvirt default pool is probably corrupt (e.g. by directly 104 | deleting image files from it without `virsh vol-delete`). Do 105 | `virsh pool-refresh default` and try again. 106 | 107 | * **`vagrant up` fails with some other error.** 108 | 109 | vagrant-libvirt is not yet robust at cleaning up after failures, 110 | especially concerning disks. The following process can often 111 | clear a transient error: 112 | 113 | * `vagrant destroy` 114 | * Manually delete any associated disk volumes which got 115 | left behind in `/var/lib/libvirt/images`. 116 | * `virsh pool-refresh default` 117 | * `vagrant up` 118 | 119 | See [this bug](https://github.com/pradels/vagrant-libvirt/issues/85#issuecomment-55419054) 120 | for more information. 121 | -------------------------------------------------------------------------------- /kiwi/.gitignore: -------------------------------------------------------------------------------- 1 | */image 2 | config.xml 3 | -------------------------------------------------------------------------------- /kiwi/README.md: -------------------------------------------------------------------------------- 1 | # KIWI appliances 2 | 3 | This subdirectory contains two 4 | [KIWI](https://en.opensuse.org/Portal:KIWI) virtual appliance image 5 | definitions which are used to build `.vmdk` virtual disks. 6 | [These will then be converted into Vagrant boxes](../vagrant/) which 7 | are used to build a whole cloud via Vagrant. 8 | 9 | However, since these files were originally built, 10 | [KIWI has learnt how to build Vagrant boxes directly](https://github.com/openSUSE/kiwi/pull/353), 11 | and so has [OBS](http://openbuildservice.org/), so it is not any 12 | longer recommended to build boxes using only these files. Some of 13 | these files are still used by the new build process; others are left 14 | here for posterity. 15 | 16 | If you want to build the boxes yourself, it is strongly recommended 17 | that you 18 | [contact us](https://forums.suse.com/forumdisplay.php?65-SUSE-OpenStack-Cloud) 19 | for help first, otherwise there is a risk you will waste a lot of your 20 | own time. 21 | 22 | ## What is KIWI? 23 | 24 | Here's [a quick introduction to KIWI](http://doc.opensuse.org/projects/kiwi/doc/#chap.introduction) 25 | in case you need it. 26 | 27 | ## Installing KIWI 28 | 29 | Unfortunately KIWI currently only runs on SUSE-based systems. There 30 | is definitely appetite to port it to other distributions but noone has 31 | had the time to do so yet. Another interesting option for the future 32 | would be to rebuild this appliance using [Packer](http://www.packer.io/). 
33 | [Pull requests](https://help.github.com/articles/using-pull-requests) 34 | are very welcome - just [fork this repository](https://github.com/fghaas/openstacksummit2014-atlanta/fork)! 35 | 36 | 1. If you're on SLES12 SP2, add the openSUSE:Tools repository first 37 | to get the latest KIWI version. For example: 38 | 39 | sudo zypper ar http://download.opensuse.org/repositories/openSUSE:/Tools/SLE_12_SP2/ openSUSE:Tools 40 | 41 | If you're on openSUSE 13.1, you should already have the Updates 42 | repository containing the latest KIWI version. 43 | 44 | 2. Now install the required KIWI packages: 45 | 46 | sudo zypper in kiwi kiwi-desc-vmxboot 47 | 48 | ## Building the virtual appliances 49 | 50 | There are two different virtual appliances defined within this 51 | subdirectory: 52 | 53 | * [`cloud-admin`](cloud-admin/) - the SUSE OpenStack Cloud admin node, 54 | which runs Crowbar and Chef, and 55 | * [`sles12-sp2`](sles12-sp2/) - a cut-down preload image of SUSE Linux 56 | Enterprise Server (SLES) 12 SP2, which will be used to provision 57 | two controller nodes (forming an HA cluster), and a compute node. 58 | 59 | Instructions for building them are contained within the READMEs in 60 | those subdirectories. 61 | -------------------------------------------------------------------------------- /kiwi/build-lib.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # ======================================================================== 4 | # Shell functions for building Crowbar appliances 5 | # 6 | # Requires Kiwi (http://opensuse.github.io/kiwi/). 7 | # 8 | # Build performance: 9 | # - Automatically builds in RAM (tmpfs) if there's enough free memory. This 10 | # brings the build time down significantly, especially without SSDs. 11 | # - Automatically creates and uses Kiwi's boot image cache, which saves about 1 12 | # min with tmpfs. 13 | # - Had added support for Kiwi's image cache, but this had negligible speed-up 14 | # in my setup so removed the code to reduce complexity. 15 | # - Had added auto-detection of pigz (parallel gzip), but this also had 16 | # negligible speed-up in my setup so excluded it too. 17 | # 18 | # ======================================================================== 19 | 20 | function ensure_root { 21 | if [ $USER != 'root' ]; then 22 | echo "Please run as root." 23 | exit 1 24 | fi 25 | } 26 | 27 | function check_kiwi { 28 | ensure_root 29 | kiwi=`command -v kiwi` 30 | if [ $? -ne 0 ]; then 31 | echo "Kiwi is required but not found on your system." 32 | echo "Run the following command to install kiwi:" 33 | echo 34 | echo " zypper install kiwi kiwi-tools kiwi-desc-vmxboot" 35 | echo 36 | exit 1 37 | fi 38 | } 39 | 40 | function warn { 41 | echo >&2 -e "$*" 42 | } 43 | 44 | function unclean_exit { 45 | local exit_code=$? 46 | 47 | warn "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 48 | warn "\nWARNING: premature termination!" 49 | if df $TMP_DIR | egrep -q "^tmpfs"; then 50 | warn "\nLeaving $TMP_DIR mounted." 51 | warn "You must umount it yourself in order to free RAM." 
52 | fi 53 | warn "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" 54 | 55 | exit $exit_code 56 | } 57 | 58 | function clean_up { 59 | echo >&2 -e "$*" 60 | 61 | # Only clean up once 62 | [ "$CLEAN" ] && return 63 | CLEAN=true 64 | 65 | echo "** Cleaning up" 66 | 67 | # Save the image and log files 68 | mkdir -p $OUTPUT_DIR 69 | mv $TMP_DIR/{build/image-root.log,*.qcow2,*.vmdk} $OUTPUT_DIR 2>/dev/null || true 70 | local USER=`stat -c %U .` 71 | local GROUP=`stat -c %G .` 72 | chown -R $USER.$GROUP $OUTPUT_DIR 73 | 74 | # Save initrd as boot image cache 75 | rsync -ql $TMP_DIR/initrd-* $BOOT_CACHE_DIR 2>/dev/null || true 76 | 77 | if df $TMP_DIR | egrep -q "^tmpfs"; then 78 | umount $TMP_DIR 79 | fi 80 | rm -rf $TMP_DIR 81 | } 82 | 83 | function create_tmpfs { 84 | mkdir -p $TMP_DIR 85 | 86 | local ram_required=$((TMPFS_SIZE)) 87 | local free_ram=`free -m | awk '/^-\/\+ buffers/{print $4}'` 88 | echo "** Free RAM: $free_ram MB; RAM required: $ram_required MB" 89 | if [ "$free_ram" -lt "$ram_required" ]; then 90 | echo "** tmpfs: Skipping, insufficient free RAM" 91 | return 92 | fi 93 | 94 | if df $TMP_DIR | egrep -q "^tmpfs"; then 95 | echo "** tmpfs: Reusing existing mount point" 96 | else 97 | echo "** tmpfs: Creating new volume ($TMPFS_SIZE MB)" 98 | mount -t tmpfs -o size=${TMPFS_SIZE}M,noatime tmpfs $TMP_DIR 99 | fi 100 | } 101 | 102 | function fill_config_xml_template () { 103 | # crowbar-prep.sh uses HOST_MIRROR for the same purpose, 104 | # so reuse that as a default if it's set. 105 | : ${MIRRORS:=${HOST_MIRROR:-/data/install/mirrors}} 106 | sed "s,@@MIRRORS@@,$MIRRORS," $here/source/config.xml.tmpl \ 107 | > $here/source/config.xml 108 | } 109 | 110 | function run_kiwi { 111 | if [ -z "$NO_TMPFS" ]; then 112 | create_tmpfs 113 | fi 114 | mkdir -p $BOOT_CACHE_DIR 115 | echo "** Running kiwi (with $BOOT_CACHE_DIR as boot image cache)" 116 | time $kiwi --build source/ -d $TMP_DIR --prebuiltbootimage $BOOT_CACHE_DIR "$@" 117 | echo "** Appliance created successfully!" 118 | } 119 | 120 | build_image () { 121 | check_kiwi 122 | trap unclean_exit SIGINT SIGTERM 123 | fill_config_xml_template 124 | run_kiwi "$@" 125 | clean_up 126 | } 127 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/README.md: -------------------------------------------------------------------------------- 1 | # KIWI appliance for Crowbar admin node 2 | 3 | **Please ensure that you have first read the 4 | [general information on KIWI](../README.md).** 5 | 6 | The KIWI appliance definition in this subdirectory is for building a 7 | Crowbar admin node on top of SLES12 SP2. Once provisioned, this node 8 | will be responsible for automatically provisioning the rest of the 9 | OpenStack cloud (in a highly available configuration, if requested by 10 | the cloud operator). 11 | 12 | ## Building the KIWI image 13 | 14 | First [ensure that you have KIWI installed](../README.md). 15 | 16 | ### Obtaining the required software 17 | 18 | There are two ways to build this appliance from scratch: 19 | 20 | 1. 
The slow (but more "complete" and fully supported) way, which requires the following: 21 | * [SUSE Linux Enterprise Server (SLES) 12 SP2 installation media](https://download.suse.com/Download?buildid=qb71v1YjQjI~) (you only need `SLES-12-SP2-DVD-x86_64-GM-DVD1.iso`; DVD2 is the source code) 22 | * [SUSE Linux Enterprise High Availability Extension (SLE HA) 12 SP2](https://download.suse.com/Download?buildid=HEl-kKOjqS0~) (again, you only need `SLE-12-SP2-HA-DVD-x86_64-GM-CD1.iso`) 23 | * [SUSE OpenStack Cloud 7 installation media](https://download.suse.com/Download?buildid=xY_5_IAEbxI~) (again, you only need SUSE-OPENSTACK-CLOUD-7-x86_64-GM-DVD1.iso) 24 | * Package repositories containing updates for each of the above, to obtain the latest bugfixes and enhancements. 25 | * Updates are available via subscriptions with a 60-day free evaluation; however all these products are Free Software, so of course you can still use them fully after 60 days - you just won't continue getting updates. 26 | * The easiest way to obtain the updates is probably via the Subscription Management Tool (SMT) ([more info on SMT here](https://www.suse.com/solutions/tools/smt.html)). 27 | * Here are the links for free 60-day evaluations of [SLES](https://www.suse.com/products/server/eval.html), [SUSE OpenStack Cloud](https://www.suse.com/products/suse-cloud/), and [SLE HA](https://www.suse.com/products/highavailability/eval.html). 28 | 29 | This way takes quite some time (and about 15GB of spare disk) to 30 | set up, because you need to first build an SMT environment, and 31 | then mirror all the packages (including updates) for SLES 12 SP3, 32 | SLE 12 HA SP2, SLE 12 SDK SP2, and SUSE OpenStack Cloud 7. 33 | 34 | 2. The quick way (which is currently only supported on a best effort 35 | basis) drastically reduces the number of dependencies by relying 36 | on: 37 | 38 | * a specially constructed `SUSE-OPENSTACK-CLOUD-7-DEPS` `.iso` 39 | which contains the minimal set of packages which SUSE OpenStack 40 | Cloud 7 requires from SLES 12 SP2 and SLE 12 HA SP2 including the 41 | latest updates, and 42 | * an `.iso` of the latest (unreleased) development build of 43 | SUSE OpenStack Cloud, which contains the latest updates. 44 | 45 | These are currently provided on demand only. 46 | 47 | Both ways also require: 48 | 49 | * [SLE 12 SDK SP2](https://download.suse.com/Download?buildid=g3e7P21X6Lw~) (although 50 | if you are willing to tolerate a slightly ugly `grub` boot menu then you can avoid 51 | this requirement by commenting out the SDK packages and repositories in 52 | [`source/config.xml.tmpl`](source/config.xml.tmpl)), and 53 | * [VirtualBox Guest Additions `.iso`](http://download.virtualbox.org/virtualbox/). 54 | Mount the `.iso` on the image-building host, and copy the 55 | `VBoxLinuxAdditions.run` file into `source/root/tmp` under this 56 | directory. 57 | 58 | ### Setting up the mountpoints 59 | 60 | The appliance [`config.xml` template](source/config.xml.tmpl) 61 | currently assumes certain mountpoints are set up on the system which 62 | will build the image. For the slow way: 63 | 64 | * `/mnt/sles-12-sp2`: SLES 12 SP2 installation media 65 | * `/mnt/suse-cloud-7`: SUSE OpenStack Cloud 7 installation media 66 | 67 | For the quick way: 68 | 69 | * `/mnt/sles-12-sp2`: the `SUSE-OPENSTACK-CLOUD-7-DEPS` `.iso` 70 | * `/mnt/suse-cloud-7`: the `.iso` of the latest development build of SUSE OpenStack Cloud 71 | * `/mnt/sle-12-sdk-sp2`: SLE 12 SDK SP2 installation media (although 72 | this can be omitted as per above. 
FIXME: this also currently requires 73 | editing the [`config.xml` template](source/config.xml.tmpl).) 74 | 75 | It also assumes that the update channels will have been mirrored to 76 | certain locations. For the slow way: 77 | 78 | * `/data/install/mirrors/SLE-12-SP2-SDK/sle-12-x86_64` 79 | * `/data/install/mirrors/SLE12-HA-SP2-Pool/sle-12-x86_64` 80 | * `/data/install/mirrors/SLE12-HA-SP2-Updates/sle-12-x86_64` 81 | * `/data/install/mirrors/SUSE-Cloud-7-Pool/sle-12-x86_64` 82 | * `/data/install/mirrors/SUSE-Cloud-7-Updates/sle-12-x86_64` 83 | 84 | For the quick way: 85 | 86 | * `/data/install/mirrors/SLE-12-SP2-SDK/sle-12-x86_64` 87 | 88 | (FIXME: this currently requires editing the 89 | [`config.xml` template](source/config.xml.tmpl).) 90 | 91 | You can optionally specify an alternate location to 92 | `/data/install/mirrors` by ading an extra `sudo` parameter before 93 | `./build-image.sh`., e.g. 94 | 95 | sudo MIRRORS='/srv/www/htdocs/repo/$RCE' ./build-image.sh 96 | 97 | might be a typical case if you are mirroring via SMT. 98 | 99 | Finally, if you want the appliance to contain the necessary media and 100 | repositories embedded under `/srv/tftpboot` (recommended, since this 101 | is required in order that the Crowbar admin node can serve packages to 102 | the other nodes), then you can bind-mount those repositories into the 103 | kiwi overlay filesystem by running the following script prior to 104 | building the KIWI image: 105 | 106 | sudo ./mount-repos.sh 107 | 108 | ### Building the image and cleaning up 109 | 110 | Now you can build the image by running: 111 | 112 | cd kiwi 113 | sudo KIWI_BUILD_TMP_DIR=/tmp/kiwi-build ./build-image.sh 114 | 115 | The resulting `.vmdk` image will be in the `image/` directory. The 116 | build log is there too on successful build. If something went wrong 117 | then everything is left in `/tmp/kiwi-build`, and you will need to 118 | clean that directory up in order to reclaim the disk space. 119 | 120 | You can `umount` the overlay bind-mounts as follows: 121 | 122 | sudo ./umount-repos.sh 123 | 124 | To speed up builds, the script automatically builds on a dedicated 125 | `tmpfs` filesystem (i.e. in RAM) if it detects sufficient memory. If 126 | the build succeeds it will automatically `umount` the RAM disk; 127 | however on any type of failure you will need to manually `umount` it 128 | in order to reclaim a huge chunk of RAM! You can disable use of 129 | `tmpfs` by including `NO_TMPFS=y` as an extra `sudo` parameter before 130 | `./build-image.sh`. 131 | 132 | **BEWARE!** There is 133 | [an obscure kernel bug](https://bugzilla.novell.com/show_bug.cgi?id=895204) 134 | which can cause processes to latch onto mounts created by `kiwi` 135 | within the chroot, preventing the chroot from being properly cleaned 136 | up until those processes are killed. See the bug for how to detect 137 | these guilty processes. If you are using `tmpfs`, this is 138 | particularly serious because the kernel will not free the RAM used by 139 | the filesystem until the processes are killed. **It is very easy to 140 | kill a system due to extreme low memory after a few kiwi builds if you 141 | do not work around this bug after each build.** 142 | 143 | The boot images are also automatically cached in 144 | `/var/cache/kiwi/bootimage` to speed up subsequent builds. You'll 145 | need to manually delete the files there to clear the cache, but 146 | there's usually no need for that. 
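As a follow-up to the kernel bug warning above, it is worth checking for processes that still hold mounts under the build directory before assuming the RAM has been freed. A minimal sketch, assuming the `KIWI_BUILD_TMP_DIR=/tmp/kiwi-build` location used in the example above:

    # List PIDs whose mount table still references the kiwi build directory
    grep -l /tmp/kiwi-build /proc/[0-9]*/mounts 2>/dev/null | cut -d/ -f3

Terminate those processes, then `umount` and remove the directory to reclaim the memory.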
147 | 148 | ## Building and installing the Vagrant box 149 | 150 | Once you have the `.vmdk` built, do: 151 | 152 | cd ../../vagrant/cloud-admin 153 | 154 | and follow the instructions in 155 | [the corresponding README](../../vagrant/cloud-admin/README.md). 156 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/build-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Script for building SUSE OpenStack Cloud admin appliance 4 | # 5 | # See ../build-lib.sh for more info. 6 | 7 | here=$( dirname "$0" ) 8 | . $here/../build-lib.sh 9 | 10 | # Determine minimum RAM required for use of tmpfs 11 | if [ -e $here/source/root/srv/tftpboot/suse-11.3/install/content.key ] 12 | then 13 | # This is a guess, but we need a *lot* in this case. 14 | : ${TMPFS_SIZE:=13500} 15 | else 16 | cat <&2 17 | WARNING: It appears you do not have the installation media and repositories 18 | set up in your overlay filesystem. The image will be missing these. 19 | Press Enter to continue or Control-C to quit ... 20 | EOF 21 | read 22 | : ${TMPFS_SIZE:=8500} 23 | fi 24 | 25 | BOOT_CACHE_DIR=/var/cache/kiwi/bootimage 26 | OUTPUT_DIR=image 27 | TMP_DIR="${KIWI_BUILD_TMP_DIR:-build-tmp}" 28 | CLEAN= 29 | 30 | build_image "$@" 31 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/mount-repos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script determines which repositories get embedded within 4 | # the admin node appliance. 5 | 6 | repos=( 7 | #SLES12-SP2-Pool # not needed since we have installation media 8 | #SLES12-SP2-Updates # embedded within SUSE-OPENSTACK-CLOUD-7-DEPS 9 | 10 | #SLE12-HA-SP2-{Pool,Updates} # embedded within SUSE-OPENSTACK-CLOUD-7-DEPS 11 | 12 | #SUSE-Cloud-7-Pool # not needed since we have installation media 13 | #SUSE-Cloud-7-Updates # not needed since we're using a Devel:Cloud:7:Staging .iso 14 | 15 | # Devel:Cloud:7 16 | # Devel:Cloud:7:Staging 17 | ) 18 | 19 | function bind_mount { 20 | src="$1" mnt="$2" 21 | if mount | grep -q " $mnt "; then 22 | echo "already mounted: $mnt" 23 | else 24 | mkdir -p "$mnt" 25 | if mount --bind "$src" "$mnt"; then 26 | echo "mount succeeded: $mnt" 27 | else 28 | echo >&2 "Failed to mount $src on $mnt" 29 | exit 1 30 | fi 31 | fi 32 | } 33 | 34 | function setup_overlay { 35 | : ${MIRROR_DIR:=/data/install/mirrors} 36 | here=$( cd `dirname "$0"`; pwd -P ) 37 | tftpboot=$here/source/root/srv/tftpboot 38 | bind_mount /mnt/suse-cloud-sle12-deps $tftpboot/suse-12.2/install 39 | bind_mount /mnt/suse-cloud-7 $tftpboot/suse-12.2/repos/Cloud 40 | for repo in "${repos[@]}"; do 41 | bind_mount $MIRROR_DIR/$repo $tftpboot/suse-12.2/repos/$repo 42 | done 43 | } 44 | 45 | setup_overlay 46 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/.gitignore: -------------------------------------------------------------------------------- 1 | root/srv/tftpboot/repos/ 2 | root/srv/tftpboot/suse-11.3/repos/ 3 | root/srv/tftpboot/suse-11.3/install/ 4 | root/tmp/VBoxLinuxAdditions.run 5 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/bootsplash.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SUSE-Cloud/suse-cloud-appliances/ab2eb83db8687a6d535968ba4514ffffed7fb8ea/kiwi/cloud-admin/source/bootsplash.tar -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #================ 3 | # FILE : config.sh 4 | #---------------- 5 | # PROJECT : OpenSuSE KIWI Image System 6 | # COPYRIGHT : (c) 2006 SUSE LINUX Products GmbH. All rights reserved 7 | # : 8 | # AUTHOR : Marcus Schaefer 9 | # : 10 | # BELONGS TO : Operating System images 11 | # : 12 | # DESCRIPTION : configuration script for SUSE based 13 | # : operating systems 14 | # : 15 | # : 16 | # STATUS : BETA 17 | #---------------- 18 | #====================================== 19 | # Functions... 20 | #-------------------------------------- 21 | test -f /.kconfig && . /.kconfig 22 | test -f /.profile && . /.profile 23 | 24 | #====================================== 25 | # Greeting... 26 | #-------------------------------------- 27 | echo "Configure image: [$kiwi_iname]..." 28 | 29 | #====================================== 30 | # Mount system filesystems 31 | #-------------------------------------- 32 | baseMount 33 | 34 | #====================================== 35 | # Setup baseproduct link 36 | #-------------------------------------- 37 | suseSetupProduct 38 | 39 | #====================================== 40 | # Add missing gpg keys to rpm 41 | #-------------------------------------- 42 | suseImportBuildKey 43 | 44 | #====================================== 45 | # Activate services 46 | #-------------------------------------- 47 | baseInsertService sshd 48 | 49 | #====================================== 50 | # Setup default target, multi-user 51 | #-------------------------------------- 52 | baseSetRunlevel 3 53 | 54 | #====================================== 55 | # SuSEconfig 56 | #-------------------------------------- 57 | suseConfig 58 | 59 | 60 | #====================================== 61 | # Sysconfig Update 62 | #-------------------------------------- 63 | echo '** Update sysconfig entries...' 64 | baseUpdateSysConfig /etc/sysconfig/network/config FIREWALL no 65 | baseUpdateSysConfig /etc/sysconfig/console CONSOLE_FONT lat9w-16.psfu 66 | 67 | 68 | #====================================== 69 | # Custom changes for Cloud 70 | #-------------------------------------- 71 | USE_YAST_FIRSTBOOT=1 72 | 73 | if [ "$kiwi_type" == "oem" ]; then 74 | echo "** Customizing config for appliance..." 75 | else 76 | # nothing yet 77 | true 78 | fi 79 | 80 | if [ "$kiwi_type" == "iso" ]; then 81 | echo "** Customizing config for live image..." 82 | mv /etc/issue.live /etc/issue 83 | mv /etc/YaST2/control.xml.live /etc/YaST2/firstboot.xml 84 | else 85 | # remove live ISO files 86 | rm /etc/issue.live 87 | rm /etc/YaST2/control.xml.live 88 | fi 89 | 90 | if [ "$kiwi_type" == "vmx" -a -d /home/vagrant ]; then 91 | echo "** Customizing config for Vagrant..." 
92 | mv /etc/issue.vagrant /etc/issue 93 | # one dhcp network interface for vagrant + one static network interface 94 | mv /etc/sysconfig/network/ifcfg-eth0.dhcp /etc/sysconfig/network/ifcfg-eth0 95 | mv /etc/sysconfig/network/ifcfg-eth0.static /etc/sysconfig/network/ifcfg-eth1 96 | # use firstboot service 97 | USE_YAST_FIRSTBOOT=0 98 | else 99 | # files required by vagrant 100 | rm /etc/sudoers.d/vagrant 101 | rm -r /home/vagrant/ 102 | # remove vagrant-specific files 103 | rm /etc/profile.d/EULA.sh 104 | rm /etc/issue.vagrant 105 | # static network config, one interface 106 | rm /etc/sysconfig/network/ifcfg-eth0.dhcp 107 | mv /etc/sysconfig/network/ifcfg-eth0.static /etc/sysconfig/network/ifcfg-eth0 108 | fi 109 | 110 | # Working around broken timezone support for SLE 12 in kiwi 111 | systemd-firstboot --timezone=UTC 112 | 113 | # This avoids annoyingly long timeouts on reverse DNS 114 | # lookups when connecting via ssh. 115 | sed -i 's/^#\?UseDNS.*/UseDNS no/' /etc/ssh/sshd_config 116 | 117 | # Default behaviour of less drives me nuts! 118 | sed -i 's/\(LESS="\)/\1-X /' /etc/profile 119 | 120 | cat <>/root/.bash_profile 121 | if [ -e /tmp/.crowbar-nodes-roles.cache ]; then 122 | source /tmp/.crowbar-nodes-roles.cache 123 | fi 124 | EOF 125 | 126 | if [ $USE_YAST_FIRSTBOOT -eq 1 ]; then 127 | echo "** Enabling YaST firstboot..." 128 | baseUpdateSysConfig /etc/sysconfig/firstboot FIRSTBOOT_WELCOME_DIR /etc/YaST2/firstboot/ 129 | baseUpdateSysConfig /etc/sysconfig/firstboot FIRSTBOOT_FINISH_FILE /etc/YaST2/firstboot/congratulate.txt 130 | touch /var/lib/YaST2/reconfig_system 131 | 132 | # Do not rely on yast2-firstboot to run our scripts as this create a 133 | # dependency loop for systemd (as firstboot is blocking other systemd 134 | # services), and makes it impossible for chef-solo to start services 135 | #chkconfig appliance-firstboot off 136 | echo "** Enabling firstboot service..." 137 | chkconfig appliance-firstboot on 138 | # prevent yast2 from calling the firstboot scripts 139 | baseUpdateSysConfig /etc/sysconfig/firstboot SCRIPT_DIR /usr/share/firstboot/scripts-no 140 | else 141 | echo "** Enabling firstboot service..." 142 | chkconfig appliance-firstboot on 143 | 144 | # remove yast firstboot files 145 | zypper --non-interactive rm yast2-firstboot 146 | rm /etc/YaST2/firstboot.xml 147 | rm -r /etc/YaST2/firstboot/ 148 | rm -r /usr/share/firstboot/licenses/ 149 | fi 150 | 151 | echo "** Setting up zypper repos..." 152 | # -K disables local caching of rpm files, since they are already local 153 | # to the VM (or at least to its host in the NFS / synced folders cases), 154 | # so caching would just unnecessarily bloat the VM. 155 | zypper --non-interactive ar -K -t yast2 file:///srv/tftpboot/suse-12.2/x86_64/install DEPS-ISO 156 | 157 | echo "** Patching Crowbar for appliance..." 
158 | /patches/apply-patches 159 | rm -rf /patches 160 | 161 | # Scrap pointless 45 second tcpdump per interface 162 | sed -i 's/45/1/' /opt/dell/chef/cookbooks/ohai/files/default/plugins/crowbar.rb 163 | 164 | # Create the NFS export for shared storage for HA PostgreSQL and RabbitMQ 165 | mkdir -p /nfs/{postgresql,rabbitmq} 166 | echo '/nfs/postgresql <%= @admin_subnet %>/<%= @admin_netmask %>(rw,async,no_root_squash,no_subtree_check)' >> /opt/dell/chef/cookbooks/nfs-server/templates/default/exports.erb 167 | echo '/nfs/rabbitmq <%= @admin_subnet %>/<%= @admin_netmask %>(rw,async,no_root_squash,no_subtree_check)' >> /opt/dell/chef/cookbooks/nfs-server/templates/default/exports.erb 168 | 169 | # Create the directory for shared glance storage 170 | mkdir -p /var/lib/glance 171 | echo '/var/lib/glance <%= @admin_subnet %>/<%= @admin_netmask %>(rw,async,no_root_squash,no_subtree_check)' >> /opt/dell/chef/cookbooks/nfs-server/templates/default/exports.erb 172 | 173 | # Create the directory for cinder NFS storage 174 | mkdir -p /var/lib/cinder 175 | echo '/var/lib/cinder <%= @admin_subnet %>/<%= @admin_netmask %>(rw,async,no_root_squash,no_subtree_check)' >> /opt/dell/chef/cookbooks/nfs-server/templates/default/exports.erb 176 | 177 | # Create the directory for shared nova instances storage 178 | mkdir -p /var/lib/nova/instances 179 | echo '/var/lib/nova/instances <%= @admin_subnet %>/<%= @admin_netmask %>(rw,async,no_root_squash,no_subtree_check)' >> /opt/dell/chef/cookbooks/nfs-server/templates/default/exports.erb 180 | 181 | echo "** Enabling additional services..." 182 | # helps with gpg in VMs 183 | chkconfig haveged on 184 | # we want ntpd to start early, as it doesn't reply to ntpdate 5 minutes, 185 | # which can slow node discovery (new behavior happening because it 186 | # doesn't synchronize with any other servers, see bsc#954982) 187 | chkconfig ntpd on 188 | 189 | 190 | #====================================== 191 | # SSL Certificates Configuration 192 | #-------------------------------------- 193 | echo '** Rehashing SSL Certificates...' 
194 | c_rehash 195 | 196 | 197 | #====================================== 198 | # Umount kernel filesystems 199 | #-------------------------------------- 200 | baseCleanMount 201 | 202 | #====================================== 203 | # Exit safely 204 | #-------------------------------------- 205 | exit 0 206 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/config.xml.tmpl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | SUSE OpenStack Cloud Team 5 | cloud-devel@suse.de 6 | SUSE OpenStack Cloud 7 Admin Server 7 | 8 | 9 | 10 | 11 | 12 | true 13 | 512 14 | false 15 | false 16 | false 17 | SUSE OpenStack Cloud 7 Admin Server 18 | 19 | 20 | 23 | 34 | 0.0.1 35 | zypper 36 | false 37 | false 38 | studio 39 | studio 40 | UTC 41 | utc 42 | us.map.gz 43 | en_US 44 | 45 | 46 | 47 | 48 | 49 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 114 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/YaST2/firstboot/congratulate.txt: -------------------------------------------------------------------------------- 1 |

2 | The first part of the setup of the SUSE OpenStack Cloud Admin Server has 3 | completed successfully! 4 | 5 | 6 | Complete the setup of the SUSE OpenStack Cloud Admin Server by connecting to 7 | the web interface at http://192.168.124.10/ (or the IP address you 8 | picked for the Admin Server in the admin network during the initial settings). 9 | 10 | 11 | If you changed the IP range of the admin network, then you will need to update 12 | the IP address of the Admin Server accordingly in /etc/hosts and 13 | /etc/sysconfig/network/ifcfg-eth0 as well as the default route in 14 | /etc/sysconfig/network/routes and the DNS server in 15 | /etc/resolv.conf. Reconfiguring the network interface will also be 16 | required; this can be achieved with ifdown eth0 && ifup eth0. 17 | 18 | 19 | Please visit us at https://www.suse.com/. 20 |

21 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/YaST2/firstboot/welcome.txt: -------------------------------------------------------------------------------- 1 |

Welcome! 2 | 3 | There are a few more steps to take before the SUSE OpenStack Cloud Admin 4 | Server is ready to use. 5 | 6 | 7 | Please agree to the End User License Agreements, and then define the initial 8 | settings to use for the deployment. This guide will then end, and you will be 9 | able to complete the setup by connecting to the web interface of the SUSE 10 | OpenStack Cloud Admin Server. 11 | 12 | 13 | As part of the initial settings, it is important that the network 14 | configuration that you defined is correct: if not, you will need to start 15 | over. 16 | Please refer to the SUSE OpenStack Cloud Deployment Guide for more 17 | information. 18 | In case of doubt, you may run yast2 crowbar again after this 19 | guide to double-check or change the settings before proceeding further. 20 |

21 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/YaST2/licenses/license.txt: -------------------------------------------------------------------------------- 1 | license-sles.txt -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/hostname: -------------------------------------------------------------------------------- 1 | cloud7-admin.openstack.site 2 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/hosts: -------------------------------------------------------------------------------- 1 | # 2 | # hosts This file describes a number of hostname-to-address 3 | # mappings for the TCP/IP subsystem. It is mostly 4 | # used at boot time, when no name servers are running. 5 | # On small systems, this file can be used instead of a 6 | # "named" name server. 7 | # Syntax: 8 | # 9 | # IP-Address Full-Qualified-Hostname Short-Hostname 10 | # 11 | 12 | 127.0.0.1 localhost 13 | 14 | # special IPv6 addresses 15 | ::1 localhost ipv6-localhost ipv6-loopback 16 | 17 | fe00::0 ipv6-localnet 18 | 19 | ff00::0 ipv6-mcastprefix 20 | ff02::1 ipv6-allnodes 21 | ff02::2 ipv6-allrouters 22 | ff02::3 ipv6-allhosts 23 | 24 | 192.168.124.10 cloud7-admin.openstack.site cloud7-admin 25 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/issue: -------------------------------------------------------------------------------- 1 | 2 | Welcome to SUSE Linux Enterprise Server 12 SP2 (x86_64) - Kernel \r (\l). 3 | 4 | You may login as 'root' (default password: 'linux'). 5 | 6 | 7 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/issue.live: -------------------------------------------------------------------------------- 1 | 2 | Welcome to SUSE Linux Enterprise Server 12 SP2 (x86_64) - Kernel \r (\l). 3 | 4 | You may login as 'root' (default password: 'linux'). 5 | 6 | If desired and not already done, this live image can be installed on the disk by 7 | calling 'yast2 live-installer'. 8 | 9 | 10 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/issue.vagrant: -------------------------------------------------------------------------------- 1 | 2 | Welcome to SUSE Linux Enterprise Server 12 SP2 (x86_64) - Kernel \r (\l). 3 | 4 | You may login as 'root' (default password: 'vagrant'). 5 | 6 | 7 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/motd: -------------------------------------------------------------------------------- 1 | ____ 2 | /@ ~-. 3 | \/ __ .- | SUSE OpenStack Cloud 7 Admin Server 4 | // // @ 5 | 6 | WARNING! This appliance contains unsupported code! 7 | Do NOT use this appliance for production deployments! 8 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/profile.d/EULA.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_eula () { 4 | local LICENSE_FILE 5 | local answer 6 | 7 | if [ ! 
-e $HOME/.eula-accepted ]; then 8 | touch $HOME/.eula-accepted 9 | fi 10 | 11 | for LICENSE_FILE in /etc/YaST2/licenses/license-*.txt; do 12 | if grep -q "$LICENSE_FILE" $HOME/.eula-accepted; then 13 | continue 14 | fi 15 | 16 | # Code stolen from 17 | # https://github.com/SUSE/studio/blob/master/kiwi-job/templates/SLES11_SP3/root/etc/init.d/suse_studio_firstboot.in 18 | stty -nl ixon ignbrk -brkint 19 | 20 | if [ `uname -m` == "s390x" ]; then 21 | cat $LICENSE_FILE 22 | else 23 | less $LICENSE_FILE 2>/dev/null || more $LICENSE_FILE 2>/dev/null || cat $LICENSE_FILE 24 | fi 25 | 26 | answer= 27 | until [ "$answer" == "y" ] || [ "$answer" == "Y" ]; 28 | do 29 | echo -n "Do you accept the EULA? [y/n] " 30 | read -e answer 31 | if [ "$answer" == "n" ] || [ "$answer" == "N" ]; then 32 | exit 33 | fi 34 | done 35 | 36 | echo "$LICENSE_FILE" >> $HOME/.eula-accepted 37 | done 38 | } 39 | 40 | case "$-" in 41 | *i*) 42 | check_eula 43 | ;; 44 | esac 45 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 192.168.124.1 2 | nameserver 8.8.8.8 3 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/sudoers.d/vagrant: -------------------------------------------------------------------------------- 1 | vagrant ALL=(ALL) NOPASSWD:ALL 2 | Defaults:vagrant !requiretty 3 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/sysconfig/network/dhcp: -------------------------------------------------------------------------------- 1 | DHCLIENT_DEBUG="no" 2 | DHCLIENT_SET_HOSTNAME="yes" 3 | DHCLIENT_MODIFY_RESOLV_CONF="yes" 4 | DHCLIENT_SET_DEFAULT_ROUTE="yes" 5 | DHCLIENT_MODIFY_NTP_CONF="no" 6 | DHCLIENT_MODIFY_NIS_CONF="no" 7 | DHCLIENT_SET_DOMAINNAME="no" 8 | DHCLIENT_KEEP_SEARCHLIST="no" 9 | DHCLIENT_LEASE_TIME="" 10 | DHCLIENT_TIMEOUT="999999" 11 | DHCLIENT_REBOOT_TIMEOUT="" 12 | DHCLIENT_HOSTNAME_OPTION="AUTO" 13 | DHCLIENT_CLIENT_ID="" 14 | DHCLIENT_VENDOR_CLASS_ID="" 15 | DHCLIENT_RELEASE_BEFORE_QUIT="no" 16 | DHCLIENT_SCRIPT_EXE="" 17 | DHCLIENT_ADDITIONAL_OPTIONS="" 18 | DHCLIENT_SLEEP="0" 19 | DHCLIENT_WAIT_AT_BOOT="15" 20 | DHCLIENT_UDP_CHECKSUM="no" 21 | DHCLIENT_MODIFY_SMB_CONF="yes" 22 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/sysconfig/network/routes: -------------------------------------------------------------------------------- 1 | default 192.168.124.1 - - 2 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/etc/systemd/system/appliance-firstboot.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Execute the first boot script 3 | Before=xdm.service getty@tty1.service 4 | After=network.service 5 | 6 | [Service] 7 | Type=oneshot 8 | #ExecStartPre=-/usr/bin/plymouth --hide-splash 9 | ExecStart=/usr/bin/appliance-firstboot 10 | #StandardInput=tty 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/home/vagrant/.ssh/authorized_keys: -------------------------------------------------------------------------------- 1 | ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key 2 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/opt/dell/crowbar_framework/config/repos-cloud.yml: -------------------------------------------------------------------------------- 1 | --- 2 | suse-12.2: 3 | x86_64: 4 | ptf: 5 | name: "PTF" 6 | required: "mandatory" 7 | features: ["os", "ha", "openstack", "ceph"] 8 | url: 9 | ask_on_error: false 10 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/patches/0001-Disable-calamari-role-in-Crowbar-for-appliance.patch: -------------------------------------------------------------------------------- 1 | From 89d179458c7f4d4a1f8430558c2b8f8263679ccf Mon Sep 17 00:00:00 2001 2 | From: Vincent Untz 3 | Date: Mon, 11 Apr 2016 21:26:06 +0200 4 | Subject: [PATCH] Disable calamari role in Crowbar for appliance 5 | 6 | The appliance doesn't ship calamari, due to conflicting django 7 | requirement with horizon. 8 | --- 9 | chef/data_bags/crowbar/template-ceph.json | 1 - 10 | crowbar_framework/app/models/ceph_service.rb | 2 +- 11 | crowbar_framework/app/views/barclamp/ceph/_edit_attributes.html.haml | 2 +- 12 | 3 files changed, 2 insertions(+), 3 deletions(-) 13 | 14 | diff --git a/chef/data_bags/crowbar/template-ceph.json b/chef/data_bags/crowbar/template-ceph.json 15 | index cf66b46..2f71f2a 100644 16 | --- a/chef/data_bags/crowbar/template-ceph.json 17 | +++ b/chef/data_bags/crowbar/template-ceph.json 18 | @@ -63,7 +63,6 @@ 19 | }, 20 | "elements": {}, 21 | "element_order": [ 22 | - [ "ceph-calamari" ], 23 | [ "ceph-mon" ], 24 | [ "ceph-osd" ], 25 | [ "ceph-radosgw" ], 26 | diff --git a/crowbar_framework/app/models/ceph_service.rb b/crowbar_framework/app/models/ceph_service.rb 27 | index 8d447fe..06079d7 100644 28 | --- a/crowbar_framework/app/models/ceph_service.rb 29 | +++ b/crowbar_framework/app/models/ceph_service.rb 30 | @@ -144,7 +144,7 @@ class CephService < PacemakerServiceObject 31 | calamari_node = calamari_nodes.first 32 | 33 | base["deployment"]["ceph"]["elements"] = { 34 | - "ceph-calamari" => calamari_node.nil? ? [] : [calamari_node.name], 35 | + #"ceph-calamari" => calamari_node.nil? ? [] : [calamari_node.name], 36 | "ceph-mon" => mon_nodes.map { |x| x.name }, 37 | "ceph-osd" => osd_nodes.map { |x| x.name }, 38 | "ceph-mds" => mds_node.nil? ? 
[] : [mds_node.name], 39 | diff --git a/crowbar_framework/app/views/barclamp/ceph/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/ceph/_edit_attributes.html.haml 40 | index bedde22..282c7f0 100644 41 | --- a/crowbar_framework/app/views/barclamp/ceph/_edit_attributes.html.haml 42 | +++ b/crowbar_framework/app/views/barclamp/ceph/_edit_attributes.html.haml 43 | @@ -23,7 +23,7 @@ 44 | = string_field %w(radosgw ssl keyfile) 45 | = boolean_field %w(radosgw ssl insecure) 46 | 47 | - %fieldset 48 | + -#%fieldset 49 | %legend 50 | = t(".calamari.header") 51 | 52 | -- 53 | 2.10.1 54 | 55 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/patches/0001-crowbar_register-Add-entry-to-etc-hosts-for-resolvin.patch: -------------------------------------------------------------------------------- 1 | From ae8b597c104af7c474ff87a5388c98ad1b2cae9e Mon Sep 17 00:00:00 2001 2 | From: Vincent Untz 3 | Date: Fri, 18 Nov 2016 10:59:10 +0100 4 | Subject: [PATCH] crowbar_register: Add entry to /etc/hosts for resolving our 5 | hostname 6 | 7 | This helps make sure that, in case the DNS server is down, we can still 8 | resolve our hostname. We already do that in the autoyast profile, but we 9 | forgot to add this to crowbar_register. 10 | 11 | See https://github.com/crowbar/barclamp-provisioner/pull/344 12 | --- 13 | .../cookbooks/provisioner/templates/suse/crowbar_register.erb | 11 +++++++++++ 14 | 1 file changed, 11 insertions(+) 15 | 16 | diff --git a/chef/cookbooks/provisioner/templates/suse/crowbar_register.erb b/chef/cookbooks/provisioner/templates/suse/crowbar_register.erb 17 | index 5ffbc92..43dc23d 100644 18 | --- a/chef/cookbooks/provisioner/templates/suse/crowbar_register.erb 19 | +++ b/chef/cookbooks/provisioner/templates/suse/crowbar_register.erb 20 | @@ -466,4 +466,15 @@ post_state $HOSTNAME "installed" 21 | # Wait for DHCP to update 22 | sleep 30 23 | 24 | +# Make sure we can always resolve our hostname; we use DHCP to find what's our 25 | +# admin IP 26 | +DHCP_VARS=$(mktemp) 27 | +/usr/lib/wicked/bin/wickedd-dhcp4 --test --test-output $DHCP_VARS $BOOTDEV 28 | +if test $? -eq 0; then 29 | + eval $(grep ^IPADDR= "$DHCP_VARS") 30 | + ADMIN_IP=${IPADDR%%/*} 31 | + echo "$ADMIN_IP $HOSTNAME ${HOSTNAME%%.*}" >> /etc/hosts 32 | +fi 33 | +rm -f "$DHCP_VARS" 34 | + 35 | /usr/sbin/crowbar_join --setup --debug --verbose 36 | -- 37 | 2.10.1 38 | 39 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/patches/apply-patches: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cd $(dirname $0) 4 | patch -p1 -d /opt/dell < 0001-Disable-calamari-role-in-Crowbar-for-appliance.patch 5 | patch -p1 -d /opt/dell < 0001-crowbar_register-Add-entry-to-etc-hosts-for-resolvin.patch 6 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/root/DRBD.yaml: -------------------------------------------------------------------------------- 1 | # Input file for 'crowbar batch build' command which sets up a cloud 2 | # with a single 2-node HA cluster using DRBD and manually configured 3 | # STONITH (can be SBD, for instance), and a single KVM compute node. 4 | # 5 | # If nodes are libvirt VMs, it is possible to configure STONITH with 6 | # libvirt (hence no need for manual configuration): it's simply a matter 7 | # of uncommenting a few lines in the pacemaker proposal below. 
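#
# Example invocation (a sketch): on the admin appliance this file is installed
# as /root/DRBD.yaml, so once the nodes have been allocated and given the
# aliases used below (e.g. via setup-node-aliases.sh), it can be fed to
# Crowbar with:
#
#   crowbar batch build /root/DRBD.yaml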
8 | # 9 | # The 192.168.124.10 IP address must be replaced with the IP address of 10 | # the admin server in case it got changed. 11 | --- 12 | global_options: 13 | - action_for_existing_proposals: skip # could also be e.g. 'recreate' or 'overwrite' 14 | proposals: 15 | - barclamp: nfs_client 16 | name: controller 17 | attributes: 18 | exports: 19 | glance: 20 | nfs_server: 192.168.124.10 21 | export: "/var/lib/glance" 22 | mount_path: "/var/lib/glance" 23 | mount_options: 24 | - '' 25 | deployment: 26 | elements: 27 | nfs-client: 28 | - "@@controller1@@" 29 | - "@@controller2@@" 30 | - barclamp: pacemaker 31 | name: controller 32 | action_if_exists: overwrite 33 | attributes: 34 | stonith: 35 | mode: manual 36 | # If libvirt can be used for STONITH, comment the line above, and 37 | # uncomment the next three lines. 38 | # mode: libvirt 39 | # libvirt: 40 | # hypervisor_ip: 192.168.124.1 41 | drbd: 42 | enabled: true 43 | deployment: 44 | elements: 45 | hawk-server: 46 | - "@@controller1@@" 47 | - "@@controller2@@" 48 | pacemaker-cluster-member: 49 | - "@@controller1@@" 50 | - "@@controller2@@" 51 | - barclamp: database 52 | # Proposal name defaults to 'default'. 53 | # Default attributes are good enough, so we just need to assign 54 | # nodes to roles: 55 | attributes: 56 | ha: 57 | storage: 58 | mode: drbd 59 | drbd: 60 | size: 1 61 | deployment: 62 | elements: 63 | database-server: 64 | - cluster:controller 65 | - barclamp: rabbitmq 66 | attributes: 67 | ha: 68 | storage: 69 | mode: drbd 70 | drbd: 71 | size: 1 72 | deployment: 73 | elements: 74 | rabbitmq-server: 75 | - cluster:controller 76 | - barclamp: keystone 77 | deployment: 78 | elements: 79 | keystone-server: 80 | - cluster:controller 81 | - barclamp: glance 82 | deployment: 83 | elements: 84 | glance-server: 85 | - cluster:controller 86 | - barclamp: cinder 87 | wipe_attributes: 88 | - volumes 89 | attributes: 90 | volumes: 91 | - backend_name: local 92 | backend_driver: local 93 | local: 94 | file_size: 200 95 | volume_name: cinder-volumes 96 | file_name: /var/lib/cinder/volume.raw 97 | deployment: 98 | elements: 99 | cinder-controller: 100 | - cluster:controller 101 | cinder-volume: 102 | - "@@compute1@@" 103 | - barclamp: neutron 104 | deployment: 105 | elements: 106 | neutron-server: 107 | - cluster:controller 108 | neutron-network: 109 | - cluster:controller 110 | - barclamp: nova 111 | attributes: 112 | kvm: 113 | ksm_enabled: true 114 | deployment: 115 | elements: 116 | nova-controller: 117 | - cluster:controller 118 | nova-compute-kvm: 119 | - "@@compute1@@" 120 | - "@@compute2@@" 121 | - barclamp: horizon 122 | deployment: 123 | elements: 124 | horizon-server: 125 | - cluster:controller 126 | - barclamp: heat 127 | deployment: 128 | elements: 129 | heat-server: 130 | - cluster:controller 131 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/root/NFS.yaml: -------------------------------------------------------------------------------- 1 | # Input file for 'crowbar batch build' command which sets up a cloud 2 | # with a single 2-node HA cluster using NFS and manually configured 3 | # STONITH (can be SBD, for instance), and a single KVM compute node. 4 | # 5 | # If nodes are libvirt VMs, it is possible to configure STONITH with 6 | # libvirt (hence no need for manual configuration): it's simply a matter 7 | # of uncommenting a few lines in the pacemaker proposal below. 
8 | # 9 | # The 192.168.124.10 IP address must be replaced with the IP address of 10 | # the admin server in case it got changed. 11 | --- 12 | global_options: 13 | - action_for_existing_proposals: skip # could also be e.g. 'recreate' or 'overwrite' 14 | proposals: 15 | - barclamp: nfs_client 16 | name: controller 17 | attributes: 18 | exports: 19 | glance: 20 | nfs_server: 192.168.124.10 21 | export: "/var/lib/glance" 22 | mount_path: "/var/lib/glance" 23 | mount_options: 24 | - '' 25 | deployment: 26 | elements: 27 | nfs-client: 28 | - "@@controller1@@" 29 | - "@@controller2@@" 30 | - barclamp: pacemaker 31 | name: controller 32 | action_if_exists: overwrite 33 | attributes: 34 | stonith: 35 | mode: manual 36 | # If libvirt can be used for STONITH, comment the line above, and 37 | # uncomment the next three lines. 38 | # mode: libvirt 39 | # libvirt: 40 | # hypervisor_ip: 192.168.124.1 41 | drbd: 42 | enabled: false 43 | deployment: 44 | elements: 45 | hawk-server: 46 | - "@@controller1@@" 47 | - "@@controller2@@" 48 | pacemaker-cluster-member: 49 | - "@@controller1@@" 50 | - "@@controller2@@" 51 | - barclamp: database 52 | # Proposal name defaults to 'default'. 53 | # Default attributes are good enough, so we just need to assign 54 | # nodes to roles: 55 | attributes: 56 | ha: 57 | storage: 58 | mode: shared 59 | shared: 60 | fstype: nfs 61 | options: rw 62 | device: 192.168.124.10:/nfs/postgresql 63 | drbd: 64 | size: 1 65 | deployment: 66 | elements: 67 | database-server: 68 | - cluster:controller 69 | - barclamp: rabbitmq 70 | attributes: 71 | ha: 72 | storage: 73 | mode: shared 74 | shared: 75 | fstype: nfs 76 | options: rw 77 | device: 192.168.124.10:/nfs/rabbitmq 78 | drbd: 79 | size: 1 80 | deployment: 81 | elements: 82 | rabbitmq-server: 83 | - cluster:controller 84 | - barclamp: keystone 85 | deployment: 86 | elements: 87 | keystone-server: 88 | - cluster:controller 89 | - barclamp: glance 90 | deployment: 91 | elements: 92 | glance-server: 93 | - cluster:controller 94 | - barclamp: cinder 95 | wipe_attributes: 96 | - volumes 97 | attributes: 98 | volumes: 99 | - backend_name: local 100 | backend_driver: local 101 | local: 102 | file_size: 200 103 | volume_name: cinder-volumes 104 | file_name: /var/lib/cinder/volume.raw 105 | deployment: 106 | elements: 107 | cinder-controller: 108 | - cluster:controller 109 | cinder-volume: 110 | - "@@compute1@@" 111 | - barclamp: neutron 112 | deployment: 113 | elements: 114 | neutron-server: 115 | - cluster:controller 116 | neutron-network: 117 | - cluster:controller 118 | - barclamp: nova 119 | attributes: 120 | kvm: 121 | ksm_enabled: true 122 | deployment: 123 | elements: 124 | nova-controller: 125 | - cluster:controller 126 | nova-compute-kvm: 127 | - "@@compute1@@" 128 | - "@@compute2@@" 129 | - barclamp: horizon 130 | deployment: 131 | elements: 132 | horizon-server: 133 | - cluster:controller 134 | - barclamp: heat 135 | deployment: 136 | elements: 137 | heat-server: 138 | - cluster:controller 139 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/root/bin/node-sh-vars: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script was written for any Crowbar hacker who gets fed up of 4 | # copying and pasting Chef node and role names when using knife! 
5 | # It outputs some shell code setting easy-to-type variables for the 6 | # nodes and roles in the Crowbar cluster, where the variables being set 7 | # are an abbreviated version of the aliases, with an 'r' suffix for the 8 | # corresponding roles. For example, if you have nodes aliased 'node1', 9 | # 'node2', and 'node3', you'll get something like: 10 | # 11 | # n1=d52-54-00-b5-4c-c6.cloud.site # node name for node1 12 | # n2=d52-54-00-e5-6b-a0.cloud.site # node name for node2 13 | # n3=d52-54-00-03-fb-01.cloud.site # node name for node3 14 | # n1r=crowbar-d52-54-00-b5-4c-c6_cloud_site # role name for node1 15 | # n2r=crowbar-d52-54-00-e5-6b-a0_cloud_site # role name for node2 16 | # n3r=crowbar-d52-54-00-03-fb-01_cloud_site # role name for node3 17 | # 18 | # Recommended installation: 19 | # 20 | # Copy to your admin node somewhere in its $PATH 21 | # 22 | # Example usage from admin node: 23 | # 24 | # eval "$( /path/to/this/script )" 25 | # knife node show $n1 26 | # knife role show $n1r 27 | # 28 | # Example usage from outside admin node: 29 | # 30 | # eval "$( ssh root@192.168.124.10 /path/to/this/script )" 31 | # ssh root@192.168.124.10 knife node show $n1 32 | # ssh root@192.168.124.10 knife role show $n1r 33 | # 34 | # And of course you can cache the result, to quickly reuse across multiple shells: 35 | # 36 | # ssh root@192.168.124.10 /path/to/this/script > /tmp/.crowbar-nodes-roles.cache 37 | # source /tmp/.crowbar-nodes-roles.cache 38 | 39 | for machine in $( crowbar machines list ); do 40 | [ "$machine" = $( hostname -f ) ] && continue 41 | alias=$( crowbar machines show "$machine" | sed -n '/^ *"alias": "\(.\+\)",\?/{s//\1/;p}' ) 42 | [ -z "$alias" ] && continue 43 | short="${alias/node/n}" 44 | short="${short//-/}" 45 | echo "$short=$machine # node name for $alias" 46 | role="crowbar-${machine//./_}" 47 | echo "${short}r=$role # role name for $alias" 48 | done | \ 49 | sort -k3 -k1 | \ 50 | column -t | \ 51 | sed 's/\([^ ]\) \([^ #]\)/\1 \2/g' 52 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/root/bin/setup-node-aliases.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script assigns aliases for a 4-node deployment with a 2-node 4 | # cluster for HA on the controller. This is a typical setup for demos, 5 | # hence its presence here. 6 | 7 | nodes=( $( knife node list ) ) 8 | aliases=( 9 | admin # DUMMY, only used for alignment 10 | controller1 11 | controller2 12 | compute1 13 | compute2 14 | ) 15 | 16 | error_count=0 17 | for (( i=1; i < ${#nodes[@]}; i++ )); do 18 | node="${nodes[$i]}" 19 | alias="${aliases[$i]}" 20 | echo "Setting alias $alias for $node ... " 21 | if ! crowbar machines rename $node $alias; then 22 | error_count=$(( error_count + 1 )) 23 | fi 24 | done 25 | 26 | exit $error_count 27 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/root/simple-cloud.yaml: -------------------------------------------------------------------------------- 1 | # Input file for 'crowbar batch build' command which sets up a cloud 2 | # with a single controller and a single KVM compute node. 3 | --- 4 | global_options: 5 | - action_for_existing_proposals: skip # could also be e.g. 'recreate' or 'overwrite' 6 | proposals: 7 | - barclamp: provisioner 8 | attributes: 9 | shell_prompt: USER@ALIAS:CWD SUFFIX 10 | - barclamp: database 11 | # Proposal name defaults to 'default'. 
12 | # Default attributes are good enough, so we just need to assign 13 | # nodes to roles: 14 | deployment: 15 | elements: 16 | database-server: 17 | - "@@controller1@@" 18 | - barclamp: rabbitmq 19 | deployment: 20 | elements: 21 | rabbitmq-server: 22 | - "@@controller1@@" 23 | - barclamp: keystone 24 | deployment: 25 | elements: 26 | keystone-server: 27 | - "@@controller1@@" 28 | - barclamp: glance 29 | deployment: 30 | elements: 31 | glance-server: 32 | - "@@controller1@@" 33 | - barclamp: cinder 34 | wipe_attributes: 35 | - volumes 36 | attributes: 37 | volumes: 38 | - backend_name: local 39 | backend_driver: local 40 | local: 41 | file_size: 2000 42 | volume_name: cinder-volumes 43 | file_name: /var/lib/cinder/volume.raw 44 | deployment: 45 | elements: 46 | cinder-controller: 47 | - "@@controller1@@" 48 | cinder-volume: 49 | - "@@compute1@@" 50 | - barclamp: neutron 51 | deployment: 52 | elements: 53 | neutron-server: 54 | - "@@controller1@@" 55 | neutron-network: 56 | - "@@controller1@@" 57 | - barclamp: nova 58 | attributes: 59 | kvm: 60 | ksm_enabled: true 61 | deployment: 62 | elements: 63 | nova-controller: 64 | - "@@controller1@@" 65 | nova-compute-kvm: 66 | - "@@compute1@@" 67 | - barclamp: horizon 68 | deployment: 69 | elements: 70 | horizon-server: 71 | - "@@controller1@@" 72 | - barclamp: heat 73 | deployment: 74 | elements: 75 | heat-server: 76 | - "@@controller1@@" 77 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/usr/bin/appliance-firstboot: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended to run only on first boot so it disables itself after 4 | # running. 5 | 6 | for SCRIPT in /usr/share/firstboot/scripts/*; do 7 | echo "Running $SCRIPT..." 8 | $SCRIPT 9 | done 10 | 11 | # disable itself, as we only need this to run on first boot 12 | systemctl disable appliance-firstboot 13 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/usr/lib/firstboot/wait-for-crowbar-init: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function get_status_code 4 | { 5 | curl -w '%{http_code}' -o /dev/null -s --max-time 10 '-HAccept: application/vnd.crowbar.v2.0+json' http://localhost:4567/api/status 6 | } 7 | 8 | max_tries=30 9 | tries=1 10 | while [ $tries -le $max_tries ]; do 11 | code=$(get_status_code) 12 | # we get 000 until the server replies 13 | if [ "x$code" != "x000" ]; then 14 | exit 0 15 | fi 16 | tries=$(($tries + 1)) 17 | sleep 1 18 | done 19 | 20 | >&2 echo "Cannot verify that crowbar-init got started..." 
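# crowbar-init still not responding after ~30 one-second retries: give up
# and exit non-zero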
21 | exit 1 22 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/usr/share/YaST2/clients/firstboot_license1.ycp: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Module: firstboot_timezone.ycp 4 | * 5 | * Author: Ladislav Slezak 6 | * 7 | * Submodules: 8 | * 9 | * 10 | * Purpose: display a license in running system 11 | * 12 | * 13 | * 14 | * $Id:$ 15 | */ 16 | 17 | { 18 | textdomain "firstboot"; 19 | 20 | import "Misc"; 21 | import "GetInstArgs"; 22 | 23 | 24 | any result = nil; 25 | 26 | map args = GetInstArgs::argmap(); 27 | args["directory"] = "/usr/share/firstboot/licenses/1"; 28 | args["action"] = Misc::SysconfigRead(.sysconfig.firstboot.LICENSE_REFUSAL_ACTION, "abort"); 29 | 30 | y2milestone("inst_license options: %1", args); 31 | 32 | // currently the pattern is hardcoded in ProductLicense::AskLicenseAgreement() function to license.txt and license_%1.txt 33 | // args["patterns"] = [Misc::SysconfigRead(.sysconfig.firstboot.directory, "eula.txt")]; 34 | 35 | result = WFM::CallFunction( "inst_license", [ args ] ); 36 | 37 | if (result == `halt) 38 | { 39 | UI::CloseDialog(); 40 | y2milestone("Halting the system..."); 41 | SCR::Execute(.target.bash, "/sbin/halt"); 42 | } 43 | 44 | return result; 45 | 46 | } 47 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/usr/share/YaST2/clients/firstboot_license2.ycp: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Module: firstboot_timezone.ycp 4 | * 5 | * Author: Ladislav Slezak 6 | * 7 | * Submodules: 8 | * 9 | * 10 | * Purpose: display a license in running system 11 | * 12 | * 13 | * 14 | * $Id:$ 15 | */ 16 | 17 | { 18 | textdomain "firstboot"; 19 | 20 | import "Misc"; 21 | import "GetInstArgs"; 22 | 23 | 24 | any result = nil; 25 | 26 | map args = GetInstArgs::argmap(); 27 | args["directory"] = "/usr/share/firstboot/licenses/2"; 28 | args["action"] = Misc::SysconfigRead(.sysconfig.firstboot.LICENSE_REFUSAL_ACTION, "abort"); 29 | 30 | y2milestone("inst_license options: %1", args); 31 | 32 | // currently the pattern is hardcoded in ProductLicense::AskLicenseAgreement() function to license.txt and license_%1.txt 33 | // args["patterns"] = [Misc::SysconfigRead(.sysconfig.firstboot.directory, "eula.txt")]; 34 | 35 | result = WFM::CallFunction( "inst_license", [ args ] ); 36 | 37 | if (result == `halt) 38 | { 39 | UI::CloseDialog(); 40 | y2milestone("Halting the system..."); 41 | SCR::Execute(.target.bash, "/sbin/halt"); 42 | } 43 | 44 | return result; 45 | 46 | } 47 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/usr/share/YaST2/clients/firstboot_license3.ycp: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Module: firstboot_timezone.ycp 4 | * 5 | * Author: Ladislav Slezak 6 | * 7 | * Submodules: 8 | * 9 | * 10 | * Purpose: display a license in running system 11 | * 12 | * 13 | * 14 | * $Id:$ 15 | */ 16 | 17 | { 18 | textdomain "firstboot"; 19 | 20 | import "Misc"; 21 | import "GetInstArgs"; 22 | 23 | 24 | any result = nil; 25 | 26 | map args = GetInstArgs::argmap(); 27 | args["directory"] = "/usr/share/firstboot/licenses/3"; 28 | args["action"] = Misc::SysconfigRead(.sysconfig.firstboot.LICENSE_REFUSAL_ACTION, "abort"); 29 | 30 | y2milestone("inst_license options: %1", args); 31 | 32 | // currently the 
pattern is hardcoded in ProductLicense::AskLicenseAgreement() function to license.txt and license_%1.txt 33 | // args["patterns"] = [Misc::SysconfigRead(.sysconfig.firstboot.directory, "eula.txt")]; 34 | 35 | result = WFM::CallFunction( "inst_license", [ args ] ); 36 | 37 | if (result == `halt) 38 | { 39 | UI::CloseDialog(); 40 | y2milestone("Halting the system..."); 41 | SCR::Execute(.target.bash, "/sbin/halt"); 42 | } 43 | 44 | return result; 45 | 46 | } 47 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/usr/share/YaST2/clients/firstboot_license4.ycp: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Module: firstboot_timezone.ycp 4 | * 5 | * Author: Ladislav Slezak 6 | * 7 | * Submodules: 8 | * 9 | * 10 | * Purpose: display a license in running system 11 | * 12 | * 13 | * 14 | * $Id:$ 15 | */ 16 | 17 | { 18 | textdomain "firstboot"; 19 | 20 | import "Misc"; 21 | import "GetInstArgs"; 22 | 23 | 24 | any result = nil; 25 | 26 | map args = GetInstArgs::argmap(); 27 | args["directory"] = "/usr/share/firstboot/licenses/4"; 28 | args["action"] = Misc::SysconfigRead(.sysconfig.firstboot.LICENSE_REFUSAL_ACTION, "abort"); 29 | 30 | y2milestone("inst_license options: %1", args); 31 | 32 | // currently the pattern is hardcoded in ProductLicense::AskLicenseAgreement() function to license.txt and license_%1.txt 33 | // args["patterns"] = [Misc::SysconfigRead(.sysconfig.firstboot.directory, "eula.txt")]; 34 | 35 | result = WFM::CallFunction( "inst_license", [ args ] ); 36 | 37 | if (result == `halt) 38 | { 39 | UI::CloseDialog(); 40 | y2milestone("Halting the system..."); 41 | SCR::Execute(.target.bash, "/sbin/halt"); 42 | } 43 | 44 | return result; 45 | 46 | } 47 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/source/root/usr/share/firstboot/scripts/cloud-appliance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # install man, as it's convenient -- not done when building the appliance to 4 | # keep it smaller; but skipping if we're on livecd to not abuse disk space 5 | if [ ! -d /livecd ]; then 6 | zypper --non-interactive install man 7 | fi 8 | 9 | # automatically do the initial setup; we don't want to connect to an external 10 | # database with the appliance 11 | systemctl start apache2 12 | systemctl start crowbar-init 13 | /usr/lib/firstboot/wait-for-crowbar-init 14 | crowbarctl database create 15 | -------------------------------------------------------------------------------- /kiwi/cloud-admin/umount-repos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mount | awk '/source\/root\/srv\/tftpboot/ {print $3}' | xargs -r sudo umount 4 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/README.md: -------------------------------------------------------------------------------- 1 | # KIWI appliance for Crowbar client nodes 2 | 3 | **Please ensure that you have first read the 4 | [general information on KIWI](../README.md).** 5 | 6 | The KIWI appliance definition in this subdirectory is for building a 7 | simple SLES12 SP2 JeOS image which will form the basis for the cloud 8 | controller node(s), compute node(s), and storage node(s). 
Once each 9 | of these nodes boots up, it will register against the Crowbar admin 10 | node, and subsequently execute further provisioning steps as 11 | instructed by the admin node. 12 | 13 | ## Building the KIWI image 14 | 15 | First [ensure that you have KIWI installed](../README.md). 16 | 17 | ### Obtaining the required software 18 | 19 | Building this appliance from scratch requires the following: 20 | 21 | * [SUSE Linux Enterprise Server (SLES) 12 SP2 installation media](https://download.suse.com/Download?buildid=qb71v1YjQjI~) (you only need `SLES-12-SP2-DVD-x86_64-GM-DVD1.iso`; DVD2 is the source code) 22 | * [VirtualBox Guest Additions `.iso`](http://download.virtualbox.org/virtualbox/). Mount the `.iso` on the image-building host, and copy the `VBoxLinuxAdditions.run` file into `source/root/tmp` under this directory. 23 | 24 | ### Setting up the mountpoints 25 | 26 | The appliance config currently assumes the following mountpoint is 27 | set up on the system which will build the image: 28 | 29 | * `/mnt/sles-12-sp2`: SLES12 SP2 installation media 30 | 31 | It also assumes that the SDK channel will have been mirrored to 32 | the following location: 33 | 34 | * `/data/install/mirrors/SLE-12-SP2-SDK/sle-12-x86_64` 35 | 36 | You can optionally specify an alternative to 37 | `/data/install/mirrors` by adding an extra `sudo` parameter before 38 | `./build-image.sh`, e.g. 39 | 40 | sudo MIRRORS='/srv/www/htdocs/repo/$RCE' ./build-image.sh 41 | 42 | might be a typical case if you are mirroring via SMT. 43 | 44 | ### Building the image and cleaning up 45 | 46 | Now you can build the image by running: 47 | 48 | cd kiwi 49 | sudo KIWI_BUILD_TMP_DIR=/tmp/kiwi-build ./build-image.sh 50 | 51 | The resulting `.vmdk` image will be in the `image/` directory. The 52 | build log is there too on a successful build. If something went wrong 53 | then everything is left in `/tmp/kiwi-build`, and you will need to 54 | clean that directory up in order to reclaim the disk space. 55 | 56 | To speed up builds, the script automatically builds on a dedicated 57 | `tmpfs` filesystem (i.e. in RAM) if it detects sufficient memory. If 58 | the build succeeds it will automatically `umount` the RAM disk; 59 | however on any type of failure you will need to manually `umount` it 60 | in order to reclaim a huge chunk of RAM! You can disable use of 61 | `tmpfs` by including `NO_TMPFS=y` as an extra `sudo` parameter before 62 | `./build-image.sh`. 63 | 64 | **BEWARE!** There is 65 | [an obscure kernel bug](https://bugzilla.novell.com/show_bug.cgi?id=895204) 66 | which can cause processes to latch onto mounts created by `kiwi` 67 | within the chroot, preventing the chroot from being properly cleaned 68 | up until those processes are killed. See the bug for how to detect 69 | these guilty processes. If you are using `tmpfs`, this is 70 | particularly serious because the kernel will not free the RAM used by 71 | the filesystem until the processes are killed. **It is very easy to 72 | kill a system due to extremely low memory after a few kiwi builds if you 73 | do not work around this bug after each build.** 74 | 75 | The boot images are also automatically cached in 76 | `/var/cache/kiwi/bootimage` to speed up subsequent builds. You'll need 77 | to manually delete the files there to clear the cache, but there's 78 | usually no need for that. 
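
If a build fails, the manual cleanup described above amounts to something
like the following sketch (assuming the `KIWI_BUILD_TMP_DIR=/tmp/kiwi-build`
example given earlier; if the `umount` fails, first kill the processes
holding the mount, as explained in the kernel bug note above):

    # reclaim the RAM disk (a no-op if tmpfs was not used)
    sudo umount /tmp/kiwi-build 2>/dev/null || true
    # reclaim the disk space used by the build area
    sudo rm -rf /tmp/kiwi-build
    # optionally clear the boot image cache (usually unnecessary)
    sudo rm -rf /var/cache/kiwi/bootimage/*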
79 | 80 | ## Building and installing the Vagrant box 81 | 82 | Once you have the `.vmdk` built, do: 83 | 84 | cd ../../vagrant/sles12-sp2 85 | 86 | and follow the instructions in 87 | [the corresponding README](../../vagrant/sles12-sp2/README.md). 88 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/build-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Script for building SUSE OpenStack Cloud admin appliance 4 | # 5 | # See ../build-lib.sh for more info. 6 | 7 | here=$( dirname "$0" ) 8 | . $here/../build-lib.sh 9 | 10 | : ${TMPFS_SIZE:=16500} 11 | 12 | BOOT_CACHE_DIR=/var/cache/kiwi/bootimage 13 | OUTPUT_DIR=image 14 | TMP_DIR="${KIWI_BUILD_TMP_DIR:-build-tmp}" 15 | CLEAN= 16 | 17 | build_image "$@" 18 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/.gitignore: -------------------------------------------------------------------------------- 1 | root/tmp/VBoxLinuxAdditions.run 2 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/bootsplash.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE-Cloud/suse-cloud-appliances/ab2eb83db8687a6d535968ba4514ffffed7fb8ea/kiwi/sles12-sp2/source/bootsplash.tar -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #================ 3 | # FILE : config.sh 4 | #---------------- 5 | # PROJECT : OpenSuSE KIWI Image System 6 | # COPYRIGHT : (c) 2006 SUSE LINUX Products GmbH. All rights reserved 7 | # : 8 | # AUTHOR : Marcus Schaefer 9 | # : 10 | # BELONGS TO : Operating System images 11 | # : 12 | # DESCRIPTION : configuration script for SUSE based 13 | # : operating systems 14 | # : 15 | # : 16 | # STATUS : BETA 17 | #---------------- 18 | #====================================== 19 | # Functions... 20 | #-------------------------------------- 21 | test -f /.kconfig && . /.kconfig 22 | test -f /.profile && . /.profile 23 | 24 | #====================================== 25 | # Greeting... 26 | #-------------------------------------- 27 | echo "Configure image: [$kiwi_iname]..." 28 | 29 | #====================================== 30 | # Mount system filesystems 31 | #-------------------------------------- 32 | baseMount 33 | 34 | #====================================== 35 | # Setup baseproduct link 36 | #-------------------------------------- 37 | suseSetupProduct 38 | 39 | #====================================== 40 | # Add missing gpg keys to rpm 41 | #-------------------------------------- 42 | suseImportBuildKey 43 | 44 | #====================================== 45 | # Activate services 46 | #-------------------------------------- 47 | baseInsertService sshd 48 | 49 | #====================================== 50 | # Setup default target, multi-user 51 | #-------------------------------------- 52 | baseSetRunlevel 3 53 | 54 | #====================================== 55 | # SuSEconfig 56 | #-------------------------------------- 57 | suseConfig 58 | 59 | 60 | #====================================== 61 | # Sysconfig Update 62 | #-------------------------------------- 63 | echo '** Update sysconfig entries...' 
64 | baseUpdateSysConfig /etc/sysconfig/network/config FIREWALL no 65 | baseUpdateSysConfig /etc/sysconfig/console CONSOLE_FONT lat9w-16.psfu 66 | 67 | 68 | #====================================== 69 | # Custom changes for Cloud 70 | #-------------------------------------- 71 | echo "** Enabling firstboot service..." 72 | chkconfig appliance-firstboot on 73 | 74 | # Working around broken timezone support for SLE 12 in kiwi 75 | systemd-firstboot --timezone=UTC 76 | 77 | # This avoids annoyingly long timeouts on reverse DNS 78 | # lookups when connecting via ssh. 79 | sed -i 's/^#\?UseDNS.*/UseDNS no/' /etc/ssh/sshd_config 80 | 81 | # Default behaviour of less drives me nuts! 82 | sed -i 's/\(LESS="\)/\1-X /' /etc/profile 83 | 84 | echo "** Enabling additional services..." 85 | # helps with gpg in VMs 86 | chkconfig haveged on 87 | 88 | 89 | #====================================== 90 | # SSL Certificates Configuration 91 | #-------------------------------------- 92 | echo '** Rehashing SSL Certificates...' 93 | c_rehash 94 | 95 | 96 | #====================================== 97 | # Umount kernel filesystems 98 | #-------------------------------------- 99 | baseCleanMount 100 | 101 | #====================================== 102 | # Exit safely 103 | #-------------------------------------- 104 | exit 0 105 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/config.xml.tmpl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | SUSE OpenStack Cloud Team 5 | cloud-devel@suse.de 6 | SLES 12 SP2 7 | 8 | 9 | 21 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 50 32 | 33 | 0.0.1 34 | zypper 35 | false 36 | false 37 | studio 38 | studio 39 | UTC 40 | utc 41 | us.map.gz 42 | en_US 43 | 44 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/etc/YaST2/licenses/license.txt: -------------------------------------------------------------------------------- 1 | license-sles.txt -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/etc/hostname: -------------------------------------------------------------------------------- 1 | sles12-sp2.openstack.site 2 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/etc/issue: -------------------------------------------------------------------------------- 1 | 2 | Welcome to SUSE Linux Enterprise Server 12 SP2 (x86_64) - Kernel \r (\l). 3 | 4 | You may login as 'root' (default password: 'vagrant'). 5 | 6 | 7 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/etc/motd: -------------------------------------------------------------------------------- 1 | ____ 2 | /@ ~-. 3 | \/ __ .- | SLES 12 SP2 Infrastructure Node 4 | // // @ 5 | 6 | WARNING! This appliance contains unsupported code! 7 | Do NOT use this appliance for production deployments! 
8 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/etc/profile.d/EULA.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_eula () { 4 | local LICENSE_FILE 5 | local answer 6 | 7 | if [ ! -e $HOME/.eula-accepted ]; then 8 | touch $HOME/.eula-accepted 9 | fi 10 | 11 | for LICENSE_FILE in /etc/YaST2/licenses/license-*.txt; do 12 | if grep -q "$LICENSE_FILE" $HOME/.eula-accepted; then 13 | continue 14 | fi 15 | 16 | # Code stolen from 17 | # https://github.com/SUSE/studio/blob/master/kiwi-job/templates/SLES11_SP3/root/etc/init.d/suse_studio_firstboot.in 18 | stty -nl ixon ignbrk -brkint 19 | 20 | if [ `uname -m` == "s390x" ]; then 21 | cat $LICENSE_FILE 22 | else 23 | less $LICENSE_FILE 2>/dev/null || more $LICENSE_FILE 2>/dev/null || cat $LICENSE_FILE 24 | fi 25 | 26 | answer= 27 | until [ "$answer" == "y" ] || [ "$answer" == "Y" ]; 28 | do 29 | echo -n "Do you accept the EULA? [y/n] " 30 | read -e answer 31 | if [ "$answer" == "n" ] || [ "$answer" == "N" ]; then 32 | exit 33 | fi 34 | done 35 | 36 | echo "$LICENSE_FILE" >> $HOME/.eula-accepted 37 | done 38 | } 39 | 40 | case "$-" in 41 | *i*) 42 | check_eula 43 | ;; 44 | esac 45 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/etc/sudoers.d/vagrant: -------------------------------------------------------------------------------- 1 | vagrant ALL=(ALL) NOPASSWD:ALL 2 | Defaults:vagrant !requiretty 3 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/etc/sysconfig/network/dhcp: -------------------------------------------------------------------------------- 1 | DHCLIENT_DEBUG="no" 2 | DHCLIENT_SET_HOSTNAME="yes" 3 | DHCLIENT_MODIFY_RESOLV_CONF="yes" 4 | DHCLIENT_SET_DEFAULT_ROUTE="yes" 5 | DHCLIENT_MODIFY_NTP_CONF="no" 6 | DHCLIENT_MODIFY_NIS_CONF="no" 7 | DHCLIENT_SET_DOMAINNAME="no" 8 | DHCLIENT_KEEP_SEARCHLIST="no" 9 | DHCLIENT_LEASE_TIME="" 10 | DHCLIENT_TIMEOUT="999999" 11 | DHCLIENT_REBOOT_TIMEOUT="" 12 | DHCLIENT_HOSTNAME_OPTION="AUTO" 13 | DHCLIENT_CLIENT_ID="" 14 | DHCLIENT_VENDOR_CLASS_ID="" 15 | DHCLIENT_RELEASE_BEFORE_QUIT="no" 16 | DHCLIENT_SCRIPT_EXE="" 17 | DHCLIENT_ADDITIONAL_OPTIONS="" 18 | DHCLIENT_SLEEP="0" 19 | DHCLIENT_WAIT_AT_BOOT="15" 20 | DHCLIENT_UDP_CHECKSUM="no" 21 | DHCLIENT_MODIFY_SMB_CONF="yes" 22 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/etc/systemd/system/appliance-firstboot.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Execute the first boot script 3 | Before=xdm.service getty@tty1.service 4 | After=network.service 5 | 6 | [Service] 7 | Type=oneshot 8 | #ExecStartPre=-/usr/bin/plymouth --hide-splash 9 | ExecStart=/usr/bin/appliance-firstboot 10 | #StandardInput=tty 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/home/vagrant/.ssh/authorized_keys: -------------------------------------------------------------------------------- 1 | ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key 2 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/usr/bin/appliance-firstboot: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended to run only on first boot so it disables itself after 4 | # running. 5 | 6 | for SCRIPT in /usr/share/firstboot/scripts/*; do 7 | echo "Running $SCRIPT..." 8 | $SCRIPT 9 | done 10 | 11 | # disable itself, as we only need this to run on first boot 12 | systemctl disable appliance-firstboot 13 | -------------------------------------------------------------------------------- /kiwi/sles12-sp2/source/root/usr/share/firstboot/scripts/cloud-appliance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # install man, as it's convenient -- not done when building the appliance to 4 | # keep it smaller; but skipping if we're on livecd to not abuse disk space 5 | if [ ! -d /livecd ]; then 6 | zypper --non-interactive install man 7 | fi 8 | -------------------------------------------------------------------------------- /vagrant/.gitignore: -------------------------------------------------------------------------------- 1 | .bundle/ 2 | vendor/ 3 | .vagrant/machines/ 4 | cinder-flat.vmdk 5 | cinder.vmdk 6 | glance-flat.vmdk 7 | glance.vmdk 8 | sbd-flat.vmdk 9 | sbd.vmdk 10 | drbd-*.vmdk 11 | -------------------------------------------------------------------------------- /vagrant/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | group :development do 4 | # bundle config local.vagrant ~/.GIT/3rd-party/vagrant 5 | if ENV['VAGRANT_FROM_GIT'] 6 | # upstream git master currently breaks with libvirt, so use fixes: 7 | gem 'vagrant', github: 'aspiers/vagrant', branch: 'working' 8 | else 9 | gem 'vagrant', '>= 1.6.5', github: 'mitchellh/vagrant', tag: 'v1.6.5' 10 | end 11 | gem 'pry' 12 | end 13 | 14 | group :plugins do 15 | gem 'ruby-libvirt', '~> 0.5.2' #, git: 'git://libvirt.org/ruby-libvirt.git', branch: 'master' 16 | 17 | #gem 'vagrant-libvirt', github: 'pradels/vagrant-libvirt', branch: 'master' 18 | gem 'vagrant-libvirt', github: 'aspiers/vagrant-libvirt', branch: 'working' 19 | 20 | #gem 'fog', github: 'fog/fog', branch: 'master' 21 | gem 'fog', github: 'aspiers/fog', branch: 'fix/vols' 22 | 23 | gem 'fog-core', github: 'fog/fog-core', branch: 'master' 24 | 25 | gem 'sahara', github: 'jedi4ever/sahara' 26 | end 27 | -------------------------------------------------------------------------------- /vagrant/Gemfile.lock: -------------------------------------------------------------------------------- 1 | GIT 2 | remote: git://github.com/aspiers/fog.git 3 | revision: 572bcb7e784a5fe7200eea83f8de5d2d01a1a96d 4 | branch: fix/vols 5 | specs: 6 | fog (1.24.0) 7 | fog-brightbox 8 | fog-core (~> 1.24) 9 | fog-json 10 | fog-radosgw (>= 0.0.2) 11 | fog-sakuracloud (>= 0.0.4) 12 | fog-softlayer 13 | ipaddress (~> 0.5) 14 | nokogiri (~> 1.5, >= 1.5.11) 15 | 16 | GIT 17 | remote: 
git://github.com/aspiers/vagrant-libvirt.git 18 | revision: e07e9621f8df1d0f203731fc77e88f52ea65d35e 19 | branch: working 20 | specs: 21 | vagrant-libvirt (0.0.23) 22 | fog (~> 1.15) 23 | nokogiri (~> 1.6.0) 24 | ruby-libvirt (~> 0.4) 25 | 26 | GIT 27 | remote: git://github.com/fog/fog-core.git 28 | revision: 741f915911c0253a505b0ee52c9a6a41eaaf8a1b 29 | branch: master 30 | specs: 31 | fog-core (1.24.0) 32 | builder 33 | excon (~> 0.38) 34 | formatador (~> 0.2) 35 | mime-types 36 | net-scp (~> 1.1) 37 | net-ssh (>= 2.1.3) 38 | 39 | GIT 40 | remote: git://github.com/jedi4ever/sahara.git 41 | revision: 9d28b50f42e58e8226306695e781511ce2af9d4d 42 | specs: 43 | sahara (0.0.17) 44 | popen4 (~> 0.1.2) 45 | 46 | GIT 47 | remote: git://github.com/mitchellh/vagrant.git 48 | revision: c38e17cc29f69aaea1610451333396494181a481 49 | tag: v1.6.5 50 | specs: 51 | vagrant (1.6.5) 52 | bundler (>= 1.5.2, < 1.7.0) 53 | childprocess (~> 0.5.0) 54 | erubis (~> 2.7.0) 55 | hashicorp-checkpoint (~> 0.1.1) 56 | i18n (~> 0.6.0) 57 | listen (~> 2.7.1) 58 | log4r (~> 1.1.9, < 1.1.11) 59 | net-scp (~> 1.1.0) 60 | net-ssh (>= 2.6.6, < 2.10.0) 61 | nokogiri (= 1.6.3.1) 62 | rb-kqueue (~> 0.2.0) 63 | wdm (~> 0.1.0) 64 | winrm (~> 1.1.3) 65 | 66 | GEM 67 | remote: https://rubygems.org/ 68 | specs: 69 | Platform (0.4.0) 70 | akami (1.2.2) 71 | gyoku (>= 0.4.0) 72 | nokogiri 73 | builder (3.2.2) 74 | celluloid (0.16.0) 75 | timers (~> 4.0.0) 76 | childprocess (0.5.5) 77 | ffi (~> 1.0, >= 1.0.11) 78 | coderay (1.1.0) 79 | erubis (2.7.0) 80 | excon (0.41.0) 81 | ffi (1.9.6) 82 | fog-brightbox (0.6.1) 83 | fog-core (~> 1.22) 84 | fog-json 85 | inflecto 86 | fog-json (1.0.0) 87 | multi_json (~> 1.0) 88 | fog-radosgw (0.0.3) 89 | fog-core (>= 1.21.0) 90 | fog-json 91 | fog-xml (>= 0.0.1) 92 | fog-sakuracloud (0.1.1) 93 | fog-core 94 | fog-json 95 | fog-softlayer (0.3.24) 96 | fog-core 97 | fog-json 98 | fog-xml (0.1.1) 99 | fog-core 100 | nokogiri (~> 1.5, >= 1.5.11) 101 | formatador (0.2.5) 102 | gssapi (1.0.3) 103 | ffi (>= 1.0.1) 104 | gyoku (1.2.2) 105 | builder (>= 2.1.2) 106 | hashicorp-checkpoint (0.1.4) 107 | hitimes (1.2.2) 108 | httpclient (2.5.1) 109 | httpi (0.9.7) 110 | rack 111 | i18n (0.6.11) 112 | inflecto (0.0.2) 113 | ipaddress (0.8.0) 114 | listen (2.7.11) 115 | celluloid (>= 0.15.2) 116 | rb-fsevent (>= 0.9.3) 117 | rb-inotify (>= 0.9) 118 | little-plugger (1.1.3) 119 | log4r (1.1.10) 120 | logging (1.8.2) 121 | little-plugger (>= 1.1.3) 122 | multi_json (>= 1.8.4) 123 | method_source (0.8.2) 124 | mime-types (2.4.3) 125 | mini_portile (0.6.0) 126 | multi_json (1.10.1) 127 | net-scp (1.1.2) 128 | net-ssh (>= 2.6.5) 129 | net-ssh (2.9.1) 130 | nokogiri (1.6.3.1) 131 | mini_portile (= 0.6.0) 132 | nori (1.1.5) 133 | open4 (1.3.4) 134 | popen4 (0.1.2) 135 | Platform (>= 0.4.0) 136 | open4 (>= 0.4.0) 137 | pry (0.10.1) 138 | coderay (~> 1.1.0) 139 | method_source (~> 0.8.1) 140 | slop (~> 3.4) 141 | rack (1.5.2) 142 | rb-fsevent (0.9.4) 143 | rb-inotify (0.9.5) 144 | ffi (>= 0.5.0) 145 | rb-kqueue (0.2.3) 146 | ffi (>= 0.5.0) 147 | ruby-libvirt (0.5.2) 148 | rubyntlm (0.1.1) 149 | savon (0.9.5) 150 | akami (~> 1.0) 151 | builder (>= 2.1.2) 152 | gyoku (>= 0.4.0) 153 | httpi (~> 0.9) 154 | nokogiri (>= 1.4.0) 155 | nori (~> 1.0) 156 | wasabi (~> 1.0) 157 | slop (3.6.0) 158 | timers (4.0.1) 159 | hitimes 160 | uuidtools (2.1.5) 161 | wasabi (1.0.0) 162 | nokogiri (>= 1.4.0) 163 | wdm (0.1.0) 164 | winrm (1.1.3) 165 | gssapi (~> 1.0.0) 166 | httpclient (~> 2.2, >= 2.2.0.2) 167 | logging (~> 1.6, >= 1.6.1) 168 | 
nokogiri (~> 1.5) 169 | rubyntlm (~> 0.1.1) 170 | savon (= 0.9.5) 171 | uuidtools (~> 2.1.2) 172 | 173 | PLATFORMS 174 | ruby 175 | 176 | DEPENDENCIES 177 | fog! 178 | fog-core! 179 | pry 180 | ruby-libvirt (~> 0.5.2) 181 | sahara! 182 | vagrant (>= 1.6.5)! 183 | vagrant-libvirt! 184 | -------------------------------------------------------------------------------- /vagrant/README.md: -------------------------------------------------------------------------------- 1 | # Vagrant resources for SUSE OpenStack Cloud 2 | 3 | This directory contains: 4 | 5 | * A [`Vagrantfile`](Vagrantfile) and associated files for deploying a 6 | SUSE OpenStack Cloud environment of (by default) 4 VMs via a single 7 | `vagrant up` command. Use this instead of one of the 8 | [demos](../demos/) if you want more manual control over the setup of 9 | Crowbar barclamps and OpenStack. See the 10 | [HOWTO guide](../docs/HOWTO.md) for more information. 11 | * A [`building-boxes/`](building-boxes/) subdirectory containing a now 12 | obsolete way of building Vagrant boxes. 13 | -------------------------------------------------------------------------------- /vagrant/building-boxes/README.md: -------------------------------------------------------------------------------- 1 | # Building the Vagrant boxes (superseded by kiwi / OBS build!) 2 | 3 | This subdirectory contains resources for building two 4 | [Vagrant](http://vagrantup.com) boxes from the `.vmdk` virtual disks 5 | generated by the [KIWI](https://en.opensuse.org/Portal:KIWI) virtual 6 | appliance image definitions in [`../kiwi`](../kiwi/). 7 | 8 | However, since these files were originally built, 9 | [KIWI has learnt how to build Vagrant boxes directly](https://github.com/openSUSE/kiwi/pull/353), 10 | and so has [OBS](http://openbuildservice.org/), so it is no 11 | longer recommended to build boxes using these files. They are left 12 | here for posterity. 13 | 14 | There are two different boxes defined within this subdirectory: 15 | 16 | * [`cloud-admin`](cloud-admin/) - the SUSE OpenStack Cloud admin node, 17 | which runs Crowbar and Chef, and 18 | * [`sles12-sp2`](sles12-sp2/) - a cut-down preload image of SUSE Linux 19 | Enterprise Server (SLES) 12 SP2, which will be used to provision 20 | two controller nodes (forming an HA cluster), and a compute node. 21 | 22 | Obsolete instructions for building them are contained within the 23 | READMEs in those subdirectories. 24 | -------------------------------------------------------------------------------- /vagrant/building-boxes/box.make: -------------------------------------------------------------------------------- 1 | # Deprecated! - please consider using OBS instead, as explained in the 2 | # README.md. 3 | 4 | KIWI_IMAGE_DIR = ../../kiwi/$(BOX_NAME)/image 5 | 6 | BOX_FILE = $(BOX_NAME).box 7 | BOX_VMDK = box-disk1.vmdk 8 | OVF = box.ovf 9 | 10 | COMPONENTS = Vagrantfile metadata.json $(OVF) $(KIWI_IMAGE_DIR)/$(VMDK) 11 | 12 | default: $(BOX_FILE) 13 | 14 | $(BOX_FILE): $(COMPONENTS) 15 | tar --transform="s,.*\.vmdk,$(BOX_VMDK)," -hcvf $@ $(COMPONENTS) 16 | -------------------------------------------------------------------------------- /vagrant/building-boxes/cloud-admin/.gitignore: -------------------------------------------------------------------------------- 1 | *.box 2 | *.vmdk 3 | -------------------------------------------------------------------------------- /vagrant/building-boxes/cloud-admin/Makefile: -------------------------------------------------------------------------------- 1 | # Deprecated! 
- please consider using OBS instead, as explained in the 2 | # README.md. 3 | 4 | # This builds a vagrant box with a very similar end result to "vagrant 5 | # package", except the .ovf file is taken from this git repository, 6 | # and the .vmdk is taken from the kiwi build. 7 | 8 | BOX_NAME = cloud7-admin 9 | VMDK = cloud7-admin.x86_64-0.0.1.vmdk 10 | 11 | include ../box.make 12 | -------------------------------------------------------------------------------- /vagrant/building-boxes/cloud-admin/README.md: -------------------------------------------------------------------------------- 1 | # Vagrant box for Crowbar admin node 2 | 3 | ## Building the box 4 | 5 | **N.B. [These instructions are now obsolete!](../README.md)** 6 | 7 | First you need to build the `.vmdk` as detailed in 8 | [the README for the corresponding KIWI appliance](../../../kiwi/cloud-admin/README.md). 9 | 10 | Then, `cd` to the directory containing this README, and type: 11 | 12 | make 13 | 14 | This will create the `.box` file in the current directory, which you 15 | can then install the Vagrant box via: 16 | 17 | vagrant box add cloud-admin.json 18 | -------------------------------------------------------------------------------- /vagrant/building-boxes/cloud-admin/Vagrantfile: -------------------------------------------------------------------------------- 1 | ../generic-box-Vagrantfile -------------------------------------------------------------------------------- /vagrant/building-boxes/cloud-admin/cloud-admin.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "suse/cloud7-admin", 3 | "description": "This box contains a SUSE OpenStack Cloud 7 admin node preinstalled on SLES12 SP2 x86_64.", 4 | "versions": [{ 5 | "version": "0.0.1", 6 | "providers": [{ 7 | "name": "virtualbox", 8 | "url": "cloud7-admin.box" 9 | }] 10 | }] 11 | } 12 | -------------------------------------------------------------------------------- /vagrant/building-boxes/cloud-admin/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "provider": "virtualbox", 3 | "format": "vmdk", 4 | "virtual_size": 16 5 | } 6 | -------------------------------------------------------------------------------- /vagrant/building-boxes/generic-box-Vagrantfile: -------------------------------------------------------------------------------- 1 | Vagrant::Config.run do |config| 2 | # This Vagrantfile is auto-generated by `vagrant package` to contain 3 | # the MAC address of the box. Custom configuration should be placed in 4 | # the actual `Vagrantfile` in this box. 
5 | config.vm.base_mac = "08002765C48F" 6 | end 7 | 8 | # Load include vagrant file if it exists after the auto-generated 9 | # so it can override any of the settings 10 | include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__) 11 | load include_vagrantfile if File.exist?(include_vagrantfile) 12 | -------------------------------------------------------------------------------- /vagrant/building-boxes/pxe/Makefile: -------------------------------------------------------------------------------- 1 | SIZE = 16 2 | NAME = pxe-$(SIZE)GB 3 | QCOW2 = $(NAME).qcow2 4 | VMDK = $(NAME).vmdk 5 | LIBVIRT_BOX_FILE = $(NAME)-libvirt.box 6 | VIRTUALBOX_BOX_FILE = $(NAME)-virtualbox.box 7 | COMMON_COMPONENTS = Vagrantfile 8 | LIBVIRT_COMPONENTS = $(COMMON_COMPONENTS) $(QCOW2) metadata-libvirt.json 9 | VIRTUALBOX_COMPONENTS = $(COMMON_COMPONENTS) $(VMDK) metadata-virtualbox.json 10 | 11 | default: $(LIBVIRT_BOX_FILE) 12 | 13 | $(QCOW2): 14 | qemu-img create -f qcow2 $@ $(SIZE)G 15 | 16 | $(LIBVIRT_BOX_FILE): $(LIBVIRT_COMPONENTS) Makefile 17 | tar -jhcvf $@ \ 18 | --transform="s,metadata-libvirt,metadata," \ 19 | $(LIBVIRT_COMPONENTS) 20 | @echo Created $@ 21 | 22 | $(VIRTUALBOX_BOX_FILE): $(VIRTUALBOX_COMPONENTS) Makefile 23 | tar -jhcvf $@ \ 24 | --transform="s,metadata-virtualbox,metadata,; 25 | s,.*\.vmdk,$(BOX_VMDK)," \ 26 | $(VIRTUALBOX_COMPONENTS) 27 | @echo Created $@ 28 | -------------------------------------------------------------------------------- /vagrant/building-boxes/pxe/Vagrantfile: -------------------------------------------------------------------------------- 1 | ../generic-box-Vagrantfile -------------------------------------------------------------------------------- /vagrant/building-boxes/pxe/metadata-libvirt.json: -------------------------------------------------------------------------------- 1 | { 2 | "provider": "libvirt", 3 | "format": "qcow2", 4 | "virtual_size": 16 5 | } 6 | -------------------------------------------------------------------------------- /vagrant/building-boxes/pxe/metadata-virtualbox.json: -------------------------------------------------------------------------------- 1 | { 2 | "provider": "virtualbox", 3 | "format": "vmdk", 4 | "virtual_size": 16 5 | } 6 | -------------------------------------------------------------------------------- /vagrant/building-boxes/pxe/pxe-16GB-libvirt.box: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE-Cloud/suse-cloud-appliances/ab2eb83db8687a6d535968ba4514ffffed7fb8ea/vagrant/building-boxes/pxe/pxe-16GB-libvirt.box -------------------------------------------------------------------------------- /vagrant/building-boxes/pxe/pxe-16GB.qcow2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE-Cloud/suse-cloud-appliances/ab2eb83db8687a6d535968ba4514ffffed7fb8ea/vagrant/building-boxes/pxe/pxe-16GB.qcow2 -------------------------------------------------------------------------------- /vagrant/building-boxes/sles12-sp2/.gitignore: -------------------------------------------------------------------------------- 1 | *.box 2 | *.vmdk 3 | -------------------------------------------------------------------------------- /vagrant/building-boxes/sles12-sp2/Makefile: -------------------------------------------------------------------------------- 1 | # Deprecated! - please consider using OBS instead, as explained in the 2 | # README.md. 
3 | 4 | # This builds a vagrant box with a very similar end result to "vagrant 5 | # package", except the .ovf file is taken from this git repository, 6 | # and the .vmdk is taken from the kiwi build. 7 | 8 | BOX_NAME = sles12-sp2 9 | VMDK = sles12-sp2.x86_64-0.0.1.vmdk 10 | 11 | include ../box.make 12 | -------------------------------------------------------------------------------- /vagrant/building-boxes/sles12-sp2/README.md: -------------------------------------------------------------------------------- 1 | # Vagrant box for Crowbar client node 2 | 3 | ## Building the box 4 | 5 | **N.B. [These instructions are now obsolete!](../README.md)** 6 | 7 | First you need to build the `.vmdk` as detailed in 8 | [the README for the corresponding KIWI appliance](../../../kiwi/sles12-sp2/README.md). 9 | 10 | Then, `cd` to the directory containing this README, and type: 11 | 12 | make 13 | 14 | This will create the `.box` file in the current directory, which you 15 | can then install the Vagrant box via: 16 | 17 | vagrant box add sles12-sp2.json 18 | -------------------------------------------------------------------------------- /vagrant/building-boxes/sles12-sp2/Vagrantfile: -------------------------------------------------------------------------------- 1 | ../generic-box-Vagrantfile -------------------------------------------------------------------------------- /vagrant/building-boxes/sles12-sp2/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "provider": "virtualbox", 3 | "format": "vmdk", 4 | "virtual_size": 16 5 | } 6 | -------------------------------------------------------------------------------- /vagrant/building-boxes/sles12-sp2/sles12-sp2.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "suse/sles12-sp2", 3 | "description": "This box contains a SLES12 SP2 x86_64 JeOS appliance.", 4 | "versions": [{ 5 | "version": "0.0.1", 6 | "providers": [{ 7 | "name": "virtualbox", 8 | "url": "sles12-sp2.box" 9 | }] 10 | }] 11 | } 12 | -------------------------------------------------------------------------------- /vagrant/configs/1-controller-1-compute.yaml: -------------------------------------------------------------------------------- 1 | # ../Vagrantfile will use this config file to build a cloud 2 | # with 1 controller and 1 compute node. You'll probably need 3 | # at least 12GB RAM on the host for this! 4 | --- 5 | networks: 6 | management: 192.168.101.0/24 # FIXME: currently only used for libvirt 7 | crowbar: 8 | admin_ip: 192.168.124.10 9 | pool_start: 192.168.124.50 # dummy IP, not actually used 10 | vms: 11 | - name: DEFAULT 12 | cpus: 2 13 | ram: 4000 14 | box: suse/sles12sp2 15 | - name: admin 16 | personality: admin 17 | primary: true 18 | box: suse/cloud7-admin 19 | forward_ports: 20 | # Set the VAGRANT_INSECURE_PORT_FORWARDS environment variable 21 | # to 'yes' if you want these forwarded ports accessible from 22 | # other machines which can route to your host. This is useful 23 | # when testing/demoing the cloud from a different machine. 24 | - {host: 8000, guest: 80} # Crowbar admin web UI 25 | - name: controller1 26 | personality: controller 27 | ram: 6000 28 | forward_ports: 29 | # See above note about VAGRANT_INSECURE_PORT_FORWARDS. 
30 | - {host: 8080, guest: 80} # Horizon dashboard 31 | - name: compute1 32 | personality: compute 33 | ram: 2000 34 | -------------------------------------------------------------------------------- /vagrant/configs/2-controllers-0-compute.yaml: -------------------------------------------------------------------------------- 1 | # ../Vagrantfile will use this config file to build a cloud with 2 HA 2 | # controllers and 0 compute nodes. You'll probably need at least 16GB 3 | # RAM on the host for this! 4 | --- 5 | networks: 6 | management: 192.168.101.0/24 # FIXME: currently only used for libvirt 7 | crowbar: 8 | admin_ip: 192.168.124.10 9 | pool_start: 192.168.124.50 # dummy IP, not actually used 10 | vms: 11 | - name: DEFAULT 12 | cpus: 2 13 | ram: 4000 14 | box: suse/sles12sp2 15 | - name: admin 16 | personality: admin 17 | primary: true 18 | box: suse/cloud7-admin 19 | forward_ports: 20 | # Set the VAGRANT_INSECURE_PORT_FORWARDS environment variable 21 | # to 'yes' if you want these forwarded ports accessible from 22 | # other machines which can route to your host. This is useful 23 | # when testing/demoing the cloud from a different machine. 24 | - {host: 8000, guest: 80} # Crowbar admin web UI 25 | - name: controller1 26 | personality: controller 27 | ram: 6000 28 | drbd: true 29 | sbd: create 30 | forward_ports: 31 | # See above note about VAGRANT_INSECURE_PORT_FORWARDS. 32 | - {host: 7630, guest: 7630} # Hawk web UI for HA clusters 33 | - name: controller2 34 | personality: controller 35 | ram: 6000 36 | drbd: true 37 | sbd: share 38 | forward_ports: 39 | # See above note about VAGRANT_INSECURE_PORT_FORWARDS. 40 | - {host: 7631, guest: 7630} # Hawk web UI for HA clusters 41 | -------------------------------------------------------------------------------- /vagrant/configs/2-controllers-1-compute.yaml: -------------------------------------------------------------------------------- 1 | # ../Vagrantfile will use this config file to build a cloud with 2 HA 2 | # controllers and 1 compute node. You'll probably need at least 18GB 3 | # RAM on the host for this! 4 | --- 5 | networks: 6 | management: 192.168.101.0/24 # FIXME: currently only used for libvirt 7 | crowbar: 8 | admin_ip: 192.168.124.10 9 | pool_start: 192.168.124.50 # dummy IP, not actually used 10 | vms: 11 | - name: DEFAULT 12 | cpus: 2 13 | ram: 4000 14 | box: suse/sles12sp2 15 | - name: admin 16 | personality: admin 17 | primary: true 18 | box: suse/cloud7-admin 19 | forward_ports: 20 | # Set the VAGRANT_INSECURE_PORT_FORWARDS environment variable 21 | # to 'yes' if you want these forwarded ports accessible from 22 | # other machines which can route to your host. This is useful 23 | # when testing/demoing the cloud from a different machine. 24 | - {host: 8000, guest: 80} # Crowbar admin web UI 25 | - name: controller1 26 | personality: controller 27 | ram: 6000 28 | drbd: true 29 | sbd: create 30 | forward_ports: 31 | # See above note about VAGRANT_INSECURE_PORT_FORWARDS. 32 | - {host: 7630, guest: 7630} # Hawk web UI for HA clusters 33 | - name: controller2 34 | personality: controller 35 | ram: 6000 36 | drbd: true 37 | sbd: share 38 | forward_ports: 39 | # See above note about VAGRANT_INSECURE_PORT_FORWARDS. 
40 | - {host: 7631, guest: 7630} # Hawk web UI for HA clusters 41 | - name: compute1 42 | personality: compute 43 | ram: 1800 44 | -------------------------------------------------------------------------------- /vagrant/configs/2-controllers-2-computes.yaml: -------------------------------------------------------------------------------- 1 | # ../Vagrantfile will use this config file to build a cloud with 2 HA 2 | # controllers and 2 compute nodes. You'll probably need at least 20GB 3 | # RAM on the host for this! 4 | --- 5 | networks: 6 | management: 192.168.101.0/24 # FIXME: currently only used for libvirt 7 | crowbar: 8 | admin_ip: 192.168.124.10 9 | pool_start: 192.168.124.50 # dummy IP, not actually used 10 | vms: 11 | - name: DEFAULT 12 | cpus: 2 13 | ram: 4000 14 | box: suse/sles12sp2 15 | - name: admin 16 | personality: admin 17 | primary: true 18 | box: suse/cloud7-admin 19 | forward_ports: 20 | # Set the VAGRANT_INSECURE_PORT_FORWARDS environment variable 21 | # to 'yes' if you want these forwarded ports accessible from 22 | # other machines which can route to your host. This is useful 23 | # when testing/demoing the cloud from a different machine. 24 | - {host: 8000, guest: 80} # Crowbar admin web UI 25 | - name: controller1 26 | personality: controller 27 | ram: 6000 28 | drbd: true 29 | sbd: create 30 | forward_ports: 31 | # See above note about VAGRANT_INSECURE_PORT_FORWARDS. 32 | - {host: 7630, guest: 7630} # Hawk web UI for HA clusters 33 | - name: controller2 34 | personality: controller 35 | ram: 6000 36 | drbd: true 37 | sbd: share 38 | forward_ports: 39 | # See above note about VAGRANT_INSECURE_PORT_FORWARDS. 40 | - {host: 7631, guest: 7630} # Hawk web UI for HA clusters 41 | - name: compute1 42 | personality: compute 43 | ram: 1800 44 | sbd: share 45 | - name: compute2 46 | personality: compute 47 | ram: 1800 48 | sbd: share 49 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/HA-cloud-no-compute.yaml: -------------------------------------------------------------------------------- 1 | # Input file for 'crowbar autobuild' command which sets up a cloud 2 | # with a single 2-node HA cluster using DRBD and SBD for STONITH, and 3 | # no compute node. 4 | --- 5 | global_options: 6 | - action_for_existing_proposals: skip # could also be e.g. 'recreate' or 'overwrite' 7 | proposals: 8 | - barclamp: provisioner 9 | attributes: 10 | shell_prompt: USER@ALIAS:CWD SUFFIX 11 | - barclamp: nfs_client 12 | name: glance 13 | attributes: 14 | exports: 15 | "/var/lib/glance": 16 | nfs_server: 192.168.124.10 17 | export: "/var/lib/glance" 18 | mount_path: "/var/lib/glance" 19 | mount_options: 20 | - '' 21 | deployment: 22 | elements: 23 | nfs-client: 24 | - "@@controller1@@" 25 | - "@@controller2@@" 26 | - barclamp: pacemaker 27 | name: services 28 | action_if_exists: overwrite 29 | attributes: 30 | stonith: 31 | mode: sbd 32 | sbd: 33 | watchdog_module: softdog 34 | nodes: 35 | @@controller1@@: 36 | devices: 37 | - /dev/sdb 38 | @@controller2@@: 39 | devices: 40 | - /dev/sdb 41 | drbd: 42 | enabled: true 43 | deployment: 44 | elements: 45 | hawk-server: 46 | - "@@controller1@@" 47 | - "@@controller2@@" 48 | pacemaker-cluster-member: 49 | - "@@controller1@@" 50 | - "@@controller2@@" 51 | - barclamp: database 52 | # Proposal name defaults to 'default'. 
53 | # Default attributes are good enough, so we just need to assign 54 | # nodes to roles: 55 | attributes: 56 | ha: 57 | storage: 58 | mode: drbd 59 | drbd: 60 | size: 1 61 | deployment: 62 | elements: 63 | database-server: 64 | - cluster:services 65 | - barclamp: rabbitmq 66 | attributes: 67 | ha: 68 | storage: 69 | mode: drbd 70 | drbd: 71 | size: 1 72 | deployment: 73 | elements: 74 | rabbitmq-server: 75 | - cluster:services 76 | - barclamp: keystone 77 | deployment: 78 | elements: 79 | keystone-server: 80 | - cluster:services 81 | - barclamp: glance 82 | deployment: 83 | elements: 84 | glance-server: 85 | - cluster:services 86 | # - barclamp: cinder 87 | # wipe_attributes: 88 | # - volumes 89 | # attributes: 90 | # volumes: 91 | # - backend_name: local 92 | # backend_driver: local 93 | # local: 94 | # file_size: 2000 95 | # volume_name: cinder-volumes 96 | # file_name: /var/lib/cinder/volume.raw 97 | # deployment: 98 | # elements: 99 | # cinder-controller: 100 | # - cluster:services 101 | # cinder-volume: 102 | # - "@@compute1@@" 103 | - barclamp: neutron 104 | attributes: 105 | ml2_mechanism_drivers: 106 | - linuxbridge 107 | ml2_type_drivers: 108 | - vlan 109 | ml2_type_drivers_default_provider_network: vlan 110 | ml2_type_drivers_default_tenant_network: vlan 111 | deployment: 112 | elements: 113 | neutron-server: 114 | - cluster:services 115 | neutron-network: 116 | - cluster:services 117 | # - barclamp: nova 118 | # attributes: 119 | # use_migration: true 120 | # kvm: 121 | # ksm_enabled: true 122 | # deployment: 123 | # elements: 124 | # nova-controller: 125 | # - cluster:services 126 | # nova-compute-qemu: 127 | # - "@@compute1@@" 128 | # - barclamp: horizon 129 | # deployment: 130 | # elements: 131 | # horizon-server: 132 | # - cluster:services 133 | # - barclamp: heat 134 | # deployment: 135 | # elements: 136 | # heat-server: 137 | # - cluster:services 138 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/HA-cloud.yaml: -------------------------------------------------------------------------------- 1 | # Input file for 'crowbar autobuild' command which sets up a cloud 2 | # with a single 2-node HA cluster using DRBD and SBD for STONITH, and 3 | # a single KVM compute node. 4 | --- 5 | global_options: 6 | - action_for_existing_proposals: skip # could also be e.g. 'recreate' or 'overwrite' 7 | proposals: 8 | - barclamp: provisioner 9 | attributes: 10 | shell_prompt: USER@ALIAS:CWD SUFFIX 11 | - barclamp: nfs_client 12 | name: glance 13 | attributes: 14 | exports: 15 | "/var/lib/glance": 16 | nfs_server: 192.168.124.10 17 | export: "/var/lib/glance" 18 | mount_path: "/var/lib/glance" 19 | mount_options: 20 | - '' 21 | deployment: 22 | elements: 23 | nfs-client: 24 | - "@@controller1@@" 25 | - "@@controller2@@" 26 | - barclamp: pacemaker 27 | name: services 28 | action_if_exists: overwrite 29 | attributes: 30 | stonith: 31 | mode: sbd 32 | sbd: 33 | watchdog_module: softdog 34 | nodes: 35 | @@controller1@@: 36 | devices: 37 | - /dev/sdb 38 | @@controller2@@: 39 | devices: 40 | - /dev/sdb 41 | drbd: 42 | enabled: true 43 | deployment: 44 | elements: 45 | hawk-server: 46 | - "@@controller1@@" 47 | - "@@controller2@@" 48 | pacemaker-cluster-member: 49 | - "@@controller1@@" 50 | - "@@controller2@@" 51 | - barclamp: database 52 | # Proposal name defaults to 'default'. 
53 | # Default attributes are good enough, so we just need to assign 54 | # nodes to roles: 55 | attributes: 56 | ha: 57 | storage: 58 | mode: drbd 59 | drbd: 60 | size: 1 61 | deployment: 62 | elements: 63 | database-server: 64 | - cluster:services 65 | - barclamp: rabbitmq 66 | attributes: 67 | ha: 68 | storage: 69 | mode: drbd 70 | drbd: 71 | size: 1 72 | deployment: 73 | elements: 74 | rabbitmq-server: 75 | - cluster:services 76 | - barclamp: keystone 77 | deployment: 78 | elements: 79 | keystone-server: 80 | - cluster:services 81 | - barclamp: glance 82 | deployment: 83 | elements: 84 | glance-server: 85 | - cluster:services 86 | - barclamp: cinder 87 | wipe_attributes: 88 | - volumes 89 | attributes: 90 | volumes: 91 | - backend_name: local 92 | backend_driver: local 93 | local: 94 | file_size: 2000 95 | volume_name: cinder-volumes 96 | file_name: /var/lib/cinder/volume.raw 97 | deployment: 98 | elements: 99 | cinder-controller: 100 | - cluster:services 101 | cinder-volume: 102 | - "@@compute1@@" 103 | - barclamp: neutron 104 | attributes: 105 | ml2_mechanism_drivers: 106 | - linuxbridge 107 | ml2_type_drivers: 108 | - vlan 109 | ml2_type_drivers_default_provider_network: vlan 110 | ml2_type_drivers_default_tenant_network: vlan 111 | deployment: 112 | elements: 113 | neutron-server: 114 | - cluster:services 115 | neutron-network: 116 | - cluster:services 117 | - barclamp: nova 118 | attributes: 119 | use_migration: true 120 | kvm: 121 | ksm_enabled: true 122 | deployment: 123 | elements: 124 | nova-controller: 125 | - cluster:services 126 | nova-compute-qemu: 127 | - "@@compute1@@" 128 | - barclamp: horizon 129 | deployment: 130 | elements: 131 | horizon-server: 132 | - cluster:services 133 | - barclamp: heat 134 | deployment: 135 | elements: 136 | heat-server: 137 | - cluster:services 138 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/HA-compute-cloud-demo.yaml: -------------------------------------------------------------------------------- 1 | # Input file for 'crowbar autobuild' command which sets up a cloud 2 | # with a single 2-node HA cluster using DRBD and SBD for STONITH, and 3 | # no compute node yet (preparation for compute HA demo). 4 | --- 5 | global_options: 6 | - action_for_existing_proposals: skip # could also be e.g. 
'recreate' or 'overwrite' 7 | proposals: 8 | - barclamp: provisioner 9 | attributes: 10 | shell_prompt: USER@ALIAS:CWD SUFFIX 11 | - barclamp: nfs_client 12 | name: glance 13 | attributes: 14 | exports: 15 | "/var/lib/glance": 16 | nfs_server: 192.168.124.10 17 | export: "/var/lib/glance" 18 | mount_path: "/var/lib/glance" 19 | mount_options: 20 | - '' 21 | deployment: 22 | elements: 23 | nfs-client: 24 | - "@@controller1@@" 25 | - "@@controller2@@" 26 | - barclamp: nfs_client 27 | name: nova 28 | attributes: 29 | exports: 30 | "/var/lib/nova/instances": 31 | nfs_server: 192.168.124.10 32 | export: "/var/lib/nova/instances" 33 | mount_path: "/var/lib/nova/instances" 34 | mount_options: 35 | - '' 36 | deployment: 37 | elements: 38 | nfs-client: 39 | - "@@compute1@@" 40 | - "@@compute2@@" 41 | - barclamp: pacemaker 42 | name: services 43 | action_if_exists: overwrite 44 | attributes: 45 | stonith: 46 | mode: sbd 47 | sbd: 48 | watchdog_module: softdog 49 | nodes: 50 | @@controller1@@: 51 | devices: 52 | - /dev/sdb 53 | @@controller2@@: 54 | devices: 55 | - /dev/sdb 56 | drbd: 57 | enabled: true 58 | deployment: 59 | elements: 60 | hawk-server: 61 | - "@@controller1@@" 62 | - "@@controller2@@" 63 | pacemaker-cluster-member: 64 | - "@@controller1@@" 65 | - "@@controller2@@" 66 | - barclamp: database 67 | # Proposal name defaults to 'default'. 68 | # Default attributes are good enough, so we just need to assign 69 | # nodes to roles: 70 | attributes: 71 | ha: 72 | storage: 73 | mode: drbd 74 | drbd: 75 | size: 1 76 | deployment: 77 | elements: 78 | database-server: 79 | - cluster:services 80 | - barclamp: rabbitmq 81 | attributes: 82 | ha: 83 | storage: 84 | mode: drbd 85 | drbd: 86 | size: 1 87 | deployment: 88 | elements: 89 | rabbitmq-server: 90 | - cluster:services 91 | - barclamp: keystone 92 | deployment: 93 | elements: 94 | keystone-server: 95 | - cluster:services 96 | - barclamp: glance 97 | deployment: 98 | elements: 99 | glance-server: 100 | - cluster:services 101 | - barclamp: cinder 102 | wipe_attributes: 103 | - volumes 104 | attributes: 105 | volumes: 106 | - backend_name: local 107 | backend_driver: local 108 | local: 109 | file_size: 2000 110 | volume_name: cinder-volumes 111 | file_name: /var/lib/cinder/volume.raw 112 | deployment: 113 | elements: 114 | cinder-controller: 115 | - cluster:services 116 | cinder-volume: 117 | - "@@compute1@@" 118 | - "@@compute2@@" 119 | - barclamp: neutron 120 | attributes: 121 | ml2_mechanism_drivers: 122 | - linuxbridge 123 | ml2_type_drivers: 124 | - vlan 125 | ml2_type_drivers_default_provider_network: vlan 126 | ml2_type_drivers_default_tenant_network: vlan 127 | deployment: 128 | elements: 129 | neutron-server: 130 | - cluster:services 131 | neutron-network: 132 | - cluster:services 133 | - barclamp: nova 134 | attributes: 135 | use_shared_instance_storage: true 136 | use_migration: true 137 | kvm: 138 | ksm_enabled: true 139 | deployment: 140 | elements: 141 | nova-controller: 142 | - cluster:services 143 | - barclamp: horizon 144 | deployment: 145 | elements: 146 | horizon-server: 147 | - cluster:services 148 | - barclamp: heat 149 | deployment: 150 | elements: 151 | heat-server: 152 | - cluster:services 153 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/HA-compute-cloud.yaml: -------------------------------------------------------------------------------- 1 | # Input file for 'crowbar autobuild' command which sets up a cloud 2 | # with a single 2-node HA cluster using DRBD 
and SBD for STONITH, and 3 | # two KVM compute node as part of the HA cluster. 4 | --- 5 | global_options: 6 | - action_for_existing_proposals: skip # could also be e.g. 'recreate' or 'overwrite' 7 | proposals: 8 | - barclamp: provisioner 9 | attributes: 10 | shell_prompt: USER@ALIAS:CWD SUFFIX 11 | - barclamp: nfs_client 12 | name: glance 13 | attributes: 14 | exports: 15 | "/var/lib/glance": 16 | nfs_server: 192.168.124.10 17 | export: "/var/lib/glance" 18 | mount_path: "/var/lib/glance" 19 | mount_options: 20 | - '' 21 | deployment: 22 | elements: 23 | nfs-client: 24 | - "@@controller1@@" 25 | - "@@controller2@@" 26 | - barclamp: nfs_client 27 | name: nova 28 | attributes: 29 | exports: 30 | "/var/lib/nova/instances": 31 | nfs_server: 192.168.124.10 32 | export: "/var/lib/nova/instances" 33 | mount_path: "/var/lib/nova/instances" 34 | mount_options: 35 | - '' 36 | deployment: 37 | elements: 38 | nfs-client: 39 | - "@@compute1@@" 40 | - "@@compute2@@" 41 | - barclamp: pacemaker 42 | name: services 43 | action_if_exists: overwrite 44 | attributes: 45 | stonith: 46 | mode: sbd 47 | sbd: 48 | watchdog_module: softdog 49 | nodes: 50 | @@controller1@@: 51 | devices: 52 | - /dev/sdb 53 | @@controller2@@: 54 | devices: 55 | - /dev/sdb 56 | @@compute1@@: 57 | devices: 58 | - /dev/sdb 59 | @@compute2@@: 60 | devices: 61 | - /dev/sdb 62 | drbd: 63 | enabled: true 64 | deployment: 65 | elements: 66 | hawk-server: 67 | - "@@controller1@@" 68 | - "@@controller2@@" 69 | pacemaker-cluster-member: 70 | - "@@controller1@@" 71 | - "@@controller2@@" 72 | pacemaker-remote: 73 | - "@@compute1@@" 74 | - "@@compute2@@" 75 | - barclamp: database 76 | # Proposal name defaults to 'default'. 77 | # Default attributes are good enough, so we just need to assign 78 | # nodes to roles: 79 | attributes: 80 | ha: 81 | storage: 82 | mode: drbd 83 | drbd: 84 | size: 1 85 | deployment: 86 | elements: 87 | database-server: 88 | - cluster:services 89 | - barclamp: rabbitmq 90 | attributes: 91 | ha: 92 | storage: 93 | mode: drbd 94 | drbd: 95 | size: 1 96 | deployment: 97 | elements: 98 | rabbitmq-server: 99 | - cluster:services 100 | - barclamp: keystone 101 | deployment: 102 | elements: 103 | keystone-server: 104 | - cluster:services 105 | - barclamp: glance 106 | deployment: 107 | elements: 108 | glance-server: 109 | - cluster:services 110 | - barclamp: cinder 111 | wipe_attributes: 112 | - volumes 113 | attributes: 114 | volumes: 115 | - backend_name: local 116 | backend_driver: local 117 | local: 118 | file_size: 2000 119 | volume_name: cinder-volumes 120 | file_name: /var/lib/cinder/volume.raw 121 | deployment: 122 | elements: 123 | cinder-controller: 124 | - cluster:services 125 | cinder-volume: 126 | - "@@compute1@@" 127 | - "@@compute2@@" 128 | - barclamp: neutron 129 | attributes: 130 | ml2_mechanism_drivers: 131 | - linuxbridge 132 | ml2_type_drivers: 133 | - vlan 134 | ml2_type_drivers_default_provider_network: vlan 135 | ml2_type_drivers_default_tenant_network: vlan 136 | deployment: 137 | elements: 138 | neutron-server: 139 | - cluster:services 140 | neutron-network: 141 | - cluster:services 142 | - barclamp: nova 143 | attributes: 144 | use_shared_instance_storage: true 145 | use_migration: true 146 | kvm: 147 | ksm_enabled: true 148 | deployment: 149 | elements: 150 | nova-controller: 151 | - cluster:services 152 | nova-compute-qemu: 153 | - remotes:services 154 | - barclamp: horizon 155 | deployment: 156 | elements: 157 | horizon-server: 158 | - cluster:services 159 | - barclamp: heat 160 | deployment: 161 | 
elements: 162 | heat-server: 163 | - cluster:services 164 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/apply-fix-lp#1691831.patch: -------------------------------------------------------------------------------- 1 | diff --git a/chef/cookbooks/nova/recipes/compute.rb b/chef/cookbooks/nova/recipes/compute.rb 2 | index 97ecda00e..4ceb14e52 100644 3 | --- a/chef/cookbooks/nova/recipes/compute.rb 4 | +++ b/chef/cookbooks/nova/recipes/compute.rb 5 | @@ -257,6 +257,13 @@ nova_package "compute" do 6 | no_crm_maintenance_mode true 7 | end 8 | 9 | +bash "apply fix for lp#1691831" do 10 | + code <<-EOH 11 | + sed -i "s/if ex.errno != errno.EPERM:/if ex.errno != errno.EACCES:/g" /usr/lib/python2.7/site-packages/nova/virt/libvirt/driver.py 12 | + EOH 13 | + only_if { ::File.exists? "/usr/lib/python2.7/site-packages/nova/virt/libvirt/driver.py" } 14 | +end 15 | + 16 | cookbook_file "/etc/nova/nova-compute.conf" do 17 | source "nova-compute.conf" 18 | owner "root" 19 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/barclamp-network-ignore-eth0.patch: -------------------------------------------------------------------------------- 1 | From ed1fea70a5fe31e4a2b68a4e3317eea8a56659fa Mon Sep 17 00:00:00 2001 2 | From: Ralf Haferkamp 3 | Date: Tue, 6 May 2014 15:47:44 +0200 4 | Subject: [PATCH] Added hack to ignore eth0 to allow use by vagrant 5 | 6 | --- 7 | chef/cookbooks/network/recipes/default.rb | 4 ++++ 8 | chef/data_bags/crowbar/bc-template-network.json | 12 ++++++------ 9 | 2 files changed, 10 insertions(+), 6 deletions(-) 10 | 11 | diff --git a/chef/cookbooks/network/recipes/default.rb b/chef/cookbooks/network/recipes/default.rb 12 | index e9fb9d2..0f13e71 100644 13 | --- a/chef/cookbooks/network/recipes/default.rb 14 | +++ b/chef/cookbooks/network/recipes/default.rb 15 | @@ -328,6 +328,8 @@ Nic.refresh_all 16 | 17 | # Kill any nics that we don't want hanging around anymore. 18 | Nic.nics.reverse_each do |nic| 19 | + # Hack to make vagrant happy 20 | + next if nic.name == "eth0" 21 | next if ifs[nic.name] 22 | # If we are bringing this node under management, kill any nics we did not 23 | # configure, except for loopback interfaces. 
24 | @@ -482,6 +484,8 @@ when "suse" 25 | ethtool_options = ethtool_options.join(" ") 26 | 27 | Nic.nics.each do |nic| 28 | + # Hack to make vagrant happy 29 | + next if nic.name == "eth0" 30 | next unless ifs[nic.name] 31 | template "/etc/sysconfig/network/ifcfg-#{nic.name}" do 32 | source "suse-cfg.erb" 33 | diff --git a/chef/data_bags/crowbar/template-network.json b/chef/data_bags/crowbar/template-network.json 34 | index 1057e44..1182f8d 100644 35 | --- a/chef/data_bags/crowbar/template-network.json 36 | +++ b/chef/data_bags/crowbar/template-network.json 37 | @@ -80,13 +80,13 @@ 38 | "pattern": "team/.*/.*", 39 | "conduit_list": { 40 | "intf0": { 41 | - "if_list": [ "1g1", "1g2" ] 42 | + "if_list": [ "1g2", "1g3" ] 43 | }, 44 | "intf1": { 45 | - "if_list": [ "1g1", "1g2" ] 46 | + "if_list": [ "1g2", "1g3" ] 47 | }, 48 | "intf2": { 49 | - "if_list": [ "1g1", "1g2" ] 50 | + "if_list": [ "1g2", "1g3" ] 51 | } 52 | } 53 | }, 54 | @@ -108,13 +108,13 @@ 55 | "pattern": "single/.*/.*", 56 | "conduit_list": { 57 | "intf0": { 58 | - "if_list": [ "?1g1" ] 59 | + "if_list": [ "?1g2" ] 60 | }, 61 | "intf1": { 62 | - "if_list": [ "?1g1" ] 63 | + "if_list": [ "?1g2" ] 64 | }, 65 | "intf2": { 66 | - "if_list": [ "?1g1" ] 67 | + "if_list": [ "?1g2" ] 68 | } 69 | } 70 | }, 71 | -- 72 | 2.6.0 73 | 74 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/increase-SBD-timeout-30s.patch: -------------------------------------------------------------------------------- 1 | From ff9766595c0f265c77d4359e8f95d2850243257b Mon Sep 17 00:00:00 2001 2 | From: Adam Spiers 3 | Date: Sun, 2 Nov 2014 12:59:31 +0000 4 | Subject: [PATCH] increase SBD timeout to 30s for Vagrant environments 5 | 6 | --- 7 | chef/cookbooks/pacemaker/templates/suse/sysconfig_sbd.erb | 2 +- 8 | 1 file changed, 1 insertion(+), 1 deletion(-) 9 | 10 | diff --git a/chef/cookbooks/pacemaker/templates/suse/sysconfig_sbd.erb b/chef/cookbooks/pacemaker/templates/suse/sysconfig_sbd.erb 11 | index f6e71a9..94caab2 100644 12 | --- a/chef/cookbooks/pacemaker/templates/suse/sysconfig_sbd.erb 13 | +++ b/chef/cookbooks/pacemaker/templates/suse/sysconfig_sbd.erb 14 | @@ -49,4 +49,4 @@ SBD_WATCHDOG=yes 15 | # Additional options for starting sbd 16 | # 17 | # The next line enables the watchdog support, and makes SBD checks Pacemaker quorum and node health: 18 | -SBD_OPTS="-W -P -n <%= @node_name %>" 19 | +SBD_OPTS="-W -P -I 30 -n <%= @node_name %>" 20 | -- 21 | 2.10.1 22 | 23 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/install-suse-cloud.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | export PATH="$PATH:/sbin:/usr/sbin/" 6 | 7 | # The appliance-firstboot service will deal with all the crowbar-init bits; we 8 | # need to wait for this to be completed. And one way to check this is to check 9 | # if the service is enabled, because at the end of the service, it disables 10 | # itself. 11 | while systemctl -q is-enabled appliance-firstboot; do 12 | echo "Waiting for appliance-firstboot to complete..." 13 | sleep 2 14 | done 15 | 16 | # Simply don't fail on zypper being already used, and retry a bit instead 17 | export ZYPP_LOCK_TIMEOUT=120 18 | 19 | # To trick install-suse-clouds check for "screen". It should be safe 20 | # to run without screen here, as install-suse-cloud won't pull the network 21 | # from eth0 because we patched the network cookbook accordingly. 
22 | export STY="dummy" 23 | 24 | # ensure cloud_admin pattern is fully installed 25 | # otherwise the check in install-suse-cloud will fail. 26 | zypper -n install -t pattern cloud_admin 27 | 28 | install-suse-cloud -v 29 | 30 | . /etc/profile.d/crowbar.sh 31 | crowbar network allocate_ip default cloud7-admin.openstack.site public host 32 | chef-client 33 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/prep-admin.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd /tmp 6 | 7 | patch -d /opt/dell -p1 < barclamp-network-ignore-eth0.patch 8 | rm -f barclamp-network-ignore-eth0.patch 9 | patch -d /opt/dell -p1 < increase-SBD-timeout-30s.patch 10 | rm -f increase-SBD-timeout-30s.patch 11 | patch -d /opt/dell -p1 < apply-fix-lp#1691831.patch 12 | rm -f apply-fix-lp#1691831.patch 13 | 14 | cp network.json /etc/crowbar/network.json 15 | rm -f network.json 16 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/provision-root-files.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Vagrant's file provisioner runs as the vagrant user: 6 | # http://docs.vagrantup.com/v2/provisioning/file.html 7 | # so files intended for root also have to be moved to 8 | # the right place. 9 | 10 | cd /tmp 11 | 12 | mkdir -p /root/bin 13 | mv setup-node-aliases.sh /root/bin 14 | 15 | # remove pre-existing yaml files from appliance for the demo, to avoid 16 | # confusion 17 | rm /root/*.yaml 18 | 19 | mv *.yaml /root 20 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/setup-node-aliases.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nodes=( $( knife node list | grep '^ *d' ) ) 4 | 5 | error_count=0 6 | for node in "${nodes[@]}"; do 7 | alias=$( ssh -n $node cat .vagrant-guest-name 2>/dev/null ) 8 | if [ -n "$alias" ]; then 9 | echo "Setting alias $alias for $node ... " 10 | crowbar machines rename $node $alias 11 | else 12 | echo "WARNING: couldn't retrieve /root/.vagrant-guest-name from $node" >&2 13 | error_count=$(( error_count + 1 )) 14 | fi 15 | done 16 | 17 | exit $error_count 18 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/simple-cloud.yaml: -------------------------------------------------------------------------------- 1 | # Input file for 'crowbar autobuild' command which sets up a 2 | # cloud with a single controller and a single KVM compute node. 3 | --- 4 | global_options: 5 | - action_for_existing_proposals: skip # could also be e.g. 'recreate' or 'overwrite' 6 | proposals: 7 | - barclamp: provisioner 8 | attributes: 9 | shell_prompt: USER@ALIAS:CWD SUFFIX 10 | - barclamp: database 11 | # Proposal name defaults to 'default'. 
12 | # Default attributes are good enough, so we just need to assign 13 | # nodes to roles: 14 | deployment: 15 | elements: 16 | database-server: 17 | - "@@controller1@@" 18 | - barclamp: rabbitmq 19 | deployment: 20 | elements: 21 | rabbitmq-server: 22 | - "@@controller1@@" 23 | - barclamp: keystone 24 | deployment: 25 | elements: 26 | keystone-server: 27 | - "@@controller1@@" 28 | - barclamp: glance 29 | deployment: 30 | elements: 31 | glance-server: 32 | - "@@controller1@@" 33 | - barclamp: cinder 34 | wipe_attributes: 35 | - volumes 36 | attributes: 37 | volumes: 38 | - backend_name: local 39 | backend_driver: local 40 | local: 41 | file_size: 2000 42 | volume_name: cinder-volumes 43 | file_name: /var/lib/cinder/volume.raw 44 | deployment: 45 | elements: 46 | cinder-controller: 47 | - "@@controller1@@" 48 | cinder-volume: 49 | - "@@compute1@@" 50 | - barclamp: neutron 51 | attributes: 52 | ml2_mechanism_drivers: 53 | - linuxbridge 54 | ml2_type_drivers: 55 | - vlan 56 | ml2_type_drivers_default_provider_network: vlan 57 | ml2_type_drivers_default_tenant_network: vlan 58 | deployment: 59 | elements: 60 | neutron-server: 61 | - "@@controller1@@" 62 | neutron-network: 63 | - "@@controller1@@" 64 | - barclamp: nova 65 | attributes: 66 | use_migration: true 67 | kvm: 68 | ksm_enabled: true 69 | deployment: 70 | elements: 71 | nova-controller: 72 | - "@@controller1@@" 73 | nova-compute-qemu: 74 | - "@@compute1@@" 75 | - barclamp: horizon 76 | deployment: 77 | elements: 78 | horizon-server: 79 | - "@@controller1@@" 80 | - barclamp: heat 81 | deployment: 82 | elements: 83 | heat-server: 84 | - "@@controller1@@" 85 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/switch-admin-ip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | admin_ip="$1" 4 | 5 | sed -i "s,192.168.124.10,${admin_ip},g" /root/*-cloud*.yaml 6 | -------------------------------------------------------------------------------- /vagrant/provisioning/admin/switch-vdisks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # FIXME: parametrize instead of doing this hack 4 | DMIDECODE=/usr/sbin/dmidecode # $PATH doesn't have /usr/sbin here (due to sudo?) 5 | if $DMIDECODE | grep -q VirtualBox; then 6 | echo "On VirtualBox; using SCSI virtual disks ..." 7 | elif $DMIDECODE | egrep -iq 'Bochs|QEMU'; then 8 | echo "On KVM; switching to virtio disks" 9 | sed -i 's,/dev/sd\([a-z]\),/dev/vd\1,' /root/*-cloud*.yaml 10 | else 11 | echo "ERROR: Couldn't figure out what hypervisor we're on?!" >&2 12 | $DMIDECODE >&2 13 | exit 1 14 | fi 15 | -------------------------------------------------------------------------------- /vagrant/provisioning/controller/provision-root-files.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | admin_ip="$1" 6 | 7 | # Vagrant's file provisioner runs as the vagrant user: 8 | # http://docs.vagrantup.com/v2/provisioning/file.html 9 | # so files intended for root also have to be moved to 10 | # the right place. 
11 | 12 | cd /tmp 13 | 14 | sed -i "s,192.168.124.10,${admin_ip},g" upload-cirros 15 | 16 | mkdir -p /root/bin 17 | mv upload-cirros /root/bin 18 | mv start-testvm /root/bin 19 | -------------------------------------------------------------------------------- /vagrant/provisioning/controller/start-testvm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if [ ! -f ~/.openrc ]; then 6 | echo "No .openrc file!" 7 | exit 1 8 | fi 9 | 10 | source ~/.openrc 11 | 12 | echo -n "Starting test instance ... " 13 | FIXED_NETWORK_ID=$(openstack --insecure network show -f value -c id fixed) 14 | TESTVM_ID=$(openstack --insecure server create \ 15 | -f value -c id \ 16 | --wait \ 17 | --nic net-id=$FIXED_NETWORK_ID \ 18 | --image cirros-machine \ 19 | --flavor m1.tiny \ 20 | testvm) 21 | TESTVM_HOST=$(openstack --insecure server show \ 22 | -f value -c OS-EXT-SRV-ATTR:host \ 23 | $TESTVM_ID) 24 | echo "done." 25 | 26 | echo -n "Adding a floating IP ... " 27 | TESTVM_FIXED_ADDRESS=$(openstack --insecure server show \ 28 | -f value -c addresses $TESTVM_ID | sed "s/fixed=//g") 29 | TESTVM_PORT_ID=$(openstack --insecure port list \ 30 | -f value -c ID -c "Fixed IP Addresses" | grep $TESTVM_FIXED_ADDRESS | awk '{ print $1 }') 31 | FLOATING_IP_ID=$(openstack --insecure floating ip create \ 32 | -f value -c id \ 33 | --port $TESTVM_PORT_ID \ 34 | floating) 35 | FLOATING_IP=$(openstack --insecure floating ip show \ 36 | -f value -c floating_ip_address $FLOATING_IP_ID) 37 | echo "done." 38 | 39 | echo -n "Allowing ICMP and SSH access ... " 40 | PROJECT_ID=$(openstack --insecure project show \ 41 | -f value -c id \ 42 | openstack) 43 | SECURITY_GROUP_ID=$(openstack --insecure security group list \ 44 | -f value -c ID -c Project | grep $PROJECT_ID | awk '{ print $1 }') 45 | openstack --insecure security group rule create \ 46 | --proto icmp $SECURITY_GROUP_ID > /dev/null 47 | openstack --insecure security group rule create \ 48 | --proto tcp --dst-port 22 $SECURITY_GROUP_ID > /dev/null 49 | echo "done." 50 | 51 | NOVA_EVACUATE_HOST=$(crm resource show nova-evacuate | sed "s/.*running on: //g") 52 | 53 | echo 54 | echo "Test instance: testvm" 55 | echo "Floating IP of testvm: $FLOATING_IP" 56 | echo "Compute node hosting testvm: $TESTVM_HOST" 57 | echo "Controller node running nova-evacuate: $NOVA_EVACUATE_HOST" 58 | -------------------------------------------------------------------------------- /vagrant/provisioning/controller/upload-cirros: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | admin_ip="192.168.124.10" 6 | 7 | if [ ! -f ~/.openrc ]; then 8 | echo "No .openrc file!" 9 | exit 1 10 | fi 11 | 12 | CIRROS_TAR=cirros-0.3.4-x86_64-uec.tar.gz 13 | TEMP=$(mktemp -d) 14 | 15 | cleanup() { 16 | rm -rf $TEMP 17 | } 18 | 19 | function findfirst() { 20 | find $TEMP -name "$1" | head -1 21 | } 22 | 23 | trap cleanup INT EXIT 24 | 25 | wget -q --no-verbose http://${admin_ip}:8091/files/tempest/$CIRROS_TAR --directory-prefix=$TEMP 26 | tar -xzf $TEMP/$CIRROS_TAR -C $TEMP 27 | 28 | source ~/.openrc 29 | 30 | echo -n "Adding kernel ... " 31 | KERNEL_ID=$(openstack --insecure image create \ 32 | -f value -c id \ 33 | --public --container-format aki --disk-format aki \ 34 | --file $(findfirst '*-vmlinuz') \ 35 | "cirros-kernel") 36 | echo "done." 37 |
38 | echo -n "Adding ramdisk ... " 39 | RAMDISK_ID=$(openstack --insecure image create \ 40 | -f value -c id \ 41 | --public --container-format ari --disk-format ari \ 42 | --file $(findfirst '*-initrd') \ 43 | "cirros-ramdisk") 44 | echo "done." 45 | 46 | echo -n "Adding image ... " 47 | MACHINE_ID=$(openstack --insecure image create \ 48 | -f value -c id \ 49 | --public --container-format ami --disk-format ami \ 50 | --property kernel_id=$KERNEL_ID \ 51 | --property ramdisk_id=$RAMDISK_ID \ 52 | --file $(findfirst '*.img') \ 53 | "cirros-machine") 54 | echo "done." 55 | -------------------------------------------------------------------------------- /vagrant/provisioning/non-admin/deps-release: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Replace sles-release with suse-openstack-cloud-deps-release; this is 6 | # required to install sle-ha-release. 7 | zypper --non-interactive in --auto-agree-with-licenses --replacefiles suse-openstack-cloud-deps-release 8 | -------------------------------------------------------------------------------- /vagrant/provisioning/non-admin/register-with-suse-cloud: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | admin_ip="$1" 6 | 7 | export PATH="$PATH:/sbin:/usr/sbin/" 8 | 9 | count=0 10 | while true; do 11 | wget http://${admin_ip}:8091/suse-12.2/x86_64/crowbar_register || true 12 | [ -f crowbar_register ] && break 13 | count=$((count+1)) 14 | if [ $count -ge 90 ]; then 15 | echo "Giving up on fetching crowbar_register..." 1>&2 16 | false 17 | fi 18 | sleep 10 19 | done 20 | 21 | zypper ar http://${admin_ip}:8091/suse-12.2/x86_64/install cloud7-deps 22 | 23 | # To trick crowbar_register's check for "screen". It should be safe 24 | # to run without screen here, as crowbar_register won't pull the network 25 | # from eth0 because we patched the network cookbook accordingly. 26 | export STY="dummy" 27 | 28 | chmod a+x crowbar_register 29 | ./crowbar_register --force --interface eth1 --gpg-auto-import-keys --no-gpg-checks 30 | -------------------------------------------------------------------------------- /vagrant/provisioning/non-admin/store-vagrant-name.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | name="$1" 6 | dst=/root/.vagrant-guest-name 7 | 8 | echo "Writing '$name' to $dst" 9 | echo "$name" > $dst 10 | -------------------------------------------------------------------------------- /vagrant/provisioning/non-admin/update-motd: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | sed -i "s/SLES 12 SP2/SUSE OpenStack Cloud 7/" /etc/motd 6 | --------------------------------------------------------------------------------