├── .gitignore ├── .gitreview ├── AUTHORS ├── HACKING.rst ├── LICENSE ├── README.md ├── eucarc ├── exercise.sh ├── exerciserc ├── exercises ├── aggregates.sh ├── boot_from_volume.sh ├── bundle.sh ├── client-args.sh ├── client-env.sh ├── euca.sh ├── floating_ips.sh ├── horizon.sh ├── quantum-adv-test.sh ├── sec_groups.sh ├── swift.sh └── volumes.sh ├── extras.d ├── 80-tempest.sh └── README ├── files ├── apache-horizon.template ├── apts │ ├── baremetal │ ├── ceilometer-collector │ ├── cinder │ ├── general │ ├── glance │ ├── horizon │ ├── keystone │ ├── ldap │ ├── n-api │ ├── n-cpu │ ├── n-novnc │ ├── n-vol │ ├── nova │ ├── postgresql │ ├── quantum │ ├── ryu │ ├── swift │ └── tls-proxy ├── default_catalog.templates ├── horizon_settings.py ├── keystone_data.sh ├── ldap │ ├── manager.ldif.in │ └── openstack.ldif ├── rpms-suse │ ├── ceilometer-collector │ ├── cinder │ ├── general │ ├── glance │ ├── horizon │ ├── keystone │ ├── n-api │ ├── n-cpu │ ├── n-novnc │ ├── n-vol │ ├── nova │ ├── postgresql │ ├── quantum │ ├── ryu │ └── swift ├── rpms │ ├── ceilometer-collector │ ├── cinder │ ├── general │ ├── glance │ ├── horizon │ ├── keystone │ ├── ldap │ ├── n-api │ ├── n-cpu │ ├── n-novnc │ ├── n-spice │ ├── n-vol │ ├── nova │ ├── postgresql │ ├── quantum │ ├── ryu │ └── swift ├── sources.list └── swift │ ├── rsyncd.conf │ └── rsyslog.conf ├── functions ├── lib ├── baremetal ├── ceilometer ├── cinder ├── database ├── databases │ ├── mysql │ └── postgresql ├── glance ├── heat ├── horizon ├── keystone ├── ldap ├── nova ├── quantum ├── quantum_plugins │ ├── README.md │ ├── bigswitch_floodlight │ ├── brocade │ ├── linuxbridge │ ├── openvswitch │ ├── ovs_base │ └── ryu ├── quantum_thirdparty │ ├── README.md │ ├── bigswitch_floodlight │ └── ryu ├── rpc_backend ├── swift ├── tempest ├── template └── tls ├── openrc ├── rejoin-stack.sh ├── samples ├── local.sh └── localrc ├── stack.sh ├── stackrc ├── tests └── functions.sh ├── tools ├── build_bm.sh ├── build_bm_multi.sh ├── 
build_pxe_env.sh ├── build_ramdisk.sh ├── build_tempest.sh ├── build_uec.sh ├── build_uec_ramdisk.sh ├── build_usb_boot.sh ├── copy_dev_environment_to_uec.sh ├── create_userrc.sh ├── get_uec_image.sh ├── info.sh ├── install_openvpn.sh ├── install_prereqs.sh ├── jenkins │ ├── README.md │ ├── adapters │ │ ├── euca.sh │ │ ├── floating_ips.sh │ │ ├── swift.sh │ │ └── volumes.sh │ ├── build_configuration.sh │ ├── configurations │ │ ├── kvm.sh │ │ └── xs.sh │ ├── jenkins_home │ │ ├── .gitignore │ │ ├── build_jenkins.sh │ │ ├── clean.sh │ │ ├── jobs │ │ │ ├── diablo-kvm_ha │ │ │ │ ├── config.xml │ │ │ │ └── configurations │ │ │ │ │ └── axis-ADAPTER │ │ │ │ │ ├── euca │ │ │ │ │ └── config.xml │ │ │ │ │ └── floatingips │ │ │ │ │ └── config.xml │ │ │ └── diablo-xs_ha │ │ │ │ └── config.xml │ │ └── print_summary.py │ └── run_test.sh ├── make_cert.sh ├── uec │ └── meta.py ├── upload_image.sh ├── warm_apts_for_uec.sh └── xen │ ├── README.md │ ├── build_domU_multi.sh │ ├── build_xva.sh │ ├── devstackubuntupreseed.cfg │ ├── files │ ├── fstab │ └── hvc0.conf │ ├── install_os_domU.sh │ ├── prepare_guest.sh │ ├── prepare_guest_template.sh │ ├── scripts │ ├── install-os-vpx.sh │ ├── install_ubuntu_template.sh │ ├── manage-vdi │ ├── mkxva │ ├── on_exit.sh │ ├── templatedelete.sh │ └── uninstall-os-vpx.sh │ ├── templates │ ├── hosts.in │ ├── interfaces.in │ ├── menu.lst.in │ └── ova.xml.in │ └── xenrc └── unstack.sh /.gitignore: -------------------------------------------------------------------------------- 1 | proto 2 | *~ 3 | .*.sw[nop] 4 | *.log 5 | *.log.[1-9] 6 | src 7 | localrc 8 | local.sh 9 | files/*.gz 10 | files/images 11 | stack-screenrc 12 | *.pem 13 | accrc 14 | .stackenv 15 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=review.openstack.org 3 | port=29418 4 | project=openstack-dev/devstack.git 5 | 
-------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Aaron Lee 2 | Aaron Rosen 3 | Adam Gandelman 4 | Akihiro MOTOKI 5 | Andrew Laski 6 | Andy Smith 7 | Anthony Young 8 | Armando Migliaccio 9 | Brad Hall 10 | Chmouel Boudjnah 11 | Dan Prince 12 | Dean Troyer 13 | Devin Carlen 14 | Doug hellmann 15 | Eddie Hebert 16 | Eoghan Glynn 17 | Eric Windisch 18 | Gabriel Hurley 19 | Gary Kotton 20 | Hengqing Hu 21 | Hua ZHANG 22 | Isaku Yamahata 23 | Jake Dahn 24 | James E. Blair 25 | Jason Cannavale 26 | Jay Pipes 27 | Jesse Andrews 28 | Joe Gordon 29 | Johannes Erdfelt 30 | John Postlethwait 31 | Josh Kearney 32 | Justin Shepherd 33 | Ken Pepple 34 | Kiall Mac Innes 35 | Matt Joyce 36 | Osamu Habuka 37 | Russell Bryant 38 | Scott Moser 39 | Sumit Naiksatam 40 | Thierry Carrez 41 | Todd Willey 42 | Tres Henry 43 | Vincent Untz 44 | Vishvananda Ishaya 45 | Yun Mao 46 | Yong Sheng Gong 47 | Zhongyue Luo 48 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. 2 | 3 | # Goals 4 | 5 | * To quickly build dev OpenStack environments in a clean Ubuntu or Fedora environment 6 | * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) 
7 | * To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once 8 | * To make it easy to prototype cross-project features 9 | * To sanity-check OpenStack builds (used in gating commits to the primary repos) 10 | 11 | Read more at http://devstack.org (built from the gh-pages branch) 12 | 13 | IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. 14 | 15 | # Devstack on Xenserver 16 | 17 | If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. 18 | 19 | # Versions 20 | 21 | The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the following to create a diablo OpenStack cloud: 22 | 23 | git checkout stable/diablo 24 | ./stack.sh 25 | 26 | You can also pick specific OpenStack project releases by setting the appropriate `*_BRANCH` variables in `localrc` (look in `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:: 27 | 28 | GLANCE_REPO=https://github.com/openstack/glance.git 29 | GLANCE_BRANCH=milestone-proposed 30 | 31 | # Start A Dev Cloud 32 | 33 | Installing in a dedicated disposable vm is safer than installing on your dev machine! 
To start a dev cloud: 34 | 35 | ./stack.sh 36 | 37 | When the script finishes executing, you should be able to access OpenStack endpoints, like so: 38 | 39 | * Horizon: http://myhost/ 40 | * Keystone: http://myhost:5000/v2.0/ 41 | 42 | We also provide an environment file that you can use to interact with your cloud via CLI: 43 | 44 | # source openrc file to load your environment with osapi and ec2 creds 45 | . openrc 46 | # list instances 47 | nova list 48 | 49 | If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: 50 | 51 | # source eucarc to generate EC2 credentials and set up the environment 52 | . eucarc 53 | # list instances using ec2 api 54 | euca-describe-instances 55 | 56 | # Customizing 57 | 58 | You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. 59 | 60 | # Database Backend 61 | 62 | Multiple database backends are available. The available databases are defined in the lib/databases directory. 63 | To choose a database backend, add a line to your `localrc` like: 64 | 65 | use_database postgresql 66 | 67 | By default, the mysql database backend is used. 68 | 69 | # RPC Backend 70 | 71 | Multiple RPC backends are available. Currently, this 72 | includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of 73 | choice may be selected via the `localrc`. 74 | 75 | Note that selecting more than one RPC backend will result in a failure. 
76 | 77 | Example (ZeroMQ): 78 | 79 | ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq" 80 | 81 | Example (Qpid): 82 | 83 | ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid" 84 | 85 | # Swift 86 | 87 | Swift is not installed by default, you can enable easily by adding this to your `localrc`: 88 | 89 | enable_service swift 90 | 91 | If you want a minimal Swift install with only Swift and Keystone you can have this instead in your `localrc`: 92 | 93 | disable_all_services 94 | enable_service key mysql swift 95 | 96 | If you use Swift with Keystone, Swift will authenticate against it. You will need to make sure to use the Keystone URL to auth against. 97 | 98 | If you are enabling `swift3` in `ENABLED_SERVICES` devstack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. 99 | 100 | Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool. 101 | 102 | By default Swift will configure 3 replicas (and one spare) which could be IO intensive on a small vm, if you only want to do some quick testing of the API you can choose to only have one replica by customizing the variable `SWIFT_REPLICAS` in your `localrc`. 
103 | -------------------------------------------------------------------------------- /eucarc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # source eucarc [username] [tenantname] 4 | # 5 | # Create EC2 credentials for the current user as defined by OS_TENANT_NAME:OS_USERNAME 6 | # Optionally set the tenant/username via openrc 7 | 8 | if [[ -n "$1" ]]; then 9 | USERNAME=$1 10 | fi 11 | if [[ -n "$2" ]]; then 12 | TENANT=$2 13 | fi 14 | 15 | # Find the other rc files 16 | RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) 17 | 18 | # Get user configuration 19 | source $RC_DIR/openrc 20 | 21 | # Set the ec2 url so euca2ools works 22 | export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }') 23 | 24 | # Create EC2 credentials for the current user 25 | CREDS=$(keystone ec2-credentials-create) 26 | export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') 27 | export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') 28 | 29 | # Euca2ools Certificate stuff for uploading bundles 30 | # See exercises/bundle.sh to see how to get certs using nova cli 31 | NOVA_KEY_DIR=${NOVA_KEY_DIR:-$RC_DIR} 32 | export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }') 33 | export EC2_USER_ID=42 # nova does not use user id, but bundling requires it 34 | export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem 35 | export EC2_CERT=${NOVA_KEY_DIR}/cert.pem 36 | export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem 37 | export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set 38 | alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user ${EC2_USER_ID} --ec2cert ${NOVA_CERT}" 39 | alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" 40 | 41 | -------------------------------------------------------------------------------- /exercise.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **exercise.sh** 4 | 5 | # Keep track of the current devstack directory. 6 | TOP_DIR=$(cd $(dirname "$0") && pwd) 7 | 8 | # Import common functions 9 | source $TOP_DIR/functions 10 | 11 | # Load local configuration 12 | source $TOP_DIR/stackrc 13 | 14 | # Run everything in the exercises/ directory that isn't explicitly disabled 15 | 16 | # comma separated list of script basenames to skip 17 | # to refrain from exercising euca.sh use SKIP_EXERCISES=euca 18 | SKIP_EXERCISES=${SKIP_EXERCISES:-""} 19 | 20 | # Locate the scripts we should run 21 | EXERCISE_DIR=$(dirname "$0")/exercises 22 | basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) 23 | 24 | # Track the state of each script 25 | passes="" 26 | failures="" 27 | skips="" 28 | 29 | # Loop over each possible script (by basename) 30 | for script in $basenames; do 31 | if [[ ,$SKIP_EXERCISES, =~ ,$script, ]] ; then 32 | skips="$skips $script" 33 | else 34 | echo "=====================================================================" 35 | echo Running $script 36 | echo "=====================================================================" 37 | $EXERCISE_DIR/$script.sh 38 | exitcode=$? 
39 | if [[ $exitcode == 55 ]]; then 40 | skips="$skips $script" 41 | elif [[ $exitcode -ne 0 ]] ; then 42 | failures="$failures $script" 43 | else 44 | passes="$passes $script" 45 | fi 46 | fi 47 | done 48 | 49 | # output status of exercise run 50 | echo "=====================================================================" 51 | for script in $skips; do 52 | echo SKIP $script 53 | done 54 | for script in $passes; do 55 | echo PASS $script 56 | done 57 | for script in $failures; do 58 | echo FAILED $script 59 | done 60 | echo "=====================================================================" 61 | 62 | if [ -n "$failures" ] ; then 63 | exit 1 64 | fi 65 | -------------------------------------------------------------------------------- /exerciserc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # source exerciserc 4 | # 5 | # Configure the DevStack exercise scripts 6 | # For best results, source this _after_ stackrc/localrc as it will set 7 | # values only if they are not already set. 8 | 9 | # Max time to wait while vm goes from build to active state 10 | export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} 11 | 12 | # Max time to wait for proper IP association and dis-association. 
13 | export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} 14 | 15 | # Max time till the vm is bootable 16 | export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} 17 | 18 | # Max time from run instance command until it is running 19 | export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} 20 | 21 | # Max time to wait for a vm to terminate 22 | export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} 23 | 24 | # Max time to wait for a euca-volume command to propogate 25 | export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30} 26 | 27 | # Max time to wait for a euca-delete command to propogate 28 | export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60} 29 | 30 | # The size of the volume we want to boot from; some storage back-ends 31 | # do not allow a disk resize, so it's important that this can be tuned 32 | export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1} 33 | -------------------------------------------------------------------------------- /exercises/aggregates.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **aggregates.sh** 4 | 5 | # This script demonstrates how to use host aggregates: 6 | # * Create an Aggregate 7 | # * Updating Aggregate details 8 | # * Testing Aggregate metadata 9 | # * Testing Aggregate delete 10 | # * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) 11 | # * Testing add/remove hosts (with one host) 12 | 13 | echo "**************************************************" 14 | echo "Begin DevStack Exercise: $0" 15 | echo "**************************************************" 16 | 17 | # This script exits on an error so that errors don't compound and you see 18 | # only the first error that occurred. 19 | set -o errexit 20 | 21 | # Print the commands being run so that we can see the command that triggers 22 | # an error. It is also useful for following allowing as the install occurs. 
23 | set -o xtrace 24 | 25 | 26 | # Settings 27 | # ======== 28 | 29 | # Keep track of the current directory 30 | EXERCISE_DIR=$(cd $(dirname "$0") && pwd) 31 | TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) 32 | 33 | # Import common functions 34 | source $TOP_DIR/functions 35 | 36 | # Import configuration 37 | source $TOP_DIR/openrc 38 | 39 | # Import exercise configuration 40 | source $TOP_DIR/exerciserc 41 | 42 | # Test as the admin user 43 | . openrc admin admin 44 | 45 | 46 | # Create an aggregate 47 | # =================== 48 | 49 | AGGREGATE_NAME=test_aggregate_$RANDOM 50 | AGGREGATE2_NAME=test_aggregate_$RANDOM 51 | AGGREGATE_A_ZONE=nova 52 | 53 | exit_if_aggregate_present() { 54 | aggregate_name=$1 55 | 56 | if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then 57 | echo "SUCCESS $aggregate_name not present" 58 | else 59 | echo "ERROR found aggregate: $aggregate_name" 60 | exit -1 61 | fi 62 | } 63 | 64 | exit_if_aggregate_present $AGGREGATE_NAME 65 | 66 | AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) 67 | AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) 68 | 69 | # check aggregate created 70 | nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created" 71 | 72 | 73 | # Ensure creating a duplicate fails 74 | # ================================= 75 | 76 | if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then 77 | echo "ERROR could create duplicate aggregate" 78 | exit -1 79 | fi 80 | 81 | 82 | # Test aggregate-update (and aggregate-details) 83 | # ============================================= 84 | AGGREGATE_NEW_NAME=test_aggregate_$RANDOM 85 | 86 | nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME 87 | nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME 88 | nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE 89 | 90 | nova aggregate-update 
$AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE 91 | nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME 92 | nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE 93 | 94 | 95 | # Test aggregate-set-metadata 96 | # =========================== 97 | META_DATA_1_KEY=asdf 98 | META_DATA_2_KEY=foo 99 | META_DATA_3_KEY=bar 100 | 101 | #ensure no additional metadata is set 102 | nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" 103 | 104 | nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 105 | nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY 106 | nova aggregate-details $AGGREGATE_ID | grep 123 107 | 108 | nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456 109 | nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY 110 | nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY 111 | 112 | nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789 113 | nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY 114 | nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY 115 | 116 | nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared" 117 | 118 | nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY 119 | nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" 120 | 121 | 122 | # Test aggregate-add/remove-host 123 | # ============================== 124 | if [ "$VIRT_DRIVER" == "xenserver" ]; then 125 | echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" 126 | fi 127 | FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1) 128 | # Make sure can add two aggregates to same host 129 | nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST 130 | nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST 131 | if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then 132 | echo "ERROR could add duplicate 
host to single aggregate" 133 | exit -1 134 | fi 135 | nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST 136 | nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST 137 | 138 | # Test aggregate-delete 139 | # ===================== 140 | nova aggregate-delete $AGGREGATE_ID 141 | nova aggregate-delete $AGGREGATE2_ID 142 | exit_if_aggregate_present $AGGREGATE_NAME 143 | 144 | set +o xtrace 145 | echo "**************************************************" 146 | echo "End DevStack Exercise: $0" 147 | echo "**************************************************" 148 | -------------------------------------------------------------------------------- /exercises/bundle.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **bundle.sh** 4 | 5 | # we will use the ``euca2ools`` cli tool that wraps the python boto 6 | # library to test ec2 bundle upload compatibility 7 | 8 | echo "*********************************************************************" 9 | echo "Begin DevStack Exercise: $0" 10 | echo "*********************************************************************" 11 | 12 | # This script exits on an error so that errors don't compound and you see 13 | # only the first error that occured. 14 | set -o errexit 15 | 16 | # Print the commands being run so that we can see the command that triggers 17 | # an error. It is also useful for following allowing as the install occurs. 
18 | set -o xtrace 19 | 20 | 21 | # Settings 22 | # ======== 23 | 24 | # Keep track of the current directory 25 | EXERCISE_DIR=$(cd $(dirname "$0") && pwd) 26 | TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) 27 | 28 | # Import common functions 29 | source $TOP_DIR/functions 30 | 31 | # Import EC2 configuration 32 | source $TOP_DIR/eucarc 33 | 34 | # Import exercise configuration 35 | source $TOP_DIR/exerciserc 36 | 37 | # Remove old certificates 38 | rm -f $TOP_DIR/cacert.pem 39 | rm -f $TOP_DIR/cert.pem 40 | rm -f $TOP_DIR/pk.pem 41 | 42 | # Get Certificates 43 | nova x509-get-root-cert $TOP_DIR/cacert.pem 44 | nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem 45 | 46 | # Max time to wait for image to be registered 47 | REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15} 48 | 49 | BUCKET=testbucket 50 | IMAGE=bundle.img 51 | truncate -s 5M /tmp/$IMAGE 52 | euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE" 53 | 54 | euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" 55 | 56 | AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` 57 | die_if_not_set AMI "Failure registering $BUCKET/$IMAGE" 58 | 59 | # Wait for the image to become available 60 | if ! 
timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then 61 | echo "Image $AMI not available within $REGISTER_TIMEOUT seconds" 62 | exit 1 63 | fi 64 | 65 | # Clean up 66 | euca-deregister $AMI || die "Failure deregistering $AMI" 67 | 68 | set +o xtrace 69 | echo "*********************************************************************" 70 | echo "SUCCESS: End DevStack Exercise: $0" 71 | echo "*********************************************************************" 72 | -------------------------------------------------------------------------------- /exercises/client-args.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **client-args.sh** 4 | 5 | # Test OpenStack client authentication aguemnts handling 6 | 7 | echo "*********************************************************************" 8 | echo "Begin DevStack Exercise: $0" 9 | echo "*********************************************************************" 10 | 11 | # This script exits on an error so that errors don't compound and you see 12 | # only the first error that occured. 13 | set -o errexit 14 | 15 | # Print the commands being run so that we can see the command that triggers 16 | # an error. It is also useful for following allowing as the install occurs. 
17 | set -o xtrace 18 | 19 | 20 | # Settings 21 | # ======== 22 | 23 | # Keep track of the current directory 24 | EXERCISE_DIR=$(cd $(dirname "$0") && pwd) 25 | TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) 26 | 27 | # Import common functions 28 | source $TOP_DIR/functions 29 | 30 | # Import configuration 31 | source $TOP_DIR/openrc 32 | 33 | # Import exercise configuration 34 | source $TOP_DIR/exerciserc 35 | 36 | # Unset all of the known NOVA_* vars 37 | unset NOVA_API_KEY 38 | unset NOVA_ENDPOINT_NAME 39 | unset NOVA_PASSWORD 40 | unset NOVA_PROJECT_ID 41 | unset NOVA_REGION_NAME 42 | unset NOVA_URL 43 | unset NOVA_USERNAME 44 | unset NOVA_VERSION 45 | 46 | # Save the known variables for later 47 | export x_TENANT_NAME=$OS_TENANT_NAME 48 | export x_USERNAME=$OS_USERNAME 49 | export x_PASSWORD=$OS_PASSWORD 50 | export x_AUTH_URL=$OS_AUTH_URL 51 | 52 | # Unset the usual variables to force argument processing 53 | unset OS_TENANT_NAME 54 | unset OS_USERNAME 55 | unset OS_PASSWORD 56 | unset OS_AUTH_URL 57 | 58 | # Common authentication args 59 | TENANT_ARG="--os_tenant_name=$x_TENANT_NAME" 60 | TENANT_ARG_DASH="--os-tenant-name=$x_TENANT_NAME" 61 | ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL" 62 | ARGS_DASH="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" 63 | 64 | # Set global return 65 | RETURN=0 66 | 67 | # Keystone client 68 | # --------------- 69 | if [[ "$ENABLED_SERVICES" =~ "key" ]]; then 70 | if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then 71 | STATUS_KEYSTONE="Skipped" 72 | else 73 | echo -e "\nTest Keystone" 74 | if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then 75 | STATUS_KEYSTONE="Succeeded" 76 | else 77 | STATUS_KEYSTONE="Failed" 78 | RETURN=1 79 | fi 80 | fi 81 | fi 82 | 83 | # Nova client 84 | # ----------- 85 | 86 | if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then 87 | if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then 88 | STATUS_NOVA="Skipped" 89 | STATUS_EC2="Skipped" 90 | 
else 91 | # Test OSAPI 92 | echo -e "\nTest Nova" 93 | if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then 94 | STATUS_NOVA="Succeeded" 95 | else 96 | STATUS_NOVA="Failed" 97 | RETURN=1 98 | fi 99 | fi 100 | fi 101 | 102 | # Cinder client 103 | # ------------- 104 | 105 | if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then 106 | if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then 107 | STATUS_CINDER="Skipped" 108 | else 109 | echo -e "\nTest Cinder" 110 | if cinder $TENANT_ARG_DASH $ARGS_DASH list; then 111 | STATUS_CINDER="Succeeded" 112 | else 113 | STATUS_CINDER="Failed" 114 | RETURN=1 115 | fi 116 | fi 117 | fi 118 | 119 | # Glance client 120 | # ------------- 121 | 122 | if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then 123 | if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then 124 | STATUS_GLANCE="Skipped" 125 | else 126 | echo -e "\nTest Glance" 127 | if glance $TENANT_ARG_DASH $ARGS_DASH image-list; then 128 | STATUS_GLANCE="Succeeded" 129 | else 130 | STATUS_GLANCE="Failed" 131 | RETURN=1 132 | fi 133 | fi 134 | fi 135 | 136 | # Swift client 137 | # ------------ 138 | 139 | if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then 140 | if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then 141 | STATUS_SWIFT="Skipped" 142 | else 143 | echo -e "\nTest Swift" 144 | if swift $TENANT_ARG_DASH $ARGS_DASH stat; then 145 | STATUS_SWIFT="Succeeded" 146 | else 147 | STATUS_SWIFT="Failed" 148 | RETURN=1 149 | fi 150 | fi 151 | fi 152 | 153 | set +o xtrace 154 | 155 | # Results 156 | # ------- 157 | 158 | function report() { 159 | if [[ -n "$2" ]]; then 160 | echo "$1: $2" 161 | fi 162 | } 163 | 164 | echo -e "\n" 165 | report "Keystone" $STATUS_KEYSTONE 166 | report "Nova" $STATUS_NOVA 167 | report "Cinder" $STATUS_CINDER 168 | report "Glance" $STATUS_GLANCE 169 | report "Swift" $STATUS_SWIFT 170 | 171 | if (( $RETURN == 0 )); then 172 | echo "*********************************************************************" 173 | echo "SUCCESS: End DevStack Exercise: $0" 174 | echo 
"*********************************************************************" 175 | fi 176 | 177 | exit $RETURN 178 | -------------------------------------------------------------------------------- /exercises/client-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **client-env.sh** 4 | 5 | # Test OpenStack client enviroment variable handling 6 | 7 | echo "*********************************************************************" 8 | echo "Begin DevStack Exercise: $0" 9 | echo "*********************************************************************" 10 | 11 | # This script exits on an error so that errors don't compound and you see 12 | # only the first error that occured. 13 | set -o errexit 14 | 15 | # Print the commands being run so that we can see the command that triggers 16 | # an error. It is also useful for following allowing as the install occurs. 17 | set -o xtrace 18 | 19 | 20 | # Settings 21 | # ======== 22 | 23 | # Keep track of the current directory 24 | EXERCISE_DIR=$(cd $(dirname "$0") && pwd) 25 | TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) 26 | 27 | # Import common functions 28 | source $TOP_DIR/functions 29 | 30 | # Import configuration 31 | source $TOP_DIR/openrc 32 | 33 | # Import exercise configuration 34 | source $TOP_DIR/exerciserc 35 | 36 | # Unset all of the known NOVA_* vars 37 | unset NOVA_API_KEY 38 | unset NOVA_ENDPOINT_NAME 39 | unset NOVA_PASSWORD 40 | unset NOVA_PROJECT_ID 41 | unset NOVA_REGION_NAME 42 | unset NOVA_URL 43 | unset NOVA_USERNAME 44 | unset NOVA_VERSION 45 | 46 | for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do 47 | is_set $i 48 | if [[ $? 
-ne 0 ]]; then 49 | echo "$i expected to be set" 50 | ABORT=1 51 | fi 52 | done 53 | if [[ -n "$ABORT" ]]; then 54 | exit 1 55 | fi 56 | 57 | # Set global return 58 | RETURN=0 59 | 60 | # Keystone client 61 | # --------------- 62 | if [[ "$ENABLED_SERVICES" =~ "key" ]]; then 63 | if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then 64 | STATUS_KEYSTONE="Skipped" 65 | else 66 | echo -e "\nTest Keystone" 67 | if keystone catalog --service identity; then 68 | STATUS_KEYSTONE="Succeeded" 69 | else 70 | STATUS_KEYSTONE="Failed" 71 | RETURN=1 72 | fi 73 | fi 74 | fi 75 | 76 | # Nova client 77 | # ----------- 78 | 79 | if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then 80 | if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then 81 | STATUS_NOVA="Skipped" 82 | STATUS_EC2="Skipped" 83 | else 84 | # Test OSAPI 85 | echo -e "\nTest Nova" 86 | if nova flavor-list; then 87 | STATUS_NOVA="Succeeded" 88 | else 89 | STATUS_NOVA="Failed" 90 | RETURN=1 91 | fi 92 | 93 | # Test EC2 API 94 | echo -e "\nTest EC2" 95 | # Get EC2 creds 96 | source $TOP_DIR/eucarc 97 | 98 | if euca-describe-images; then 99 | STATUS_EC2="Succeeded" 100 | else 101 | STATUS_EC2="Failed" 102 | RETURN=1 103 | fi 104 | 105 | # Clean up side effects 106 | unset NOVA_VERSION 107 | fi 108 | fi 109 | 110 | # Cinder client 111 | # ------------- 112 | 113 | if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then 114 | if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then 115 | STATUS_CINDER="Skipped" 116 | else 117 | echo -e "\nTest Cinder" 118 | if cinder list; then 119 | STATUS_CINDER="Succeeded" 120 | else 121 | STATUS_CINDER="Failed" 122 | RETURN=1 123 | fi 124 | fi 125 | fi 126 | 127 | # Glance client 128 | # ------------- 129 | 130 | if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then 131 | if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then 132 | STATUS_GLANCE="Skipped" 133 | else 134 | echo -e "\nTest Glance" 135 | if glance image-list; then 136 | STATUS_GLANCE="Succeeded" 137 | else 138 | STATUS_GLANCE="Failed" 139 | RETURN=1 140 | fi 141 | fi 142 | fi 143 | 144 | 
# Swift client 145 | # ------------ 146 | 147 | if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then 148 | if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then 149 | STATUS_SWIFT="Skipped" 150 | else 151 | echo -e "\nTest Swift" 152 | if swift stat; then 153 | STATUS_SWIFT="Succeeded" 154 | else 155 | STATUS_SWIFT="Failed" 156 | RETURN=1 157 | fi 158 | fi 159 | fi 160 | 161 | set +o xtrace 162 | 163 | # Results 164 | # ------- 165 | 166 | function report() { 167 | if [[ -n "$2" ]]; then 168 | echo "$1: $2" 169 | fi 170 | } 171 | 172 | echo -e "\n" 173 | report "Keystone" $STATUS_KEYSTONE 174 | report "Nova" $STATUS_NOVA 175 | report "EC2" $STATUS_EC2 176 | report "Cinder" $STATUS_CINDER 177 | report "Glance" $STATUS_GLANCE 178 | report "Swift" $STATUS_SWIFT 179 | 180 | if (( $RETURN == 0 )); then 181 | echo "*********************************************************************" 182 | echo "SUCCESS: End DevStack Exercise: $0" 183 | echo "*********************************************************************" 184 | fi 185 | 186 | exit $RETURN 187 | -------------------------------------------------------------------------------- /exercises/horizon.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **horizon.sh** 4 | 5 | # Sanity check that horizon started if enabled 6 | 7 | echo "*********************************************************************" 8 | echo "Begin DevStack Exercise: $0" 9 | echo "*********************************************************************" 10 | 11 | # This script exits on an error so that errors don't compound and you see 12 | # only the first error that occured. 13 | set -o errexit 14 | 15 | # Print the commands being run so that we can see the command that triggers 16 | # an error. It is also useful for following allowing as the install occurs. 
17 | set -o xtrace 18 | 19 | 20 | # Settings 21 | # ======== 22 | 23 | # Keep track of the current directory 24 | EXERCISE_DIR=$(cd $(dirname "$0") && pwd) 25 | TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) 26 | 27 | # Import common functions 28 | source $TOP_DIR/functions 29 | 30 | # Import configuration 31 | source $TOP_DIR/openrc 32 | 33 | # Import exercise configuration 34 | source $TOP_DIR/exerciserc 35 | 36 | is_service_enabled horizon || exit 55 37 | 38 | # can we get the front page 39 | curl http://$SERVICE_HOST 2>/dev/null | grep -q '

Log In

' || die "Horizon front page not functioning!" 40 | 41 | set +o xtrace 42 | echo "*********************************************************************" 43 | echo "SUCCESS: End DevStack Exercise: $0" 44 | echo "*********************************************************************" 45 | 46 | -------------------------------------------------------------------------------- /exercises/sec_groups.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **sec_groups.sh** 4 | 5 | # Test security groups via the command line 6 | 7 | echo "*********************************************************************" 8 | echo "Begin DevStack Exercise: $0" 9 | echo "*********************************************************************" 10 | 11 | # This script exits on an error so that errors don't compound and you see 12 | # only the first error that occured. 13 | set -o errexit 14 | 15 | # Print the commands being run so that we can see the command that triggers 16 | # an error. It is also useful for following allowing as the install occurs. 
17 | set -o xtrace 18 | 19 | 20 | # Settings 21 | # ======== 22 | 23 | # Keep track of the current directory 24 | EXERCISE_DIR=$(cd $(dirname "$0") && pwd) 25 | TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) 26 | 27 | # Import common functions 28 | source $TOP_DIR/functions 29 | 30 | # Import configuration 31 | source $TOP_DIR/openrc 32 | 33 | # Import exercise configuration 34 | source $TOP_DIR/exerciserc 35 | 36 | 37 | # Testing Security Groups 38 | # ======================= 39 | 40 | # List security groups 41 | nova secgroup-list 42 | 43 | # Create random name for new sec group and create secgroup of said name 44 | SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)" 45 | nova secgroup-create $SEC_GROUP_NAME 'a test security group' 46 | 47 | # Add some rules to the secgroup 48 | RULES_TO_ADD=( 22 3389 5900 ) 49 | 50 | for RULE in "${RULES_TO_ADD[@]}"; do 51 | nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 52 | done 53 | 54 | # Check to make sure rules were added 55 | SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') ) 56 | for i in "${RULES_TO_ADD[@]}"; do 57 | skip= 58 | for j in "${SEC_GROUP_RULES[@]}"; do 59 | [[ $i == $j ]] && { skip=1; break; } 60 | done 61 | [[ -n $skip ]] || exit 1 62 | done 63 | 64 | # Delete rules and secgroup 65 | for RULE in "${RULES_TO_ADD[@]}"; do 66 | nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 67 | done 68 | 69 | # Delete secgroup 70 | nova secgroup-delete $SEC_GROUP_NAME || \ 71 | die "Failure deleting security group $SEC_GROUP_NAME" 72 | 73 | set +o xtrace 74 | echo "*********************************************************************" 75 | echo "SUCCESS: End DevStack Exercise: $0" 76 | echo "*********************************************************************" 77 | -------------------------------------------------------------------------------- /exercises/swift.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **swift.sh** 4 | 5 | # Test swift via the ``swift`` command line from ``python-swiftclient` 6 | 7 | echo "*********************************************************************" 8 | echo "Begin DevStack Exercise: $0" 9 | echo "*********************************************************************" 10 | 11 | # This script exits on an error so that errors don't compound and you see 12 | # only the first error that occured. 13 | set -o errexit 14 | 15 | # Print the commands being run so that we can see the command that triggers 16 | # an error. It is also useful for following allowing as the install occurs. 17 | set -o xtrace 18 | 19 | 20 | # Settings 21 | # ======== 22 | 23 | # Keep track of the current directory 24 | EXERCISE_DIR=$(cd $(dirname "$0") && pwd) 25 | TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) 26 | 27 | # Import common functions 28 | source $TOP_DIR/functions 29 | 30 | # Import configuration 31 | source $TOP_DIR/openrc 32 | 33 | # Import exercise configuration 34 | source $TOP_DIR/exerciserc 35 | 36 | # If swift is not enabled we exit with exitcode 55 which mean 37 | # exercise is skipped. 38 | is_service_enabled swift || exit 55 39 | 40 | # Container name 41 | CONTAINER=ex-swift 42 | 43 | 44 | # Testing Swift 45 | # ============= 46 | 47 | # Check if we have to swift via keystone 48 | swift stat || die "Failure geting status" 49 | 50 | # We start by creating a test container 51 | swift post $CONTAINER || die "Failure creating container $CONTAINER" 52 | 53 | # add some files into it. 54 | swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER" 55 | 56 | # list them 57 | swift list $CONTAINER || die "Failure listing contents of container $CONTAINER" 58 | 59 | # And we may want to delete them now that we have tested that 60 | # everything works. 
61 | swift delete $CONTAINER || die "Failure deleting container $CONTAINER" 62 | 63 | set +o xtrace 64 | echo "*********************************************************************" 65 | echo "SUCCESS: End DevStack Exercise: $0" 66 | echo "*********************************************************************" 67 | -------------------------------------------------------------------------------- /extras.d/80-tempest.sh: -------------------------------------------------------------------------------- 1 | # tempest.sh - DevStack extras script 2 | 3 | source $TOP_DIR/lib/tempest 4 | 5 | if [[ "$1" == "stack" ]]; then 6 | # Configure Tempest last to ensure that the runtime configuration of 7 | # the various OpenStack services can be queried. 8 | if is_service_enabled tempest; then 9 | echo_summary "Configuring Tempest" 10 | install_tempest 11 | configure_tempest 12 | init_tempest 13 | fi 14 | fi 15 | 16 | if [[ "$1" == "unstack" ]]; then 17 | # no-op 18 | : 19 | fi 20 | 21 | 22 | -------------------------------------------------------------------------------- /extras.d/README: -------------------------------------------------------------------------------- 1 | The extras.d directory contains project initialization scripts to be 2 | sourced by stack.sh at the end of its run. This is expected to be 3 | used by external projects that want to be configured, started and 4 | stopped with DevStack. 5 | 6 | Order is controlled by prefixing the script names with the a two digit 7 | sequence number. Script names must end with '.sh'. This provides a 8 | convenient way to disable scripts by simoy renaming them. 9 | 10 | DevStack reserves the sequence numbers 00 through 09 and 90 through 99 11 | for its own use. 12 | 13 | The scripts are called with an argument of 'stack' by stack.sh and 14 | with an argument of 'unstack' by unstack.sh. 
15 | -------------------------------------------------------------------------------- /files/apache-horizon.template: -------------------------------------------------------------------------------- 1 | 2 | WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi 3 | WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% 4 | WSGIApplicationGroup %{GLOBAL} 5 | 6 | SetEnv APACHE_RUN_USER %USER% 7 | SetEnv APACHE_RUN_GROUP %GROUP% 8 | WSGIProcessGroup horizon 9 | 10 | DocumentRoot %HORIZON_DIR%/.blackhole/ 11 | Alias /media %HORIZON_DIR%/openstack_dashboard/static 12 | 13 | 14 | Options FollowSymLinks 15 | AllowOverride None 16 | 17 | 18 | 19 | Options Indexes FollowSymLinks MultiViews 20 | AllowOverride None 21 | Order allow,deny 22 | allow from all 23 | 24 | 25 | ErrorLog /var/log/%APACHE_NAME%/horizon_error.log 26 | LogLevel warn 27 | CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined 28 | 29 | 30 | WSGISocketPrefix /var/run/%APACHE_NAME% 31 | -------------------------------------------------------------------------------- /files/apts/baremetal: -------------------------------------------------------------------------------- 1 | busybox 2 | dnsmasq 3 | gcc 4 | ipmitool 5 | make 6 | open-iscsi 7 | qemu-kvm 8 | syslinux 9 | tgt 10 | -------------------------------------------------------------------------------- /files/apts/ceilometer-collector: -------------------------------------------------------------------------------- 1 | python-pymongo 2 | mongodb-server 3 | -------------------------------------------------------------------------------- /files/apts/cinder: -------------------------------------------------------------------------------- 1 | tgt 2 | lvm2 3 | -------------------------------------------------------------------------------- /files/apts/general: -------------------------------------------------------------------------------- 1 | bridge-utils 2 | pep8 3 | pylint 4 | python-pip 5 | screen 
6 | unzip 7 | wget 8 | psmisc 9 | git 10 | lsof # useful when debugging 11 | openssh-server 12 | vim-nox 13 | locate # useful when debugging 14 | python-virtualenv 15 | python-unittest2 16 | iputils-ping 17 | wget 18 | curl 19 | tcpdump 20 | euca2ools # only for testing client 21 | tar 22 | python-cmd2 # dist:precise 23 | python-netaddr 24 | -------------------------------------------------------------------------------- /files/apts/glance: -------------------------------------------------------------------------------- 1 | gcc 2 | libxml2-dev 3 | python-dev 4 | python-eventlet 5 | python-routes 6 | python-greenlet 7 | python-argparse # dist:oneiric 8 | python-sqlalchemy 9 | python-wsgiref 10 | python-pastedeploy 11 | python-xattr 12 | python-iso8601 13 | -------------------------------------------------------------------------------- /files/apts/horizon: -------------------------------------------------------------------------------- 1 | apache2 # NOPRIME 2 | libapache2-mod-wsgi # NOPRIME 3 | python-beautifulsoup 4 | python-dateutil 5 | python-paste 6 | python-pastedeploy 7 | python-anyjson 8 | python-routes 9 | python-xattr 10 | python-sqlalchemy 11 | python-webob 12 | python-kombu 13 | pylint 14 | pep8 15 | python-eventlet 16 | python-nose 17 | python-sphinx 18 | python-mox 19 | python-kombu 20 | python-coverage 21 | python-cherrypy3 # why? 
22 | python-migrate 23 | nodejs 24 | nodejs-legacy # dist:quantal 25 | python-netaddr 26 | -------------------------------------------------------------------------------- /files/apts/keystone: -------------------------------------------------------------------------------- 1 | python-setuptools 2 | python-dev 3 | python-lxml 4 | python-pastescript 5 | python-pastedeploy 6 | python-paste 7 | sqlite3 8 | python-pysqlite2 9 | python-sqlalchemy 10 | python-mysqldb 11 | python-webob 12 | python-greenlet 13 | python-routes 14 | libldap2-dev 15 | libsasl2-dev 16 | python-bcrypt 17 | -------------------------------------------------------------------------------- /files/apts/ldap: -------------------------------------------------------------------------------- 1 | ldap-utils 2 | slapd # NOPRIME 3 | python-ldap 4 | -------------------------------------------------------------------------------- /files/apts/n-api: -------------------------------------------------------------------------------- 1 | gcc # temporary because this pulls in glance to get the client without running the glance prereqs 2 | python-dateutil 3 | -------------------------------------------------------------------------------- /files/apts/n-cpu: -------------------------------------------------------------------------------- 1 | # Stuff for diablo volumes 2 | lvm2 3 | open-iscsi 4 | open-iscsi-utils 5 | genisoimage 6 | sysfsutils 7 | sg3-utils 8 | -------------------------------------------------------------------------------- /files/apts/n-novnc: -------------------------------------------------------------------------------- 1 | python-numpy 2 | -------------------------------------------------------------------------------- /files/apts/n-vol: -------------------------------------------------------------------------------- 1 | tgt 2 | lvm2 3 | -------------------------------------------------------------------------------- /files/apts/nova: 
-------------------------------------------------------------------------------- 1 | dnsmasq-base 2 | dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal 3 | kpartx 4 | parted 5 | arping # only available in dist:natty 6 | iputils-arping # only available in dist:oneiric 7 | mysql-server # NOPRIME 8 | python-mysqldb 9 | python-xattr # needed for glance which is needed for nova --- this shouldn't be here 10 | python-lxml # needed for glance which is needed for nova --- this shouldn't be here 11 | kvm 12 | gawk 13 | iptables 14 | ebtables 15 | sqlite3 16 | sudo 17 | kvm 18 | libvirt-bin # NOPRIME 19 | libjs-jquery-tablesorter # Needed for coverage html reports 20 | vlan 21 | curl 22 | rabbitmq-server # NOPRIME 23 | qpidd # dist:precise NOPRIME 24 | socat # used by ajaxterm 25 | python-mox 26 | python-paste 27 | python-migrate 28 | python-gflags 29 | python-greenlet 30 | python-libvirt 31 | python-libxml2 32 | python-routes 33 | python-netaddr 34 | python-numpy # used by websockify for spice console 35 | python-pastedeploy 36 | python-eventlet 37 | python-cheetah 38 | python-carrot 39 | python-tempita 40 | python-sqlalchemy 41 | python-suds 42 | python-lockfile 43 | python-m2crypto 44 | python-boto 45 | python-kombu 46 | python-feedparser 47 | python-iso8601 48 | python-qpid # dist:precise 49 | -------------------------------------------------------------------------------- /files/apts/postgresql: -------------------------------------------------------------------------------- 1 | python-psycopg2 2 | -------------------------------------------------------------------------------- /files/apts/quantum: -------------------------------------------------------------------------------- 1 | ebtables 2 | iptables 3 | iputils-ping 4 | iputils-arping 5 | mysql-server #NOPRIME 6 | sudo 7 | python-boto 8 | python-iso8601 9 | python-paste 10 | python-routes 11 | python-suds 12 | python-netaddr 13 | python-pastedeploy 14 | python-greenlet 15 | python-kombu 
16 | python-eventlet 17 | python-sqlalchemy 18 | python-mysqldb 19 | python-pyudev 20 | python-qpid # dist:precise 21 | dnsmasq-base 22 | dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal 23 | rabbitmq-server # NOPRIME 24 | qpid # NOPRIME 25 | sqlite3 26 | vlan 27 | -------------------------------------------------------------------------------- /files/apts/ryu: -------------------------------------------------------------------------------- 1 | python-setuptools 2 | python-gevent 3 | python-gflags 4 | python-netifaces 5 | python-sphinx 6 | -------------------------------------------------------------------------------- /files/apts/swift: -------------------------------------------------------------------------------- 1 | curl 2 | gcc 3 | memcached 4 | python-configobj 5 | python-coverage 6 | python-dev 7 | python-eventlet 8 | python-greenlet 9 | python-netifaces 10 | python-nose 11 | python-pastedeploy 12 | python-setuptools 13 | python-simplejson 14 | python-webob 15 | python-xattr 16 | sqlite3 17 | xfsprogs 18 | -------------------------------------------------------------------------------- /files/apts/tls-proxy: -------------------------------------------------------------------------------- 1 | stud # only available in dist:precise,quantal 2 | -------------------------------------------------------------------------------- /files/default_catalog.templates: -------------------------------------------------------------------------------- 1 | # config for TemplatedCatalog, using camelCase because I don't want to do 2 | # translations for legacy compat 3 | catalog.RegionOne.identity.publicURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 4 | catalog.RegionOne.identity.adminURL = http://%SERVICE_HOST%:$(admin_port)s/v2.0 5 | catalog.RegionOne.identity.internalURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 6 | catalog.RegionOne.identity.name = Identity Service 7 | 8 | 9 | catalog.RegionOne.compute.publicURL = 
http://%SERVICE_HOST%:8774/v2/$(tenant_id)s 10 | catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s 11 | catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s 12 | catalog.RegionOne.compute.name = Compute Service 13 | 14 | 15 | catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s 16 | catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s 17 | catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s 18 | catalog.RegionOne.volume.name = Volume Service 19 | 20 | 21 | catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud 22 | catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin 23 | catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud 24 | catalog.RegionOne.ec2.name = EC2 Service 25 | 26 | 27 | catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% 28 | catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% 29 | catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% 30 | catalog.RegionOne.s3.name = S3 Service 31 | 32 | 33 | catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292 34 | catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 35 | catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292 36 | catalog.RegionOne.image.name = Image Service 37 | 38 | catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 39 | catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 40 | catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1 41 | catalog.RegionOne.orchestration.name = Heat Service 42 | -------------------------------------------------------------------------------- /files/ldap/manager.ldif.in: -------------------------------------------------------------------------------- 1 | dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config 
2 | changetype: modify 3 | replace: olcSuffix 4 | olcSuffix: dc=openstack,dc=org 5 | - 6 | replace: olcRootDN 7 | olcRootDN: dc=Manager,dc=openstack,dc=org 8 | - 9 | ${LDAP_ROOTPW_COMMAND}: olcRootPW 10 | olcRootPW: ${SLAPPASS} 11 | -------------------------------------------------------------------------------- /files/ldap/openstack.ldif: -------------------------------------------------------------------------------- 1 | dn: dc=openstack,dc=org 2 | dc: openstack 3 | objectClass: dcObject 4 | objectClass: organizationalUnit 5 | ou: openstack 6 | 7 | dn: ou=Groups,dc=openstack,dc=org 8 | objectClass: organizationalUnit 9 | ou: Groups 10 | 11 | dn: ou=Users,dc=openstack,dc=org 12 | objectClass: organizationalUnit 13 | ou: Users 14 | 15 | dn: ou=Roles,dc=openstack,dc=org 16 | objectClass: organizationalUnit 17 | ou: Roles 18 | 19 | dn: ou=Projects,dc=openstack,dc=org 20 | objectClass: organizationalUnit 21 | ou: Projects 22 | -------------------------------------------------------------------------------- /files/rpms-suse/ceilometer-collector: -------------------------------------------------------------------------------- 1 | # Not available in openSUSE main repositories, but can be fetched from OBS 2 | # (devel:languages:python and server:database projects) 3 | mongodb 4 | python-pymongo 5 | -------------------------------------------------------------------------------- /files/rpms-suse/cinder: -------------------------------------------------------------------------------- 1 | lvm2 2 | tgt 3 | -------------------------------------------------------------------------------- /files/rpms-suse/general: -------------------------------------------------------------------------------- 1 | bridge-utils 2 | curl 3 | euca2ools 4 | git-core 5 | iputils 6 | openssh 7 | psmisc 8 | python-cmd2 # dist:opensuse-12.3 9 | python-netaddr 10 | python-pep8 11 | python-pip 12 | python-pylint 13 | python-unittest2 14 | python-virtualenv 15 | screen 16 | tar 17 | tcpdump 18 | unzip 19 | 
vim-enhanced 20 | wget 21 | 22 | findutils-locate # useful when debugging 23 | lsof # useful when debugging 24 | -------------------------------------------------------------------------------- /files/rpms-suse/glance: -------------------------------------------------------------------------------- 1 | gcc 2 | libxml2-devel 3 | python-PasteDeploy 4 | python-Routes 5 | python-SQLAlchemy 6 | python-argparse 7 | python-devel 8 | python-eventlet 9 | python-greenlet 10 | python-iso8601 11 | python-wsgiref 12 | python-xattr 13 | -------------------------------------------------------------------------------- /files/rpms-suse/horizon: -------------------------------------------------------------------------------- 1 | apache2 # NOPRIME 2 | apache2-mod_wsgi # NOPRIME 3 | nodejs 4 | python-CherryPy # why? (coming from apts) 5 | python-Paste 6 | python-PasteDeploy 7 | python-Routes 8 | python-Sphinx 9 | python-SQLAlchemy 10 | python-WebOb 11 | python-anyjson 12 | python-beautifulsoup 13 | python-coverage 14 | python-dateutil 15 | python-eventlet 16 | python-kombu 17 | python-mox 18 | python-netaddr 19 | python-nose 20 | python-pep8 21 | python-pylint 22 | python-sqlalchemy-migrate 23 | python-xattr 24 | -------------------------------------------------------------------------------- /files/rpms-suse/keystone: -------------------------------------------------------------------------------- 1 | cyrus-sasl-devel 2 | openldap2-devel 3 | python-Paste 4 | python-PasteDeploy 5 | python-PasteScript 6 | python-Routes 7 | python-SQLAlchemy 8 | python-WebOb 9 | python-devel 10 | python-distribute 11 | python-setuptools # instead of python-distribute; dist:sle11sp2 12 | python-greenlet 13 | python-lxml 14 | python-mysql 15 | python-py-bcrypt 16 | python-pysqlite 17 | sqlite3 18 | -------------------------------------------------------------------------------- /files/rpms-suse/n-api: -------------------------------------------------------------------------------- 1 | gcc # temporary 
because this pulls in glance to get the client without running the glance prereqs 2 | python-dateutil 3 | -------------------------------------------------------------------------------- /files/rpms-suse/n-cpu: -------------------------------------------------------------------------------- 1 | # Stuff for diablo volumes 2 | genisoimage 3 | lvm2 4 | open-iscsi 5 | sysfsutils 6 | sg3_utils 7 | -------------------------------------------------------------------------------- /files/rpms-suse/n-novnc: -------------------------------------------------------------------------------- 1 | python-numpy 2 | -------------------------------------------------------------------------------- /files/rpms-suse/n-vol: -------------------------------------------------------------------------------- 1 | lvm2 2 | tgt 3 | -------------------------------------------------------------------------------- /files/rpms-suse/nova: -------------------------------------------------------------------------------- 1 | curl 2 | # Note: we need to package dhcp_release in dnsmasq! 
3 | dnsmasq 4 | ebtables 5 | gawk 6 | iptables 7 | iputils 8 | kpartx 9 | kvm 10 | # qemu as fallback if kvm cannot be used 11 | qemu 12 | libvirt # NOPRIME 13 | libvirt-python 14 | libxml2-python 15 | mysql-community-server # NOPRIME 16 | parted 17 | python-M2Crypto 18 | python-m2crypto # dist:sle11sp2 19 | python-Paste 20 | python-PasteDeploy 21 | python-Routes 22 | python-SQLAlchemy 23 | python-Tempita 24 | python-boto 25 | python-carrot 26 | python-cheetah 27 | python-eventlet 28 | python-feedparser 29 | python-greenlet 30 | python-iso8601 31 | python-kombu 32 | python-lockfile 33 | python-lxml # needed for glance which is needed for nova --- this shouldn't be here 34 | python-mox 35 | python-mysql 36 | python-netaddr 37 | python-paramiko 38 | python-python-gflags 39 | python-sqlalchemy-migrate 40 | python-suds 41 | python-xattr # needed for glance which is needed for nova --- this shouldn't be here 42 | rabbitmq-server # NOPRIME 43 | socat 44 | sqlite3 45 | sudo 46 | vlan 47 | 48 | # FIXME: qpid is not part of openSUSE, those names are tentative 49 | python-qpid # NOPRIME 50 | qpidd # NOPRIME 51 | -------------------------------------------------------------------------------- /files/rpms-suse/postgresql: -------------------------------------------------------------------------------- 1 | python-psycopg2 2 | -------------------------------------------------------------------------------- /files/rpms-suse/quantum: -------------------------------------------------------------------------------- 1 | # Note: we need to package dhcp_release in dnsmasq! 
2 | dnsmasq 3 | ebtables 4 | iptables 5 | iputils 6 | mysql-community-server # NOPRIME 7 | python-boto 8 | python-eventlet 9 | python-greenlet 10 | python-iso8601 11 | python-kombu 12 | python-mysql 13 | python-netaddr 14 | python-Paste 15 | python-PasteDeploy 16 | python-pyudev 17 | python-Routes 18 | python-SQLAlchemy 19 | python-suds 20 | rabbitmq-server # NOPRIME 21 | sqlite3 22 | sudo 23 | vlan 24 | 25 | # FIXME: qpid is not part of openSUSE, those names are tentative 26 | python-qpid # NOPRIME 27 | qpidd # NOPRIME 28 | -------------------------------------------------------------------------------- /files/rpms-suse/ryu: -------------------------------------------------------------------------------- 1 | python-distribute 2 | python-setuptools # instead of python-distribute; dist:sle11sp2 3 | python-Sphinx 4 | python-gevent 5 | python-python-gflags 6 | -------------------------------------------------------------------------------- /files/rpms-suse/swift: -------------------------------------------------------------------------------- 1 | curl 2 | gcc 3 | memcached 4 | python-PasteDeploy 5 | python-WebOb 6 | python-configobj 7 | python-coverage 8 | python-devel 9 | python-distribute 10 | python-setuptools # instead of python-distribute; dist:sle11sp2 11 | python-eventlet 12 | python-greenlet 13 | python-netifaces 14 | python-nose 15 | python-simplejson 16 | python-xattr 17 | sqlite3 18 | xfsprogs 19 | xinetd 20 | -------------------------------------------------------------------------------- /files/rpms/ceilometer-collector: -------------------------------------------------------------------------------- 1 | mongodb-server 2 | pymongo 3 | -------------------------------------------------------------------------------- /files/rpms/cinder: -------------------------------------------------------------------------------- 1 | lvm2 2 | scsi-target-utils 3 | -------------------------------------------------------------------------------- /files/rpms/general: 
-------------------------------------------------------------------------------- 1 | bridge-utils 2 | curl 3 | euca2ools # only for testing client 4 | git-core 5 | openssh-server 6 | psmisc 7 | pylint 8 | python-netaddr 9 | python-pep8 10 | python-pip 11 | python-unittest2 12 | python-virtualenv 13 | screen 14 | tar 15 | tcpdump 16 | unzip 17 | wget 18 | -------------------------------------------------------------------------------- /files/rpms/glance: -------------------------------------------------------------------------------- 1 | libxml2-devel 2 | python-argparse 3 | python-devel 4 | python-eventlet 5 | python-greenlet 6 | python-paste-deploy 7 | python-routes 8 | python-sqlalchemy 9 | python-wsgiref 10 | pyxattr 11 | -------------------------------------------------------------------------------- /files/rpms/horizon: -------------------------------------------------------------------------------- 1 | Django 2 | django-registration 3 | gcc 4 | httpd # NOPRIME 5 | mod_wsgi # NOPRIME 6 | pylint 7 | python-anyjson 8 | python-BeautifulSoup 9 | python-boto 10 | python-coverage 11 | python-dateutil 12 | python-eventlet 13 | python-greenlet 14 | python-httplib2 15 | python-kombu 16 | python-migrate 17 | python-mox 18 | python-netaddr 19 | python-nose 20 | python-paste 21 | python-paste-deploy 22 | python-pep8 23 | python-routes 24 | python-sphinx 25 | python-sqlalchemy 26 | python-webob 27 | pyxattr 28 | -------------------------------------------------------------------------------- /files/rpms/keystone: -------------------------------------------------------------------------------- 1 | python-greenlet 2 | python-lxml 3 | python-paste 4 | python-paste-deploy 5 | python-paste-script 6 | python-routes 7 | python-setuptools 8 | python-sqlalchemy 9 | python-sqlite2 10 | python-webob 11 | sqlite 12 | -------------------------------------------------------------------------------- /files/rpms/ldap: 
-------------------------------------------------------------------------------- 1 | openldap-servers 2 | openldap-clients 3 | python-ldap 4 | -------------------------------------------------------------------------------- /files/rpms/n-api: -------------------------------------------------------------------------------- 1 | python-dateutil 2 | -------------------------------------------------------------------------------- /files/rpms/n-cpu: -------------------------------------------------------------------------------- 1 | # Stuff for diablo volumes 2 | iscsi-initiator-utils 3 | lvm2 4 | genisoimage 5 | sysfsutils 6 | sg3_utils 7 | -------------------------------------------------------------------------------- /files/rpms/n-novnc: -------------------------------------------------------------------------------- 1 | numpy 2 | -------------------------------------------------------------------------------- /files/rpms/n-spice: -------------------------------------------------------------------------------- 1 | numpy 2 | -------------------------------------------------------------------------------- /files/rpms/n-vol: -------------------------------------------------------------------------------- 1 | lvm2 2 | scsi-target-utils 3 | -------------------------------------------------------------------------------- /files/rpms/nova: -------------------------------------------------------------------------------- 1 | MySQL-python 2 | curl 3 | dnsmasq-utils # for dhcp_release 4 | ebtables 5 | gawk 6 | iptables 7 | iputils 8 | kpartx 9 | kvm 10 | libvirt-bin # NOPRIME 11 | libvirt-python 12 | libxml2-python 13 | numpy # needed by websockify for spice console 14 | m2crypto 15 | mysql-server # NOPRIME 16 | parted 17 | python-boto 18 | python-carrot 19 | python-cheetah 20 | python-eventlet 21 | python-feedparser 22 | python-gflags 23 | python-greenlet 24 | python-iso8601 25 | python-kombu 26 | python-lockfile 27 | python-migrate 28 | python-mox 29 | python-netaddr 30 | 
python-paramiko 31 | python-paste 32 | python-paste-deploy 33 | python-qpid 34 | python-routes 35 | python-sqlalchemy 36 | python-suds 37 | python-tempita 38 | rabbitmq-server # NOPRIME 39 | qpid-cpp-server-daemon # NOPRIME 40 | sqlite 41 | sudo 42 | vconfig 43 | -------------------------------------------------------------------------------- /files/rpms/postgresql: -------------------------------------------------------------------------------- 1 | python-psycopg2 2 | -------------------------------------------------------------------------------- /files/rpms/quantum: -------------------------------------------------------------------------------- 1 | MySQL-python 2 | dnsmasq-utils # for dhcp_release 3 | ebtables 4 | iptables 5 | iputils 6 | mysql-server # NOPRIME 7 | python-boto 8 | python-eventlet 9 | python-greenlet 10 | python-iso8601 11 | python-kombu 12 | python-netaddr 13 | python-paste 14 | python-paste-deploy 15 | python-qpid 16 | python-routes 17 | python-sqlalchemy 18 | python-suds 19 | rabbitmq-server # NOPRIME 20 | qpid-cpp-server-daemon # NOPRIME 21 | sqlite 22 | sudo 23 | vconfig 24 | -------------------------------------------------------------------------------- /files/rpms/ryu: -------------------------------------------------------------------------------- 1 | python-setuptools 2 | python-gevent 3 | python-gflags 4 | python-netifaces 5 | python-sphinx 6 | -------------------------------------------------------------------------------- /files/rpms/swift: -------------------------------------------------------------------------------- 1 | curl 2 | gcc 3 | memcached 4 | python-configobj 5 | python-coverage 6 | python-devel 7 | python-eventlet 8 | python-greenlet 9 | python-netifaces 10 | python-nose 11 | python-paste-deploy 12 | python-setuptools 13 | python-simplejson 14 | python-webob 15 | pyxattr 16 | sqlite 17 | xfsprogs 18 | xinetd 19 | -------------------------------------------------------------------------------- /files/sources.list: 
-------------------------------------------------------------------------------- 1 | deb http://mirror.rackspace.com/ubuntu/ %DIST% main restricted 2 | deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates main restricted 3 | deb http://mirror.rackspace.com/ubuntu/ %DIST% universe 4 | deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates universe 5 | deb http://mirror.rackspace.com/ubuntu/ %DIST% multiverse 6 | deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates multiverse 7 | deb http://security.ubuntu.com/ubuntu %DIST%-security main restricted 8 | deb http://security.ubuntu.com/ubuntu %DIST%-security universe 9 | deb http://security.ubuntu.com/ubuntu %DIST%-security multiverse 10 | -------------------------------------------------------------------------------- /files/swift/rsyncd.conf: -------------------------------------------------------------------------------- 1 | uid = %USER% 2 | gid = %GROUP% 3 | log file = %SWIFT_DATA_DIR%/logs/rsyncd.log 4 | pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid 5 | address = 127.0.0.1 6 | 7 | [account6012] 8 | max connections = 25 9 | path = %SWIFT_DATA_DIR%/1/node/ 10 | read only = false 11 | lock file = %SWIFT_DATA_DIR%/run/account6012.lock 12 | 13 | [account6022] 14 | max connections = 25 15 | path = %SWIFT_DATA_DIR%/2/node/ 16 | read only = false 17 | lock file = %SWIFT_DATA_DIR%/run/account6022.lock 18 | 19 | [account6032] 20 | max connections = 25 21 | path = %SWIFT_DATA_DIR%/3/node/ 22 | read only = false 23 | lock file = %SWIFT_DATA_DIR%/run/account6032.lock 24 | 25 | [account6042] 26 | max connections = 25 27 | path = %SWIFT_DATA_DIR%/4/node/ 28 | read only = false 29 | lock file = %SWIFT_DATA_DIR%/run/account6042.lock 30 | 31 | 32 | [container6011] 33 | max connections = 25 34 | path = %SWIFT_DATA_DIR%/1/node/ 35 | read only = false 36 | lock file = %SWIFT_DATA_DIR%/run/container6011.lock 37 | 38 | [container6021] 39 | max connections = 25 40 | path = %SWIFT_DATA_DIR%/2/node/ 41 | read only = false 42 | lock file = 
%SWIFT_DATA_DIR%/run/container6021.lock 43 | 44 | [container6031] 45 | max connections = 25 46 | path = %SWIFT_DATA_DIR%/3/node/ 47 | read only = false 48 | lock file = %SWIFT_DATA_DIR%/run/container6031.lock 49 | 50 | [container6041] 51 | max connections = 25 52 | path = %SWIFT_DATA_DIR%/4/node/ 53 | read only = false 54 | lock file = %SWIFT_DATA_DIR%/run/container6041.lock 55 | 56 | 57 | [object6010] 58 | max connections = 25 59 | path = %SWIFT_DATA_DIR%/1/node/ 60 | read only = false 61 | lock file = %SWIFT_DATA_DIR%/run/object6010.lock 62 | 63 | [object6020] 64 | max connections = 25 65 | path = %SWIFT_DATA_DIR%/2/node/ 66 | read only = false 67 | lock file = %SWIFT_DATA_DIR%/run/object6020.lock 68 | 69 | [object6030] 70 | max connections = 25 71 | path = %SWIFT_DATA_DIR%/3/node/ 72 | read only = false 73 | lock file = %SWIFT_DATA_DIR%/run/object6030.lock 74 | 75 | [object6040] 76 | max connections = 25 77 | path = %SWIFT_DATA_DIR%/4/node/ 78 | read only = false 79 | lock file = %SWIFT_DATA_DIR%/run/object6040.lock 80 | -------------------------------------------------------------------------------- /files/swift/rsyslog.conf: -------------------------------------------------------------------------------- 1 | # Uncomment the following to have a log containing all logs together 2 | #local1,local2,local3,local4,local5.* %SWIFT_LOGDIR%/all.log 3 | 4 | # Uncomment the following to have hourly proxy logs for stats processing 5 | #$template HourlyProxyLog,"%SWIFT_LOGDIR%/hourly/%$YEAR%%$MONTH%%$DAY%%$HOUR%" 6 | #local1.*;local1.!notice ?HourlyProxyLog 7 | 8 | local1.*;local1.!notice %SWIFT_LOGDIR%/proxy.log 9 | local1.notice %SWIFT_LOGDIR%/proxy.error 10 | local1.* ~ 11 | 12 | local2.*;local2.!notice %SWIFT_LOGDIR%/storage1.log 13 | local2.notice %SWIFT_LOGDIR%/storage1.error 14 | local2.* ~ 15 | 16 | local3.*;local3.!notice %SWIFT_LOGDIR%/storage2.log 17 | local3.notice %SWIFT_LOGDIR%/storage2.error 18 | local3.* ~ 19 | 20 | local4.*;local4.!notice 
# lib/ceilometer
# Install and start the **Ceilometer** metering service

# To enable, add the following to localrc:
#   ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api

# Dependencies:
# - functions
# - OS_AUTH_URL for auth in api
# - DEST set to the destination directory
# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
# - STACK_USER service user

# stack.sh calls the entry points in this order:
# install_ceilometer
# configure_ceilometer
# init_ceilometer
# start_ceilometer
# stop_ceilometer
# cleanup_ceilometer

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set up default directories
CEILOMETER_DIR=$DEST/ceilometer
CEILOMETERCLIENT_DIR=$DEST/python-ceilometerclient
CEILOMETER_CONF_DIR=/etc/ceilometer
CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}

# Support potential entry-points console scripts
if [ -d "$CEILOMETER_DIR/bin" ]; then
    CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin
else
    CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
fi

# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceilometer() {
    # Drop the metering database stored in MongoDB
    mongo ceilometer --eval "db.dropDatabase();"
}

# configure_ceilometerclient() - Set config files, create data dirs, etc
function configure_ceilometerclient() {
    setup_develop "$CEILOMETERCLIENT_DIR"
}

# configure_ceilometer() - Set config files, create data dirs, etc
function configure_ceilometer() {
    setup_develop "$CEILOMETER_DIR"

    [ ! -d "$CEILOMETER_CONF_DIR" ] && sudo mkdir -m 755 -p "$CEILOMETER_CONF_DIR"
    # NOTE(review): init_ceilometer chowns its dir to $STACK_USER while this
    # function uses $USER -- confirm whether these should be unified.
    sudo chown $USER "$CEILOMETER_CONF_DIR"

    [ ! -d "$CEILOMETER_API_LOG_DIR" ] && sudo mkdir -m 755 -p "$CEILOMETER_API_LOG_DIR"
    sudo chown $USER "$CEILOMETER_API_LOG_DIR"

    iniset "$CEILOMETER_CONF" DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu'
    iniset "$CEILOMETER_CONF" DEFAULT notification_topics 'notifications,glance_notifications'
    iniset "$CEILOMETER_CONF" DEFAULT verbose True
    iniset "$CEILOMETER_CONF" DEFAULT rabbit_host "$RABBIT_HOST"
    iniset "$CEILOMETER_CONF" DEFAULT rabbit_password "$RABBIT_PASSWORD"
    # Quoted: the literal `?` in the URL was previously subject to shell
    # filename expansion when left unquoted
    iniset "$CEILOMETER_CONF" DEFAULT sql_connection "$BASE_SQL_CONN/nova?charset=utf8"

    # Install the policy file for the API server
    cp "$CEILOMETER_DIR/etc/ceilometer/policy.json" "$CEILOMETER_CONF_DIR"
    cp "$CEILOMETER_DIR/etc/ceilometer/pipeline.yaml" "$CEILOMETER_CONF_DIR"
    iniset "$CEILOMETER_CONF" DEFAULT policy_file "$CEILOMETER_CONF_DIR/policy.json"

    # The compute and central agents need these credentials in order to
    # call out to the public nova and glance APIs
    iniset "$CEILOMETER_CONF" DEFAULT os_username ceilometer
    iniset "$CEILOMETER_CONF" DEFAULT os_password "$SERVICE_PASSWORD"
    iniset "$CEILOMETER_CONF" DEFAULT os_tenant_name "$SERVICE_TENANT_NAME"
    iniset "$CEILOMETER_CONF" DEFAULT os_auth_url "$OS_AUTH_URL"

    iniset "$CEILOMETER_CONF" keystone_authtoken auth_protocol http
    iniset "$CEILOMETER_CONF" keystone_authtoken admin_user ceilometer
    iniset "$CEILOMETER_CONF" keystone_authtoken admin_password "$SERVICE_PASSWORD"
    iniset "$CEILOMETER_CONF" keystone_authtoken admin_tenant_name "$SERVICE_TENANT_NAME"
    iniset "$CEILOMETER_CONF" keystone_authtoken signing_dir "$CEILOMETER_AUTH_CACHE_DIR"

    cleanup_ceilometer
}

# init_ceilometer() - Initialize etc.
function init_ceilometer() {
    # Create cache dir
    sudo mkdir -p "$CEILOMETER_AUTH_CACHE_DIR"
    sudo chown "$STACK_USER" "$CEILOMETER_AUTH_CACHE_DIR"
    rm -f "$CEILOMETER_AUTH_CACHE_DIR"/*
}

# install_ceilometer() - Collect source and prepare
function install_ceilometer() {
    # Args intentionally unquoted: git_clone's optional args drop out when unset
    git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH
}

# install_ceilometerclient() - Collect source and prepare
function install_ceilometerclient() {
    git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH
}

# start_ceilometer() - Start running processes, including screen
function start_ceilometer() {
    screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
    screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
    screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF"
    screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
}

# stop_ceilometer() - Stop running processes
function stop_ceilometer() {
    # Kill the ceilometer screen windows
    local serv
    for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api; do
        screen -S "$SCREEN_NAME" -p "$serv" -X kill
    done
}

# Restore xtrace
$XTRACE
# lib/database
# Interface for interacting with different database backends

# Dependencies:
# DATABASE_BACKENDS variable must contain a list of available database backends
# DATABASE_TYPE variable must be set

# Each database must implement four functions:
# recreate_database_$DATABASE_TYPE
# install_database_$DATABASE_TYPE
# configure_database_$DATABASE_TYPE
# database_connection_url_$DATABASE_TYPE
#
# and call register_database $DATABASE_TYPE

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace

# Register a database backend
# $1 The name of the database backend
function register_database {
    if [ -z "$DATABASE_BACKENDS" ]; then
        DATABASE_BACKENDS=$1
    else
        DATABASE_BACKENDS+=" $1"
    fi
}

# Source every backend implementation; each calls register_database
for f in "$TOP_DIR"/lib/databases/*; do
    source "$f"
done

# Set the database type based on the configuration.
# Returns 1 when no database service is enabled.
function initialize_database_backends {
    local backend
    for backend in $DATABASE_BACKENDS; do
        is_service_enabled "$backend" && DATABASE_TYPE=$backend
    done

    [ -z "$DATABASE_TYPE" ] && return 1

    # For backward-compatibility, read in the MYSQL_HOST/USER variables and use
    # them as the default values for the DATABASE_HOST/USER variables.
    MYSQL_HOST=${MYSQL_HOST:-localhost}
    MYSQL_USER=${MYSQL_USER:-root}

    DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}}
    DATABASE_USER=${DATABASE_USER:-${MYSQL_USER}}

    if [ -n "$MYSQL_PASSWORD" ]; then
        DATABASE_PASSWORD=$MYSQL_PASSWORD
    else
        read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE."
    fi

    # We configure Nova, Horizon, Glance and Keystone to use MySQL as their
    # database server. While they share a single server, each has their own
    # database and tables.

    # By default this script will install and configure MySQL. If you want to
    # use an existing server, you can pass in the user/password/host parameters.
    # You will need to send the same ``DATABASE_PASSWORD`` to every host if you
    # are doing a multi-node DevStack installation.

    # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services
    BASE_SQL_CONN=${BASE_SQL_CONN:-${DATABASE_TYPE}://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST}

    return 0
}

# Recreate a given database
# $1 The name of the database
# $2 The character set/encoding of the database
function recreate_database {
    local db=$1
    local charset=$2
    recreate_database_$DATABASE_TYPE "$db" "$charset"
}

# Install the configured database backend
function install_database {
    install_database_$DATABASE_TYPE
}

# Configure and start the configured database backend
function configure_database {
    configure_database_$DATABASE_TYPE
}

# Generate an SQLAlchemy connection URL and store it in a variable
# $1 The variable name in which to store the connection URL
# $2 The name of the database
function database_connection_url {
    local var=$1
    local db=$2
    database_connection_url_$DATABASE_TYPE "$var" "$db"
}

# Restore xtrace
$XTRACE
# lib/databases/mysql
# Functions to control the configuration and operation of the **MySQL** database backend

# Dependencies:
# DATABASE_{HOST,USER,PASSWORD} must be defined

# Save trace setting
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace

register_database mysql

# Drop and recreate the given database
# $1 The name of the database
# $2 The character set/encoding of the database
function recreate_database_mysql {
    local db=$1
    local charset=$2
    mysql -u"$DATABASE_USER" -p"$DATABASE_PASSWORD" -e "DROP DATABASE IF EXISTS $db;"
    mysql -u"$DATABASE_USER" -p"$DATABASE_PASSWORD" -e "CREATE DATABASE $db CHARACTER SET $charset;"
}

# Configure my.cnf, set the root password and grants, and (re)start the server
function configure_database_mysql {
    echo_summary "Configuring and starting MySQL"

    if is_ubuntu; then
        MY_CONF=/etc/mysql/my.cnf
        MYSQL=mysql
    elif is_fedora; then
        MY_CONF=/etc/my.cnf
        MYSQL=mysqld
    elif is_suse; then
        MY_CONF=/etc/my.cnf
        MYSQL=mysql
    else
        exit_distro_not_supported "mysql configuration"
    fi

    # Start mysql-server
    if is_fedora || is_suse; then
        # service is not started by default
        start_service $MYSQL
    fi

    # Set the root password - only works the first time. For Ubuntu, we already
    # did that with debconf before installing the package.
    if ! is_ubuntu; then
        sudo mysqladmin -u root password "$DATABASE_PASSWORD" || true
    fi

    # Update the DB to give user '$DATABASE_USER'@'%' full control of all databases:
    sudo mysql -uroot -p"$DATABASE_PASSWORD" -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"

    # Now update ``my.cnf`` for some local needs and restart the mysql service

    # Change 'bind-address' from localhost (127.0.0.1) to any (0.0.0.0)
    sudo sed -i '/^bind-address/s/127.0.0.1/0.0.0.0/g' $MY_CONF

    # Set default db type to InnoDB
    if sudo grep -q "default-storage-engine" $MY_CONF; then
        # Change it
        sudo bash -c "source $TOP_DIR/functions; iniset $MY_CONF mysqld default-storage-engine InnoDB"
    else
        # Add it
        sudo sed -i -e "/^\[mysqld\]/ a \
default-storage-engine = InnoDB" $MY_CONF
    fi

    # Turn on slow query log
    sudo sed -i '/log.slow.queries/d' $MY_CONF
    sudo sed -i -e "/^\[mysqld\]/ a \
log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF

    # Log all queries (any query taking longer than 0 seconds)
    sudo sed -i '/long.query.time/d' $MY_CONF
    sudo sed -i -e "/^\[mysqld\]/ a \
long-query-time = 0" $MY_CONF

    # Log all non-indexed queries
    sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF
    sudo sed -i -e "/^\[mysqld\]/ a \
log-queries-not-using-indexes" $MY_CONF

    restart_service $MYSQL
}

# Install the mysql server package (preseeding the root password on Ubuntu)
function install_database_mysql {
    if is_ubuntu; then
        # Seed configuration with mysql password so that apt-get install doesn't
        # prompt us for a password upon install.
        # NOTE(review): the here-document bodies below were destroyed by markup
        # rendering in the source (the ``<<`` markers were eaten); reconstructed
        # from upstream devstack -- verify against the original file.
        cat <<MYSQL_PRESEED | sudo debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $DATABASE_PASSWORD
mysql-server-5.1 mysql-server/root_password_again password $DATABASE_PASSWORD
mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED
    fi

    # While ``.my.cnf`` is not needed for OpenStack to function, it is useful
    # as it allows you to access the mysql databases without specifying the
    # username/password each time.
    if [[ ! -e $HOME/.my.cnf ]]; then
        cat <<EOF >$HOME/.my.cnf
[client]
user=$DATABASE_USER
password=$DATABASE_PASSWORD
host=$DATABASE_HOST
EOF
        chmod 0600 $HOME/.my.cnf
    fi
    # Install mysql-server
    if is_ubuntu || is_fedora; then
        install_package mysql-server
    elif is_suse; then
        install_package mysql-community-server
    else
        exit_distro_not_supported "mysql installation"
    fi
}

# Store the SQLAlchemy connection URL for db $2 in variable named $1
function database_connection_url_mysql {
    local output=$1
    local db=$2
    eval "$output=$BASE_SQL_CONN/$db?charset=utf8"
}

# Restore xtrace
$MY_XTRACE
# lib/databases/postgresql
# Functions to control the configuration and operation of the **PostgreSQL** database backend

# Dependencies:
# DATABASE_{HOST,USER,PASSWORD} must be defined

# Save trace setting
PG_XTRACE=$(set +o | grep xtrace)
set +o xtrace

register_database postgresql

# Drop and recreate the given database
# $1 The name of the database
# $2 The encoding of the database
function recreate_database_postgresql {
    local db=$1
    local charset=$2
    # Avoid unsightly error when calling dropdb when the database doesn't exist
    psql -h"$DATABASE_HOST" -U"$DATABASE_USER" -dtemplate1 -c "DROP DATABASE IF EXISTS $db"
    createdb -h "$DATABASE_HOST" -U"$DATABASE_USER" -l C -T template0 -E "$charset" "$db"
}

# Configure pg_hba.conf/postgresql.conf for password auth and (re)start the server
function configure_database_postgresql {
    echo_summary "Configuring and starting PostgreSQL"
    if is_fedora; then
        PG_HBA=/var/lib/pgsql/data/pg_hba.conf
        PG_CONF=/var/lib/pgsql/data/postgresql.conf
        # Initialize the cluster only if it does not exist yet
        sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb
    elif is_ubuntu; then
        PG_DIR=$(find /etc/postgresql -name pg_hba.conf | xargs dirname)
        PG_HBA=$PG_DIR/pg_hba.conf
        PG_CONF=$PG_DIR/postgresql.conf
    elif is_suse; then
        PG_HBA=/var/lib/pgsql/data/pg_hba.conf
        PG_CONF=/var/lib/pgsql/data/postgresql.conf
        # initdb is called when postgresql is first started
        sudo [ -e $PG_HBA ] || start_service postgresql
    else
        exit_distro_not_supported "postgresql configuration"
    fi
    # Listen on all addresses
    sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF
    # Do password auth from all IPv4 clients
    sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $PG_HBA
    # Do password auth for all IPv6 clients
    sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA
    restart_service postgresql

    # If creating the role fails, chances are it already existed. Try to alter it.
    sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \
        sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'"
}

# Install the postgresql server package and seed ~/.pgpass
function install_database_postgresql {
    echo_summary "Installing postgresql"
    PGPASS=$HOME/.pgpass
    if [[ ! -e $PGPASS ]]; then
        # NOTE(review): the here-document marker was mangled in the rendered
        # source; reconstructed as a standard ``<<EOF`` here-document.
        cat <<EOF > $PGPASS
*:*:*:$DATABASE_USER:$DATABASE_PASSWORD
EOF
        chmod 0600 $PGPASS
    else
        sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $PGPASS
    fi
    if is_ubuntu; then
        install_package postgresql
    elif is_fedora || is_suse; then
        install_package postgresql-server
    else
        exit_distro_not_supported "postgresql installation"
    fi
}

# Store the SQLAlchemy connection URL for db $2 in variable named $1
function database_connection_url_postgresql {
    local output=$1
    local db=$2
    eval "$output=$BASE_SQL_CONN/$db?client_encoding=utf8"
}

# Restore xtrace
$PG_XTRACE
# lib/horizon
# Functions to control the configuration and operation of the horizon service

# Dependencies:
# ``functions`` file
# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined

# ``stack.sh`` calls the entry points in this order:
#
# install_horizon
# configure_horizon
# init_horizon
# start_horizon
# stop_horizon
# cleanup_horizon

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set up default directories
HORIZON_DIR=$DEST/horizon

# Allow overriding the default Apache user and group, default to
# current user and his default group.
APACHE_USER=${APACHE_USER:-$USER}
APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}


# Entry Points
# ------------

# cleanup_horizon() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_horizon() {
    # This function intentionally left blank
    :
}

# configure_horizon() - Set config files, create data dirs, etc
function configure_horizon() {
    setup_develop "$HORIZON_DIR"
}

# init_horizon() - Initialize databases, write local_settings, configure apache
function init_horizon() {
    # Remove stale session database.
    rm -f "$HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3"

    # ``local_settings.py`` is used to override horizon default settings.
    local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
    cp "$FILES/horizon_settings.py" "$local_settings"

    # Initialize the horizon database (it stores sessions and notices shown to
    # users). The user system is external (keystone).
    cd "$HORIZON_DIR"
    python manage.py syncdb --noinput
    cd "$TOP_DIR"

    # Create an empty directory that apache uses as docroot
    sudo mkdir -p "$HORIZON_DIR/.blackhole"

    if is_ubuntu; then
        APACHE_NAME=apache2
        APACHE_CONF=sites-available/horizon
        # Clean up the old config name
        sudo rm -f /etc/apache2/sites-enabled/000-default
        # Be a good citizen and use the distro tools here; the real config
        # content is written below from the template
        sudo touch /etc/$APACHE_NAME/$APACHE_CONF
        sudo a2ensite horizon
        # WSGI isn't enabled by default, enable it
        sudo a2enmod wsgi
    elif is_fedora; then
        APACHE_NAME=httpd
        APACHE_CONF=conf.d/horizon.conf
        sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf
    elif is_suse; then
        APACHE_NAME=apache2
        APACHE_CONF=vhosts.d/horizon.conf
        # WSGI isn't enabled by default, enable it
        sudo a2enmod wsgi
    else
        exit_distro_not_supported "apache configuration"
    fi

    # Configure apache to run horizon
    sudo sh -c "sed -e \"
        s,%USER%,$APACHE_USER,g;
        s,%GROUP%,$APACHE_GROUP,g;
        s,%HORIZON_DIR%,$HORIZON_DIR,g;
        s,%APACHE_NAME%,$APACHE_NAME,g;
        s,%DEST%,$DEST,g;
    \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF"
}

# install_horizon() - Collect source and prepare
function install_horizon() {
    # Apache installation, because we mark it NOPRIME
    if is_ubuntu; then
        # Install apache2, which is NOPRIME'd
        install_package apache2 libapache2-mod-wsgi
    elif is_fedora; then
        sudo rm -f /etc/httpd/conf.d/000-*
        install_package httpd mod_wsgi
    elif is_suse; then
        install_package apache2 apache2-mod_wsgi
    else
        exit_distro_not_supported "apache installation"
    fi

    # NOTE(sdague) quantal changed the name of the node binary
    if is_ubuntu; then
        if [[ ! -e "/usr/bin/node" ]]; then
            install_package nodejs-legacy
        fi
    fi

    # Args intentionally unquoted: an unset optional $HORIZON_TAG must drop out
    git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG
}

# start_horizon() - Start running processes, including screen
function start_horizon() {
    restart_service $APACHE_NAME
    screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
}

# stop_horizon() - Stop running processes (non-screen)
function stop_horizon() {
    if is_ubuntu; then
        stop_service apache2
    elif is_fedora; then
        stop_service httpd
    elif is_suse; then
        stop_service apache2
    else
        exit_distro_not_supported "apache configuration"
    fi
}

# Restore xtrace
$XTRACE
#create manager.ldif 36 | TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif` 37 | sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE 38 | 39 | #update ldap olcdb 40 | sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE 41 | 42 | # add our top level ldap nodes 43 | if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then 44 | printf "LDAP already configured for OpenStack\n" 45 | if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then 46 | # clear LDAP state 47 | clear_ldap_state 48 | # reconfigure LDAP for OpenStack 49 | ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif 50 | fi 51 | else 52 | printf "Configuring LDAP for OpenStack\n" 53 | ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif 54 | fi 55 | } 56 | 57 | # start_ldap() - Start LDAP 58 | function start_ldap() { 59 | sudo service slapd restart 60 | } 61 | 62 | 63 | # stop_ldap() - Stop LDAP 64 | function stop_ldap() { 65 | sudo service slapd stop 66 | } 67 | 68 | # clear_ldap_state() - Clear LDAP State 69 | function clear_ldap_state() { 70 | ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org" 71 | } 72 | 73 | # Restore xtrace 74 | $XTRACE 75 | -------------------------------------------------------------------------------- /lib/quantum_plugins/README.md: -------------------------------------------------------------------------------- 1 | Quantum plugin specific files 2 | ============================= 3 | Quantum plugins require plugin specific behavior. 4 | The files under the directory, ``lib/quantum_plugins/``, will be used 5 | when their service is enabled. 
6 | Each plugin has ``lib/quantum_plugins/$Q_PLUGIN`` and defines the following 7 | functions. 8 | Plugin specific configuration variables should be in this file. 9 | 10 | * filename: ``$Q_PLUGIN`` 11 | * The corresponding file name MUST be the same as the plugin name ``$Q_PLUGIN``. 12 | Plugin specific configuration variables should be in this file. 13 | 14 | functions 15 | --------- 16 | ``lib/quantum`` calls the following functions when the ``$Q_PLUGIN`` is enabled 17 | 18 | * ``quantum_plugin_create_nova_conf`` : 19 | set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf 20 | e.g. 21 | NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} 22 | * ``quantum_plugin_install_agent_packages`` : 23 | install packages that are specific to the plugin agent 24 | e.g. 25 | install_package bridge-utils 26 | * ``quantum_plugin_configure_common`` : 27 | set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``, 28 | ``Q_DB_NAME``, ``Q_PLUGIN_CLASS`` 29 | * ``quantum_plugin_configure_debug_command`` 30 | * ``quantum_plugin_configure_dhcp_agent`` 31 | * ``quantum_plugin_configure_l3_agent`` 32 | * ``quantum_plugin_configure_plugin_agent`` 33 | * ``quantum_plugin_configure_service`` 34 | * ``quantum_plugin_setup_interface_driver`` 35 | -------------------------------------------------------------------------------- /lib/quantum_plugins/bigswitch_floodlight: -------------------------------------------------------------------------------- 1 | # Quantum Big Switch/FloodLight plugin 2 | # ------------------------------------ 3 | 4 | # Save trace setting 5 | MY_XTRACE=$(set +o | grep xtrace) 6 | set +o xtrace 7 | 8 | source $TOP_DIR/lib/quantum_plugins/ovs_base 9 | source $TOP_DIR/lib/quantum_thirdparty/bigswitch_floodlight # for third party service specific configuration values 10 | 11 | function quantum_plugin_create_nova_conf() { 12 | NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
13 | } 14 | 15 | function quantum_plugin_install_agent_packages() { 16 | _quantum_ovs_base_install_agent_packages 17 | } 18 | 19 | function quantum_plugin_configure_common() { 20 | Q_PLUGIN_CONF_PATH=etc/quantum/plugins/bigswitch 21 | Q_PLUGIN_CONF_FILENAME=restproxy.ini 22 | Q_DB_NAME="restproxy_quantum" 23 | Q_PLUGIN_CLASS="quantum.plugins.bigswitch.plugin.QuantumRestProxyV2" 24 | BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} 25 | BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} 26 | } 27 | 28 | function quantum_plugin_configure_debug_command() { 29 | _quantum_ovs_base_configure_debug_command 30 | } 31 | 32 | function quantum_plugin_configure_dhcp_agent() { 33 | : 34 | } 35 | 36 | function quantum_plugin_configure_l3_agent() { 37 | _quantum_ovs_base_configure_l3_agent 38 | } 39 | 40 | function quantum_plugin_configure_plugin_agent() { 41 | : 42 | } 43 | 44 | function quantum_plugin_configure_service() { 45 | iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servers $BS_FL_CONTROLLERS_PORT 46 | iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servertimeout $BS_FL_CONTROLLER_TIMEOUT 47 | } 48 | 49 | function quantum_plugin_setup_interface_driver() { 50 | local conf_file=$1 51 | iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver 52 | } 53 | 54 | # Restore xtrace 55 | $MY_XTRACE 56 | -------------------------------------------------------------------------------- /lib/quantum_plugins/brocade: -------------------------------------------------------------------------------- 1 | # Brocade Quantum Plugin 2 | # ---------------------- 3 | 4 | # Save trace setting 5 | BRCD_XTRACE=$(set +o | grep xtrace) 6 | set +o xtrace 7 | 8 | function is_quantum_ovs_base_plugin() { 9 | return 1 10 | } 11 | 12 | function quantum_plugin_create_nova_conf() { 13 | NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} 14 | } 15 | 16 | function quantum_plugin_install_agent_packages() { 17 | install_package 
bridge-utils 18 | } 19 | 20 | function quantum_plugin_configure_common() { 21 | Q_PLUGIN_CONF_PATH=etc/quantum/plugins/brocade 22 | Q_PLUGIN_CONF_FILENAME=brocade.ini 23 | Q_DB_NAME="brcd_quantum" 24 | Q_PLUGIN_CLASS="quantum.plugins.brocade.QuantumPlugin.BrocadePluginV2" 25 | } 26 | 27 | function quantum_plugin_configure_debug_command() { 28 | : 29 | } 30 | 31 | function quantum_plugin_configure_dhcp_agent() { 32 | : 33 | } 34 | 35 | function quantum_plugin_configure_l3_agent() { 36 | : 37 | } 38 | 39 | function quantum_plugin_configure_plugin_agent() { 40 | AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" 41 | } 42 | 43 | function quantum_plugin_setup_interface_driver() { 44 | local conf_file=$1 45 | iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver 46 | } 47 | 48 | # Restore xtrace 49 | $BRCD_XTRACE 50 | -------------------------------------------------------------------------------- /lib/quantum_plugins/linuxbridge: -------------------------------------------------------------------------------- 1 | # Quantum Linux Bridge plugin 2 | # --------------------------- 3 | 4 | # Save trace setting 5 | MY_XTRACE=$(set +o | grep xtrace) 6 | set +o xtrace 7 | 8 | function is_quantum_ovs_base_plugin() { 9 | # linuxbridge doesn't use OVS 10 | return 1 11 | } 12 | 13 | function quantum_plugin_create_nova_conf() { 14 | NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} 15 | } 16 | 17 | function quantum_plugin_install_agent_packages() { 18 | install_package bridge-utils 19 | } 20 | 21 | function quantum_plugin_configure_common() { 22 | Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge 23 | Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini 24 | Q_DB_NAME="quantum_linux_bridge" 25 | Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" 26 | } 27 | 28 | function quantum_plugin_configure_debug_command() { 29 | iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT 
external_network_bridge 30 | } 31 | 32 | function quantum_plugin_configure_dhcp_agent() { 33 | : 34 | } 35 | 36 | function quantum_plugin_configure_l3_agent() { 37 | iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge 38 | } 39 | 40 | function quantum_plugin_configure_plugin_agent() { 41 | # Setup physical network interface mappings. Override 42 | # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more 43 | # complex physical network configurations. 44 | if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then 45 | LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE 46 | fi 47 | if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then 48 | iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS 49 | fi 50 | AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" 51 | } 52 | 53 | function quantum_plugin_configure_service() { 54 | if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then 55 | iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan 56 | else 57 | echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." 58 | fi 59 | 60 | # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` 61 | # for more complex physical network configurations. 
62 | if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then 63 | LB_VLAN_RANGES=$PHYSICAL_NETWORK 64 | if [[ "$TENANT_VLAN_RANGE" != "" ]]; then 65 | LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE 66 | fi 67 | fi 68 | if [[ "$LB_VLAN_RANGES" != "" ]]; then 69 | iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES 70 | fi 71 | } 72 | 73 | function quantum_plugin_setup_interface_driver() { 74 | local conf_file=$1 75 | iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver 76 | } 77 | 78 | # Restore xtrace 79 | $MY_XTRACE 80 | -------------------------------------------------------------------------------- /lib/quantum_plugins/openvswitch: -------------------------------------------------------------------------------- 1 | # Quantum Open vSwitch plugin 2 | # --------------------------- 3 | 4 | # Save trace setting 5 | MY_XTRACE=$(set +o | grep xtrace) 6 | set +o xtrace 7 | 8 | source $TOP_DIR/lib/quantum_plugins/ovs_base 9 | 10 | function quantum_plugin_create_nova_conf() { 11 | NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} 12 | if [ "$VIRT_DRIVER" = 'xenserver' ]; then 13 | iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver 14 | iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE 15 | fi 16 | } 17 | 18 | function quantum_plugin_install_agent_packages() { 19 | _quantum_ovs_base_install_agent_packages 20 | } 21 | 22 | function quantum_plugin_configure_common() { 23 | Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch 24 | Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini 25 | Q_DB_NAME="ovs_quantum" 26 | Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" 27 | } 28 | 29 | function quantum_plugin_configure_debug_command() { 30 | _quantum_ovs_base_configure_debug_command 31 | } 32 | 33 | function quantum_plugin_configure_dhcp_agent() { 34 | : 35 | } 36 | 37 | 
function quantum_plugin_configure_l3_agent() { 38 | _quantum_ovs_base_configure_l3_agent 39 | } 40 | 41 | function quantum_plugin_configure_plugin_agent() { 42 | # Setup integration bridge 43 | OVS_BRIDGE=${OVS_BRIDGE:-br-int} 44 | _quantum_ovs_base_setup_bridge $OVS_BRIDGE 45 | 46 | # Setup agent for tunneling 47 | if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then 48 | # Verify tunnels are supported 49 | # REVISIT - also check kernel module support for GRE and patch ports 50 | OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` 51 | if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then 52 | echo "You are running OVS version $OVS_VERSION." 53 | echo "OVS 1.4+ is required for tunneling between multiple hosts." 54 | exit 1 55 | fi 56 | iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True 57 | iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP 58 | fi 59 | 60 | # Setup physical network bridge mappings. Override 61 | # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more 62 | # complex physical network configurations. 63 | if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then 64 | OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE 65 | 66 | # Configure bridge manually with physical interface as port for multi-node 67 | sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE 68 | fi 69 | if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then 70 | iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS 71 | fi 72 | AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" 73 | 74 | if [ "$VIRT_DRIVER" = 'xenserver' ]; then 75 | # Nova will always be installed along with quantum for a domU 76 | # devstack install, so it should be safe to rely on nova.conf 77 | # for xenapi configuration. 
78 | Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $NOVA_CONF" 79 | # Under XS/XCP, the ovs agent needs to target the dom0 80 | # integration bridge. This is enabled by using a root wrapper 81 | # that executes commands on dom0 via a XenAPI plugin. 82 | iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" 83 | 84 | # FLAT_NETWORK_BRIDGE is the dom0 integration bridge. To 85 | # ensure the bridge lacks direct connectivity, set 86 | # VM_VLAN=-1;VM_DEV=invalid in localrc 87 | iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE 88 | 89 | # The ovs agent needs to ensure that the ports associated with 90 | # a given network share the same local vlan tag. On 91 | # single-node XS/XCP, this requires monitoring both the dom0 92 | # bridge, where VM's are attached, and the domU bridge, where 93 | # dhcp servers are attached. 94 | if is_service_enabled q-dhcp; then 95 | iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE 96 | # DomU will use the regular rootwrap 97 | iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND" 98 | # Plug the vm interface into the domU integration bridge. 99 | sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT 100 | sudo ip link set $OVS_BRIDGE up 101 | # Assign the VM IP only if it has been set explicitly 102 | if [[ "$VM_IP" != "" ]]; then 103 | sudo ip addr add $VM_IP dev $OVS_BRIDGE 104 | fi 105 | sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT 106 | fi 107 | fi 108 | } 109 | 110 | function quantum_plugin_configure_service() { 111 | if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then 112 | iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre 113 | iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES 114 | elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then 115 | iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan 116 | else 117 | echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." 
118 | fi 119 | 120 | # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` 121 | # for more complex physical network configurations. 122 | if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then 123 | OVS_VLAN_RANGES=$PHYSICAL_NETWORK 124 | if [[ "$TENANT_VLAN_RANGE" != "" ]]; then 125 | OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE 126 | fi 127 | fi 128 | if [[ "$OVS_VLAN_RANGES" != "" ]]; then 129 | iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES 130 | fi 131 | 132 | # Enable tunnel networks if selected 133 | if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then 134 | iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True 135 | fi 136 | } 137 | 138 | function quantum_plugin_setup_interface_driver() { 139 | local conf_file=$1 140 | iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver 141 | } 142 | 143 | # Restore xtrace 144 | $MY_XTRACE 145 | -------------------------------------------------------------------------------- /lib/quantum_plugins/ovs_base: -------------------------------------------------------------------------------- 1 | # common functions for ovs based plugin 2 | # ------------------------------------- 3 | 4 | # Save trace setting 5 | MY_XTRACE=$(set +o | grep xtrace) 6 | set +o xtrace 7 | 8 | function is_quantum_ovs_base_plugin() { 9 | # Yes, we use OVS. 10 | return 0 11 | } 12 | 13 | function _quantum_ovs_base_setup_bridge() { 14 | local bridge=$1 15 | quantum-ovs-cleanup 16 | sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge 17 | sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge 18 | } 19 | 20 | function _quantum_ovs_base_install_agent_packages() { 21 | local kernel_version 22 | # Install deps 23 | # FIXME add to ``files/apts/quantum``, but don't install if not needed! 
24 | if is_ubuntu; then 25 | kernel_version=`cat /proc/version | cut -d " " -f3` 26 | install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version 27 | else 28 | ### FIXME(dtroyer): Find RPMs for OpenVSwitch 29 | echo "OpenVSwitch packages need to be located" 30 | # Fedora does not started OVS by default 31 | restart_service openvswitch 32 | fi 33 | } 34 | 35 | function _quantum_ovs_base_configure_debug_command() { 36 | iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE 37 | } 38 | 39 | function _quantum_ovs_base_configure_l3_agent() { 40 | iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE 41 | 42 | quantum-ovs-cleanup 43 | sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE 44 | # ensure no IP is configured on the public bridge 45 | sudo ip addr flush dev $PUBLIC_BRIDGE 46 | } 47 | 48 | # Restore xtrace 49 | $MY_XTRACE 50 | -------------------------------------------------------------------------------- /lib/quantum_plugins/ryu: -------------------------------------------------------------------------------- 1 | # Quantum Ryu plugin 2 | # ------------------ 3 | 4 | # Save trace setting 5 | MY_XTRACE=$(set +o | grep xtrace) 6 | set +o xtrace 7 | 8 | source $TOP_DIR/lib/quantum_plugins/ovs_base 9 | source $TOP_DIR/lib/quantum_thirdparty/ryu # for configuration value 10 | 11 | function quantum_plugin_create_nova_conf() { 12 | NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} 13 | iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" 14 | } 15 | 16 | function quantum_plugin_install_agent_packages() { 17 | _quantum_ovs_base_install_agent_packages 18 | 19 | # quantum_ryu_agent requires ryu module 20 | install_ryu 21 | } 22 | 23 | function quantum_plugin_configure_common() { 24 | Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu 25 | Q_PLUGIN_CONF_FILENAME=ryu.ini 26 | Q_DB_NAME="ovs_quantum" 27 | 
Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" 28 | } 29 | 30 | function quantum_plugin_configure_debug_command() { 31 | _quantum_ovs_base_configure_debug_command 32 | iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT 33 | } 34 | 35 | function quantum_plugin_configure_dhcp_agent() { 36 | iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT 37 | } 38 | 39 | function quantum_plugin_configure_l3_agent() { 40 | iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT 41 | _quantum_ovs_base_configure_l3_agent 42 | } 43 | 44 | function quantum_plugin_configure_plugin_agent() { 45 | # Set up integration bridge 46 | OVS_BRIDGE=${OVS_BRIDGE:-br-int} 47 | _quantum_ovs_base_setup_bridge $OVS_BRIDGE 48 | if [ -n "$RYU_INTERNAL_INTERFACE" ]; then 49 | sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE 50 | fi 51 | iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $OVS_BRIDGE 52 | AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" 53 | } 54 | 55 | function quantum_plugin_configure_service() { 56 | iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT 57 | } 58 | 59 | function quantum_plugin_setup_interface_driver() { 60 | local conf_file=$1 61 | iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver 62 | iniset $conf_file DEFAULT ovs_use_veth True 63 | } 64 | 65 | # Restore xtrace 66 | $MY_XTRACE 67 | -------------------------------------------------------------------------------- /lib/quantum_thirdparty/README.md: -------------------------------------------------------------------------------- 1 | Quantum third party specific files 2 | ================================== 3 | Some Quantum plugins require third party programs to function. 4 | The files under the directory, ``lib/quantum_thirdparty/``, will be used 5 | when their service are enabled. 
6 | Third party program specific configuration variables should be in this file. 7 | 8 | * filename: ```` 9 | * The corresponding file name should be same to service name, ````. 10 | 11 | functions 12 | --------- 13 | ``lib/quantum`` calls the following functions when the ```` is enabled 14 | 15 | functions to be implemented 16 | * ``configure_``: 17 | set config files, create data dirs, etc 18 | e.g. 19 | sudo python setup.py deploy 20 | iniset $XXXX_CONF... 21 | 22 | * ``init_``: 23 | initialize databases, etc 24 | 25 | * ``install_``: 26 | collect source and prepare 27 | e.g. 28 | git clone xxx 29 | 30 | * ``start_``: 31 | start running processes, including screen 32 | e.g. 33 | screen_it XXXX "cd $XXXXY_DIR && $XXXX_DIR/bin/XXXX-bin" 34 | 35 | * ``stop_``: 36 | stop running processes (non-screen) 37 | -------------------------------------------------------------------------------- /lib/quantum_thirdparty/bigswitch_floodlight: -------------------------------------------------------------------------------- 1 | # Big Switch/FloodLight OpenFlow Controller 2 | # ------------------------------------------ 3 | 4 | # Save trace setting 5 | MY_XTRACE=$(set +o | grep xtrace) 6 | set +o xtrace 7 | 8 | BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} 9 | BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} 10 | OVS_BRIDGE=${OVS_BRIDGE:-br-int} 11 | 12 | function configure_bigswitch_floodlight() { 13 | : 14 | } 15 | 16 | function init_bigswitch_floodlight() { 17 | install_quantum_agent_packages 18 | 19 | echo -n "Installing OVS managed by the openflow controllers:" 20 | echo ${BS_FL_CONTROLLERS_PORT} 21 | 22 | # Create local OVS bridge and configure it 23 | sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE} 24 | sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE} 25 | sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} 26 | 27 | ctrls= 28 | for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '` 29 | do 30 | ctrl=${ctrl%:*} 31 | 
ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" 32 | done 33 | echo "Adding Network conttrollers: " ${ctrls} 34 | sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} 35 | } 36 | 37 | function install_bigswitch_floodlight() { 38 | : 39 | } 40 | 41 | function start_bigswitch_floodlight() { 42 | : 43 | } 44 | 45 | function stop_bigswitch_floodlight() { 46 | : 47 | } 48 | 49 | # Restore xtrace 50 | $MY_XTRACE 51 | -------------------------------------------------------------------------------- /lib/quantum_thirdparty/ryu: -------------------------------------------------------------------------------- 1 | # Ryu OpenFlow Controller 2 | # ----------------------- 3 | 4 | # Save trace setting 5 | MY_XTRACE=$(set +o | grep xtrace) 6 | set +o xtrace 7 | 8 | 9 | RYU_DIR=$DEST/ryu 10 | # Ryu API Host 11 | RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} 12 | # Ryu API Port 13 | RYU_API_PORT=${RYU_API_PORT:-8080} 14 | # Ryu OFP Host 15 | RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} 16 | # Ryu OFP Port 17 | RYU_OFP_PORT=${RYU_OFP_PORT:-6633} 18 | # Ryu Applications 19 | RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} 20 | # Ryu configuration 21 | RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-" 22 | --app_lists=$RYU_APPS 23 | --wsapi_host=$RYU_API_HOST 24 | --wsapi_port=$RYU_API_PORT 25 | --ofp_listen_host=$RYU_OFP_HOST 26 | --ofp_tcp_listen_port=$RYU_OFP_PORT 27 | --quantum_url=http://$Q_HOST:$Q_PORT 28 | --quantum_admin_username=$Q_ADMIN_USERNAME 29 | --quantum_admin_password=$SERVICE_PASSWORD 30 | --quantum_admin_tenant_name=$SERVICE_TENANT_NAME 31 | --quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0 32 | --quantum_auth_strategy=$Q_AUTH_STRATEGY 33 | --quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT 34 | "} 35 | 36 | function configure_ryu() { 37 | setup_develop $RYU_DIR 38 | } 39 | 40 | function init_ryu() { 41 | RYU_CONF_DIR=/etc/ryu 42 | if [[ ! 
-d $RYU_CONF_DIR ]]; then 43 | sudo mkdir -p $RYU_CONF_DIR 44 | fi 45 | sudo chown $STACK_USER $RYU_CONF_DIR 46 | RYU_CONF=$RYU_CONF_DIR/ryu.conf 47 | sudo rm -rf $RYU_CONF 48 | 49 | echo "${RYU_CONF_CONTENTS}" > $RYU_CONF 50 | } 51 | 52 | # install_ryu can be called multiple times as quantum_plugins/ryu may call 53 | # this function for quantum-ryu-agent 54 | # Make this function idempotent and avoid cloning the same repo many times 55 | # with RECLONE=yes 56 | _RYU_INSTALLED=${_RYU_INSTALLED:-False} 57 | function install_ryu() { 58 | if [[ "$_RYU_INSTALLED" == "False" ]]; then 59 | git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH 60 | _RYU_INSTALLED=True 61 | fi 62 | } 63 | 64 | function start_ryu() { 65 | screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" 66 | } 67 | 68 | function stop_ryu() { 69 | : 70 | } 71 | 72 | # Restore xtrace 73 | $MY_XTRACE 74 | -------------------------------------------------------------------------------- /lib/rpc_backend: -------------------------------------------------------------------------------- 1 | # lib/rpc_backend 2 | # Interface for interacting with different rpc backends 3 | # rpc backend settings 4 | 5 | # Dependencies: 6 | # ``functions`` file 7 | # ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used 8 | 9 | # ``stack.sh`` calls the entry points in this order: 10 | # 11 | # check_rpc_backend 12 | # install_rpc_backend 13 | # restart_rpc_backend 14 | # iniset_rpc_backend 15 | 16 | # Save trace setting 17 | XTRACE=$(set +o | grep xtrace) 18 | set +o xtrace 19 | 20 | # Entry Points 21 | # ------------ 22 | 23 | # Make sure we only have one rpc backend enabled. 24 | # Also check the specified rpc backend is available on your platform. 
25 | function check_rpc_backend() { 26 | local rpc_backend_cnt=0 27 | for svc in qpid zeromq rabbit; do 28 | is_service_enabled $svc && 29 | ((rpc_backend_cnt++)) 30 | done 31 | if [ "$rpc_backend_cnt" -gt 1 ]; then 32 | echo "ERROR: only one rpc backend may be enabled," 33 | echo " set only one of 'rabbit', 'qpid', 'zeromq'" 34 | echo " via ENABLED_SERVICES." 35 | elif [ "$rpc_backend_cnt" == 0 ]; then 36 | echo "ERROR: at least one rpc backend must be enabled," 37 | echo " set one of 'rabbit', 'qpid', 'zeromq'" 38 | echo " via ENABLED_SERVICES." 39 | fi 40 | 41 | if is_service_enabled qpid && ! qpid_is_supported; then 42 | echo "Qpid support is not available for this version of your distribution." 43 | exit 1 44 | fi 45 | } 46 | 47 | # install rpc backend 48 | function install_rpc_backend() { 49 | if is_service_enabled rabbit; then 50 | # Install rabbitmq-server 51 | # the temp file is necessary due to LP: #878600 52 | tfile=$(mktemp) 53 | install_package rabbitmq-server > "$tfile" 2>&1 54 | cat "$tfile" 55 | rm -f "$tfile" 56 | elif is_service_enabled qpid; then 57 | if is_fedora; then 58 | install_package qpid-cpp-server-daemon 59 | elif is_ubuntu; then 60 | install_package qpidd 61 | else 62 | exit_distro_not_supported "qpid installation" 63 | fi 64 | elif is_service_enabled zeromq; then 65 | if is_fedora; then 66 | install_package zeromq python-zmq 67 | elif is_ubuntu; then 68 | install_package libzmq1 python-zmq 69 | elif is_suse; then 70 | install_package libzmq1 python-pyzmq 71 | else 72 | exit_distro_not_supported "zeromq installation" 73 | fi 74 | fi 75 | } 76 | 77 | # restart the rpc backend 78 | function restart_rpc_backend() { 79 | if is_service_enabled rabbit; then 80 | # Start rabbitmq-server 81 | echo_summary "Starting RabbitMQ" 82 | if is_fedora || is_suse; then 83 | # service is not started by default 84 | restart_service rabbitmq-server 85 | fi 86 | # change the rabbit password since the default is "guest" 87 | sudo rabbitmqctl change_password 
guest $RABBIT_PASSWORD 88 | elif is_service_enabled qpid; then 89 | echo_summary "Starting qpid" 90 | restart_service qpidd 91 | fi 92 | } 93 | 94 | # iniset configuration 95 | function iniset_rpc_backend() { 96 | local package=$1 97 | local file=$2 98 | local section=$3 99 | if is_service_enabled zeromq; then 100 | iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq 101 | elif is_service_enabled qpid; then 102 | iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid 103 | elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then 104 | iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu 105 | iniset $file $section rabbit_host $RABBIT_HOST 106 | iniset $file $section rabbit_password $RABBIT_PASSWORD 107 | fi 108 | } 109 | 110 | # Check if qpid can be used on the current distro. 111 | # qpid_is_supported 112 | function qpid_is_supported() { 113 | if [[ -z "$DISTRO" ]]; then 114 | GetDistro 115 | fi 116 | 117 | # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is 118 | # not in openSUSE either right now. 119 | ( ! 
([[ "$DISTRO" = "oneiric" ]] || is_suse) ) 120 | } 121 | 122 | # Restore xtrace 123 | $XTRACE 124 | -------------------------------------------------------------------------------- /lib/template: -------------------------------------------------------------------------------- 1 | # lib/template 2 | # Functions to control the configuration and operation of the XXXX service 3 | # 4 | 5 | # Dependencies: 6 | # ``functions`` file 7 | # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined 8 | # 9 | 10 | # ``stack.sh`` calls the entry points in this order: 11 | # 12 | # install_XXXX 13 | # configure_XXXX 14 | # init_XXXX 15 | # start_XXXX 16 | # stop_XXXX 17 | # cleanup_XXXX 18 | 19 | # Save trace setting 20 | XTRACE=$(set +o | grep xtrace) 21 | set +o xtrace 22 | 23 | 24 | # Defaults 25 | # -------- 26 | 27 | # 28 | 29 | # Set up default directories 30 | XXXX_DIR=$DEST/XXXX 31 | XXX_CONF_DIR=/etc/XXXX 32 | 33 | 34 | # Entry Points 35 | # ------------ 36 | 37 | # cleanup_XXXX() - Remove residual data files, anything left over from previous 38 | # runs that a clean run would need to clean up 39 | function cleanup_XXXX() { 40 | # kill instances (nova) 41 | # delete image files (glance) 42 | # This function intentionally left blank 43 | : 44 | } 45 | 46 | # configure_XXXX() - Set config files, create data dirs, etc 47 | function configure_XXXX() { 48 | # sudo python setup.py deploy 49 | # iniset $XXXX_CONF ... 50 | # This function intentionally left blank 51 | : 52 | } 53 | 54 | # init_XXXX() - Initialize databases, etc. 
55 | function init_XXXX() { 56 | # clean up from previous (possibly aborted) runs 57 | # create required data files 58 | : 59 | } 60 | 61 | # install_XXXX() - Collect source and prepare 62 | function install_XXXX() { 63 | # git clone xxx 64 | : 65 | } 66 | 67 | # start_XXXX() - Start running processes, including screen 68 | function start_XXXX() { 69 | # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" 70 | : 71 | } 72 | 73 | # stop_XXXX() - Stop running processes (non-screen) 74 | function stop_XXXX() { 75 | # FIXME(dtroyer): stop only our screen screen window? 76 | : 77 | } 78 | 79 | # Restore xtrace 80 | $XTRACE 81 | -------------------------------------------------------------------------------- /openrc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # source openrc [username] [tenantname] 4 | # 5 | # Configure a set of credentials for $TENANT/$USERNAME: 6 | # Set OS_TENANT_NAME to override the default tenant 'demo' 7 | # Set OS_USERNAME to override the default user name 'demo' 8 | # Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' 9 | 10 | # NOTE: support for the old NOVA_* novaclient environment variables has 11 | # been removed. 12 | 13 | if [[ -n "$1" ]]; then 14 | OS_USERNAME=$1 15 | fi 16 | if [[ -n "$2" ]]; then 17 | OS_TENANT_NAME=$2 18 | fi 19 | 20 | # Find the other rc files 21 | RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) 22 | 23 | # Import common functions 24 | source $RC_DIR/functions 25 | 26 | # Load local configuration 27 | source $RC_DIR/stackrc 28 | 29 | # Load the last env variables if available 30 | if [[ -r $TOP_DIR/.stackenv ]]; then 31 | source $TOP_DIR/.stackenv 32 | fi 33 | 34 | # Get some necessary configuration 35 | source $RC_DIR/lib/tls 36 | 37 | # The introduction of Keystone to the OpenStack ecosystem has standardized the 38 | # term **tenant** as the entity that owns resources. 
In some places references 39 | # still exist to the original Nova term **project** for this use. Also, 40 | # **tenant_name** is preferred to **tenant_id**. 41 | export OS_TENANT_NAME=${OS_TENANT_NAME:-demo} 42 | 43 | # In addition to the owning entity (tenant), nova stores the entity performing 44 | # the action as the **user**. 45 | export OS_USERNAME=${OS_USERNAME:-demo} 46 | 47 | # With Keystone you pass the keystone password instead of an api key. 48 | # Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs 49 | # or NOVA_PASSWORD. 50 | export OS_PASSWORD=${ADMIN_PASSWORD:-secrete} 51 | 52 | # Don't put the key into a keyring by default. Testing for development is much 53 | # easier with this off. 54 | export OS_NO_CACHE=${OS_NO_CACHE:-1} 55 | 56 | # Set api HOST_IP endpoint. SERVICE_HOST may also be used to specify the endpoint, 57 | # which is convenient for some localrc configurations. 58 | HOST_IP=${HOST_IP:-127.0.0.1} 59 | SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} 60 | SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} 61 | 62 | # Some exercises call glance directly. On a single-node installation, Glance 63 | # should be listening on HOST_IP. If it's running elsewhere, it can be set here 64 | GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} 65 | 66 | # Authenticating against an OpenStack cloud using Keystone returns a **Token** 67 | # and **Service Catalog**. The catalog contains the endpoints for all services 68 | # the user/tenant has access to - including nova, glance, keystone, swift, ... 69 | # We currently recommend using the 2.0 *identity api*. 70 | # 71 | # *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0. We 72 | # will use the 1.1 *compute api* 73 | export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0 74 | 75 | # Set the pointer to our CA certificate chain. Harmless if TLS is not used. 76 | export OS_CACERT=$INT_CA_DIR/ca-chain.pem 77 | 78 | # Currently novaclient needs you to specify the *compute api* version. 
This 79 | # needs to match the config of your catalog returned by Keystone. 80 | export NOVA_VERSION=${NOVA_VERSION:-1.1} 81 | # In the future this will change names: 82 | export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} 83 | 84 | # set log level to DEBUG (helps debug issues) 85 | # export KEYSTONECLIENT_DEBUG=1 86 | # export NOVACLIENT_DEBUG=1 87 | -------------------------------------------------------------------------------- /rejoin-stack.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | # This script rejoins an existing screen, or re-creates a 4 | # screen session from a previous run of stack.sh. 5 | 6 | TOP_DIR=`dirname $0` 7 | 8 | # if screenrc exists, run screen 9 | if [[ -e $TOP_DIR/stack-screenrc ]]; then 10 | if screen -ls | egrep -q "[0-9].stack"; then 11 | echo "Attaching to already started screen session.." 12 | exec screen -r stack 13 | fi 14 | exec screen -c $TOP_DIR/stack-screenrc 15 | fi 16 | 17 | echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?" 18 | exit 1 19 | -------------------------------------------------------------------------------- /samples/local.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Sample ``local.sh`` for user-configurable tasks to run automatically 4 | # at the successful conclusion of ``stack.sh``. 5 | 6 | # NOTE: Copy this file to the root ``devstack`` directory for it to 7 | # work properly. 8 | 9 | # This is a collection of some of the things we have found to be useful to run 10 | # after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces. 11 | # These should be considered as samples and are unsupported DevStack code. 
12 | 13 | 14 | # Keep track of the devstack directory 15 | TOP_DIR=$(cd $(dirname "$0") && pwd) 16 | 17 | # Import common functions 18 | source $TOP_DIR/functions 19 | 20 | # Use openrc + stackrc + localrc for settings 21 | source $TOP_DIR/stackrc 22 | 23 | # Destination path for installation ``DEST`` 24 | DEST=${DEST:-/opt/stack} 25 | 26 | 27 | # Import ssh keys 28 | # --------------- 29 | 30 | # Import keys from the current user into the default OpenStack user (usually 31 | # ``demo``) 32 | 33 | # Get OpenStack auth 34 | source $TOP_DIR/openrc 35 | 36 | # Add first keypair found in localhost:$HOME/.ssh 37 | for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do 38 | if [[ -r $i ]]; then 39 | nova keypair-add --pub_key=$i `hostname` 40 | break 41 | fi 42 | done 43 | 44 | 45 | # Create A Flavor 46 | # --------------- 47 | 48 | # Get OpenStack admin auth 49 | source $TOP_DIR/openrc admin admin 50 | 51 | # Name of new flavor 52 | # set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` 53 | MI_NAME=m1.micro 54 | 55 | # Create micro flavor if not present 56 | if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then 57 | nova flavor-create $MI_NAME 6 128 0 1 58 | fi 59 | 60 | 61 | # Other Uses 62 | # ---------- 63 | 64 | # Add tcp/22 and icmp to default security group 65 | nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 66 | nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 67 | 68 | -------------------------------------------------------------------------------- /samples/localrc: -------------------------------------------------------------------------------- 1 | # Sample ``localrc`` for user-configurable variables in ``stack.sh`` 2 | 3 | # NOTE: Copy this file to the root ``devstack`` directory for it to 4 | # work properly. 5 | 6 | # ``localrc`` is a user-maintained settings file that is sourced from ``stackrc``. 7 | # This gives it the ability to override any variables set in ``stackrc``. 
8 | # Also, most of the settings in ``stack.sh`` are written to only be set if no 9 | # value has already been set; this lets ``localrc`` effectively override the 10 | # default values. 11 | 12 | # This is a collection of some of the settings we have found to be useful 13 | # in our DevStack development environments. Additional settings are described 14 | # in http://devstack.org/localrc.html 15 | # These should be considered as samples and are unsupported DevStack code. 16 | 17 | 18 | # Minimal Contents 19 | # ---------------- 20 | 21 | # While ``stack.sh`` is happy to run without ``localrc``, devlife is better when 22 | # there are a few minimal variables set: 23 | 24 | # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter 25 | # values for them by ``stack.sh`` and they will be added to ``localrc``. 26 | ADMIN_PASSWORD=nomoresecrete 27 | MYSQL_PASSWORD=stackdb 28 | RABBIT_PASSWORD=stackqueue 29 | SERVICE_PASSWORD=$ADMIN_PASSWORD 30 | 31 | # ``HOST_IP`` should be set manually for best results if the NIC configuration 32 | # of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the 33 | # public interface. It is auto-detected in ``stack.sh`` but often is indeterminate 34 | # on later runs due to the IP moving from an Ethernet interface to a bridge on 35 | # the host. Setting it here also makes it available for ``openrc`` to include 36 | # when setting ``OS_AUTH_URL``. 37 | # ``HOST_IP`` is not set by default. 38 | #HOST_IP=w.x.y.z 39 | 40 | 41 | # Logging 42 | # ------- 43 | 44 | # By default ``stack.sh`` output only goes to the terminal where it runs. It can 45 | # be configured to additionally log to a file by setting ``LOGFILE`` to the full 46 | # path of the destination log file. A timestamp will be appended to the given name. 47 | LOGFILE=$DEST/logs/stack.sh.log 48 | 49 | # Old log files are automatically removed after 7 days to keep things neat. Change 50 | # the number of days by setting ``LOGDAYS``. 
51 | LOGDAYS=2 52 | 53 | # Nova logs will be colorized if ``SYSLOG`` is not set; turn this off by setting 54 | # ``LOG_COLOR`` false. 55 | #LOG_COLOR=False 56 | 57 | 58 | # Using milestone-proposed branches 59 | # --------------------------------- 60 | 61 | # Uncomment these to grab the milestone-proposed branches from the repos: 62 | #CINDER_BRANCH=milestone-proposed 63 | #GLANCE_BRANCH=milestone-proposed 64 | #HORIZON_BRANCH=milestone-proposed 65 | #KEYSTONE_BRANCH=milestone-proposed 66 | #KEYSTONECLIENT_BRANCH=milestone-proposed 67 | #NOVA_BRANCH=milestone-proposed 68 | #NOVACLIENT_BRANCH=milestone-proposed 69 | #QUANTUM_BRANCH=milestone-proposed 70 | #SWIFT_BRANCH=milestone-proposed 71 | 72 | 73 | # Swift 74 | # ----- 75 | 76 | # Swift is now used as the back-end for the S3-like object store. If Nova's 77 | # objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will NOT 78 | # run if Swift is enabled. Setting the hash value is required and you will 79 | # be prompted for it if Swift is enabled so just set it to something already: 80 | SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 81 | 82 | # For development purposes the default of 3 replicas is usually not required. 83 | # Set this to 1 to save some resources: 84 | SWIFT_REPLICAS=1 85 | 86 | # The data for Swift is stored in the source tree by default (``$DEST/swift/data``) 87 | # and can be moved by setting ``SWIFT_DATA_DIR``. The directory will be created 88 | # if it does not exist. 89 | SWIFT_DATA_DIR=$DEST/data 90 | -------------------------------------------------------------------------------- /tools/build_bm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **build_bm.sh** 4 | 5 | # Build an OpenStack install on a bare metal machine. 
6 | set +x 7 | 8 | # Keep track of the current directory 9 | TOOLS_DIR=$(cd $(dirname "$0") && pwd) 10 | TOP_DIR=$(cd $TOOLS_DIR/..; pwd) 11 | 12 | # Import common functions 13 | source $TOP_DIR/functions 14 | 15 | # Source params 16 | source ./stackrc 17 | 18 | # Param string to pass to stack.sh. Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova" 19 | STACKSH_PARAMS=${STACKSH_PARAMS:-} 20 | 21 | # Option to use the version of devstack on which we are currently working 22 | USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1} 23 | 24 | # Configure the runner 25 | RUN_SH=`mktemp` 26 | cat > $RUN_SH <$CFG <$PXEDIR/stack-initrd.gz 68 | fi 69 | cp -pu $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu 70 | 71 | if [ ! -r $PXEDIR/vmlinuz-*-generic ]; then 72 | MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX` 73 | mount -t ext4 -o loop $PXEDIR/stack-initrd.img $MNTDIR 74 | 75 | if [ ! -r $MNTDIR/boot/vmlinuz-*-generic ]; then 76 | echo "No kernel found" 77 | umount $MNTDIR 78 | rmdir $MNTDIR 79 | exit 1 80 | else 81 | cp -pu $MNTDIR/boot/vmlinuz-*-generic $PXEDIR 82 | fi 83 | umount $MNTDIR 84 | rmdir $MNTDIR 85 | fi 86 | 87 | # Get generic kernel version 88 | KNAME=`basename $PXEDIR/vmlinuz-*-generic` 89 | KVER=${KNAME#vmlinuz-} 90 | cp -pu $PXEDIR/vmlinuz-$KVER $DEST_DIR/ubuntu 91 | cat >>$CFG <>$CFG <>$CFG < $MNT_DIR/etc/network/interfaces <$MNT_DIR/etc/hostname 148 | echo "127.0.0.1 localhost ramstack" >$MNT_DIR/etc/hosts 149 | 150 | # Configure the runner 151 | RUN_SH=$MNT_DIR/$DEST/run.sh 152 | cat > $RUN_SH < $DEST/run.sh.log 165 | echo >> $DEST/run.sh.log 166 | echo >> $DEST/run.sh.log 167 | echo "All done! Time to start clicking." 
>> $DEST/run.sh.log 168 | EOF 169 | 170 | # Make the run.sh executable 171 | chmod 755 $RUN_SH 172 | chroot $MNT_DIR chown stack $DEST/run.sh 173 | 174 | umount $MNT_DIR/dev 175 | umount $MNT_DIR 176 | rmdir $MNT_DIR 177 | mv $DEST_FILE_TMP $DEST_FILE 178 | rm -f $DEST_FILE_TMP 179 | 180 | trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT 181 | -------------------------------------------------------------------------------- /tools/build_usb_boot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # **build_usb_boot.sh** 4 | 5 | # Create a syslinux boot environment 6 | # 7 | # build_usb_boot.sh destdev 8 | # 9 | # Assumes syslinux is installed 10 | # Needs to run as root 11 | 12 | DEST_DIR=${1:-/tmp/syslinux-boot} 13 | PXEDIR=${PXEDIR:-/opt/ramstack/pxe} 14 | 15 | # Clean up any resources that may be in use 16 | cleanup() { 17 | set +o errexit 18 | 19 | # Mop up temporary files 20 | if [ -n "$DEST_DEV" ]; then 21 | umount $DEST_DIR 22 | rmdir $DEST_DIR 23 | fi 24 | if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then 25 | umount $MNTDIR 26 | rmdir $MNTDIR 27 | fi 28 | 29 | # Kill ourselves to signal any calling process 30 | trap 2; kill -2 $$ 31 | } 32 | 33 | trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT 34 | 35 | # Keep track of the current directory 36 | TOOLS_DIR=$(cd $(dirname "$0") && pwd) 37 | TOP_DIR=`cd $TOOLS_DIR/..; pwd` 38 | 39 | if [ -b $DEST_DIR ]; then 40 | # We have a block device, install syslinux and mount it 41 | DEST_DEV=$DEST_DIR 42 | DEST_DIR=`mktemp -d --tmpdir mntXXXXXX` 43 | mount $DEST_DEV $DEST_DIR 44 | 45 | if [ ! -d $DEST_DIR/syslinux ]; then 46 | mkdir -p $DEST_DIR/syslinux 47 | fi 48 | 49 | # Install syslinux on the device 50 | syslinux --install --directory syslinux $DEST_DEV 51 | else 52 | # We have a directory (for sanity checking output) 53 | DEST_DEV="" 54 | if [ ! 
-d $DEST_DIR/syslinux ]; then 55 | mkdir -p $DEST_DIR/syslinux 56 | fi 57 | fi 58 | 59 | # Get some more stuff from syslinux 60 | for i in memdisk menu.c32; do 61 | cp -pu /usr/lib/syslinux/$i $DEST_DIR/syslinux 62 | done 63 | 64 | CFG=$DEST_DIR/syslinux/syslinux.cfg 65 | cat >$CFG <$PXEDIR/stack-initrd.gz 87 | fi 88 | cp -pu $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu 89 | 90 | if [ ! -r $PXEDIR/vmlinuz-*-generic ]; then 91 | MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX` 92 | mount -t ext4 -o loop $PXEDIR/stack-initrd.img $MNTDIR 93 | 94 | if [ ! -r $MNTDIR/boot/vmlinuz-*-generic ]; then 95 | echo "No kernel found" 96 | umount $MNTDIR 97 | rmdir $MNTDIR 98 | if [ -n "$DEST_DEV" ]; then 99 | umount $DEST_DIR 100 | rmdir $DEST_DIR 101 | fi 102 | exit 1 103 | else 104 | cp -pu $MNTDIR/boot/vmlinuz-*-generic $PXEDIR 105 | fi 106 | umount $MNTDIR 107 | rmdir $MNTDIR 108 | fi 109 | 110 | # Get generic kernel version 111 | KNAME=`basename $PXEDIR/vmlinuz-*-generic` 112 | KVER=${KNAME#vmlinuz-} 113 | cp -pu $PXEDIR/vmlinuz-$KVER $DEST_DIR/ubuntu 114 | cat >>$CFG <>$CFG <>$CFG < $STAGING_DIR/etc/sudoers.d/50_stack_sh ) 57 | 58 | # Copy over your ssh keys and env if desired 59 | cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh 60 | cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys 61 | cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig 62 | cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc 63 | cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc 64 | 65 | # Copy devstack 66 | rm -rf $STAGING_DIR/$DEST/devstack 67 | cp_it . 
$STAGING_DIR/$DEST/devstack 68 | 69 | # Give stack ownership over $DEST so it may do the work needed 70 | chroot $STAGING_DIR chown -R $STACK_USER $DEST 71 | 72 | # Unmount 73 | umount $STAGING_DIR 74 | -------------------------------------------------------------------------------- /tools/get_uec_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # **get_uec_image.sh** 4 | 5 | # Download and prepare Ubuntu UEC images 6 | 7 | CACHEDIR=${CACHEDIR:-/opt/stack/cache} 8 | ROOTSIZE=${ROOTSIZE:-2000M} 9 | 10 | # Keep track of the current directory 11 | TOOLS_DIR=$(cd $(dirname "$0") && pwd) 12 | TOP_DIR=$(cd $TOOLS_DIR/..; pwd) 13 | 14 | # Import common functions 15 | . $TOP_DIR/functions 16 | 17 | # Exit on error to stop unexpected errors 18 | set -o errexit 19 | set -o xtrace 20 | 21 | usage() { 22 | echo "Usage: $0 - Download and prepare Ubuntu UEC images" 23 | echo "" 24 | echo "$0 [-r rootsize] release imagefile [kernel]" 25 | echo "" 26 | echo "-r size - root fs size (min 2000MB)" 27 | echo "release - Ubuntu release: lucid - quantal" 28 | echo "imagefile - output image file" 29 | echo "kernel - output kernel" 30 | exit 1 31 | } 32 | 33 | # Clean up any resources that may be in use 34 | cleanup() { 35 | set +o errexit 36 | 37 | # Mop up temporary files 38 | if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then 39 | rm -f $IMG_FILE_TMP 40 | fi 41 | 42 | # Kill ourselves to signal any calling process 43 | trap 2; kill -2 $$ 44 | } 45 | 46 | while getopts hr: c; do 47 | case $c in 48 | h) usage 49 | ;; 50 | r) ROOTSIZE=$OPTARG 51 | ;; 52 | esac 53 | done 54 | shift `expr $OPTIND - 1` 55 | 56 | if [[ ! "$#" -eq "2" && ! 
"$#" -eq "3" ]]; then 57 | usage 58 | fi 59 | 60 | # Default args 61 | DIST_NAME=$1 62 | IMG_FILE=$2 63 | IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` 64 | KERNEL=$3 65 | 66 | case $DIST_NAME in 67 | quantal) ;; 68 | precise) ;; 69 | oneiric) ;; 70 | natty) ;; 71 | maverick) ;; 72 | lucid) ;; 73 | *) echo "Unknown release: $DIST_NAME" 74 | usage 75 | ;; 76 | esac 77 | 78 | trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT 79 | 80 | # Check dependencies 81 | if [ ! -x "`which qemu-img`" -o -z "`dpkg -l | grep cloud-utils`" ]; then 82 | # Missing KVM? 83 | apt_get install qemu-kvm cloud-utils 84 | fi 85 | 86 | # Find resize script 87 | RESIZE=`which resize-part-image || which uec-resize-image` 88 | if [ -z "$RESIZE" ]; then 89 | echo "resize tool from cloud-utils not found" 90 | exit 1 91 | fi 92 | 93 | # Get the UEC image 94 | UEC_NAME=$DIST_NAME-server-cloudimg-amd64 95 | if [ ! -d $CACHEDIR/$DIST_NAME ]; then 96 | mkdir -p $CACHEDIR/$DIST_NAME 97 | fi 98 | if [ ! -e $CACHEDIR/$DIST_NAME/$UEC_NAME.tar.gz ]; then 99 | (cd $CACHEDIR/$DIST_NAME && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz) 100 | (cd $CACHEDIR/$DIST_NAME && tar Sxvzf $UEC_NAME.tar.gz) 101 | fi 102 | 103 | $RESIZE $CACHEDIR/$DIST_NAME/$UEC_NAME.img ${ROOTSIZE} $IMG_FILE_TMP 104 | mv $IMG_FILE_TMP $IMG_FILE 105 | 106 | # Copy kernel to destination 107 | if [ -n "$KERNEL" ]; then 108 | cp -p $CACHEDIR/$DIST_NAME/*-vmlinuz-virtual $KERNEL 109 | fi 110 | 111 | trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT 112 | -------------------------------------------------------------------------------- /tools/info.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **info.sh** 4 | 5 | # Produce a report on the state of devstack installs 6 | # 7 | # Output fields are separated with '|' chars 8 | # Output types are git,localrc,os,pip,pkg: 9 | # 10 | # git||[] 11 | # localtc|= 12 | # os|= 13 | # pip|| 14 | # pkg|| 15 | 16 | function 
usage { 17 | echo "$0 - Report on the devstack configuration" 18 | echo "" 19 | echo "Usage: $0" 20 | exit 1 21 | } 22 | 23 | if [ "$1" = "-h" ]; then 24 | usage 25 | fi 26 | 27 | # Keep track of the current directory 28 | TOOLS_DIR=$(cd $(dirname "$0") && pwd) 29 | TOP_DIR=$(cd $TOOLS_DIR/..; pwd) 30 | cd $TOP_DIR 31 | 32 | # Import common functions 33 | source $TOP_DIR/functions 34 | 35 | # Source params 36 | source $TOP_DIR/stackrc 37 | 38 | DEST=${DEST:-/opt/stack} 39 | FILES=$TOP_DIR/files 40 | if [[ ! -d $FILES ]]; then 41 | echo "ERROR: missing devstack/files - did you grab more than just stack.sh?" 42 | exit 1 43 | fi 44 | 45 | 46 | # OS 47 | # -- 48 | 49 | # Determine what OS we're using 50 | GetDistro 51 | 52 | echo "os|distro=$DISTRO" 53 | echo "os|vendor=$os_VENDOR" 54 | echo "os|release=$os_RELEASE" 55 | if [ -n "$os_UPDATE" ]; then 56 | echo "os|version=$os_UPDATE" 57 | fi 58 | 59 | 60 | # Repos 61 | # ----- 62 | 63 | # git_report 64 | function git_report() { 65 | local dir=$1 66 | local proj ref branch head 67 | if [[ -d $dir/.git ]]; then 68 | pushd $dir >/dev/null 69 | proj=$(basename $dir) 70 | ref=$(git symbolic-ref HEAD) 71 | branch=${ref##refs/heads/} 72 | head=$(git show-branch --sha1-name $branch | cut -d' ' -f1) 73 | echo "git|${proj}|${branch}${head}" 74 | popd >/dev/null 75 | fi 76 | } 77 | 78 | for i in $DEST/*; do 79 | if [[ -d $i ]]; then 80 | git_report $i 81 | fi 82 | done 83 | 84 | 85 | # Packages 86 | # -------- 87 | 88 | # - We are going to check packages only for the services needed. 89 | # - We are parsing the packages files and detecting metadatas. 
90 | 91 | if is_ubuntu; then 92 | PKG_DIR=$FILES/apts 93 | elif is_fedora; then 94 | PKG_DIR=$FILES/rpms 95 | elif is_suse; then 96 | PKG_DIR=$FILES/rpms-suse 97 | else 98 | exit_distro_not_supported "list of packages" 99 | fi 100 | 101 | for p in $(get_packages $PKG_DIR); do 102 | if [[ "$os_PACKAGE" = "deb" ]]; then 103 | ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2) 104 | elif [[ "$os_PACKAGE" = "rpm" ]]; then 105 | ver=$(rpm -q --queryformat "%{VERSION}-%{RELEASE}\n" $p) 106 | else 107 | exit_distro_not_supported "finding version of a package" 108 | fi 109 | echo "pkg|${p}|${ver}" 110 | done 111 | 112 | 113 | # Pips 114 | # ---- 115 | 116 | CMD_PIP=$(get_pip_command) 117 | 118 | # Pip tells us what is currently installed 119 | FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX) 120 | $CMD_PIP freeze >$FREEZE_FILE 2>/dev/null 121 | 122 | # Loop through our requirements and look for matches 123 | while read line; do 124 | if [[ -n "$line" ]]; then 125 | if [[ "$line" =~ \+(.*)@(.*)#egg=(.*) ]]; then 126 | # Handle URLs 127 | p=${BASH_REMATCH[1]} 128 | ver=${BASH_REMATCH[2]} 129 | elif [[ "$line" =~ (.*)[=\<\>]=(.*) ]]; then 130 | # Normal pip packages 131 | p=${BASH_REMATCH[1]} 132 | ver=${BASH_REMATCH[2]} 133 | else 134 | # Unhandled format in freeze file 135 | #echo "unknown: $p" 136 | continue 137 | fi 138 | echo "pip|${p}|${ver}" 139 | else 140 | # No match in freeze file 141 | #echo "unknown: $p" 142 | continue 143 | fi 144 | done <$FREEZE_FILE 145 | 146 | rm $FREEZE_FILE 147 | 148 | 149 | # localrc 150 | # ------- 151 | 152 | # Dump localrc with 'localrc|' prepended and comments and passwords left out 153 | if [[ -r $TOP_DIR/localrc ]]; then 154 | sed -e ' 155 | /PASSWORD/d; 156 | /^#/d; 157 | s/^/localrc\|/; 158 | ' $TOP_DIR/localrc 159 | fi 160 | -------------------------------------------------------------------------------- /tools/install_openvpn.sh: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash 2 | 3 | # **install_openvpn.sh** 4 | 5 | # Install OpenVPN and generate required certificates 6 | # 7 | # install_openvpn.sh --client name 8 | # install_openvpn.sh --server [name] 9 | # 10 | # name is used on the CN of the generated cert, and the filename of 11 | # the configuration, certificate and key files. 12 | # 13 | # --server mode configures the host with a running OpenVPN server instance 14 | # --client mode creates a tarball of a client configuration for this server 15 | 16 | # Get config file 17 | if [ -e localrc ]; then 18 | . localrc 19 | fi 20 | if [ -e vpnrc ]; then 21 | . vpnrc 22 | fi 23 | 24 | # Do some IP manipulation 25 | function cidr2netmask() { 26 | set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 27 | if [[ $1 -gt 1 ]]; then 28 | shift $1 29 | else 30 | shift 31 | fi 32 | echo ${1-0}.${2-0}.${3-0}.${4-0} 33 | } 34 | 35 | FIXED_NET=`echo $FIXED_RANGE | cut -d'/' -f1` 36 | FIXED_CIDR=`echo $FIXED_RANGE | cut -d'/' -f2` 37 | FIXED_MASK=`cidr2netmask $FIXED_CIDR` 38 | 39 | # VPN Config 40 | VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`} # 50.56.12.212 41 | VPN_PROTO=${VPN_PROTO:-tcp} 42 | VPN_PORT=${VPN_PORT:-6081} 43 | VPN_DEV=${VPN_DEV:-tap0} 44 | VPN_BRIDGE=${VPN_BRIDGE:-br100} 45 | VPN_BRIDGE_IF=${VPN_BRIDGE_IF:-$FLAT_INTERFACE} 46 | VPN_CLIENT_NET=${VPN_CLIENT_NET:-$FIXED_NET} 47 | VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-$FIXED_MASK} 48 | VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}" 49 | 50 | VPN_DIR=/etc/openvpn 51 | CA_DIR=$VPN_DIR/easy-rsa 52 | 53 | usage() { 54 | echo "$0 - OpenVPN install and certificate generation" 55 | echo "" 56 | echo "$0 --client name" 57 | echo "$0 --server [name]" 58 | echo "" 59 | echo " --server mode configures the host with a running OpenVPN server instance" 60 | echo " --client mode creates a tarball of a client configuration for this server" 61 | exit 1 62 | } 63 | 64 | if [ -z $1 ]; then 65 | usage 66 | fi 67 | 68 
| # Install OpenVPN 69 | VPN_EXEC=`which openvpn` 70 | if [ -z "$VPN_EXEC" -o ! -x "$VPN_EXEC" ]; then 71 | apt-get install -y openvpn bridge-utils 72 | fi 73 | if [ ! -d $CA_DIR ]; then 74 | cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR 75 | fi 76 | 77 | # Keep track of the current directory 78 | TOOLS_DIR=$(cd $(dirname "$0") && pwd) 79 | TOP_DIR=$(cd $TOOLS_DIR/.. && pwd) 80 | 81 | WEB_DIR=$TOP_DIR/../vpn 82 | if [[ ! -d $WEB_DIR ]]; then 83 | mkdir -p $WEB_DIR 84 | fi 85 | WEB_DIR=$(cd $TOP_DIR/../vpn && pwd) 86 | 87 | cd $CA_DIR 88 | source ./vars 89 | 90 | # Override the defaults 91 | export KEY_COUNTRY="US" 92 | export KEY_PROVINCE="TX" 93 | export KEY_CITY="SanAntonio" 94 | export KEY_ORG="Cloudbuilders" 95 | export KEY_EMAIL="rcb@lists.rackspace.com" 96 | 97 | if [ ! -r $CA_DIR/keys/dh1024.pem ]; then 98 | # Initialize a new CA 99 | $CA_DIR/clean-all 100 | $CA_DIR/build-dh 101 | $CA_DIR/pkitool --initca 102 | openvpn --genkey --secret $CA_DIR/keys/ta.key ## Build a TLS key 103 | fi 104 | 105 | do_server() { 106 | NAME=$1 107 | # Generate server certificate 108 | $CA_DIR/pkitool --server $NAME 109 | 110 | (cd $CA_DIR/keys; 111 | cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR 112 | ) 113 | cat >$VPN_DIR/br-up <$VPN_DIR/br-down <$VPN_DIR/$NAME.conf <$TMP_DIR/$HOST.conf <$VPN_DIR/hostname 215 | fi 216 | do_server $NAME 217 | ;; 218 | --clean) $CA_DIR/clean-all 219 | ;; 220 | *) usage 221 | esac 222 | -------------------------------------------------------------------------------- /tools/install_prereqs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **install_prereqs.sh** 4 | 5 | # Install system package prerequisites 6 | # 7 | # install_prereqs.sh [-f] 8 | # 9 | # -f Force an install run now 10 | 11 | if [[ -n "$1" && "$1" = "-f" ]]; then 12 | FORCE_PREREQ=1 13 | fi 14 | 15 | # If TOP_DIR is set we're being sourced rather than running stand-alone 16 | # or in a 
sub-shell 17 | if [[ -z "$TOP_DIR" ]]; then 18 | # Keep track of the devstack directory 19 | TOP_DIR=$(cd $(dirname "$0")/.. && pwd) 20 | 21 | # Import common functions 22 | source $TOP_DIR/functions 23 | 24 | # Determine what system we are running on. This provides ``os_VENDOR``, 25 | # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` 26 | # and ``DISTRO`` 27 | GetDistro 28 | 29 | # Needed to get ``ENABLED_SERVICES`` 30 | source $TOP_DIR/stackrc 31 | 32 | # Prereq dirs are here 33 | FILES=$TOP_DIR/files 34 | fi 35 | 36 | # Minimum wait time 37 | PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs} 38 | PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2} 39 | PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS)) 40 | 41 | NOW=$(date "+%s") 42 | LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") 43 | DELTA=$(($NOW - $LAST_RUN)) 44 | if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then 45 | echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." 
46 | return 0 47 | fi 48 | 49 | # Make sure the proxy config is visible to sub-processes 50 | export_proxy_variables 51 | 52 | 53 | # Install Packages 54 | # ================ 55 | 56 | # Install package requirements 57 | if is_ubuntu; then 58 | install_package $(get_packages $FILES/apts) 59 | elif is_fedora; then 60 | install_package $(get_packages $FILES/rpms) 61 | elif is_suse; then 62 | install_package $(get_packages $FILES/rpms-suse) 63 | else 64 | exit_distro_not_supported "list of packages" 65 | fi 66 | 67 | if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then 68 | if is_ubuntu || is_fedora; then 69 | install_package rsyslog-relp 70 | elif is_suse; then 71 | install_package rsyslog-module-relp 72 | else 73 | exit_distro_not_supported "rsyslog-relp installation" 74 | fi 75 | fi 76 | 77 | 78 | # Mark end of run 79 | # --------------- 80 | 81 | date "+%s" >$PREREQ_RERUN_MARKER 82 | date >>$PREREQ_RERUN_MARKER 83 | -------------------------------------------------------------------------------- /tools/jenkins/README.md: -------------------------------------------------------------------------------- 1 | Getting Started With Jenkins and Devstack 2 | ========================================= 3 | This little corner of devstack is to show how to get an Openstack jenkins 4 | environment up and running quickly, using the rcb configuration methodology. 5 | 6 | 7 | To create a jenkins server 8 | -------------------------- 9 | 10 | cd tools/jenkins/jenkins_home 11 | ./build_jenkins.sh 12 | 13 | This will create a jenkins environment configured with sample test scripts that run against xen and kvm. 14 | 15 | Configuring XS 16 | -------------- 17 | In order to make the tests for XS work, you must install xs 5.6 on a separate machine, 18 | and install the the jenkins public key on that server. 
You then need to create the 19 | /var/lib/jenkins/xenrc on your jenkins server like so: 20 | 21 | MYSQL_PASSWORD=secrete 22 | SERVICE_TOKEN=secrete 23 | ADMIN_PASSWORD=secrete 24 | RABBIT_PASSWORD=secrete 25 | # This is the password for your guest (for both stack and root users) 26 | GUEST_PASSWORD=secrete 27 | # Do not download the usual images yet! 28 | IMAGE_URLS="" 29 | FLOATING_RANGE=192.168.1.224/28 30 | VIRT_DRIVER=xenserver 31 | # Explicitly set multi-host 32 | MULTI_HOST=1 33 | # Give extra time for boot 34 | ACTIVE_TIMEOUT=45 35 | # IMPORTANT: This is the ip of your xenserver 36 | XEN_IP=10.5.5.1 37 | # IMPORTANT: The following must be set to your dom0 root password! 38 | XENAPI_PASSWORD='MY_XEN_ROOT_PW' 39 | -------------------------------------------------------------------------------- /tools/jenkins/adapters/euca.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Echo commands, exit on error 3 | set -o xtrace 4 | set -o errexit 5 | 6 | TOP_DIR=$(cd ../../.. && pwd) 7 | HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` 8 | ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./euca.sh' 9 | -------------------------------------------------------------------------------- /tools/jenkins/adapters/floating_ips.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Echo commands, exit on error 3 | set -o xtrace 4 | set -o errexit 5 | 6 | TOP_DIR=$(cd ../../.. 
&& pwd) 7 | HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` 8 | ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh' 9 | -------------------------------------------------------------------------------- /tools/jenkins/adapters/swift.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Echo commands, exit on error 3 | set -o xtrace 4 | set -o errexit 5 | 6 | TOP_DIR=$(cd ../../.. && pwd) 7 | HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` 8 | ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./swift.sh' 9 | -------------------------------------------------------------------------------- /tools/jenkins/adapters/volumes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Echo commands, exit on error 3 | set -o xtrace 4 | set -o errexit 5 | 6 | TOP_DIR=$(cd ../../.. && pwd) 7 | HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` 8 | ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh' 9 | -------------------------------------------------------------------------------- /tools/jenkins/build_configuration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | EXECUTOR_NUMBER=$1 4 | CONFIGURATION=$2 5 | ADAPTER=$3 6 | RC=$4 7 | 8 | function usage() { 9 | echo "Usage: $0 - Build a configuration" 10 | echo "" 11 | echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" 12 | exit 1 13 | } 14 | 15 | # Validate inputs 16 | if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then 17 | usage 18 | fi 19 | 20 | # Execute configuration script 21 | cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION $ADAPTER "$RC" 22 | -------------------------------------------------------------------------------- /tools/jenkins/configurations/kvm.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # exit on error to stop unexpected errors 4 | set -o errexit 5 | set -o xtrace 6 | 7 | EXECUTOR_NUMBER=$1 8 | CONFIGURATION=$2 9 | ADAPTER=$3 10 | RC=$4 11 | 12 | function usage() { 13 | echo "Usage: $0 - Build a test configuration" 14 | echo "" 15 | echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" 16 | exit 1 17 | } 18 | 19 | # Validate inputs 20 | if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then 21 | usage 22 | fi 23 | 24 | # This directory 25 | CUR_DIR=$(cd $(dirname "$0") && pwd) 26 | 27 | # devstack directory 28 | cd ../../.. 29 | TOP_DIR=$(pwd) 30 | 31 | # Deps 32 | apt-get install -y --force-yes libvirt-bin || true 33 | 34 | # Name test instance based on executor 35 | BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER` 36 | GUEST_NAME=$BASE_NAME.$ADAPTER 37 | virsh list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh destroy || true 38 | virsh net-list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true 39 | 40 | # Write localrc via a heredoc ('cat <localrc' would read the file, not create it) 41 | cat >localrc <<EOF 42 | RECLONE=yes 43 | GUEST_NETWORK=$EXECUTOR_NUMBER 44 | GUEST_NAME=$GUEST_NAME 45 | FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27 46 | GUEST_CORES=1 47 | GUEST_RAM=12574720 48 | MYSQL_PASSWORD=chicken 49 | RABBIT_PASSWORD=chicken 50 | SERVICE_TOKEN=chicken 51 | SERVICE_PASSWORD=chicken 52 | ADMIN_PASSWORD=chicken 53 | USERNAME=admin 54 | TENANT=admin 55 | NET_NAME=$BASE_NAME 56 | ACTIVE_TIMEOUT=45 57 | BOOT_TIMEOUT=45 58 | $RC 59 | EOF 60 | cd tools 61 | sudo ./build_uec.sh 62 | 63 | # Make the address of the instances available to test runners 64 | echo HEAD=`cat /var/lib/libvirt/dnsmasq/$BASE_NAME.leases | cut -d " " -f3` > $TOP_DIR/addresses 65 | -------------------------------------------------------------------------------- /tools/jenkins/configurations/xs.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | set -o xtrace 4 | 5 | 6 | EXECUTOR_NUMBER=$1 7 | CONFIGURATION=$2 8 | ADAPTER=$3 9 | RC=$4 10 | 11 | function usage() { 12 | echo "Usage: $0 - Build a test configuration" 13 | echo "" 14 | echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" 15 | exit 1 16 | } 17 | 18 | # Validate inputs 19 | if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then 20 | usage 21 | fi 22 | 23 | # Configuration of xenrc 24 | XENRC=/var/lib/jenkins/xenrc 25 | if [ ! -e $XENRC ]; then 26 | echo "/var/lib/jenkins/xenrc is not present! See README.md" 27 | exit 1 28 | fi 29 | 30 | # Move to top of devstack 31 | cd ../../.. 32 | 33 | # Use xenrc as the start of our localrc 34 | cp $XENRC localrc 35 | 36 | # Set the PUB_IP 37 | PUB_IP=192.168.1.1$EXECUTOR_NUMBER 38 | echo "PUB_IP=$PUB_IP" >> localrc 39 | 40 | # Overrides 41 | echo "$RC" >> localrc 42 | 43 | # Source localrc 44 | . localrc 45 | 46 | # Make host ip available to tester 47 | echo "HEAD=$PUB_IP" > addresses 48 | 49 | # Build configuration 50 | REMOTE_DEVSTACK=/root/devstack 51 | ssh root@$XEN_IP "rm -rf $REMOTE_DEVSTACK" 52 | scp -pr . 
root@$XEN_IP:$REMOTE_DEVSTACK 53 | ssh root@$XEN_IP "cd $REMOTE_DEVSTACK/tools/xen && ./build_domU.sh" 54 | -------------------------------------------------------------------------------- /tools/jenkins/jenkins_home/.gitignore: -------------------------------------------------------------------------------- 1 | builds 2 | workspace 3 | *.sw* 4 | -------------------------------------------------------------------------------- /tools/jenkins/jenkins_home/build_jenkins.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Echo commands, exit on error 4 | set -o xtrace 5 | set -o errexit 6 | 7 | # Make sure only root can run our script 8 | if [[ $EUID -ne 0 ]]; then 9 | echo "This script must be run as root" 10 | exit 1 11 | fi 12 | 13 | # This directory 14 | CUR_DIR=$(cd $(dirname "$0") && pwd) 15 | 16 | # Configure trunk jenkins! 17 | echo "deb http://pkg.jenkins-ci.org/debian binary/" > /etc/apt/sources.list.d/jenkins.list 18 | wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add - 19 | apt-get update 20 | 21 | 22 | # Clean out old jenkins - useful if you are having issues upgrading 23 | CLEAN_JENKINS=${CLEAN_JENKINS:-no} 24 | if [ "$CLEAN_JENKINS" = "yes" ]; then 25 | apt-get remove jenkins jenkins-common 26 | fi 27 | 28 | # Install software 29 | DEPS="jenkins cloud-utils" 30 | apt-get install -y --force-yes $DEPS 31 | 32 | # Install jenkins 33 | if [ ! -e /var/lib/jenkins ]; then 34 | echo "Jenkins installation failed" 35 | exit 1 36 | fi 37 | 38 | # Make sure user has configured a jenkins ssh pubkey 39 | if [ ! -e /var/lib/jenkins/.ssh/id_rsa.pub ]; then 40 | echo "Public key for jenkins is missing. This is used to ssh into your instances." 
41 | echo "Please run "su -c ssh-keygen jenkins" before proceeding" 42 | exit 1 43 | fi 44 | 45 | # Setup sudo 46 | JENKINS_SUDO=/etc/sudoers.d/jenkins 47 | cat > $JENKINS_SUDO < $JENKINS_GITCONF < 56 | 57 | 4 58 | Jenkins 59 | jenkins@rcb.me 60 | 61 | EOF 62 | 63 | # Add build numbers 64 | JOBS=`ls jobs` 65 | for job in ${JOBS// / }; do 66 | if [ ! -e jobs/$job/nextBuildNumber ]; then 67 | echo 1 > jobs/$job/nextBuildNumber 68 | fi 69 | done 70 | 71 | # Set ownership to jenkins 72 | chown -R jenkins $CUR_DIR 73 | 74 | # Make sure this directory is accessible to jenkins 75 | if ! su -c "ls $CUR_DIR" jenkins; then 76 | echo "Your devstack directory is not accessible by jenkins." 77 | echo "There is a decent chance you are trying to run this from a directory in /root." 78 | echo "If so, try moving devstack elsewhere (eg. /opt/devstack)." 79 | exit 1 80 | fi 81 | 82 | # Move aside old jobs, if present 83 | if [ ! -h /var/lib/jenkins/jobs ]; then 84 | echo "Installing jobs symlink" 85 | if [ -d /var/lib/jenkins/jobs ]; then 86 | mv /var/lib/jenkins/jobs /var/lib/jenkins/jobs.old 87 | fi 88 | fi 89 | 90 | # Set up jobs symlink 91 | rm -f /var/lib/jenkins/jobs 92 | ln -s $CUR_DIR/jobs /var/lib/jenkins/jobs 93 | 94 | # List of plugins 95 | PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.hpi,http://mirrors.jenkins-ci.org/plugins/git/1.1.12/git.hpi,http://hudson-ci.org/downloads/plugins/global-build-stats/1.2/global-build-stats.hpi,http://hudson-ci.org/downloads/plugins/greenballs/1.10/greenballs.hpi,http://download.hudson-labs.org/plugins/console-column-plugin/1.0/console-column-plugin.hpi 96 | 97 | # Configure plugins 98 | for plugin in ${PLUGINS//,/ }; do 99 | name=`basename $plugin` 100 | dest=/var/lib/jenkins/plugins/$name 101 | if [ ! 
-e $dest ]; then 102 | curl -L $plugin -o $dest 103 | fi 104 | done 105 | 106 | # Restart jenkins 107 | /etc/init.d/jenkins stop || true 108 | /etc/init.d/jenkins start 109 | -------------------------------------------------------------------------------- /tools/jenkins/jenkins_home/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is not yet for general consumption. 3 | 4 | set -o errexit 5 | 6 | if [ ! "$FORCE" = "yes" ]; then 7 | echo "FORCE not set to 'yes'. Make sure this is something you really want to do. Exiting." 8 | exit 1 9 | fi 10 | 11 | virsh list | cut -d " " -f1 | grep -v "-" | egrep -e "[0-9]" | xargs -n 1 virsh destroy || true 12 | virsh net-list | grep active | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true 13 | killall dnsmasq || true 14 | if [ "$CLEAN" = "yes" ]; then 15 | rm -rf jobs 16 | fi 17 | rm /var/lib/jenkins/jobs 18 | git checkout -f 19 | git fetch 20 | git merge origin/jenkins 21 | ./build_jenkins.sh 22 | -------------------------------------------------------------------------------- /tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | 9 | 10 | RC 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 2 19 | 20 | 21 | origin 22 | +refs/heads/*:refs/remotes/origin/* 23 | git://github.com/cloudbuilders/devstack.git 24 | 25 | 26 | 27 | 28 | master 29 | 30 | 31 | false 32 | false 33 | false 34 | false 35 | false 36 | false 37 | false 38 | 39 | Default 40 | 41 | 42 | 43 | 44 | 45 | 46 | false 47 | 48 | 49 | true 50 | false 51 | false 52 | false 53 | 54 | false 55 | 56 | 57 | ADAPTER 58 | 59 | euca 60 | floating_ips 61 | 62 | 63 | 64 | 65 | 66 | sed -i 's/) 2>&1 | tee "${LOGFILE}"/)/' stack.sh 67 | 68 | 69 | set -o errexit 70 | cd tools/jenkins 71 | sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER "$RC" 72 | 73 | 74 | set -o errexit 75 | cd 
tools/jenkins 76 | ./run_test.sh $EXECUTOR_NUMBER $ADAPTER $RC "$RC" 77 | 78 | 79 | 80 | 81 | false 82 | 83 | -------------------------------------------------------------------------------- /tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | false 4 | 5 | 6 | false 7 | false 8 | false 9 | false 10 | 11 | false 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | false 4 | 5 | 6 | false 7 | false 8 | false 9 | false 10 | 11 | false 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | In order for this to work, you must create a /var/lib/jenkins/xenrc file as described in README.md 5 | false 6 | 7 | 8 | 9 | 10 | RC 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 2 19 | 20 | 21 | origin 22 | +refs/heads/*:refs/remotes/origin/* 23 | git://github.com/cloudbuilders/devstack.git 24 | 25 | 26 | 27 | 28 | master 29 | 30 | 31 | false 32 | false 33 | false 34 | false 35 | false 36 | false 37 | false 38 | 39 | Default 40 | 41 | 42 | 43 | 44 | 45 | 46 | false 47 | 48 | 49 | true 50 | false 51 | false 52 | false 53 | 54 | false 55 | 56 | 57 | ADAPTER 58 | 59 | euca 60 | floating_ips 61 | 62 | 63 | 64 | 65 | 66 | sed -i 's/) 2>&1 | tee "${LOGFILE}"/)/' stack.sh 67 | 68 | 69 | set -o errexit 70 | cd tools/jenkins 71 | sudo ./build_configuration.sh $EXECUTOR_NUMBER xs $ADAPTER "$RC" 72 | 73 | 74 | #!/bin/bash 75 | set -o errexit 76 | set -o xtrace 77 | 78 | . 
localrc 79 | 80 | # Unlike kvm, ssh to the xen host to run tests, in case the test instance is launch with a host only network 81 | ssh root@$XEN_IP "cd devstack && . localrc && cd tools/jenkins && ./run_test.sh $EXECUTOR_NUMBER $ADAPTER '$RC'" 82 | 83 | 84 | 85 | 86 | 87 | true 88 | 89 | -------------------------------------------------------------------------------- /tools/jenkins/jenkins_home/print_summary.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import urllib 3 | import json 4 | import sys 5 | 6 | 7 | def print_usage(): 8 | print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]" 9 | % sys.argv[0]) 10 | sys.exit() 11 | 12 | 13 | def fetch_blob(url): 14 | return json.loads(urllib.urlopen(url + '/api/json').read()) 15 | 16 | 17 | if len(sys.argv) < 2: 18 | print_usage() 19 | 20 | BASE_URL = sys.argv[1] 21 | 22 | root = fetch_blob(BASE_URL) 23 | results = {} 24 | for job_url in root['jobs']: 25 | job = fetch_blob(job_url['url']) 26 | if job.get('activeConfigurations'): 27 | (tag, name) = job['name'].split('-') 28 | if not results.get(tag): 29 | results[tag] = {} 30 | if not results[tag].get(name): 31 | results[tag][name] = [] 32 | 33 | for config_url in job['activeConfigurations']: 34 | config = fetch_blob(config_url['url']) 35 | 36 | log_url = '' 37 | if config.get('lastBuild'): 38 | log_url = config['lastBuild']['url'] + 'console' 39 | 40 | results[tag][name].append({'test': config['displayName'], 41 | 'status': config['color'], 42 | 'logUrl': log_url, 43 | 'healthReport': config['healthReport']}) 44 | 45 | print json.dumps(results) 46 | -------------------------------------------------------------------------------- /tools/jenkins/run_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | EXECUTOR_NUMBER=$1 4 | ADAPTER=$2 5 | RC=$3 6 | 7 | function usage() { 8 | echo "Usage: $0 - Run a test" 9 | echo "" 10 | echo "$0 
[EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]" 11 | exit 1 12 | } 13 | 14 | # Validate inputs 15 | if [[ "$EXECUTOR_NUMBER" = "" || "$ADAPTER" = "" ]]; then 16 | usage 17 | fi 18 | 19 | # Execute configuration script 20 | cd adapters && ./$ADAPTER.sh $EXECUTOR_NUMBER $ADAPTER "$RC" 21 | -------------------------------------------------------------------------------- /tools/make_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # **make_cert.sh** 4 | 5 | # Create a CA hierarchy (if necessary) and server certificate 6 | # 7 | # This mimics the CA structure that DevStack sets up when ``tls_proxy`` is enabled 8 | # but in the current directory unless ``DATA_DIR`` is set 9 | 10 | ENABLE_TLS=True 11 | DATA_DIR=${DATA_DIR:-`pwd`/ca-data} 12 | 13 | ROOT_CA_DIR=$DATA_DIR/root 14 | INT_CA_DIR=$DATA_DIR/int 15 | 16 | # Import common functions; default TOP_DIR to the devstack root so this works when run directly 17 | TOP_DIR=${TOP_DIR:-$(cd $(dirname "$0")/.. && pwd)}; source $TOP_DIR/functions 18 | 19 | # Import TLS functions 20 | source lib/tls 21 | 22 | function usage { 23 | echo "$0 - Create CA and/or certs" 24 | echo "" 25 | echo "Usage: $0 commonName [orgUnit]" 26 | exit 1 27 | } 28 | 29 | CN=$1 30 | if [[ -z "$CN" ]]; then 31 | usage 32 | fi 33 | ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} 34 | 35 | # Useful on OS/X 36 | if [[ `uname -s` == 'Darwin' && -d /usr/local/Cellar/openssl ]]; then 37 | # set up for brew-installed modern OpenSSL 38 | OPENSSL_CONF=/usr/local/etc/openssl/openssl.cnf 39 | OPENSSL=/usr/local/Cellar/openssl/*/bin/openssl 40 | fi 41 | 42 | DEVSTACK_CERT_NAME=$CN 43 | DEVSTACK_HOSTNAME=$CN 44 | DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem 45 | 46 | # Make sure the CA is set up 47 | configure_CA 48 | init_CA 49 | 50 | # Create the server cert 51 | make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME 52 | 53 | # Create a cert bundle 54 | cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT 55 | 56 | 
-------------------------------------------------------------------------------- /tools/uec/meta.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler 3 | from SimpleHTTPServer import SimpleHTTPRequestHandler 4 | 5 | def main(host, port, HandlerClass = SimpleHTTPRequestHandler, 6 | ServerClass = HTTPServer, protocol="HTTP/1.0"): 7 | """simple http server that listens on a give address:port""" 8 | 9 | server_address = (host, port) 10 | 11 | HandlerClass.protocol_version = protocol 12 | httpd = ServerClass(server_address, HandlerClass) 13 | 14 | sa = httpd.socket.getsockname() 15 | print "Serving HTTP on", sa[0], "port", sa[1], "..." 16 | httpd.serve_forever() 17 | 18 | if __name__ == '__main__': 19 | if sys.argv[1:]: 20 | address = sys.argv[1] 21 | else: 22 | address = '0.0.0.0' 23 | if ':' in address: 24 | host, port = address.split(':') 25 | else: 26 | host = address 27 | port = 8080 28 | 29 | main(host, int(port)) 30 | -------------------------------------------------------------------------------- /tools/upload_image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # upload_image.sh - Retrieve and upload an image into Glance 3 | # 4 | # upload_image.sh 5 | # 6 | # Assumes credentials are set via OS_* environment variables 7 | 8 | function usage { 9 | echo "$0 - Retrieve and upload an image into Glance" 10 | echo "" 11 | echo "Usage: $0 [...]" 12 | echo "" 13 | echo "Assumes credentials are set via OS_* environment variables" 14 | exit 1 15 | } 16 | 17 | # Keep track of the current directory 18 | TOOLS_DIR=$(cd $(dirname "$0") && pwd) 19 | TOP_DIR=$(cd $TOOLS_DIR/..; pwd) 20 | 21 | # Import common functions 22 | source $TOP_DIR/functions 23 | 24 | # Import configuration 25 | source $TOP_DIR/openrc "" "" "" "" 26 | 27 | # Find the cache dir 28 | FILES=$TOP_DIR/files 29 | 30 | if [[ -z "$1" 
]]; then 31 | usage 32 | fi 33 | 34 | # Get a token to authenticate to glance 35 | TOKEN=$(keystone token-get | grep ' id ' | get_field 2) 36 | 37 | # Glance connection info. Note the port must be specified. 38 | GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_HOST:9292} 39 | 40 | for IMAGE in "$*"; do 41 | upload_image $IMAGE $TOKEN 42 | done 43 | -------------------------------------------------------------------------------- /tools/warm_apts_for_uec.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # **warm_apts_for_uec.sh** 4 | 5 | # Echo commands 6 | set -o xtrace 7 | 8 | # Exit on error to stop unexpected errors 9 | set -o errexit 10 | 11 | # Keep track of the current directory 12 | TOOLS_DIR=$(cd $(dirname "$0") && pwd) 13 | TOP_DIR=`cd $TOOLS_DIR/..; pwd` 14 | 15 | # Change dir to top of devstack 16 | cd $TOP_DIR 17 | 18 | # Echo usage 19 | usage() { 20 | echo "Cache OpenStack dependencies on a uec image to speed up performance." 21 | echo "" 22 | echo "Usage: $0 [full path to raw uec base image]" 23 | } 24 | 25 | # Make sure this is a raw image 26 | if ! qemu-img info $1 | grep -q "file format: raw"; then 27 | usage 28 | exit 1 29 | fi 30 | 31 | # Make sure we are in the correct dir 32 | if [ ! 
-d files/apts ]; then 33 | echo "Please run this script from devstack/tools/" 34 | exit 1 35 | fi 36 | 37 | # Mount the image 38 | STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage 39 | mkdir -p $STAGING_DIR 40 | umount $STAGING_DIR || true 41 | sleep 1 42 | mount -t ext4 -o loop $1 $STAGING_DIR 43 | 44 | # Make sure that base requirements are installed 45 | cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf 46 | 47 | # Perform caching on the base image to speed up subsequent runs 48 | chroot $STAGING_DIR apt-get update 49 | chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` 50 | chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true 51 | 52 | # Unmount 53 | umount $STAGING_DIR 54 | -------------------------------------------------------------------------------- /tools/xen/README.md: -------------------------------------------------------------------------------- 1 | Getting Started With XenServer 5.6 and Devstack 2 | =============================================== 3 | The purpose of the code in this directory it to help developers bootstrap 4 | a XenServer 5.6 (or greater) + Openstack development environment. This file gives 5 | some pointers on how to get started. 6 | 7 | Xenserver is a Type 1 hypervisor, so it needs to be installed on bare metal. 8 | The Openstack services are configured to run within a "privileged" virtual 9 | machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack 10 | to communicate with the host. 11 | 12 | Step 1: Install Xenserver 13 | ------------------------ 14 | Install XenServer 5.6+ on a clean box. 
You can get XenServer by signing 15 | up for an account on citrix.com, and then visiting: 16 | https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148 17 | 18 | For details on installation, see: http://wiki.openstack.org/XenServer/Install 19 | 20 | Here are some sample Xenserver network settings for when you are just 21 | getting started (Settings like this have been used with a laptop + cheap wifi router): 22 | 23 | * XenServer Host IP: 192.168.1.10 24 | * XenServer Netmask: 255.255.255.0 25 | * XenServer Gateway: 192.168.1.1 26 | * XenServer DNS: 192.168.1.1 27 | 28 | Step 2: Download devstack 29 | -------------------------- 30 | On your XenServer host, run the following commands as root: 31 | 32 | wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master 33 | unzip -o master -d ./devstack 34 | cd devstack/*/ 35 | 36 | Step 3: Configure your localrc inside the devstack directory 37 | ------------------------------------------------------------ 38 | Devstack uses a localrc for user-specific configuration. Note that 39 | the XENAPI_PASSWORD must be your dom0 root password. 40 | Of course, use real passwords if this machine is exposed. 
41 | 42 | cat > ./localrc <$STAGING_DIR/etc/rc.local 63 | # network restart required for getting the right gateway 64 | /etc/init.d/networking restart 65 | chown -R $STACK_USER /opt/stack 66 | su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER 67 | exit 0 68 | EOF 69 | 70 | # Configure the hostname 71 | echo $GUEST_NAME > $STAGING_DIR/etc/hostname 72 | 73 | # Hostname must resolve for rabbit 74 | HOSTS_FILE_IP=$PUB_IP 75 | if [ $MGT_IP != "dhcp" ]; then 76 | HOSTS_FILE_IP=$MGT_IP 77 | fi 78 | cat <$STAGING_DIR/etc/hosts 79 | $HOSTS_FILE_IP $GUEST_NAME 80 | 127.0.0.1 localhost localhost.localdomain 81 | EOF 82 | 83 | # Configure the network 84 | INTERFACES=$STAGING_DIR/etc/network/interfaces 85 | TEMPLATES_DIR=$TOP_DIR/templates 86 | cp $TEMPLATES_DIR/interfaces.in $INTERFACES 87 | if [ $VM_IP == "dhcp" ]; then 88 | echo 'eth1 on dhcp' 89 | sed -e "s,iface eth1 inet static,iface eth1 inet dhcp,g" -i $INTERFACES 90 | sed -e '/@ETH1_/d' -i $INTERFACES 91 | else 92 | sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES 93 | sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES 94 | fi 95 | 96 | if [ $MGT_IP == "dhcp" ]; then 97 | echo 'eth2 on dhcp' 98 | sed -e "s,iface eth2 inet static,iface eth2 inet dhcp,g" -i $INTERFACES 99 | sed -e '/@ETH2_/d' -i $INTERFACES 100 | else 101 | sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES 102 | sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES 103 | fi 104 | 105 | if [ $PUB_IP == "dhcp" ]; then 106 | echo 'eth3 on dhcp' 107 | sed -e "s,iface eth3 inet static,iface eth3 inet dhcp,g" -i $INTERFACES 108 | sed -e '/@ETH3_/d' -i $INTERFACES 109 | else 110 | sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES 111 | sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES 112 | fi 113 | 114 | if [ "$ENABLE_GI" == "true" ]; then 115 | cat <>$INTERFACES 116 | auto eth0 117 | iface eth0 inet dhcp 118 | EOF 119 | fi 120 | 121 | # Gracefully cp only if source file/dir exists 122 | function cp_it { 123 | if [ -e $1 ] || [ -d $1 ]; then 124 | 
cp -pRL $1 $2 125 | fi 126 | } 127 | 128 | # Copy over your ssh keys and env if desired 129 | COPYENV=${COPYENV:-1} 130 | if [ "$COPYENV" = "1" ]; then 131 | cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh 132 | cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys 133 | cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig 134 | cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc 135 | cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc 136 | fi 137 | 138 | # Configure run.sh 139 | cat <$STAGING_DIR/opt/stack/run.sh 140 | #!/bin/bash 141 | cd /opt/stack/devstack 142 | killall screen 143 | VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh 144 | EOF 145 | chmod 755 $STAGING_DIR/opt/stack/run.sh 146 | -------------------------------------------------------------------------------- /tools/xen/files/fstab: -------------------------------------------------------------------------------- 1 | LABEL=vpxroot / ext3 defaults 1 1 2 | tmpfs /dev/shm tmpfs defaults 0 0 3 | devpts /dev/pts devpts gid=5,mode=620 0 0 4 | sysfs /sys sysfs defaults 0 0 5 | proc /proc proc defaults 0 0 6 | -------------------------------------------------------------------------------- /tools/xen/files/hvc0.conf: -------------------------------------------------------------------------------- 1 | # hvc0 - getty 2 | # 3 | # This service maintains a getty on hvc0 from the point the system is 4 | # started until it is shut down again. 5 | 6 | start on stopped rc RUNLEVEL=[2345] 7 | stop on runlevel [!2345] 8 | 9 | respawn 10 | exec /sbin/getty -8 9600 hvc0 11 | -------------------------------------------------------------------------------- /tools/xen/prepare_guest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is run on an Ubuntu VM. 4 | # This script is inserted into the VM by prepare_guest_template.sh 5 | # and is run when that VM boots. 
6 | # It customizes a fresh Ubuntu install, so it is ready 7 | # to run stack.sh 8 | # 9 | # This includes installing the XenServer tools, 10 | # creating the user called "stack", 11 | # and shuts down the VM to signal the script has completed 12 | 13 | set -x 14 | # Echo commands 15 | set -o xtrace 16 | 17 | # Configurable nuggets 18 | GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} 19 | STAGING_DIR=${STAGING_DIR:-stage} 20 | DO_TGZ=${DO_TGZ:-1} 21 | XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"} 22 | STACK_USER=${STACK_USER:-stack} 23 | 24 | # Install basics 25 | chroot $STAGING_DIR apt-get update 26 | chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool 27 | chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo 28 | chroot $STAGING_DIR pip install xenapi 29 | 30 | # Install XenServer guest utilities 31 | cp $XS_TOOLS_PATH ${STAGING_DIR}${XS_TOOLS_PATH} 32 | chroot $STAGING_DIR dpkg -i $XS_TOOLS_PATH 33 | chroot $STAGING_DIR update-rc.d -f xe-linux-distribution remove 34 | chroot $STAGING_DIR update-rc.d xe-linux-distribution defaults 35 | 36 | # Make a small cracklib dictionary, so that passwd still works, but we don't 37 | # have the big dictionary. 38 | mkdir -p $STAGING_DIR/usr/share/cracklib 39 | echo a | chroot $STAGING_DIR cracklib-packer 40 | 41 | # Make /etc/shadow, and set the root password 42 | chroot $STAGING_DIR "pwconv" 43 | echo "root:$GUEST_PASSWORD" | chroot $STAGING_DIR chpasswd 44 | 45 | # Put the VPX into UTC. 
46 | rm -f $STAGING_DIR/etc/localtime 47 | 48 | # Add stack user 49 | chroot $STAGING_DIR groupadd libvirtd 50 | chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd 51 | echo $STACK_USER:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd 52 | echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers 53 | 54 | # Give ownership of /opt/stack to stack user 55 | chroot $STAGING_DIR chown -R $STACK_USER /opt/stack 56 | 57 | # Make our ip address hostnames look nice at the command prompt 58 | echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc 59 | echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/root/.bashrc 60 | echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/etc/profile 61 | 62 | function setup_vimrc { 63 | if [ ! -e $1 ]; then 64 | # Simple but usable vimrc 65 | cat > $1 <$STAGING_DIR/etc/rc.local 78 | GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ \ 79 | DO_TGZ=0 XS_TOOLS_PATH=$XS_TOOLS_PATH \ 80 | bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1 81 | EOF 82 | -------------------------------------------------------------------------------- /tools/xen/scripts/install_ubuntu_template.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This creates an Ubuntu Server 32bit or 64bit template 4 | # on Xenserver 5.6.x, 6.0.x and 6.1.x 5 | # The template does a net install only 6 | # 7 | # Based on a script by: David Markey 8 | # 9 | 10 | # Exit on errors 11 | set -o errexit 12 | # Echo commands 13 | set -o xtrace 14 | 15 | # This directory 16 | BASE_DIR=$(cd $(dirname "$0") && pwd) 17 | 18 | # For default setings see xenrc 19 | source $BASE_DIR/../xenrc 20 | 21 | # Get the params 22 | preseed_url=$1 23 | 24 | # Delete template or skip template creation as required 25 | previous_template=$(xe template-list 
name-label="$UBUNTU_INST_TEMPLATE_NAME" \ 26 | params=uuid --minimal) 27 | if [ -n "$previous_template" ]; then 28 | if $CLEAN_TEMPLATES; then 29 | xe template-param-clear param-name=other-config uuid=$previous_template 30 | xe template-uninstall template-uuid=$previous_template force=true 31 | else 32 | echo "Template $UBUNTU_INST_TEMPLATE_NAME already present" 33 | exit 0 34 | fi 35 | fi 36 | 37 | # Get built-in template 38 | builtin_name="Debian Squeeze 6.0 (32-bit)" 39 | builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal) 40 | if [[ -z $builtin_uuid ]]; then 41 | echo "Cant find the Debian Squeeze 32bit template on your XenServer." 42 | exit 1 43 | fi 44 | 45 | # Clone built-in template to create new template 46 | new_uuid=$(xe vm-clone uuid=$builtin_uuid \ 47 | new-name-label="$UBUNTU_INST_TEMPLATE_NAME") 48 | disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) 49 | 50 | # Some of these settings can be found in example preseed files 51 | # however these need to be answered before the netinstall 52 | # is ready to fetch the preseed file, and as such must be here 53 | # to get a fully automated install 54 | pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \ 55 | console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \ 56 | keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \ 57 | netcfg/choose_interface=${HOST_IP_IFACE} \ 58 | netcfg/get_hostname=os netcfg/get_domain=os auto \ 59 | url=${preseed_url}" 60 | 61 | if [ "$UBUNTU_INST_IP" != "dhcp" ]; then 62 | netcfgargs="netcfg/disable_autoconfig=true \ 63 | netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \ 64 | netcfg/get_ipaddress=${UBUNTU_INST_IP} \ 65 | netcfg/get_netmask=${UBUNTU_INST_NETMASK} \ 66 | netcfg/get_gateway=${UBUNTU_INST_GATEWAY} \ 67 | netcfg/confirm_static=true" 68 | pvargs="${pvargs} ${netcfgargs}" 69 | fi 70 | 71 | xe template-param-set uuid=$new_uuid \ 72 | other-config:install-methods=http \ 73 | 
other-config:install-repository="$UBUNTU_INST_REPOSITORY" \ 74 | PV-args="$pvargs" \ 75 | other-config:debian-release="$UBUNTU_INST_RELEASE" \ 76 | other-config:default_template=true \ 77 | other-config:disks='' \ 78 | other-config:install-arch="$UBUNTU_INST_ARCH" 79 | 80 | echo "Ubuntu template installed uuid:$new_uuid" 81 | -------------------------------------------------------------------------------- /tools/xen/scripts/manage-vdi: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | 5 | action="$1" 6 | vm="$2" 7 | device="${3-0}" 8 | part="${4-}" 9 | 10 | function xe_min() { 11 | local cmd="$1" 12 | shift 13 | xe "$cmd" --minimal "$@" 14 | } 15 | 16 | function run_udev_settle() { 17 | which_udev=$(which udevsettle) || true 18 | if [ -n "$which_udev" ]; then 19 | udevsettle 20 | else 21 | udevadm settle 22 | fi 23 | } 24 | 25 | vm_uuid=$(xe_min vm-list name-label="$vm") 26 | vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \ 27 | userdevice="$device") 28 | 29 | dom0_uuid=$(xe_min vm-list is-control-domain=true) 30 | 31 | function get_mount_device() { 32 | vbd_uuid=$1 33 | 34 | dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") 35 | if [[ "$dev" =~ "sm/" ]]; then 36 | DEBIAN_FRONTEND=noninteractive \ 37 | apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \ 38 | install kpartx &> /dev/null || true 39 | mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-z0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p") 40 | if [ -z "$mapping" ]; then 41 | echo "Failed to find mapping" 42 | exit -1 43 | fi 44 | echo "/dev/mapper/${mapping}" 45 | else 46 | echo "/dev/$dev$part" 47 | fi 48 | } 49 | 50 | function clean_dev_mappings() { 51 | dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") 52 | if [[ "$dev" =~ "sm/" ]]; then 53 | kpartx -dv "/dev/$dev" 54 | fi 55 | } 56 | 57 | function open_vdi() { 58 | vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \ 
59 | device=autodetect) 60 | mp=$(mktemp -d) 61 | xe vbd-plug uuid="$vbd_uuid" 62 | 63 | run_udev_settle 64 | 65 | mount_device=$(get_mount_device "$vbd_uuid") 66 | mount "$mount_device" "$mp" 67 | echo "Your vdi is mounted at $mp" 68 | } 69 | 70 | function close_vdi() { 71 | vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid") 72 | mount_device=$(get_mount_device "$vbd_uuid") 73 | run_udev_settle 74 | umount "$mount_device" 75 | 76 | clean_dev_mappings 77 | 78 | xe vbd-unplug uuid=$vbd_uuid 79 | xe vbd-destroy uuid=$vbd_uuid 80 | } 81 | 82 | if [ "$action" == "open" ]; then 83 | open_vdi 84 | elif [ "$action" == "close" ]; then 85 | close_vdi 86 | fi 87 | -------------------------------------------------------------------------------- /tools/xen/scripts/on_exit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o xtrace 5 | 6 | declare -a on_exit_hooks 7 | 8 | on_exit() 9 | { 10 | for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0) 11 | do 12 | eval "${on_exit_hooks[$i]}" 13 | done 14 | } 15 | 16 | add_on_exit() 17 | { 18 | local n=${#on_exit_hooks[*]} 19 | on_exit_hooks[$n]="$*" 20 | if [[ $n -eq 0 ]] 21 | then 22 | trap on_exit EXIT 23 | fi 24 | } 25 | -------------------------------------------------------------------------------- /tools/xen/scripts/templatedelete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Usage: ./templatedelete.sh 4 | 5 | templateuuid="$1" 6 | 7 | xe template-param-set other-config:default_template=false uuid="$templateuuid" 8 | xe template-param-set is-a-template=false uuid="$templateuuid" 9 | xe vm-destroy uuid="$templateuuid" 10 | -------------------------------------------------------------------------------- /tools/xen/scripts/uninstall-os-vpx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright (c) 2011 Citrix 
Systems, Inc.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

set -ex

# By default, don't remove the templates
REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"}
if [ "$1" = "--remove-templates" ]; then
    REMOVE_TEMPLATES=true
fi

# Run an xe command with --minimal output (comma-separated values only).
xe_min()
{
    local cmd="$1"
    shift
    xe "$cmd" --minimal "$@"
}

# Destroy the VDI behind a VBD, but never the root disk (xvda / device 0).
destroy_vdi()
{
    local vbd_uuid="$1"
    local type=$(xe_min vbd-list uuid="$vbd_uuid" params=type)
    local dev=$(xe_min vbd-list uuid="$vbd_uuid" params=userdevice)
    local vdi_uuid=$(xe_min vbd-list uuid="$vbd_uuid" params=vdi-uuid)

    if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then
        xe vdi-destroy uuid="$vdi_uuid"
    fi
}

# Force-shutdown a VM if it is running, destroy its non-root disks and
# uninstall the VM itself.
uninstall()
{
    local vm_uuid="$1"
    local power_state=$(xe_min vm-list uuid="$vm_uuid" params=power-state)

    if [ "$power_state" != "halted" ]; then
        xe vm-shutdown vm="$vm_uuid" force=true
    fi

    for v in $(xe_min vbd-list vm-uuid="$vm_uuid" | sed -e 's/,/ /g'); do
        destroy_vdi "$v"
    done

    xe vm-uninstall vm="$vm_uuid" force=true >/dev/null
}

# Destroy a template's non-root disks and uninstall the template itself.
uninstall_template()
{
    local vm_uuid="$1"

    for v in $(xe_min vbd-list vm-uuid="$vm_uuid" | sed -e 's/,/ /g'); do
        destroy_vdi "$v"
    done

    xe template-uninstall template-uuid="$vm_uuid" force=true >/dev/null
}

# remove the VMs and their disks
for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do
    uninstall "$u"
done

# remove the templates
if [ "$REMOVE_TEMPLATES" == "true" ]; then
    for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do
        uninstall_template "$u"
    done
fi
--------------------------------------------------------------------------------
/tools/xen/templates/hosts.in:
--------------------------------------------------------------------------------
127.0.0.1 localhost
127.0.0.1 %HOSTNAME%
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

--------------------------------------------------------------------------------
/tools/xen/templates/interfaces.in:
--------------------------------------------------------------------------------
auto lo
iface lo inet loopback

# If eth3 is static, the order should not matter
# and eth0 will have the default gateway. If not,
# we probably want the default gateway to be
# what is on the public interface. Hence changed
# the order here.
auto eth3
iface eth3 inet static
    address @ETH3_IP@
    netmask @ETH3_NETMASK@

auto eth1
iface eth1 inet static
    address @ETH1_IP@
    netmask @ETH1_NETMASK@
    # disable TX checksum offloading on the VM traffic interface
    post-up ethtool -K eth1 tx off

auto eth2
iface eth2 inet static
    address @ETH2_IP@
    netmask @ETH2_NETMASK@
--------------------------------------------------------------------------------
/tools/xen/templates/menu.lst.in:
--------------------------------------------------------------------------------
default 0

title default
    root (hd0,0)
    kernel /boot/vmlinuz-@KERNEL_VERSION@ ro root=LABEL=vpxroot console=xvc0
    initrd /boot/initrd.img-@KERNEL_VERSION@
--------------------------------------------------------------------------------
/tools/xen/templates/ova.xml.in:
--------------------------------------------------------------------------------
# NOTE(review): the XML markup of this template was stripped by the text
# extraction that produced this dump; recover it from the original file.
--------------------------------------------------------------------------------
/tools/xen/xenrc:
--------------------------------------------------------------------------------
#!/bin/bash

#
# XenServer specific defaults for the /tools/xen/ scripts
# Similar to stackrc, you can override these in your localrc
#

# Name of this guest
GUEST_NAME=${GUEST_NAME:-DevStackOSDomU}

# Size of image
VDI_MB=${VDI_MB:-5000}
OSDOMU_MEM_MB=1024
OSDOMU_VDI_GB=8

# VM Password
GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}

# Host Interface, i.e. the interface on the nova vm you want to expose the
# services on.
Usually eth2 (management network) or eth3 (public network) and
# not eth0 (private network with XenServer host) or eth1 (VM traffic network)
# This is also used as the interface for the Ubuntu install
HOST_IP_IFACE=${HOST_IP_IFACE:-eth3}

#
# Our nova host's network info
#

# A host-only ip that lets the interface come up, otherwise unused
VM_IP=${VM_IP:-10.255.255.255}
MGT_IP=${MGT_IP:-172.16.100.55}
PUB_IP=${PUB_IP:-192.168.1.55}

# Public network
PUB_BR=${PUB_BR:-"xenbr0"}
PUB_DEV=${PUB_DEV:-eth0}
PUB_VLAN=${PUB_VLAN:--1}
PUB_NETMASK=${PUB_NETMASK:-255.255.255.0}

# VM network params
VM_NETMASK=${VM_NETMASK:-255.255.255.0}
VM_BR=${VM_BR:-""}
VM_VLAN=${VM_VLAN:-100}
VM_DEV=${VM_DEV:-eth0}

# MGMT network params
MGT_NETMASK=${MGT_NETMASK:-255.255.255.0}
MGT_BR=${MGT_BR:-""}
MGT_VLAN=${MGT_VLAN:-101}
MGT_DEV=${MGT_DEV:-eth0}

# Decide if you should enable eth0,
# the guest installer network
# You need to disable this on xcp-xapi on Ubuntu 12.04
ENABLE_GI=true

# Ubuntu install settings
UBUNTU_INST_RELEASE="oneiric"
UBUNTU_INST_TEMPLATE_NAME="Ubuntu 11.10 (64-bit) for DevStack"
# For 12.04 use "precise" and update template name
# However, for 12.04, you should be using
# XenServer 6.1 and later or XCP 1.6 or later
# 11.10 is only really supported with XenServer 6.0.2 and later
UBUNTU_INST_ARCH="amd64"
# The canonical Ubuntu archive lives at archive.ubuntu.com;
# "archive.ubuntu.net" is not the official mirror host and breaks
# the network install when it fails to resolve.
UBUNTU_INST_REPOSITORY="http://archive.ubuntu.com/ubuntu"
UBUNTU_INST_LOCALE="en_US"
UBUNTU_INST_KEYBOARD="us"
# network configuration for HOST_IP_IFACE during install
UBUNTU_INST_IP="dhcp"
UBUNTU_INST_NAMESERVERS=""
UBUNTU_INST_NETMASK=""
UBUNTU_INST_GATEWAY=""

# Load stackrc defaults
# then override with settings from localrc
cd ../.. && source ./stackrc && cd $TOP_DIR
--------------------------------------------------------------------------------
/unstack.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# **unstack.sh**

# Stops that which is started by ``stack.sh`` (mostly)
# mysql and rabbit are left running as OpenStack code refreshes
# do not require them to be restarted.
#
# Stop all processes by setting ``UNSTACK_ALL`` or specifying ``--all``
# on the command line

# Keep track of the current devstack directory.
TOP_DIR=$(cd $(dirname "$0") && pwd)

# Import common functions
source $TOP_DIR/functions

# Import database library
source $TOP_DIR/lib/database

# Load local configuration
source $TOP_DIR/stackrc

# Destination path for service data
DATA_DIR=${DATA_DIR:-${DEST}/data}

# Get project function libraries
source $TOP_DIR/lib/baremetal
source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/horizon
source $TOP_DIR/lib/swift
source $TOP_DIR/lib/quantum

# Determine what system we are running on.
This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
GetOSVersion

if [[ "$1" == "--all" ]]; then
    UNSTACK_ALL=${UNSTACK_ALL:-1}
fi

# Run extras
# ==========

# Give every extras.d plugin a chance to clean up after itself.
if [[ -d $TOP_DIR/extras.d ]]; then
    for i in $TOP_DIR/extras.d/*.sh; do
        [[ -r $i ]] && source $i unstack
    done
fi

if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
    source $TOP_DIR/openrc
    teardown_quantum_debug
fi

# Shut down devstack's screen to get the bulk of OpenStack services in one shot
SCREEN=$(which screen)
if [[ -n "$SCREEN" ]]; then
    SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }')
    if [[ -n "$SESSION" ]]; then
        screen -X -S $SESSION quit
    fi
fi

# Swift runs daemons
if is_service_enabled swift; then
    stop_swift
    cleanup_swift
fi

# Apache has the WSGI processes
if is_service_enabled horizon; then
    stop_horizon
fi

# Kill TLS proxies
if is_service_enabled tls-proxy; then
    killall stud
fi

# baremetal might have created a fake environment
if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
    cleanup_fake_baremetal_env
fi

SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*

# Get the iSCSI volumes
if is_service_enabled cinder; then
    cleanup_cinder
fi

# Only take down the shared infrastructure (databases, rabbit) when the
# caller explicitly asked for a full teardown.
if [[ -n "$UNSTACK_ALL" ]]; then
    # Stop MySQL server
    if is_service_enabled mysql; then
        stop_service mysql
    fi

    if is_service_enabled postgresql; then
        stop_service postgresql
    fi

    # Stop rabbitmq-server
    if is_service_enabled rabbit; then
        stop_service rabbitmq-server
    fi
fi

if is_service_enabled quantum; then
    stop_quantum
    stop_quantum_third_party
fi
--------------------------------------------------------------------------------