├── LICENSE ├── README.md ├── README.rst ├── bin └── fpga-cli.py ├── build_scripts ├── README.rst ├── boot_vm_and_install.sh ├── build_cloud.py ├── cloud.yaml ├── create_vm_clone.sh ├── redhat-72 │ └── newton │ │ ├── in_docker │ │ ├── in_docker_glance │ │ ├── in_flavor_and_image │ │ ├── in_fpga_db │ │ ├── in_fpga_exec │ │ ├── in_fpga_files │ │ ├── in_glance │ │ ├── in_horizon │ │ ├── in_keystone │ │ ├── in_nova │ │ ├── in_nova_compute │ │ ├── in_nova_docker_patches │ │ ├── in_nova_scheduler_filter │ │ ├── in_openstackclient_db_mq │ │ ├── in_provision_conf │ │ ├── out_docker │ │ ├── out_docker_glance │ │ ├── out_flavor_and_image │ │ ├── out_fpga_db │ │ ├── out_fpga_exec │ │ ├── out_fpga_files │ │ ├── out_glance │ │ ├── out_horizon │ │ ├── out_keystone │ │ ├── out_nova │ │ ├── out_nova_compute │ │ ├── out_nova_docker │ │ ├── out_nova_docker_patches │ │ ├── out_nova_scheduler_filter │ │ ├── out_openstackclient_db_mq │ │ └── out_provision_conf ├── ubuntu-1404 │ ├── liberty │ │ ├── in_docker │ │ ├── in_docker_glance │ │ ├── in_flavor_and_image │ │ ├── in_fpga_db │ │ ├── in_fpga_exec │ │ ├── in_fpga_files │ │ ├── in_glance │ │ ├── in_horizon │ │ ├── in_keystone │ │ ├── in_nova │ │ ├── in_nova_compute │ │ ├── in_nova_docker_patches │ │ ├── in_nova_scheduler_filter │ │ ├── in_openstackclient_db_mq │ │ ├── in_provision_conf │ │ ├── out_docker │ │ ├── out_docker_glance │ │ ├── out_flavor_and_image │ │ ├── out_fpga_db │ │ ├── out_fpga_exec │ │ ├── out_fpga_files │ │ ├── out_glance │ │ ├── out_horizon │ │ ├── out_keystone │ │ ├── out_nova │ │ ├── out_nova_compute │ │ ├── out_nova_docker │ │ ├── out_nova_docker_patches │ │ ├── out_nova_scheduler_filter │ │ ├── out_openstackclient_db_mq │ │ └── out_provision_conf │ └── mitaka │ │ ├── in_docker │ │ ├── in_docker_glance │ │ ├── in_flavor_and_image │ │ ├── in_fpga_db │ │ ├── in_fpga_exec │ │ ├── in_fpga_files │ │ ├── in_glance │ │ ├── in_horizon │ │ ├── in_keystone │ │ ├── in_nova │ │ ├── in_nova_compute │ │ ├── in_nova_docker_patches │ │ ├── in_nova_scheduler_filter │ │ ├── in_openstackclient_db_mq │ │ ├── in_provision_conf │ │ ├── out_docker │ │ ├── out_docker_glance │ │ ├── out_flavor_and_image │ │ ├── out_fpga_db │ │ ├── out_fpga_exec │ │ ├── out_fpga_files │ │ ├── out_glance │ │ ├── out_horizon │ │ ├── out_keystone │ │ ├── out_nova │ │ ├── out_nova_compute │ │ ├── out_nova_docker │ │ ├── out_nova_docker_patches │ │ ├── out_nova_scheduler_filter │ │ ├── out_openstackclient_db_mq │ │ └── out_provision_conf └── ubuntu-1604 │ └── newton │ ├── in_docker │ ├── in_docker_glance │ ├── in_flavor_and_image │ ├── in_fpga_db │ ├── in_fpga_exec │ ├── in_fpga_files │ ├── in_glance │ ├── in_horizon │ ├── in_keystone │ ├── in_nova │ ├── in_nova_compute │ ├── in_nova_docker_patches │ ├── in_nova_scheduler_filter │ ├── in_openstackclient_db_mq │ ├── in_provision_conf │ ├── out_docker │ ├── out_docker_glance │ ├── out_flavor_and_image │ ├── out_fpga_db │ ├── out_fpga_exec │ ├── out_fpga_files │ ├── out_glance │ ├── out_horizon │ ├── out_keystone │ ├── out_nova │ ├── out_nova_compute │ ├── out_nova_docker │ ├── out_nova_docker_patches │ ├── out_nova_scheduler_filter │ ├── out_openstackclient_db_mq │ └── out_provision_conf └── patches ├── nova_docker_liberty.patch ├── nova_docker_mitaka.patch ├── nova_docker_newton.patch ├── rhel_72-nova-14.0.2-tests.patch ├── rhel_72-nova-14.0.2.patch ├── ubuntu_14.04-nova-12.0.6.patch └── ubuntu_14.04-nova-13.1.3.patch /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | 
Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | DISCONTINUATION OF PROJECT. 
This project will no longer be maintained by Intel.

This project has been identified as having known security escapes.

Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project.

Intel no longer accepts patches to this project.
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
FPGA Integration for OpenStack* Cloud
=====================================

The aim of the FPGA Integration for OpenStack [*]_ Cloud project is to make
FPGA accelerators available as resources to OpenStack users through Docker
containers.

Conventions
-----------

This document contains many shell session snippets, which follow a certain
convention. A typical Unix/Linux environment is multiuser, and to distinguish
between ordinary and privileged users the shell uses a different
`shell prompt`_. In the snippets below, a **$** at the beginning of a command
line indicates a non-privileged user, for example:

.. code:: shell-session
   :number-lines:

   $ whoami  # this is a comment!
   ubuntu

So the prompt in this case is ``$`` and the command is ``whoami``. Line 2 has
no sign at the beginning, which means it is the output of the command - the
user name. Note that the **#** sign is used for comments, which are ignored by
the shell.

Besides the **$** prompt there is also **#**, which means that the commands
following it have to be executed as the ``root`` (privileged) user:

.. code:: shell-session
   :number-lines:

   $ sudo su -
   # date  # this will print current date
   Wed Aug 31 13:23:12 CEST 2016
   # whoami
   root

We have switched to the privileged user with the ``su`` command (hence the
**#** prompt sign) and executed the ``date`` and ``whoami`` commands as
**root**. Lines 3 and 5 show the output of those commands. Note that the
remaining shell sessions in this document have no numbered lines, so the above
example would look like this:

.. code:: shell-session

   $ sudo su -
   # date  # this will print current date
   Wed Aug 31 13:23:12 CEST 2016
   # whoami
   root

OpenStack installation
----------------------

Hardware requirements
+++++++++++++++++++++

This demo of the FPGA integration in OpenStack needs three machines. The
minimum requirements for each of them are:

* Processor with 2+ cores
* 4GB RAM
* 10GB drive
* 2 NICs (optional - the management network is all we need for demo purposes)

One of the nodes should have an FPGA installed; however, this is not necessary
for demonstrating the OpenStack part - a mock command can be used instead.

If needed, virtual machines might be used as well.
Software assumptions:

* A command line application for burning/erasing/getting the status of the
  FPGA is required (``fpga-cli.py`` is provided to give an idea of the
  expected interface)

Supported OpenStack versions
++++++++++++++++++++++++++++

* Liberty: Ubuntu 14.04
* Mitaka: Ubuntu 14.04
* Newton: Ubuntu 16.04, RHEL 7.2


Ubuntu installation and configuration
+++++++++++++++++++++++++++++++++++++

Note that the following guide is an example based on the Ubuntu operating
system.

The recommended installation source is the `server version of Ubuntu`_. This
demo was prepared using either the 14.04 LTS or the 16.04 LTS version. The
installation is straightforward, although it might require providing some
information (like proxy servers) depending on the environment. It is best to
keep the installed system minimal. For what it's worth, it might be useful to
install an OpenSSH server on each node.

Several configuration options should be verified:

#. ``/etc/hostname`` - provide a unique host name for each node (for example
   "controller", "compute1", "compute2")
#. ``/etc/network/interfaces`` - provide the management and public network
   configuration.
#. ``/etc/hosts`` - the idea is that nodes should be pingable on the
   management network using their hostnames - for example:

   .. code::

      ...
      192.168.0.10 controller
      192.168.0.11 compute1
      192.168.0.12 compute2
      ...

   The following command, executed on ``compute1``, should then succeed:

   .. code:: shell-session

      # ping controller
      PING controller (192.168.0.10) 56(84) bytes of data.
      64 bytes from controller (192.168.0.10): icmp_seq=1 ttl=64 time=0.160 ms
      64 bytes from controller (192.168.0.10): icmp_seq=2 ttl=64 time=0.221 ms
      64 bytes from controller (192.168.0.10): icmp_seq=3 ttl=64 time=0.157 ms

OpenStack installation
++++++++++++++++++++++

The main installation process is described in the `OpenStack documentation`_,
with the following assumptions:

* The installed services are narrowed down to:

  * Keystone
  * Nova (on the controller and on the computes)
  * Glance

* `nova-docker`_ [1]_ should be installed

Docker and nova-docker installation and configuration
++++++++++++++++++++++++++++++++++++++++++++++++++++++

The additional package `nova-docker`_ and Docker itself are required on the
compute nodes, along with the following changes:

.. code:: shell-session

   # # Depending on OpenStack version, use appropriate branch:
   # # stable/liberty - for Liberty release
   # # stable/mitaka - for Mitaka release
   # # master - for Newton release
   # git clone https://github.com/openstack/nova-docker -b stable/liberty
   # cd nova-docker
   # patch -Np1 -i "[/path/to/this/repository]/patches/nova_docker_[OpenStack version].patch"
   # pip install .
   # # this one is optional; useful if you want to perform a simple test
   # docker pull busybox
   # docker save -o busyimg busybox

Alter ``/etc/nova/nova.conf`` on the compute nodes:

.. code:: ini

   [DEFAULT]
   ...
   compute_driver=novadocker.virt.docker.DockerDriver

And ``/etc/glance/glance-api.conf`` on the controller node:

.. code:: ini

   [DEFAULT]
   ...
   container_formats=ami,ari,aki,bare,ovf,ova,docker
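These settings are only picked up after the affected services are restarted.
As a minimal reminder - assuming the default service names shipped with the
Ubuntu packages - that boils down to:

.. code:: shell-session

   # service nova-compute restart   # on the compute nodes
   # service glance-api restart     # on the controller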
Follow the `docker installation guide`_, which basically boils down to the
following steps:

.. code:: shell-session

   # apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 \
     --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
   # # Use either trusty or xenial repository
   # echo 'deb https://apt.dockerproject.org/repo ubuntu-trusty main' >> \
     /etc/apt/sources.list
   # apt-get update
   # apt-get purge lxc-docker
   # apt-get install docker-engine

Change ``/etc/nova/nova-compute.conf`` on the compute nodes to look like the
following:

.. code:: ini

   [DEFAULT]
   compute_driver=novadocker.virt.docker.DockerDriver

Add the users ``nova`` and ``ubuntu`` to the ``docker`` group:

.. code:: shell-session

   # usermod -a -G docker nova
   # usermod -a -G docker ubuntu

Since networking is not relevant for the moment (but that is obviously subject
to change), installing nova-network is enough (on the compute nodes):

.. code:: shell-session

   # apt-get install nova-network

For convenience, there are scripts that automate the installation process
under the ``build_scripts`` directory.

Installation process of modifications
-------------------------------------

With OpenStack up and running, it is time to install the modifications and
configure the stack to be FPGA aware.

#. On the controller, alter the line containing ``container_formats`` in
   ``/etc/glance/glance-api.conf`` to look like this:

   .. code:: ini

      container_formats = ami,ari,aki,bare,ovf,ova,docker,fpga

#. Clone `this repository`_

#. Now, patch the installed nova files with the provided patches:

   .. code:: shell-session

      # cd /usr/lib/python2.7/dist-packages/nova
      # nova_ver=$(dpkg -l |grep -w python-nova | sed -e "s/ii\s\+python-nova\s\+2:\([0-9.]\+\).*/\1/g")
      # echo $nova_ver
      12.0.5
      # patch -Np1 -i "[/path/to/this/repository]/patches/ubuntu_[14.04 or 16.04]-nova-${nova_ver}.patch"
      patching file compute/resource_tracker.py
      patching file db/sqlalchemy/migrate_repo/versions/303_add_fpga_field.py
      patching file db/sqlalchemy/migrate_repo/versions/304_add_fpga_instance_field.py
      patching file db/sqlalchemy/models.py
      patching file objects/block_device.py
      patching file objects/compute_node.py
      ...

#. Append the following lines to the ``[DEFAULT]`` section of
   ``/etc/nova/nova.conf`` on the **FPGA node**:

   .. code:: ini

      [DEFAULT]
      ...
      fpga_access = True
      fpga_simulation_mode = False

#. Alter ``/etc/nova/nova.conf`` on the **controller node** so that the
   following options are changed or included in the ``[DEFAULT]`` section:

   .. code:: ini

      [DEFAULT]
      ...
      scheduler_available_filters = nova.scheduler.filters.all_filters
      scheduler_available_filters = nova.scheduler.filters.fpga_filter.FpgaFilter
      scheduler_default_filters = RamFilter,ComputeFilter,AvailabilityZoneFilter,ImagePropertiesFilter,FpgaFilter

#. Issue the necessary database migration (only on the controller):

   .. code:: shell-session

      # nova-manage db sync
#. *Optional*. You can install the ``fpga-cli.py`` command from the ``bin``
   directory into ``/usr/bin`` if you are installing without real FPGA
   hardware, if for some reason you don't have the real command available, or
   if you want to wrap the real commands into a script or executable with a
   compatible interface. In the last case the script code will need to be
   modified.

   The rootwrap configuration also has to be modified so that the compute node
   is allowed to run the stub command. Append the following line to
   ``/etc/nova/rootwrap.d/compute.filters`` and
   ``/etc/nova/rootwrap.d/network.filters``:

   .. code::

      fpga-cli.py: CommandFilter, fpga-cli.py, root

   and provide the configuration for it in the ``[DEFAULT]`` section of
   ``/etc/nova/nova-compute.conf``:

   .. code:: ini

      [DEFAULT]
      ...
      fpga_exec = fpga-cli.py

   If you are using a program which implements the same interface that
   ``fpga-cli.py`` defines (see below for a description), replace
   ``fpga-cli.py`` with the proper executable file name. Such an executable is
   expected to be placed somewhere in ``$PATH``, so it is easily reachable.

#. Create the images and a new flavor. The first image has the artificial
   format *fpga* and should contain a zip archive (bitfiles of a certain
   accelerator, additional files, and a manifest file) whose manifest should
   be propagated to the image metadata (this process is not done here). The
   second image is the system image (here: the simple busybox image we created
   earlier), which should contain all the tools required to use the
   accelerator and, importantly, should have a ``docker_devices`` key
   containing the list of devices from the ``/dev`` filesystem that should be
   passed to the container. The flavor metadata should point to the right
   accelerator binaries. Below is an example of how to match those three
   entities together for an **LZO compression** accelerator.

   * FPGA `IP-Core`_ files as a zip archive:

     .. code:: shell-session

        # glance image-create --id dd834aa4-f950-40e6-8c23-9dab7f3f0138 \
          --name lzo_compression --disk-format raw --container-format fpga \
          --file lzo_compression.zip
        # glance image-update \
          --property manifest="$(cat manifest.json)" \
          dd834aa4-f950-40e6-8c23-9dab7f3f0138

     where ``manifest.json`` is the manifest file contained in the
     ``lzo_compression.zip`` package (double quotes around
     ``$(cat manifest.json)`` are needed so that the command substitution is
     actually performed).

     Providing the ``id`` explicitly is not necessary, but whatever id the
     image gets must be identical to the one used in the flavor metadata.

   * Docker image with the system and the appropriate software to use the
     accelerator:

     .. code:: shell-session

        # docker save ubuntu_lzo | glance image-create \
          --id 064704cb-b416-4acf-b149-b7272e1a9a20 --name ubuntu_lzo \
          --disk-format raw --container-format docker
        # glance image-update \
          --property docker_devices='/dev/fpga1,/dev/fpga0,/dev/fpga2' \
          064704cb-b416-4acf-b149-b7272e1a9a20

   * New flavor. Note that the value passed to the ``hw:fpga_ip_id`` key is
     the same as the id of the FPGA package image created above:

     .. code:: shell-session

        # nova flavor-create fpga-lzo 6 512 1 1
        # nova flavor-key fpga-lzo set \
          "hw:fpga_ip_id"="dd834aa4-f950-40e6-8c23-9dab7f3f0138"

   .. important::

      Instead of the flavor, the information passed with the ``hw:fpga_ip_id``
      key might be attached to the corresponding Docker image in Glance, so
      that only two entities are needed instead of three. This, however, might
      be dangerous, because images can be created by users while flavors
      cannot. Such a user-created image might contain a malicious IP-Core,
      which may even physically destroy the FPGA device.

Now restart all nova services on all nodes and you are done. To boot an
instance using the newly created flavor and the Docker image, just issue the
command:

.. code:: shell-session

   # nova boot --flavor 6 --image ubuntu_lzo dcr1
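To double-check that the instance really came up as a Docker container with
the FPGA device(s) passed through, the standard tools are enough. A quick,
illustrative check (the container id below is a placeholder):

.. code:: shell-session

   # nova list                      # the instance should reach ACTIVE state
   # ssh compute1 docker ps         # the container backing the instance
   # ssh compute1 docker inspect --format '{{ .HostConfig.Devices }}' <container-id>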
Technical details
-----------------

The integration with the OpenStack code base makes, as described in the
instructions above, the following assumptions:

* Support for Docker containers only, thus nova-docker has to be used
* Some kind of command line tool for programming, erasing and getting the
  status of the FPGA, with the interface described below
* On the system level, an accelerator programmed on the FPGA should expose
  some kind of interface which can be passed to the container (i.e. a device
  from the /dev filesystem, a socket, a pipe etc.)

As for the OpenStack code base, the nova components were changed as follows:

* ``compute/resource_tracker.py``

  - added a new config option for accessing the FPGA by the compute host
  - added a new method for updating FPGA resources
  - added a new ``scheduler/filters/fpga_filter.py`` (a simplified sketch of
    such a filter is shown at the end of this section)
  - call to ``_update_fpga_resource`` from ``_update_available_resource``

* ``db/sqlalchemy/migrate_repo`` - added two migrations for new fields in the
  tables ``compute_nodes`` and ``instances``
* ``db/sqlalchemy/models.py``

  - added ``fpga_regions`` and ``fpga_regions_used`` fields to the
    ``ComputeNode`` model
  - added the ``fpga_device`` field to the ``Instance`` model

* several objects have bumped their versions due to the change of the
  ``Instance`` and ``ComputeNode`` classes:

  - ``BlockDeviceMapping``
  - ``BlockDeviceMappingList``
  - ``ComputeNodeList``
  - ``FixedIP``
  - ``FixedIPList``
  - ``FloatingIP``
  - ``FloatingIPList``
  - ``InstanceListv1``
  - ``SchedulerRetries``
  - ``Service``
  - ``ServiceList``

* ``scheduler/host_manager`` - the ``HostState`` class was updated to make use
  of the ``fpga_regions`` and ``fpga_regions_used`` fields

* unit tests were adapted to the above changes

* ``fpga`` - new module that contains the FPGA programming/erasing logic

* ``compute/manager`` - triggers methods from the ``fpga`` module to
  program/erase the FPGA

The nova-docker driver was adapted to accept a list of devices (file:
``novadocker/virt/docker/driver.py``).

All interaction with the FPGA goes through the command line utility, whose
interface is described in the next section.
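The scheduler filter mentioned above is conceptually small. The following is
an illustrative sketch only - not the shipped patch: it assumes the
``fpga_regions``/``fpga_regions_used`` fields added to ``HostState`` and the
``hw:fpga_ip_id`` flavor key described earlier, and it uses the Newton-style
filter API (``spec_obj``); Liberty/Mitaka filters receive a
``filter_properties`` dict instead.

.. code:: python

   from nova.scheduler import filters


   class FpgaFilter(filters.BaseHostFilter):
       """Pass only hosts that still have a free FPGA region (sketch)."""

       def host_passes(self, host_state, spec_obj):
           extra_specs = spec_obj.flavor.extra_specs or {}
           if 'hw:fpga_ip_id' not in extra_specs:
               # The flavor does not request an FPGA IP-Core, so any host is fine.
               return True
           total = getattr(host_state, 'fpga_regions', 0) or 0
           used = getattr(host_state, 'fpga_regions_used', 0) or 0
           # Only hosts with at least one unprogrammed region can take the instance.
           return (total - used) > 0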
CLI for FPGA interaction
------------------------

There should be a command line utility available - let's call it
``fpga-cli.py`` - which will be used for programming, erasing and getting the
status of the FPGA.

Such a utility should provide the following interface:

#. ``burn``. This argument requires another one, which is the identifier of an
   `IP-Core`_ image stored in the Glance service. The underlying logic should
   be able to fetch such an image, and as a result of programming it should
   return a unique identifier which makes it possible to find and identify the
   right region for the erase procedure. This could be a *uuid* or any other
   string not exceeding 256 characters. For example:

   .. code:: shell-session

      # fpga-cli.py burn image-id
      a0399bc1-cb67-4548-b0b8-aa95a91402d3

   In case of error, it will return a non-zero value, for example:

   .. code:: shell-session

      # fpga-cli.py burn bad-image-id; echo $?
      Error: cannot program `bad-image-id' - no matching hardware found
      64

#. ``erase``. Another argument is required, and it should be the identifier
   returned by a successful ``burn`` command. No output is returned besides
   the exit code, which in case of success is 0. For example:

   .. code:: shell-session

      # fpga-cli.py erase a0399bc1-cb67-4548-b0b8-aa95a91402d3; echo $?
      0

   In case of error, it will return a non-zero value, for example:

   .. code:: shell-session

      # fpga-cli.py erase bad_id; echo $?
      Error: cannot erase FPGA device with id `bad_id' - unknown region
      33

#. ``status``. Command for providing information about the FPGA:

   .. code:: shell-session

      # fpga-cli.py status
      Used regions: 1/2

   This means that the FPGA has two regions, one of which is occupied. An
   error situation will return a non-zero exit code:

   .. code:: shell-session

      # fpga-cli.py status; echo $?
      Error: FPGA device is not accessible
      127

License
-------

This work is licensed under the Apache 2.0 license. See LICENSE for details.

Version
-------

The current version of this work is 0.1, and it is treated as an alpha/PoC
stage.

.. [*] Other names and brands may be claimed as the property of others
.. [1] Until virtualization support is completed, the Docker driver is used as
   a workaround for making FPGA acceleration available to the guest.

.. _this repository: https://github.com/intelsdi-x/fpga-nova
.. _server version of Ubuntu: http://www.ubuntu.com/download/server
.. _OpenStack documentation: http://docs.openstack.org/
.. _nova-docker: https://github.com/openstack/nova-docker
.. _docker installation guide: https://docs.docker.com/engine/installation/linux/ubuntulinux/
.. _IP-Core: https://en.wikipedia.org/wiki/Semiconductor_intellectual_property_core
.. _shell prompt: https://en.wikipedia.org/wiki/Command-line_interface#Command_prompt
--------------------------------------------------------------------------------
/bin/fpga-cli.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# encoding: utf-8
"""
A stub with a semi-functional interface, suitable for testing an FPGA-enabled
OpenStack setup.
See --help for this command and README.rst for more details.
7 | """ 8 | 9 | import argparse 10 | import json 11 | import os 12 | import sys 13 | 14 | 15 | # change this to adjust the amount of regions 16 | FPGA_REGIONS = 2 17 | JSON = "/tmp/_fpga_data_stub.json" 18 | 19 | 20 | def write_data(data=None): 21 | """create and wrtie initial data to the json file""" 22 | 23 | if not data: 24 | data = {"total": FPGA_REGIONS, 25 | "used": 0, 26 | 'regions': [0 for _ in range(FPGA_REGIONS)]} 27 | 28 | with open(JSON, "w") as fobj: 29 | json.dump(data, fobj) 30 | 31 | 32 | def read_data(): 33 | """read json file and return deserialized dict""" 34 | with open(JSON) as fobj: 35 | return json.load(fobj) 36 | 37 | 38 | class RetVal(object): 39 | """Simple class for result storage""" 40 | def __init__(self, message="", exit_code=0): 41 | self.message = message 42 | self.exit_code = exit_code 43 | 44 | 45 | def status(args): 46 | """Get status""" 47 | if os.getenv("FPGA_ERROR"): 48 | return RetVal("Error!", 127) 49 | 50 | try: 51 | data = read_data() 52 | except (IOError, ValueError): 53 | return RetVal("Error!", 128) 54 | 55 | return RetVal("Used regions: %(used)s/%(total)s" % data) 56 | 57 | 58 | def erase(args): 59 | """erase fpga""" 60 | if os.getenv("FPGA_ERROR"): 61 | return RetVal("Unable to erase FPGA", 31) 62 | 63 | try: 64 | data = read_data() 65 | except (IOError, ValueError): 66 | return RetVal("Unable to erase FPGA", 32) 67 | 68 | index = data['total'] - (data['total'] - data['used']) - 1 69 | if index < 0: 70 | return RetVal("Unable to erase FPGA device %s no such device" % 71 | args.device, 33) 72 | 73 | data['regions'][index] = 0 74 | data['used'] = data['used'] - 1 75 | write_data(data) 76 | 77 | return RetVal() 78 | 79 | 80 | def burn(args): 81 | """Burn the fpga stub! :)""" 82 | assert args.image_id 83 | if os.getenv("FPGA_ERROR"): 84 | return RetVal("Unable to burn FPGA", 64) 85 | 86 | try: 87 | data = read_data() 88 | except (IOError, ValueError): 89 | return RetVal("Unable to burn FPGA", 64) 90 | 91 | if all(data['regions']): 92 | return RetVal("Unable to burn FPGA - no available regions", 65) 93 | 94 | index = data['regions'].index(0) 95 | data['regions'][index] = 1 96 | data['used'] += 1 97 | write_data(data) 98 | 99 | return RetVal("/dev/fpga%s" % index) 100 | 101 | 102 | def main(): 103 | """main""" 104 | 105 | usage = ("Change FPGA_REGIONS in this file, to set desired amount of " 106 | "regions to mock.\n" 107 | "After that, remove `" + JSON + "' file for refreshing the\n" 108 | "state and amount of regions.") 109 | parser = argparse.ArgumentParser(description=usage) 110 | subparser = parser.add_subparsers() 111 | 112 | _status = subparser.add_parser("status", help="Mimic FPGA status. Set env" 113 | " variable FPGA_ERROR to force the error" 114 | " situation.") 115 | _status.set_defaults(func=status) 116 | 117 | _erase = subparser.add_parser("erase", help="Mimic erase of the FPGA " 118 | "device. Set env variable FPGA_ERROR to " 119 | "make it fail.") 120 | _erase.add_argument("device") 121 | _erase.set_defaults(func=erase) 122 | 123 | _burn = subparser.add_parser("burn", help="Mimic flashing provided image " 124 | "to the FPGA. 
Set env variable FPGA_ERROR to" 125 | " make it fail.") 126 | _burn.add_argument("image_id") 127 | _burn.set_defaults(func=burn) 128 | 129 | args = parser.parse_args() 130 | 131 | if not os.path.exists(JSON): 132 | write_data() 133 | 134 | retval = args.func(args) 135 | 136 | if retval.exit_code: 137 | sys.stderr.write(retval.message + "\n") 138 | else: 139 | sys.stdout.write(retval.message + "\n") 140 | 141 | sys.exit(retval.exit_code) 142 | 143 | 144 | if __name__ == '__main__': 145 | main() 146 | -------------------------------------------------------------------------------- /build_scripts/README.rst: -------------------------------------------------------------------------------- 1 | Build scripts for deploying the demo on VMs 2 | =========================================== 3 | 4 | This directory contains helper scripts for deploying OpenStack with 5 | FPGA integration with ease on `VirtualBox`_ or real hardware, just for 6 | development and/or demo purposes. 7 | 8 | Supported OpenStack versions and distros 9 | ++++++++++++++++++++++++++++++++++++++++ 10 | - Liberty: `Ubuntu 14.04 server`_ 11 | - Mitaka: `Ubuntu 14.04 server`_ 12 | - Newton: `Ubuntu 16.04 server`_, `RHEL 7.2`_ 13 | 14 | Requirements 15 | ++++++++++++ 16 | 17 | * VirtualBox is used to set up the cloud. ``VBoxManage`` command line tool 18 | should be installed. 19 | * Virtualized hardware: 20 | 21 | * 2 CPU cores 22 | * 4096MB RAM 23 | * 3 network interfaces configured as follows: 24 | 25 | * 1st is NAT (with DHCP) for internet access 26 | * 2nd is static internal network set to ``192.168.1.2`` 27 | * 3rd is static host only adapter set to ``192.168.56.2`` for accessing 28 | from host, if using VirtualBox VMs. 29 | * 15GB storage 30 | 31 | * Preconfigured operating system: 32 | 33 | * Typical installation with only ssh server enabled 34 | * Main user: ``openstack`` 35 | * Installed software (note, that most of them are for help with 36 | development/debugging): 37 | 38 | * vim 39 | * git 40 | * ipython 41 | * pep8 42 | * pylint 43 | * bash-completion 44 | * exuberant-ctags 45 | * htop 46 | * python-ipdb 47 | * python-pip 48 | * tmux 49 | * mc 50 | * RHEL operating system has to be registered with Red Hat 51 | Subscription Management and attached to RHEL entitlements. 52 | * Configuration 53 | 54 | * Sudo: 55 | 56 | .. code:: 57 | 58 | %sudo ALL=(ALL) NOPASSWD:ALL 59 | 60 | * Network: 61 | 62 | * First network interface: DHCP 63 | * Second network interface: static 192.168.1.2/24 64 | * Third network interface: static 192.168.56.2/24 65 | 66 | * Host 67 | 68 | * SSH key exchange from host is required 69 | * The following line, should be placed in ``/etc/hosts``: 70 | 71 | .. code:: 72 | 73 | 192.168.56.2 base_openstack_vm 74 | 75 | Any other software may be installed in front as well. 76 | 77 | Build the cloud 78 | +++++++++++++++ 79 | 80 | For setting up the cloud, configuration file needs to be provided. The following 81 | yaml may be used as an example to set up Liberty on Ubuntu 14.04: 82 | 83 | .. 
code:: yaml

   base_vm: ubuntu-1404
   base_user: openstack
   base_distribution: ubuntu
   base_hostname: base_openstack_vm
   openstack_version: liberty
   config: {}
   nodes:
     controller:
       ips: [192.168.1.3, 192.168.56.3]
       role: controller
       modules: [provision_conf, openstackclient_db_mq, keystone, glance,
                 docker, docker_glance, nova, fpga_files, fpga_db,
                 nova_scheduler_filter, flavor_and_image, horizon]
     compute1:
       ips: [192.168.1.4, 192.168.56.4]
       role: compute
       modules: [provision_conf, nova_compute, docker, nova_docker_patches,
                 fpga_files, fpga_exec]
     compute2:
       ips: [192.168.1.5, 192.168.56.5]
       role: compute
       modules: [provision_conf, nova_compute, docker, nova_docker_patches,
                 fpga_files]

Where:

* base_vm is the name of the VirtualBox VM that will be used as the base
  distro for all OpenStack nodes
* base_user - username on the ``base_vm`` VM
* base_distribution - ``base_vm`` OS distribution, one of ``ubuntu``,
  ``redhat``. It needs to be specified because the network configuration is
  done differently depending on the OS distribution
* base_hostname - host name under which the ``base_vm`` record is placed in
  ``/etc/hosts``. This host name is used in the early phase of provisioning
  the cloned VM with its configuration.
* openstack_version - one of ``liberty``, ``mitaka``, ``newton`` (see
  `Supported OpenStack versions and distros`_)
* config is a dictionary with defaults for the entire cloud (currently these
  are values for the OS_* variables of the OpenStack environment)
* nodes defines the machine configurations to be generated (and the VMs to be
  cloned), where each key defines the VM name and its hostname at the same
  time, and the items under it are:

  * "role" is one of "compute" or "controller"
  * "ips" is the list of NIC addresses - the first one is the internal
    network, the second is the host only adapter
  * "modules" is the list of configuration chunks, which will be
    pre-processed and saved following the *hostname*.sh convention

Write the config into ``cloud.yaml``; it is now possible to prepare the VMs on
the host:

.. code:: shell-session

   $ ./build_cloud.py cloud.yaml

This will clone ``base_vm`` into VMs whose names correspond to the host names.
Note that the script which clones machines will refuse to clone if a machine
already exists.

Machines can be removed using the VirtualBox GUI, the ``VBoxManage`` and
``rm`` tools, or by providing the ``-r`` (``--remove``) parameter, which will
power off (if needed) and remove all the virtual machines and their files, if
they already exist in the yaml definition. Again, note that the names of the
VMs should match their hostnames in ``/etc/hosts`` and the node names in the
yaml cloud definition. Please be careful with this option, since there is no
confirmation prompt before removal.

It is also possible to just generate the installation scripts, without cloning
the VMs:

.. code:: shell-session

   $ ./build_cloud.py -d cloud.yaml

This will produce a main script (and a directory with modules) for each node,
which can be run on the destination hosts. Scripts are named ``hostname.sh``,
and the directory ``hostname_modules``.
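When the scripts are generated with ``-d`` for real (non-VirtualBox) hosts,
they have to be copied over and started by hand. A minimal sketch, assuming
root SSH access to a node named ``controller`` (the generated script expects
its module directory under ``/root``):

.. code:: shell-session

   $ scp controller.sh root@controller:/root/
   $ scp -r controller_modules root@controller:/root/
   $ ssh root@controller bash /root/controller.sh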
163 | 164 | Other parameters that may be passed to ``build_cloud.py``: 165 | 166 | * ``--skip-hosts`` - do not clone machine, just generate install scripts 167 | * ``--remove`` - dispose existing VMs 168 | * ``--auto-install`` - automatically start VMs and run OpenStack installation 169 | * ``--ssh-key`` - path to private SSH key used to clone git repositories 170 | * ``-v`` - be verbose. Adding more "v" will increase verbosity 171 | * ``-q`` - be quiet. Adding more "q" will decrease verbosity 172 | 173 | Installing OpenStack 174 | ++++++++++++++++++++ 175 | 176 | Follow the next steps only if ``--auto-install`` parameter was not specified 177 | in ``build_cloud.py``. Otherwise, Openstack installation on freshly cloned 178 | images needs to be triggered. Using the above example and assuming that 179 | ``/etc/hosts`` is filled with newly created machines, they can be started 180 | as follows: 181 | 182 | .. code:: shell-session 183 | 184 | $ VBoxManage startvm controller --type headless 185 | $ VBoxManage startvm compute1 --type headless 186 | $ VBoxManage startvm compute2 --type headless 187 | 188 | Next, connect to every node (`tmux`_ can be helpful for dividing terminal 189 | window, and synchronizing panes to enter command in all nodes at once), and do: 190 | 191 | .. code:: shell-session 192 | 193 | $ ssh @controller 194 | $ sudo su - 195 | # ./controller.sh 196 | 197 | for compute1 and compute2 nodes the commands are similar. After a (rather long) 198 | while, the setup should be up and running! 199 | 200 | .. _Ubuntu 14.04 server: http://releases.ubuntu.com/14.04/ 201 | .. _Ubuntu 16.04 server: http://releases.ubuntu.com/16.04/ 202 | .. _RHEL 7.2: https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.2/x86_64/product-software 203 | .. _VirtualBox: https://www.virtualbox.org/ 204 | .. _tmux: https://tmux.github.io/ 205 | -------------------------------------------------------------------------------- /build_scripts/boot_vm_and_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Log everything to a log file named $HOSTNAME.log 4 | exec 3>&1 1>>${HOSTNAME}.log 2>&1 5 | 6 | _validate_input() { 7 | if [ -z $IP_ADDRESS ]; then 8 | echo "IP_ADDRESS environment variable is not set. Exiting." 9 | exit 1 10 | fi 11 | if [ -z $HOSTNAME ]; then 12 | echo "HOSTNAME environment variable is not set. Exiting." 13 | exit 1 14 | fi 15 | if [ -z $VMUSER ]; then 16 | echo "VMUSER environment variable is not set. Exiting." 17 | exit 1 18 | fi 19 | } 20 | 21 | _start_vm() { 22 | # It may happen that VM is not present on the list of 'runningvms' 23 | # and despite this, it cannot be started again because it is still locked 24 | # by a session. Just waiting a while to avoid such situation. 25 | sleep 10 26 | VBoxManage startvm $HOSTNAME --type headless 27 | while true; do 28 | ssh $VMUSER@$IP_ADDRESS hostname >/dev/null 2>&1 29 | if [ $? -ne 0 ]; then 30 | echo "Waiting for ${HOSTNAME} to get up and running" 31 | sleep 2 32 | continue 33 | fi 34 | echo "VM ${HOSTNAME} is ready." 35 | break 36 | done 37 | } 38 | 39 | _run_install_script() { 40 | echo "Starting Openstack installation script." 
41 | ssh $VMUSER@$IP_ADDRESS "sudo bash /root/${HOSTNAME}.sh" 42 | } 43 | 44 | _validate_input 45 | _start_vm 46 | _run_install_script 47 | -------------------------------------------------------------------------------- /build_scripts/build_cloud.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Simple config builder and cloner for VirtualBox virtual machines. 5 | 6 | See README.rst for more details. 7 | """ 8 | import argparse 9 | import errno 10 | import logging 11 | import os 12 | import re 13 | import shutil 14 | import subprocess 15 | import sys 16 | import threading 17 | 18 | import yaml 19 | 20 | 21 | def setup_logger(args): 22 | """Setup logger format and level""" 23 | level = logging.WARNING 24 | if args.quiet: 25 | level = logging.ERROR 26 | if args.quiet > 1: 27 | level = logging.CRITICAL 28 | if args.verbose: 29 | level = logging.INFO 30 | if args.verbose > 1: 31 | level = logging.DEBUG 32 | logging.basicConfig(level=level, 33 | format="%(asctime)s %(levelname)s: %(message)s") 34 | 35 | 36 | class Build(object): 37 | """Build the configuration scripts and optionally copy it to cloned VMs""" 38 | VM_RE = re.compile(r'^"(?P.+)"\s.*') 39 | 40 | def __init__(self, args, config): 41 | self._clone = not args.dont_clone 42 | self._remove_vms = args.remove 43 | self._skip_hosts = args.skip_hosts 44 | self._auto_install = args.auto_install 45 | self.ssh_key = args.ssh_key 46 | self.config = config['config'] 47 | self.context = {'controller': [], 'compute': []} 48 | self.hosts = config.get('nodes', {}) 49 | self.openstack_version = config.get('openstack_version', '') 50 | self.base_vm = config.get('base_vm', '') 51 | self.base_user = config.get('base_user') 52 | self.base_distribution = config.get('base_distribution') 53 | self.base_hostname = config.get('base_hostname') 54 | self.modules_path = os.path.join(os.path.dirname(__file__), 55 | self.base_vm, 56 | self.openstack_version) 57 | if self.ssh_key: 58 | self.fpga_nova_repo = 'git@github.com:intelsdi-x/' \ 59 | 'fpga-nova.git' 60 | else: 61 | self.fpga_nova_repo = 'https://github.com/intelsdi-x/' \ 62 | 'fpga-nova.git' 63 | 64 | for hostname, data in self.hosts.items(): 65 | self.context[data['role']].append(hostname) 66 | 67 | def build(self): 68 | """Build conf/clone vm""" 69 | self.validate_cloudconf() 70 | self.create_configs() 71 | self.create_cleanup() 72 | if self._clone: 73 | self.clone_vms() 74 | if self._auto_install: 75 | self.auto_install() 76 | 77 | def validate_cloudconf(self): 78 | """Make sure the specified cloud config file contains valid data""" 79 | 80 | if not os.path.isdir(self.modules_path): 81 | logging.info("Building OpenStack '%s' on '%s' is not supported." 82 | " Exiting.", self.openstack_version, self.base_vm) 83 | sys.exit(1) 84 | 85 | required_vars = [self.openstack_version, self.base_vm, self.base_user, 86 | self.base_distribution, self.base_hostname] 87 | for var in required_vars: 88 | if not var: 89 | logging.info("Cloud config file does not contain necessary " 90 | "data. Fill in the config and re-run the " 91 | "script. 
Exiting.") 92 | sys.exit(1) 93 | 94 | def _check_vms_existence(self, command="vms"): 95 | """Return list of existing machines, that match hosts in self.hosts""" 96 | 97 | result = [] 98 | 99 | try: 100 | out = subprocess.check_output(['VBoxManage', 'list', command]) 101 | except subprocess.CalledProcessError: 102 | return result 103 | 104 | for item in out.split('\n'): 105 | match = Build.VM_RE.match(item) 106 | if match and match.groups()[0] in self.hosts: 107 | result.append(match.groupdict()['name']) 108 | 109 | return result 110 | 111 | def _remove_vm(self, host): 112 | """Remove virtual machine""" 113 | logging.info("Removing vm `%s'.", host) 114 | cmd = ['VBoxManage', 'unregistervm', host] 115 | logging.debug("Executing: `%s'.", cmd) 116 | subprocess.check_call(cmd) 117 | cmd = ['rm', '-fr', os.path.join(os.path.expanduser('~/'), '.config', 118 | 'VirtualBox', host)] 119 | logging.debug("Executing: `%s'.", cmd) 120 | subprocess.check_call(cmd) 121 | 122 | def _poweroff_vm(self, host): 123 | """Turn off virtual machine""" 124 | logging.info("Power off vm `%s'.", host) 125 | cmd = ['VBoxManage', 'controlvm', host, 'poweroff'] 126 | logging.debug("Executing: `%s'.", cmd) 127 | subprocess.check_call(cmd) 128 | 129 | def remove_vms(self): 130 | """Remove vms""" 131 | hosts = self._check_vms_existence() 132 | 133 | if hosts and not self._remove_vms: 134 | logging.error("There is at least one VM which exists. Remove it " 135 | "manually, or use --remove switch for wiping out " 136 | "all existing machines before cloning." 137 | "\nConflicting VMs:\n%s", 138 | "\n".join(["- " + h for h in sorted(hosts)])) 139 | return False 140 | 141 | running_hosts = self._check_vms_existence('runningvms') 142 | 143 | for host in hosts: 144 | if host in running_hosts: 145 | self._poweroff_vm(host) 146 | self._remove_vm(host) 147 | 148 | return True 149 | 150 | def remap(self, line, data): 151 | """Replace the template placeholders to something meaningful""" 152 | line = line.rstrip() 153 | if 'CONTROLLER_HOSTNAME' in line: 154 | line = line.replace('CONTROLLER_HOSTNAME', 155 | self.context['controller'][0]) 156 | if 'AAA.BBB.CCC.DDD' in line: 157 | line = line.replace('AAA.BBB.CCC.DDD', data['ips'][0]) 158 | if 'FPGA-NOVA-REPO' in line: 159 | line = line.replace('FPGA-NOVA-REPO', self.fpga_nova_repo) 160 | 161 | for key, val in self.config.items(): 162 | if key in line: 163 | line = line.replace(key, str(val)) 164 | 165 | return line 166 | 167 | def create_cleanup(self): 168 | """Create cleanup conf""" 169 | 170 | for hostname, data in self.hosts.items(): 171 | modules_out = hostname + "_modules" 172 | 173 | output = ["#!/bin/bash", ""] 174 | 175 | for module in reversed(data['modules']): 176 | mod = [] 177 | modpath = os.path.join(modules_out, "out_" + module) 178 | with open(os.path.join(self.modules_path, 179 | "out_" + module)) as fobj: 180 | for line in fobj: 181 | mod.append(self.remap(line, data)) 182 | mod.append("") 183 | 184 | with open(modpath, "w") as fobj: 185 | fobj.write('\n'.join(mod)) 186 | 187 | modpath = os.path.join("/root", modpath) 188 | output.append("bash " + modpath) 189 | output.append("") 190 | 191 | with open(hostname + "_cleanup.sh", "w") as fobj: 192 | fobj.write("\n".join(output)) 193 | 194 | def clone_vms(self): 195 | """Cloning VMs""" 196 | if not self.remove_vms(): 197 | return 198 | 199 | for hostname, data in self.hosts.items(): 200 | env = os.environ.copy() 201 | env.update({'VMNAME': self.base_vm, 202 | 'VMUSER': self.base_user, 203 | 'BASE_HOSTNAME': 
self.base_hostname, 204 | 'NAME': hostname, 205 | 'LAST_OCTET': data['ips'][0].split(".")[-1], 206 | 'DISTRO': self.base_distribution}) 207 | if self.ssh_key: 208 | env['SSH_KEY'] = self.ssh_key 209 | 210 | cmd = ['./create_vm_clone.sh'] 211 | logging.debug("Executing: %s", " ".join(cmd)) 212 | try: 213 | subprocess.check_call(cmd, env=env) 214 | except subprocess.CalledProcessError as err: 215 | sys.exit(err.returncode) 216 | 217 | def create_configs(self): 218 | """Create configurations, and optionally clone and provision VMs""" 219 | 220 | if self._skip_hosts: 221 | logging.warning('Warning: You have to add appropriate entries to ' 222 | 'your /etc/hosts, otherwise your cloud may not ' 223 | 'work properly.') 224 | 225 | for hostname, data in self.hosts.items(): 226 | modules_out = hostname + "_modules" 227 | 228 | try: 229 | os.mkdir(modules_out) 230 | except OSError as err: 231 | if err.errno == errno.EEXIST: 232 | shutil.rmtree(modules_out) 233 | os.mkdir(modules_out) 234 | else: 235 | raise 236 | 237 | output = ["#!/bin/bash", ""] 238 | 239 | if not self._skip_hosts: 240 | for other_host_key in [x 241 | for x in self.hosts 242 | if x != hostname]: 243 | 244 | output.append("echo " + 245 | self.hosts[other_host_key]['ips'][0] + " " + 246 | other_host_key + 247 | " >> /etc/hosts") 248 | output.append("") 249 | 250 | for module in data['modules']: 251 | mod = [] 252 | modpath = os.path.join(modules_out, "in_" + module) 253 | with open(os.path.join(self.modules_path, 254 | "in_" + module)) as fobj: 255 | for line in fobj: 256 | mod.append(self.remap(line, data)) 257 | mod.append("") 258 | with open(modpath, "w") as fobj: 259 | fobj.write('\n'.join(mod)) 260 | 261 | modpath = os.path.join("/root", modpath) 262 | output.append("bash " + modpath) 263 | output.append("") 264 | 265 | with open(hostname + ".sh", "w") as fobj: 266 | fobj.write("\n".join(output)) 267 | 268 | def auto_install(self): 269 | """Triggers Openstack intallation on all nodes specified in cloud 270 | config file 271 | """ 272 | for hostname, data in self.hosts.items(): 273 | public_ip = data['ips'][1] 274 | t = threading.Thread(target=self.install_thread, 275 | args=(hostname, public_ip)) 276 | t.start() 277 | 278 | def install_thread(self, hostname, public_ip): 279 | """Thread method that is responsible for Openstack installation 280 | on a single VM 281 | """ 282 | logging.info("Openstack installation on host %s has started (see " 283 | "%s.log)", hostname, hostname) 284 | env = os.environ.copy() 285 | env.update({'HOSTNAME': hostname, 286 | 'IP_ADDRESS': public_ip, 287 | 'VMUSER': self.base_user}) 288 | logging.debug("Executing in thread: " + './boot_vm_and_install.sh') 289 | subprocess.check_call(['./boot_vm_and_install.sh'], env=env) 290 | logging.info("Openstack installation on host %s has finished.", 291 | hostname) 292 | 293 | 294 | def main(): 295 | """Main function, just parses arguments, and call create_configs""" 296 | parser = argparse.ArgumentParser() 297 | parser.add_argument('--dont-clone', '-d', action='store_true', 298 | help='Do not clone machine, just generate install ' 299 | 'scripts') 300 | parser.add_argument('--skip-hosts', '-s', action='store_true', 301 | help='Skip appending hosts to /etc/hosts') 302 | parser.add_argument('--remove', '-r', action='store_true', 303 | help='Dispose existing VMs') 304 | parser.add_argument('--auto-install', '-a', action='store_true', 305 | help='Automatically start VMs and run Openstack ' 306 | 'installation') 307 | parser.add_argument('--ssh-key', '-k', 
help='Path to private SSH key used' 308 | ' to clone git repositories') 309 | parser.add_argument('cloudconf', 310 | help='Yaml file with the cloud configuration') 311 | parser.add_argument('-v', '--verbose', help='Be verbose. Adding more "v" ' 312 | 'will increase verbosity', action="count", 313 | default=None) 314 | parser.add_argument('-q', '--quiet', help='Be quiet. Adding more "q" will' 315 | ' decrease verbosity', action="count", default=None) 316 | parsed_args = parser.parse_args() 317 | 318 | with open(parsed_args.cloudconf) as fobj: 319 | conf = yaml.load(fobj) 320 | 321 | setup_logger(parsed_args) 322 | Build(parsed_args, conf).build() 323 | 324 | 325 | if __name__ == "__main__": 326 | main() 327 | -------------------------------------------------------------------------------- /build_scripts/cloud.yaml: -------------------------------------------------------------------------------- 1 | # Example of simple 3 node setup for FPGA enabled Newton Openstack on Ubuntu 2 | # 16.04 server. 3 | base_vm: ubuntu-1604 4 | base_user: ubuntu 5 | base_distribution: ubuntu 6 | base_hostname: ubuntu 7 | openstack_version: newton 8 | config: 9 | ADMIN__OS_PROJECT_DOMAIN_NAME: default 10 | ADMIN__OS_USER_DOMAIN_NAME: default 11 | ADMIN__OS_PROJECT_NAME: admin 12 | ADMIN__OS_TENANT_NAME: admin 13 | ADMIN__OS_USERNAME: admin 14 | ADMIN__OS_PASSWORD: admin 15 | ADMIN__OS_IDENTITY_API_VERSION: 3 16 | ADMIN__OS_IMAGE_API_VERSION: 2 17 | DEMO__OS_PROJECT_DOMAIN_NAME: default 18 | DEMO__OS_USER_DOMAIN_NAME: default 19 | DEMO__OS_PROJECT_NAME: demo 20 | DEMO__OS_TENANT_NAME: demo 21 | DEMO__OS_USERNAME: demo 22 | DEMO__OS_PASSWORD: demo 23 | DEMO__OS_IDENTITY_API_VERSION: 3 24 | DEMO__OS_IMAGE_API_VERSION: 2 25 | nodes: 26 | controller: 27 | ips: [192.168.1.3, 192.168.56.3] 28 | role: controller 29 | modules: [provision_conf, openstackclient_db_mq, keystone, glance, 30 | docker, docker_glance, nova, fpga_files, fpga_db, 31 | nova_scheduler_filter, flavor_and_image, horizon] 32 | compute1: 33 | ips: [192.168.1.4, 192.168.56.4] 34 | role: compute 35 | modules: [provision_conf, nova_compute, docker, nova_docker_patches, 36 | fpga_files, fpga_exec] 37 | compute2: 38 | ips: [192.168.1.5, 192.168.56.5] 39 | role: compute 40 | modules: [provision_conf, nova_compute, docker, nova_docker_patches, 41 | fpga_files] 42 | -------------------------------------------------------------------------------- /build_scripts/create_vm_clone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # preparing the bare openstack vm cloned out of base 3 | 4 | _check_if_exists() { 5 | local vms='' 6 | vms=$(VBoxManage list vms | \ 7 | awk -F ' {' '{ print $1 }' | \ 8 | tr '\n' '|' | \ 9 | sed 's/|$//' | \ 10 | sed 's/"//g') 11 | IFS='|' read -ra vms <<< "$vms" 12 | 13 | for item in "${vms[@]}" 14 | do 15 | [[ "${item}" == "${NAME}" ]] && \ 16 | echo "VM '${NAME}' already exists." && \ 17 | exit 2 18 | done 19 | } 20 | 21 | _clone() { 22 | echo "Cloning '${VMNAME}' into '${NAME}'..." 23 | VBoxManage clonevm ${VMNAME} --name ${NAME} --register 24 | } 25 | 26 | _boot() { 27 | echo "Booting '${NAME}'..." 28 | VBoxManage startvm ${NAME} --type headless 29 | 30 | while true; do 31 | ping -c 1 -t 1 $BASE_HOSTNAME >/dev/null 32 | if [ $? -eq 0 ]; then 33 | break 34 | fi 35 | echo "Still waiting for ${NAME}..." 36 | done 37 | 38 | while true; do 39 | ssh $VMUSER@$BASE_HOSTNAME ls >/dev/null 2>&1 40 | if [ $? -ne 0 ]; then 41 | echo "Still waiting for ${NAME}..." 
42 | sleep 1 43 | continue 44 | fi 45 | break 46 | done 47 | } 48 | 49 | _provision() { 50 | echo "Provisioning '${NAME}'..." 51 | TMP=$(mktemp) 52 | echo '#!/bin/sh' > $TMP 53 | echo echo $NAME ' > /etc/hostname' >> $TMP 54 | 55 | if [ $DISTRO == 'ubuntu' ]; then 56 | echo "sed -i -e 's/192.168.1.2$/192.168.1.${IP}/' /etc/network/interfaces" >> $TMP 57 | echo "sed -i -e 's/192.168.56.2$/192.168.56.${IP}/' /etc/network/interfaces" >> $TMP 58 | elif [ $DISTRO == 'redhat' ]; then 59 | echo "sed -i -e 's/192.168.1.2$/192.168.1.${IP}/' /etc/sysconfig/network-scripts/*" >> $TMP 60 | echo "sed -i -e 's/192.168.56.2$/192.168.56.${IP}/' /etc/sysconfig/network-scripts/*" >> $TMP 61 | fi 62 | 63 | echo "echo '192.168.1.${IP} ${NAME}' >> /etc/hosts" >> $TMP 64 | scp $TMP $VMUSER@${BASE_HOSTNAME}: 65 | BTMP=$(basename $TMP) 66 | ssh $VMUSER@${BASE_HOSTNAME} chmod +x $BTMP 67 | ssh $VMUSER@${BASE_HOSTNAME} sudo /home/$VMUSER/$BTMP 68 | ssh $VMUSER@${BASE_HOSTNAME} rm $BTMP 69 | rm $TMP 70 | 71 | if [ $SSH_KEY ]; then 72 | scp $SSH_KEY $VMUSER@${BASE_HOSTNAME}:/home/$VMUSER/id_rsa 73 | ssh $VMUSER@${BASE_HOSTNAME} sudo mkdir -p /root/.ssh 74 | ssh $VMUSER@${BASE_HOSTNAME} sudo mv /home/$VMUSER/id_rsa /root/.ssh/id_rsa 75 | # This is done in order to omit the prompt when connecting to github for the first time 76 | ssh $VMUSER@${BASE_HOSTNAME} "sudo ssh-keyscan -t rsa github.com > known_hosts" 77 | ssh $VMUSER@${BASE_HOSTNAME} sudo mv known_hosts /root/.ssh/known_hosts 78 | fi 79 | if [ -e "${NAME}.sh" ]; then 80 | scp "${NAME}.sh" $VMUSER@${BASE_HOSTNAME}: 81 | ssh $VMUSER@${BASE_HOSTNAME} chmod +x "${NAME}.sh" 82 | ssh $VMUSER@${BASE_HOSTNAME} sudo mv /home/$VMUSER/"${NAME}.sh" /root/ 83 | rm "${NAME}.sh" 84 | fi 85 | if [ -e "${NAME}_cleanup.sh" ]; then 86 | scp "${NAME}_cleanup.sh" $VMUSER@${BASE_HOSTNAME}: 87 | ssh $VMUSER@${BASE_HOSTNAME} chmod +x "${NAME}_cleanup.sh" 88 | ssh $VMUSER@${BASE_HOSTNAME} sudo mv /home/$VMUSER/"${NAME}_cleanup.sh" /root/ 89 | rm "${NAME}_cleanup.sh" 90 | fi 91 | if [ -e "${NAME}_modules" ]; then 92 | scp -r "${NAME}_modules" $VMUSER@${BASE_HOSTNAME}: 93 | ssh $VMUSER@${BASE_HOSTNAME} sudo mv /home/$VMUSER/"${NAME}_modules" /root/ 94 | rm -fr "${NAME}_modules" 95 | fi 96 | } 97 | 98 | _poweroff() { 99 | echo "Power off the machine" 100 | ssh $VMUSER@${BASE_HOSTNAME} sudo poweroff 101 | 102 | while true; do 103 | $(VBoxManage list runningvms |grep -q $NAME) 104 | if [ $? -eq 1 ]; then 105 | break 106 | sleep 1 107 | fi 108 | done 109 | } 110 | 111 | _reboot() { 112 | echo "Rebooting ${NAME}" 113 | ssh $VMUSER@${BASE_HOSTNAME} sudo reboot 114 | while true; do 115 | ping -c 1 -t 1 ${BASE_HOSTNAME} >/dev/null 116 | if [ $? -ne 0 ]; then 117 | echo "Still waiting for ${NAME}..." 118 | continue 119 | fi 120 | break 121 | done 122 | sleep 3 123 | } 124 | 125 | _finalize() { 126 | echo Done. 127 | echo Now you can start VM: 128 | echo " VBoxManage startvm ${NAME} --type headless" 129 | echo and connect via ssh: 130 | echo " ssh $VMUSER@${NAME}" 131 | echo Installation script is available on /root directory 132 | } 133 | 134 | _usage() { 135 | echo Usage: $0 machine-name last-ip-octet base-vm-name base-host-name 136 | echo 137 | echo "Also, script needs following environment variables to be set:" 138 | echo "- VMNAME - name of the VBox virtual machine" 139 | echo "- VMUSER - user, which is used to log in into the system" 140 | echo "- BASE_HOSTNAME - host name which points to the IP address of VMNAME" 141 | echo "- DISTRO - Linux distribution, one of ubuntu, redhat." 
142 | echo "- NAME - host name which will be set on the cloned machine. This will " 143 | echo " also become the VBox virtual machine name." 144 | echo "- LAST_OCTET - last IP address octet which will be set on the cloned" 145 | echo " machine" 146 | } 147 | 148 | _check_variables() { 149 | [[ -n "${VMNAME}" ]] && \ 150 | [[ -n "${VMUSER}" ]] && \ 151 | [[ -n "${BASE_HOSTNAME}" ]] && \ 152 | [[ -n "${DISTRO}" ]] && \ 153 | [[ -n "${NAME}" ]] && \ 154 | [[ -n "${LAST_OCTET}" ]] && return || _usage && exit 1 155 | } 156 | 157 | EXISTS=false 158 | IP=$LAST_OCTET 159 | 160 | if [ $# -ne 0 ]; then 161 | _usage 162 | exit 1 163 | fi 164 | 165 | _check_variables 166 | _check_if_exists 167 | _clone 168 | _boot 169 | _provision 170 | _poweroff 171 | _finalize 172 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | tee /etc/yum.repos.d/docker.repo <<-'EOF' 5 | [dockerrepo] 6 | name=Docker Repository 7 | baseurl=https://yum.dockerproject.org/repo/main/centos/7/ 8 | enabled=1 9 | gpgcheck=1 10 | gpgkey=https://yum.dockerproject.org/gpg 11 | EOF 12 | 13 | yum install -y docker-engine 14 | systemctl enable docker.service 15 | systemctl start docker 16 | docker pull busybox 17 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_docker_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | cat /etc/glance/glance-api.conf | awk '/\[DEFAULT\]/ { print; print "container_formats = ami,ari,aki,bare,ovf,ova,docker"; next }1' > glance-api.conf 5 | mv -f glance-api.conf /etc/glance/glance-api.conf 6 | restorecon -v /etc/glance/glance-api.conf 7 | systemctl restart openstack-glance-api openstack-glance-registry 8 | 9 | . admin-openrc.sh 10 | 11 | docker pull busybox 12 | docker save -o busyimg busybox 13 | openstack image create --file busyimg --public --container-format docker \ 14 | --disk-format raw --id 4ba11f84-9b8d-4bcd-8f28-74f8cedcb1dc busybox 15 | rm -f busyimg 16 | IMAGE_ID=$(openstack image list | grep busybox | awk '{print $2}') 17 | if [ "${IMAGE_ID}x" == "x" ]; then 18 | echo "Cannot find busybox image" 19 | exit 1 20 | fi 21 | glance image-update --property os_command_line='/bin/sleep 100d' $IMAGE_ID 22 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_flavor_and_image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | .
admin-openrc.sh 5 | 6 | sed -ie 's/container_formats = .*/container_formats = ami,ari,aki,bare,ovf,ova,docker,fpga/' /etc/glance/glance-api.conf 7 | restorecon -v /etc/glance/glance-api.conf 8 | systemctl restart openstack-glance-api openstack-glance-registry 9 | 10 | sleep 10 11 | 12 | echo "DEADCODE" > fpga_image 13 | glance image-create --id dd834aa4-f950-40e6-8c23-9dab7f3f0138 --file fpga_image --name fpga_image --disk-format raw --container-format fpga 14 | rm -f fpga_image 15 | nova flavor-create fpga.tiny 6 512 1 1 16 | nova flavor-key fpga.tiny set "hw:fpga_ip_id"="dd834aa4-f950-40e6-8c23-9dab7f3f0138" 17 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_fpga_db: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | . admin-openrc.sh 5 | 6 | su -s /bin/sh -c "nova-manage api_db sync" nova 7 | su -s /bin/sh -c "nova-manage db sync" nova 8 | 9 | systemctl enable openstack-nova-api openstack-nova-scheduler \ 10 | openstack-nova-conductor 11 | systemctl start openstack-nova-api openstack-nova-scheduler \ 12 | openstack-nova-conductor 13 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_fpga_exec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? != 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | fpga_exec="/usr/bin/fpga-cli" 13 | 14 | echo "fpga_access = True" >> /etc/nova/nova-compute.conf 15 | echo "fpga_exec = ${fpga_exec##*/}" >> /etc/nova/nova-compute.conf 16 | 17 | cp -a fpga-nova/bin/fpga-cli.py $fpga_exec 18 | 19 | echo "${fpga_exec##*/}: CommandFilter, ${fpga_exec##*/}, root" >> /usr/share/nova/rootwrap/compute.filters 20 | echo "${fpga_exec##*/}: CommandFilter, ${fpga_exec##*/}, root" >> /usr/share/nova/rootwrap/network.filters 21 | 22 | systemctl restart openstack-nova-compute 23 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_fpga_files: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? != 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | current=$(rpm -qi openstack-nova-common | grep '^Version' | tr -s ' ' | cut -d ' ' -f 3 ) 13 | [[ ! -e "fpga-nova/patches/rhel_72-nova-${current}.patch" ]] && echo "No patch for version ${current}!" && exit 1 14 | fpga_dir=$(pwd) 15 | cd /usr/lib/python2.7/site-packages 16 | patch -Np1 -i "${fpga_dir}/fpga-nova/patches/rhel_72-nova-${current}.patch" 17 | cd - 18 | systemctl restart openstack-nova-compute 19 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | echo 'CREATE DATABASE glance;' | mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';" |mysql -u root -proot 6 | echo "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';" |mysql -u root -proot 7 | 8 | . 
admin-openrc.sh 9 | 10 | openstack user create --domain default --password GLANCE_PASS glance 11 | openstack role add --project service --user glance admin 12 | openstack service create --name glance \ 13 | --description "OpenStack Image service" image 14 | 15 | openstack endpoint create --region RegionOne \ 16 | image public http://CONTROLLER_HOSTNAME:9292 17 | openstack endpoint create --region RegionOne \ 18 | image internal http://CONTROLLER_HOSTNAME:9292 19 | openstack endpoint create --region RegionOne \ 20 | image admin http://CONTROLLER_HOSTNAME:9292 21 | 22 | yum install -y openstack-glance 23 | 24 | cat << EOF > /etc/glance/glance-api.conf 25 | [DEFAULT] 26 | debug = True 27 | 28 | [database] 29 | connection = mysql+pymysql://glance:GLANCE_DBPASS@CONTROLLER_HOSTNAME/glance 30 | backend = sqlalchemy 31 | 32 | [keystone_authtoken] 33 | auth_uri = http://CONTROLLER_HOSTNAME:5000 34 | auth_url = http://CONTROLLER_HOSTNAME:35357 35 | auth_type = password 36 | project_domain_name = default 37 | user_domain_name = default 38 | project_name = service 39 | username = glance 40 | password = GLANCE_PASS 41 | 42 | [paste_deploy] 43 | flavor = keystone 44 | 45 | [glance_store] 46 | stores = file,http 47 | default_store = file 48 | filesystem_store_datadir = /var/lib/glance/images/ 49 | EOF 50 | 51 | cat << EOF > /etc/glance/glance-registry.conf 52 | [DEFAULT] 53 | verbose = True 54 | backend = sqlalchemy 55 | 56 | [database] 57 | connection = mysql+pymysql://glance:GLANCE_DBPASS@CONTROLLER_HOSTNAME/glance 58 | 59 | [keystone_authtoken] 60 | auth_uri = http://CONTROLLER_HOSTNAME:5000 61 | auth_url = http://CONTROLLER_HOSTNAME:35357 62 | auth_type = password 63 | project_domain_name = default 64 | user_domain_name = default 65 | project_name = service 66 | username = glance 67 | password = GLANCE_PASS 68 | 69 | [paste_deploy] 70 | flavor = keystone 71 | EOF 72 | 73 | su -s /bin/sh -c "glance-manage db_sync" glance 74 | 75 | systemctl enable openstack-glance-api.service openstack-glance-registry.service 76 | systemctl start openstack-glance-api.service openstack-glance-registry.service 77 | 78 | source admin-openrc.sh 79 | curl http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img \ 80 | -o cirros-0.3.4-x86_64-disk.img 81 | glance image-create --name "cirros" --file cirros-0.3.4-x86_64-disk.img \ 82 | --disk-format qcow2 --container-format bare --visibility public --progress \ 83 | --id 48644096-5555-4835-99a0-59089dd7da1b 84 | glance image-list 85 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_horizon: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | yum install -y openstack-dashboard 5 | 6 | sed -i 's/OPENSTACK_HOST = "127.0.0.1"/OPENSTACK_HOST = "CONTROLLER_HOSTNAME"/g' /etc/openstack-dashboard/local_settings 7 | sed -i "s/'enable_router': True/'enable_router': False/g" /etc/openstack-dashboard/local_settings 8 | sed -i "s/'enable_quotas': True/'enable_quotas': False/g" /etc/openstack-dashboard/local_settings 9 | sed -i "s/'enable_ipv6': True/'enable_ipv6': False/g" /etc/openstack-dashboard/local_settings 10 | sed -i "s/'enable_lb': True/'enable_lb': False/g" /etc/openstack-dashboard/local_settings 11 | sed -i "s/'enable_firewall': True/'enable_firewall': False/g" /etc/openstack-dashboard/local_settings 12 | sed -i "s/'enable_vpn': True/'enable_vpn': False/g" /etc/openstack-dashboard/local_settings 13 | sed -i "s/''enable_fip_topology_check'': 
True/''enable_fip_topology_check'': False/g" /etc/openstack-dashboard/local_settings 14 | sed -i "s/v2.0/v3/g" /etc/openstack-dashboard/local_settings 15 | echo 'OPENSTACK_API_VERSIONS = {"identity": 3, "volume": 2,"compute": 2}' >> /etc/openstack-dashboard/local_settings 16 | 17 | systemctl restart httpd.service 18 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_keystone: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | echo 'CREATE DATABASE keystone;' | mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';" |mysql -u root -proot 6 | echo "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';" |mysql -u root -proot 7 | 8 | yum install -y openstack-keystone httpd mod_wsgi 9 | 10 | cat << EOF > /etc/keystone/keystone.conf 11 | [DEFAULT] 12 | log_dir = /var/log/keystone 13 | 14 | [database] 15 | connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@CONTROLLER_HOSTNAME/keystone 16 | 17 | [token] 18 | provider = fernet 19 | EOF 20 | 21 | su -s /bin/sh -c "keystone-manage db_sync" keystone 22 | keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone 23 | keystone-manage credential_setup --keystone-user keystone --keystone-group keystone 24 | 25 | keystone-manage bootstrap --bootstrap-password ADMIN__OS_PASSWORD \ 26 | --bootstrap-admin-url http://CONTROLLER_HOSTNAME:35357/v3/ \ 27 | --bootstrap-internal-url http://CONTROLLER_HOSTNAME:35357/v3/ \ 28 | --bootstrap-public-url http://CONTROLLER_HOSTNAME:5000/v3/ \ 29 | --bootstrap-region-id RegionOne 30 | 31 | echo ServerName CONTROLLER_HOSTNAME >> /etc/httpd/conf/httpd.conf 32 | ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/ 33 | systemctl enable httpd.service 34 | systemctl start httpd.service 35 | 36 | # create openrc scripts 37 | cat << EOF > admin-openrc.sh 38 | export OS_PROJECT_DOMAIN_NAME=ADMIN__OS_PROJECT_DOMAIN_NAME 39 | export OS_USER_DOMAIN_NAME=ADMIN__OS_USER_DOMAIN_NAME 40 | export OS_PROJECT_NAME=ADMIN__OS_PROJECT_NAME 41 | export OS_USERNAME=ADMIN__OS_USERNAME 42 | export OS_PASSWORD=ADMIN__OS_PASSWORD 43 | export OS_AUTH_URL=http://CONTROLLER_HOSTNAME:35357/v3 44 | export OS_IDENTITY_API_VERSION=ADMIN__OS_IDENTITY_API_VERSION 45 | export OS_IMAGE_API_VERSION=ADMIN__OS_IMAGE_API_VERSION 46 | EOF 47 | 48 | cat << EOF > demo-openrc.sh 49 | export OS_PROJECT_DOMAIN_NAME=DEMO__OS_PROJECT_DOMAIN_NAME 50 | export OS_USER_DOMAIN_NAME=DEMO__OS_USER_DOMAIN_NAME 51 | export OS_PROJECT_NAME=DEMO__OS_PROJECT_NAME 52 | export OS_USERNAME=DEMO__OS_USERNAME 53 | export OS_PASSWORD=DEMO__OS_PASSWORD 54 | export OS_AUTH_URL=http://CONTROLLER_HOSTNAME:5000/v3 55 | export OS_IDENTITY_API_VERSION=DEMO__OS_IDENTITY_API_VERSION 56 | export OS_IMAGE_API_VERSION=DEMO__OS_IMAGE_API_VERSION 57 | EOF 58 | 59 | source admin-openrc.sh 60 | 61 | openstack project create --domain default \ 62 | --description "Service Project" service 63 | openstack project create --domain default --description "Demo Project" demo 64 | openstack user create --domain default --password demo demo 65 | openstack role create user 66 | openstack role add --project demo --user demo user 67 | 68 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_nova: -------------------------------------------------------------------------------- 1 | #!/bin/bash 
2 | set -x 3 | 4 | 5 | echo 'CREATE DATABASE nova_api;' | mysql -u root -proot 6 | echo 'CREATE DATABASE nova;' | mysql -u root -proot 7 | echo "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 8 | echo "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 9 | echo "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 10 | echo "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 11 | 12 | . admin-openrc.sh 13 | 14 | openstack user create --domain default --password NOVA_PASS nova 15 | openstack role add --project service --user nova admin 16 | openstack service create --name nova --description "OpenStack Compute" compute 17 | 18 | openstack endpoint create --region RegionOne \ 19 | compute public http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 20 | openstack endpoint create --region RegionOne \ 21 | compute internal http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 22 | openstack endpoint create --region RegionOne \ 23 | compute admin http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 24 | 25 | yum install -y openstack-nova-api openstack-nova-conductor \ 26 | openstack-nova-scheduler 27 | 28 | cat << EOF > /etc/nova/nova.conf 29 | [DEFAULT] 30 | dhcpbridge_flagfile=/etc/nova/nova.conf 31 | dhcpbridge=/usr/bin/nova-dhcpbridge 32 | logdir=/var/log/nova 33 | state_path=/var/lib/nova 34 | lock_path=/var/lock/nova 35 | force_dhcp_release=True 36 | libvirt_use_virtio_for_bridges=True 37 | debug=True 38 | ec2_private_dns_show_ip=True 39 | api_paste_config=/etc/nova/api-paste.ini 40 | auth_strategy = keystone 41 | my_ip = AAA.BBB.CCC.DDD 42 | use_neutron = False 43 | transport_url = rabbit://openstack:RABBIT_PASS@CONTROLLER_HOSTNAME 44 | 45 | [api_database] 46 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova_api 47 | 48 | [database] 49 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova 50 | 51 | [keystone_authtoken] 52 | auth_uri = http://CONTROLLER_HOSTNAME:5000 53 | auth_url = http://CONTROLLER_HOSTNAME:35357 54 | auth_type = password 55 | project_domain_name = default 56 | user_domain_name = default 57 | project_name = service 58 | username = nova 59 | password = NOVA_PASS 60 | 61 | [oslo_concurrency] 62 | lock_path = /var/lib/nova/tmp 63 | 64 | [glance] 65 | api_servers = http://CONTROLLER_HOSTNAME:9292 66 | EOF 67 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_nova_compute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | yum install -y openstack-nova-compute openstack-nova-network 5 | 6 | cat << EOF > /etc/nova/nova.conf 7 | [DEFAULT] 8 | dhcpbridge_flagfile=/etc/nova/nova.conf 9 | dhcpbridge=/usr/bin/nova-dhcpbridge 10 | logdir=/var/log/nova 11 | state_path=/var/lib/nova 12 | lock_path=/var/lock/nova 13 | force_dhcp_release=True 14 | libvirt_use_virtio_for_bridges=True 15 | verbose=True 16 | debug=True 17 | ec2_private_dns_show_ip=True 18 | api_paste_config=/etc/nova/api-paste.ini 19 | auth_strategy = keystone 20 | my_ip = AAA.BBB.CCC.DDD 21 | transport_url = rabbit://openstack:RABBIT_PASS@CONTROLLER_HOSTNAME 22 | use_neutron = False 23 | network_manager = nova.network.manager.FlatDHCPManager 24 | network_size = 254 25 | allow_same_net_traffic = False 26 | multi_host = False 27 | multi_node = False 
28 | send_arp_for_ha = False 29 | share_dhcp_address = True 30 | force_dhcp_release = True 31 | enabled_apis = osapi_compute,metadata 32 | firewall_driver = nova.virt.firewall.NoopFirewallDriver 33 | 34 | [database] 35 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova 36 | 37 | [keystone_authtoken] 38 | auth_uri = http://CONTROLLER_HOSTNAME:5000 39 | auth_url = http://CONTROLLER_HOSTNAME:35357 40 | auth_type = password 41 | project_domain_name = default 42 | user_domain_name = default 43 | project_name = service 44 | username = nova 45 | password = NOVA_PASS 46 | 47 | [oslo_concurrency] 48 | lock_path = /var/lib/nova/tmp 49 | 50 | [glance] 51 | api_servers = http://CONTROLLER_HOSTNAME:9292 52 | EOF 53 | 54 | systemctl enable openstack-nova-compute.service 55 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_nova_docker_patches: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? != 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | git clone https://github.com/openstack/nova-docker 13 | 14 | cd nova-docker 15 | patch -Np1 -i ../fpga-nova/patches/nova_docker_newton.patch 16 | pip install . 17 | cd - 18 | 19 | echo "[DEFAULT]" > /etc/nova/nova-compute.conf 20 | echo "compute_driver=novadocker.virt.docker.DockerDriver" >> /etc/nova/nova-compute.conf 21 | echo "fpga_simulation_mode = False" >> /etc/nova/nova-compute.conf 22 | 23 | usermod -a -G docker nova 24 | 25 | systemctl restart openstack-nova-network openstack-nova-compute 26 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_nova_scheduler_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_default_filters = RamFilter,ComputeFilter,AvailabilityZoneFilter,ImagePropertiesFilter,FpgaFilter"; next }1' > nova.conf 5 | mv -f nova.conf /etc/nova/nova.conf 6 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_available_filters = nova.scheduler.filters.fpga_filter.FpgaFilter"; next }1' > nova.conf 7 | mv -f nova.conf /etc/nova/nova.conf 8 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_available_filters = nova.scheduler.filters.all_filters"; next }1' > nova.conf 9 | mv -f nova.conf /etc/nova/nova.conf 10 | restorecon -v /etc/nova/nova.conf 11 | 12 | systemctl restart openstack-nova-api openstack-nova-scheduler \ 13 | openstack-nova-conductor 14 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_openstackclient_db_mq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | yum install -y python-openstackclient 5 | 6 | # assuming password 'root' 7 | # sudo debconf-set-selections <<< 'mariadb-server-10.0 mysql-server/root_password password root' 8 | # sudo debconf-set-selections <<< 'mariadb-server-10.0 mysql-server/root_password_again password root' 9 | 10 | yum install -y mariadb mariadb-server python2-PyMySQL 11 | 12 | echo '[mysqld]' > /etc/my.cnf.d/openstack.cnf 13 | echo 'bind-address = AAA.BBB.CCC.DDD' >> /etc/my.cnf.d/openstack.cnf 14 | echo "default-storage-engine = innodb" >> 
/etc/my.cnf.d/openstack.cnf 15 | echo "innodb_file_per_table" >> /etc/my.cnf.d/openstack.cnf 16 | echo "collation-server = utf8_general_ci" >> /etc/my.cnf.d/openstack.cnf 17 | echo "init-connect = 'SET NAMES utf8'" >> /etc/my.cnf.d/openstack.cnf 18 | echo "character-set-server = utf8" >> /etc/my.cnf.d/openstack.cnf 19 | 20 | systemctl enable mariadb.service 21 | systemctl start mariadb.service 22 | 23 | # This is to avoid prompting for password 24 | mysql_secure_installation <> /etc/security/limits.conf 36 | ulimit -H -n 65536 37 | ulimit -S -n 65536 38 | 39 | yum install -y rabbitmq-server 40 | systemctl enable rabbitmq-server.service 41 | systemctl start rabbitmq-server.service 42 | 43 | rabbitmqctl add_user openstack RABBIT_PASS 44 | rabbitmqctl set_permissions openstack ".*" ".*" ".*" 45 | 46 | iptables -A IN_public_allow -p tcp -m tcp \ 47 | --dport 5672 -m conntrack --ctstate NEW -j ACCEPT 48 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/in_provision_conf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | yum -y install chrony 5 | sed -i '/^server [0-9].rhel.*/d' /etc/chrony.conf 6 | echo 'server 172.28.168.170 offline minpoll 8' >> /etc/chrony.conf 7 | yum install -y https://rdoproject.org/repos/rdo-release.rpm 8 | yum upgrade -y 9 | yum install -y openstack-selinux 10 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | yum remove -y docker-engine 5 | rm -f /etc/yum.repos.d/docker.repo 6 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_docker_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_flavor_and_image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_fpga_db: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_fpga_exec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | rm -f /usr/bin/fpga-cli 5 | rm -fr fpga-nova 6 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_fpga_files: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | rm -fr fpga-nova 4 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | systemctl stop openstack-glance-registry openstack-glance-api 5 | echo 'DROP DATABASE glance;' | mysql -u root -proot 6 | yum remove -y openstack-glance openstack-python-glanceclient \ 7 | python-glanceclient 8 | 
-------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_horizon: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | systemctl stop httpd.service 5 | yum remove -y openstack-dashboard 6 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_keystone: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | systemctl stop httpd 5 | 6 | echo 'DROP DATABASE keystone;' | mysql -u root -proot 7 | yum remove -y openstack-keystone httpd mod_wsgi 8 | 9 | rm admin-openrc.sh 10 | rm demo-openrc.sh 11 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_nova: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | systemctl stop openstack-nova-api openstack-nova-scheduler openstack-nova-conductor 5 | 6 | echo 'DROP DATABASE nova;' | mysql -u root -proot 7 | echo 'DROP DATABASE nova_api;' | mysql -u root -proot 8 | 9 | yum remove -y openstack-nova-api openstack-nova-conductor \ 10 | openstack-nova-scheduler python-novaclient 11 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_nova_compute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | systemctl stop openstack-nova-compute openstack-nova-network 5 | yum remove -y openstack-nova-compute openstack-nova-network 6 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_nova_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | systemctl stop openstack-nova-compute 5 | rm -fr nova-docker 6 | pip uninstall nova-docker -y 7 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_nova_docker_patches: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | systemctl stop openstack-nova-compute 5 | rm -fr fpga-nova 6 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_nova_scheduler_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_openstackclient_db_mq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | systemctl stop mariadb 5 | systemctl stop rabbitmq-server 6 | sed -i '/rabbitmq - nofile 65536/d' /etc/security/limits.conf 7 | 8 | yum remove -y python-openstackclient mariadb mariadb-server \ 9 | python2-PyMySQL mariadb-common mariadb-config mariadb-errmsg \ 10 | mariadb-lib rabbitmq-server 11 | 12 | rm -rf /var/lib/mysql 13 | -------------------------------------------------------------------------------- /build_scripts/redhat-72/newton/out_provision_conf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | yum remove -y openstack-selinux chrony python-openstackclient 5 | 6 | rpm -qa | grep rdo | xargs rpm -e 7 | yum update 
-y 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 4 | apt-key list |grep -iq docker 5 | while [[ $? != 0 ]]; do 6 | echo "*** Error getting the key, retrying…" 7 | apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 8 | apt-key list |grep -iq docker 9 | done 10 | 11 | echo 'deb https://apt.dockerproject.org/repo ubuntu-trusty main' >> /etc/apt/sources.list 12 | apt-get update 13 | apt-get -y --force-yes purge lxc-docker 14 | apt-get -y --force-yes install docker-engine 15 | 16 | docker pull busybox 17 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_docker_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat /etc/glance/glance-api.conf | awk '/\[DEFAULT\]/ { print; print "container_formats = ami,ari,aki,bare,ovf,ova,docker"; next }1' > glance-api.conf 4 | mv glance-api.conf /etc/glance/glance-api.conf 5 | service glance-api restart 6 | service glance-registry restart 7 | 8 | . admin-openrc.sh 9 | 10 | docker pull busybox 11 | docker save -o busyimg busybox 12 | openstack image create --file busyimg --public --container-format docker \ 13 | --disk-format raw --id 4ba11f84-9b8d-4bcd-8f28-74f8cedcb1dc busybox 14 | rm busyimg 15 | IMAGE_ID=$(openstack image list | grep busybox | awk '{print $2}') 16 | if [ "${IMAGE_ID}x" == "x" ]; then 17 | echo "Cannot find busybox image" 18 | exit 1 19 | fi 20 | glance image-update --property os_command_line='/bin/sleep 100d' $IMAGE_ID 21 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_flavor_and_image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . admin-openrc.sh 4 | 5 | sed -ie 's/container_formats = .*/container_formats = ami,ari,aki,bare,ovf,ova,docker,fpga/' /etc/glance/glance-api.conf 6 | service glance-api restart 7 | service glance-registry restart 8 | 9 | sleep 10 10 | 11 | echo "DEADCODE" > fpga_image 12 | glance image-create --id dd834aa4-f950-40e6-8c23-9dab7f3f0138 --file fpga_image --name fpga_image --disk-format raw --container-format fpga 13 | rm fpga_image 14 | nova flavor-create fpga.tiny 6 512 1 1 15 | nova flavor-key fpga.tiny set "hw:fpga_ip_id"="dd834aa4-f950-40e6-8c23-9dab7f3f0138" 16 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_fpga_db: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . admin-openrc.sh 4 | su -s /bin/sh -c "nova-manage db sync" nova 5 | 6 | service nova-api restart 7 | service nova-scheduler restart 8 | service nova-conductor restart 9 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_fpga_exec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ! -d fpga-nova ]; then 4 | git clone FPGA-NOVA-REPO 5 | if [ $? 
!= 0 ]; then 6 | echo Failure in getting fpga-nova repository 7 | exit 8 | fi 9 | fi 10 | 11 | fpga_exec="/usr/bin/fpga-cli" 12 | 13 | echo "fpga_access = True" >> /etc/nova/nova-compute.conf 14 | echo "fpga_exec = ${fpga_exec##*/}" >> /etc/nova/nova-compute.conf 15 | 16 | cp -a fpga-nova/bin/fpga-cli.py $fpga_exec 17 | 18 | echo "${fpga_exec##*/}: CommandFilter, ${fpga_exec##*/}, root" >> /etc/nova/rootwrap.d/compute.filters 19 | echo "${fpga_exec##*/}: CommandFilter, ${fpga_exec##*/}, root" >> /etc/nova/rootwrap.d/network.filters 20 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_fpga_files: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ! -d fpga-nova ]; then 4 | git clone FPGA-NOVA-REPO 5 | if [ $? != 0 ]; then 6 | echo Failure in getting fpga-nova repository 7 | exit 8 | fi 9 | fi 10 | 11 | current=$(dpkg -l |grep -w python-nova | sed -e "s/ii\s\+python-nova\s\+2:\([0-9.]\+\).*/\1/g") 12 | [[ ! -e "fpga-nova/patches/ubuntu_14.04-nova-${current}.patch" ]] && echo "No patch for version ${current}!" && exit 1 13 | fpga_dir=$(pwd) 14 | cd /usr/lib/python2.7/dist-packages 15 | patch -Np1 -i "${fpga_dir}/fpga-nova/patches/ubuntu_14.04-nova-${current}.patch" 16 | cd - 17 | service nova-compute restart 18 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo 'CREATE DATABASE glance;' | mysql -u root -proot 4 | echo "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';" |mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';" |mysql -u root -proot 6 | 7 | . 
admin-openrc.sh 8 | 9 | openstack user create --domain default --password GLANCE_PASS glance 10 | openstack role add --project service --user glance admin 11 | openstack service create --name glance \ 12 | --description "OpenStack Image service" image 13 | 14 | openstack endpoint create --region RegionOne \ 15 | image public http://CONTROLLER_HOSTNAME:9292 16 | openstack endpoint create --region RegionOne \ 17 | image internal http://CONTROLLER_HOSTNAME:9292 18 | openstack endpoint create --region RegionOne \ 19 | image admin http://CONTROLLER_HOSTNAME:9292 20 | 21 | apt-get -y --force-yes install glance python-glanceclient 22 | 23 | cat << EOF > /etc/glance/glance-api.conf 24 | [DEFAULT] 25 | notification_driver = noop 26 | verbose = True 27 | 28 | [database] 29 | connection = mysql+pymysql://glance:GLANCE_DBPASS@CONTROLLER_HOSTNAME/glance 30 | backend = sqlalchemy 31 | 32 | [keystone_authtoken] 33 | auth_uri = http://CONTROLLER_HOSTNAME:5000 34 | auth_url = http://CONTROLLER_HOSTNAME:35357 35 | auth_plugin = password 36 | project_domain_id = default 37 | user_domain_id = default 38 | project_name = service 39 | username = glance 40 | password = GLANCE_PASS 41 | 42 | [paste_deploy] 43 | flavor = keystone 44 | 45 | [glance_store] 46 | default_store = file 47 | filesystem_store_datadir = /var/lib/glance/images/ 48 | EOF 49 | 50 | cat << EOF > /etc/glance/glance-registry.conf 51 | [DEFAULT] 52 | notification_driver = noop 53 | verbose = True 54 | backend = sqlalchemy 55 | 56 | [database] 57 | connection = mysql+pymysql://glance:GLANCE_DBPASS@CONTROLLER_HOSTNAME/glance 58 | 59 | [keystone_authtoken] 60 | auth_uri = http://CONTROLLER_HOSTNAME:5000 61 | auth_url = http://CONTROLLER_HOSTNAME:35357 62 | auth_plugin = password 63 | project_domain_id = default 64 | user_domain_id = default 65 | project_name = service 66 | username = glance 67 | password = GLANCE_PASS 68 | 69 | [paste_deploy] 70 | flavor = keystone 71 | EOF 72 | 73 | su -s /bin/sh -c "glance-manage db_sync" glance 74 | 75 | service glance-registry restart 76 | service glance-api restart 77 | 78 | rm -f /var/lib/glance/glance.sqlite 79 | 80 | echo "export OS_IMAGE_API_VERSION=2" | tee -a admin-openrc.sh demo-openrc.sh 81 | source admin-openrc.sh 82 | wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img 83 | glance image-create --name "cirros" --file cirros-0.3.4-x86_64-disk.img \ 84 | --disk-format qcow2 --container-format bare --visibility public --progress \ 85 | --id 48644096-5555-4835-99a0-59089dd7da1b 86 | glance image-list 87 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_horizon: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get -y --force-yes install nova-consoleauth nova-novncproxy nova-cert \ 4 | openstack-dashboard 5 | 6 | sed -i 's/OPENSTACK_HOST = "127.0.0.1"/OPENSTACK_HOST = "CONTROLLER_HOSTNAME"/g' /etc/openstack-dashboard/local_settings.py 7 | sed -i "s/'enable_router': True/'enable_router': False/g" /etc/openstack-dashboard/local_settings.py 8 | sed -i "s/'enable_quotas': True/'enable_quotas': False/g" /etc/openstack-dashboard/local_settings.py 9 | sed -i "s/'enable_ipv6': True/'enable_ipv6': False/g" /etc/openstack-dashboard/local_settings.py 10 | sed -i "s/'enable_lb': True/'enable_lb': False/g" /etc/openstack-dashboard/local_settings.py 11 | sed -i "s/'enable_firewall': True/'enable_firewall': False/g" /etc/openstack-dashboard/local_settings.py 12 | sed -i 
"s/'enable_vpn': True/'enable_vpn': False/g" /etc/openstack-dashboard/local_settings.py 13 | sed -i "s/''enable_fip_topology_check'': True/''enable_fip_topology_check'': False/g" /etc/openstack-dashboard/local_settings.py 14 | 15 | # Remove Ubuntu branded theme 16 | apt-get -y --force-yes purge openstack-dashboard-ubuntu-theme 17 | 18 | service nova-cert restart 19 | service nova-consoleauth restart 20 | service nova-novncproxy restart 21 | service apache2 reload 22 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_keystone: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo 'CREATE DATABASE keystone;' | mysql -u root -proot 4 | echo "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';" |mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';" |mysql -u root -proot 6 | echo "manual" > /etc/init/keystone.override 7 | 8 | apt-get -y --force-yes install keystone apache2 libapache2-mod-wsgi memcached python-memcache 9 | 10 | cat << EOF > /etc/keystone/keystone.conf 11 | [DEFAULT] 12 | admin_token = ADMIN_TOKEN 13 | log_dir = /var/log/keystone 14 | verbose = True 15 | 16 | [database] 17 | connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@CONTROLLER_HOSTNAME/keystone 18 | 19 | [memcache] 20 | servers = localhost:11211 21 | 22 | [revoke] 23 | driver = sql 24 | 25 | [token] 26 | provider = uuid 27 | driver = memcache 28 | 29 | [extra_headers] 30 | Distribution = Ubuntu 31 | EOF 32 | 33 | echo ServerName CONTROLLER_HOSTNAME >> /etc/apache2/apache2.conf 34 | 35 | cat << EOF > /etc/apache2/sites-available/wsgi-keystone.conf 36 | Listen 5000 37 | Listen 35357 38 | 39 | 40 | WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} 41 | WSGIProcessGroup keystone-public 42 | WSGIScriptAlias / /usr/bin/keystone-wsgi-public 43 | WSGIApplicationGroup %{GLOBAL} 44 | WSGIPassAuthorization On 45 | = 2.4> 46 | ErrorLogFormat "%{cu}t %M" 47 | 48 | ErrorLog /var/log/apache2/keystone.log 49 | CustomLog /var/log/apache2/keystone_access.log combined 50 | 51 | 52 | = 2.4> 53 | Require all granted 54 | 55 | 56 | Order allow,deny 57 | Allow from all 58 | 59 | 60 | 61 | 62 | 63 | WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} 64 | WSGIProcessGroup keystone-admin 65 | WSGIScriptAlias / /usr/bin/keystone-wsgi-admin 66 | WSGIApplicationGroup %{GLOBAL} 67 | WSGIPassAuthorization On 68 | = 2.4> 69 | ErrorLogFormat "%{cu}t %M" 70 | 71 | ErrorLog /var/log/apache2/keystone.log 72 | CustomLog /var/log/apache2/keystone_access.log combined 73 | 74 | 75 | = 2.4> 76 | Require all granted 77 | 78 | 79 | Order allow,deny 80 | Allow from all 81 | 82 | 83 | 84 | EOF 85 | ln -s /etc/apache2/sites-available/wsgi-keystone.conf \ 86 | /etc/apache2/sites-enabled 87 | service apache2 restart 88 | rm -f /var/lib/keystone/keystone.db 89 | # for some reason, keystone does rollback(?) migrations, or make them on the 90 | # sqlite db. 
Let's do that again 91 | su -s /bin/sh -c "keystone-manage db_sync" keystone 92 | 93 | export OS_TOKEN=ADMIN_TOKEN 94 | export OS_URL=http://CONTROLLER_HOSTNAME:35357/v3 95 | export OS_IDENTITY_API_VERSION=3 96 | 97 | openstack service create \ 98 | --name keystone --description "OpenStack Identity" identity 99 | 100 | # endpointy: 101 | openstack endpoint create --region RegionOne \ 102 | identity public http://CONTROLLER_HOSTNAME:5000/v2.0 103 | openstack endpoint create --region RegionOne \ 104 | identity internal http://CONTROLLER_HOSTNAME:5000/v2.0 105 | openstack endpoint create --region RegionOne \ 106 | identity admin http://CONTROLLER_HOSTNAME:35357/v2.0 107 | 108 | # Create projects, users, and roles 109 | openstack project create --domain default --description "Admin Project" admin 110 | openstack user create --domain default --password admin admin 111 | openstack role create admin 112 | openstack role add --project admin --user admin admin 113 | openstack project create --domain default \ 114 | --description "Service Project" service 115 | openstack project create --domain default --description "Demo Project" demo 116 | openstack user create --domain default --password demo demo 117 | openstack role create user 118 | openstack role add --project demo --user demo user 119 | 120 | # create openrc scripts 121 | cat << EOF > admin-openrc.sh 122 | export OS_PROJECT_DOMAIN_ID=ADMIN__OS_PROJECT_DOMAIN_NAME 123 | export OS_USER_DOMAIN_ID=ADMIN__OS_USER_DOMAIN_NAME 124 | export OS_PROJECT_NAME=ADMIN__OS_PROJECT_NAME 125 | export OS_TENANT_NAME=ADMIN__OS_TENANT_NAME 126 | export OS_USERNAME=ADMIN__OS_USERNAME 127 | export OS_PASSWORD=ADMIN__OS_PASSWORD 128 | export OS_AUTH_URL=http://CONTROLLER_HOSTNAME:35357/v3 129 | export OS_IDENTITY_API_VERSION=ADMIN__OS_IDENTITY_API_VERSION 130 | EOF 131 | 132 | cat << EOF > demo-openrc.sh 133 | export OS_PROJECT_DOMAIN_ID=DEMO__OS_PROJECT_DOMAIN_NAME 134 | export OS_USER_DOMAIN_ID=DEMO__OS_USER_DOMAIN_NAME 135 | export OS_PROJECT_NAME=DEMO__OS_PROJECT_NAME 136 | export OS_TENANT_NAME=DEMO__OS_TENANT_NAME 137 | export OS_USERNAME=DEMO__OS_USERNAME 138 | export OS_PASSWORD=DEMO__OS_PASSWORD 139 | export OS_AUTH_URL=http://CONTROLLER_HOSTNAME:5000/v3 140 | export OS_IDENTITY_API_VERSION=DEMO__OS_IDENTITY_API_VERSION 141 | EOF 142 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_nova: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo 'CREATE DATABASE nova;' | mysql -u root -proot 4 | echo "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 6 | 7 | . 
admin-openrc.sh 8 | 9 | openstack user create --domain default --password NOVA_PASS nova 10 | openstack role add --project service --user nova admin 11 | openstack service create --name nova --description "OpenStack Compute" compute 12 | 13 | openstack endpoint create --region RegionOne \ 14 | compute public http://CONTROLLER_HOSTNAME:8774/v2/%\(tenant_id\)s 15 | openstack endpoint create --region RegionOne \ 16 | compute internal http://CONTROLLER_HOSTNAME:8774/v2/%\(tenant_id\)s 17 | openstack endpoint create --region RegionOne \ 18 | compute admin http://CONTROLLER_HOSTNAME:8774/v2/%\(tenant_id\)s 19 | 20 | apt-get -y --force-yes install nova-api nova-conductor nova-scheduler python-novaclient 21 | 22 | cat << EOF > /etc/nova/nova.conf 23 | [DEFAULT] 24 | dhcpbridge_flagfile=/etc/nova/nova.conf 25 | dhcpbridge=/usr/bin/nova-dhcpbridge 26 | logdir=/var/log/nova 27 | state_path=/var/lib/nova 28 | lock_path=/var/lock/nova 29 | force_dhcp_release=True 30 | libvirt_use_virtio_for_bridges=True 31 | verbose=True 32 | ec2_private_dns_show_ip=True 33 | api_paste_config=/etc/nova/api-paste.ini 34 | enabled_apis=osapi_compute,metadata 35 | 36 | auth_strategy = keystone 37 | my_ip = AAA.BBB.CCC.DDD 38 | rpc_backend = rabbit 39 | verbose = True 40 | 41 | network_api_class = nova.network.api.API 42 | security_group_api = nova 43 | 44 | [database] 45 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova 46 | 47 | [oslo_messaging_rabbit] 48 | rabbit_host = CONTROLLER_HOSTNAME 49 | rabbit_userid = openstack 50 | rabbit_password = RABBIT_PASS 51 | 52 | [keystone_authtoken] 53 | auth_uri = http://CONTROLLER_HOSTNAME:5000 54 | auth_url = http://CONTROLLER_HOSTNAME:35357 55 | auth_plugin = password 56 | project_domain_id = default 57 | user_domain_id = default 58 | project_name = service 59 | username = nova 60 | password = NOVA_PASS 61 | 62 | [oslo_concurrency] 63 | lock_path = /var/lib/nova/tmp 64 | 65 | [glance] 66 | host = CONTROLLER_HOSTNAME 67 | EOF 68 | 69 | rm -f /var/lib/nova/nova.sqlite 70 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_nova_compute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get -y --force-yes install nova-compute nova-network sysfsutils 4 | 5 | cat << EOF > /etc/nova/nova.conf 6 | [DEFAULT] 7 | dhcpbridge_flagfile=/etc/nova/nova.conf 8 | dhcpbridge=/usr/bin/nova-dhcpbridge 9 | logdir=/var/log/nova 10 | state_path=/var/lib/nova 11 | lock_path=/var/lock/nova 12 | force_dhcp_release=True 13 | libvirt_use_virtio_for_bridges=True 14 | verbose=True 15 | ec2_private_dns_show_ip=True 16 | api_paste_config=/etc/nova/api-paste.ini 17 | enabled_apis=osapi_compute,metadata 18 | 19 | auth_strategy = keystone 20 | my_ip = AAA.BBB.CCC.DDD 21 | rpc_backend = rabbit 22 | verbose = True 23 | 24 | # network conf 25 | network_api_class = nova.network.api.API 26 | security_group_api = nova 27 | #firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver 28 | network_manager = nova.network.manager.FlatDHCPManager 29 | network_size = 254 30 | allow_same_net_traffic = False 31 | multi_host = False 32 | multi_node = False 33 | send_arp_for_ha = False 34 | share_dhcp_address = True 35 | force_dhcp_release = True 36 | #flat_network_bridge = br100 37 | #flat_interface = eth0 38 | #public_interface = eth0 39 | 40 | [database] 41 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova 42 | 43 | [oslo_messaging_rabbit] 44 | 
rabbit_host = CONTROLLER_HOSTNAME 45 | rabbit_userid = openstack 46 | rabbit_password = RABBIT_PASS 47 | 48 | [keystone_authtoken] 49 | auth_uri = http://CONTROLLER_HOSTNAME:5000 50 | auth_url = http://CONTROLLER_HOSTNAME:35357 51 | auth_plugin = password 52 | project_domain_id = default 53 | user_domain_id = default 54 | project_name = service 55 | username = nova 56 | password = NOVA_PASS 57 | 58 | [oslo_concurrency] 59 | lock_path = /var/lib/nova/tmp 60 | 61 | [glance] 62 | host = CONTROLLER_HOSTNAME 63 | EOF 64 | 65 | service nova-compute restart 66 | 67 | rm -f /var/lib/nova/nova.sqlite 68 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_nova_docker_patches: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ! -d fpga-nova ]; then 4 | git clone FPGA-NOVA-REPO 5 | if [ $? != 0 ]; then 6 | echo Failure in getting fpga-nova repository 7 | exit 8 | fi 9 | fi 10 | 11 | git clone https://github.com/openstack/nova-docker -b stable/liberty 12 | 13 | cd nova-docker 14 | patch -Np1 -i ../fpga-nova/patches/nova_docker_liberty.patch 15 | pip install . 16 | cd - 17 | 18 | echo "[DEFAULT]" > /etc/nova/nova-compute.conf 19 | echo "compute_driver=novadocker.virt.docker.DockerDriver" >> /etc/nova/nova-compute.conf 20 | echo "fpga_simulation_mode = False" >> /etc/nova/nova-compute.conf 21 | 22 | usermod -a -G docker nova 23 | 24 | service nova-network restart 25 | service nova-compute restart 26 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_nova_scheduler_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_default_filters = RamFilter,ComputeFilter,AvailabilityZoneFilter,ImagePropertiesFilter,FpgaFilter"; next }1' > nova.conf 4 | mv nova.conf /etc/nova/nova.conf 5 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_available_filters = nova.scheduler.filters.fpga_filter.FpgaFilter"; next }1' > nova.conf 6 | mv nova.conf /etc/nova/nova.conf 7 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_available_filters = nova.scheduler.filters.all_filters"; next }1' > nova.conf 8 | mv nova.conf /etc/nova/nova.conf 9 | 10 | service nova-api restart 11 | service nova-cert restart 12 | service nova-consoleauth restart 13 | service nova-scheduler restart 14 | service nova-conductor restart 15 | service nova-novncproxy restart 16 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_openstackclient_db_mq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get -y --force-yes install python-openstackclient 4 | 5 | # assuming password 'root' 6 | sudo debconf-set-selections <<< 'mariadb-server-10.0 mysql-server/root_password password root' 7 | sudo debconf-set-selections <<< 'mariadb-server-10.0 mysql-server/root_password_again password root' 8 | apt-get -y --force-yes install mariadb-server python-pymysql 9 | echo '[mysqld]' > /etc/mysql/conf.d/mysqld_openstack.cnf 10 | echo 'bind-address = AAA.BBB.CCC.DDD' >> /etc/mysql/conf.d/mysqld_openstack.cnf 11 | echo "default-storage-engine = innodb" >> /etc/mysql/conf.d/mysqld_openstack.cnf 12 | echo "innodb_file_per_table" >> /etc/mysql/conf.d/mysqld_openstack.cnf 13 | echo 
"collation-server = utf8_general_ci" >> /etc/mysql/conf.d/mysqld_openstack.cnf 14 | echo "init-connect = 'SET NAMES utf8'" >> /etc/mysql/conf.d/mysqld_openstack.cnf 15 | echo "character-set-server = utf8" >> /etc/mysql/conf.d/mysqld_openstack.cnf 16 | service mysql restart 17 | 18 | echo 'rabbitmq - nofile 65536' >> /etc/security/limits.conf 19 | ulimit -H -n 65536 20 | ulimit -S -n 65536 21 | 22 | apt-get -y --force-yes install rabbitmq-server 23 | rabbitmqctl add_user openstack RABBIT_PASS 24 | rabbitmqctl set_permissions openstack ".*" ".*" ".*" 25 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/in_provision_conf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get -y --force-yes install chrony 4 | sed -i '/^server [0-9].debian.*/d' /etc/chrony/chrony.conf 5 | echo 'server 172.28.168.170 offline minpoll 8' >> /etc/chrony/chrony.conf 6 | apt-get -y --force-yes install software-properties-common 7 | add-apt-repository -y cloud-archive:liberty 8 | apt-get update && apt-get -y --force-yes dist-upgrade 9 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get -y --force-yes purge docker-engine 4 | sed -i '/deb https:\/\/apt.dockerproject.org\/repo ubuntu-trusty main/d' /etc/apt/sources.list 5 | key=$(apt-key list |grep -B 1 Docker |grep pub |sed 's/pub\s\+[A-Za-z0-9]\+\/\([a-zA-Z0-9]\+\)\s.*/\1/g') 6 | apt-key del $key 7 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_docker_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_flavor_and_image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_fpga_db: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_fpga_exec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm /usr/bin/fpga-cli 4 | rm -fr fpga-nova 5 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_fpga_files: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | rm -fr fpga-nova 3 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service glance-registry stop 4 | service glance-api stop 5 | echo 'DROP DATABASE glance;' | mysql -u root -proot 6 | apt-get -y --force-yes purge glance python-glanceclient 7 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_horizon: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 
service apache2 stop 4 | service nova-cert stop 5 | service nova-consoleauth stop 6 | service nova-novncproxy stop 7 | apt-get -y --force-yes purge nova-cert nova-consoleauth nova-novncproxy openstack-dashboard 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_keystone: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service apache2 stop 4 | service keystone stop 5 | 6 | echo 'DROP DATABASE keystone;' | mysql -u root -proot 7 | 8 | apt-get -y --force-yes purge keystone apache2 libapache2-mod-wsgi memcached python-memcache 9 | 10 | rm admin-openrc.sh 11 | rm demo-openrc.sh 12 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_nova: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service nova-api stop 4 | service nova-scheduler stop 5 | service nova-conductor stop 6 | 7 | echo 'DROP DATABASE nova;' | mysql -u root -proot 8 | 9 | apt-get -y --force-yes purge nova-api nova-conductor nova-scheduler python-novaclient 10 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_nova_compute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service nova-compute stop 4 | service nova-network stop 5 | 6 | apt-get -y --force-yes purge nova-compute nova-network sysfsutils 7 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_nova_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service nova-compute stop 4 | rm -fr nova-docker 5 | pip uninstall nova-docker -y 6 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_nova_docker_patches: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service nova-compute stop 4 | rm -fr fpga-nova 5 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_nova_scheduler_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_openstackclient_db_mq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service mysql stop 4 | service rabbitmq-server stop 5 | sed -i '/rabbitmq - nofile 65536/d' /etc/security/limits.conf 6 | 7 | apt-get -y --force-yes purge python-openstackclient mariadb-server \ 8 | python-pymysql mariadb-server-5.5 mariadb-client-5.5 \ 9 | mariadb-server-core-5.5 mariadb-common rabbitmq-server \ 10 | libmariadbclient18:amd64 mariadb-client-core-5.5 mariadb-server 11 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/liberty/out_provision_conf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get -y --force-yes purge chrony 4 | apt-get -y --force-yes purge software-properties-common 5 | add-apt-repository -y --remove cloud-archive:liberty 6 | apt-get update && apt-get -y --force-yes dist-upgrade 7 |
-------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 5 | apt-key list |grep -iq docker 6 | while [[ $? != 0 ]]; do 7 | echo "*** Error getting the key, retrying…" 8 | apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 9 | apt-key list |grep -iq docker 10 | done 11 | 12 | echo 'deb https://apt.dockerproject.org/repo ubuntu-trusty main' >> /etc/apt/sources.list 13 | apt-get update 14 | apt-get -y --force-yes purge lxc-docker 15 | apt-get -y --force-yes install docker-engine 16 | 17 | docker pull busybox 18 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_docker_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | cat /etc/glance/glance-api.conf | awk '/\[DEFAULT\]/ { print; print "container_formats = ami,ari,aki,bare,ovf,ova,docker"; next }1' > glance-api.conf 5 | mv glance-api.conf /etc/glance/glance-api.conf 6 | service glance-api restart 7 | service glance-registry restart 8 | 9 | . admin-openrc.sh 10 | 11 | docker pull busybox 12 | docker save -o busyimg busybox 13 | openstack image create --file busyimg --public --container-format docker \ 14 | --disk-format raw --id 4ba11f84-9b8d-4bcd-8f28-74f8cedcb1dc busybox 15 | rm busyimg 16 | IMAGE_ID=$(openstack image list | grep busybox | awk '{print $2}') 17 | if [ "${IMAGE_ID}x" == "x" ]; then 18 | echo "Cannot find busybox image" 19 | exit 1 20 | fi 21 | glance image-update --property os_command_line='/bin/sleep 100d' $IMAGE_ID 22 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_flavor_and_image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | . admin-openrc.sh 5 | 6 | sed -ie 's/container_formats = .*/container_formats = ami,ari,aki,bare,ovf,ova,docker,fpga/' /etc/glance/glance-api.conf 7 | service glance-api restart 8 | service glance-registry restart 9 | 10 | sleep 10 11 | 12 | echo "DEADCODE" > fpga_image 13 | glance image-create --id dd834aa4-f950-40e6-8c23-9dab7f3f0138 --file fpga_image --name fpga_image --disk-format raw --container-format fpga 14 | rm fpga_image 15 | nova flavor-create fpga.tiny 6 512 1 1 16 | nova flavor-key fpga.tiny set "hw:fpga_ip_id"="dd834aa4-f950-40e6-8c23-9dab7f3f0138" 17 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_fpga_db: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | . admin-openrc.sh 5 | 6 | su -s /bin/sh -c "nova-manage api_db sync" nova 7 | su -s /bin/sh -c "nova-manage db sync" nova 8 | 9 | service nova-api restart 10 | service nova-scheduler restart 11 | service nova-conductor restart 12 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_fpga_exec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? 
!= 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | fpga_exec="/usr/bin/fpga-cli" 13 | 14 | echo "fpga_access = True" >> /etc/nova/nova-compute.conf 15 | echo "fpga_exec = ${fpga_exec##*/}" >> /etc/nova/nova-compute.conf 16 | 17 | cp -a fpga-nova/bin/fpga-cli.py $fpga_exec 18 | 19 | echo "${fpga_exec##*/}: CommandFilter, ${fpga_exec##*/}, root" >> /etc/nova/rootwrap.d/compute.filters 20 | echo "${fpga_exec##*/}: CommandFilter, ${fpga_exec##*/}, root" >> /etc/nova/rootwrap.d/network.filters 21 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_fpga_files: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? != 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | current=$(dpkg -l |grep -w python-nova | sed -e "s/ii\s\+python-nova\s\+2:\([0-9.]\+\).*/\1/g") 13 | [[ ! -e "fpga-nova/patches/ubuntu_14.04-nova-${current}.patch" ]] && echo "No patch for version ${current}!" && exit 1 14 | fpga_dir=$(pwd) 15 | cd /usr/lib/python2.7/dist-packages 16 | patch -Np1 -i "${fpga_dir}/fpga-nova/patches/ubuntu_14.04-nova-${current}.patch" 17 | cd - 18 | service nova-compute restart 19 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | echo 'CREATE DATABASE glance;' | mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';" |mysql -u root -proot 6 | echo "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';" |mysql -u root -proot 7 | 8 | . 
admin-openrc.sh 9 | 10 | openstack user create --domain default --password GLANCE_PASS glance 11 | openstack role add --project service --user glance admin 12 | openstack service create --name glance \ 13 | --description "OpenStack Image service" image 14 | 15 | openstack endpoint create --region RegionOne \ 16 | image public http://CONTROLLER_HOSTNAME:9292 17 | openstack endpoint create --region RegionOne \ 18 | image internal http://CONTROLLER_HOSTNAME:9292 19 | openstack endpoint create --region RegionOne \ 20 | image admin http://CONTROLLER_HOSTNAME:9292 21 | 22 | apt-get -y --force-yes install glance python-glanceclient 23 | 24 | cat << EOF > /etc/glance/glance-api.conf 25 | [DEFAULT] 26 | verbose = True 27 | 28 | [database] 29 | connection = mysql+pymysql://glance:GLANCE_DBPASS@CONTROLLER_HOSTNAME/glance 30 | backend = sqlalchemy 31 | 32 | [keystone_authtoken] 33 | auth_uri = http://CONTROLLER_HOSTNAME:5000 34 | auth_url = http://CONTROLLER_HOSTNAME:35357 35 | auth_type = password 36 | project_domain_name = default 37 | user_domain_name = default 38 | project_name = service 39 | username = glance 40 | password = GLANCE_PASS 41 | 42 | [paste_deploy] 43 | flavor = keystone 44 | 45 | [glance_store] 46 | stores = file,http 47 | default_store = file 48 | filesystem_store_datadir = /var/lib/glance/images/ 49 | EOF 50 | 51 | cat << EOF > /etc/glance/glance-registry.conf 52 | [DEFAULT] 53 | verbose = True 54 | backend = sqlalchemy 55 | 56 | [database] 57 | connection = mysql+pymysql://glance:GLANCE_DBPASS@CONTROLLER_HOSTNAME/glance 58 | 59 | [keystone_authtoken] 60 | auth_uri = http://CONTROLLER_HOSTNAME:5000 61 | auth_url = http://CONTROLLER_HOSTNAME:35357 62 | auth_type = password 63 | project_domain_name = default 64 | user_domain_name = default 65 | project_name = service 66 | username = glance 67 | password = GLANCE_PASS 68 | 69 | [paste_deploy] 70 | flavor = keystone 71 | EOF 72 | 73 | su -s /bin/sh -c "glance-manage db_sync" glance 74 | 75 | service glance-registry restart 76 | service glance-api restart 77 | 78 | source admin-openrc.sh 79 | wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img 80 | glance image-create --name "cirros" --file cirros-0.3.4-x86_64-disk.img \ 81 | --disk-format qcow2 --container-format bare --visibility public --progress \ 82 | --id 48644096-5555-4835-99a0-59089dd7da1b 83 | glance image-list 84 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_horizon: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes install nova-consoleauth nova-novncproxy nova-cert \ 5 | openstack-dashboard 6 | 7 | sed -i 's/OPENSTACK_HOST = "127.0.0.1"/OPENSTACK_HOST = "CONTROLLER_HOSTNAME"/g' /etc/openstack-dashboard/local_settings.py 8 | sed -i "s/'enable_router': True/'enable_router': False/g" /etc/openstack-dashboard/local_settings.py 9 | sed -i "s/'enable_quotas': True/'enable_quotas': False/g" /etc/openstack-dashboard/local_settings.py 10 | sed -i "s/'enable_ipv6': True/'enable_ipv6': False/g" /etc/openstack-dashboard/local_settings.py 11 | sed -i "s/'enable_lb': True/'enable_lb': False/g" /etc/openstack-dashboard/local_settings.py 12 | sed -i "s/'enable_firewall': True/'enable_firewall': False/g" /etc/openstack-dashboard/local_settings.py 13 | sed -i "s/'enable_vpn': True/'enable_vpn': False/g" /etc/openstack-dashboard/local_settings.py 14 | sed -i "s/''enable_fip_topology_check'': 
True/''enable_fip_topology_check'': False/g" /etc/openstack-dashboard/local_settings.py 15 | sed -i "s/v2.0/v3/g" /etc/openstack-dashboard/local_settings.py 16 | sed -i '4iWSGIApplicationGroup %{GLOBAL}' /etc/apache2/conf-available/openstack-dashboard.conf 17 | 18 | echo 'OPENSTACK_API_VERSIONS = {"identity": 3, "volume": 2,"compute": 2}' >> /etc/openstack-dashboard/local_settings.py 19 | 20 | # Remove Ubuntu branded theme 21 | apt-get -y --force-yes purge openstack-dashboard-ubuntu-theme 22 | 23 | service nova-cert restart 24 | service nova-consoleauth restart 25 | service nova-novncproxy restart 26 | service apache2 reload 27 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_keystone: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | echo 'CREATE DATABASE keystone;' | mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';" |mysql -u root -proot 6 | echo "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';" |mysql -u root -proot 7 | echo "manual" > /etc/init/keystone.override 8 | 9 | apt-get -y --force-yes install keystone apache2 libapache2-mod-wsgi 10 | 11 | cat << EOF > /etc/keystone/keystone.conf 12 | [DEFAULT] 13 | admin_token = ADMIN_TOKEN 14 | log_dir = /var/log/keystone 15 | verbose = True 16 | 17 | [database] 18 | connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@CONTROLLER_HOSTNAME/keystone 19 | 20 | [token] 21 | provider = fernet 22 | 23 | [extra_headers] 24 | Distribution = Ubuntu 25 | EOF 26 | 27 | echo ServerName CONTROLLER_HOSTNAME >> /etc/apache2/apache2.conf 28 | 29 | cat << EOF > /etc/apache2/sites-available/wsgi-keystone.conf 30 | Listen 5000 31 | Listen 35357 32 | 33 | <VirtualHost *:5000> 34 | WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} 35 | WSGIProcessGroup keystone-public 36 | WSGIScriptAlias / /usr/bin/keystone-wsgi-public 37 | WSGIApplicationGroup %{GLOBAL} 38 | WSGIPassAuthorization On 39 | ErrorLogFormat "%{cu}t %M" 40 | ErrorLog /var/log/apache2/keystone.log 41 | CustomLog /var/log/apache2/keystone_access.log combined 42 | 43 | <Directory /usr/bin> 44 | Require all granted 45 | </Directory> 46 | </VirtualHost> 47 | 48 | <VirtualHost *:35357> 49 | WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} 50 | WSGIProcessGroup keystone-admin 51 | WSGIScriptAlias / /usr/bin/keystone-wsgi-admin 52 | WSGIApplicationGroup %{GLOBAL} 53 | WSGIPassAuthorization On 54 | ErrorLogFormat "%{cu}t %M" 55 | ErrorLog /var/log/apache2/keystone.log 56 | CustomLog /var/log/apache2/keystone_access.log combined 57 | 58 | <Directory /usr/bin> 59 | Require all granted 60 | </Directory> 61 | </VirtualHost> 62 | EOF 63 | ln -s /etc/apache2/sites-available/wsgi-keystone.conf \ 64 | /etc/apache2/sites-enabled 65 | service apache2 restart 66 | rm -f /var/lib/keystone/keystone.db 67 | # for some reason, keystone does rollback(?) migrations, or makes them on the 68 | # sqlite db.
Let's do that again 69 | su -s /bin/sh -c "keystone-manage db_sync" keystone 70 | 71 | keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone 72 | 73 | export OS_TOKEN=ADMIN_TOKEN 74 | export OS_URL=http://CONTROLLER_HOSTNAME:35357/v3 75 | export OS_IDENTITY_API_VERSION=3 76 | 77 | openstack service create \ 78 | --name keystone --description "OpenStack Identity" identity 79 | 80 | # endpointy: 81 | openstack endpoint create --region RegionOne \ 82 | identity public http://CONTROLLER_HOSTNAME:5000/v3 83 | openstack endpoint create --region RegionOne \ 84 | identity internal http://CONTROLLER_HOSTNAME:5000/v3 85 | openstack endpoint create --region RegionOne \ 86 | identity admin http://CONTROLLER_HOSTNAME:35357/v3 87 | 88 | # Create projects, users, and roles 89 | openstack domain create --description "Default Domain" default 90 | openstack project create --domain default --description "Admin Project" admin 91 | openstack user create --domain default --password admin admin 92 | openstack role create admin 93 | openstack role add --project admin --user admin admin 94 | openstack project create --domain default \ 95 | --description "Service Project" service 96 | openstack project create --domain default --description "Demo Project" demo 97 | openstack user create --domain default --password demo demo 98 | openstack role create user 99 | openstack role add --project demo --user demo user 100 | 101 | # create openrc scripts 102 | cat << EOF > admin-openrc.sh 103 | export OS_PROJECT_DOMAIN_NAME=ADMIN__OS_PROJECT_DOMAIN_NAME 104 | export OS_USER_DOMAIN_NAME=ADMIN__OS_USER_DOMAIN_NAME 105 | export OS_PROJECT_NAME=ADMIN__OS_PROJECT_NAME 106 | export OS_USERNAME=ADMIN__OS_USERNAME 107 | export OS_PASSWORD=ADMIN__OS_PASSWORD 108 | export OS_AUTH_URL=http://CONTROLLER_HOSTNAME:35357/v3 109 | export OS_IDENTITY_API_VERSION=ADMIN__OS_IDENTITY_API_VERSION 110 | export OS_IMAGE_API_VERSION=ADMIN__OS_IMAGE_API_VERSION 111 | EOF 112 | 113 | cat << EOF > demo-openrc.sh 114 | export OS_PROJECT_DOMAIN_NAME=DEMO__OS_PROJECT_DOMAIN_NAME 115 | export OS_USER_DOMAIN_NAME=DEMO__OS_USER_DOMAIN_NAME 116 | export OS_PROJECT_NAME=DEMO__OS_PROJECT_NAME 117 | export OS_USERNAME=DEMO__OS_USERNAME 118 | export OS_PASSWORD=DEMO__OS_PASSWORD 119 | export OS_AUTH_URL=http://CONTROLLER_HOSTNAME:5000/v3 120 | export OS_IDENTITY_API_VERSION=DEMO__OS_IDENTITY_API_VERSION 121 | export OS_IMAGE_API_VERSION=DEMO__OS_IMAGE_API_VERSION 122 | EOF 123 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_nova: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | 5 | echo 'CREATE DATABASE nova_api;' | mysql -u root -proot 6 | echo 'CREATE DATABASE nova;' | mysql -u root -proot 7 | echo "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 8 | echo "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 9 | echo "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 10 | echo "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 11 | 12 | . 
admin-openrc.sh 13 | 14 | openstack user create --domain default --password NOVA_PASS nova 15 | openstack role add --project service --user nova admin 16 | openstack service create --name nova --description "OpenStack Compute" compute 17 | 18 | openstack endpoint create --region RegionOne \ 19 | compute public http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 20 | openstack endpoint create --region RegionOne \ 21 | compute internal http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 22 | openstack endpoint create --region RegionOne \ 23 | compute admin http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 24 | 25 | apt-get -y --force-yes install nova-api nova-conductor nova-scheduler python-novaclient 26 | 27 | cat << EOF > /etc/nova/nova.conf 28 | [DEFAULT] 29 | dhcpbridge_flagfile=/etc/nova/nova.conf 30 | dhcpbridge=/usr/bin/nova-dhcpbridge 31 | logdir=/var/log/nova 32 | state_path=/var/lib/nova 33 | lock_path=/var/lock/nova 34 | force_dhcp_release=True 35 | libvirt_use_virtio_for_bridges=True 36 | verbose=True 37 | ec2_private_dns_show_ip=True 38 | api_paste_config=/etc/nova/api-paste.ini 39 | enabled_apis=osapi_compute,metadata 40 | 41 | auth_strategy = keystone 42 | my_ip = AAA.BBB.CCC.DDD 43 | rpc_backend = rabbit 44 | verbose = True 45 | 46 | network_api_class = nova.network.api.API 47 | security_group_api = nova 48 | 49 | [api_database] 50 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova_api 51 | 52 | [database] 53 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova 54 | 55 | [oslo_messaging_rabbit] 56 | rabbit_host = CONTROLLER_HOSTNAME 57 | rabbit_userid = openstack 58 | rabbit_password = RABBIT_PASS 59 | 60 | [keystone_authtoken] 61 | auth_uri = http://CONTROLLER_HOSTNAME:5000 62 | auth_url = http://CONTROLLER_HOSTNAME:35357 63 | auth_type = password 64 | project_domain_name = default 65 | user_domain_name = default 66 | project_name = service 67 | username = nova 68 | password = NOVA_PASS 69 | 70 | [oslo_concurrency] 71 | lock_path = /var/lib/nova/tmp 72 | 73 | [glance] 74 | api_servers = http://CONTROLLER_HOSTNAME:9292 75 | EOF 76 | 77 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_nova_compute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes install nova-compute nova-network 5 | 6 | cat << EOF > /etc/nova/nova.conf 7 | [DEFAULT] 8 | dhcpbridge_flagfile=/etc/nova/nova.conf 9 | dhcpbridge=/usr/bin/nova-dhcpbridge 10 | logdir=/var/log/nova 11 | state_path=/var/lib/nova 12 | lock_path=/var/lock/nova 13 | force_dhcp_release=True 14 | libvirt_use_virtio_for_bridges=True 15 | verbose=True 16 | ec2_private_dns_show_ip=True 17 | api_paste_config=/etc/nova/api-paste.ini 18 | enabled_apis=osapi_compute,metadata 19 | 20 | auth_strategy = keystone 21 | my_ip = AAA.BBB.CCC.DDD 22 | rpc_backend = rabbit 23 | verbose = True 24 | 25 | # network conf 26 | network_api_class = nova.network.api.API 27 | security_group_api = nova 28 | #firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver 29 | network_manager = nova.network.manager.FlatDHCPManager 30 | network_size = 254 31 | allow_same_net_traffic = False 32 | multi_host = False 33 | multi_node = False 34 | send_arp_for_ha = False 35 | share_dhcp_address = True 36 | force_dhcp_release = True 37 | #flat_network_bridge = br100 38 | #flat_interface = eth0 39 | #public_interface = eth0 40 | 41 | [database] 42 | connection = 
mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova 43 | 44 | [oslo_messaging_rabbit] 45 | rabbit_host = CONTROLLER_HOSTNAME 46 | rabbit_userid = openstack 47 | rabbit_password = RABBIT_PASS 48 | 49 | [keystone_authtoken] 50 | auth_uri = http://CONTROLLER_HOSTNAME:5000 51 | auth_url = http://CONTROLLER_HOSTNAME:35357 52 | auth_type = password 53 | project_domain_name = default 54 | user_domain_name = default 55 | project_name = service 56 | username = nova 57 | password = NOVA_PASS 58 | 59 | [oslo_concurrency] 60 | lock_path = /var/lib/nova/tmp 61 | 62 | [glance] 63 | api_servers = http://CONTROLLER_HOSTNAME:9292 64 | EOF 65 | 66 | service nova-compute restart 67 | 68 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_nova_docker_patches: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? != 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | git clone https://github.com/openstack/nova-docker -b stable/mitaka 13 | 14 | cd nova-docker 15 | patch -Np1 -i ../fpga-nova/patches/nova_docker_mitaka.patch 16 | pip install . 17 | cd - 18 | 19 | echo "[DEFAULT]" > /etc/nova/nova-compute.conf 20 | echo "compute_driver=novadocker.virt.docker.DockerDriver" >> /etc/nova/nova-compute.conf 21 | echo "fpga_simulation_mode = False" >> /etc/nova/nova-compute.conf 22 | 23 | usermod -a -G docker nova 24 | 25 | service nova-network restart 26 | service nova-compute restart 27 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_nova_scheduler_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_default_filters = RamFilter,ComputeFilter,AvailabilityZoneFilter,ImagePropertiesFilter,FpgaFilter"; next }1' > nova.conf 5 | mv nova.conf /etc/nova/nova.conf 6 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_available_filters = nova.scheduler.filters.fpga_filter.FpgaFilter"; next }1' > nova.conf 7 | mv nova.conf /etc/nova/nova.conf 8 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_available_filters = nova.scheduler.filters.all_filters"; next }1' > nova.conf 9 | mv nova.conf /etc/nova/nova.conf 10 | 11 | service nova-api restart 12 | service nova-scheduler restart 13 | service nova-conductor restart 14 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_openstackclient_db_mq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes install python-openstackclient 5 | 6 | # assuming password 'root' 7 | sudo debconf-set-selections <<< 'mariadb-server-10.0 mysql-server/root_password password root' 8 | sudo debconf-set-selections <<< 'mariadb-server-10.0 mysql-server/root_password_again password root' 9 | apt-get -y --force-yes install mariadb-server python-pymysql 10 | echo '[mysqld]' > /etc/mysql/conf.d/mysqld_openstack.cnf 11 | echo 'bind-address = AAA.BBB.CCC.DDD' >> /etc/mysql/conf.d/mysqld_openstack.cnf 12 | echo "default-storage-engine = innodb" >> /etc/mysql/conf.d/mysqld_openstack.cnf 13 | echo "innodb_file_per_table" >> /etc/mysql/conf.d/mysqld_openstack.cnf 14 | echo 
"collation-server = utf8_general_ci" >> /etc/mysql/conf.d/mysqld_openstack.cnf 15 | echo "init-connect = 'SET NAMES utf8'" >> /etc/mysql/conf.d/mysqld_openstack.cnf 16 | echo "character-set-server = utf8" >> /etc/mysql/conf.d/mysqld_openstack.cnf 17 | service mysql restart 18 | 19 | echo 'rabbitmq - nofile 65536' >> /etc/security/limits.conf 20 | ulimit -H -n 65536 21 | ulimit -S -n 65536 22 | 23 | apt-get -y --force-yes install rabbitmq-server 24 | rabbitmqctl add_user openstack RABBIT_PASS 25 | rabbitmqctl set_permissions openstack ".*" ".*" ".*" 26 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/in_provision_conf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes install chrony 5 | sed -i '/^server [0-9].debian.*/d' /etc/chrony/chrony.conf 6 | echo 'server 172.28.168.170 offline minpoll 8' >> /etc/chrony/chrony.conf 7 | apt-get -y --force-yes install software-properties-common 8 | add-apt-repository -y cloud-archive:mitaka 9 | apt-get update && apt-get -y --force-yes dist-upgrade 10 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes purge docker-engine 5 | sed -i '/deb https:\/\/apt.dockerproject.org\/repo ubuntu-trusty main/d' /etc/apt/sources.list 6 | key=$(apt-key list |grep -B 1 Docker |grep pub |sed 's/pub\s\+[A-Za-z0-9]\+\/\([a-zA-Z0-9]\+\)\s.*/\1/g') 7 | apt-key del $key 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_docker_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_flavor_and_image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_fpga_db: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_fpga_exec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | rm /usr/bin/fpga-cli 5 | rm -fr fpga-nova 6 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_fpga_files: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | rm -fr fpga-nova 4 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service glance-registry stop 5 | service glance-api stop 6 | echo 'DROP DATABASE glance;' | mysql -u root -proot 7 | apt-get -y --force-yes purge glance python-glanceclient 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_horizon: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service apache2 stop 5 | service nova-cert stop 6 | service nova-consoleauth stop 7 | service nova-novncproxy stop 8 | apt-get -y --force-yes purge nova-cert nova-consoleauth nova-novncproxy openstack-dashboard 9 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_keystone: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service apache2 stop 5 | service keystone stop 6 | 7 | echo 'DROP DATABASE keystone;' | mysql -u root -proot 8 | 9 | apt-get -y --force-yes purge keystone apache2 libapache2-mod-wsgi 10 | 11 | rm admin-openrc.sh 12 | rm demo-openrc.sh 13 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_nova: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service nova-api stop 5 | service nova-scheduler stop 6 | service nova-conductor stop 7 | 8 | echo 'DROP DATABASE nova;' | mysql -u root -proot 9 | echo 'DROP DATABASE nova_api;' | mysql -u root -proot 10 | 11 | apt-get -y --force-yes purge nova-api nova-conductor nova-scheduler python-novaclient 12 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_nova_compute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service nova-compute stop 5 | service nova-network stop 6 | 7 | apt-get -y --force-yes purge nova-compute nova-network 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_nova_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service nova-compute stop 5 | rm -fr nova-docker 6 | pip uninstall nova-docker -y 7 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_nova_docker_patches: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service nova-compute stop 5 | rm -fr fpga-nova 6 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_nova_scheduler_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_openstackclient_db_mq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service mysql stop 5 | service rabbitmq-server stop 6 | sed -i '/rabbitmq - nofile 65536/d' /etc/security/limits.conf 7 | 8 | apt-get -y --force-yes purge python-openstackclient mariadb-server \ 9 | python-pymysql mariadb-server-5.5 mariadb-client-5.5 \ 10 | mariadb-server-core-5.5 mariadb-common rabbitmq-server \ 11 | libmariadbclient18:amd64 mariadb-client-core-5.5 mariadb-server 12 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1404/mitaka/out_provision_conf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes
purge chrony 5 | apt-get -y --force-yes purge software-properties-common 6 | add-apt-repository -y --remove cloud-archive:mitaka 7 | apt-get update && apt-get -y --force-yes dist-upgrade 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 5 | apt-key list |grep -iq docker 6 | while [[ $? != 0 ]]; do 7 | echo "*** Error getting the key, retrying…" 8 | apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 9 | apt-key list |grep -iq docker 10 | done 11 | 12 | echo 'deb https://apt.dockerproject.org/repo ubuntu-xenial main' >> /etc/apt/sources.list 13 | apt-get update 14 | apt-get -y --force-yes purge lxc-docker 15 | apt-get -y --force-yes install docker-engine 16 | 17 | docker pull busybox 18 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_docker_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | cat /etc/glance/glance-api.conf | awk '/\[DEFAULT\]/ { print; print "container_formats = ami,ari,aki,bare,ovf,ova,docker"; next }1' > glance-api.conf 5 | mv glance-api.conf /etc/glance/glance-api.conf 6 | service glance-api restart 7 | service glance-registry restart 8 | 9 | . admin-openrc.sh 10 | 11 | docker pull busybox 12 | docker save -o busyimg busybox 13 | openstack image create --file busyimg --public --container-format docker \ 14 | --disk-format raw --id 4ba11f84-9b8d-4bcd-8f28-74f8cedcb1dc busybox 15 | rm busyimg 16 | IMAGE_ID=$(openstack image list | grep busybox | awk '{print $2}') 17 | if [ "${IMAGE_ID}x" == "x" ]; then 18 | echo "Cannot find busybox image" 19 | exit 1 20 | fi 21 | glance image-update --property os_command_line='/bin/sleep 100d' $IMAGE_ID 22 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_flavor_and_image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | . admin-openrc.sh 5 | 6 | sed -ie 's/container_formats = .*/container_formats = ami,ari,aki,bare,ovf,ova,docker,fpga/' /etc/glance/glance-api.conf 7 | service glance-api restart 8 | service glance-registry restart 9 | 10 | sleep 10 11 | 12 | echo "DEADCODE" > fpga_image 13 | glance image-create --id dd834aa4-f950-40e6-8c23-9dab7f3f0138 --file fpga_image --name fpga_image --disk-format raw --container-format fpga 14 | rm fpga_image 15 | nova flavor-create fpga.tiny 6 512 1 1 16 | nova flavor-key fpga.tiny set "hw:fpga_ip_id"="dd834aa4-f950-40e6-8c23-9dab7f3f0138" 17 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_fpga_db: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | . 
admin-openrc.sh 5 | 6 | su -s /bin/sh -c "nova-manage api_db sync" nova 7 | su -s /bin/sh -c "nova-manage db sync" nova 8 | 9 | service nova-api restart 10 | service nova-scheduler restart 11 | service nova-conductor restart 12 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_fpga_exec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? != 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | fpga_exec="/usr/bin/fpga-cli" 13 | 14 | echo "fpga_access = True" >> /etc/nova/nova-compute.conf 15 | echo "fpga_exec = ${fpga_exec##*/}" >> /etc/nova/nova-compute.conf 16 | 17 | cp -a fpga-nova/bin/fpga-cli.py $fpga_exec 18 | 19 | echo "${fpga_exec##*/}: CommandFilter, ${fpga_exec##*/}, root" >> /etc/nova/rootwrap.d/compute.filters 20 | echo "${fpga_exec##*/}: CommandFilter, ${fpga_exec##*/}, root" >> /etc/nova/rootwrap.d/network.filters 21 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_fpga_files: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? != 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | current=$(dpkg -l |grep -w python-nova | sed -e "s/ii\s\+python-nova\s\+2:\([0-9.]\+\).*/\1/g") 13 | [[ ! -e "fpga-nova/patches/ubuntu_16.04-nova-${current}.patch" ]] && echo "No patch for version ${current}!" && exit 1 14 | fpga_dir=$(pwd) 15 | cd /usr/lib/python2.7/dist-packages 16 | patch -Np1 -i "${fpga_dir}/fpga-nova/patches/ubuntu_16.04-nova-${current}.patch" 17 | cd - 18 | service nova-compute restart 19 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | echo 'CREATE DATABASE glance;' | mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';" |mysql -u root -proot 6 | echo "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';" |mysql -u root -proot 7 | 8 | . 
admin-openrc.sh 9 | 10 | openstack user create --domain default --password GLANCE_PASS glance 11 | openstack role add --project service --user glance admin 12 | openstack service create --name glance \ 13 | --description "OpenStack Image service" image 14 | 15 | openstack endpoint create --region RegionOne \ 16 | image public http://CONTROLLER_HOSTNAME:9292 17 | openstack endpoint create --region RegionOne \ 18 | image internal http://CONTROLLER_HOSTNAME:9292 19 | openstack endpoint create --region RegionOne \ 20 | image admin http://CONTROLLER_HOSTNAME:9292 21 | 22 | apt-get -y --force-yes install glance python-glanceclient 23 | 24 | cat << EOF > /etc/glance/glance-api.conf 25 | [DEFAULT] 26 | verbose = True 27 | 28 | [database] 29 | connection = mysql+pymysql://glance:GLANCE_DBPASS@CONTROLLER_HOSTNAME/glance 30 | backend = sqlalchemy 31 | 32 | [keystone_authtoken] 33 | auth_uri = http://CONTROLLER_HOSTNAME:5000 34 | auth_url = http://CONTROLLER_HOSTNAME:35357 35 | auth_type = password 36 | project_domain_name = default 37 | user_domain_name = default 38 | project_name = service 39 | username = glance 40 | password = GLANCE_PASS 41 | 42 | [paste_deploy] 43 | flavor = keystone 44 | 45 | [glance_store] 46 | stores = file,http 47 | default_store = file 48 | filesystem_store_datadir = /var/lib/glance/images/ 49 | EOF 50 | 51 | cat << EOF > /etc/glance/glance-registry.conf 52 | [DEFAULT] 53 | verbose = True 54 | backend = sqlalchemy 55 | 56 | [database] 57 | connection = mysql+pymysql://glance:GLANCE_DBPASS@CONTROLLER_HOSTNAME/glance 58 | 59 | [keystone_authtoken] 60 | auth_uri = http://CONTROLLER_HOSTNAME:5000 61 | auth_url = http://CONTROLLER_HOSTNAME:35357 62 | auth_type = password 63 | project_domain_name = default 64 | user_domain_name = default 65 | project_name = service 66 | username = glance 67 | password = GLANCE_PASS 68 | 69 | [paste_deploy] 70 | flavor = keystone 71 | EOF 72 | 73 | su -s /bin/sh -c "glance-manage db_sync" glance 74 | 75 | service glance-registry restart 76 | service glance-api restart 77 | 78 | source admin-openrc.sh 79 | wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img 80 | glance image-create --name "cirros" --file cirros-0.3.4-x86_64-disk.img \ 81 | --disk-format qcow2 --container-format bare --visibility public --progress \ 82 | --id 48644096-5555-4835-99a0-59089dd7da1b 83 | glance image-list 84 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_horizon: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes install nova-consoleauth nova-novncproxy nova-cert \ 5 | openstack-dashboard 6 | 7 | sed -i 's/OPENSTACK_HOST = "127.0.0.1"/OPENSTACK_HOST = "CONTROLLER_HOSTNAME"/g' /etc/openstack-dashboard/local_settings.py 8 | sed -i "s/'enable_router': True/'enable_router': False/g" /etc/openstack-dashboard/local_settings.py 9 | sed -i "s/'enable_quotas': True/'enable_quotas': False/g" /etc/openstack-dashboard/local_settings.py 10 | sed -i "s/'enable_ipv6': True/'enable_ipv6': False/g" /etc/openstack-dashboard/local_settings.py 11 | sed -i "s/'enable_lb': True/'enable_lb': False/g" /etc/openstack-dashboard/local_settings.py 12 | sed -i "s/'enable_firewall': True/'enable_firewall': False/g" /etc/openstack-dashboard/local_settings.py 13 | sed -i "s/'enable_vpn': True/'enable_vpn': False/g" /etc/openstack-dashboard/local_settings.py 14 | sed -i "s/''enable_fip_topology_check'': 
True/''enable_fip_topology_check'': False/g" /etc/openstack-dashboard/local_settings.py 15 | sed -i "s/v2.0/v3/g" /etc/openstack-dashboard/local_settings.py 16 | echo 'OPENSTACK_API_VERSIONS = {"identity": 3, "volume": 2,"compute": 2}' >> /etc/openstack-dashboard/local_settings.py 17 | 18 | # Remove Ubuntu branded theme 19 | apt-get -y --force-yes purge openstack-dashboard-ubuntu-theme 20 | 21 | service nova-cert restart 22 | service nova-consoleauth restart 23 | service nova-novncproxy restart 24 | service apache2 reload 25 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_keystone: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | echo 'CREATE DATABASE keystone;' | mysql -u root -proot 5 | echo "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';" |mysql -u root -proot 6 | echo "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';" |mysql -u root -proot 7 | 8 | apt-get -y --force-yes install keystone 9 | 10 | cat << EOF > /etc/keystone/keystone.conf 11 | [DEFAULT] 12 | #admin_token = ADMIN_TOKEN 13 | log_dir = /var/log/keystone 14 | verbose = True 15 | 16 | [database] 17 | connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@CONTROLLER_HOSTNAME/keystone 18 | 19 | [token] 20 | provider = fernet 21 | 22 | #[extra_headers] 23 | #Distribution = Ubuntu 24 | EOF 25 | 26 | su -s /bin/sh -c "keystone-manage db_sync" keystone 27 | keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone 28 | keystone-manage credential_setup --keystone-user keystone --keystone-group keystone 29 | 30 | keystone-manage bootstrap --bootstrap-password ADMIN__OS_PASSWORD \ 31 | --bootstrap-admin-url http://CONTROLLER_HOSTNAME:35357/v3/ \ 32 | --bootstrap-internal-url http://CONTROLLER_HOSTNAME:35357/v3/ \ 33 | --bootstrap-public-url http://CONTROLLER_HOSTNAME:5000/v3/ \ 34 | --bootstrap-region-id RegionOne 35 | 36 | echo ServerName CONTROLLER_HOSTNAME >> /etc/apache2/apache2.conf 37 | 38 | service apache2 restart 39 | rm -f /var/lib/keystone/keystone.db 40 | 41 | # create openrc scripts 42 | cat << EOF > admin-openrc.sh 43 | export OS_PROJECT_DOMAIN_NAME=ADMIN__OS_PROJECT_DOMAIN_NAME 44 | export OS_USER_DOMAIN_NAME=ADMIN__OS_USER_DOMAIN_NAME 45 | export OS_PROJECT_NAME=ADMIN__OS_PROJECT_NAME 46 | export OS_USERNAME=ADMIN__OS_USERNAME 47 | export OS_PASSWORD=ADMIN__OS_PASSWORD 48 | export OS_AUTH_URL=http://CONTROLLER_HOSTNAME:35357/v3 49 | export OS_IDENTITY_API_VERSION=ADMIN__OS_IDENTITY_API_VERSION 50 | export OS_IMAGE_API_VERSION=ADMIN__OS_IMAGE_API_VERSION 51 | EOF 52 | 53 | cat << EOF > demo-openrc.sh 54 | export OS_PROJECT_DOMAIN_NAME=DEMO__OS_PROJECT_DOMAIN_NAME 55 | export OS_USER_DOMAIN_NAME=DEMO__OS_USER_DOMAIN_NAME 56 | export OS_PROJECT_NAME=DEMO__OS_PROJECT_NAME 57 | export OS_USERNAME=DEMO__OS_USERNAME 58 | export OS_PASSWORD=DEMO__OS_PASSWORD 59 | export OS_AUTH_URL=http://CONTROLLER_HOSTNAME:5000/v3 60 | export OS_IDENTITY_API_VERSION=DEMO__OS_IDENTITY_API_VERSION 61 | export OS_IMAGE_API_VERSION=DEMO__OS_IMAGE_API_VERSION 62 | EOF 63 | 64 | source admin-openrc.sh 65 | 66 | openstack project create --domain default \ 67 | --description "Service Project" service 68 | openstack project create --domain default --description "Demo Project" demo 69 | openstack user create --domain default --password demo demo 70 | openstack role create user 71 | openstack role add --project 
demo --user demo user 72 | 73 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_nova: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | 5 | echo 'CREATE DATABASE nova_api;' | mysql -u root -proot 6 | echo 'CREATE DATABASE nova;' | mysql -u root -proot 7 | echo "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 8 | echo "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 9 | echo "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 10 | echo "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';" |mysql -u root -proot 11 | 12 | . admin-openrc.sh 13 | 14 | openstack user create --domain default --password NOVA_PASS nova 15 | openstack role add --project service --user nova admin 16 | openstack service create --name nova --description "OpenStack Compute" compute 17 | 18 | openstack endpoint create --region RegionOne \ 19 | compute public http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 20 | openstack endpoint create --region RegionOne \ 21 | compute internal http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 22 | openstack endpoint create --region RegionOne \ 23 | compute admin http://CONTROLLER_HOSTNAME:8774/v2.1/%\(tenant_id\)s 24 | 25 | apt-get -y --force-yes install nova-api nova-conductor nova-scheduler python-novaclient 26 | 27 | cat << EOF > /etc/nova/nova.conf 28 | [DEFAULT] 29 | dhcpbridge_flagfile=/etc/nova/nova.conf 30 | dhcpbridge=/usr/bin/nova-dhcpbridge 31 | logdir=/var/log/nova 32 | state_path=/var/lib/nova 33 | lock_path=/var/lock/nova 34 | force_dhcp_release=True 35 | libvirt_use_virtio_for_bridges=True 36 | verbose=True 37 | debug=True 38 | ec2_private_dns_show_ip=True 39 | api_paste_config=/etc/nova/api-paste.ini 40 | auth_strategy = keystone 41 | my_ip = AAA.BBB.CCC.DDD 42 | verbose = True 43 | use_neutron = False 44 | transport_url = rabbit://openstack:RABBIT_PASS@CONTROLLER_HOSTNAME 45 | 46 | [api_database] 47 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova_api 48 | 49 | [database] 50 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova 51 | 52 | [keystone_authtoken] 53 | auth_uri = http://CONTROLLER_HOSTNAME:5000 54 | auth_url = http://CONTROLLER_HOSTNAME:35357 55 | auth_type = password 56 | project_domain_name = default 57 | user_domain_name = default 58 | project_name = service 59 | username = nova 60 | password = NOVA_PASS 61 | 62 | [oslo_concurrency] 63 | lock_path = /var/lib/nova/tmp 64 | 65 | [glance] 66 | api_servers = http://CONTROLLER_HOSTNAME:9292 67 | EOF 68 | 69 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_nova_compute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes install nova-compute nova-network 5 | 6 | cat << EOF > /etc/nova/nova.conf 7 | [DEFAULT] 8 | dhcpbridge_flagfile=/etc/nova/nova.conf 9 | dhcpbridge=/usr/bin/nova-dhcpbridge 10 | logdir=/var/log/nova 11 | state_path=/var/lib/nova 12 | lock_path=/var/lock/nova 13 | force_dhcp_release=True 14 | libvirt_use_virtio_for_bridges=True 15 | verbose=True 16 | debug=True 17 | ec2_private_dns_show_ip=True 18 | api_paste_config=/etc/nova/api-paste.ini 19 | auth_strategy = keystone 20 
| my_ip = AAA.BBB.CCC.DDD 21 | transport_url = rabbit://openstack:RABBIT_PASS@CONTROLLER_HOSTNAME 22 | use_neutron = False 23 | network_manager = nova.network.manager.FlatDHCPManager 24 | network_size = 254 25 | allow_same_net_traffic = False 26 | multi_host = False 27 | multi_node = False 28 | send_arp_for_ha = False 29 | share_dhcp_address = True 30 | force_dhcp_release = True 31 | 32 | [database] 33 | connection = mysql+pymysql://nova:NOVA_DBPASS@CONTROLLER_HOSTNAME/nova 34 | 35 | [keystone_authtoken] 36 | auth_uri = http://CONTROLLER_HOSTNAME:5000 37 | auth_url = http://CONTROLLER_HOSTNAME:35357 38 | auth_type = password 39 | project_domain_name = default 40 | user_domain_name = default 41 | project_name = service 42 | username = nova 43 | password = NOVA_PASS 44 | 45 | [oslo_concurrency] 46 | lock_path = /var/lib/nova/tmp 47 | 48 | [glance] 49 | api_servers = http://CONTROLLER_HOSTNAME:9292 50 | EOF 51 | 52 | service nova-compute restart 53 | 54 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_nova_docker_patches: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | if [ ! -d fpga-nova ]; then 5 | git clone FPGA-NOVA-REPO 6 | if [ $? != 0 ]; then 7 | echo Failure in getting fpga-nova repository 8 | exit 9 | fi 10 | fi 11 | 12 | git clone https://github.com/openstack/nova-docker 13 | 14 | cd nova-docker 15 | patch -Np1 -i ../fpga-nova/patches/nova_docker_newton.patch 16 | pip install . 17 | cd - 18 | 19 | echo "[DEFAULT]" > /etc/nova/nova-compute.conf 20 | echo "compute_driver=novadocker.virt.docker.DockerDriver" >> /etc/nova/nova-compute.conf 21 | echo "fpga_simulation_mode = False" >> /etc/nova/nova-compute.conf 22 | 23 | usermod -a -G docker nova 24 | 25 | service nova-network restart 26 | service nova-compute restart 27 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_nova_scheduler_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_default_filters = RamFilter,ComputeFilter,AvailabilityZoneFilter,ImagePropertiesFilter,FpgaFilter"; next }1' > nova.conf 5 | mv nova.conf /etc/nova/nova.conf 6 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_available_filters = nova.scheduler.filters.fpga_filter.FpgaFilter"; next }1' > nova.conf 7 | mv nova.conf /etc/nova/nova.conf 8 | cat /etc/nova/nova.conf | awk '/\[DEFAULT\]/ { print; print "scheduler_available_filters = nova.scheduler.filters.all_filters"; next }1' > nova.conf 9 | mv nova.conf /etc/nova/nova.conf 10 | 11 | service nova-api restart 12 | service nova-scheduler restart 13 | service nova-conductor restart 14 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_openstackclient_db_mq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes install python-openstackclient 5 | 6 | # assuming password 'root' 7 | sudo debconf-set-selections <<< 'mariadb-server-10.0 mysql-server/root_password password root' 8 | sudo debconf-set-selections <<< 'mariadb-server-10.0 mysql-server/root_password_again password root' 9 | apt-get -y --force-yes install mariadb-server python-pymysql 10 | echo '[mysqld]' > 
/etc/mysql/mariadb.conf.d/99-openstack.cnf 11 | echo 'bind-address = AAA.BBB.CCC.DDD' >> /etc/mysql/mariadb.conf.d/99-openstack.cnf 12 | echo "default-storage-engine = innodb" >> /etc/mysql/mariadb.conf.d/99-openstack.cnf 13 | echo "innodb_file_per_table" >> /etc/mysql/mariadb.conf.d/99-openstack.cnf 14 | echo "collation-server = utf8_general_ci" >> /etc/mysql/mariadb.conf.d/99-openstack.cnf 15 | echo "init-connect = 'SET NAMES utf8'" >> /etc/mysql/mariadb.conf.d/99-openstack.cnf 16 | echo "character-set-server = utf8" >> /etc/mysql/mariadb.conf.d/99-openstack.cnf 17 | service mysql restart 18 | 19 | echo 'rabbitmq - nofile 65536' >> /etc/security/limits.conf 20 | ulimit -H -n 65536 21 | ulimit -S -n 65536 22 | 23 | apt-get -y --force-yes install rabbitmq-server 24 | rabbitmqctl add_user openstack RABBIT_PASS 25 | rabbitmqctl set_permissions openstack ".*" ".*" ".*" 26 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/in_provision_conf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes install chrony 5 | sed -i '/^server [0-9].debian.*/d' /etc/chrony/chrony.conf 6 | echo 'server 172.28.168.170 offline minpoll 8' >> /etc/chrony/chrony.conf 7 | add-apt-repository -y cloud-archive:newton 8 | apt-get update && apt-get -y --force-yes dist-upgrade 9 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes purge docker-engine 5 | sed -i '/deb https:\/\/apt.dockerproject.org\/repo ubuntu-xenial main/d' /etc/apt/sources.list 6 | key=$(apt-key list |grep -B 1 Docker |grep pub |sed 's/pub\s\+[A-Za-z0-9]\+\/\([a-zA-Z0-9]\+\)\s.*/\1/g') 7 | apt-key del $key 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_docker_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_flavor_and_image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_fpga_db: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_fpga_exec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | rm /usr/bin/fpga-cli 5 | rm -fr fpga-nova 6 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_fpga_files: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | rm -fr fpga-nova 4 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_glance: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service glance-registry stop 5 | service glance-api stop 6 | echo 'DROP DATABASE 
glance;' | mysql -u root -proot 7 | apt-get -y --force-yes purge glance python-glanceclient 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_horizon: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service apache2 stop 5 | service nova-cert stop 6 | service nova-consoleauth stop 7 | service nova-novncproxy stop 8 | apt-get -y --force-yes purge nova-cert nova-consoleauth nova-novncproxy openstack-dashboard 9 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_keystone: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service apache2 stop 5 | 6 | echo 'DROP DATABASE keystone;' | mysql -u root -proot 7 | 8 | apt-get -y --force-yes purge keystone 9 | 10 | rm admin-openrc.sh 11 | rm demo-openrc.sh 12 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_nova: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service nova-api stop 5 | service nova-scheduler stop 6 | service nova-conductor stop 7 | 8 | echo 'DROP DATABASE nova;' | mysql -u root -proot 9 | echo 'DROP DATABASE nova_api;' | mysql -u root -proot 10 | 11 | apt-get -y --force-yes purge nova-api nova-conductor nova-scheduler python-novaclient 12 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_nova_compute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service nova-compute stop 5 | service nova-network stop 6 | 7 | apt-get -y --force-yes purge nova-compute nova-network 8 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_nova_docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service nova-compute stop 5 | rm -fr nova-docker 6 | pip uninstall nova-docker -y 7 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_nova_docker_patches: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service nova-compute stop 5 | rm -fr fpga-nova 6 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_nova_scheduler_filter: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | -------------------------------------------------------------------------------- /build_scripts/ubuntu-1604/newton/out_openstackclient_db_mq: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | service mysql stop 5 | service rabbitmq-server stop 6 | sed -i '/rabbitmq - nofile 65536/d' /etc/security/limits.conf 7 | 8 | apt-get -y --force-yes purge python-openstackclient mariadb-server \ 9 | python-pymysql mariadb-server-5.5 mariadb-client-5.5 \ 10 | mariadb-server-core-5.5 mariadb-common rabbitmq-server \ 11 | libmariadbclient18:amd64 mariadb-client-core-5.5 mariadb-server 12 | --------------------------------------------------------------------------------
/build_scripts/ubuntu-1604/newton/out_provision_conf: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get -y --force-yes purge chrony 5 | add-apt-repository -y --remove cloud-archive:newton 6 | apt-get update && apt-get -y --force-yes dist-upgrade 7 | -------------------------------------------------------------------------------- /patches/nova_docker_liberty.patch: -------------------------------------------------------------------------------- 1 | diff -Naur '--exclude=.git' nova-docker/novadocker/tests/virt/docker/mock_client.py nova-docker_modified/novadocker/tests/virt/docker/mock_client.py 2 | --- nova-docker/novadocker/tests/virt/docker/mock_client.py 2016-10-10 09:23:42.340324506 +0200 3 | +++ nova-docker_modified/novadocker/tests/virt/docker/mock_client.py 2016-10-10 08:28:00.000000000 +0200 4 | @@ -115,7 +115,8 @@ 5 | } 6 | return container_id 7 | 8 | - def start(self, container_id, binds=None, dns=None, privileged=False): 9 | + def start(self, container_id, binds=None, dns=None, privileged=False, 10 | + devices=None): 11 | if container_id not in self._containers: 12 | return False 13 | self._containers[container_id]['running'] = True 14 | diff -Naur '--exclude=.git' nova-docker/novadocker/tests/virt/docker/test_driver.py nova-docker_modified/novadocker/tests/virt/docker/test_driver.py 15 | --- nova-docker/novadocker/tests/virt/docker/test_driver.py 2016-10-10 09:25:31.712328306 +0200 16 | +++ nova-docker_modified/novadocker/tests/virt/docker/test_driver.py 2016-10-10 10:54:02.000000000 +0200 17 | @@ -471,3 +471,32 @@ 18 | 'fake_files', 'fake_password') 19 | info = self.connection._find_container_by_uuid(instance_href['uuid']) 20 | self.assertEqual(instance_href['name'], info['Config'].get('Hostname')) 21 | + 22 | + def test_start_container_pass_devices(self, image_info=None, 23 | + instance_href=None, 24 | + network_info=None): 25 | + if instance_href is None: 26 | + instance_href = utils.get_test_instance() 27 | + if image_info is None: 28 | + image_info = utils.get_test_image_info(None, instance_href) 29 | + image_info['disk_format'] = 'raw' 30 | + image_info['container_format'] = 'docker' 31 | + image_info['properties'] = {'os_command_line': 'uname', 32 | + 'docker_devices': '/dev/loop2,' 33 | + '/dev/loop3'} 34 | + 35 | + with mock.patch.object(self.mock_client, 'start') as mc: 36 | + self.connection.spawn(self.context, instance_href, image_info, 37 | + 'fake_files', 'fake_password', 38 | + network_info=network_info) 39 | + self.assertEqual(mc.call_args[1]['devices'], 40 | + ['/dev/loop2', '/dev/loop3']) 41 | + 42 | + # Test with one device 43 | + image_info['properties'] = {'os_command_line': 'uname', 44 | + 'docker_devices': '/dev/floppy'} 45 | + with mock.patch.object(self.mock_client, 'start') as mc: 46 | + self.connection.spawn(self.context, instance_href, image_info, 47 | + 'fake_files', 'fake_password', 48 | + network_info=network_info) 49 | + self.assertEqual(mc.call_args[1]['devices'], ['/dev/floppy']) 50 | diff -Naur '--exclude=.git' nova-docker/novadocker/virt/docker/driver.py nova-docker_modified/novadocker/virt/docker/driver.py 51 | --- nova-docker/novadocker/virt/docker/driver.py 2016-10-10 09:25:31.716328306 +0200 52 | +++ nova-docker_modified/novadocker/virt/docker/driver.py 2016-10-10 10:47:11.000000000 +0200 53 | @@ -440,11 +440,13 @@ 54 | binds = {mount_origin: {'bind': '/root/.ssh', 'ro': True}} 55 | return binds 56 | 57 | - def _start_container(self, container_id, instance, 
network_info=None): 58 | + def _start_container(self, container_id, instance, network_info=None, 59 | + start_args=None): 60 | + start_args = start_args if start_args else {} 61 | binds = self._get_key_binds(container_id, instance) 62 | dns = self._extract_dns_entries(network_info) 63 | self.docker.start(container_id, binds=binds, dns=dns, 64 | - privileged=CONF.docker.privileged) 65 | + privileged=CONF.docker.privileged, **start_args) 66 | 67 | if not network_info: 68 | return 69 | @@ -471,6 +473,14 @@ 70 | 'network_disabled': True, 71 | } 72 | 73 | + start_args = {} 74 | + try: 75 | + docker_devices = image_meta['properties']['docker_devices'] 76 | + if docker_devices: 77 | + start_args.update({"devices": docker_devices.split(',')}) 78 | + except (KeyError, TypeError): 79 | + pass 80 | + 81 | try: 82 | image = self.docker.inspect_image(self._encode_utf8(image_name)) 83 | except errors.APIError: 84 | @@ -492,7 +502,8 @@ 85 | _('Cannot create container'), 86 | instance_id=instance['name']) 87 | 88 | - self._start_container(container_id, instance, network_info) 89 | + self._start_container(container_id, instance, network_info, 90 | + start_args) 91 | 92 | def _inject_key(self, id, key): 93 | if isinstance(id, dict): 94 | -------------------------------------------------------------------------------- /patches/nova_docker_mitaka.patch: -------------------------------------------------------------------------------- 1 | diff --git a/novadocker/tests/virt/docker/mock_client.py b/novadocker/tests/virt/docker/mock_client.py 2 | index bc50e51..8b3d7d0 100644 3 | --- a/novadocker/tests/virt/docker/mock_client.py 4 | +++ b/novadocker/tests/virt/docker/mock_client.py 5 | @@ -115,7 +115,8 @@ class MockClient(object): 6 | } 7 | return container_id 8 | 9 | - def start(self, container_id, binds=None, dns=None, privileged=False): 10 | + def start(self, container_id, binds=None, dns=None, privileged=False, 11 | + devices=None): 12 | if container_id not in self._containers: 13 | return False 14 | self._containers[container_id]['running'] = True 15 | diff --git a/novadocker/tests/virt/docker/test_driver.py b/novadocker/tests/virt/docker/test_driver.py 16 | index 8469161..29aeec1 100644 17 | --- a/novadocker/tests/virt/docker/test_driver.py 18 | +++ b/novadocker/tests/virt/docker/test_driver.py 19 | @@ -581,3 +581,30 @@ class DockerDriverTestCase(test_virt_drivers._VirtDriverTestCase, 20 | 'fake_files', 'fake_password') 21 | info = self.connection._find_container_by_uuid(instance_href['uuid']) 22 | self.assertEqual(instance_href['name'], info['Config'].get('Hostname')) 23 | + 24 | + def test_start_container_pass_devices(self, image_info=None, 25 | + instance_href=None, 26 | + network_info=None): 27 | + if instance_href is None: 28 | + instance_href = utils.get_test_instance() 29 | + if image_info is None: 30 | + image_info = utils.get_test_image_object(None, instance_href) 31 | + image_info.properties.docker_devices = '/dev/loop2:/dev/loop2,' \ 32 | + '/dev/loop3' 33 | + image_info.disk_format = 'raw' 34 | + image_info.container_format = 'docker' 35 | + 36 | + with mock.patch.object(self.mock_client, 'start') as mc: 37 | + self.connection.spawn(self.context, instance_href, image_info, 38 | + 'fake_files', 'fake_password', 39 | + network_info=network_info) 40 | + self.assertEqual(mc.call_args[1]['devices'], 41 | + ['/dev/loop2:/dev/loop2', '/dev/loop3']) 42 | + 43 | + # Test with one device 44 | + image_info.properties.docker_devices = '/dev/floppy' 45 | + with mock.patch.object(self.mock_client, 'start') as 
mc: 46 | + self.connection.spawn(self.context, instance_href, image_info, 47 | + 'fake_files', 'fake_password', 48 | + network_info=network_info) 49 | + self.assertEqual(mc.call_args[1]['devices'], ['/dev/floppy']) 50 | diff --git a/novadocker/virt/docker/driver.py b/novadocker/virt/docker/driver.py 51 | index 18e52ee..bea8022 100644 52 | --- a/novadocker/virt/docker/driver.py 53 | +++ b/novadocker/virt/docker/driver.py 54 | @@ -464,11 +464,13 @@ class DockerDriver(driver.ComputeDriver): 55 | return [('network-vif-plugged', vif['id']) 56 | for vif in network_info if vif.get('active', True) is False] 57 | 58 | - def _start_container(self, container_id, instance, network_info=None): 59 | + def _start_container(self, container_id, instance, network_info=None, 60 | + start_args=None): 61 | + start_args = start_args if start_args else {} 62 | binds = self._get_key_binds(container_id, instance) 63 | dns = self._extract_dns_entries(network_info) 64 | self.docker.start(container_id, binds=binds, dns=dns, 65 | - privileged=CONF.docker.privileged) 66 | + privileged=CONF.docker.privileged, **start_args) 67 | 68 | if not network_info: 69 | return 70 | @@ -513,6 +515,15 @@ class DockerDriver(driver.ComputeDriver): 71 | 'network_disabled': True, 72 | } 73 | 74 | + start_args = {} 75 | + try: 76 | + if hasattr(image_meta.properties, 'docker_devices'): 77 | + docker_devices = image_meta.properties.docker_devices 78 | + if docker_devices: 79 | + start_args.update({"devices": docker_devices.split(',')}) 80 | + except Exception as ex: 81 | + LOG.warning("Error while retrieving docker_devices: %s" % ex) 82 | + 83 | try: 84 | image = self.docker.inspect_image(self._encode_utf8(image_name)) 85 | except errors.APIError: 86 | @@ -534,7 +545,8 @@ class DockerDriver(driver.ComputeDriver): 87 | _('Cannot create container'), 88 | instance_id=instance['name']) 89 | 90 | - self._start_container(container_id, instance, network_info) 91 | + self._start_container(container_id, instance, network_info, 92 | + start_args) 93 | 94 | def _inject_key(self, id, key): 95 | if isinstance(id, dict): 96 | -------------------------------------------------------------------------------- /patches/nova_docker_newton.patch: -------------------------------------------------------------------------------- 1 | diff --git a/novadocker/tests/virt/docker/mock_client.py b/novadocker/tests/virt/docker/mock_client.py 2 | index bc50e51..8b3d7d0 100644 3 | --- a/novadocker/tests/virt/docker/mock_client.py 4 | +++ b/novadocker/tests/virt/docker/mock_client.py 5 | @@ -115,7 +115,8 @@ class MockClient(object): 6 | } 7 | return container_id 8 | 9 | - def start(self, container_id, binds=None, dns=None, privileged=False): 10 | + def start(self, container_id, binds=None, dns=None, privileged=False, 11 | + devices=None): 12 | if container_id not in self._containers: 13 | return False 14 | self._containers[container_id]['running'] = True 15 | diff --git a/novadocker/tests/virt/docker/test_driver.py b/novadocker/tests/virt/docker/test_driver.py 16 | index f9b2e10..0cb6c88 100644 17 | --- a/novadocker/tests/virt/docker/test_driver.py 18 | +++ b/novadocker/tests/virt/docker/test_driver.py 19 | @@ -581,3 +581,30 @@ class DockerDriverTestCase(test_virt_drivers._VirtDriverTestCase, 20 | 'fake_files', 'fake_password') 21 | info = self.connection._find_container_by_uuid(instance_href['uuid']) 22 | self.assertEqual(instance_href['name'], info['Config'].get('Hostname')) 23 | + 24 | + def test_start_container_pass_devices(self, image_info=None, 25 | + 
instance_href=None, 26 | + network_info=None): 27 | + if instance_href is None: 28 | + instance_href = utils.get_test_instance() 29 | + if image_info is None: 30 | + image_info = utils.get_test_image_object(None, instance_href) 31 | + image_info.properties.docker_devices = '/dev/loop2:/dev/loop2,' \ 32 | + '/dev/loop3' 33 | + image_info.disk_format = 'raw' 34 | + image_info.container_format = 'docker' 35 | + 36 | + with mock.patch.object(self.mock_client, 'start') as mc: 37 | + self.connection.spawn(self.context, instance_href, image_info, 38 | + 'fake_files', 'fake_password', 39 | + network_info=network_info) 40 | + self.assertEqual(mc.call_args[1]['devices'], 41 | + ['/dev/loop2:/dev/loop2', '/dev/loop3']) 42 | + 43 | + # Test with one device 44 | + image_info.properties.docker_devices = '/dev/floppy' 45 | + with mock.patch.object(self.mock_client, 'start') as mc: 46 | + self.connection.spawn(self.context, instance_href, image_info, 47 | + 'fake_files', 'fake_password', 48 | + network_info=network_info) 49 | + self.assertEqual(mc.call_args[1]['devices'], ['/dev/floppy']) 50 | diff --git a/novadocker/virt/docker/driver.py b/novadocker/virt/docker/driver.py 51 | index 517f01d..befcbf7 100644 52 | --- a/novadocker/virt/docker/driver.py 53 | +++ b/novadocker/virt/docker/driver.py 54 | @@ -470,11 +470,13 @@ class DockerDriver(driver.ComputeDriver): 55 | return [('network-vif-plugged', vif['id']) 56 | for vif in network_info if vif.get('active', True) is False] 57 | 58 | - def _start_container(self, container_id, instance, network_info=None): 59 | + def _start_container(self, container_id, instance, network_info=None, 60 | + start_args=None): 61 | + start_args = start_args if start_args else {} 62 | binds = self._get_key_binds(container_id, instance) 63 | dns = self._extract_dns_entries(network_info) 64 | self.docker.start(container_id, binds=binds, dns=dns, 65 | - privileged=CONF.docker.privileged) 66 | + privileged=CONF.docker.privileged, **start_args) 67 | 68 | if not network_info: 69 | return 70 | @@ -519,6 +521,15 @@ class DockerDriver(driver.ComputeDriver): 71 | 'network_disabled': True, 72 | } 73 | 74 | + start_args = {} 75 | + try: 76 | + if hasattr(image_meta.properties, 'docker_devices'): 77 | + docker_devices = image_meta.properties.docker_devices 78 | + if docker_devices: 79 | + start_args.update({"devices": docker_devices.split(',')}) 80 | + except Exception as ex: 81 | + LOG.warning("Error while retrieving docker_devices: %s" % ex) 82 | + 83 | try: 84 | image = self.docker.inspect_image(self._encode_utf8(image_name)) 85 | except errors.APIError: 86 | @@ -540,7 +551,8 @@ class DockerDriver(driver.ComputeDriver): 87 | _('Cannot create container'), 88 | instance_id=instance['name']) 89 | 90 | - self._start_container(container_id, instance, network_info) 91 | + self._start_container(container_id, instance, network_info, 92 | + start_args) 93 | 94 | def _inject_key(self, id, key): 95 | if isinstance(id, dict): 96 | diff --git a/novadocker/virt/docker/vifs.py b/novadocker/virt/docker/vifs.py 97 | index ce9aa09..c19d1a9 100644 98 | --- a/novadocker/virt/docker/vifs.py 99 | +++ b/novadocker/virt/docker/vifs.py 100 | @@ -35,7 +35,6 @@ CONF = cfg.CONF 101 | CONF.import_opt('my_ip', 'nova.conf.netconf') 102 | CONF.import_opt('vlan_interface', 'nova.manager') 103 | CONF.import_opt('flat_interface', 'nova.manager') 104 | -CONF.import_opt('network_device_mtu', 'nova.objects.network') 105 | 106 | LOG = logging.getLogger(__name__) 107 | 108 | @@ -435,16 +434,6 @@ class 
DockerGenericVIFDriver(object): 109 | utils.execute('ip', 'netns', 'exec', container_id, 'ip', 'link', 110 | 'set', if_remote_name, 'up', run_as_root=True) 111 | 112 | - # Setup MTU on if_remote_name is required if it is a non 113 | - # default value 114 | - mtu = CONF.network_device_mtu 115 | - if vif.get('mtu') is not None: 116 | - mtu = vif.get('mtu') 117 | - if mtu is not None: 118 | - utils.execute('ip', 'netns', 'exec', container_id, 'ip', 119 | - 'link', 'set', if_remote_name, 'mtu', mtu, 120 | - run_as_root=True) 121 | - 122 | if gateway is not None: 123 | utils.execute('ip', 'netns', 'exec', container_id, 124 | 'ip', 'route', 'replace', 'default', 'via', 125 | -------------------------------------------------------------------------------- /patches/rhel_72-nova-14.0.2-tests.patch: -------------------------------------------------------------------------------- 1 | diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py 2 | index a3edd52..d3ef3b3 100644 3 | --- a/nova/tests/unit/api/openstack/fakes.py 4 | +++ b/nova/tests/unit/api/openstack/fakes.py 5 | @@ -395,7 +395,7 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None, 6 | memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0, 7 | instance_type=None, launch_index=0, kernel_id="", 8 | ramdisk_id="", user_data=None, system_metadata=None, 9 | - services=None): 10 | + services=None, fpga_device=""): 11 | if user_id is None: 12 | user_id = 'fake_user' 13 | if project_id is None: 14 | @@ -504,7 +504,8 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None, 15 | "flavor": flavorinfo, 16 | }, 17 | "cleaned": cleaned, 18 | - "services": services} 19 | + "services": services, 20 | + "fpga_device": ""} 21 | 22 | instance.update(info_cache) 23 | instance['info_cache']['instance_uuid'] = instance['uuid'] 24 | diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py 25 | index 6bd25e3..f8974fd 100644 26 | --- a/nova/tests/unit/compute/test_claims.py 27 | +++ b/nova/tests/unit/compute/test_claims.py 28 | @@ -88,12 +88,14 @@ class ClaimTestCase(test.NoDBTestCase): 29 | 30 | requests = requests or self.empty_requests 31 | 32 | + fpga_ip_id = kwargs.pop('fpga_ip_id', None) 33 | + 34 | @mock.patch('nova.db.instance_extra_get_by_instance_uuid', 35 | return_value=db_numa_topology) 36 | def get_claim(mock_extra_get): 37 | return claims.Claim(self.context, instance, self.tracker, 38 | self.resources, requests, overhead=overhead, 39 | - limits=limits) 40 | + limits=limits, fpga_ip_id=fpga_ip_id) 41 | return get_claim() 42 | 43 | def _fake_instance(self, **kwargs): 44 | @@ -131,6 +133,8 @@ class ClaimTestCase(test.NoDBTestCase): 45 | 'free_disk_gb': 20, 46 | 'vcpus': 2, 47 | 'vcpus_used': 0, 48 | + 'fpga_regions': 0, 49 | + 'fpga_regions_used': 0, 50 | 'numa_topology': objects.NUMATopology( 51 | cells=[objects.NUMACell(id=1, cpuset=set([1, 2]), memory=512, 52 | memory_usage=0, cpu_usage=0, 53 | @@ -245,6 +249,27 @@ class ClaimTestCase(test.NoDBTestCase): 54 | self._claim, requests=requests) 55 | mock_pci_supports_requests.assert_called_once_with([request]) 56 | 57 | + def test_fpga_pass(self): 58 | + limits = {'fpga_regions': 2} 59 | + self.resources.fpga_regions_used = 0 60 | + claim = self._claim(limits, fpga_ip_id='accelerator_id') 61 | + self.assertIsNone(claim._test_fpga(resources=self.resources, 62 | + limit=2)) 63 | + 64 | + def test_fpga_insufficient(self): 65 | + limits = {'fpga_regions': 2} 66 | + self.resources.fpga_regions_used = 2 67 | + 
self.assertRaises(exception.ComputeResourcesUnavailable, 68 | + self._claim, limits=limits, 69 | + fpga_ip_id='accelerator_id') 70 | + 71 | + def test_fpga_pass_when_fpga_not_requested(self): 72 | + limits = {'fpga_regions': 2} 73 | + self.resources.fpga_regions_used = 2 74 | + claim = self._claim(limits, fpga_ip_id=None) 75 | + self.assertIsNone(claim._test_fpga(resources=self.resources, 76 | + limit=2)) 77 | + 78 | @mock.patch('nova.pci.stats.PciDeviceStats.support_requests') 79 | def test_pci_pass_no_requests(self, mock_pci_supports_requests): 80 | self._claim() 81 | @@ -384,6 +409,7 @@ class MoveClaimTestCase(ClaimTestCase): 82 | 83 | def _claim(self, limits=None, overhead=None, requests=None, 84 | image_meta=None, **kwargs): 85 | + fpga_ip_id = kwargs.pop('fpga_ip_id', None) 86 | instance_type = self._fake_instance_type(**kwargs) 87 | numa_topology = kwargs.pop('numa_topology', None) 88 | image_meta = image_meta or {} 89 | @@ -412,7 +438,7 @@ class MoveClaimTestCase(ClaimTestCase): 90 | return claims.MoveClaim(self.context, self.instance, instance_type, 91 | image_meta, self.tracker, self.resources, 92 | requests, overhead=overhead, 93 | - limits=limits) 94 | + limits=limits, fpga_ip_id=fpga_ip_id) 95 | return get_claim() 96 | 97 | @mock.patch('nova.objects.Instance.drop_migration_context') 98 | diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py 99 | index 601a4f6..5067e06 100644 100 | --- a/nova/tests/unit/compute/test_compute.py 101 | +++ b/nova/tests/unit/compute/test_compute.py 102 | @@ -194,7 +194,9 @@ class BaseTestCase(test.TestCase): 103 | 'cpu_allocation_ratio': 16.0, 104 | 'ram_allocation_ratio': 1.5, 105 | 'disk_allocation_ratio': 1.0, 106 | - 'host_ip': '127.0.0.1'}] 107 | + 'host_ip': '127.0.0.1', 108 | + 'fpga_regions': 0, 109 | + 'fpga_regions_used': 0}] 110 | return [objects.ComputeNode._from_db_object( 111 | context, objects.ComputeNode(), cn) 112 | for cn in fake_compute_nodes] 113 | diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py 114 | index 719bda1..5c5229f 100644 115 | --- a/nova/tests/unit/compute/test_compute_mgr.py 116 | +++ b/nova/tests/unit/compute/test_compute_mgr.py 117 | @@ -253,6 +253,46 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase): 118 | else: 119 | self.assertFalse(db_node.destroy.called) 120 | 121 | + @mock.patch('nova.compute.manager.ComputeManager.' 122 | + '_notify_about_instance_usage') 123 | + @mock.patch('nova.compute.manager.ComputeManager._shutdown_instance') 124 | + @mock.patch('nova.compute.manager.ComputeManager.' 125 | + 'update_available_resource') 126 | + @mock.patch('nova.objects.Instance.destroy') 127 | + @mock.patch('nova.objects.Instance.save') 128 | + @mock.patch('nova.fpga.erase_fpga') 129 | + def test_delete_instance_fpga_erased(self, fpga_erase, 130 | + instance_save, instance_destroy, update_resources, 131 | + shutdown_instance, inst_usage_notify): 132 | + quotas = mock.create_autospec(objects.Quotas, spec_set=True) 133 | + instance = fake_instance.fake_instance_obj(self.context, 134 | + vm_state=vm_states.ACTIVE, 135 | + expected_attrs=['system_metadata', 'info_cache', 'fpga_device']) 136 | + 137 | + instance.fpga_device = "accelerator_id" 138 | + self.compute._delete_instance(self.context, instance, [], quotas) 139 | + fpga_erase.assert_called_once_with(instance) 140 | + 141 | + @mock.patch('nova.compute.manager.ComputeManager.' 
142 | + '_notify_about_instance_usage') 143 | + @mock.patch('nova.compute.manager.ComputeManager._shutdown_instance') 144 | + @mock.patch('nova.compute.manager.ComputeManager.' 145 | + 'update_available_resource') 146 | + @mock.patch('nova.objects.Instance.destroy') 147 | + @mock.patch('nova.objects.Instance.save') 148 | + @mock.patch('nova.fpga.erase_fpga') 149 | + def test_delete_instance_without_fpga_no_erasing(self, fpga_erase, 150 | + instance_save, instance_destroy, update_resources, 151 | + shutdown_instance, inst_usage_notify): 152 | + quotas = mock.create_autospec(objects.Quotas, spec_set=True) 153 | + instance = fake_instance.fake_instance_obj(self.context, 154 | + vm_state=vm_states.ACTIVE, 155 | + expected_attrs=['system_metadata', 'info_cache', 'fpga_device']) 156 | + 157 | + instance.fpga_device = None 158 | + self.compute._delete_instance(self.context, instance, [], quotas) 159 | + self.assertFalse(fpga_erase.called) 160 | + 161 | @mock.patch('nova.compute.utils.notify_about_instance_action') 162 | def test_delete_instance_without_info_cache(self, mock_notify): 163 | instance = fake_instance.fake_instance_obj( 164 | @@ -3306,7 +3346,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 165 | self.image, self.injected_files, self.admin_pass, 166 | self.requested_networks, self.security_groups, 167 | self.block_device_mapping, self.node, self.limits, 168 | - self.filter_properties) 169 | + self.filter_properties, request_spec={}) 170 | 171 | # This test when sending an icehouse compatible rpc call to juno compute 172 | # node, NetworkRequest object can load from three items tuple. 173 | @@ -3371,7 +3411,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 174 | self.image, self.injected_files, self.admin_pass, 175 | self.requested_networks, self.security_groups, 176 | self.block_device_mapping, self.node, self.limits, 177 | - self.filter_properties) 178 | + self.filter_properties, request_spec={}) 179 | mock_clean_net.assert_called_once_with(self.context, self.instance, 180 | self.requested_networks) 181 | mock_clean_vol.assert_called_once_with(self.context, 182 | @@ -3418,7 +3458,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 183 | self.image, self.injected_files, self.admin_pass, 184 | self.requested_networks, self.security_groups, 185 | self.block_device_mapping, self.node, self.limits, 186 | - self.filter_properties) 187 | + self.filter_properties, request_spec={}) 188 | mock_clean.assert_called_once_with(self.context, self.instance, 189 | self.compute.host) 190 | mock_nil.assert_called_once_with(self.instance) 191 | @@ -3501,7 +3541,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 192 | self.image, self.injected_files, self.admin_pass, 193 | self.requested_networks, self.security_groups, 194 | self.block_device_mapping, self.node, self.limits, 195 | - self.filter_properties) 196 | + self.filter_properties, request_spec={}) 197 | mock_cleanup_network.assert_called_once_with( 198 | self.context, instance, self.compute.host) 199 | mock_build_ins.assert_called_once_with(self.context, 200 | @@ -3556,7 +3596,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 201 | self.image, self.injected_files, self.admin_pass, 202 | self.requested_networks, self.security_groups, 203 | self.block_device_mapping, self.node, self.limits, 204 | - self.filter_properties) 205 | + self.filter_properties, request_spec={}) 206 | mock_cleanup_network.assert_called_once_with( 207 | self.context, instance, 
self.requested_networks) 208 | mock_build_ins.assert_called_once_with(self.context, 209 | @@ -3600,7 +3640,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 210 | mock_build_run.assert_called_once_with(self.context, self.instance, 211 | self.image, self.injected_files, self.admin_pass, 212 | self.requested_networks, self.security_groups, 213 | - self.block_device_mapping, self.node, self.limits, {}) 214 | + self.block_device_mapping, self.node, self.limits, {}, 215 | + request_spec={}) 216 | mock_clean_net.assert_called_once_with(self.context, self.instance, 217 | self.requested_networks) 218 | mock_add.assert_called_once_with(self.context, self.instance, 219 | @@ -3649,7 +3690,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 220 | self.image, self.injected_files, self.admin_pass, 221 | self.requested_networks, self.security_groups, 222 | self.block_device_mapping, self.node, self.limits, 223 | - self.filter_properties) 224 | + self.filter_properties, request_spec={}) 225 | mock_deallocate.assert_called_once_with(self.instance) 226 | mock_clean_inst.assert_called_once_with(self.context, self.instance, 227 | self.compute.host) 228 | @@ -3697,7 +3738,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 229 | self.image, self.injected_files, self.admin_pass, 230 | self.requested_networks, self.security_groups, 231 | self.block_device_mapping, self.node, self.limits, 232 | - self.filter_properties) 233 | + self.filter_properties, request_spec={}) 234 | mock_deallocate.assert_called_once_with(self.instance) 235 | mock_clean.assert_called_once_with(self.context, self.instance, 236 | self.requested_networks) 237 | @@ -3757,7 +3798,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): 238 | self.image, self.injected_files, self.admin_pass, 239 | self.requested_networks, self.security_groups, 240 | self.block_device_mapping, self.node, self.limits, 241 | - self.filter_properties) 242 | + self.filter_properties, request_spec={}) 243 | mock_clean_net.assert_called_once_with(self.context, self.instance, 244 | self.requested_networks) 245 | 246 | diff --git a/nova/tests/unit/compute/test_multiple_nodes.py b/nova/tests/unit/compute/test_multiple_nodes.py 247 | index 2907e94..aae48d6 100644 248 | --- a/nova/tests/unit/compute/test_multiple_nodes.py 249 | +++ b/nova/tests/unit/compute/test_multiple_nodes.py 250 | @@ -100,7 +100,9 @@ class MultiNodeComputeTestCase(BaseTestCase): 251 | 'cpu_allocation_ratio': None, 252 | 'ram_allocation_ratio': None, 253 | 'disk_allocation_ratio': None, 254 | - 'host_ip': '127.0.0.1'}] 255 | + 'host_ip': '127.0.0.1', 256 | + 'fpga_regions': 0, 257 | + 'fpga_regions_used': 0}] 258 | return [objects.ComputeNode._from_db_object( 259 | context, objects.ComputeNode(), cn) 260 | for cn in fake_compute_nodes] 261 | diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py 262 | index fcc623e..9caadc9 100644 263 | --- a/nova/tests/unit/compute/test_resource_tracker.py 264 | +++ b/nova/tests/unit/compute/test_resource_tracker.py 265 | @@ -86,6 +86,8 @@ _COMPUTE_NODE_FIXTURES = [ 266 | cpu_allocation_ratio=16.0, 267 | ram_allocation_ratio=1.5, 268 | disk_allocation_ratio=1.0, 269 | + fpga_regions=0, 270 | + fpga_regions_used=0, 271 | ), 272 | ] 273 | 274 | @@ -1371,6 +1373,51 @@ class TestInstanceClaim(BaseTestCase): 275 | self.assertTrue(obj_base.obj_equal_prims(expected_updated, 276 | self.rt.compute_node)) 277 | 278 | + @mock.patch('nova.utils.execute') 279 | + def 
test_update_fpga_resource(self, mock_execute): 280 | + resource_tracker.CONF.fpga_access = True 281 | + resource_tracker.CONF.fpga_exec = "fpga_exec" 282 | + mock_execute.return_value = ('Used regions: 1/2', 0) 283 | + 284 | + resources = {'fpga_regions': 2, 'fpga_regions_used': 0} 285 | + self.rt._update_fpga_resource(resources) 286 | + 287 | + self.assertEqual(resources['fpga_regions'], 2) 288 | + self.assertEqual(resources['fpga_regions_used'], 1) 289 | + 290 | + # Cleanup 291 | + resource_tracker.CONF.fpga_access = False 292 | + 293 | + @mock.patch('nova.utils.execute') 294 | + def test_update_fpga_resource_fpga_exec_not_found(self, mock_execute): 295 | + resource_tracker.CONF.fpga_access = True 296 | + resource_tracker.CONF.fpga_exec = "fpga_exec" 297 | + mock_execute.side_effect = OSError('fpga_exec binary not found') 298 | + 299 | + resources = {'fpga_regions': 0, 'fpga_regions_used': 0} 300 | + self.rt._update_fpga_resource(resources) 301 | + 302 | + self.assertEqual(resources['fpga_regions'], 0) 303 | + self.assertEqual(resources['fpga_regions_used'], 0) 304 | + 305 | + # Cleanup 306 | + resource_tracker.CONF.fpga_access = False 307 | + 308 | + @mock.patch('nova.utils.execute') 309 | + def test_update_fpga_resource_failed_to_parse_response(self, mock_exec): 310 | + resource_tracker.CONF.fpga_access = True 311 | + resource_tracker.CONF.fpga_exec = "fpga_exec" 312 | + mock_exec.return_value = ('Invalid format of this message', 0) 313 | + 314 | + resources = {'fpga_regions': 0, 'fpga_regions_used': 0} 315 | + self.rt._update_fpga_resource(resources) 316 | + 317 | + self.assertEqual(resources['fpga_regions'], 0) 318 | + self.assertEqual(resources['fpga_regions_used'], 0) 319 | + 320 | + # Cleanup 321 | + resource_tracker.CONF.fpga_access = False 322 | + 323 | @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid') 324 | @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node') 325 | def test_claim(self, migr_mock, pci_mock): 326 | @@ -1609,7 +1656,6 @@ class TestResize(BaseTestCase): 327 | get_mock.return_value = _INSTANCE_FIXTURES 328 | migr_mock.return_value = [] 329 | get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0] 330 | - 331 | instance = _INSTANCE_FIXTURES[0].obj_clone() 332 | instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2] 333 | # This migration context is fine, it points to the first instance 334 | diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py 335 | index 2dcc81f..4cb3869 100644 336 | --- a/nova/tests/unit/compute/test_shelve.py 337 | +++ b/nova/tests/unit/compute/test_shelve.py 338 | @@ -36,6 +36,8 @@ def _fake_resources(): 339 | 'local_gb': 20, 340 | 'local_gb_used': 0, 341 | 'free_disk_gb': 20, 342 | + 'fpga_regions': 0, 343 | + 'fpga_regions_used': 0, 344 | 'vcpus': 2, 345 | 'vcpus_used': 0 346 | } 347 | diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py 348 | index a4483c4..c007e64 100644 349 | --- a/nova/tests/unit/db/test_db_api.py 350 | +++ b/nova/tests/unit/db/test_db_api.py 351 | @@ -7663,7 +7663,8 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin): 352 | cpu_allocation_ratio=16.0, 353 | ram_allocation_ratio=1.5, 354 | disk_allocation_ratio=1.0, 355 | - stats='', numa_topology='') 356 | + stats='', numa_topology='', 357 | + fpga_regions=0, fpga_regions_used=0) 358 | # add some random stats 359 | self.stats = dict(num_instances=3, num_proj_12345=2, 360 | num_proj_23456=2, num_vm_building=3) 361 | @@ -7712,6 +7713,8 @@ class 
ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin): 362 | cpu_allocation_ratio=16.0, 363 | ram_allocation_ratio=1.5, 364 | disk_allocation_ratio=1.0, 365 | + fpga_regions=0, 366 | + fpga_regions_used=0, 367 | stats='', numa_topology='') 368 | stats = dict(num_instances=2, num_proj_12345=1, 369 | num_proj_23456=1, num_vm_building=2) 370 | @@ -8905,7 +8908,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin): 371 | # NOTE(PaulMurray): migration 333 adds 'console_auth_tokens' 372 | if table_name in ['tags', 'resource_providers', 'allocations', 373 | 'inventories', 'resource_provider_aggregates', 374 | - 'console_auth_tokens']: 375 | + 'console_auth_tokens', 'fpga_devices']: 376 | continue 377 | 378 | if table_name.startswith("shadow_"): 379 | diff --git a/nova/tests/unit/db/test_migrations.py b/nova/tests/unit/db/test_migrations.py 380 | index 7c47f4c..6f10063 100644 381 | --- a/nova/tests/unit/db/test_migrations.py 382 | +++ b/nova/tests/unit/db/test_migrations.py 383 | @@ -916,6 +916,23 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync, 384 | self.assertColumnExists(engine, 'shadow_instance_extra', 385 | 'device_metadata') 386 | 387 | + def _check_335(self, engine, data): 388 | + self.assertColumnExists(engine, 'compute_nodes', 'fpga_regions') 389 | + self.assertColumnExists(engine, 'shadow_compute_nodes', 'fpga_regions') 390 | + self.assertColumnExists(engine, 'compute_nodes', 'fpga_regions_used') 391 | + self.assertColumnExists(engine, 'shadow_compute_nodes', 392 | + 'fpga_regions_used') 393 | + 394 | + def _check_336(self, engine, data): 395 | + self.assertColumnExists(engine, 'instances', 'fpga_device') 396 | + self.assertColumnExists(engine, 'shadow_instances', 'fpga_device') 397 | + 398 | + def _check_337(self, engine, data): 399 | + self.assertColumnExists(engine, 'fpga_devices', 'instance_uuid') 400 | + self.assertIndexMembers(engine, 'fpga_devices', 401 | + 'ix_fpga_devices_instance_uuid', 402 | + ['instance_uuid']) 403 | + 404 | 405 | class TestNovaMigrationsSQLite(NovaMigrationsCheckers, 406 | test_base.DbTestCase, 407 | diff --git a/nova/tests/unit/db/test_sqlalchemy_migration.py b/nova/tests/unit/db/test_sqlalchemy_migration.py 408 | index 9f2af9d..102871d 100644 409 | --- a/nova/tests/unit/db/test_sqlalchemy_migration.py 410 | +++ b/nova/tests/unit/db/test_sqlalchemy_migration.py 411 | @@ -239,29 +239,6 @@ class TestNewtonCheck(test.TestCase): 412 | '330_enforce_mitaka_online_migrations') 413 | self.engine = db_api.get_engine() 414 | 415 | - def test_all_migrated(self): 416 | - cn = objects.ComputeNode(context=self.context, 417 | - vcpus=1, memory_mb=512, local_gb=10, 418 | - vcpus_used=0, memory_mb_used=256, 419 | - local_gb_used=5, hypervisor_type='HyperDanVM', 420 | - hypervisor_version='34', cpu_info='foo') 421 | - cn.create() 422 | - objects.Aggregate(context=self.context, 423 | - name='foo').create() 424 | - objects.PciDevice.create(self.context, {}) 425 | - self.migration.upgrade(self.engine) 426 | - 427 | - def test_cn_not_migrated(self): 428 | - cn = objects.ComputeNode(context=self.context, 429 | - vcpus=1, memory_mb=512, local_gb=10, 430 | - vcpus_used=0, memory_mb_used=256, 431 | - local_gb_used=5, hypervisor_type='HyperDanVM', 432 | - hypervisor_version='34', cpu_info='foo') 433 | - cn.create() 434 | - db_api.compute_node_update(self.context, cn.id, {'uuid': None}) 435 | - self.assertRaises(exception.ValidationError, 436 | - self.migration.upgrade, self.engine) 437 | - 438 | def test_aggregate_not_migrated(self): 
439 | agg = db_api.aggregate_create(self.context, {"name": "foobar"}) 440 | db_api.aggregate_update(self.context, agg.id, {'uuid': None}) 441 | diff --git a/nova/tests/unit/fpga/__init__.py b/nova/tests/unit/fpga/__init__.py 442 | new file mode 100644 443 | index 0000000..e69de29 444 | diff --git a/nova/tests/unit/fpga/test_fpga.py b/nova/tests/unit/fpga/test_fpga.py 445 | new file mode 100644 446 | index 0000000..32f35c8 447 | --- /dev/null 448 | +++ b/nova/tests/unit/fpga/test_fpga.py 449 | @@ -0,0 +1,160 @@ 450 | +# Copyright 2014 IBM Corp. 451 | +# All Rights Reserved. 452 | +# 453 | +# Licensed under the Apache License, Version 2.0 (the "License"); you may 454 | +# not use this file except in compliance with the License. You may obtain 455 | +# a copy of the License at 456 | +# 457 | +# http://www.apache.org/licenses/LICENSE-2.0 458 | +# 459 | +# Unless required by applicable law or agreed to in writing, software 460 | +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 461 | +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 462 | +# License for the specific language governing permissions and limitations 463 | +# under the License. 464 | + 465 | +"""Tests for FPGA basic functions""" 466 | + 467 | +import mock 468 | +from nova import context 469 | +from nova import exception 470 | +from nova import fpga 471 | +from nova import test 472 | +from nova.tests.unit import fake_instance 473 | +from oslo_concurrency.processutils import ProcessExecutionError 474 | + 475 | + 476 | +class FpgaTestCase(test.NoDBTestCase): 477 | + 478 | + def test_get_ip_image_id_from_flavor(self): 479 | + request_spec = {'instance_type': {'extra_specs': 480 | + {'hw:fpga_ip_id': 'accelerator_ip'}}} 481 | + self.assertEqual('accelerator_ip', fpga.get_ip_image_id(request_spec)) 482 | + 483 | + def test_get_ip_image_id_from_image_metadata(self): 484 | + request_spec = { 485 | + 'image': {'properties': {'hw_fpga_ip_id': 'accelerator_ip'}}} 486 | + self.assertEqual('accelerator_ip', fpga.get_ip_image_id(request_spec)) 487 | + 488 | + def test_get_ip_image_id_image_precedes(self): 489 | + request_spec = { 490 | + 'image': {'properties': {'hw_fpga_ip_id': 'id_from_image'}}, 491 | + 'instance_type': {'extra_specs': 492 | + {'hw:fpga_ip_id': 'id_from_flavor'}}} 493 | + self.assertEqual('id_from_image', fpga.get_ip_image_id(request_spec)) 494 | + 495 | + def test_get_ip_image_id_no_fpga_key_found(self): 496 | + request_spec = None 497 | + self.assertIsNone(fpga.get_ip_image_id(request_spec)) 498 | + request_spec = {'instance_type': {'extra_specs': {'key': 'val'}}} 499 | + self.assertIsNone(fpga.get_ip_image_id(request_spec)) 500 | + request_spec = {'image': {'properties': {'key': 'val'}}} 501 | + self.assertIsNone(fpga.get_ip_image_id(request_spec)) 502 | + 503 | + @mock.patch('nova.utils.execute') 504 | + def test_program_ip_simulation_mode_programming_not_called(self, 505 | + execute_mock): 506 | + fpga.CONF.fpga_simulation_mode = True 507 | + fpga.program_ip(None, None) 508 | + self.assertFalse(execute_mock.called) 509 | + 510 | + @mock.patch('nova.utils.execute') 511 | + def test_program_ip_succeeded(self, execute_mock): 512 | + fpga.CONF.fpga_simulation_mode = False 513 | + 514 | + req_context = context.RequestContext('fake', 'fake') 515 | + instance = fake_instance.fake_instance_obj(req_context, 516 | + expected_attrs=[]) 517 | + 518 | + execute_mock.return_value = ('fpga_image_id', 0) 519 | + 520 | + fpga.program_ip('fpga_image_id', instance) 521 | + 
self.assertEqual(instance.fpga_device, 'fpga_image_id') 522 | + 523 | + @mock.patch('nova.utils.execute') 524 | + def test_program_ip_failed(self, execute_mock): 525 | + fpga.CONF.fpga_simulation_mode = False 526 | + 527 | + req_context = context.RequestContext('fake', 'fake') 528 | + instance = fake_instance.fake_instance_obj(req_context, 529 | + expected_attrs=[]) 530 | + 531 | + execute_mock.return_value = ['fpga_image_id', 'failed'] 532 | + 533 | + self.assertRaises(exception.RescheduledException, fpga.program_ip, 534 | + 'fpga_image_id', instance) 535 | + self.assertIsNone(instance.fpga_device) 536 | + 537 | + @mock.patch('nova.utils.execute', 538 | + side_effect=ProcessExecutionError("failed")) 539 | + def test_program_ip_failed_with_processexecutionerror(self, execute_mock): 540 | + fpga.CONF.fpga_simulation_mode = False 541 | + 542 | + req_context = context.RequestContext('fake', 'fake') 543 | + instance = fake_instance.fake_instance_obj(req_context, 544 | + expected_attrs=[]) 545 | + 546 | + self.assertRaises(exception.RescheduledException, fpga.program_ip, 547 | + 'fpga_image_id', instance) 548 | + self.assertIsNone(instance.fpga_device) 549 | + 550 | + @mock.patch('nova.utils.execute', side_effect=OSError("failed")) 551 | + def test_program_ip_failed_with_oserror(self, execute_mock): 552 | + fpga.CONF.fpga_simulation_mode = False 553 | + 554 | + req_context = context.RequestContext('fake', 'fake') 555 | + instance = fake_instance.fake_instance_obj(req_context, 556 | + expected_attrs=[]) 557 | + 558 | + self.assertRaises(exception.RescheduledException, fpga.program_ip, 559 | + 'fpga_image_id', instance) 560 | + self.assertIsNone(instance.fpga_device) 561 | + 562 | + @mock.patch('nova.utils.execute') 563 | + def test_erase_fpga_succeeded(self, execute_mock): 564 | + fpga.CONF.fpga_simulation_mode = False 565 | + 566 | + req_context = context.RequestContext('fake', 'fake') 567 | + instance = fake_instance.fake_instance_obj(req_context, 568 | + expected_attrs=[]) 569 | + 570 | + execute_mock.return_value = ('fpga_image_id', 0) 571 | + 572 | + erase_result = fpga.erase_fpga(instance) 573 | + self.assertFalse(erase_result[1]) 574 | + 575 | + @mock.patch('nova.utils.execute') 576 | + def test_erase_fpga_failed(self, execute_mock): 577 | + fpga.CONF.fpga_simulation_mode = False 578 | + 579 | + req_context = context.RequestContext('fake', 'fake') 580 | + instance = fake_instance.fake_instance_obj(req_context, 581 | + expected_attrs=[]) 582 | + 583 | + execute_mock.return_value = ('fpga_image_id', 'failed') 584 | + 585 | + erase_result = fpga.erase_fpga(instance) 586 | + self.assertTrue(erase_result[1]) 587 | + 588 | + @mock.patch('nova.utils.execute', side_effect=OSError("failed")) 589 | + def test_erase_fpga_failed_with_oserror(self, execute_mock): 590 | + fpga.CONF.fpga_simulation_mode = False 591 | + 592 | + req_context = context.RequestContext('fake', 'fake') 593 | + instance = fake_instance.fake_instance_obj(req_context, 594 | + expected_attrs=[]) 595 | + 596 | + erase_result = fpga.erase_fpga(instance) 597 | + self.assertTrue(erase_result[1]) 598 | + 599 | + @mock.patch('nova.utils.execute', 600 | + side_effect=ProcessExecutionError("failed")) 601 | + def test_erase_fpga_failed_with_processexecutionerror(self, execute_mock): 602 | + fpga.CONF.fpga_simulation_mode = False 603 | + 604 | + req_context = context.RequestContext('fake', 'fake') 605 | + instance = fake_instance.fake_instance_obj(req_context, 606 | + expected_attrs=[]) 607 | + 608 | + erase_result = 
fpga.erase_fpga(instance) 609 | + self.assertTrue(erase_result[1]) 610 | diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py 611 | index ff0459b..b92cf0b 100644 612 | --- a/nova/tests/unit/objects/test_compute_node.py 613 | +++ b/nova/tests/unit/objects/test_compute_node.py 614 | @@ -90,6 +90,8 @@ fake_compute_node = { 615 | 'cpu_allocation_ratio': 16.0, 616 | 'ram_allocation_ratio': 1.5, 617 | 'disk_allocation_ratio': 1.0, 618 | + 'fpga_regions': 0, 619 | + 'fpga_regions_used': 0 620 | } 621 | # FIXME(sbauza) : For compatibility checking, to be removed once we are sure 622 | # that all computes are running latest DB version with host field in it. 623 | diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py 624 | index 6bc72d6..a86e942 100644 625 | --- a/nova/tests/unit/objects/test_objects.py 626 | +++ b/nova/tests/unit/objects/test_objects.py 627 | @@ -1109,7 +1109,7 @@ object_data = { 628 | 'BuildRequestList': '1.0-cd95608eccb89fbc702c8b52f38ec738', 629 | 'CellMapping': '1.0-7f1a7e85a22bbb7559fc730ab658b9bd', 630 | 'CellMappingList': '1.0-4ee0d9efdfd681fed822da88376e04d2', 631 | - 'ComputeNode': '1.16-2436e5b836fa0306a3c4e6d9e5ddacec', 632 | + 'ComputeNode': '1.17-0b99b9d63197845ba019e9d34b4315ae', 633 | 'ComputeNodeList': '1.15-4ec4ea3ed297edbd25c33e2aaf797cca', 634 | 'DNSDomain': '1.0-7b0b2dab778454b6a7b6c66afe163a1a', 635 | 'DNSDomainList': '1.0-4ee0d9efdfd681fed822da88376e04d2', 636 | @@ -1132,8 +1132,8 @@ object_data = { 637 | 'HVSpec': '1.2-db672e73304da86139086d003f3977e7', 638 | 'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502', 639 | 'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d', 640 | - 'ImageMetaProps': '1.15-d45133ec8d2d4a6456338fb0ffd0e5c2', 641 | - 'Instance': '2.3-4f98ab23f4b0a25fabb1040c8f5edecc', 642 | + 'ImageMetaProps': '1.17-a69b05c98ac0f1183b61c50c3a848db6', 643 | + 'Instance': '2.4-0438594b2facc08b6d9b5743aefd95e1', 644 | 'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914', 645 | 'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33', 646 | 'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be', 647 | diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py 648 | index bbf1761..dbbfa4c 100644 649 | --- a/nova/tests/unit/scheduler/fakes.py 650 | +++ b/nova/tests/unit/scheduler/fakes.py 651 | @@ -82,7 +82,7 @@ COMPUTE_NODES = [ 652 | hypervisor_type='foo', supported_hv_specs=[], 653 | pci_device_pools=None, cpu_info=None, stats=None, metrics=None, 654 | cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, 655 | - disk_allocation_ratio=1.0), 656 | + disk_allocation_ratio=1.0, fpga_regions=0, fpga_regions_used=0), 657 | objects.ComputeNode( 658 | id=2, local_gb=2048, memory_mb=2048, vcpus=2, 659 | disk_available_least=1024, free_ram_mb=1024, vcpus_used=2, 660 | @@ -93,7 +93,7 @@ COMPUTE_NODES = [ 661 | hypervisor_type='foo', supported_hv_specs=[], 662 | pci_device_pools=None, cpu_info=None, stats=None, metrics=None, 663 | cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, 664 | - disk_allocation_ratio=1.0), 665 | + disk_allocation_ratio=1.0, fpga_regions=0, fpga_regions_used=0), 666 | objects.ComputeNode( 667 | id=3, local_gb=4096, memory_mb=4096, vcpus=4, 668 | disk_available_least=3333, free_ram_mb=3072, vcpus_used=1, 669 | @@ -104,7 +104,7 @@ COMPUTE_NODES = [ 670 | hypervisor_type='foo', supported_hv_specs=[], 671 | pci_device_pools=None, cpu_info=None, stats=None, metrics=None, 672 | cpu_allocation_ratio=16.0, 
ram_allocation_ratio=1.5, 673 | - disk_allocation_ratio=1.0), 674 | + disk_allocation_ratio=1.0, fpga_regions=0, fpga_regions_used=0), 675 | objects.ComputeNode( 676 | id=4, local_gb=8192, memory_mb=8192, vcpus=8, 677 | disk_available_least=8192, free_ram_mb=8192, vcpus_used=0, 678 | @@ -115,7 +115,7 @@ COMPUTE_NODES = [ 679 | hypervisor_type='foo', supported_hv_specs=[], 680 | pci_device_pools=None, cpu_info=None, stats=None, metrics=None, 681 | cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, 682 | - disk_allocation_ratio=1.0), 683 | + disk_allocation_ratio=1.0, fpga_regions=0, fpga_regions_used=0), 684 | # Broken entry 685 | objects.ComputeNode( 686 | id=5, local_gb=1024, memory_mb=1024, vcpus=1, 687 | diff --git a/nova/tests/unit/scheduler/filters/test_fpga_filter.py b/nova/tests/unit/scheduler/filters/test_fpga_filter.py 688 | new file mode 100644 689 | index 0000000..97e7df1 690 | --- /dev/null 691 | +++ b/nova/tests/unit/scheduler/filters/test_fpga_filter.py 692 | @@ -0,0 +1,62 @@ 693 | +# Copyright 2016, OpenStack Foundation 694 | +# All Rights Reserved. 695 | +# 696 | +# Licensed under the Apache License, Version 2.0 (the "License"); 697 | +# you may not use this file except in compliance with the License. 698 | +# You may obtain a copy of the License at 699 | +# 700 | +# http://www.apache.org/licenses/LICENSE-2.0 701 | +# 702 | +# Unless required by applicable law or agreed to in writing, software 703 | +# distributed under the License is distributed on an "AS IS" BASIS, 704 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 705 | +# See the License for the specific language governing permissions and 706 | +# limitations under the License. 707 | + 708 | +from nova import objects 709 | +from nova import test 710 | + 711 | +from nova.scheduler.filters import fpga_filter 712 | +from nova.tests.unit.scheduler import fakes 713 | + 714 | + 715 | +class TestFpgaFilter(test.NoDBTestCase): 716 | + 717 | + def setUp(self): 718 | + super(TestFpgaFilter, self).setUp() 719 | + self.filt_cls = fpga_filter.FpgaFilter() 720 | + 721 | + def test_fpga_filter_passes_image_properties_matched(self): 722 | + host = self._create_host_state(fpga_regions=2, fpga_regions_used=0) 723 | + spec = self._create_spec(extra_specs={}, 724 | + hw_fpga_ip_id='accelerator_id') 725 | + self.assertTrue(self.filt_cls.host_passes(host, spec)) 726 | + 727 | + def test_fpga_filter_passes_flavor_specs_matched(self): 728 | + host = self._create_host_state(fpga_regions=2, fpga_regions_used=0) 729 | + extra_specs = {'hw:fpga_ip_id': 'accelerator_id'} 730 | + spec = self._create_spec(extra_specs=extra_specs) 731 | + self.assertTrue(self.filt_cls.host_passes(host, spec)) 732 | + 733 | + def test_fpga_filter_fails_no_free_fpga_regions(self): 734 | + host = self._create_host_state(fpga_regions=2, fpga_regions_used=2) 735 | + extra_specs = {'hw:fpga_ip_id': 'accelerator_id'} 736 | + spec = self._create_spec(extra_specs=extra_specs) 737 | + self.assertFalse(self.filt_cls.host_passes(host, spec)) 738 | + 739 | + def test_fpga_filter_passes_no_fpga_property(self): 740 | + host = self._create_host_state(fpga_regions=2, fpga_regions_used=0) 741 | + spec = self._create_spec(extra_specs={'some_key': ''}, 742 | + hw_video_ram=123) 743 | + self.assertTrue(self.filt_cls.host_passes(host, spec)) 744 | + 745 | + def _create_spec(self, extra_specs, **image_properties): 746 | + flavor = objects.Flavor(extra_specs=extra_specs) 747 | + properties = objects.ImageMetaProps(**image_properties) 748 | + image = 
objects.ImageMeta(properties=properties) 749 | + return objects.RequestSpec(flavor=flavor, image=image) 750 | + 751 | + def _create_host_state(self, fpga_regions, fpga_regions_used): 752 | + return fakes.FakeHostState('host1', 'node1', 753 | + {'fpga_regions': fpga_regions, 754 | + 'fpga_regions_used': fpga_regions_used}) 755 | diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py 756 | index 312e1ef..fe69f3c 100644 757 | --- a/nova/tests/unit/scheduler/test_host_manager.py 758 | +++ b/nova/tests/unit/scheduler/test_host_manager.py 759 | @@ -967,7 +967,7 @@ class HostStateTestCase(test.NoDBTestCase): 760 | hypervisor_version=hyper_ver_int, numa_topology=None, 761 | pci_device_pools=None, metrics=None, 762 | cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, 763 | - disk_allocation_ratio=1.0) 764 | + disk_allocation_ratio=1.0, fpga_regions=0, fpga_regions_used=0) 765 | 766 | host = host_manager.HostState("fakehost", "fakenode") 767 | host.update(compute=compute) 768 | @@ -1010,7 +1010,7 @@ class HostStateTestCase(test.NoDBTestCase): 769 | hypervisor_version=hyper_ver_int, numa_topology=None, 770 | pci_device_pools=None, metrics=None, 771 | cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, 772 | - disk_allocation_ratio=1.0) 773 | + disk_allocation_ratio=1.0, fpga_regions=0, fpga_regions_used=0) 774 | 775 | host = host_manager.HostState("fakehost", "fakenode") 776 | host.update(compute=compute) 777 | @@ -1043,7 +1043,7 @@ class HostStateTestCase(test.NoDBTestCase): 778 | hypervisor_version=hyper_ver_int, numa_topology=None, 779 | pci_device_pools=None, metrics=None, 780 | cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, 781 | - disk_allocation_ratio=1.0) 782 | + disk_allocation_ratio=1.0, fpga_regions=0, fpga_regions_used=0) 783 | 784 | host = host_manager.HostState("fakehost", "fakenode") 785 | host.update(compute=compute) 786 | @@ -1204,7 +1204,7 @@ class HostStateTestCase(test.NoDBTestCase): 787 | numa_topology=fakes.NUMA_TOPOLOGY._to_json(), 788 | stats=None, pci_device_pools=None, 789 | cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5, 790 | - disk_allocation_ratio=1.0) 791 | + disk_allocation_ratio=1.0, fpga_regions=0, fpga_regions_used=0) 792 | host = host_manager.HostState("fakehost", "fakenode") 793 | host.update(compute=compute) 794 | 795 | --------------------------------------------------------------------------------
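
Usage note (not part of the repository dump): the patches above introduce two custom hooks. The nova-docker patches add a `docker_devices` image property, a comma-separated list of host devices handed through to `docker start`, and the Nova patches add an FPGA region claim driven by the `hw:fpga_ip_id` flavor extra spec or the `hw_fpga_ip_id` image property. The sketch below is illustrative only: the image and flavor names are placeholders, and only the property keys themselves come from the patches; a real deployment may wire these up differently.

#!/bin/bash
# Hypothetical names: my-docker-image, m1.fpga, fpga-instance.
# Expose two loop devices inside a Docker-backed instance
# ('docker_devices' image property from the nova-docker patches).
openstack image set --property docker_devices='/dev/loop2,/dev/loop3' my-docker-image

# Request an FPGA IP image via the flavor ('hw:fpga_ip_id' extra spec;
# 'hw_fpga_ip_id' can alternatively be set as an image property).
openstack flavor set --property hw:fpga_ip_id=accelerator_id m1.fpga

# Boot an instance; the FpgaFilter and the compute claim added by the
# patches account for free fpga_regions on the chosen host.
openstack server create --flavor m1.fpga --image my-docker-image fpga-instance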