├── .gitignore ├── .gitreview ├── Doxyfile ├── INFO.yaml ├── LICENSE ├── Makefile ├── README.md ├── Vagrantfile ├── cleanup.sh ├── common.env.tmpl ├── common ├── common.sh ├── functions.sh ├── setup_docker.sh ├── setup_docker_macosx.sh ├── setup_docker_registry.sh ├── setup_docker_root.sh └── tf_functions.sh ├── container ├── Dockerfile.centos ├── Dockerfile.ubi7 ├── Dockerfile.ubi8 ├── build-centos7.sh ├── build-centos8.sh ├── build.sh ├── entrypoint.sh ├── populate-cache.sh ├── run.sh └── tpc.repo ├── run.sh ├── scripts ├── build-tpp.sh ├── controller_ut │ ├── definitions.sh │ ├── run-tests.py │ └── run-tests.sh ├── fetch-packages.sh ├── gather-unittest-targets.py ├── go │ └── run-tests.sh ├── package-tpp.sh ├── package │ ├── Dockerfile.src.tmpl │ ├── build-containers.sh │ ├── build-operator-containers.sh │ ├── build-src-containers.sh │ ├── build-test-containers.sh │ ├── list-containers.sh │ └── prepare-containers.sh ├── patch-repo-manifest.py ├── run-tests.sh ├── setup-httpd.sh ├── sync-sources.sh ├── tox │ └── run-tests.sh └── webui_ut │ └── run-tests.sh ├── skip_tests ├── src_containers_to_publish └── startup.sh /.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | .vagrant 3 | *.cfg 4 | /code 5 | common.env 6 | daemon.json 7 | dev_config.yaml 8 | vars.yaml 9 | /config 10 | /output 11 | /input 12 | /contrail 13 | build*.log 14 | .DS_Store 15 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2018 Juniper Networks, Inc. All rights reserved. 3 | # 4 | # OpenContrail Code Review System 5 | # 6 | # Please visit opencontrail.org additional information. 7 | # 8 | # e.g. After changes are complete and commited to a local branch, do 9 | # 'git review' to submit changeset to gerrit.tungsten.io Code Review 10 | # (gerrit) System. 
11 | # 12 | [gerrit] 13 | host=gerrit.opensdn.io 14 | port=29418 15 | project=tungstenfabric/tf-dev-env.git 16 | defaultbranch=master 17 | -------------------------------------------------------------------------------- /INFO.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | project: 'tf-dev-env' 3 | project_creation_date: '2018-01-04' 4 | project_category: '' 5 | lifecycle_state: 'Incubation' 6 | project_lead: &tungstenfabric_tf-dev-env_ptl 7 | name: 'Andrey Pavlov' 8 | email: 'andrey-mp@yandex.ru' 9 | id: 'Andrey-mp' 10 | company: 'Progmaticlab' 11 | timezone: 'Europe/Moscow' 12 | primary_contact: *tungstenfabric_tf-dev-env_ptl 13 | issue_tracking: 14 | type: 'jira' 15 | url: 'https://jira.tungsten.io/projects/' 16 | key: '' 17 | mailing_list: 18 | type: 'groups.io' 19 | url: '' 20 | tag: '[]' 21 | realtime_discussion: 22 | type: '' 23 | server: '' 24 | channel: '' 25 | meetings: 26 | - type: '' 27 | agenda: '' 28 | url: '' 29 | server: '' 30 | channel: '' 31 | repeats: '' 32 | time: '' 33 | repositories: 34 | - tungstenfabric/tf-dev-env 35 | committers: 36 | - <<: *tungstenfabric_tf-dev-env_ptl 37 | - name: 'Alexandre Levine' 38 | email: 'alevine@progmaticlab.com' 39 | company: 'Progmaticlab' 40 | id: 'alexandrelevine' 41 | timezone: 'Europe/Moscow' 42 | - name: 'Alexey Morlang' 43 | email: 'alexey.morlang@gmail.com' 44 | company: 'Progmaticlab' 45 | id: 'alexey-mr' 46 | timezone: 'Europe/Moscow' 47 | - name: 'Darien Hirotsu' 48 | email: 'darien@tachtech.net' 49 | company: '' 50 | id: 'dhirotsu' 51 | timezone: 'America/Los_Angeles' 52 | - name: 'Michał Krawczyk' 53 | email: 'krawczyk.michal91@gmail.com' 54 | company: '' 55 | id: 'kravvcu' 56 | timezone: 'Poland/Warsaw' 57 | - name: 'Will Stevens' 58 | email: 'williamstevens@gmail.com' 59 | company: '' 60 | id: 'swill' 61 | timezone: 'America/New_York' 62 | - name: 'Szymon Krasuski' 63 | email: 'krasuski.szymon.piotr@gmail.com' 64 | company: 'CodiLime' 65 | id: 
'Dysproz' 66 | timezone: 'Europe/Warsaw' 67 | tsc: 68 | # yamllint disable rule:line-length 69 | approval: 'https://wiki.tungsten.io/display/TUN/Projects' 70 | changes: 71 | - type: 'initial approval' 72 | name: '' 73 | link: 'https://lists.tungsten.io/g/tsc/topic/88752516#1249' 74 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TF_DE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) 2 | TF_DE_TOP := $(abspath $(TF_DE_DIR)/../)/ 3 | SHELL=/bin/bash -o pipefail 4 | 5 | # include RPM-building targets 6 | -include $(TF_DE_TOP)contrail/tools/packages/Makefile 7 | 8 | REPODIR=$(TF_DE_TOP)contrail 9 | CONTAINER_BUILDER_DIR=$(REPODIR)/contrail-container-builder 10 | CONTRAIL_TEST_DIR=$(REPODIR)/third_party/contrail-test 11 | export REPODIR 12 | export CONTRAIL_TEST_DIR 13 | export CONTAINER_BUILDER_DIR 14 | 15 | all: dep rpm containers 16 | 17 | fetch_packages: 18 | @$(TF_DE_DIR)scripts/fetch-packages.sh 19 | 20 | setup: 21 | @yum autoremove -y python2-requests python2-urllib3 22 | @python2 -m pip list | grep urllib3 >/dev/null && python2 -m pip uninstall -y urllib3 requests chardet || true 23 | @python2 -m pip -q uninstall -y setuptools || true 24 | @yum -q reinstall -y python2-setuptools 25 | @yum -q install -y python2-requests python2-urllib3 26 | 27 | sync: 28 | 
@$(TF_DE_DIR)scripts/sync-sources.sh 29 | 30 | ############################################################################## 31 | # RPM repo targets 32 | create-repo: 33 | @mkdir -p $(REPODIR)/RPMS 34 | @createrepo --update $(REPODIR)/RPMS/ 35 | @echo "INFO: clean all for contrail repo after udpate" 36 | @yum clean all --disablerepo=* --enablerepo=contrail || true 37 | 38 | update-repo: create-repo 39 | 40 | clean-repo: 41 | @test -d $(REPODIR)/RPMS/repodata && rm -rf $(REPODIR)/RPMS/repodata || true 42 | 43 | setup-httpd: 44 | @$(TF_DE_DIR)scripts/setup-httpd.sh 45 | 46 | ############################################################################## 47 | # Contrail third party packaged 48 | build-tpp: 49 | @$(TF_DE_DIR)scripts/build-tpp.sh 50 | 51 | package-tpp: 52 | @$(TF_DE_DIR)scripts/package-tpp.sh 53 | 54 | ############################################################################## 55 | # Container deployer-src targets 56 | src-containers: 57 | @$(TF_DE_DIR)scripts/package/build-src-containers.sh |& sed "s/^/src-containers: /" 58 | 59 | ############################################################################## 60 | # Container builder targets 61 | prepare-containers: 62 | @$(TF_DE_DIR)scripts/package/prepare-containers.sh |& sed "s/^/containers: /" 63 | 64 | list-containers: 65 | @$(TF_DE_DIR)scripts/package/list-containers.sh $(CONTAINER_BUILDER_DIR) container 66 | 67 | container-%: 68 | @$(TF_DE_DIR)scripts/package/build-containers.sh $(CONTAINER_BUILDER_DIR) container $(patsubst container-%,%,$(subst _,/,$(@))) | sed "s/^/$(@): /" 69 | 70 | containers-only: 71 | @$(TF_DE_DIR)scripts/package/build-containers.sh $(CONTAINER_BUILDER_DIR) container |& sed "s/^/containers: /" 72 | 73 | containers: prepare-containers containers-only 74 | 75 | ############################################################################## 76 | # Operator container targets 77 | operator-containers: 78 | @$(TF_DE_DIR)scripts/package/build-operator-containers.sh |& sed 
"s/^/operator-containers: /" 79 | 80 | ############################################################################## 81 | # Test container targets 82 | test-containers: 83 | @$(TF_DE_DIR)scripts/package/build-test-containers.sh |& sed "s/^/test-containers: /" 84 | 85 | ############################################################################## 86 | # Unit Test targets 87 | test: 88 | @$(TF_DE_DIR)scripts/run-tests.sh $(TEST_PACKAGE) 89 | 90 | ############################################################################## 91 | # Prepare Doxygen documentation 92 | doxygen: 93 | echo $(DOXYFILE) 94 | doxygen $(DOXYFILE) 95 | 96 | ############################################################################## 97 | # Other clean targets 98 | clean-rpm: 99 | @test -d $(REPODIR)/RPMS && rm -rf $(REPODIR)/RPMS/* || true 100 | 101 | clean: clean-deployers clean-containers clean-repo clean-rpm 102 | @true 103 | 104 | dbg: 105 | @echo $(TF_DE_TOP) 106 | @echo $(TF_DE_DIR) 107 | 108 | .PHONY: clean-deployers clean-containers clean-repo clean-rpm setup build containers deployers createrepo all 109 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ** THIS REPOSITORY IS NO LONGER USED ** 2 | 3 | # PLEASE SEE OPENSDN.IO INSTEAD 4 | 5 | # tf-dev-env: Tungsten Fabric Developer Environment 6 | 7 | tf-dev-env is a tool which allows building, unit-testing and linting TF. 8 | Everything is done inside a container which is controlled by the run.sh script with its parameters. 9 | 10 | ## Hardware and software requirements 11 | 12 | Minimal: 13 | 14 | - instance with 2 virtual CPU, 8 GB of RAM and 64 GB of disk space 15 | 16 | Recommended: 17 | 18 | - instance with 4+ virtual CPU, 16+ GB of RAM and 64 GB of disk space 19 | 20 | - Ubuntu 18.04 21 | - CentOS 7.x 22 | - MacOS (Experimental support, please ensure that you have brew and coreutils installed) 23 | 24 | 
## Quick start 25 | 26 | ### 1. Preparation part 27 | 28 | Enable passwordless sudo for your user 29 | (for centos example: [serverfault page](https://serverfault.com/questions/160581/how-to-setup-passwordless-sudo-on-linux)) 30 | 31 | Install git: 32 | 33 | ``` bash 34 | sudo yum install -y git 35 | ``` 36 | 37 | For MacOS only: 38 | 39 | The script will install a limited number of dependencies using `brew` 40 | (python, docker, lsof). The `coreutils` package is needed by the 41 | script itself. 42 | 43 | For Docker, the community edition will be installed if no other 44 | version is already present. Please ensure that you have started Docker Desktop 45 | (Docker.app) application. 46 | 47 | ``` bash 48 | brew install git 49 | brew install coreutils 50 | ``` 51 | 52 | Create a WORKSPACE directory (build artifacts will be put there) and ```export WORKSPACE=myworkspacedir``` if you want to have specific workspace different from your current directory used by default. 53 | 54 | ### 2. Download tf-dev-env and fetch sources 55 | 56 | ``` bash 57 | git clone http://github.com/tungstenfabric/tf-dev-env 58 | ``` 59 | 60 | Prepare the build container and fetch TF sources: 61 | 62 | ``` bash 63 | tf-dev-env/run.sh 64 | ``` 65 | 66 | Note: The sources are fetched into the directory $WORKSPACE/contrail. 67 | The [repo tool](https://storage.googleapis.com/git-repo-downloads/repo) is used for fetching. 68 | The directory structure corresponds to [default.xml](https://github.com/tungstenfabric/tf-vnc/blob/master/default.xml) 69 | 70 | ### 3. Make changes (if any needed) 71 | 72 | Make required changes in sources fetched to contrail directory. For example, fetch particular review for controller (you can find download link in the gerrit review): 73 | 74 | ``` bash 75 | cd contrail/controller 76 | git fetch "https://gerrit.tungsten.io/tungstenfabric/tf-controller" refs/changes/..... && git checkout FETCH_HEAD 77 | cd ../../ 78 | ``` 79 | 80 | ### 3. 
Build 81 | 82 | Run the build 83 | 84 | ``` bash 85 | tf-dev-env/run.sh build 86 | ``` 87 | 88 | ### 3. Unit-test 89 | 90 | Run the unit-testing 91 | 92 | ``` bash 93 | tf-dev-env/run.sh test 94 | ``` 95 | 96 | This command can be parameterized 97 | 98 | ``` bash 99 | tf-dev-env/run.sh test TARGET 100 | ``` 101 | 102 | TARGET can be: 103 | 104 | - ui. WebUI unit tests will be run. 105 | - tox. tox will be run for project $GERRIT_PROJECT. Specific tox target like pep8 can be run with placing target's name into file tf-dev-env/input/target_set 106 | - any test name from Scons (example can be found in file ci_unittests.json of tf-controller repository) 107 | - without any arguments default set of Scons UT will be run (please refer to default section of ci_unittests.json) 108 | 109 | To skip some tests, you have to create a `tf-dev-env/skip_tests` file listing such tests. Example file: 110 | 111 | ```bash 112 | test_query_security_group_with_one_tag 113 | test_connection_status 114 | test_uuid_in_duplicate_name 115 | test_query_all_floating_ip_with_match_any 116 | ``` 117 | 118 | Note, that skipping tests doesn't work for `src/contrail-analytics/contrail-broadview:test`, `src/contrail-analytics/contrail-snmp-collector:test` and `src/contrail-analytics/contrail-topology:test` targets. 119 | 120 | ## Targets 121 | 122 | Various optional targets can be given as parameters to run.sh command. There are simple or complex ones. 123 | 124 | For example, The target 'build' is a sequence of fetch, configure, compile and package targets. Each target is executed once and would be skipped on next runs of the build target. 
125 | Any target can be run again explicitly if needed like: 126 | 127 | ``` bash 128 | ./run.sh compile 129 | ./run.sh package 130 | ``` 131 | 132 | Supported targets: 133 | 134 | - fetch - sync TF git repos 135 | - configure - fetch third party packages and install dependencies 136 | - compile - build TF binaries (RPM-s) 137 | - package - package TF into docker containers 138 | - test - run unittests 139 | 140 | ## Advanced usage 141 | 142 | It is possible to use a more fine-grained build process by running the make tool for building artifacts manually. 143 | Note: the described way below uses internal commands and might be changed in the future. 144 | 145 | ### 1. Prepare developer-sandbox container and don't run any targets 146 | 147 | ```bash 148 | ./run.sh none 149 | ``` 150 | 151 | ### 2. Attach to developer-sandbox container 152 | 153 | ```bash 154 | sudo docker exec -it tf-dev-sandbox bash 155 | ``` 156 | 157 | ### 3. Prepare developer-sandbox container 158 | 159 | ``` bash 160 | cd ~/tf-dev-env 161 | make sync # get latest code 162 | make setup dep # set up docker container and install build dependencies 163 | make fetch_packages # pull third_party dependencies 164 | ``` 165 | 166 | The descriptions of targets: 167 | 168 | - `make sync` - sync code in `./contrail` directory using `repo` tool 169 | - `make fetch_packages` - pull `./third_party` dependencies (after code checkout) 170 | - `make setup` - initial configuration of image (required to run once) 171 | - `make dep` - installs all build dependencies 172 | - `make dep-` - installs build dependencies for 173 | 174 | ### 4. 
Building artifacts 175 | 176 | #### RPM packages 177 | 178 | - `make list` - lists all available RPM targets 179 | - `make rpm` - builds all RPMs 180 | - `make rpm-` - builds single RPM for 181 | 182 | #### Before containers build 183 | 184 | - `make create-repo` - creates repository with built RPMs 185 | - `make update-repo` - updates repository with built RPMs (in case when some RPM-s were rebuilt) 186 | - `make setup-httpd` - configures httpd for building images (can be run once after RPM's repository creation) 187 | 188 | #### Container images 189 | 190 | - `make list-containers` - lists all container targets 191 | - `make containers` - builds all containers' images, requires RPM packages in ~/contrail/RPMS and configured httpd 192 | - `make container-` - builds single container as a target, with all docker dependencies 193 | 194 | #### Deployers 195 | 196 | - `make list-deployers` - lists all deployers container targets 197 | - `make deployers` - builds all deployers 198 | - `make deployer-` - builds single deployer as a target, with all docker dependencies 199 | 200 | #### Test containers 201 | 202 | - `make test-containers` - build test containers 203 | 204 | #### TF Operator containers 205 | 206 | - `make operator-containers` - build tf-operator container 207 | 208 | #### Source containers 209 | 210 | - `make src-containers` - build containers with source of deployer code 211 | 212 | #### Clean 213 | 214 | - `make clean{-containers,-deployers,-repo,-rpm}` - delete artifacts 215 | 216 | #### Alternate build methods 217 | 218 | Instead of step 4 above (which runs `scons` inside `make`), you can use `scons` directly. The steps 1-3 are still required. 219 | 220 | ``` bash 221 | cd ~/contrail 222 | scons # ( or "scons test" etc) 223 | ``` 224 | 225 | NOTE: 226 | The above example builds the whole TungstenFabric project with default kernel headers and those 227 | are the headers for the running kernel (`uname -r`). 
If you want to customize your manual build and 228 | use e.g. newer kernel headers, take a look at the examples below. 229 | 230 | In case you want to compile TungstenFabric with the latest or other custom kernel headers installed 231 | in `tf-dev-sandbox` container, then you have to run scons with extra arguments: 232 | 233 | ``` bash 234 | RTE_KERNELDIR=/path/to/custom_kernel_headers scons --kernel-dir=/path/to/custom_kernel_headers 235 | ``` 236 | 237 | To alter default behaviour and build TF without support for DPDK just provide the `--without-dpdk` flag: 238 | 239 | ``` bash 240 | scons --kernel-dir=/path/to/custom_kernel_headers --without-dpdk 241 | ``` 242 | 243 | To build only a specific module like e.g. `vrouter`: 244 | 245 | ``` bash 246 | scons --kernel-dir=/path/to/custom_kernel_headers vrouter 247 | ``` 248 | 249 | To build and run unit tests against your code: 250 | 251 | ``` bash 252 | RTE_KERNELDIR=/path/to/custom_kernel_headers scons --kernel-dir=/path/to/custom_kernel_headers test 253 | ``` 254 | 255 | ## Customizing dev-env container 256 | 257 | There are several options to change standard behaviour of `tf-dev-sandbox` container: 258 | 259 | - Attach external sources to container 260 | - Use external docker registry to store TF container images 261 | - Building kernel module files for current kernel version 262 | 263 | ### External sources 264 | 265 | You can attach your host tf-vnc sources instead of syncing them from github.com. 266 | 267 | There are special environment variables to set correct behaviour: 268 | 269 | - **CONTRAIL_DIR** stores host's path to initialized tf-vnc repository. 270 | - **SITE_MIRROR** stores contrail third-party repository url. It is used to collect external packages required by *contrail-third-party* tools. 
There is an example: 271 | 272 | ``` bash 273 | export CONTRAIL_DIR=$HOME/my-tf-sources 274 | ./run.sh configure 275 | ./run.sh compile 276 | ./run.sh package 277 | ``` 278 | 279 | ### External docker registry 280 | 281 | Environment variable **CONTAINER_REGISTRY** stores external docker registry connection information where TF's containers would be stored. 282 | There is an example: 283 | 284 | ``` bash 285 | export CONTRAIL_DEPLOY_REGISTRY=0 286 | export CONTAINER_REGISTRY=10.1.1.190:5000 287 | ./run.sh build 288 | ``` 289 | 290 | ### Building kernel module files for current kernel version 291 | 292 | You can find default kernel versions in `tf-packages` repository in `kernel_version.info`, `kernel_version.rhel.info` and `kernel_version.ubi.info` files. So, if you want to use different kernel version, you must add it into the corresponding file. 293 | 294 | For example, if you need to build kernel modules for CentOS with kernel version `3.10.0-1127.13.1.el7.x86_64`, you can do: 295 | 296 | ```bash 297 | ./tf-dev-env/run.sh fetch 298 | echo "3.10.0-1127.13.1.el7.x86_64" >> contrail/tools/packages/kernel_version.info 299 | ./tf-dev-env/run.sh build 300 | ``` 301 | 302 | ## Full TF dev suite 303 | 304 | IMPORTANT: some of the parts and pieces are still under construction 305 | 306 | Full TF dev suite consists of: 307 | 308 | - [tf-dev-env](https://github.com/tungstenfabric/tf-dev-env) - develop and build TF 309 | - [tf-devstack](https://github.com/tungstenfabric/tf-devstack) - deploy TF 310 | - [tf-dev-test](https://github.com/tungstenfabric/tf-dev-test) - test deployed TF 311 | 312 | Each of these tools can be used separately or in conjunction with the other two. They are supposed to be invoked in the sequence they were listed and produce environment (conf files and variables) seamlessly consumable by the next tool. 
313 | 314 | They provide two main scripts: 315 | 316 | - run.sh 317 | - cleanup.sh 318 | 319 | Both these scripts accept targets (like ``run.sh build``) for various actions. 320 | 321 | Typical scenarios are (examples are given for centos): 322 | 323 | ## Developer's scenario 324 | 325 | Typical developer's scenario could look like this: 326 | 327 | ### 1. Preparation part 328 | 329 | Run a machine, for example AWS instance or a VirtualBox (powerful with lots of memory - 16GB+ recommended- ) 330 | 331 | Enable passwordless sudo for your user 332 | (for centos example: [serverfault page](https://serverfault.com/questions/160581/how-to-setup-passwordless-sudo-on-linux)) 333 | 334 | Install git: 335 | 336 | ``` bash 337 | sudo yum install -y git 338 | ``` 339 | 340 | ### 2. tf-dev-env part 341 | 342 | Clone tf-dev-env: 343 | 344 | ``` bash 345 | git clone http://github.com/tungstenfabric/tf-dev-env 346 | ``` 347 | Switch to a branch other than master (if necessary): 348 | 349 | ``` bash 350 | export GERRIT_BRANCH="branch_name" 351 | ``` 352 | 353 | Prepare the build container and fetch TF sources: 354 | 355 | ``` bash 356 | tf-dev-env/run.sh 357 | ``` 358 | 359 | Make required changes in sources fetched to contrail directory. For example, fetch particular review for controller (you can find download link in the gerrit review): 360 | 361 | ``` bash 362 | cd contrail/controller 363 | git fetch "https://gerrit.tungsten.io/tungstenfabric/tf-controller" refs/changes/..... && git checkout FETCH_HEAD 364 | cd ../../ 365 | ``` 366 | 367 | Run TF build: 368 | 369 | ``` bash 370 | tf-dev-env/run.sh build 371 | ``` 372 | 373 | ### 3. 
tf-devstack part 374 | 375 | Clone tf-devstack: 376 | 377 | ``` bash 378 | git clone http://github.com/tungstenfabric/tf-devstack 379 | ``` 380 | 381 | Deploy TF by means of k8s manifests, for example: 382 | 383 | ``` bash 384 | tf-devstack/k8s_manifests/run.sh 385 | ``` 386 | 387 | #### 3.1 Using targets 388 | 389 | If you're on VirtualBox, for example, and want to snapshot k8s deployment prior to TF deployment you can use run.sh targets like: 390 | 391 | ``` bash 392 | tf-devstack/k8s_manifests/run.sh platform 393 | ``` 394 | 395 | and then: 396 | 397 | ``` bash 398 | tf-devstack/k8s_manifests/run.sh tf 399 | ``` 400 | 401 | Along with cleanup of particular target you can do tf deployment multiple times: 402 | 403 | ``` bash 404 | tf-devstack/k8s_manifests/cleanup.sh tf 405 | ``` 406 | 407 | ### 4. tf-dev-test part 408 | 409 | Clone tf-dev-test: 410 | 411 | ``` bash 412 | git clone http://github.com/tungstenfabric/tf-dev-test 413 | ``` 414 | 415 | Test the deployment by smoke tests, for example: 416 | 417 | ``` bash 418 | tf-dev-test/smoke/run.sh 419 | ``` 420 | 421 | ## Evaluation scenario 422 | 423 | Typical developer's scenario could look like this: 424 | 425 | ### 1. Preparation part 426 | 427 | Run a machine, for example AWS instance or a VirtualBox (powerful with lots of memory - 16GB+ recommended- ) 428 | 429 | Enable passwordless sudo for your user 430 | (for centos example: [serverfault page](https://serverfault.com/questions/160581/how-to-setup-passwordless-sudo-on-linux)) 431 | 432 | Install git: 433 | 434 | ``` bash 435 | sudo yum install -y git 436 | ``` 437 | 438 | ### 2. 
tf-devstack part 439 | 440 | Clone tf-devstack: 441 | 442 | ``` bash 443 | git clone http://github.com/tungstenfabric/tf-devstack 444 | ``` 445 | 446 | Deploy TF by means of k8s manifests, for example: 447 | 448 | ``` bash 449 | tf-devstack/k8s_manifests/run.sh 450 | ``` 451 | 452 | Or if you want to deploy with the most recent sources from master use: 453 | 454 | ``` bash 455 | tf-devstack/k8s_manifests/run.sh master 456 | ``` 457 | 458 | [Slack]: https://tungstenfabric.slack.com/messages/C0DQ23SJF/ 459 | [Google Group]: https://groups.google.com/forum/#!forum/tungsten-dev 460 | 461 | ## Notes 462 | 463 | - Vagrantfile is deprecated and is not supported. Will be removed later. 464 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | VAGRANTFILE_API_VERSION = "2" 5 | USER = "root" 6 | GROUP = "root" 7 | HOME_DIR = "/root" 8 | DEV_ENV_DIR = "#{HOME_DIR}/tf-dev-env" 9 | REPOS_DIR = "#{HOME_DIR}/src/gerrit.tungsten.io/tungstenfabric/" 10 | 11 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 12 | 13 | config.vm.define :contrail_sandbox do |contrail_sandbox_config| 14 | contrail_sandbox_config.vm.box = "geerlingguy/centos7" 15 | contrail_sandbox_config.vm.hostname = "contrail-sandbox" 16 | contrail_sandbox_config.vm.network "private_network", ip: "192.168.60.200" 17 | contrail_sandbox_config.vm.synced_folder "./code", "#{REPOS_DIR}", 18 | owner: "#{USER}", group: "#{GROUP}" 19 | contrail_sandbox_config.vm.synced_folder ".", "#{DEV_ENV_DIR}", owner: "#{USER}", group: "#{GROUP}" 20 | contrail_sandbox_config.ssh.forward_agent = true 21 | contrail_sandbox_config.ssh.insert_key = true 22 | contrail_sandbox_config.ssh.username = "#{USER}" 23 | contrail_sandbox_config.ssh.password = 'vagrant' 24 | 25 | contrail_sandbox_config.vm.provider "virtualbox" do |vb| 26 | vb.memory = 8192 27 | 
# Print usage information for cleanup.sh to stdout.
# The -a line previously read "[-a ] false ]" and left the parenthesis
# unclosed; fixed to describe the actual flag set handled by getopts.
function print_help() {
  echo -e "Usage:\n"\
       "./cleanup.sh   # cleanup build artefacts (sandbox container, sources, saved configuration)\n"\
       "  [-a ]        # cleanup all (build artefacts, dev-env image, saved configuration)\n"\
       "  [-h ]        # print help\n"
}
48 | fi 49 | done 50 | fi 51 | 52 | if [[ $remove_image -eq 1 ]] ; then 53 | echo 54 | echo '[images]' 55 | mysudo docker inspect ${DEVENV_IMAGE} >/dev/null 2>&1 && mysudo docker rmi -f ${DEVENV_IMAGE} 56 | mysudo docker inspect ${CONTAINER_REGISTRY}/${DEVENV_IMAGE} >/dev/null 2>&1 && mysudo docker rmi -f ${CONTAINER_REGISTRY}/${DEVENV_IMAGE} 57 | echo "image $DEVENV_IMAGE removed" 58 | fi 59 | 60 | if [[ $remove_sources -eq 1 ]] ; then 61 | echo 62 | echo '[folder]' 63 | [ -d "$CONTRAIL_DIR" ] && mysudo rm -rf "$CONTRAIL_DIR" 64 | fi 65 | 66 | if [[ $remove_tf_dev_config -eq 1 ]] ; then 67 | echo 68 | echo '[tf dev config]' 69 | [ -d "$TF_CONFIG_DIR" ] && mysudo rm -rf "$TF_CONFIG_DIR" 70 | fi 71 | 72 | echo tf-dev-env cleanup finished 73 | -------------------------------------------------------------------------------- /common.env.tmpl: -------------------------------------------------------------------------------- 1 | LINUX_DISTR=${LINUX_DISTR} 2 | LINUX_DISTR_VER=${LINUX_DISTR_VER} 3 | CONTRAIL_CONTAINER_TAG="${CONTRAIL_CONTAINER_TAG}" 4 | CONTRAIL_DEPLOYERS_TAG="${CONTRAIL_CONTAINER_TAG}" 5 | CONTRAIL_REGISTRY="${CONTAINER_REGISTRY}" 6 | CONTAINER_REGISTRY=${CONTAINER_REGISTRY} 7 | CONTRAIL_REPOSITORY="http://localhost:${RPM_REPO_PORT}" 8 | DEPLOYERS_BASE_CONTAINER="${CONTAINER_REGISTRY}/contrail-general-base:${CONTRAIL_CONTAINER_TAG}" 9 | CONTRAIL_PARALLEL_BUILD=$CONTRAIL_PARALLEL_BUILD 10 | VENDOR_NAME="$VENDOR_NAME" 11 | VENDOR_DOMAIN="$VENDOR_DOMAIN" 12 | YUM_ENABLE_REPOS="$YUM_ENABLE_REPOS" -------------------------------------------------------------------------------- /common/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | [ -n "$DEBUG" ] && set -x 4 | set -o errexit 5 | 6 | # extract DEBUGINFO 7 | # Should be set to TRUE to produce debuginfo 8 | export DEBUGINFO=${DEBUGINFO:-FALSE} 9 | 10 | # working environment 11 | # WORKSPACE and two next vars are applicable only outside of sandbox 
container - on host. 12 | export WORKSPACE=${WORKSPACE:-$(pwd)} 13 | export TF_CONFIG_DIR=${TF_CONFIG_DIR:-"${HOME}/.tf"} 14 | export TF_DEVENV_PROFILE="${TF_CONFIG_DIR}/dev.env" 15 | 16 | # Build mode allows skipping stages or targets after freeze if patchset is present - values full, fast 17 | export BUILD_MODE=${BUILD_MODE:-"full"} 18 | 19 | [ -e "$TF_DEVENV_PROFILE" ] && source "$TF_DEVENV_PROFILE" 20 | 21 | # determined variables 22 | if [[ "$OSTYPE" == "linux-gnu" ]]; then 23 | export DISTRO=$(cat /etc/*release | egrep '^ID=' | awk -F= '{print $2}' | tr -d \") 24 | export DISTRO_VER=$(cat /etc/*release | egrep '^VERSION_ID=' | awk -F= '{print $2}' | tr -d \") 25 | export DISTRO_VER_MAJOR=${DISTRO_VER//.*/} 26 | elif [[ "$OSTYPE" == "darwin"* ]]; then 27 | export DISTRO="macosx" 28 | else 29 | echo "Unsupported platform." 30 | exit 1 31 | fi 32 | 33 | # working build directories 34 | # CONTRAIL_DIR is useful only outside of sandbox container 35 | if [ -z "${CONTRAIL_DIR+x}" ] ; then 36 | # not defined => use default 37 | CONTRAIL_DIR=${WORKSPACE}/contrail 38 | elif [ -z "$CONTRAIL_DIR" ] ; then 39 | # defined empty => dont bind contrail dir to host: tf jenkins 40 | CONTRAIL_DIR=${WORKSPACE}/contrail 41 | BIND_CONTRAIL_DIR=false 42 | fi 43 | export CONTRAIL_DIR 44 | 45 | # build environment preparation options 46 | export CONTAINER_REGISTRY=${CONTAINER_REGISTRY:-"localhost:5001"} 47 | # check if container registry is in ip:port format 48 | if [[ $CONTAINER_REGISTRY == *":"* ]]; then 49 | export REGISTRY_IP=$(echo $CONTAINER_REGISTRY | cut -f 1 -d ':') 50 | export REGISTRY_PORT=$(echo $CONTAINER_REGISTRY | cut -f 2 -d ':') 51 | else 52 | # no need to setup local registry while using docker hub 53 | export CONTRAIL_DEPLOY_REGISTRY=0 54 | # skip updating insecure registry for docker 55 | export CONTRAIL_SKIP_INSECURE_REGISTRY=1 56 | fi 57 | # FROZEN_REGISTRY is the source container registry where existing containers reside to skip rebuilding unchanged ones 58 | # 
Also it is the registry to take frozen tf-dev-sandbox container from 59 | export FROZEN_REGISTRY=${FROZEN_REGISTRY:-"nexus.opensdn.io:5101"} 60 | 61 | # Gerrit URL is used when patchsets-info.json is provided 62 | export GERRIT_URL=${GERRIT_URL:-https://gerrit.tungsten.io/r} 63 | 64 | export RPM_REPO_IP='localhost' 65 | export RPM_REPO_PORT='6667' 66 | export REGISTRY_CONTAINER_NAME=${REGISTRY_CONTAINER_NAME:-"tf-dev-env-registry"} 67 | export DEVENV_CONTAINER_NAME=${DEVENV_CONTAINER_NAME:-"tf-dev-sandbox"} 68 | export CONTRAIL_PARALLEL_BUILD=${CONTRAIL_PARALLEL_BUILD:-true} 69 | 70 | # tf-dev-env sandbox parameters 71 | export DEVENV_IMAGE_NAME=${DEVENV_IMAGE_NAME:-"tf-dev-sandbox"} 72 | export DEVENV_TAG=${DEVENV_TAG:-"latest"} 73 | export DEVENV_PUSH_TAG=${DEVENV_PUSH_TAG:-"frozen"} 74 | export DEVENV_IMAGE=${DEVENV_IMAGE:-"${DEVENV_IMAGE_NAME}:${DEVENV_TAG}"} 75 | 76 | # build options 77 | export MULTI_KERNEL_BUILD=${MULTI_KERNEL_BUILD:-"false"} 78 | 79 | # RHEL specific build options 80 | export ENABLE_RHSM_REPOS=${ENABLE_RHSM_REPOS:-'false'} 81 | 82 | # versions info 83 | export CONTRAIL_CONTAINER_TAG=${CONTRAIL_CONTAINER_TAG:-'dev'} 84 | # tag for existing prebuilt containers reflecting current merged code in gerrit. 85 | # It's determined automatically taken from http://nexus.opensdn.io:8082/frozen/tag during fetch stage 86 | export FROZEN_TAG="" 87 | # note: there is spaces available in names below 88 | export VENDOR_NAME=${VENDOR_NAME:-"TungstenFabric"} 89 | export VENDOR_DOMAIN=${VENDOR_DOMAIN:-"io.tungsten"} 90 | 91 | # Contrail repo branches options 92 | export CONTRAIL_BRANCH=${CONTRAIL_BRANCH:-${GERRIT_BRANCH:-'master'}} 93 | 94 | # Docker options 95 | if [ -z "${DOCKER_VOLUME_OPTIONS}" ] ; then 96 | export DOCKER_VOLUME_OPTIONS="z" 97 | if [[ $DISTRO == "macosx" ]]; then 98 | # Performance issue with osxfs, this option is making the 99 | # writes async from the container to the host. 
This means a 100 | # difference can happen from the host POV, but that should not 101 | # be an issue since we are not expecting anything to update 102 | # the source code. Based on test this option increase the perf 103 | # of about 10% but it still quite slow comparativly to a host 104 | # using GNU/Linux. 105 | DOCKER_VOLUME_OPTIONS+=",delegated" 106 | fi 107 | fi 108 | 109 | function source_env() { 110 | if [[ -e /input/tf-developer-sandbox.env ]] ; then 111 | echo "INFO: source env from /input/tf-developer-sandbox.env" 112 | source /input/tf-developer-sandbox.env 113 | fi 114 | 115 | if [[ -e $DEV_ENV_ROOT/common.env ]] ; then 116 | echo "INFO: source env from $DEV_ENV_ROOT/common.env" 117 | set -o allexport 118 | source $DEV_ENV_ROOT/common.env 119 | set +o allexport 120 | fi 121 | } 122 | -------------------------------------------------------------------------------- /common/functions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function is_container_created() { 4 | local container=$1 5 | if ! mysudo docker ps -a --format '{{ .Names }}' | grep -x "$container" > /dev/null 2>&1 ; then 6 | return 1 7 | fi 8 | } 9 | 10 | function is_container_up() { 11 | local container=$1 12 | if ! 
# Run a command with root privileges where needed: on macOS Docker Desktop
# needs no sudo, so the command is executed directly; everywhere else it is
# wrapped in sudo.
function mysudo() {
  case "$DISTRO" in
    macosx)
      "$@"
      ;;
    *)
      sudo "$@"
      ;;
  esac
}
# Install packages required by tf-dev-env on Arch Linux (lsof, python3).
# Fixed to match the sibling install_prerequisites_* helpers: the package
# manager is invoked through mysudo (a plain user cannot run pacman -S),
# and --noconfirm keeps unattended runs from hanging on pacman's prompt.
function install_prerequisites_arch() {
  local pkgs=""
  which lsof || pkgs+=" lsof"
  which python3 || pkgs+=" python3"
  if [ -n "$pkgs" ] ; then
    mysudo pacman -S --noconfirm $pkgs
  fi
}
-------------------------------------------------------------------------------- /common/setup_docker_macosx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | source ${scriptdir}/common.sh 5 | source ${scriptdir}/functions.sh 6 | 7 | docker_cfg="$HOME/.docker/daemon.json" 8 | 9 | function check_docker_value() { 10 | local name=$1 11 | local value=$2 12 | python -c "import json; f=open('$docker_cfg'); data=json.load(f); print(data.get('$name'));" 2>/dev/null| grep -qi "$value" 13 | } 14 | 15 | echo 16 | echo "INFO: [docker install]" 17 | if ! which docker >/dev/null 2>&1 ; then 18 | brew install docker 19 | else 20 | echo "INFO: docker installed: $(docker --version)" 21 | version=$(docker version --format '{{.Client.Version}}' 2>/dev/null | head -1 | cut -d '.' -f 1) 22 | if (( version < 16)); then 23 | echo "ERROR: docker is too old. please remove it. tf-dev-env will install correct version." 24 | exit 1 25 | fi 26 | fi 27 | 28 | echo docker ps > /dev/null 2>&1 29 | if [[ $? != 0 ]]; then 30 | echo "ERROR: Please start Docker Deskop (Docker.app) before to continue..." 31 | exit 1 32 | fi 33 | 34 | echo 35 | echo "INFO: [docker config]" 36 | default_iface=`route get 1 | grep interface | awk '{print $2}'` 37 | registry_ip=${REGISTRY_IP} 38 | if [ -z $registry_ip ]; then 39 | # use default ip as registry ip if it's not passed to the script 40 | registry_ip=`ifconfig $default_iface | grep 'inet ' | awk '{print $2}'` 41 | fi 42 | default_iface_mtu=`ifconfig $default_iface | grep 'mtu ' | awk '{print $4}'` 43 | 44 | docker_reload=0 45 | if ! check_docker_value "insecure-registries" "${registry_ip}:${REGISTRY_PORT}" || ! check_docker_value mtu "$default_iface_mtu" || ! 
# Run the given command up to 5 times, sleeping 5 seconds between attempts.
# Returns 0 on the first success, 1 if all 5 attempts fail.
function retry() {
  local i
  for ((i=0; i<5; ++i)) ; do
    # "$@" must be quoted: the original bare $@ word-split arguments that
    # contain whitespace, corrupting the retried command line (SC2068).
    if "$@" ; then
      break
    fi
    sleep 5
  done
  if [[ $i == 5 ]]; then
    return 1
  fi
}
46 | retry yum install -y docker device-mapper-libs device-mapper-event-libs 47 | systemctl start docker 48 | } 49 | 50 | function install_docker_rhel_8() { 51 | which podman && return 52 | if [[ "$ENABLE_RHSM_REPOS" == "true" ]]; then 53 | subscription-manager repos \ 54 | --enable rhel-8-server-extras-rpms \ 55 | --enable rhel-8-server-optional-rpms 56 | fi 57 | echo "INFO: dnf disable modules container-tools" 58 | dnf module disable -y container-tools || true 59 | declare -A ct_vers=(["8.2"]="2.0" ["8.4"]="3.0") 60 | echo "INFO: dnf enable container-tools:${ct_vers[$DISTRO_VER]}" 61 | dnf module enable -y container-tools:${ct_vers[$DISTRO_VER]} 62 | retry dnf install -y podman-docker podman device-mapper-libs device-mapper-event-libs 63 | touch /etc/containers/nodocker 64 | sed -i 's/.*image_default_format.*/image_default_format = "v2s2"/g' /usr/share/containers/containers.conf 65 | sed -i 's/.*image_build_format.*/image_build_format = "docker"/g' /usr/share/containers/containers.conf 66 | } 67 | 68 | declare -A install_docker_rhel=( 69 | ['7.8']=install_docker_rhel_7 70 | ['7.9']=install_docker_rhel_7 71 | ['8.2']=install_docker_rhel_8 72 | ['8.4']=install_docker_rhel_8 73 | ) 74 | 75 | function check_docker_value() { 76 | local name=$1 77 | local value=$2 78 | python -c "import json; f=open('/etc/docker/daemon.json'); data=json.load(f); print(data.get('$name'));" 2>/dev/null| grep -qi "$value" 79 | } 80 | 81 | function check_insecure_registry() { 82 | case ${DISTRO}_${DISTRO_VER} in 83 | rhel_8.2|rhel_8.4) 84 | grep -A 1 '\[registries.insecure\]' /etc/containers/registries.conf | \ 85 | grep -o 'registries[ ]*='.* | cut -d '=' -f 2 | \ 86 | jq -cr '.[]' | \ 87 | grep -q "^$1$" 88 | ;; 89 | *) 90 | check_docker_value "insecure-registries" "$1" 91 | ;; 92 | esac 93 | } 94 | 95 | function update_config_docker() { 96 | local insecure_registries="$1" 97 | local default_iface_mtu="$2" 98 | case ${DISTRO}_${DISTRO_VER} in 99 | rhel_8.2|rhel_8.4) 100 | local 
cf="/etc/containers/registries.conf" 101 | echo "INFO: update insecure registries in config $cf" 102 | local ir 103 | local cr=$(grep -A 1 '\[registries.insecure\]' $cf \ 104 | | grep -o 'registries[ ]*='.* | cut -d '=' -f 2 \ 105 | | jq -c ".") 106 | for ir in ${insecure_registries//,/ } ; do 107 | cr=$(echo "$cr" | jq -c ". += [ \"$ir\" ]") 108 | done 109 | cp $cf ${cf}.bkp 110 | awk "{ if (s==1) {s=0; print(\"registries = ${cr//\"/\\\"}\")} else if (\$1==\"[registries.insecure]\") {print(\$0); s=1} else {print(\$0)} }" $cf > ${cf}.tf 111 | mv ${cf}.tf $cf 112 | local pcf="/etc/cni/net.d/87-podman-bridge.conflist" 113 | echo "INFO: update mtu in $pcf" 114 | python </dev/null 2>&1 ; then 205 | if [ x"$DISTRO" == x"centos" ]; then 206 | systemctl stop firewalld || true 207 | install_docker_centos 208 | systemctl start docker 209 | # grep 'dm.basesize=20G' /etc/sysconfig/docker-storage || sed -i 's/DOCKER_STORAGE_OPTIONS=/DOCKER_STORAGE_OPTIONS=--storage-opt dm.basesize=20G /g' /etc/sysconfig/docker-storage 210 | # systemctl restart docker 211 | elif [ x"$DISTRO" == x"rhel" ]; then 212 | systemctl stop firewalld || true 213 | ${install_docker_rhel[$DISTRO_VER]} 214 | elif [ x"$DISTRO" == x"ubuntu" ]; then 215 | install_docker_ubuntu 216 | fi 217 | else 218 | echo "INFO: docker installed: $(docker --version)" 219 | if [ x"$DISTRO" != x"rhel" ]; then 220 | version=$(docker version --format '{{.Client.Version}}' 2>/dev/null | head -1 | cut -d '.' -f 1) 221 | if (( version < 16)); then 222 | echo "ERROR: docker is too old. please remove it. tf-dev-env will install correct version." 
223 | exit 1 224 | fi 225 | fi 226 | fi 227 | 228 | 229 | echo 230 | echo "INFO: [docker config]" 231 | default_iface=`ip route get 1 | grep -o "dev.*" | awk '{print $2}'` 232 | 233 | CONTRAIL_SKIP_INSECURE_REGISTRY=${CONTRAIL_SKIP_INSECURE_REGISTRY:-0} 234 | insecure_registries=${INSECURE_REGISTRIES:-} 235 | registry_ip=${REGISTRY_IP} 236 | UPDATE_INSECURE_REGISTRY=false 237 | if [ "$CONTRAIL_SKIP_INSECURE_REGISTRY" != 0 ]; then 238 | echo "INFO: Docker config - setting insecure registry skipped" 239 | else 240 | if [ -z $registry_ip ]; then 241 | # use default ip as registry ip if it's not passed to the script 242 | registry_ip=`ip addr show dev $default_iface | awk '/inet /{print $2}' | cut -f '1' -d '/'` 243 | fi 244 | if ! check_insecure_registry "${registry_ip}:${REGISTRY_PORT}" ; then 245 | if [ -n "$insecure_registries" ] ; then 246 | insecure_registries+="," 247 | fi 248 | insecure_registries+="${registry_ip}:${REGISTRY_PORT}" 249 | fi 250 | fi 251 | if [ -n "$insecure_registries" ] ; then 252 | UPDATE_INSECURE_REGISTRY=true 253 | fi 254 | 255 | default_iface_mtu=`ip link show $default_iface | grep -o "mtu.*" | awk '{print $2}'` 256 | 257 | docker_reload=0 258 | if $UPDATE_INSECURE_REGISTRY || ! check_docker_value mtu "$default_iface_mtu" || ! 
check_docker_value "live-restore" "true" ; then 259 | update_config_docker "$insecure_registries" "$default_iface_mtu" 260 | docker_reload=1 261 | else 262 | echo "INFO: no config changes required" 263 | fi 264 | 265 | runtime_docker_mtu=$(get_docker_mtu) 266 | if [[ "$default_iface_mtu" != "$runtime_docker_mtu" || "$docker_reload" == '1' ]]; then 267 | set_docker_mtu $default_iface_mtu 268 | restart_docker 269 | else 270 | echo "INFO: no docker service restart required" 271 | fi 272 | 273 | echo "REGISTRY_IP: $registry_ip" 274 | -------------------------------------------------------------------------------- /common/tf_functions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WORK_DIR="${HOME}/work" 4 | STAGES_DIR="${WORK_DIR}/.stages" 5 | 6 | # Folders and artifacts which have to be symlinked in order to separate them from sources 7 | 8 | declare -a work_folders=(build BUILDROOT BUILD RPMS SOURCES SRPMS SRPMSBUILD .sconf_temp SPECS .stages) 9 | declare -a work_files=(.sconsign.dblite) 10 | 11 | function create_env_file() { 12 | # exports 'src_volume_name' as return result 13 | local tf_container_env_file=$1 14 | cat < $tf_container_env_file 15 | set -m 16 | export DEBUG=${DEBUG} 17 | export DEBUGINFO=${DEBUGINFO} 18 | export LINUX_DISTR=${LINUX_DISTR} 19 | export LINUX_DISTR_VER=${LINUX_DISTR_VER} 20 | export BUILD_MODE=${BUILD_MODE} 21 | export DEV_ENV_ROOT=/root/tf-dev-env 22 | export DEVENV_TAG=$DEVENV_TAG 23 | export SITE_MIRROR=${SITE_MIRROR} 24 | export CONTRAIL_KEEP_LOG_FILES=${CONTRAIL_KEEP_LOG_FILES} 25 | export CONTRAIL_BRANCH=${CONTRAIL_BRANCH} 26 | export CONTRAIL_CONTAINER_TAG=${CONTRAIL_CONTAINER_TAG} 27 | export CONTRAIL_REPOSITORY=http://localhost:${RPM_REPO_PORT} 28 | export CONTRAIL_REGISTRY=${CONTAINER_REGISTRY} 29 | export CONTAINER_REGISTRY=${CONTAINER_REGISTRY} 30 | export VENDOR_NAME=$VENDOR_NAME 31 | export VENDOR_DOMAIN=$VENDOR_DOMAIN 32 | export 
MULTI_KERNEL_BUILD=$MULTI_KERNEL_BUILD 33 | export KERNEL_REPOSITORIES_RHEL8="$KERNEL_REPOSITORIES_RHEL8" 34 | export CONTRAIL_SOURCE=${CONTRAIL_DIR} 35 | export BUILDTAG=${CONTRAIL_CONTAINER_TAG//-/_} 36 | export REPO_INIT_MANIFEST_URL=$REPO_INIT_MANIFEST_URL 37 | export VNC_ORGANIZATION=$VNC_ORGANIZATION 38 | EOF 39 | if [[ -n "${GENERAL_EXTRA_RPMS+x}" ]] ; then 40 | echo "export GENERAL_EXTRA_RPMS=${GENERAL_EXTRA_RPMS}" >> $tf_container_env_file 41 | fi 42 | if [[ -n "${BASE_EXTRA_RPMS+x}" ]] ; then 43 | echo "export BASE_EXTRA_RPMS=${BASE_EXTRA_RPMS}" >> $tf_container_env_file 44 | fi 45 | if [[ -n "${RHEL_HOST_REPOS+x}" ]] ; then 46 | echo "export RHEL_HOST_REPOS=${RHEL_HOST_REPOS}" >> $tf_container_env_file 47 | fi 48 | 49 | if [[ -d "${scriptdir}/config" ]]; then 50 | echo "export CONTRAIL_CONFIG_DIR=${CONTRAIL_CONFIG_DIR:-'/config'}" >> $tf_container_env_file 51 | fi 52 | 53 | # code review system options 54 | if [[ -n "$GERRIT_URL" ]]; then 55 | echo "export GERRIT_URL=${GERRIT_URL}" >> $tf_container_env_file 56 | fi 57 | if [[ -n "$GERRIT_BRANCH" ]]; then 58 | echo "export GERRIT_BRANCH=${GERRIT_BRANCH}" >> $tf_container_env_file 59 | fi 60 | if [[ -n "$GERRIT_PROJECT" ]]; then 61 | echo "export GERRIT_PROJECT=${GERRIT_PROJECT}" >> $tf_container_env_file 62 | fi 63 | } 64 | 65 | function prepare_infra() 66 | { 67 | echo "INFO: create symlinks to work directories with artifacts $(date)" 68 | mkdir -p $HOME/work /root/contrail 69 | # /root/contrail will be defined later as REPODIR 70 | for folder in ${work_folders[@]} ; do 71 | [[ -e $WORK_DIR/$folder ]] || mkdir $WORK_DIR/$folder 72 | [[ -e /root/contrail/$folder ]] || ln -s $WORK_DIR/$folder /root/contrail/$folder 73 | done 74 | for file in ${work_files[@]} ; do 75 | touch $WORK_DIR/$file 76 | [[ -e /root/contrail/$file ]] || ln -s $WORK_DIR/$file /root/contrail/$file 77 | done 78 | # to re-read yum data before each run - mirror list or mirrors itself can be changed since previous run 79 | yum clean all 
# Print the current frozen container tag fetched from nexus, or nothing if
# the endpoint is unavailable. A single 'curl -f' replaces the old
# HEAD-then-GET pair: -f makes curl print nothing and exit non-zero on HTTP
# errors, and unlike grepping for the literal "HTTP/1.1 200 OK" status line
# it also works when the server answers over HTTP/2 or with a different
# reason phrase. '|| true' keeps the 0 exit status the old 'if' guaranteed.
function get_current_container_tag()
{
  curl -sf "http://nexus.opensdn.io:8082/frozen/tag" || true
}
-e "/input/patchsets-info.json" ]] ; then 110 | return 1 111 | fi 112 | 113 | # First fetch existing containers list 114 | # TODO: detect protocol first 115 | frozen_containers=($(curl -fSs https://$FROZEN_REGISTRY/v2/_catalog | jq -r '.repositories | .[]')) 116 | # Next initialize projects lists and look for changes 117 | changed_projects=() 118 | changed_containers_projects=() 119 | changed_deployers_projects=() 120 | changed_operator_projects=() 121 | changed_tests_projects=() 122 | changed_product_projects=() 123 | projects=$(jq '.[].project' "/input/patchsets-info.json") 124 | for project in ${projects[@]}; do 125 | project=$(echo $project | cut -f 2 -d "/" | tr -d '"') 126 | changed_projects+=($project) 127 | non_container_project=true 128 | if [[ ${infra_projects[@]} =~ $project ]] ; then 129 | continue 130 | fi 131 | if [[ ${containers_projects[@]} =~ $project ]] ; then 132 | changed_containers_projects+=($project) 133 | non_container_project=false 134 | fi 135 | if [[ ${deployers_projects[@]} =~ $project ]] ; then 136 | changed_deployers_projects+=($project) 137 | non_container_project=false 138 | fi 139 | if [[ ${operator_projects[@]} =~ $project ]] ; then 140 | changed_operator_projects+=($project) 141 | non_container_project=false 142 | fi 143 | if [[ ${tests_projects[@]} =~ $project ]] ; then 144 | changed_tests_projects+=($project) 145 | non_container_project=false 146 | fi 147 | if $non_container_project ; then 148 | changed_product_projects+=($project) 149 | # No containers are reused in this case - all should be rebuilt 150 | frozen_containers=() 151 | fi 152 | done 153 | 154 | # Now scan through frozen containers and remove ones to rebuild 155 | for container in ${frozen_containers[@]}; do 156 | if [[ $container == *-test ]] ; then 157 | if [[ -z $changed_tests_projects ]] ; then 158 | unchanged_containers+=($container) 159 | fi 160 | elif [[ $container == *-src ]] ; then 161 | if [[ -z $changed_deployers_projects ]] ; then 162 | 
unchanged_containers+=($container) 163 | fi 164 | elif [[ $container == *-operator ]] ; then 165 | if [[ -z $changed_operator_projects ]] ; then 166 | unchanged_containers+=($container) 167 | fi 168 | else 169 | if [[ $container != *-sandbox ]] && [[ -z $changed_containers_projects ]] ; then 170 | unchanged_containers+=($container) 171 | fi 172 | fi 173 | done 174 | 175 | return 0 176 | } 177 | -------------------------------------------------------------------------------- /container/Dockerfile.centos: -------------------------------------------------------------------------------- 1 | ARG LINUX_DISTR=centos 2 | ARG LINUX_DISTR_VER=7 3 | FROM $LINUX_DISTR:$LINUX_DISTR_VER 4 | 5 | ARG SITE_MIRROR 6 | ARG LC_ALL=en_US.UTF-8 7 | ARG LANG=en_US.UTF-8 8 | ARG LANGUAGE=en_US.UTF-8 9 | 10 | ENV USER root 11 | ENV HOME /root 12 | ENV CONTRAIL $HOME/contrail 13 | ENV LC_ALL=$LC_ALL 14 | ENV LANG=$LANG 15 | ENV LANGUAGE=$LANGUAGE 16 | ENV PS1="sandbox:\[\033[01;34m\]\w\[\033[00m\]\$ " 17 | 18 | WORKDIR $CONTRAIL 19 | 20 | # Inject repositories that we might need 21 | # copy pip.conf to the same place and move it to right place later to simplify build script 22 | COPY *.repo pip.conf* /etc/yum.repos.d/ 23 | COPY entrypoint.sh build-centos*.sh / 24 | 25 | RUN source /etc/os-release && /build-centos${VERSION_ID}.sh 26 | 27 | ENTRYPOINT ["/entrypoint.sh"] 28 | -------------------------------------------------------------------------------- /container/Dockerfile.ubi7: -------------------------------------------------------------------------------- 1 | ARG LINUX_DISTR=registry.access.redhat.com/ubi7/ubi 2 | ARG LINUX_DISTR_VER=latest 3 | FROM $LINUX_DISTR:$LINUX_DISTR_VER 4 | ARG YUM_SM_PLUGIN_ENABLED=0 5 | 6 | ARG SITE_MIRROR 7 | ARG YUM_ENABLE_REPOS="" 8 | ARG LC_ALL=en_US.UTF-8 9 | ARG LANG=en_US.UTF-8 10 | ARG LANGUAGE=en_US.UTF-8 11 | 12 | ENV USER root 13 | ENV HOME /root 14 | ENV CONTRAIL $HOME/contrail 15 | ENV LC_ALL=$LC_ALL 16 | ENV LANG=$LANG 17 | ENV 
ARG LINUX_DISTR=registry.access.redhat.com/ubi7/ubi
ARG LINUX_DISTR_VER=latest
FROM $LINUX_DISTR:$LINUX_DISTR_VER
ARG YUM_SM_PLUGIN_ENABLED=0

ARG SITE_MIRROR
ARG YUM_ENABLE_REPOS=""
ARG LC_ALL=en_US.UTF-8
ARG LANG=en_US.UTF-8
ARG LANGUAGE=en_US.UTF-8

ENV USER root
ENV HOME /root
ENV CONTRAIL $HOME/contrail
ENV LC_ALL=$LC_ALL
ENV LANG=$LANG
ENV LANGUAGE=$LANGUAGE
ENV PS1="sandbox:\[\033[01;34m\]\w\[\033[00m\]\$ "

WORKDIR $CONTRAIL

# Inject repositories that we might need
# copy pip.conf to the same place and move it to right place later to simplify build script
COPY *.repo pip.conf* /etc/yum.repos.d/

# NOTE:
# - we have to remove /usr/local/bin/virtualenv after installing tox by python3 because it has python3 as shebang and masked
#   /usr/bin/virtualenv with python2 shebang. it can be removed later when all code will be ready for python3
# - disable subscription-manager - rhel/ubi build use only mirrors that don't require subscription
# NOTE: pin nss version due to bug https://bugzilla.redhat.com/show_bug.cgi?id=1896808
#       in centos it's possible to downgrade, in rhel yum issues prevent downgrade
RUN \
    sed -i "s/enabled=.*/enabled=$YUM_SM_PLUGIN_ENABLED/g" /etc/yum/pluginconf.d/subscription-manager.conf && \
    if [ -f /etc/yum.repos.d/pip.conf ] ; then mv /etc/yum.repos.d/pip.conf /etc/ ; fi && \
    YUM_ENABLE_REPOS=$(echo $YUM_ENABLE_REPOS | tr -d '"') && \
    if [[ -n "$YUM_ENABLE_REPOS" ]] ; then \
        echo "INFO: enable repos $YUM_ENABLE_REPOS" && \
        yum-config-manager --enable $YUM_ENABLE_REPOS ; \
        yum clean metadata ; \
    fi && \
    if ! yum repolist | grep -q epel ; then \
        yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm || exit 1 ; \
    fi && \
    yum -y update -x nss* && \
    curl --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://bootstrap.pypa.io"}/pip/2.7/get-pip.py | python2 - 'pip==20.1' && \
    curl -O --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://packages.endpointdev.com"}/rhel/7/os/x86_64/git-core-2.37.1-1.ep7.x86_64.rpm && \
    curl -O --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://packages.endpointdev.com"}/rhel/7/os/x86_64/git-2.37.1-1.ep7.x86_64.rpm && \
    curl -O --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://packages.endpointdev.com"}/rhel/7/os/x86_64/git-core-doc-2.37.1-1.ep7.noarch.rpm && \
    curl -O --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://packages.endpointdev.com"}/rhel/7/os/x86_64/perl-Git-2.37.1-1.ep7.noarch.rpm && \
    yum -y install \
        git-2.37.1-1.ep7.x86_64.rpm git-core-2.37.1-1.ep7.x86_64.rpm \
        git-core-doc-2.37.1-1.ep7.noarch.rpm perl-Git-2.37.1-1.ep7.noarch.rpm \
        python3 iproute devtoolset-7-gcc devtoolset-7-binutils \
        autoconf automake createrepo docker-client docker-python gdb rsync git-review jq libtool \
        make python-devel python-lxml rpm-build vim wget yum-utils redhat-lsb-core \
        rpmdevtools sudo gcc-c++ net-tools httpd \
        python-virtualenv python-future python-tox \
        elfutils-libelf-devel && \
    yum clean all && \
    rm -rf /var/cache/yum && \
    pip3 install --retries=10 --timeout 200 --upgrade tox setuptools "lxml<5.1" jinja2 && \
    rm -f /usr/local/bin/virtualenv

ADD entrypoint.sh /

RUN echo export CONTRAIL=$CONTRAIL >> $HOME/.bashrc && \
    echo export LD_LIBRARY_PATH=$CONTRAIL/build/lib >> $HOME/.bashrc && \
    wget -nv ${SITE_MIRROR:-"https://dl.google.com"}/go/go1.14.2.linux-amd64.tar.gz && \
    tar -C /usr/local -xzf go1.14.2.linux-amd64.tar.gz && \
    rm -f go1.14.2.linux-amd64.tar.gz && \
    echo export PATH=$PATH:/usr/local/go/bin >> $HOME/.bashrc && \
    wget -nv ${SITE_MIRROR:-"https://github.com"}/operator-framework/operator-sdk/releases/download/v0.17.2/operator-sdk-v0.17.2-x86_64-linux-gnu -O /usr/local/bin/operator-sdk-v0.17 && \
    wget -nv ${SITE_MIRROR:-"https://github.com"}/operator-framework/operator-sdk/releases/download/v0.18.2/operator-sdk-v0.18.2-x86_64-linux-gnu -O /usr/local/bin/operator-sdk-v0.18 && \
    ln -s /usr/local/bin/operator-sdk-v0.18 /usr/local/bin/operator-sdk && \
    chmod u+x /usr/local/bin/operator-sdk-v0.17 /usr/local/bin/operator-sdk-v0.18 /usr/local/bin/operator-sdk


ENTRYPOINT ["/entrypoint.sh"]
ARG LINUX_DISTR=registry.access.redhat.com/ubi8
ARG LINUX_DISTR_VER=8.4
FROM $LINUX_DISTR:$LINUX_DISTR_VER
ARG YUM_SM_PLUGIN_ENABLED=0

ARG SITE_MIRROR
ARG YUM_ENABLE_REPOS=""
ARG LC_ALL=en_US.UTF-8
ARG LANG=en_US.UTF-8
ARG LANGUAGE=en_US.UTF-8
ARG CONTAINER_TOOLS_VER=""

ENV USER root
ENV HOME /root
ENV CONTRAIL $HOME/contrail
ENV LC_ALL $LC_ALL
ENV LANG $LANG
ENV LANGUAGE $LANGUAGE
ENV PS1 "sandbox:\[\033[01;34m\]\w\[\033[00m\]\$ "

WORKDIR $CONTRAIL

# Inject repositories that we might need
# copy pip.conf to the same place and move it to right place later to simplify build script
COPY *.repo pip.conf* /etc/yum.repos.d/

# custom openssl built from source
ENV OPENSSL_ROOT_DIR /usr/local/ssl
ENV LD_LIBRARY_PATH $CONTRAIL/build/lib:$OPENSSL_ROOT_DIR/lib
ENV LIBRARY_PATH $LD_LIBRARY_PATH
ENV C_INCLUDE_PATH $OPENSSL_ROOT_DIR/include:/usr/include/tirpc
ENV CPLUS_INCLUDE_PATH $C_INCLUDE_PATH
ENV LDFLAGS "-L/usr/local/lib -L$OPENSSL_ROOT_DIR/lib"
ENV PATH $OPENSSL_ROOT_DIR/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/go/bin

# https://fedoraproject.org/wiki/Changes/Avoid_usr_bin_python_in_RPM_Build#Quick_Opt-Out
ENV PYTHON_DISALLOW_AMBIGUOUS_VERSION=0

# NOTE:
# - we have to remove /usr/local/bin/virtualenv after installing tox by python3 because it has python3 as shebang and masked
#   /usr/bin/virtualenv with python2 shebang. it can be removed later when all code will be ready for python3
# - disable subscription-manager - rhel/ubi build use only mirrors that don't require subscription
# NOTE: pin nss version due to bug https://bugzilla.redhat.com/show_bug.cgi?id=1896808
#       in centos it's possible to downgrade, in rhel yum issues prevent downgrade

# TODO: add ability to export repos for rhel-8 to be enabled inside container explicitly
#       codeready-builder-for-rhel-8-x86_64-rpms
# TODO:
#   exclude=openssl-devel - to avoid installation and break build earlier
#   as this is incompatible version with contrail
# TODO: remove podman pinning. for now podman 4.0.2 can't build general-base image due to bug with setxattr
RUN \
    echo "exclude=openssl-devel" >> /etc/yum.conf && \
    sed -i "s/enabled=.*/enabled=$YUM_SM_PLUGIN_ENABLED/g" /etc/yum/pluginconf.d/subscription-manager.conf && \
    if [ -f /etc/yum.repos.d/pip.conf ] ; then mv /etc/yum.repos.d/pip.conf /etc/ ; fi && \
    YUM_ENABLE_REPOS=$(echo $YUM_ENABLE_REPOS | tr -d '"') && \
    if [[ -n "$YUM_ENABLE_REPOS" ]] ; then \
        echo "INFO: enable repos $YUM_ENABLE_REPOS" && \
        dnf config-manager --enable $YUM_ENABLE_REPOS ; \
        dnf clean metadata ; \
    fi && \
    dnf update -y -x "redhat-release*" && \
    if ! dnf repolist | grep -q epel ; then \
        dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm || exit 1 ; \
    fi && \
    if [[ -n "$CONTAINER_TOOLS_VER" ]] ; then \
        echo "INFO: enable container-tools:${CONTAINER_TOOLS_VER}" && \
        dnf module disable -y container-tools && \
        dnf module enable -y container-tools:${CONTAINER_TOOLS_VER} ; \
    fi && \
    dnf group install -y --nobest "Development Tools" --excludepkgs "boost*,source-highlight,asciidoc,systemtap*" && \
    dnf install -y --nobest glibc-langpack-en rsync \
        python3 python2 iproute \
        createrepo git-review jq rpmdevtools \
        rpcgen perl-Test-Harness \
        vim wget yum-utils redhat-lsb-core \
        sudo net-tools httpd \
        podman-docker podman-3.2.3-0.11.module+el8.4.0+12050+ef972f71 \
        python3-devel python3-lxml python3-virtualenv python3-future python3-tox \
        libtirpc-devel compat-openssl10 compat-openssl10-debugsource python2-pyyaml && \
    rpm -ivh --nodeps $(repoquery -q --location --latest-limit 1 "mariadb-connector-c-3.*x86_64*" | head -n 1) && \
    rpm -ivh --nodeps $(repoquery -q --location --latest-limit 1 "mariadb-connector-c-devel-3.*x86_64*" | head -n 1) && \
    pip3 install --retries=10 --timeout 200 --upgrade tox setuptools "lxml<5.1" jinja2 && \
    which python || alternatives --verbose --set python /usr/bin/python2 && \
    which pip || alternatives --verbose --install /usr/bin/pip pip $(which pip2) 100 && \
    python2 -m pip install scons==3.1.2 pytest==3.0.6 sphinx==1.1.3 chardet==2.2.1 docutils==0.12 && \
    yum module disable -y nodejs && \
    dnf clean all && \
    echo "%_pkgverify_level none" > /etc/rpm/macros.verify && \
    touch /etc/containers/nodocker && \
    sed -i 's/.*image_default_format.*/image_default_format = "v2s2"/g' /usr/share/containers/containers.conf && \
    sed -i 's/.*image_build_format.*/image_build_format = "docker"/g' /usr/share/containers/containers.conf && \
    rm -rf /var/cache/yum && \
    rm -f /usr/local/bin/virtualenv

# custom compat ssl 1.0.2o
RUN \
    mkdir -p $OPENSSL_ROOT_DIR/lib && \
    ln -s /usr/src/debug/compat-openssl10-1.0.2o-3.el8.x86_64/include $OPENSSL_ROOT_DIR/include && \
    ln -s /usr/lib64/libcrypto.so.10 $OPENSSL_ROOT_DIR/lib/libcrypto.so && \
    ln -s /usr/lib64/libssl.so.10 $OPENSSL_ROOT_DIR/lib/libssl.so

ADD entrypoint.sh /

# golang
RUN wget -nv ${SITE_MIRROR:-"https://dl.google.com"}/go/go1.14.2.linux-amd64.tar.gz && \
    tar -C /usr/local -xzf go1.14.2.linux-amd64.tar.gz && \
    rm -f go1.14.2.linux-amd64.tar.gz && \
    wget -nv ${SITE_MIRROR:-"https://github.com"}/operator-framework/operator-sdk/releases/download/v0.17.2/operator-sdk-v0.17.2-x86_64-linux-gnu -O /usr/local/bin/operator-sdk-v0.17 && \
    wget -nv ${SITE_MIRROR:-"https://github.com"}/operator-framework/operator-sdk/releases/download/v0.18.2/operator-sdk-v0.18.2-x86_64-linux-gnu -O /usr/local/bin/operator-sdk-v0.18 && \
    ln -s /usr/local/bin/operator-sdk-v0.18 /usr/local/bin/operator-sdk && \
    chmod u+x /usr/local/bin/operator-sdk-v0.17 /usr/local/bin/operator-sdk-v0.18 /usr/local/bin/operator-sdk

RUN \
    echo export CONTRAIL=$CONTRAIL >> $HOME/.bashrc && \
    echo export OPENSSL_ROOT_DIR=$OPENSSL_ROOT_DIR >> $HOME/.bashrc && \
    echo export LD_LIBRARY_PATH=$LD_LIBRARY_PATH >> $HOME/.bashrc && \
    echo export LIBRARY_PATH=$LIBRARY_PATH >> $HOME/.bashrc && \
    echo export C_INCLUDE_PATH=$C_INCLUDE_PATH >> $HOME/.bashrc && \
    echo export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH >> $HOME/.bashrc && \
    echo export LDFLAGS=\"$LDFLAGS\" >> $HOME/.bashrc && \
    echo export PATH=$PATH >> $HOME/.bashrc && \
    echo set -m >> $HOME/.bashrc

ENTRYPOINT ["/entrypoint.sh"]
#!/bin/bash -e

# Bootstrap a centos7 tf-dev-sandbox image: repos, toolchain, python, go, operator-sdk.

# EPEL is needed for jq and friends; add it only when jq is not yet resolvable.
if ! yum info jq ; then
    yum -y install epel-release
fi

# pip.conf is shipped next to the repo files by the Dockerfile; relocate it.
if [ -f /etc/yum.repos.d/pip.conf ] ; then
    mv /etc/yum.repos.d/pip.conf /etc/
fi

# NOTE: pin nss version due to bug https://bugzilla.redhat.com/show_bug.cgi?id=1896808
yum -y update -x nss*
yum -y downgrade nss*

curl -s --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://bootstrap.pypa.io"}/pip/2.7/get-pip.py | python2 - 'pip==20.1'

# Add the SCL repo temporarily when absent; remember whether we added it so it
# can be removed again once devtoolset packages are installed.
scl_repo_added=0
if ! yum repolist | grep -q "centos-sclo-rh" ; then
    scl_repo_added=1
    yum -y install centos-release-scl
fi

echo "INFO: installing newer git"
curl -s -O --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://packages.endpointdev.com"}/rhel/7/os/x86_64/git-core-2.37.1-1.ep7.x86_64.rpm
curl -s -O --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://packages.endpointdev.com"}/rhel/7/os/x86_64/git-2.37.1-1.ep7.x86_64.rpm
curl -s -O --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://packages.endpointdev.com"}/rhel/7/os/x86_64/git-core-doc-2.37.1-1.ep7.noarch.rpm
curl -s -O --retry 3 --retry-delay 10 ${SITE_MIRROR:-"https://packages.endpointdev.com"}/rhel/7/os/x86_64/perl-Git-2.37.1-1.ep7.noarch.rpm
ls -l *.rpm

yum install -y git-2.37.1-1.ep7.x86_64.rpm git-core-2.37.1-1.ep7.x86_64.rpm git-core-doc-2.37.1-1.ep7.noarch.rpm perl-Git-2.37.1-1.ep7.noarch.rpm
echo "INFO: git installed $(git --version)"

yum -y install \
    python3 iproute devtoolset-7-gcc devtoolset-7-binutils \
    autoconf automake createrepo docker-client docker-python gdb git-review jq libtool rsync \
    make python-devel python-lxml rpm-build vim wget yum-utils redhat-lsb-core \
    rpmdevtools sudo gcc-c++ net-tools httpd \
    python-virtualenv python-future python-tox \
    elfutils-libelf-devel \
    doxygen graphviz
# next packages are required for UT
yum -y install java-1.8.0-openjdk
yum clean all
rm -rf /var/cache/yum
# Undo the temporary SCL repo addition from above.
if [[ "$scl_repo_added" == '1' ]]; then
    yum -y remove centos-release-scl
    rm -rf /var/cache/yum /etc/yum.repos.d/CentOS-SCLo-scl-rh.repo
fi

pip3 install --retries=10 --timeout 200 --upgrade tox setuptools "lxml<5.1" jinja2
# NOTE: we have to remove /usr/local/bin/virtualenv after installing tox by python3 because it has python3 as shebang and masked
# /usr/bin/virtualenv with python2 shebang. it can be removed later when all code will be ready for python3
rm -f /usr/local/bin/virtualenv

echo export CONTRAIL=$CONTRAIL >> $HOME/.bashrc
echo export LD_LIBRARY_PATH=$CONTRAIL/build/lib >> $HOME/.bashrc

# Go toolchain and operator-sdk binaries (v0.18 is the default "operator-sdk").
wget -nv ${SITE_MIRROR:-"https://dl.google.com"}/go/go1.14.2.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.14.2.linux-amd64.tar.gz
rm -f go1.14.2.linux-amd64.tar.gz
echo export PATH=$PATH:/usr/local/go/bin >> $HOME/.bashrc
wget -nv ${SITE_MIRROR:-"https://github.com"}/operator-framework/operator-sdk/releases/download/v0.17.2/operator-sdk-v0.17.2-x86_64-linux-gnu -O /usr/local/bin/operator-sdk-v0.17
wget -nv ${SITE_MIRROR:-"https://github.com"}/operator-framework/operator-sdk/releases/download/v0.18.2/operator-sdk-v0.18.2-x86_64-linux-gnu -O /usr/local/bin/operator-sdk-v0.18
ln -s /usr/local/bin/operator-sdk-v0.18 /usr/local/bin/operator-sdk
chmod u+x /usr/local/bin/operator-sdk-v0.17 /usr/local/bin/operator-sdk-v0.18 /usr/local/bin/operator-sdk
#!/bin/bash -e

# Bootstrap a centos8 tf-dev-sandbox image: repos, toolchain, python, go,
# operator-sdk and the compat openssl 1.0.2 layout expected by contrail.

# workaround for deprecated centos 8 repos
sed -i -e "s|mirrorlist=|#mirrorlist=|g" /etc/yum.repos.d/CentOS-*
sed -i -e "s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g" /etc/yum.repos.d/CentOS-*

if ! yum info git-review ; then
    yum -y install epel-release
fi

if [ -f /etc/yum.repos.d/pip.conf ] ; then
    mv /etc/yum.repos.d/pip.conf /etc/
fi

# to fix locale warning and to enable following cmd
yum install -y langpacks-en glibc-all-langpacks yum-utils

# for cmake (libuv-devel lives in PowerTools)
# NOTE(fix): was "yum --enable config-manager powertools" which is not a valid
# invocation ("--enable" is not a yum option and the subcommand was transposed),
# so the PowerTools repo was never enabled. Use the config-manager subcommand
# provided by the dnf-plugins shipped with yum-utils installed just above.
yum config-manager --set-enabled powertools

yum -y install \
    python3 iproute autoconf automake createrepo gdb git git-review jq libtool \
    make cmake libuv-devel rpm-build vim wget redhat-lsb-core \
    rpmdevtools sudo gcc-c++ net-tools httpd elfutils-libelf-devel \
    python3-virtualenv python3-future python3-tox python3-devel python3-lxml \
    python2-devel python2 python2-setuptools \
    doxygen graphviz

# next packages are required for UT
yum -y install java-1.8.0-openjdk

# this is for net-snmp packages (it is not possible to use BuildRequires in spec
# as it installs openssl-devel-1.1.1 which is incompatible with other Contrail comps
# (3rd party bind and boost-1.53))
rpm -ivh --nodeps $(repoquery -q --location --latest-limit 1 "mariadb-connector-c-3.*x86_64*" | head -n 1)
rpm -ivh --nodeps $(repoquery -q --location --latest-limit 1 "mariadb-connector-c-devel-3.*x86_64*" | head -n 1)

yum clean all
rm -rf /var/cache/yum

pip3 install --retries=10 --timeout 200 --upgrade tox setuptools "lxml<5.1" jinja2

# Go toolchain and operator-sdk binaries (v0.18 is the default "operator-sdk").
wget -nv ${SITE_MIRROR:-"https://dl.google.com"}/go/go1.14.2.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.14.2.linux-amd64.tar.gz
rm -f go1.14.2.linux-amd64.tar.gz
echo export PATH=$PATH:/usr/local/go/bin >> $HOME/.bashrc
wget -nv ${SITE_MIRROR:-"https://github.com"}/operator-framework/operator-sdk/releases/download/v0.17.2/operator-sdk-v0.17.2-x86_64-linux-gnu -O /usr/local/bin/operator-sdk-v0.17
wget -nv ${SITE_MIRROR:-"https://github.com"}/operator-framework/operator-sdk/releases/download/v0.18.2/operator-sdk-v0.18.2-x86_64-linux-gnu -O /usr/local/bin/operator-sdk-v0.18
ln -s /usr/local/bin/operator-sdk-v0.18 /usr/local/bin/operator-sdk
chmod u+x /usr/local/bin/operator-sdk-v0.17 /usr/local/bin/operator-sdk-v0.18 /usr/local/bin/operator-sdk

# this is required to compile boost-1.53 from tpp
alternatives --verbose --set python /usr/bin/python2

# install, customize and configure compat ssl 1.0.2o
yum install -y \
    compat-openssl10 \
    ${SITE_MIRROR:-"https://pkgs.dyn.su"}/el8/extras/x86_64/compat-openssl10-devel-1.0.2o-3.el8.x86_64.rpm \
    ${SITE_MIRROR:-"https://koji.mbox.centos.org"}/pkgs/packages/compat-openssl10/1.0.2o/3.el8/x86_64/compat-openssl10-debugsource-1.0.2o-3.el8.x86_64.rpm

OPENSSL_ROOT_DIR=/usr/local/ssl
echo export OPENSSL_ROOT_DIR=/usr/local/ssl >> $HOME/.bashrc
echo export LD_LIBRARY_PATH=$CONTRAIL/build/lib:$OPENSSL_ROOT_DIR/lib >> $HOME/.bashrc
echo export LIBRARY_PATH=$LD_LIBRARY_PATH >> $HOME/.bashrc
echo export C_INCLUDE_PATH=$OPENSSL_ROOT_DIR/include:/usr/include/tirpc >> $HOME/.bashrc
echo export CPLUS_INCLUDE_PATH=$C_INCLUDE_PATH >> $HOME/.bashrc
echo export LDFLAGS=\"-L/usr/local/lib -L$OPENSSL_ROOT_DIR/lib\" >> $HOME/.bashrc
echo export PATH=$PATH:$OPENSSL_ROOT_DIR/bin >> $HOME/.bashrc

mkdir -p $OPENSSL_ROOT_DIR/lib
ln -s /usr/src/debug/compat-openssl10-1.0.2o-3.el8.x86_64/include $OPENSSL_ROOT_DIR/include
ln -s /usr/lib64/libcrypto.so.10 $OPENSSL_ROOT_DIR/lib/libcrypto.so
ln -s /usr/lib64/libssl.so.10 $OPENSSL_ROOT_DIR/lib/libssl.so
#!/bin/bash -e

# Build the tf-dev-env (sandbox) container image for the selected distro.

scriptdir=$(realpath $(dirname "$0"))
source ${scriptdir}/../common/common.sh

# Run a command as root, except on macosx where docker is usable without sudo.
function mysudo() {
    if [[ $DISTRO == "macosx" ]]; then
        "$@"
    else
        # NOTE(fix): "$@" must be quoted, otherwise arguments that contain
        # spaces (e.g. --build-arg values) are re-split into separate words.
        sudo "$@"
    fi
}

LINUX_DISTR=${LINUX_DISTR:-'centos'}
LINUX_DISTR_VER=${LINUX_DISTR_VER:-7}

CONTRAIL_KEEP_LOG_FILES=${CONTRAIL_KEEP_LOG_FILES:-'false'}

mkdir -p ${WORKSPACE}/output/logs
logfile="${WORKSPACE}/output/logs/build-tf-dev-env.log"
echo "Building tf-dev-env image: ${DEVENV_IMAGE}" | tee $logfile

build_opts="--build-arg LC_ALL=en_US.UTF-8 --build-arg LANG=en_US.UTF-8 --build-arg LANGUAGE=en_US.UTF-8"
build_opts+=" --build-arg LINUX_DISTR=$LINUX_DISTR --build-arg LINUX_DISTR_VER=$LINUX_DISTR_VER"
build_opts+=" --build-arg SITE_MIRROR=${SITE_MIRROR:+${SITE_MIRROR}/external-web-cache}"
if [ -n "$YUM_ENABLE_REPOS" ] ; then
    build_opts+=" --build-arg YUM_ENABLE_REPOS=$YUM_ENABLE_REPOS"
fi
if [[ "$LINUX_DISTR" =~ 'centos' ]] ; then
    docker_file="Dockerfile.centos"
else
    if [[ "$LINUX_DISTR" =~ 'ubi7' ]] ; then
        docker_file="Dockerfile.ubi7"
    else
        docker_file="Dockerfile.ubi8"
    fi
    if [[ -n "$YUM_SM_PLUGIN_ENABLED" ]] ; then
        build_opts+=" --build-arg YUM_SM_PLUGIN_ENABLED=$YUM_SM_PLUGIN_ENABLED"
    fi
fi

docker_ver=$(mysudo docker -v | awk -F' ' '{print $3}' | sed 's/,//g')
echo "INFO: Docker version: $docker_ver"

# NOTE: lexicographic comparison - adequate while docker versions stay in the
# two-digit major range ("17.06"-era and newer).
if [[ "$docker_ver" < '17.06' ]] ; then
    # old docker can't use ARG-s before FROM:
    # comment all ARG-s before FROM
    cat ${docker_file} | awk '{if(ncmt!=1 && $1=="ARG"){print("#"$0)}else{print($0)}; if($1=="FROM"){ncmt=1}}' > ${docker_file}.nofromargs
    # and then change FROM-s that uses ARG-s
    sed -i \
        -e "s|^FROM \${CONTRAIL_REGISTRY}/\([^:]*\):\${CONTRAIL_CONTAINER_TAG}|FROM ${CONTRAIL_REGISTRY}/\1:${tag}|" \
        -e "s|^FROM \$LINUX_DISTR:\$LINUX_DISTR_VER|FROM $LINUX_DISTR:$LINUX_DISTR_VER|" \
        -e "s|^FROM \$UBUNTU_DISTR:\$UBUNTU_DISTR_VERSION|FROM $UBUNTU_DISTR:$UBUNTU_DISTR_VERSION|" \
        ${docker_file}.nofromargs
    docker_file="${docker_file}.nofromargs"
fi

#Configuring tpc.repo
sed -i "s|___SITE_MIRROR___|${SITE_MIRROR:-"http://nexus.opensdn.io/repository"}|" tpc.repo

echo "INFO: DISTRO=$DISTRO DISTRO_VER=$DISTRO_VER DISTRO_VER_MAJOR=$DISTRO_VER_MAJOR"
if [[ "$DISTRO_VER_MAJOR" == '8' ]] ; then
    build_opts+=' --format docker'
    # TODO: investigate: use of module 3.0 leads to broken build
    # (podman run is not working inside sandbox)
    # declare -A ct_vers=(["8.2"]="2.0" ["8.4"]="3.0")
    # build_opts+=" --build-arg CONTAINER_TOOLS_VER=${ct_vers[$DISTRO_VER]}"
fi
build_opts+=" --network host --no-cache --tag ${DEVENV_IMAGE} --tag ${CONTAINER_REGISTRY}/${DEVENV_IMAGE} -f $docker_file ."

if [[ $DISTRO != 'macosx' ]] ; then
    CONTRAIL_KEEP_LOG_FILES=${CONTRAIL_KEEP_LOG_FILES,,}
fi
if [[ "${CONTRAIL_KEEP_LOG_FILES}" != 'true' ]] ; then
    echo "INFO: build cmd: docker build $build_opts"
    mysudo docker build $build_opts 2>&1 | tee -a $logfile
    result=${PIPESTATUS[0]}
    if [ $result -eq 0 ]; then
        rm -f $logfile
    fi
else
    # skip output into terminal
    echo "INFO: build cmd: docker build $build_opts"
    mysudo docker build $build_opts >> $logfile 2>&1
    result=${PIPESTATUS[0]}
fi

exit $result
#!/bin/bash

# container/run.sh - entry point executed inside the tf-dev-sandbox container
# to run a single build stage ($1) with an optional target ($2).

my_file="$(readlink -e "$0")"
my_dir="$(dirname $my_file)"
source "$my_dir/../common/common.sh"
source "$my_dir/../common/functions.sh"
source "$my_dir/../common/tf_functions.sh"

stage="$1"
target="$2"

echo "INFO: run stage $stage with target $target"

set -eo pipefail

load_tf_devenv_profile
source_env
prepare_infra
cd $DEV_ENV_ROOT

[ -n "$DEBUG" ] && set -x

declare -a all_stages=(fetch configure compile package test freeze doxygen)
declare -a default_stages=(fetch configure)
declare -a build_stages=(fetch configure compile package)

# Stage "fetch": sync sources (retrying while the frozen tag moves), pin the
# frozen tag in the devenv profile, patch version.info files and invalidate
# the stages made stale by the new sources.
function fetch() {
    verify_tag=$(get_current_container_tag)
    while true ; do
        # Sync sources
        echo "INFO: make sync $(date)"
        make sync
        current_tag=$(get_current_container_tag)
        if [[ $verify_tag == $current_tag ]] ; then
            export FROZEN_TAG=$current_tag
            save_tf_devenv_profile
            break
        fi
        # If tag's changed during our fetch we'll cleanup sources and retry fetching
        echo "WARNING: tag was changed ($verify_tag -> $current_tag). Run sync again..."
        verify_tag=$current_tag
    done

    # paths must be fixed inside tf-dev-sandbox container
    for vfile in $(find .. -name version.info); do
        echo "INFO: patching file $vfile"
        echo $CONTRAIL_CONTAINER_TAG | sed 's/[_-]/./g' > $vfile
    done

    # Invalidate stages after new fetch. For fast build and patchsets invalidate only if needed.
    if [[ $BUILD_MODE == "fast" ]] ; then
        echo "INFO: Checking patches for fast build mode"
        if patches_exist ; then
            echo "INFO: patches encountered" $changed_projects
            if [[ -n $changed_product_projects ]] ; then
                echo "INFO: Contrail core is changed, cleaning all stages"
                cleanup compile
                # vrouter dpdk project uses makefile and relies on date of its artifacts to be fresher than sources
                # which after resyncing here isn't true, so we'll refresh it if it's unchanged to skip rebuilding
                # NOTE(fix): the array is changed_product_projects - the previous
                # "changed_product_project" is always empty, so the touch ran
                # even when tf-dpdk itself was patched and its rebuild was skipped.
                if ! [[ ${changed_product_projects[@]} =~ "tf-dpdk" ]] ; then
                    find $WORK_DIR/build/production/vrouter/dpdk/x86_64-native-linuxapp-gcc/build -type f -exec touch {} + || /bin/true
                fi
            fi
        else
            echo "INFO: No patches encountered"
        fi
        # Cleaning packages stage because we need to fetch ready containers if they're not to be built
        cleanup package
    else
        cleanup
    fi
}
-f /usr/lib/rpm/brp-python-bytecompile.org ]] ; then 103 | echo "INFO: disable byte compiling for python" 104 | mv /usr/lib/rpm/brp-python-bytecompile /usr/lib/rpm/brp-python-bytecompile.org 105 | cat < $scriptdir/common.env 51 | echo "INFO: common.env content:" 52 | cat $scriptdir/common.env 53 | 54 | # make env profile to run inside container 55 | # input dir can be already created and had files like patchsets-info.json, unittest_targets.lst 56 | input_dir="${scriptdir}/input" 57 | mkdir -p "$input_dir" 58 | tf_container_env_file="${input_dir}/tf-developer-sandbox.env" 59 | create_env_file "$tf_container_env_file" 60 | 61 | # mount this dir always - some stage can put files there even if it was empty when container was created 62 | mkdir -p ${scriptdir}/config 63 | 64 | devenv_image="$CONTAINER_REGISTRY/$DEVENV_IMAGE" 65 | 66 | echo 67 | echo 'INFO: environment setup' 68 | if ! is_container_created "$DEVENV_CONTAINER_NAME"; then 69 | if [[ "$stage" == 'frozen' ]]; then 70 | echo "INFO: fetching frozen tf-dev-env from CI registry" 71 | devenv_image="$FROZEN_REGISTRY/$DEVENV_IMAGE_NAME:frozen" 72 | fi 73 | 74 | if [[ "$BUILD_DEVDEV_ENV" != '1' ]] && ! is_container_created ${devenv_image} ; then 75 | if ! mysudo docker inspect $devenv_image >/dev/null 2>&1 && ! mysudo docker pull $devenv_image ; then 76 | if [[ "$BUILD_DEV_ENV_ON_PULL_FAIL" != '1' ]]; then 77 | exit 1 78 | fi 79 | echo "INFO: No image $devenv_image is available. Try to build." 
80 | BUILD_DEV_ENV=1 81 | fi 82 | fi 83 | 84 | if [[ "$BUILD_DEV_ENV" == '1' ]]; then 85 | echo "INFO: Build $DEVENV_IMAGE_NAME:$DEVENV_TAG docker image" 86 | cd ${scriptdir}/container 87 | ./build.sh -i ${DEVENV_IMAGE_NAME} ${DEVENV_TAG} 88 | cd ${scriptdir} 89 | fi 90 | 91 | options="-e LC_ALL=en_US.UTF-8 -e LANG=en_US.UTF-8 -e LANGUAGE=en_US.UTF-8 " 92 | volumes="" 93 | if [[ $DISTRO != "macosx" ]]; then 94 | volumes+=" -v /etc/localtime:/etc/localtime:${DOCKER_VOLUME_OPTIONS}" 95 | fi 96 | volumes+=" -v ${scriptdir}:/root/tf-dev-env:${DOCKER_VOLUME_OPTIONS}" 97 | if [[ "$BIND_CONTRAIL_DIR" != 'false' ]] ; then 98 | # make dir to create them under current user 99 | mkdir -p ${CONTRAIL_DIR} 100 | volumes+=" -v ${CONTRAIL_DIR}:/root/contrail:${DOCKER_VOLUME_OPTIONS}" 101 | fi 102 | # make dir to create them under current user 103 | mkdir -p ${WORKSPACE}/output/logs 104 | volumes+=" -v ${WORKSPACE}/output:/output:${DOCKER_VOLUME_OPTIONS}" 105 | volumes+=" -v ${input_dir}:/input:${DOCKER_VOLUME_OPTIONS}" 106 | volumes+=" -v ${scriptdir}/config:/config:${DOCKER_VOLUME_OPTIONS}" 107 | 108 | if [[ "$DISTRO" == 'rhel' && "$(echo $DISTRO_VER | cut -d '.' -f 1)" == '8' ]] ; then 109 | echo "INFO: add podman container options for rhel8 env" 110 | volumes+=' -v /var/run:/var/run' 111 | volumes+=' -v /run/runc:/run/runc' 112 | volumes+=' -v /sys/fs/cgroup:/sys/fs/cgroup:ro' 113 | volumes+=' -v /sys/fs/selinux:/sys/fs/selinux' 114 | volumes+=' -v /var/lib/containers:/var/lib/containers:shared' 115 | volumes+=' -v /etc/containers:/etc/containers:ro' 116 | volumes+=' -v /usr/share/containers:/usr/share/containers:ro' 117 | options+=' --security-opt seccomp=unconfined' 118 | options+=' --security-opt label=disable' 119 | if [[ ! 
-e /run/runc ]] ; then 120 | # WA for rhel8.4 with container-tools:3.0: folder created at first podman run 121 | # so it is not possible to bind this folder at first run as podman 122 | # fails because folder doesnt exist 123 | sudo mkdir -v -p --context='unconfined_u:object_r:container_var_run_t:s0' -m 0600 /run/runc 124 | fi 125 | elif [[ $DISTRO != "macosx" ]]; then 126 | volumes+=" -v /var/run:/var/run:${DOCKER_VOLUME_OPTIONS}" 127 | fi 128 | 129 | # Provide env variables because: 130 | # - there is backward compatibility case with manual doing docker exec 131 | # into container and user of make. 132 | # - TF Jenkins CI use non-bind folder for sources 133 | start_sandbox_cmd="mysudo docker run --network host --privileged --detach \ 134 | --name $DEVENV_CONTAINER_NAME \ 135 | -w /root ${options} \ 136 | $volumes -it \ 137 | $devenv_image" 138 | 139 | echo "INFO: start cmd '$start_sandbox_cmd'" 140 | eval $start_sandbox_cmd 2>&1 141 | if [[ ${PIPESTATUS[0]} != 0 ]] ; then 142 | echo 143 | echo "ERROR: Failed to run $DEVENV_CONTAINER_NAME container." 144 | exit 1 145 | fi 146 | 147 | echo $DEVENV_CONTAINER_NAME created. 148 | else 149 | if is_container_up "$DEVENV_CONTAINER_NAME"; then 150 | echo "INFO: $DEVENV_CONTAINER_NAME already running." 151 | else 152 | echo "INFO: $(mysudo docker start $DEVENV_CONTAINER_NAME) started." 
153 | fi 154 | fi 155 | 156 | if [[ "$stage" == 'none' || "$stage" == 'frozen' ]] ; then 157 | echo "INFO: don't run any stages" 158 | exit 0 159 | fi 160 | 161 | if [[ "$stage" == 'test' ]] && which atop >/dev/null 2>&1 ; then 162 | nohup sudo atop -w ${WORKSPACE}/output/logs/atop 60 > ${WORKSPACE}/output/logs/atop.log 2>&1 < /dev/null & 163 | fi 164 | 165 | echo "INFO: run stage $stage with target $target" 166 | mysudo docker exec -i $DEVENV_CONTAINER_NAME /root/tf-dev-env/container/run.sh $stage $target 167 | result=${PIPESTATUS[0]} 168 | 169 | if [[ "$BIND_CONTRAIL_DIR" != 'false' ]] ; then 170 | # do chown for sources that were cloned with root inside container 171 | if ! mysudo chown -R $(id -u):$(id -g) $CONTRAIL_DIR ; then 172 | echo "WARNING: owner for sources folder was not changed correctly." 173 | fi 174 | fi 175 | 176 | if [[ $result == 0 ]] ; then 177 | echo 178 | echo '[DONE]' 179 | echo "There are stages available to run ./run.sh :" 180 | echo " build - perform sequence of stages: fetch, configure, compile, package" 181 | echo " (if stage was run previously it be skipped)" 182 | echo " fetch - sync TF git repos" 183 | echo " configure - fetch third party packages and install dependencies" 184 | echo " compile - build TF binaries" 185 | echo " package - package TF into docker containers (you can specify target container to build like container-vrouter)" 186 | echo " test - run unittests" 187 | echo " freeze - prepare tf-dev-env for pushing to container registry for future reuse by compressing contrail directory" 188 | echo " upload - push tf-dev-env to container registry" 189 | echo " none - create the tf-dev-env container empty" 190 | echo " frozen - fetch frozen tf-dev-env from Ci registry, you still have to use run.sh or fetch/configure to get sources" 191 | echo " doxygen - builds doxygen documentation for the project" 192 | echo "For advanced usage You can now connect to the sandbox container by using:" 193 | if [[ $DISTRO != "macosx" ]]; then 
194 | echo " sudo docker exec -it $DEVENV_CONTAINER_NAME bash" 195 | else 196 | echo " docker exec -it $DEVENV_CONTAINER_NAME bash" 197 | fi 198 | else 199 | echo 200 | echo 'ERROR: There were failures. See logs for details.' 201 | fi 202 | 203 | exit $result 204 | -------------------------------------------------------------------------------- /scripts/build-tpp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | source "$scriptdir/../common/common.sh" 5 | source_env 6 | 7 | function make_contrail_repo() { 8 | if [[ ! -e /etc/yum.repos.d/contrail.repo ]] ; then 9 | echo "INFO: enable contrail repo for next compilation steps" 10 | # enable contrail repo for dev-env if not created 11 | # (it is for tpp to be available during compile stage) 12 | cat < 0 or int(suite.attrib["failures"]) > 0: 151 | status = TestResult.FAILURE 152 | 153 | suite_obj = TestSuite(name=suite.attrib["name"]) 154 | 155 | # XXX(kklimonda): see if those can be generated from test cases 156 | for attr in ["disabled", "errors", "failures"]: 157 | if attr in suite.attrib: 158 | setattr(suite_obj, attr, suite.attrib[attr]) 159 | for test in suite.findall('testcase'): 160 | test_obj = TestCase(name=test.attrib['name']) 161 | for attr in ["classname", "status", "time"]: 162 | if attr in test.attrib: 163 | setattr(test_obj, attr, test.attrib[attr]) 164 | 165 | failures = test.findall('failure') 166 | if failures: 167 | for failure in failures: 168 | fail_obj = TestFailure() 169 | for attr in ["message", "type"]: 170 | if attr in failure.attrib: 171 | setattr(fail_obj, attr, failure.attrib[attr]) 172 | fail_obj.data = failure.text 173 | test_obj.failures += [fail_obj] 174 | 175 | suite_obj.test_cases += [test_obj] 176 | suite_objs += [suite_obj] 177 | return status, suite_objs 178 | 179 | def _store_test_results(self, suite, result, tests): 180 | key = self._get_relative_path(suite['node_path']) 181 | 
xml_basepath = self._get_relative_path(os.path.splitext(suite['xml_path'])[0]) 182 | log_basepath = self._get_relative_path(os.path.splitext(suite['log_path'])[0]) 183 | rnd_suffix_len = 8 184 | 185 | # If there is no log file, assume a total failure and store that info. 186 | if not os.path.exists(suite['log_path']): 187 | result = TestResult.MISSING_LOG 188 | 189 | while True: 190 | random_string = "".join([random.choice(string.ascii_lowercase) for i in range(rnd_suffix_len)]) 191 | xml_path = xml_basepath + "." + random_string + ".xml" 192 | log_path = log_basepath + "." + random_string + ".log" 193 | if not (os.path.exists(xml_path) or os.path.exists(log_path)): 194 | break 195 | 196 | if os.path.exists(suite['xml_path']): 197 | os.rename(suite['xml_path'], xml_path) 198 | else: 199 | logging.warning('{} does not exist!'.format(suite['xml_path'])) 200 | 201 | if os.path.exists(suite['log_path']): 202 | os.rename(suite['log_path'], log_path) 203 | else: 204 | logging.warning('{} does not exist!'.format(suite['log_path'])) 205 | 206 | result_text = "SUCCESS" if result == TestResult.SUCCESS else "FAILURE" 207 | self.test_results[key]['result'] = result_text 208 | self.test_results[key]["details"] += [{ 209 | "result": result, 210 | "xml_path": xml_path, 211 | "log_path": log_path, 212 | "tests": tests 213 | }] 214 | 215 | def _get_test_for_target(self, target): 216 | for test in self.tests: 217 | if self._get_relative_path(test['node_path']) == target: 218 | return test 219 | raise RuntimeError("No test found for target " + target) 220 | 221 | def analyze_test_results(self, targets=None): 222 | """Parses XML output from tests looking for failures. 223 | 224 | Parse XML output from tests and keep track of any failures, also 225 | renaming XML and log files so they are not overwritten by consecutive 226 | runs. 
227 | """ 228 | global_status = TestResult.SUCCESS 229 | failed_targets = [] 230 | 231 | # if we have not received targets, we want to analyze everything - pull 232 | # targets directly from self.tests. 233 | if not targets: 234 | targets = [self._get_relative_path(t['node_path']) for t in self.tests] 235 | 236 | for target in targets: 237 | test = self._get_test_for_target(target) 238 | logging.debug("Analyzing test results for %s", test['node_path']) 239 | 240 | status, tests = self._parse_junit_xml(test['xml_path']) 241 | if status == TestResult.MISSING_XML: 242 | logging.warning("Test %s generated no XML - assuming failure.", test['node_path']) 243 | self._store_test_results(test, status, tests) 244 | 245 | if status != TestResult.SUCCESS: 246 | global_status = TestResult.FAILURE 247 | failed_targets += [self._get_relative_path(test['node_path'])] 248 | return global_status, failed_targets 249 | 250 | def generate_test_report(self, scons_rc, final_result): 251 | tpl = """Tungsten Test Runner Results 252 | ============================ 253 | 254 | SCons targets executed: 255 | {% for target in scons_targets %} 256 | {{ target }} 257 | {% endfor %} 258 | SCons Result: {{ scons_rc }} 259 | Analyzer Result: {{ final_result }} 260 | 261 | Test Results: 262 | {% for key, values in results.items() %} 263 | ======================== 264 | SCons target: {{ key }} 265 | Result: {{ values['result'] }} 266 | ------------------------ 267 | {% for test in values['details'] %} 268 | Run #{{ loop.index }} 269 | Result: {{ test.result }} 270 | Tests: {{ test.test | length }} 271 | Failures: {{ test.failures }} 272 | Errors: {{ test.errors }} 273 | XML Log: {{ test.xml_path }} 274 | Console Log: {{ test.log_path }} 275 | 276 | Details: 277 | {% for test_suite in test.tests -%} 278 | {% for test_case in test_suite.test_cases -%} 279 | {% if test_case.failures | length > 0 %} 280 | {{- test_suite.name }}.{{- test_case.name }} - FAILED 281 | {% for failure in test_case.failures %} 
282 | {{- failure.data -}} 283 | {%- endfor -%} 284 | {% elif test_case.status == "notrun" -%} 285 | {{- test_suite.name }}.{{- test_case.name }} - SKIPPED 286 | {% else %} 287 | {{- test_suite.name }}.{{- test_case.name }} - SUCCESS 288 | {% endif -%} 289 | {% endfor -%} 290 | {% endfor -%} 291 | {% endfor -%} 292 | {% endfor -%} 293 | """ 294 | text = '' 295 | template = jinja2.Template(tpl) 296 | ctx = { 297 | "scons_targets": self.args.targets, 298 | "scons_rc": scons_rc, 299 | "final_result": final_result, 300 | "results": self.test_results} 301 | try: 302 | text = template.render(ctx) 303 | except Exception as e: 304 | print('Unit test report generation failed!') 305 | print('The exception is ignored to allow the job to successfully finish if no tests ' 306 | 'failed.') 307 | print('See https://contrail-jws.atlassian.net/browse/JD-475 for more information.') 308 | print(e) 309 | print(text) 310 | 311 | 312 | def main(): 313 | runner = TungstenTestRunner() 314 | runner.parse_arguments() 315 | runner.describe_tests() 316 | 317 | failed_targets = None 318 | for counter in range(3): 319 | rc, targets = runner.run_tests(targets=failed_targets) 320 | if rc > 0: 321 | logging.info("SCons failed with exit code {}. Analyzing results.".format(rc)) 322 | else: 323 | logging.info("SCons succeeded. Analyzing results.") 324 | 325 | # First analysis is done over all tests, because at this point 326 | # a) we want to analyze everything 327 | # b) targets that we have are "generic", not for each test - can't 328 | # match it against tests that we store. 329 | result, failed_targets = runner.analyze_test_results(targets=(None if counter == 0 else targets)) 330 | logging.info("Analyzer result is " + ("SUCCESS" if result == TestResult.SUCCESS else "FAILURE")) 331 | if rc > 0 and result == TestResult.SUCCESS: 332 | logging.error("SCons failed, but analyzer didn't find any errors.") 333 | if not failed_targets: 334 | logging.critical("Analyzer didn't find targets to retry. 
Exiting.") 335 | sys.exit(rc) 336 | 337 | if result == TestResult.SUCCESS: 338 | break 339 | 340 | logging.warning("Test Failure, {} targets failed:\n".format(len(failed_targets)) + 341 | "\n\t".join(failed_targets)) 342 | logging.info("Retrying, %d attempts remaining.", counter) 343 | 344 | runner.generate_test_report(rc, "SUCCESS" if result == TestResult.SUCCESS else "FAILURE") 345 | sys.exit(rc) 346 | 347 | 348 | if __name__ == "__main__": 349 | main() 350 | -------------------------------------------------------------------------------- /scripts/controller_ut/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TARGET=${1:-} 4 | TARGET_TIMEOUT=${TARGET_TIMEOUT:-"120m"} 5 | 6 | scriptdir=$(realpath $(dirname "$0")) 7 | source $scriptdir/definitions.sh 8 | 9 | cd $HOME/contrail 10 | dump_path="/output/cores" 11 | logs_path="/output/logs" 12 | mkdir -p "$logs_path" 13 | rm -rf "$dump_path" 14 | mkdir -p "$dump_path" 15 | 16 | # contrail code assumes this in tests, since it uses socket.fqdn(..) but expects the result 17 | # to be 'localhost' when for CentOS it would return 'localhost.localdomain' 18 | # see e.g.: https://github.com/Juniper/contrail-analytics/blob/b488e3cd608643ae5dd1e0dcbc03c9e8768178ce/contrail-opserver/alarmgen.py#L872 19 | bash -c 'echo "127.0.0.1 localhost" > /etc/hosts' 20 | bash -c 'echo "::1 localhost" >> /etc/hosts' 21 | 22 | unset BUILD_ONLY 23 | 24 | # pip==20.3.1 has issues with installing packages. looks like new resolver is broken for python3.6 25 | # let's pin old version to avoid such issues 26 | export VIRTUALENV_PIP="20.2" 27 | 28 | echo "INFO: Prepare targets $(date)" 29 | targets_file="/input/unittest_targets.lst" 30 | if [[ ! 
-f "$targets_file" || -n "$TARGET" ]] ; then 31 | targets_file='/tmp/unittest_targets.lst' 32 | rm "$targets_file" && touch "$targets_file" 33 | for utest in $(jq -r ".[].scons_test_targets[]" controller/ci_unittests.json| sort | uniq) ; do 34 | if [[ -z "$TARGET" || "$utest" == *"$TARGET"* ]]; then 35 | echo "$utest" >> "$targets_file" 36 | fi 37 | done 38 | fi 39 | 40 | # target_set as an additional key for some log names 41 | if [ -e /input/target_set ]; then 42 | target_set=$(cat /input/target_set) 43 | fi 44 | 45 | res=0 46 | echo "INFO: enable core dumps" 47 | ulimit -c unlimited 48 | echo "$dump_path/core-%i-%p-%E" > /proc/sys/kernel/core_pattern 49 | 50 | echo "INFO: targets to run:" 51 | cat "$targets_file" 52 | echo ; echo 53 | for utest in $(cat "$targets_file") ; do 54 | echo "INFO: $(date) Starting unit tests for target $utest" 55 | logfilename="$(echo $utest | cut -f 1 -d ':' | rev | cut -f 1 -d '/' | rev).log" 56 | if ! timeout $TARGET_TIMEOUT "$scriptdir/run-tests.py" --less-strict -j $JOBS --skip-tests $DEV_ENV_ROOT/skip_tests $utest &> $logs_path/$logfilename ; then 57 | res=1 58 | echo "ERROR: $utest failed" 59 | fi 60 | echo "INFO: $(date) Unit test log is available at $logs_path/$logfilename" 61 | done 62 | 63 | function process_file() { 64 | local src_file=$1 65 | local ext=$2 66 | if [[ "$src_file" == 'null' ]]; then 67 | return 68 | fi 69 | for file in $(ls -1 ${src_file%.${ext}}.*.${ext} 2>/dev/null) ; do 70 | dst_file=$(echo $file | sed "s~$HOME/contrail~$logs_path~g") 71 | mkdir -p $(dirname $dst_file) 72 | cp $file $dst_file 73 | done 74 | } 75 | 76 | # gather scons logs 77 | test_list="$logs_path/scons_describe_tests.txt" 78 | if [[ -n "$target_set" ]] ; then test_list+=".$target_set" ; fi 79 | scons -Q --warn=no-all --describe-tests $(cat $targets_file | tr '\n' ' ') > $test_list 80 | while IFS= read -r line 81 | do 82 | process_file "$(echo $line | jq -r ".log_path" 2>/dev/null)" 'log' 83 | process_file "$(echo $line | jq -r 
".xml_path" 2>/dev/null)" 'xml' 84 | done < "$test_list" 85 | 86 | # gather core dumps 87 | cat < /tmp/commands.txt 88 | set height 0 89 | t a a bt 90 | quit 91 | COMMAND 92 | echo "INFO: cores: $(ls -l $dump_path/)" 93 | for core in $(ls -1 $dump_path/core-*) ; do 94 | x=$(basename "${core}") 95 | y=${x/#core-*[0-9]-*[0-9]-/} 96 | y=${y//\!//} 97 | timeout -s 9 30 gdb --command=/tmp/commands.txt -c $core $y > build/$x-bt.log 98 | done 99 | rm -rf $dump_path 100 | 101 | # gather test logs 102 | for file in $(find build/ -name '*.log' ! -size 0) ; do 103 | mkdir -p $logs_path/$(dirname $file) 104 | cp -u $file $logs_path/$file 105 | done 106 | 107 | # gzip .log files - they consume several Gb unpacked 108 | pushd $logs_path 109 | time find $(pwd) -name '*.log' | xargs gzip 110 | popd 111 | 112 | if [[ "$res" != '0' ]]; then 113 | echo "ERROR: some UT failed" 114 | fi 115 | exit $res 116 | -------------------------------------------------------------------------------- /scripts/fetch-packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | source "$scriptdir/../common/common.sh" 5 | source_env 6 | 7 | opts='' 8 | if [[ -n "${SITE_MIRROR}" ]]; then 9 | opts="--site-mirror ${SITE_MIRROR}" 10 | fi 11 | cd $HOME/contrail/third_party 12 | if ! 
output=`python3 -u fetch_packages.py $opts 2>&1` ; then 13 | echo "$output" 14 | exit 1 15 | fi 16 | 17 | echo "$output" | grep -Ei 'Processing|patching' 18 | -------------------------------------------------------------------------------- /scripts/gather-unittest-targets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import json 4 | import sys 5 | import os 6 | from xml.etree import ElementTree 7 | 8 | 9 | patchsets = json.load(sys.stdin) 10 | 11 | home_dir = os.getenv("HOME", '/root') 12 | with open("%s/contrail/controller/ci_unittests.json" % home_dir, 'r') as fh: 13 | unittests = json.load(fh) 14 | 15 | # load vnc structure to evaluate UT targets 16 | # stdin is a patchsets_info.json file wich has gerrit plain structure 17 | # ci_unittests.json has vnc structure 18 | with open("%s/contrail/.repo/manifest.xml" % home_dir, 'r') as f: 19 | vnc_raw = ElementTree.parse(f).getroot() 20 | remotes = dict() 21 | for remote in vnc_raw.findall(".//remote"): 22 | remotes[remote.get('name')] = remote.get('fetch').split('/')[-1] 23 | projects = dict() 24 | for project in vnc_raw.findall(".//project"): 25 | projects[remotes[project.get('remote')] + '/' + project.get('name')] = project.get('path') 26 | 27 | review_files = set() 28 | for patchset in patchsets: 29 | if patchset["project"] not in projects: 30 | continue 31 | path = projects[patchset["project"]] 32 | review_files.update([path + '/' + file for file in patchset["files"]]) 33 | 34 | actual_targets = set() 35 | misc_targets = set() 36 | for ffile in review_files: 37 | for package in unittests.keys(): 38 | for sd in unittests[package]["source_directories"]: 39 | if sd in ffile: 40 | actual_targets.update(unittests[package]["scons_test_targets"]) 41 | misc_targets.update(unittests[package]["misc_test_targets"]) 42 | break 43 | 44 | if not actual_targets: 45 | actual_targets = set(unittests['default']["scons_test_targets"]) 46 | misc_targets = 
set(unittests['default']["misc_test_targets"]) 47 | 48 | for misc_target in misc_targets: 49 | actual_targets.update(unittests[misc_target]["scons_test_targets"]) 50 | 51 | for target in actual_targets: 52 | print(target) 53 | -------------------------------------------------------------------------------- /scripts/go/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | 5 | if [ -z "$GERRIT_PROJECT" ]; then 6 | echo "ERROR: GERRIT_PROJECT must be set for tox tests" 7 | exit 1 8 | fi 9 | 10 | echo "INFO: Running go tests for project: $GERRIT_PROJECT" 11 | 12 | type go >/dev/null 2>&1 || { 13 | export PATH=$PATH:/usr/local/go/bin 14 | } 15 | 16 | cd $HOME/contrail 17 | 18 | project=$(echo $GERRIT_PROJECT | cut -d '/' -f 2) 19 | echo "INFO: short project name: $project" 20 | path=$(./repo list -f -r $project | awk '{print $1}' | head -1) 21 | echo "INFO: project path: $path" 22 | 23 | res=0 24 | pushd $path 25 | 26 | make test || res=1 27 | 28 | popd 29 | 30 | # collect log files if required 31 | 32 | if [[ "$res" != '0' ]]; then 33 | echo "ERROR: some UT failed" 34 | fi 35 | exit $res 36 | -------------------------------------------------------------------------------- /scripts/package-tpp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | source "$scriptdir/../common/common.sh" 5 | source_env 6 | 7 | echo "INFO: compile tpp if needed $(date)" 8 | 9 | if [ -z "${REPODIR}" ] ; then 10 | echo "ERROR: env variable REPODIR is required" 11 | exit 1 12 | fi 13 | 14 | patchsets_info_file=/input/patchsets-info.json 15 | if [[ ! 
-e "$patchsets_info_file" ]] ; then 16 | echo "INFO: skip tpp: there is no patchset info" 17 | exit 18 | fi 19 | files=$(cat $patchsets_info_file | jq -r '.[] | select(.project | contains("tf-third-party-packages")) | select(has("files")) | .files[]') 20 | if [[ -z "$files" ]] ; then 21 | echo "INFO: skip tpp: there is no changes in the files for contrail-third-party-packages" 22 | exit 23 | fi 24 | 25 | working_dir=${REPODIR}/tpp-container-build 26 | mkdir -p ${working_dir} 27 | rm -rf ${working_dir}/* 28 | mkdir ${working_dir}/rpms 29 | 30 | pushd ${working_dir} 31 | 32 | find $REPODIR/RPMS/ -name "*.rpm" -exec cp "{}" ./rpms/ ";" 33 | if ! ls ./rpms/*.rpm >/dev/null 2>&1 ; then 34 | echo "ERROR: no tpp rpms found for packaging" 35 | exit 1 36 | fi 37 | 38 | cat < ./Dockerfile 39 | FROM scratch 40 | LABEL vendor="$VENDOR_NAME" \ 41 | version="$CONTRAIL_CONTAINER_TAG" \ 42 | release="5.1.0" 43 | COPY rpms /contrail/tpp/rpms 44 | EOF 45 | 46 | build_tag=${CONTAINER_REGISTRY}/contrail-third-party-packages:${CONTRAIL_CONTAINER_TAG} 47 | build_opts="--build-arg LC_ALL=en_US.UTF-8 --build-arg LANG=en_US.UTF-8 --build-arg LANGUAGE=en_US.UTF-8" 48 | build_opts+=" --no-cache --tag $build_tag -f Dockerfile ." 49 | if [[ "$DISTRO_VER_MAJOR" == '8' ]] ; then 50 | build_opts+=' --format docker' 51 | fi 52 | 53 | docker build $build_opts 54 | docker push $build_tag 55 | 56 | popd 57 | -------------------------------------------------------------------------------- /scripts/package/Dockerfile.src.tmpl: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | ARG VENDOR_NAME 4 | ARG CONTAINER_NAME 5 | ARG CONTRAIL_CONTAINER_TAG 6 | 7 | LABEL vendor=$VENDOR_NAME \ 8 | version=$CONTRAIL_CONTAINER_TAG \ 9 | release="5.1.0" \ 10 | name=$CONTAINER_NAME 11 | 12 | COPY . 
/src 13 | -------------------------------------------------------------------------------- /scripts/package/build-containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | workdir=$1 4 | prefix=$2 5 | container=$3 6 | 7 | scriptdir=$(realpath $(dirname "$0")) 8 | source "$scriptdir/../../common/common.sh" 9 | source_env 10 | 11 | res=0 12 | ${workdir}/containers/build.sh $container || res=1 13 | 14 | mkdir -p /output/logs/${prefix}s 15 | # do not fail script if logs files are absent 16 | mv ${workdir}/containers/*.log /output/logs/${prefix}s/ || /bin/true 17 | 18 | exit $res 19 | -------------------------------------------------------------------------------- /scripts/package/build-operator-containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | set -o pipefail 4 | 5 | scriptdir=$(realpath $(dirname "$0")) 6 | source "$scriptdir/../../common/common.sh" 7 | source_env 8 | 9 | REPODIR=${REPODIR:-"."} 10 | 11 | if [[ -z "${CONTRAIL_REGISTRY}" ]]; then 12 | echo "CONTRAIL_REGISTRY is not set" 13 | exit 1 14 | fi 15 | 16 | export CONTRAIL_REGISTRY 17 | 18 | if [[ -z "${CONTRAIL_REPOSITORY}" ]]; then 19 | echo "CONTRAIL_REPOSITORY is not set" 20 | exit 1 21 | fi 22 | 23 | export CONTRAIL_CONTAINER_TAG=${CONTRAIL_CONTAINER_TAG:-"dev"} 24 | CONTRAIL_KEEP_LOG_FILES=${CONTRAIL_KEEP_LOG_FILES:-'false'} 25 | 26 | function append_log() { 27 | local logfile=$1 28 | while read line ; do 29 | if [[ "${CONTRAIL_KEEP_LOG_FILES,,}" == 'true' ]] ; then 30 | echo "$line" >> $logfile 31 | else 32 | echo "$line" | tee -a $logfile 33 | fi 34 | done 35 | } 36 | 37 | function run_cmd(){ 38 | local me=$(whoami) 39 | if [[ "root" == "$me" ]] || ! grep -q "^docker:" /etc/group || groups | grep -q 'docker' ; then 40 | eval "$@" 41 | return 42 | fi 43 | if ! 
grep -q "^docker:.*:$me" /etc/group ; then 44 | /usr/bin/sudo usermod -aG docker $me 45 | fi 46 | echo "$@" | sg docker -c bash 47 | } 48 | 49 | function build_operator() { 50 | cd ${REPODIR}/tf-operator 51 | 52 | type go >/dev/null 2>&1 || { 53 | export PATH=$PATH:/usr/local/go/bin 54 | } 55 | export CGO_ENABLED=1 56 | 57 | local sdk_ver=$(awk '/github.com\/operator-framework\/operator-sdk/{print($2)}' go.mod | cut -d '.' -f1,2) 58 | 59 | if [[ -e contrib/ziu-2011-to-21.4-hack ]] ; then 60 | echo "INFO: build ziu hack tool to enable 2011 -> 21.4 upgrade" 61 | go build -o build/_output/bin/ ./contrib/ziu-2011-to-21.4-hack/ 62 | fi 63 | 64 | echo "INFO: build tf-operator (operator-sdk version $sdk_ver)" 65 | local target=${CONTAINER_REGISTRY}/tf-operator:${CONTRAIL_CONTAINER_TAG} 66 | local build_opts="" 67 | if [[ "$DISTRO_VER_MAJOR" == '8' ]] ; then 68 | build_opts+=' --image-builder podman --image-build-args "--format docker --network host -v /etc/resolv.conf:/etc/resolv.conf:ro"' 69 | fi 70 | local sdk_cmd="operator-sdk" 71 | [ -z "$sdk_ver" ] || sdk_cmd+="-$sdk_ver" 72 | echo "INFO: build tf-operator cmd: $sdk_cmd build $target $build_opts" 73 | run_cmd $sdk_cmd build $target "$build_opts" 74 | run_cmd docker push $target 75 | 76 | # olm bundle 77 | echo "INFO: build tf-operator bundle for olm" 78 | local build_tag=${CONTAINER_REGISTRY}/tf-operator-bundle:${CONTRAIL_CONTAINER_TAG} 79 | build_opts=" --no-cache --tag $build_tag -f deploy/bundle/bundle.Dockerfile deploy/bundle" 80 | if [[ "$DISTRO_VER_MAJOR" == '8' ]] ; then 81 | build_opts+=' --format docker --network host' 82 | fi 83 | echo "INFO: build tf-operator bundle cmd: docker build $build_opts" 84 | run_cmd docker build $build_opts 85 | run_cmd docker push $build_tag 86 | } 87 | 88 | res=0 89 | 90 | operator_logfile="${WORKSPACE}/tf_operator_build_containers.log" 91 | if [ ! -d ${REPODIR}/tf-operator ] ; then 92 | echo "WARNING: tf-operator is absent. 
Won't be built" 93 | exit 0 94 | fi 95 | 96 | build_operator 2>&1 | append_log $operator_logfile || res=1 97 | 98 | mkdir -p /output/logs/tf-operator 99 | # do not fail script if logs file is absent 100 | mv $operator_logfile /output/logs/tf-operator || /bin/true 101 | 102 | exit $res 103 | -------------------------------------------------------------------------------- /scripts/package/build-src-containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | source "$scriptdir/../../common/common.sh" 5 | source_env 6 | 7 | echo "INFO: Build sources containers" 8 | if [[ -z "${REPODIR}" ]] ; then 9 | echo "ERROR: REPODIR Must be set for build src containers" 10 | exit 1 11 | fi 12 | 13 | buildsh=${REPODIR}/contrail-container-builder/containers/build.sh 14 | if ! [[ -x "${buildsh}" ]] ; then 15 | echo "ERROR: build.sh tool from contrail-container-builder is not available in ${REPODIR} or is not executable" 16 | exit 1 17 | fi 18 | 19 | publish_list_file=${PUBLISH_LIST_FILE:-"${DEV_ENV_ROOT}/src_containers_to_publish"} 20 | if ! [[ -f "${publish_list_file}" ]] ; then 21 | echo "ERROR: targets for build as src containers must be listed at ${publish_list_file}" 22 | exit 1 23 | fi 24 | 25 | dockerfile_template=${DOCKERFILE_TEMPLATE:-"${scriptdir}/Dockerfile.src.tmpl"} 26 | if ! [[ -f "${dockerfile_template}" ]] ; then 27 | echo "ERROR: Dockerfile template ${dockerfile_template} is not available." 
28 | exit 1 29 | fi 30 | 31 | function build_container() { 32 | local line=$1 33 | # clean .dockerignore before build to get full git repo inside src container 34 | [ -f ${REPODIR}/${line}/.dockerignore ] && rm -f ${REPODIR}/${line}/.dockerignore 35 | CONTRAIL_CONTAINER_NAME=${line}-src ${buildsh} ${REPODIR}/${line} 36 | rm -f ${REPODIR}/${line}/Dockerfile 37 | } 38 | 39 | jobs="" 40 | echo "INFO: ===== Start Build Containers at $(date) =====" 41 | while IFS= read -r line; do 42 | if ! [[ "$line" =~ ^\#.*$ ]] ; then 43 | if ! [[ "$line" =~ ^[\-0-9a-zA-Z\/_.]+$ ]] ; then 44 | echo "ERROR: Directory name ${line} must contain only latin letters, digits or '.', '-', '_' symbols " 45 | exit 1 46 | fi 47 | 48 | if ! [[ -d "${REPODIR}/${line}" ]] ; then 49 | echo "WARNING: not found directory ${REPODIR}/${line} mentioned in ${publish_list_file}" 50 | continue 51 | fi 52 | 53 | echo "INFO: Pack $line sources to container ${line}-src ${buildsh}" 54 | cp -f ${dockerfile_template} ${REPODIR}/${line}/Dockerfile 55 | build_container ${line} & 56 | jobs+=" $!" 57 | fi 58 | done < ${publish_list_file} 59 | 60 | res=0 61 | for i in $jobs ; do 62 | wait $i || res=1 63 | done 64 | 65 | mkdir -p /output/logs/container-builder-src 66 | # do not fail script if logs files are absent 67 | mv ${REPODIR}/contrail-container-builder/containers/*.log /output/logs/container-builder-src/ || /bin/true 68 | 69 | if [[ $res == 1 ]] ; then 70 | echo "ERROR: There were some errors when source containers builded." 71 | exit 1 72 | fi 73 | 74 | echo "INFO: All source containers has been successfuly built." 
75 | -------------------------------------------------------------------------------- /scripts/package/build-test-containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | source "$scriptdir/../../common/common.sh" 5 | source_env 6 | 7 | REPODIR=${REPODIR:-"."} 8 | CONTRAIL_TEST_DIR=${CONTRAIL_TEST_DIR:-"${REPODIR}/third_party/contrail-test"} 9 | 10 | if [[ -z "${CONTRAIL_REGISTRY}" ]]; then 11 | echo "CONTRAIL_REGISTRY is not set" && exit 1 12 | fi 13 | 14 | export CONTRAIL_REGISTRY 15 | 16 | if [[ -z "${CONTRAIL_REPOSITORY}" ]]; then 17 | echo "CONTRAIL_REPOSITORY is not set" && exit 1 18 | fi 19 | 20 | export CONTRAIL_CONTAINER_TAG=${CONTRAIL_CONTAINER_TAG:-"dev"} 21 | openstack_version="train" 22 | CONTRAIL_KEEP_LOG_FILES=${CONTRAIL_KEEP_LOG_FILES:-'false'} 23 | 24 | # in R21.4 test containers are joined and base container is absent 25 | # supporting earlier versions 26 | if [[ -f "${CONTRAIL_TEST_DIR}/docker/base/Dockerfile" ]]; then 27 | tpc_repo="/etc/yum.repos.d/tpc.repo" 28 | if [ -f $tpc_repo ]; then 29 | cp $tpc_repo ${CONTRAIL_TEST_DIR}/docker/base/tpc.repo 30 | cp $tpc_repo ${CONTRAIL_TEST_DIR}/docker/test/tpc.repo 31 | fi 32 | fi 33 | 34 | pushd ${CONTRAIL_TEST_DIR} 35 | 36 | # in R21.4 test containers are joined and base container is absent 37 | # supporting earlier versions 38 | if [[ -f "${CONTRAIL_TEST_DIR}/docker/base/Dockerfile" ]]; then 39 | if [[ -n "$CONTRAIL_CONFIG_DIR" && -d "${CONTRAIL_CONFIG_DIR}/etc/yum.repos.d" && -n "$(ls ${CONTRAIL_CONFIG_DIR}/etc/yum.repos.d/)" ]] ; then 40 | # apply same repos for test containers 41 | cp -f ${CONTRAIL_CONFIG_DIR}/etc/yum.repos.d/* docker/base/ 42 | cp -f ${CONTRAIL_CONFIG_DIR}/etc/yum.repos.d/* docker/test/ 43 | fi 44 | 45 | if [ -e $CONTRAIL_CONFIG_DIR/etc/pip.conf ]; then 46 | cp $CONTRAIL_CONFIG_DIR/etc/pip.conf docker/base/ 47 | fi 48 | fi 49 | 50 | function append_log() { 51 | local 
logfile=$1 52 | local always_echo=${2:-'false'} 53 | while read line ; do 54 | if [[ "${CONTRAIL_KEEP_LOG_FILES,,}" != 'true' || "$always_echo" != 'false' ]] ; then 55 | echo "$line" | tee -a $logfile 56 | else 57 | echo "$line" >> $logfile 58 | fi 59 | done 60 | } 61 | 62 | function build_for_os_version() { 63 | local openstack_version=$1 64 | local logfile="./build-test-${openstack_version}.log" 65 | local openstack_repo_option="" 66 | if [[ ! -z "${OPENSTACK_REPOSITORY}" ]]; then 67 | echo Using openstack repository ${OPENSTACK_REPOSITORY}/openstack-${openstack_version} 68 | openstack_repo_option="--openstack-repo ${OPENSTACK_REPOSITORY}/openstack-${openstack_version}" 69 | fi 70 | 71 | echo "INFO: Start build test container for ${openstack_version}" | append_log $logfile true 72 | ./build-container.sh test \ 73 | --base-tag ${CONTRAIL_CONTAINER_TAG} \ 74 | --tag ${CONTRAIL_CONTAINER_TAG} \ 75 | --registry-server ${CONTRAIL_REGISTRY} \ 76 | --sku ${openstack_version} \ 77 | --contrail-repo ${CONTRAIL_REPOSITORY} \ 78 | ${openstack_repo_option} \ 79 | --post | append_log $logfile 80 | 81 | local res=${PIPESTATUS[0]} 82 | if [ $res -eq 0 ]; then 83 | echo "INFO: Build test container for ${openstack_version} finished successfully" | append_log $logfile true 84 | [[ "${CONTRAIL_KEEP_LOG_FILES,,}" != 'true' ]] && rm -f $logfile 85 | else 86 | echo "ERROR: Faild to build test container for ${openstack_version}" | append_log $logfile true 87 | fi 88 | return $res 89 | } 90 | 91 | res=0 92 | 93 | # in R21.4 test containers are joined and base container is absent 94 | # supporting earlier versions 95 | if [[ -f "${CONTRAIL_TEST_DIR}/docker/base/Dockerfile" ]]; then 96 | logfile="./build-test-base.log" 97 | echo "INFO: Build base test container" | append_log $logfile true 98 | ./build-container.sh base \ 99 | --registry-server ${CONTRAIL_REGISTRY} \ 100 | --tag ${CONTRAIL_CONTAINER_TAG} 2>&1 | append_log $logfile 101 | if [ ${PIPESTATUS[0]} -eq 0 ]; then 102 | echo "INFO: 
# Build the per-SKU test container only when the base container step succeeded.
if [[ $res == '0' ]]; then
    build_for_os_version $openstack_version || res=1
fi

popd

# Collect contrail-test build logs into the CI artifacts directory.
mkdir -p /output/logs/contrail-test
# do not fail script if logs files are absent
mv ${CONTRAIL_TEST_DIR}/*.log /output/logs/contrail-test || /bin/true

# Propagate the dev-env yum repos / pip config into tf-deployment-test so its
# containers are built against the same package mirrors.
mkdir -p ${REPODIR}/tf-deployment-test/mirrors
if [[ -n "$CONTRAIL_CONFIG_DIR" && -d "${CONTRAIL_CONFIG_DIR}/etc/yum.repos.d" && -n "$(ls ${CONTRAIL_CONFIG_DIR}/etc/yum.repos.d/)" ]] ; then
    # apply same repos for test containers
    cp -f ${CONTRAIL_CONFIG_DIR}/etc/yum.repos.d/* ${REPODIR}/tf-deployment-test/mirrors/
fi

if [ -e $CONTRAIL_CONFIG_DIR/etc/pip.conf ]; then
    cp $CONTRAIL_CONFIG_DIR/etc/pip.conf ${REPODIR}/tf-deployment-test/mirrors/
fi

# Optionally build tf-deployment-test containers; the script may be absent on
# older branches, in which case this step is skipped silently.
deployment_test_logfile="${WORKSPACE}/tf_deployment_test_build_containers.log"
if [[ $res == '0' && -e ${REPODIR}/tf-deployment-test/build-containers.sh ]]; then
    ${REPODIR}/tf-deployment-test/build-containers.sh | append_log $deployment_test_logfile true || res=1
fi

mkdir -p /output/logs/tf-deployment-test
# do not fail script if logs file is absent
mv $deployment_test_logfile /output/logs/tf-deployment-test || /bin/true

exit $res
${workdir}/containers/build.sh list | grep -v INFO | sed -e 's,/,_,g' -e "s/^/${prefix}-/" 13 | -------------------------------------------------------------------------------- /scripts/package/prepare-containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | source "$scriptdir/../../common/common.sh" 5 | source_env 6 | 7 | REPODIR=${REPODIR:-"."} 8 | CONTAINER_BUILDER_DIR=${CONTAINER_BUILDER_DIR:-"${REPODIR}/contrail-container-builder"} 9 | COPY_REPO_GLOB=${COPY_REPO_GLOB:-"$CONTRAIL_CONFIG_DIR/etc/yum.repos.d/*.repo"} 10 | 11 | for file in $COPY_REPO_GLOB /etc/yum.repos.d/tpc.repo; do 12 | if [ -e $file ]; then 13 | cp $file ${CONTAINER_BUILDER_DIR}/$(basename $file).template 14 | fi 15 | done 16 | if [ -e $CONTRAIL_CONFIG_DIR/etc/apt/sources.list ]; then 17 | cp $CONTRAIL_CONFIG_DIR/etc/apt/sources.list ${CONTAINER_BUILDER_DIR}/ 18 | fi 19 | if [ -e common.env ]; then 20 | cp common.env ${CONTAINER_BUILDER_DIR} 21 | fi 22 | 23 | if [ -e $CONTRAIL_CONFIG_DIR/etc/pip.conf ]; then 24 | cp $CONTRAIL_CONFIG_DIR/etc/pip.conf ${CONTAINER_BUILDER_DIR}/containers/general-base/ 25 | fi 26 | -------------------------------------------------------------------------------- /scripts/patch-repo-manifest.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import json 5 | import logging 6 | import os 7 | import sys 8 | import traceback 9 | from xml.etree import ElementTree 10 | 11 | 12 | def dbg(msg): 13 | logging.debug(msg) 14 | 15 | 16 | def err(msg): 17 | logging.error(msg) 18 | 19 | 20 | class Manifest(object): 21 | def __init__(self, file, remote): 22 | self.remote = remote 23 | if file: 24 | with open(file, 'r') as f: 25 | self._root = ElementTree.parse(f).getroot() 26 | else: 27 | self._root = ElementTree.fromstring('') 28 | 29 | def add_remote(self, org): 30 | remote_name = 
'gerritreview-' + org 31 | xpath = './/remote[@name=\'%s\']' % remote_name 32 | if not self._root.findall(xpath): 33 | remote = ElementTree.Element('remote', {'fetch': os.path.join(self.remote, org), 'name': remote_name}) 34 | self._root.insert(0, remote) 35 | return remote_name 36 | 37 | def set_branch_default(self, branch): 38 | defaults = self._root.findall('.//default') 39 | if defaults: 40 | for default in defaults: 41 | rev = default.get('revision').split('/')[:-1] 42 | rev.append(branch) 43 | b = branch if not rev else "/".join(rev) 44 | default.set('revision', b) 45 | 46 | def _apply_patch(self, patch): 47 | branch = patch.get('branch', None) 48 | org_project = patch['project'] 49 | org = org_project.split('/')[0] 50 | project = org_project.split('/')[1] 51 | remote = self.add_remote(org) 52 | xpath = './/project[@name=\'%s\']' % project 53 | for p in self._root.findall(xpath): 54 | p.set('remote', remote) 55 | if branch: 56 | p.set('revision', branch) 57 | 58 | 59 | def apply_patches(self, patchsets): 60 | for p in patchsets: 61 | self._apply_patch(p) 62 | 63 | def dump(self, file): 64 | if not file: 65 | ElementTree.dump(self._root) 66 | return 67 | with open(file, "w") as f: 68 | f.write(ElementTree.tostring(self._root, encoding='utf-8').decode('utf-8')) 69 | 70 | 71 | def load_patchsets(file): 72 | with open(file, 'r') as f: 73 | return json.load(f) 74 | 75 | 76 | def main(): 77 | parser = argparse.ArgumentParser( 78 | description="TF tool for Gerrit patchset dependencies resolving") 79 | parser.add_argument("--debug", dest="debug", action="store_true") 80 | parser.add_argument("--source", help="Source file with manifest", dest="source", type=str) 81 | parser.add_argument("--remote", help="Remote to set in manifest", dest="remote", type=str) 82 | parser.add_argument("--branch", help="Branch", dest="branch", type=str, default=None) 83 | parser.add_argument("--patchsets", help="File with patchsets", dest="patchsets", type=str, default=None) 84 | 
#!/bin/bash -e

# Dispatch a unit-test run to the helper script matching the requested target.
scriptdir=$(realpath $(dirname "$0"))
source "$scriptdir/../common/common.sh"
source_env

TARGET=${1:-}
export JOBS=${JOBS:-$(nproc)}

scriptdir=$(realpath $(dirname "$0"))

# Overlay any prepared configuration files onto the root filesystem.
if [[ -n "$CONTRAIL_CONFIG_DIR" && -d "$CONTRAIL_CONFIG_DIR" && -n "$(ls ${CONTRAIL_CONFIG_DIR}/)" ]]; then
    cp -rf ${CONTRAIL_CONFIG_DIR}/* /
fi

# Anything other than the named targets falls through to the controller tests,
# with TARGET forwarded as the (possibly empty) scons target.
case "$TARGET" in
    ui)
        echo "INFO: Running web ui tests"
        $scriptdir/webui_ut/run-tests.sh
        ;;
    tox)
        $scriptdir/tox/run-tests.sh
        ;;
    go)
        $scriptdir/go/run-tests.sh
        ;;
    *)
        echo "INFO: Running controller tests"
        $scriptdir/controller_ut/run-tests.sh $TARGET
        ;;
esac
#!/bin/bash

# Sync contrail/tungstenfabric sources via the repo tool, applying Gerrit
# patchsets when present.  This prologue validates the environment.
scriptdir=$(realpath $(dirname "$0"))
source "$scriptdir/../common/common.sh"
source_env

[ -n "$DEBUG" ] && set -x
set -o pipefail

echo
echo '[setup contrail git sources]'

if [ -z "${REPODIR}" ] ; then
    # BUG FIX: the echo line used to end with a stray backslash, which
    # line-continued into "exit 1" and made it arguments of echo -- so the
    # script kept running with an empty REPODIR.  Print and really exit.
    echo "ERROR: env variable REPODIR is required"
    exit 1
fi

cd $REPODIR
echo "INFO: current folder is $(pwd)"
| jq -r '.name') != "${CONTRAIL_BRANCH}" ]]; then 32 | if [[ $(curl -s https://api.github.com/repos/Juniper/contrail-vnc/branches/${CONTRAIL_BRANCH} | jq -r '.name') == "${CONTRAIL_BRANCH}" ]]; then 33 | echo "INFO: using Juniper/contrail-vnc" 34 | REPO_INIT_MANIFEST_URL="https://github.com/Juniper/contrail-vnc" 35 | VNC_ORGANIZATION="Juniper" 36 | VNC_REPO="contrail-vnc" 37 | else 38 | # reset branch to master if no such branch in both vnc: openshift-ansible, 39 | # contrail-tripleo-puppet, contrail-trieplo-heat-templates do not 40 | # depend on contrail branch and they are openstack depended. 41 | echo "INFO: There is no $CONTRAIL_BRANCH branch in tf-vnc or in contrail-vnc, use master for tf-vnc" 42 | echo "INFO: tungstenfabric/tf-vnc answer" 43 | curl -s https://api.github.com/repos/tungstenfabric/tf-vnc/branches/${CONTRAIL_BRANCH} 44 | echo "INFO: Juniper/contrail-vnc asnwer" 45 | curl -s https://api.github.com/repos/Juniper/contrail-vnc/branches/${CONTRAIL_BRANCH} 46 | CONTRAIL_BRANCH="master" 47 | GERRIT_BRANCH="" 48 | fi 49 | else 50 | echo "INFO: using ${REPO_INIT_MANIFEST_URL}" 51 | fi 52 | fi 53 | 54 | REPO_INIT_MANIFEST_BRANCH=${REPO_INIT_MANIFEST_BRANCH:-${CONTRAIL_BRANCH}} 55 | REPO_INIT_OPTS=${REPO_INIT_OPTS:-${repo_init_defauilts}} 56 | REPO_SYNC_OPTS=${REPO_SYNC_OPTS:-${repo_sync_defauilts}} 57 | REPO_TOOL=${REPO_TOOL:-"./repo"} 58 | 59 | if [[ ! 
-e $REPO_TOOL ]] ; then 60 | echo "INFO: Download repo tool" 61 | curl -s -o $REPO_TOOL https://storage.googleapis.com/git-repo-downloads/repo-1 || exit 1 62 | chmod a+x $REPO_TOOL 63 | fi 64 | 65 | echo "INFO: Init contrail sources git repos" 66 | # check if git is setup for current user, 67 | # use a default for repo sync if not 68 | git config --get user.name >/dev/null 2>&1 || git config --global user.name "tf-dev-env" 69 | git config --get user.email >/dev/null 2>&1 || git config --global user.email "tf-dev-env@tf" 70 | 71 | git config --global http.postBuffer 524288000 72 | 73 | # temporary hack for expired SSL certs at review.opencontrail.org 74 | # git config --global http.sslVerify false 75 | 76 | REPO_INIT_OPTS+=" -u $REPO_INIT_MANIFEST_URL -b $REPO_INIT_MANIFEST_BRANCH" 77 | echo "INFO: cmd: $REPO_TOOL init $REPO_INIT_OPTS" 78 | # disable pipefail because 'yes' fails if repo init doesnt read at least once 79 | set +o pipefail 80 | yes | $REPO_TOOL init $REPO_INIT_OPTS 81 | if [[ $? != 0 ]] ; then 82 | echo "ERROR: repo init failed" 83 | exit 1 84 | fi 85 | set -o pipefail 86 | 87 | branch_opts="" 88 | if [[ -n "$GERRIT_BRANCH" ]] ; then 89 | branch_opts+="--branch $GERRIT_BRANCH" 90 | fi 91 | 92 | # file for patchset info if any 93 | patchsets_info_file=/input/patchsets-info.json 94 | 95 | # resolve changes if any 96 | if [ ! -e "$patchsets_info_file" ] ; then 97 | echo "INFO: There is no file $patchsets_info_file - skipping cherry-picking." 98 | else 99 | echo "INFO: gerrit URL = ${GERRIT_URL}" 100 | cat $patchsets_info_file | jq '.' 
101 | vnc_changes=$(cat $patchsets_info_file | jq -r ".[] | select(.project == \"${VNC_ORGANIZATION}/${VNC_REPO}\") | .project + \" \" + .ref + \" \" + .branch") 102 | if [[ -n "$vnc_changes" ]] ; then 103 | # clone from GERRIT_URL cause this is taken from patchsets 104 | vnc_branch=$(echo "$vnc_changes" | head -n 1 | awk '{print($3)}') 105 | rm -rf ${VNC_REPO} 106 | cmd="git clone --depth=1 --single-branch -b $vnc_branch ${GERRIT_URL}${VNC_ORGANIZATION}/${VNC_REPO} ${VNC_REPO}" 107 | echo "INFO: $cmd" 108 | eval "$cmd" || { 109 | echo "ERROR: failed to $cmd" 110 | exit 1 111 | } 112 | pushd ${VNC_REPO} 113 | echo "$vnc_changes" | while read project ref branch; do 114 | cmd="git fetch ${GERRIT_URL}${VNC_ORGANIZATION}/${VNC_REPO} $ref && git cherry-pick FETCH_HEAD " 115 | echo "INFO: apply patch: $cmd" 116 | eval "$cmd" || { 117 | echo "ERROR: failed to $cmd" 118 | exit 1 119 | } 120 | done 121 | popd 122 | echo "INFO: replace manifest from review" 123 | cp -f ${VNC_REPO}/default.xml .repo/manifest.xml 124 | fi 125 | 126 | echo "INFO: patching manifest.xml for repo tool" 127 | ${scriptdir}/patch-repo-manifest.py \ 128 | --remote "$GERRIT_URL" \ 129 | $branch_opts \ 130 | --source ./.repo/manifest.xml \ 131 | --patchsets $patchsets_info_file \ 132 | --output ./.repo/manifest.xml || exit 1 133 | echo "INFO: patched manifest.xml" 134 | cat ./.repo/manifest.xml 135 | echo 136 | fi 137 | 138 | echo "INFO: Sync contrail sources git repos" 139 | threads=$(( $(nproc) * 8 )) 140 | if (( threads > 16 )) ; then 141 | threads=16 142 | fi 143 | if [ -n "$($REPO_TOOL --trace forall -c 'echo $REPO_PROJECT')" ] ; then 144 | REPO_SYNC_OPTS="-n ${REPO_SYNC_OPTS}" 145 | fi 146 | echo "INFO: cmd: $REPO_TOOL sync $REPO_SYNC_OPTS -j $threads" 147 | $REPO_TOOL --trace sync $REPO_SYNC_OPTS -j $threads 148 | if [[ $? 
!= 0 ]] ; then 149 | echo "ERROR: repo sync failed" 150 | exit 1 151 | fi 152 | # switch to branches 153 | while read repo_project ; do 154 | while read repo_path && read commit && read revision ; do 155 | pushd $repo_path 156 | remote=$(git log -1 --pretty=%d HEAD | tr -d '(,)' | awk '{print($3)}') 157 | [ -n "$remote" ] || { 158 | echo "ERROR: failed to get remote for tracking branch $revision for $repo_path : $repo_project" 159 | exit 1 160 | } 161 | [[ 'refs/heads/master' == $revision ]] && revision='master' 162 | echo "INFO: set tracking branch $revision to $remote for $repo_path : $repo_project" 163 | git checkout --track -b $revision $remote || git checkout $revision || { 164 | echo "ERROR: failed switch to branch $revision with remote $remote for $repo_path : $repo_project" 165 | exit 1 166 | } 167 | git log -3 --oneline 168 | echo '' 169 | popd 170 | done < <($REPO_TOOL info -l $repo_project | awk '/Mount path:|Current revision:|Manifest revision:/ {print($3)}') 171 | done < <($REPO_TOOL list --name-only | sort -u) 172 | 173 | if [[ $? != 0 ]] ; then 174 | echo "ERROR: $REPO_TOOL start failed" 175 | exit 1 176 | fi 177 | 178 | if [ -e "$patchsets_info_file" ] ; then 179 | # apply patches 180 | echo "INFO: review dependencies" 181 | cat $patchsets_info_file | jq -r '.[] | select(.project != "${VNC_ORGANIZATION}/${VNC_REPO}") | .project + " " + .ref' | while read project ref; do 182 | short_name=$(echo $project | cut -d '/' -f 2) 183 | repo_projects=$($REPO_TOOL list -r "^${short_name}$" | tr -d ':' ) 184 | # use manual filter as repo forall -regex checks both path and project 185 | while read -r repo_path repo_project ; do 186 | echo "INFO: process repo_path=$repo_path , repo_project=$repo_project" 187 | if [[ "$short_name" != "$repo_project" ]] ; then 188 | echo "INFO: doesnt match to $short_name .. 
skipped" 189 | continue 190 | fi 191 | echo "INFO: apply change $ref for $project" 192 | echo "INFO: cmd: git fetch $GERRIT_URL/$project $ref && git cherry-pick FETCH_HEAD" 193 | pushd $repo_path 194 | if ! git fetch $GERRIT_URL/$project $ref ; then 195 | echo "ERROR: failed to fetch changes for $project" 196 | exit 1 197 | fi 198 | fetch_head_sha=$(git log -1 --oneline --no-abbrev-commit FETCH_HEAD | awk '{print $1}') 199 | if ! git log --oneline --no-abbrev-commit | grep $fetch_head_sha ; then 200 | if ! git cherry-pick FETCH_HEAD ; then 201 | echo "ERROR: failed to cherry-pick changes for $project" 202 | exit 1 203 | fi 204 | fi 205 | popd 206 | done <<< "$repo_projects" 207 | done 208 | [[ $? != 0 ]] && exit 1 209 | fi 210 | 211 | # build one more src container - with manifest.xml to save git SHA for repos in the build 212 | #TODO: think about repos with non-master branches: tf-kolla-ansible, tf-tripleo-heat-templates 213 | mkdir -p ${REPODIR}/tf-build-manifest 214 | $REPO_TOOL manifest -r -o ${REPODIR}/tf-build-manifest/manifest.xml 215 | 216 | echo "INFO: gathering UT targets" 217 | if [ -e "$patchsets_info_file" ] ; then 218 | # this script uses ci_unittests.json from controller to eval required UT targets from changes 219 | ${scriptdir}/gather-unittest-targets.py < $patchsets_info_file | sort | uniq > /output/unittest_targets.lst || exit 1 220 | else 221 | # take default 222 | # TODO: take misc_targets into accout 223 | cat ${REPODIR}/controller/ci_unittests.json | jq -r ".[].scons_test_targets[]" | sort | uniq > /output/unittest_targets.lst 224 | fi 225 | cat /output/unittest_targets.lst 226 | echo 227 | 228 | echo "INFO: replace symlinks inside .git folder to real files to be able to use them at deployment stage" 229 | # replace symlinks with target files for all .git files 230 | for item in $(find ${REPODIR}/ -type l -print | grep "/.git/" | grep -v "/.repo/") ; do 231 | idir=$(dirname $item) 232 | target=$(realpath $item) 233 | rm -f "$item" 234 | cp 
-arfL $target $idir/ 235 | done 236 | -------------------------------------------------------------------------------- /scripts/tox/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TARGET_TIMEOUT=${TARGET_TIMEOUT:-"120m"} 4 | 5 | scriptdir=$(realpath $(dirname "$0")) 6 | 7 | if [ -z "$GERRIT_PROJECT" ]; then 8 | echo "ERROR: GERRIT_PROJECT must be set for tox tests" 9 | exit 1 10 | fi 11 | 12 | if [ ! -e /input/target_set ]; then 13 | echo "INFO: /input/target_set is absent - run all tox targets" 14 | target_set="ALL" 15 | else 16 | target_set=$(cat /input/target_set) 17 | fi 18 | 19 | echo "INFO: Running tox tests for project: $GERRIT_PROJECT, tox target: $target_set" 20 | 21 | cd $HOME/contrail 22 | 23 | project=$(echo $GERRIT_PROJECT | cut -d '/' -f 2) 24 | echo "INFO: short project name: $project" 25 | path=$(./repo list -f -r $project | awk '{print $1}' | head -1) 26 | echo "INFO: project path: $path" 27 | 28 | res=0 29 | pushd $path 30 | if [ ! -e tox.ini ]; then 31 | echo "WARNING: tox.ini is absent. Skipping tests." 
32 | exit 0 33 | fi 34 | 35 | tox -e $target_set || res=1 36 | popd 37 | 38 | logs_path="/output/logs" 39 | mkdir -p "$logs_path" 40 | # gather log files 41 | cp -R $path/.tox/$target_set/log/ $logs_path/ || /bin/true 42 | 43 | # gzip .log files - they consume several Gb unpacked 44 | pushd $logs_path 45 | time find $(pwd) -name '*.log' | xargs gzip 46 | popd 47 | 48 | if [[ "$res" != '0' ]]; then 49 | echo "ERROR: some UT failed" 50 | fi 51 | exit $res 52 | -------------------------------------------------------------------------------- /scripts/webui_ut/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | scriptdir=$(realpath $(dirname "$0")) 4 | 5 | src_root=$HOME/contrail 6 | cd $src_root 7 | logs_path="/output/logs" 8 | test_reports_dir="$logs_path/test-reports" 9 | coverage_reports_dir="$logs_path/coverage-reports" 10 | mkdir -p "$logs_path" "$test_reports_dir" "$coverage_reports_dir" 11 | 12 | function pre_test_setup() { 13 | #Update the featurePkg path in contrail-web-core/config/config.global.js with Controller, Storage and Server Manager features 14 | cd $src_root/contrail-web-core 15 | 16 | # Controller 17 | cat config/config.global.js | sed -e "s%/usr/src/contrail/contrail-web-controller%$src_root/contrail-web-controller%" > $src_root/contrail-web-core/config/config.global.js.tmp 18 | cp $src_root/contrail-web-core/config/config.global.js.tmp $src_root/contrail-web-core/config/config.global.js 19 | rm $src_root/contrail-web-core/config/config.global.js.tmp 20 | touch config/config.global.js 21 | 22 | #fetch dependent packages 23 | make fetch-pkgs-dev 24 | } 25 | 26 | function build_unittest() { 27 | #Setup the Prod Environment 28 | make prod-env REPO=webController 29 | #Setup the Test Environment 30 | make test-env REPO=webController 31 | 32 | # Run Controller related Unit Testcase 33 | cd $src_root/contrail-web-controller 34 | ./webroot/test/ui/run_tests.sh 2>&1 | tee 
$logs_path/web_controller_unittests.log 35 | } 36 | 37 | function copy_reports(){ 38 | cd $src_root 39 | report_dir=webroot/test/ui/reports 40 | 41 | echo "info: gathering XML test reports..." 42 | cp -p contrail-web*/$report_dir/tests/*-test-results.xml $test_reports_dir || true 43 | 44 | echo "info: gathering XML coverage reports..." 45 | cp -p $HOME/contrail/contrail-web-controller/$report_dir/coverage/*/*/cobertura-coverage.xml $coverage_reports_dir/controller-cobertura-coverage.xml || true 46 | } 47 | 48 | #This installs node, npm and does a fetch_packages, make prod env, test setup 49 | pre_test_setup 50 | 51 | # run unit test case 52 | build_unittest 53 | 54 | # copy the generated reports to specific directory 55 | copy_reports 56 | 57 | res=$? 58 | if [[ "$res" != '0' ]]; then 59 | echo "ERROR: some UT failed" 60 | fi 61 | echo "INFO: Unit test log is available at contrail/output/logs/web_controller_unittests.log" 62 | echo "INFO: Test report is available at contrail/output/logs/test-reports/web-controller-test-results.xml" 63 | echo "INFO: Coverage report is available at contrail/output/logs/coverage-reports/controller-cobertura-coverage.xml" 64 | exit $res 65 | -------------------------------------------------------------------------------- /skip_tests: -------------------------------------------------------------------------------- 1 | test.test_analytics_uve.AnalyticsUveTest.test_06_alarmgen_basic 2 | schema_transformer.tests.test_bgpvpn.TestBgpvpnWithVirtualNetwork.test_route_target_removed_when_resource_deleted 3 | test.test_analytics_uve.AnalyticsUveTest.test_12_uve_get_alarm 4 | test.test_analytics_sys.AnalyticsTest.test_03_flow_query 5 | neutron_plugin_contrail.tests.unit.opencontrail.test_contrail_plugin.TestContrailL3NatTestCase.test_router_set_gateway_cidr_overlapped_with_subnets 6 | -------------------------------------------------------------------------------- /src_containers_to_publish: 
#!/bin/bash

# Resolve this script's real location (following symlinks) and delegate to
# run.sh from the same directory.  Expansions are quoted so the script also
# works when checked out under a path containing spaces.
my_file="$(readlink -e "$0")"
my_dir="$(dirname "$my_file")"

source "$my_dir/run.sh"