├── .buildkite
│   └── pipeline.yaml
├── .github
│   └── CODEOWNERS
├── .gitignore
├── .gitreview
├── Dockerfile
├── LICENSE
├── README.md
├── assets
│   ├── entrypoint.sh
│   ├── hook.d
│   │   ├── B05check-yhist
│   │   ├── C05shell
│   │   └── D05update-sources
│   ├── pbuilder_create.sh
│   ├── pbuilderrc
│   └── profile
├── docker_build.sh
├── man
│   └── opx_rel_pkgasm.py.1
├── requirements.txt
└── scripts
    ├── idx-pkgs
    ├── opx_bld_basics.py
    ├── opx_build
    ├── opx_build_mlnx
    ├── opx_get_packages.py
    ├── opx_rel_pkgasm.py
    ├── opx_rootfs.py
    ├── opx_run
    └── templates
        ├── do_apt_upgrade_sh
        ├── do_dpkg_sh
        ├── do_insrtpkgs_sh
        └── install_opx_sh

--------------------------------------------------------------------------------
/.buildkite/pipeline.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 |   - label: "no-op"
3 |     command: "true"
4 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @open-switch/operations
2 |
3 | *.yang @open-switch/yang
4 | *.yhist @open-switch/yang
5 | debian/ @open-switch/operations
6 | *.md @open-switch/documentation
7 | .github/ @open-switch/administration
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # Jupyter Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # SageMath parsed files 79 | *.sage.py 80 | 81 | # Environments 82 | .env 83 | .venv 84 | env/ 85 | venv/ 86 | ENV/ 87 | 88 | # Spyder project settings 89 | .spyderproject 90 | .spyproject 91 | 92 | # Rope project settings 93 | .ropeproject 94 | 95 | # mkdocs documentation 96 | /site 97 | 98 | # mypy 99 | .mypy_cache/ 100 | 101 | .cid 102 | 103 | .DS_Store 104 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=review.openswitch.net 3 | port=29418 4 | project=opx/opx-build 5 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | LABEL maintainer="ops-dev@lists.openswitch.net" 3 | 4 | ENV DIST stretch 5 | 6 | RUN apt-get -qq update && apt-get -qq upgrade -y \ 7 | && apt-get -qq install -y --no-install-recommends \ 8 | build-essential \ 9 | cowbuilder \ 10 | curl \ 11 | debian-archive-keyring \ 12 | debootstrap \ 13 | dh-autoreconf \ 14 | dh-systemd \ 15 | fakechroot \ 16 | fakeroot \ 17 | git-buildpackage \ 18 | gosu \ 19 | lsb-release \ 20 | python-apt \ 21 | python-jinja2 \ 22 | python-lxml \ 23 | python-pip \ 24 | python-requests \ 25 | sudo \ 26 | vim \ 27 | wget \ 28 | && apt-get -qq autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* 29 | 30 | # Pyang not available as Debian package 31 | RUN pip2 install pyang requests-file \ 32 | && ln -s /usr/local/bin/pyang /usr/bin 33 | 34 | # Get OPX and other Debian GPG keys 35 | RUN gpg --batch --import /usr/share/keyrings/debian-archive-keyring.gpg \ 36 | && gpg --batch --keyserver hkp://keyserver.ubuntu.com:80 --recv-key AD5073F1 \ 37 | && gpg --batch --export AD5073F1 >/etc/apt/trusted.gpg.d/opx-archive-keyring.gpg 38 | 39 | # Add OPX package sources 40 | RUN mkdir -p /etc/apt/sources.list.d/ \ 41 | && echo "deb http://deb.openswitch.net/$DIST unstable main opx opx-non-free" >>/etc/apt/sources.list.d/opx.list \ 42 | && echo "deb-src http://deb.openswitch.net/$DIST unstable opx" >>/etc/apt/sources.list.d/opx.list \ 43 | && apt-get -qq update 44 | 45 | # Set up for the user we will create at runtime 46 | RUN mkdir -p /home/opx && chmod -R 777 /home/opx \ 47 | && echo 'opx ALL=(ALL) NOPASSWD:ALL' >>/etc/sudoers \ 48 | && echo '%opx ALL=(ALL) NOPASSWD:ALL' >>/etc/sudoers \ 49 | && echo 'Defaults env_keep += "OPX_RELEASE DIST ARCH"' >>/etc/sudoers 50 | 51 | COPY assets/profile /etc/profile.d/opx.sh 52 | COPY assets/entrypoint.sh / 53 | COPY assets/hook.d /var/cache/pbuilder/hook.d 54 | COPY assets/pbuilder_create.sh / 55 | COPY assets/pbuilderrc /etc/pbuilderrc 56 | COPY scripts /opt/opx-build/scripts 57 | 58 | 
VOLUME /mnt
59 | WORKDIR /mnt
60 |
61 | # vim: syn=dockerfile
62 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2018 Dell EMC Inc.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License"); you may
4 | not use this file except in compliance with the License. You may obtain
5 | a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
6 |
7 | THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR
8 | CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT
9 | LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS
10 | FOR A PARTICULAR PURPOSE, MERCHANTABILITY OR NON-INFRINGEMENT.
11 |
12 | See the Apache Version 2.0 License for specific language governing
13 | permissions and limitations under the License.
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OPX Build
2 |
3 | *Build environment and scripts for OpenSwitch.*
4 |
5 | If you would like to download binaries instead, see
6 | [Install OPX on Dell EMC ON Series platforms][install-docs].
7 |
8 | ## Quick start
9 |
10 | ```bash
11 | # get source code
12 | repo init -u https://github.com/open-switch/opx-manifest && repo sync
13 |
14 | # build all open-source packages
15 | opx-build/scripts/opx_run opx_build all
16 |
17 | # assemble installer (replace <dist> with your Debian distribution)
18 | opx-build/scripts/opx_run opx_rel_pkgasm.py --dist unstable \
19 |     -b opx-onie-installer/release_bp/OPX_dell_base_<dist>.xml
20 |
21 | # for example:
22 | opx-build/scripts/opx_run opx_rel_pkgasm.py --dist unstable \
23 |     -b opx-onie-installer/release_bp/OPX_dell_base_stretch.xml
24 | ```
25 |
26 | ## Getting started with OpenSwitch
27 |
28 | ### Prerequisites
29 |
30 | - 20GB free disk space
31 | - [Docker](https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/)
32 | - [Repo](https://source.android.com/source/downloading)
33 |
34 | ### Get source code
35 |
36 | ```bash
37 | repo init -u https://github.com/open-switch/opx-manifest && repo sync
38 | ```
39 |
40 | The `repo` commands download all of the source files that are necessary to
41 | build OpenSwitch. Binary libraries for the SAI are also required.
42 | These binary libraries are currently *not* open source, as they are based on
43 | the Broadcom SDK.
44 |
45 | ### Build packages
46 |
47 | Build dependencies are pulled from the `unstable` distribution. To
48 | change this, use `$OPX_RELEASE`.
49 |
50 | ```bash
51 | # Build all repositories
52 | opx-build/scripts/opx_run opx_build all
53 |
54 | # Build a single repository
55 | opx-build/scripts/opx_run opx_build opx-logging
56 |
57 | # Build against the 2.2.1 release
58 | OPX_RELEASE=2.2.1 opx-build/scripts/opx_run opx_build all
59 | ```
60 | **Note:** Ensure the Docker image is up to date by running `docker pull opxhub/build`.
61 |
62 | ## Manual build of single repository
63 |
64 | It can be helpful to build a single repository with finer control.
65 |
66 | It is always possible to enter the container via `opx_run`, manually install
67 | dependencies using `apt`, `dpkg`, `pip`, and so on, and then build as usual.
68 |
69 | ```bash
70 | fakeroot debian/rules binary
71 | ```
72 |
73 | This allows you to see all files created during the build that
74 | would normally be cleaned up after an `opx_build` build terminates.
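
A typical manual session might look like the following sketch; `opx-logging`
and the `apt-get build-dep` step are illustrative (any repository with a
`debian/` directory works, and you may install build dependencies however you
prefer):

```bash
# enter the build container
opx-build/scripts/opx_run

# inside the container: fetch and install the repository's build dependencies
cd opx-logging
sudo apt-get update
sudo apt-get build-dep ./

# build without the usual cleanup
fakeroot debian/rules binary
```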
75 |
76 | It is possible to build unstripped executables by adding the following line to
77 | the end of the file `debian/rules`:
78 |
79 | ```
80 | override_dh_strip:
81 | ```
82 |
83 | ## Working with Jessie
84 |
85 | Our image is based on Debian Stretch. Building packages for Jessie using `opx_build` will continue to work with `DIST=jessie`. To run a container based on Jessie, add `VERSION=jessie` to the environment.
86 |
87 | ```bash
88 | VERSION=jessie DIST=jessie opx-build/scripts/opx_run
89 | ```
90 |
91 | ## Installation
92 |
93 | Creating an installer requires the
94 | [opx-onie-installer](http://git.openswitch.net/cgit/opx/opx-onie-installer/)
95 | repository. This repository is included if you cloned with `repo` and contains
96 | the blueprints used to assemble an installer.
97 |
98 | Any local packages you have built will be included in the installer. To exclude
99 | them, remove the `deb` files from the repo root.
100 |
101 | The `unstable` distribution is used to grab missing packages on
102 | installer creation and fetch updates when running. To use a different
103 | distribution, use the `--dist` flag.
104 |
105 | Run `opx-build/scripts/opx_run opx_rel_pkgasm.py --help` to see the available
106 | distributions.
107 |
108 | ```bash
109 | opx-build/scripts/opx_run opx_rel_pkgasm.py --dist stable \
110 |     -b opx-onie-installer/release_bp/OPX_dell_base_stretch.xml
111 | ```
112 | To build a Debian Jessie-based installer, change `stretch` to `jessie`:
113 | ```bash
114 | opx-build/scripts/opx_run opx_rel_pkgasm.py --dist stable \
115 |     -b opx-onie-installer/release_bp/OPX_dell_base_jessie.xml
116 | ```
117 |
118 | ## Creating the opx-build Docker image
119 |
120 | ```bash
121 | ./docker_build.sh
122 | ```
123 |
124 | The default Docker image builds against the unstable OPX distribution. When
125 | other distributions are requested, pbuilder chroots are created on the fly.
126 | These chroots are lost when the container is removed, but take only about
127 | 7.5 seconds to create.
128 |
129 | ## Docker Image Architecture
130 |
131 | Since `git-buildpackage` with `cowbuilder` is used to build our packages, a
132 | pbuilder chroot is created in the image. Due to an issue with
133 | docker/kernel/overlayfs/pbuilder, the pbuilder chroot is created by running a
134 | privileged base container and committing it. To keep the image size small, only
135 | one chroot is created. This chroot contains sources from the unstable OPX
136 | release. To support building against multiple OPX releases, this chroot is
137 | copied and modified as needed with new package sources at runtime (when the
138 | `OPX_RELEASE` variable is used). When publishing our image, we use a tag with
139 | the format `${sha}-${dist}`, where `${sha}` is the HEAD of this repository and
140 | `${dist}` is the Debian distribution of the pbuilder chroot. The `latest` tag
141 | always points to the most recently published image.
142 |
143 | ## Build Options
144 |
145 | These environment variables enable different options.
146 |
147 | * `OPX_GIT_TAG=yes`: after each build, tag the repository for publishing
148 | * `OPX_RELEASE=2.2.1`: change which OPX release to build against
149 |
150 | ---
151 |
152 | > [For older documentation, see b64c3be](https://github.com/open-switch/opx-build/blob/b64c3bedf6db0d5c5ed9fbe0e3ddcb5f4da3f525/README.md).
153 |
154 | © 2018 Dell Inc. or its subsidiaries. All Rights Reserved.
155 | 156 | [install-docs]: https://github.com/open-switch/opx-docs/wiki/Install-OPX-on-Dell-EMC-ON-series-platforms 157 | -------------------------------------------------------------------------------- /assets/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -n "${LOCAL_UID}" ]]; then 4 | LOCAL_GID=${LOCAL_GID:-${LOCAL_UID}} 5 | 6 | if ! grep -q opx /etc/group; then 7 | groupadd -o --gid="${LOCAL_GID}" opx 8 | useradd -o --uid="${LOCAL_UID}" --gid="${LOCAL_GID}" -s /bin/bash opx 9 | fi 10 | 11 | exec gosu opx "$@" 12 | fi 13 | 14 | exec "$@" 15 | -------------------------------------------------------------------------------- /assets/hook.d/B05check-yhist: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Fail build if any yhist files change 3 | 4 | # Fix unknown TERM error with tput 5 | TERM=dumb 6 | 7 | builddir="$(readlink -f /tmp/buildd/*/debian/..)/" 8 | srcdir="/mnt/$(basename $builddir | awk -F'-' 'BEGIN {OFS="-"} {NF--; print}')/" 9 | 10 | if [ ! -d "$srcdir/history/" ]; then 11 | echo "Skipping older repository without history directory..." >&2 12 | exit 0 13 | fi 14 | 15 | # Only run on repos with yhist files 16 | if test -n "$(find $builddir -name '*.yhist' -type f)"; then 17 | 18 | # Clone source to temp directory for comparison 19 | work=$(mktemp -d) 20 | trap "rm -rf $work" EXIT 21 | git clone $srcdir $work 22 | 23 | # Copy new yhist files to src repo 24 | find $builddir -name '*.yhist' -type f -print0 | xargs -0 cp -t $work/history/ 25 | 26 | # Fail build if any yhist files changed 27 | git_diff="$(git -C $work status --porcelain '*.yhist')" 28 | if test -n "$git_diff"; then 29 | Error () { echo "$(tput setaf 1)$*$(tput sgr 0)"; } 30 | Error "=================================================" 31 | Error "[ERROR] Yhist changes detected. Build marked as failed." 32 | Error "" 33 | Error "If you meant to change these, please commit the changes and rebuild." 34 | Error "" 35 | Error "Changes:" 36 | Error "$git_diff" 37 | Error "=================================================" 38 | exit 1 39 | fi 40 | 41 | fi 42 | 43 | -------------------------------------------------------------------------------- /assets/hook.d/C05shell: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # Drop into a shell on build failure 3 | 4 | # stdin must be a terminal 5 | if [ ! -t 0 ]; then exit 0; fi 6 | 7 | Error () { echo "$(tput setaf 1)[ERROR] $*$(tput sgr 0)"; } 8 | 9 | Error "Build failed. Preparing shell for diagnosis." 10 | 11 | apt-get install -y --force-yes vim less bash >/dev/null 2>&1 12 | 13 | builddir="$(readlink -f /tmp/buildd/*/debian/..)/" 14 | cd "$builddir" 15 | 16 | Error "Build failed. Entering shell for diagnosis." 
17 |
18 | /bin/bash < /dev/tty > /dev/tty 2> /dev/tty
19 |
20 |
--------------------------------------------------------------------------------
/assets/hook.d/D05update-sources:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | # Update package sources
3 |
4 | if [[ -n "$CUSTOM_SOURCES" ]]; then
5 |     echo "$CUSTOM_SOURCES" >/etc/apt/sources.list.d/opx.list
6 | else
7 |
8 |     echo "deb http://deb.openswitch.net/$DIST $OPX_RELEASE main opx opx-non-free" \
9 |         >> /etc/apt/sources.list.d/opx.list
10 |
11 |     if [[ "$DIST" == "jessie" ]]; then
12 |         echo "deb http://deb.openswitch.net/contrib stable contrib" \
13 |             >> /etc/apt/sources.list.d/opx.list
14 |     fi
15 |
16 |     if [[ "$OPX_RELEASE" =~ ^(unstable|testing|stable)$ ]]; then
17 |         echo "deb-src http://deb.openswitch.net/$DIST $OPX_RELEASE opx" \
18 |             >>/etc/apt/sources.list.d/opx.list
19 |     fi
20 |
21 | fi
22 |
23 | echo "Updating with the following sources:
24 |
25 | /etc/apt/sources.list
26 | ---------------------
27 | $(cat /etc/apt/sources.list)
28 |
29 | /etc/apt/sources.list.d/opx.list
30 | --------------------------------
31 | $(cat /etc/apt/sources.list.d/opx.list)
32 | "
33 | apt-get -qq update
--------------------------------------------------------------------------------
/assets/pbuilder_create.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -eu
2 |
3 | mkdir -p "/mnt/pool/${DIST}-${ARCH}" && touch "/mnt/pool/${DIST}-${ARCH}/Packages"
4 |
5 | git-pbuilder create
6 | git-pbuilder update
7 | cat <<PBUILDER >/etc/apt/preferences
8 | Package: *
9 | Pin: origin ""
10 | Pin-Priority: 1100
11 |
12 | Package: *
13 | Pin: origin "deb.openswitch.net"
14 | Pin-Priority: 750
15 |
16 |
17 | PBUILDER
18 |
19 |
--------------------------------------------------------------------------------
/assets/pbuilderrc:
--------------------------------------------------------------------------------
1 | # this is your configuration file for pbuilder.
2 | # the file in /usr/share/pbuilder/pbuilderrc is the default template.
3 | # /etc/pbuilderrc is the one meant for overwriting defaults in
4 | # the default template
5 | #
6 | # read pbuilderrc.5 document for notes on specific options.
7 |
8 | : ${DIST:="$(lsb_release --short --codename)"}
9 | : ${ARCH:="$(dpkg --print-architecture)"}
10 |
11 | APTKEYRINGS=( /etc/apt/trusted.gpg.d/opx-archive-keyring.gpg )
12 | BINDMOUNTS="/mnt/pool/${DIST}-${ARCH}"
13 | HOOKDIR=/var/cache/pbuilder/hook.d
14 | MIRRORSITE=http://deb.debian.org/debian
15 | OTHERMIRROR="deb [trusted=yes] file:///mnt/pool/${DIST}-${ARCH} ./"
--------------------------------------------------------------------------------
/assets/profile:
--------------------------------------------------------------------------------
1 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
2 | PATH=/opt/opx-build/scripts:$PATH
3 |
4 | export PATH
5 |
6 | alias ll="ls -Fhl --color=auto"
7 | alias la="ls -AFhl --color=auto"
8 | alias less="less -FSRXc"
--------------------------------------------------------------------------------
/docker_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | # Currently includes jessie and stretch pbuilder roots
3 |
4 | # docker image tag
5 | VERSION="$(git log -1 --pretty=%h)"
6 | # docker image name
7 | IMAGE="opxhub/build"
8 | # file where container id is saved for cleanup
9 | CIDFILE=".cid"
10 |
11 | cleanup () {
12 |   if [[ -e "${CIDFILE}" ]]; then
13 |     docker rm -f "$(cat ${CIDFILE})"
14 |     rm -f ${CIDFILE}
15 |   fi
16 | }
17 | trap cleanup EXIT
18 |
19 | main() {
20 |   command -v docker >/dev/null 2>&1 || {
21 |     echo "You will need to install docker for this to work."
22 |     exit 1
23 |   }
24 |
25 |   docker build -t ${IMAGE}:base .
26 |   pbuilder_create jessie base stamp1
27 |   pbuilder_create stretch stamp1 "${VERSION}"
28 |   docker tag "${IMAGE}:${VERSION}" "${IMAGE}:latest"
29 | }
30 |
31 | pbuilder_create() {
32 |   # Create the pbuilder chroots.
33 |   #
34 |   # Since pyang is not (yet) available as a debian package, install it
35 |   # in the pbuilder chroot.
36 |   #
37 |   # There is an issue with docker/kernel/overlayfs/pbuilder: directory
38 |   # renames fail with a cross-device link error if the directory is on
39 |   # a lower layer. Work around this by combining all steps of chroot
40 |   # creation in one docker run invocation.
41 |   [[ $# != 3 ]] && return 1
42 |
43 |   dist="$1"
44 |   from_tag="$2"
45 |   to_tag="$3"
46 |
47 |   rm -f ${CIDFILE}
48 |
49 |   docker run \
50 |     --cidfile ${CIDFILE} \
51 |     --privileged \
52 |     -e ARCH=amd64 \
53 |     -e DIST="$dist" \
54 |     "${IMAGE}:${from_tag}" \
55 |     /pbuilder_create.sh
56 |
57 |   docker commit \
58 |     --change 'CMD ["bash"]' \
59 |     --change 'ENTRYPOINT ["/entrypoint.sh"]' \
60 |     "$(cat ${CIDFILE})" \
61 |     "${IMAGE}:${to_tag}"
62 |
63 |   docker rm -f "$(cat $CIDFILE)"
64 |   rm -f ${CIDFILE}
65 | }
66 |
67 | main
68 |
--------------------------------------------------------------------------------
/man/opx_rel_pkgasm.py.1:
--------------------------------------------------------------------------------
1 | .TH opx_rel_pkgasm.py 1 2017-08-01 OPX "OPX build commands"
2 | .SH NAME
3 | opx_rel_pkgasm.py \- Build an ONIE installer image for OPX
4 | .SH SYNOPSIS
5 | opx_rel_pkgasm.py -b
6 | .I blueprint
7 | -n
8 | .I build-number
9 | [ --debug ] [ -v
10 | .I verbosity-level
11 | ]
12 | .SH DESCRIPTION
13 | This utility constructs an image file suitable for an ONIE installer, to install OPX on a target.
14 | .TP
15 | .I blueprint
16 | is the name of the blueprint file for the image (see Blueprints, below)
17 | .TP
18 | .I build-number
19 | is the build number, appended as the last set of digits in the image version (see Output, below)
20 | .IP --debug
21 | enables debug output, if present
22 | .TP
23 | .I verbosity-level
24 | is the desired level of verbosity of output, from 0 to 9
25 | .SS Blueprints
26 | A blueprint is an XML file describing how to construct an ONIE image. It specifies:
27 | .IP \(bu
28 | a tarfile to use as the root filesystem,
29 | .IP \(bu
30 | a list of packages to be included in the image, and
31 | .IP \(bu
32 | a list of hooks (shell scripts) to run on the target, as part of the installation process.
33 | .P
34 | The schema for a blueprint file can be found in the file
35 |
36 | .RS
37 | opx-onie-installer/release_bp/blueprint.xsd
38 | .RE
39 |
40 | Documentation regarding semantics is embedded in the schema.
41 |
42 | For an example of a blueprint to build an ONIE installer image for OPX for all Dell platforms, see the file
43 |
44 | .RS
45 | opx-onie-installer/release_bp/OPX_dell_base.xml
46 | .RE
47 | .SS Output
48 | This utility creates an ONIE image named as follows:
49 |
50 | .RI PKGS_ name - version -installer- arch .bin
51 |
52 | where:
53 | .TP
54 | .I name
55 | is specified by the 'name' element under the 'output_format' element in the blueprint
56 | .TP
57 | .I version
58 | is of the form x.y.z.b, where x, y, and z are specified in the blueprint (see the 'version' element, under the 'output_format' element, in the blueprint), and the build number b is given by the command-line option
59 | .TP
60 | .I arch
61 | is the target CPU architecture (only x86_64 is currently supported)
62 | .SH EXAMPLE
63 | To build an ONIE image for OPX for all Dell hardware platforms, with build number 1, run the command:
64 |
65 | opx_build/scripts/opx_rel_pkgasm.py -b opx-onie-installer/release_bp/OPX_dell_base.xml -n 1
66 | .SH FILES
67 | opx-onie-installer/release_bp/blueprint.xsd
68 | .RS
69 | Schema for blueprint files
70 | .RE
71 |
72 | opx-onie-installer/inst-hooks/*.sh
73 | .RS
74 | Installation hooks
75 | .RE
76 |
77 | .RI PKGS_ name - version -installer- arch .bin
78 | .RS
79 | Installer image file
80 | .RE
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests>=2.20.0
2 | Jinja2>=2.10.1
3 | lxml==4.6.5
4 | requests_file==1.4.2
--------------------------------------------------------------------------------
/scripts/idx-pkgs:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Create the APT Packages (index) file for all local packages
4 |
5 | DIST="unstable"
6 | COMP="main"
7 | ARCH="amd64"
8 |
9 | [[ $# -gt 0 ]] && {
10 |   DIST="$1"
11 | }
12 |
13 | LOC="dists/$DIST/$COMP/binary-$ARCH"
14 |
15 | rm -fr dists
16 | mkdir -p "$LOC/"
17 | dpkg-scanpackages -m . >"$LOC/Packages"
--------------------------------------------------------------------------------
/scripts/opx_bld_basics.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | """
3 | opx_bld_basics.py -- set of classes and defines intrinsic to the basic
4 | OPX build environment
5 | """
6 |
7 | from __future__ import print_function
8 | import os
9 | import fnmatch
10 | import subprocess
11 |
12 |
13 | class ChangeDirectory(object):
14 |     """
15 |     ChangeDirectory class for use as a context manager
16 |     to perform operations in an alternate directory,
17 |     encapsulates save of current directory, change to
18 |     the alternative directory, and on exit of context
19 |     return to the original working directory
20 |     """
21 |     def __init__(self, path):
22 |         self.old_dir = os.getcwd()
23 |         self.new_dir = path
24 |
25 |     def __enter__(self):
26 |         os.chdir(self.new_dir)
27 |
28 |     def __exit__(self, exc_type, exc_value, exc_tb):
29 |         os.chdir(self.old_dir)
30 |
31 |         # Return the value the context-manager protocol expects:
32 |         # True when exc_type is None (clean exit, nothing to
33 |         # suppress), False otherwise, so that any exception
34 |         # raised inside the context is propagated by Python
35 |         # rather than silently swallowed.
36 |         if exc_type is None:
37 |             return True
38 |         return False
39 |
40 |
41 | # This dictionary maps the blueprint platform attribute into
42 | # the onie platform name. The onie platform name is used as
43 | # part of the image filenames.
44 |
45 | ONIE_PLATFORM_MAP = {
46 |     'S3048-ON': 'x86_64-dell_s3000_c2338-r0',
47 |     'S6000-ON': 'x86_64-dell_s6000_s1220-r0',
48 |     'S6000-VM': 'x86_64-dell_OPX_VM_s6000-r0',
49 |     'ALL-X64': 'x86_64',
50 | }
51 |
52 | # need to harmonize with the above list
53 | PLATFORMS = ONIE_PLATFORM_MAP.keys()
54 | IMAGES = ['ALL-Base']
55 | ARCHS = ['x86_64']
56 |
57 | PUB_LOCS = {'latest': '/tftpboot/OPX',
58 |             'archive': '/neteng/netarchive1/OPX'}
59 | DEFAULT_PUB = 'latest'
60 |
61 | DEV_RELEASE = 'engineering'
62 | DEFAULT_RELEASE = DEV_RELEASE
63 | RELEASE_DIRS = [DEV_RELEASE, 'testing']
64 |
65 | # Dictionary maps release name to release-specific
66 | # information. Should probably be replaced by an
67 | # independent database.
68 | # NOTE: all release names must be listed above.
69 | # This excludes the release "states"; these
70 | # will all be at a minimum in the testing
71 | # state (only one at a time), all others
72 | # will be stable + name
73 | INACTIVE_STATES = ['deprecated', 'retired']
74 | RELEASE_STATES = ['sid', 'unstable', 'testing', 'stable'] + INACTIVE_STATES
75 |
76 | RELEASES = [
77 |     { 'rel-name': 'OPX',
78 |       'rel-version': '1.0.0',
79 |       'rel-state': 'stable',
80 |       'tool-sha': '6fff5835afc6ca9ee5abcc4f22a32aa7c44e56ad'
81 |     }
82 | ]
83 |
84 | RELEASES_BY_NAME = {_r['rel-name']: _r for _r in RELEASES}
85 | RELEASES_BY_VERSION = {_r['rel-version']: _r for _r in RELEASES}
86 |
87 | RELEASE_NAMES = [n_['rel-name'] for n_ in RELEASES] + RELEASE_DIRS
88 |
89 |
90 | INST_GOOD_LINK = 'last_good'
91 | INST_LINKS = ['latest', INST_GOOD_LINK]
92 |
93 | DEFAULT_BUILDID = '99999'
94 | DEFAULT_DIRNAME = 'latest-build'
95 |
96 | VERBOSITY = 0
97 |
98 |
99 | def release_path(publication=DEFAULT_PUB, release=DEFAULT_RELEASE):
100 |     """
101 |     release_path - return the path to installers
102 |     input:
103 |         publication - which of the published sets of installers
104 |                       tftpboot or archive (netarchive)
105 |         release - which release build installers
106 |     """
107 |     assert release in RELEASE_NAMES
108 |
109 |     path = PUB_LOCS[publication]
110 |
111 |     if path is not None:
112 |         if release in RELEASE_DIRS:
113 |             rel_dir = '%s-release'
114 |         else:
115 |             rel_dir = 'release_%s-release'
116 |
117 |         path = os.path.join(path, (rel_dir % release),
118 |                             'AmazonInstallers')
119 |
120 |     if release != DEFAULT_RELEASE and VERBOSITY > 0:
121 |         print('release path returns %s' % path)
122 |     return path
123 |
124 |
125 | def find_files(path='workspace/debian/jessie/x86_64/build', find='*.deb',
126 |                out_filter='*-dev_*'):
127 |     """
128 |     find_files - find the files that match criteria and return the list
129 |     input:
130 |         path - where to start file tree walk
131 |         find - shell-style (fnmatch) pattern of desired file name(s)
132 |         out_filter - shell-style (fnmatch) pattern of file names to exclude
133 |     return: list of file paths
134 |     """
135 |     flist = []
136 |
137 |     for rdir, srdirs, files in os.walk(path):
138 |         if VERBOSITY > 1:
139 |             print("searching " + rdir)
140 |         if VERBOSITY > 2:
141 |             for _sd in srdirs:
142 |                 print("    has subdir " + _sd)
143 |
144 |         for fname in files:
145 |             if fnmatch.fnmatch(fname, find):
146 |                 if out_filter is not None \
147 |                         and fnmatch.fnmatch(fname, out_filter):
148 |                     continue
149 |                 flist.append(os.path.join(rdir, fname))
150 |         # consider making this an iterator/factory:
151 |         # return/pause until end -- can't recall what
152 |         # the pattern name is right now
153 |
154 |     return flist
155 |
156 |
157 | def short_path(file_path):
158 |     """
159 |     short_path -- returns a single directory with file_name
160 |     input:
161 |         file_path - some portion of a file path name, may be
162 |                     relative, but must exist
163 |     returns: string
164 |     diagnostic: raises exception if file not found
165 |     """
166 |     if not os.path.exists(file_path):
167 |         raise NameError(('%s does not exist' % file_path))
168 |
169 |     full_path = os.path.abspath(file_path)
170 |     my_dir = os.path.basename(os.path.dirname(full_path))
171 |     return os.path.join(my_dir, os.path.basename(full_path))
172 |
173 |
174 | def gen_package_list(pkg_cache_path):
175 |     """
176 |     gen_package_list -- generate a Packages and Packages.gz file from
177 |                         the packages in the given path
178 |     input:
179 |         pkg_cache_path - Path to the folder containing the package cache
180 |     returns: None
181 |     diagnostic: raises exception if call to dpkg-scanpackages or gzip fails
182 |     """
183 |     packages_file = os.path.join(pkg_cache_path, 'Packages')
184 |     packages_gz_file = os.path.join(pkg_cache_path, 'Packages.gz')
185 |
186 |     # Create a package repository in the cache.
187 |     # Do the write first so the file exists, otherwise
188 |     # opening it for read/write fails with file-not-found
189 |     cmd = ['dpkg-scanpackages', '-m', '.', '/dev/null']
190 |     with open(packages_file, 'w+') as fd_:
191 |         try:
192 |             subprocess.check_call(cmd, stdout=fd_,
193 |                                   cwd=pkg_cache_path)
194 |         except subprocess.CalledProcessError as ex:
195 |             print(ex)
196 |             raise
197 |
198 |     # Add a gzipped version of Packages for use by apt-get
199 |     cmd = ['gzip', '-9c']
200 |     with open(packages_file, 'r') as fd_, \
201 |             open(packages_gz_file, 'w') as fd0:
202 |         try:
203 |             subprocess.check_call(cmd, stdin=fd_, stdout=fd0,
204 |                                   cwd=pkg_cache_path)
205 |         except subprocess.CalledProcessError as ex:
206 |             print(ex)
207 |             raise
208 |
209 |
210 | # set of support functions for RELEASES above
211 |
212 | # pre-check for release name and version strings
213 |
214 | def valid_rel_ver(version):
215 |     """
216 |     valid_rel_ver -- is the version string passed in valid
217 |     """
218 |     return version in RELEASES_BY_VERSION
219 |
220 |
221 | def valid_rel_name(name):
222 |     """
223 |     valid_rel_name -- is the name string passed in valid
224 |     """
225 |     return name in RELEASES_BY_NAME
226 |
227 |
228 | # active release names
229 | def active_release_names():
230 |     """
231 |     Fetch all the active names
232 |     """
233 |     names = [n_['rel-name'] for n_ in RELEASES
234 |              if n_['rel-state'] != 'retired']
235 |     names = names + RELEASE_DIRS
236 |     return names
237 |
238 |
239 | # name query
240 | def get_relname_info(name):
241 |     """
242 |     locates the name (row) in the release map defined above
243 |     and returns that object's properties (columns)
244 |     """
245 |     return RELEASES_BY_NAME[name]
246 |
247 |
248 | # version query
249 | def get_relver_info(version):
250 |     """
251 |     As above, returns the row associated with the release
252 |     version string specified
253 |
254 |     Currently throws a KeyError if the version is not that
255 |     of a release
256 |     """
257 |     return RELEASES_BY_VERSION[version]
258 |
259 |
260 | # Local Variables:
261 | # tab-width:4
262 | # indent-tabs-mode:nil
263 | # End:
264 | # vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4 :
265 |
--------------------------------------------------------------------------------
/scripts/opx_build:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | # Use a temporary output directory to isolate the output of each build
4 | GIT_PBUILDER_OUTPUT_DIR=$(mktemp -d)
5 | export GIT_PBUILDER_OUTPUT_DIR
6 |
7 | cleanup () {
8 |   rm -rf "${GIT_PBUILDER_OUTPUT_DIR}"
9 |   rm -f ./*.{dsc,tar.gz,build,changes}
10 | }
11 | trap cleanup EXIT
12 |
13 | # Available options
14 | export OPX_GIT_TAG="${OPX_GIT_TAG:-no}"
15 |
16 | # Set build options
17 | export OPX_RELEASE="${OPX_RELEASE:-unstable}"
18 | export DIST="${DIST:-stretch}"
19 | export ARCH="${ARCH:-amd64}"
20 | export CUSTOM_SOURCES="${CUSTOM_SOURCES:-}"
21 |
22 | # Define list of repositories in dependency order.
23 | # TODO: read the debian/control files and compute the dependency graph.
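# (One possible sketch, untested: list each repo's binary packages from its
#  debian/control, map every other repo's Build-Depends entries onto the
#  repos that produce them, and topologically sort the resulting graph.)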
24 | repos=( 25 | opx-core 26 | opx-logging 27 | opx-common-utils 28 | opx-cps 29 | opx-base-model 30 | opx-db-sql 31 | opx-sai-api 32 | opx-sai-vm 33 | opx-nas-common 34 | opx-nas-linux 35 | opx-nas-ndi-api 36 | opx-nas-ndi 37 | opx-nas-acl 38 | opx-nas-interface 39 | opx-nas-multicast 40 | opx-nas-l2 41 | opx-nas-l3 42 | opx-nas-qos 43 | opx-nas-daemon 44 | opx-platform-config 45 | opx-sdi-sys 46 | opx-pas 47 | opx-tmpctl 48 | opx-tools 49 | opx-alarm 50 | opx-snmp 51 | opx-system-utils 52 | opx-dhcp-agent 53 | ) 54 | 55 | if [ $# -gt 0 ] && [ "$1" != "all" ]; then 56 | repos=( "$@" ) 57 | fi 58 | 59 | for r in ${repos[*]}; do 60 | if [ ! -d "$r" ]; then 61 | echo "$r does not exist..." 62 | continue 63 | fi 64 | 65 | POOL="pool/${DIST}-${ARCH}" 66 | OUTPUT_DIR="$POOL/$r" 67 | mkdir -p "$OUTPUT_DIR" 68 | 69 | if [ -e "$r/DEBIAN/control" ] && [ ! -e "$r/DEBIAN/rules" ]; then 70 | chmod 00755 "$r/DEBIAN" || true 71 | dpkg-deb -b "$r" "$OUTPUT_DIR" 72 | echo "Build results can be found in $OUTPUT_DIR." 73 | elif [ -e "$r/debian/control" ]; then 74 | ( 75 | # create local package cache 76 | (cd "/mnt/$POOL" && dpkg-scanpackages -m . >Packages) 77 | 78 | cd "$r" 79 | 80 | if [ "$OPX_GIT_TAG"b = "yes"b ]; then 81 | gbp buildpackage --git-tag-only 82 | else 83 | gbp buildpackage \ 84 | --git-pbuilder \ 85 | --git-dist="$DIST" \ 86 | --git-arch="$ARCH" \ 87 | --git-ignore-branch \ 88 | --git-ignore-new 89 | 90 | find "$GIT_PBUILDER_OUTPUT_DIR" \ 91 | \( -name '*.deb' -o -name '*.dsc' -o -name '*.changes' -o -name '*.tar.gz' -o -name '*.build' -o -name '*.buildinfo' \) \ 92 | -exec cp -t "../$OUTPUT_DIR/" {} + 93 | echo "Build results can be found in $OUTPUT_DIR." 94 | fi 95 | 96 | # clear output directory for next package 97 | # question mark ensures that rm -rf /* is never run 98 | rm -rf "${GIT_PBUILDER_OUTPUT_DIR:?}"/* 99 | ) 100 | else 101 | echo "Error. No debian/control file found in $r." 102 | exit 1 103 | fi 104 | done 105 | 106 | rm -f /mnt/Packages || true 107 | 108 | # vim: set sw=2 et 109 | -------------------------------------------------------------------------------- /scripts/opx_build_mlnx: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright Mellanox Technologies, Ltd. 2001-2017. 4 | # This software product is licensed under Apache version 2, as detailed in 5 | # the LICENSE file. 6 | 7 | set -e 8 | 9 | DIST=$(lsb_release --codename --short) 10 | 11 | # Define list of repositories in dependency order. 12 | repos=( 13 | opx-logging 14 | opx-common-utils 15 | opx-cps 16 | opx-base-model 17 | opx-platform-config 18 | opx-sdi-api 19 | opx-mlnx-sdi-sys 20 | opx-pas 21 | opx-tmpctl 22 | ) 23 | 24 | if [ ! 
-z "$1" ] && [ "$1" != "all" ]; then 25 | repos=( "$@" ) 26 | fi 27 | 28 | # Use temporary output directory isolate the output of each build 29 | GIT_PBUILDER_OUTPUT_DIR=$(mktemp -d) 30 | export GIT_PBUILDER_OUTPUT_DIR 31 | trap 'rm -rf ${GIT_PBUILDER_OUTPUT_DIR}' EXIT 32 | 33 | # For installing local packages as dependencies 34 | touch Packages 35 | 36 | wget https://github.com/Mellanox/SAI-Implementation/raw/sonic/sdk/sxd-libs_1.mlnx.4.2.3102_amd64.deb -P /mnt/ 37 | wget https://github.com/Mellanox/SAI-Implementation/raw/sonic/sdk/sxd-libs-dev_1.mlnx.4.2.3102_amd64.deb -P /mnt/ 38 | wget https://github.com/Mellanox/SAI-Implementation/raw/sonic/sdk/sx-complib_1.mlnx.4.2.3102_amd64.deb -P /mnt/ 39 | wget https://github.com/Mellanox/SAI-Implementation/raw/sonic/sdk/sx-complib-dev_1.mlnx.4.2.3102_amd64.deb -P /mnt/ 40 | 41 | apt-get install --assume-yes linux-headers-3.16.0-4-amd64 42 | rm -rf hw-mgmt 43 | git clone https://github.com/Mellanox/hw-mgmt -b V1.0.0050 44 | ( 45 | cd hw-mgmt 46 | sed "s~@SED_VERSION@~V1.0.0050~" -i debian/changelog 47 | chmod +x debian/rules 48 | debian/rules binary KVERSION="3.16.0-4-amd64" 49 | mv ./*.deb .. 50 | ) 51 | 52 | for r in ${repos[*]}; do 53 | if [ ! -d "$r" ]; then 54 | continue 55 | fi 56 | 57 | ( 58 | cd "$r" 59 | gbp buildpackage --git-dist="$DIST" --git-pbuilder --git-ignore-branch --git-ignore-new 60 | 61 | cp "${GIT_PBUILDER_OUTPUT_DIR}"/*.deb .. 62 | (cd /mnt && dpkg-scanpackages -m . >Packages) 63 | 64 | # clear output directory for next package 65 | # question mark ensures that rm -rf /* is never run 66 | rm -rf "${GIT_PBUILDER_OUTPUT_DIR:?}"/* 67 | ) 68 | done 69 | 70 | # vim: set sw=4 et 71 | -------------------------------------------------------------------------------- /scripts/opx_get_packages.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | opx_get_packages -- fetch a list of debian packages, and all their 4 | run-time dependencies 5 | """ 6 | 7 | from __future__ import print_function 8 | import apt 9 | import apt_pkg 10 | import collections 11 | import sys 12 | import os 13 | import shutil 14 | import subprocess 15 | import argparse 16 | import logging 17 | import itertools 18 | from distutils.version import LooseVersion 19 | 20 | logger = logging.getLogger(__name__) 21 | logger.addHandler(logging.NullHandler()) 22 | 23 | # defaults for the OpxPackages constructor (__init__) function 24 | # also used in command invocation method below 25 | DEFAULT_SYSROOT = None 26 | DEFAULT_SYSROOTDEV = None 27 | 28 | DEFAULT_PKG_URL = "http://deb.openswitch.net/" 29 | DEFAULT_PKG_DISTRIBUTION = "unstable" 30 | DEFAULT_PKG_COMPONENT = "main opx opx-non-free" 31 | 32 | 33 | class VersionWrapper(object): 34 | """ 35 | :class:`apt_pkg.Version` wrapper 36 | 37 | We need to do set operations on :class:`apt_pkg.Version` objects, 38 | but they are not hashable. This thin wrapper does just enough to 39 | make the objects hashable. 
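
    Two wrappers compare and hash equal exactly when they wrap the same
    parent package name and version string, so they can be used as set
    members or dictionary keys.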
40 |     """
41 |
42 |     def __init__(self, version):
43 |         self._ver = version
44 |
45 |     def __hash__(self):
46 |         return hash((self._ver.parent_pkg.name, self._ver.ver_str))
47 |
48 |     def __eq__(self, other):
49 |         return (self._ver.parent_pkg.name == other._ver.parent_pkg.name and
50 |                 self._ver.ver_str == other._ver.ver_str)
51 |
52 |     def __str__(self):
53 |         return self._ver.__str__()
54 |
55 |     @property
56 |     def parent_pkg(self):
57 |         """
58 |         apt Parent Package accessor
59 |         """
60 |         return self._ver.parent_pkg
61 |
62 |     @property
63 |     def ver_str(self):
64 |         """
65 |         apt Package Version string accessor
66 |         """
67 |         return self._ver.ver_str
68 |
69 |
70 | class OpxPackagesError(Exception):
71 |     """
72 |     OpxPackagesError - OPX get package general exception
73 |     """
74 |     pass
75 |
76 |
77 | class OpxPackageSource(object):
78 |     """
79 |     Represents package source (sources.list entry)
80 |     """
81 |
82 |     def __init__(self, url, distribution, component):
83 |         """
84 |         Construct a :class:`OpxPackageSource` object
85 |
86 |         :param url:
87 |             The url to the base of the package repository.
88 |
89 |         :param distribution:
90 |             The distribution (also called 'suite') reflects the
91 |             level of testing/acceptance of a particular package.
92 |             In the Debian package repository, packages start as
93 |             unstable, and are promoted to testing, stable, and a
94 |             release codename like 'wheezy' or 'jessie'.
95 |
96 |         :param component:
97 |             The component (also called 'section'). In the Debian
98 |             package repository, component is 'main', 'contrib', or
99 |             'non-free'. Other repositories have their own naming
100 |             conventions.
101 |         """
102 |         self.url = url
103 |         self.distribution = distribution
104 |         self.component = component
105 |
106 |
107 | class OpxPackages(object):
108 |     """
109 |     Provides interface to the python apt and apt_pkg libraries
110 |     Used to fulfill build and dev dependencies for clone and
111 |     clone-all actions.
112 |     Will be used to assemble from packages
113 |     """
114 |     def __init__(self,
115 |                  sysroot,
116 |                  pkg_sources,
117 |                  default_solver=False,
118 |                  sysrootdev=None,
119 |                  install_recommends=False,
120 |                  install_suggests=False):
121 |         """
122 |         Construct a :class:`OpxPackages` object
123 |
124 |         :param sysroot:
125 |             Path to sysroot
126 |         :param pkg_sources:
127 |             List of :class:`OpxPackageSource` objects, used to create
128 |             /etc/apt/sources.list file used to fetch packages.
129 |         :param sysrootdev:
130 |             Path to sysroot-dev
131 |         :param install_recommends:
132 |             If ``True``, install recommended packages.
133 |         :param install_suggests:
134 |             If ``True``, install suggested packages.
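
        Typical construction (illustrative values, mirroring main() below)::

            pkgs = OpxPackages(
                sysroot="./sysroot",
                pkg_sources=[OpxPackageSource("http://deb.openswitch.net/",
                                              "unstable",
                                              "main opx opx-non-free")])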
135 |         """
136 |         self._apt_cache = None
137 |         self._cache = None
138 |         self._default_solver = default_solver
139 |         self._pkg_sources = pkg_sources
140 |         self._folder = sysroot
141 |         self._build_folder = sysrootdev
142 |
143 |         if self._folder[-1:] == '/':
144 |             self._folder = self._folder[:-1]
145 |
146 |         if not os.path.exists(self._folder):
147 |             raise OpxPackagesError(self._folder + " does not exist")
148 |
149 |         _msg = "Sysroot is in " + self._folder
150 |         if not self._build_folder:
151 |             self._build_folder = self._folder + "-dev"
152 |
153 |         if self._build_folder and os.path.exists(self._build_folder):
154 |             _msg += " Development rootfs is in " + self._build_folder
155 |         else:
156 |             self._build_folder = None
157 |
158 |         print(_msg)
159 |
160 |         # Set up pointers to and create the dpkg package cache
161 |         # within the specified sysroot
162 |         self._apt_cache = os.path.join(self._folder, "var", "lib", "dpkg")
163 |
164 |         # Standard debian packages are maintained in a separate root
165 |         # file system image to keep isolation between the AR
166 |         # generated packages and the standard distribution packages.
167 |         # Development packages from the distro are imported in
168 |         # a sysroot-dev root file system image with a package
169 |         # cache, that package cache is used to seed the sysroot
170 |         # for individual package build or development, so seed
171 |         # this sysroot's package cache from the sysroot-dev if
172 |         # it exists ...
173 |         if self._build_folder:
174 |             _build_cache = os.path.join(self._build_folder,
175 |                                         "var", "lib", "dpkg")
176 |             print("Checking..." + self._apt_cache + " and " + _build_cache)
177 |             if not os.path.exists(self._apt_cache) \
178 |                     and os.path.exists(_build_cache):
179 |                 print("Copying.. " + _build_cache)
180 |                 shutil.copytree(_build_cache, self._apt_cache, symlinks=True)
181 |
182 |         self._apt_cache = os.path.join(self._folder, "var", "cache",
183 |                                        "apt", "archives")
184 |         self.sources = os.path.join(self._folder, "etc", "apt", "sources.list")
185 |         if not os.path.exists(self.sources):
186 |             if not os.path.exists(os.path.dirname(self.sources)):
187 |                 os.makedirs(os.path.dirname(self.sources))
188 |         else:
189 |             shutil.copy(self.sources, self.sources + ".save")
190 |
191 |         # create sources.list file with url, distribution, and component.
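        # With the default OpxPackageSource (see main() below) each
        # iteration emits, e.g.:
        #   deb [arch=amd64] http://deb.openswitch.net/ unstable main opx opx-non-free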
192 | with open(self.sources, "w") as f: 193 | for pkg_source in self._pkg_sources: 194 | source = "{} {} {}".format( 195 | pkg_source.url, 196 | pkg_source.distribution, 197 | pkg_source.component, 198 | ) 199 | 200 | # local packages must be explicitly trusted 201 | if "copy:/mnt" in pkg_source.url: 202 | options = "[arch=amd64 trusted=yes]" 203 | else: 204 | options = "[arch=amd64]" 205 | 206 | print("Using {}".format(source)) 207 | 208 | f.write("deb %s %s\n" % (options, source)) 209 | 210 | # create apt preferences file to always use local packages 211 | with open(os.path.join(self._folder, "etc", "apt", "preferences"), "w") as f: 212 | f.write('Package: *\nPin: origin ""\nPin-Priority: 1100\n\n') 213 | f.write('Package: *\nPin: origin "deb.openswitch.net"\nPin-Priority: 750\n\n') 214 | 215 | # create cache and update it 216 | self._cache = apt.Cache(rootdir=self._folder, memonly=True) 217 | 218 | # set Install-Recommends and Install-Suggests configuration options 219 | apt_pkg.config['APT::Install-Recommends'] = \ 220 | "1" if install_recommends else "0" 221 | apt_pkg.config['APT::Install-Suggests'] = \ 222 | "1" if install_suggests else "0" 223 | 224 | try: 225 | self._cache.update() 226 | except Exception as ex: 227 | print("\nCache update error ignored : %s\n" % (ex)) 228 | 229 | self._cache.open() 230 | 231 | def __enter__(self): 232 | return self 233 | 234 | def __exit__(self, exc_type, exc_value, traceback): 235 | self.close() 236 | 237 | def close(self): 238 | """ 239 | close and clean-up of an object instance 240 | """ 241 | self._cache.close() 242 | if os.path.exists(self.sources + '.save'): 243 | shutil.copy(self.sources + ".save", self.sources) 244 | 245 | def list_packages(self): 246 | """ 247 | List packages available in cache 248 | """ 249 | 250 | print("Packages available are as follows:") 251 | for i in self._cache.keys(): 252 | print(str(i)) 253 | 254 | @property 255 | def _depcache(self): 256 | """ 257 | Dependency cache state accessor 258 | """ 259 | return self._cache._depcache 260 | 261 | def _dump_package(self, pkg): 262 | """ 263 | dump_package 264 | 265 | dump metadata from :class:`apt_pkg.Package` object 266 | """ 267 | 268 | logger.debug("%s:", pkg.name) 269 | logger.debug(" marked_delete: %s", 270 | self._depcache.marked_delete(pkg)) 271 | logger.debug(" marked_downgrade: %s", 272 | self._depcache.marked_downgrade(pkg)) 273 | logger.debug(" marked_install: %s", 274 | self._depcache.marked_install(pkg)) 275 | logger.debug(" marked_keep: %s", 276 | self._depcache.marked_keep(pkg)) 277 | logger.debug(" marked_reinstall: %s", 278 | self._depcache.marked_reinstall(pkg)) 279 | logger.debug(" marked_upgrade: %s", 280 | self._depcache.marked_upgrade(pkg)) 281 | logger.debug(" is_auto_installed: %s", 282 | self._depcache.is_auto_installed(pkg)) 283 | logger.debug(" is_garbage: %s", 284 | self._depcache.is_garbage(pkg)) 285 | logger.debug(" is_inst_broken: %s", 286 | self._depcache.is_inst_broken(pkg)) 287 | logger.debug(" is_now_broken: %s", 288 | self._depcache.is_now_broken(pkg)) 289 | logger.debug(" is_upgradable %s", 290 | self._depcache.is_upgradable(pkg)) 291 | 292 | def _fetch_package(self, pkg, from_user=False, backtrace=[]): 293 | """ 294 | Get the dependencies of the package's desired (candidate) 295 | version and compute the set of dependent packages. If the 296 | dependent package is not already installed, recursively 297 | invoke this function. 
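
        For example (illustrative): if a package depends on both
        "libfoo (>= 1.0)" and "libfoo (<< 2.0)", intersecting the
        all_targets() results of the two dependencies leaves exactly
        the libfoo versions in [1.0, 2.0).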
298 |
299 |         :meth:`apt_pkg.Dependency.all_targets` returns the set of
300 |         dependent package versions that satisfy a dependency.
301 |         However, since a package may have more than one dependency
302 |         for a given dependent package (e.g., one dependency with a
303 |         version floor, another with a version ceiling), we compute
304 |         the set of dependent packages which satisfy all of the
305 |         dependencies.
306 |
307 |         This is done with two dictionaries. pkg_versions is the
308 |         dictionary of all dependent packages and versions, while
309 |         dep_versions is the dictionary of packages and versions
310 |         for a single :class:`apt.pkg.Dependency`.
311 |
312 |         TODO: This function only handles simple dependencies,
313 |         not Breaks, Conflicts, or Replaces.
314 |         """
315 |
316 |         version = self._depcache.get_candidate_ver(pkg)
317 |         logger.debug("version: %s", version)
318 |         logger.debug("  %s", backtrace)
319 |
320 |         if 'Depends' in version.depends_list:
321 |             pkg_versions = dict()
322 |             for or_deps in version.depends_list["Depends"]:
323 |                 logger.debug("or_deps: %s", or_deps)
324 |
325 |                 # In general, this script does not handle "or"
326 |                 # dependencies. However, we have special cased
327 |                 # makedev/udev and debconf/debconf-2.0 to make
328 |                 # it good enough for NGOS image creation until
329 |                 # it can handle them.
330 |                 if len(or_deps) != 1:
331 |                     logger.debug("pre: %s", or_deps)
332 |                     or_deps = [dep for dep in or_deps
333 |                                if dep.target_pkg.name
334 |                                not in ('makedev', 'debconf-2.0')]
335 |                     logger.debug("post: %s", or_deps)
336 |
337 |                 if len(or_deps) != 1:
338 |                     raise OpxPackagesError("Can't handle or-dependencies")
339 |
340 |                 for dep in or_deps:
341 |                     logger.debug("dep: %s", dep)
342 |
343 |                     logger.debug("%s is satisfied by:", dep.target_pkg.name)
344 |                     for v in dep.all_targets():
345 |                         logger.debug("  %s", v)
346 |
347 |                     dep_versions = collections.defaultdict(set)
348 |                     for v in dep.all_targets():
349 |                         dep_versions[dep.target_pkg.name].add(VersionWrapper(v))
350 |
351 |                     for name, versions in dep_versions.items():
352 |                         if name not in pkg_versions:
353 |                             pkg_versions[name] = set(versions)
354 |                         else:
355 |                             pkg_versions[name] &= versions
356 |
357 |             # We now have a list of :class:`apt_pkg.Version` objects that
58 satisfy
358 |             # the dependencies for the package. Next we identify what packages
359 |             # may need to be installed.
360 |             for name, versions in pkg_versions.items():
361 |                 logger.debug("pkg_versions: %s -> %s", pkg.name, name)
362 |                 if len(versions) == 0:
363 |                     raise OpxPackagesError(
364 |                         "Unable to satisfy dependency: %s %s" %
365 |                         (pkg.name, name))
366 |
367 |                 # Identify a list of candidate packages
368 |                 logger.debug("start iterating group")
369 |                 candidate_versions = []
370 |                 sv = sorted(versions, key=lambda x: x._ver.parent_pkg.name)
371 |                 for k, vx in itertools.groupby(sv,
372 |                                                key=lambda x: x._ver.parent_pkg.name):
373 |                     # change vx from an iterator to a list, as we need to
374 |                     # traverse it multiple times
375 |                     vx = list(vx)
376 |
377 |                     # While the library returns the versions in order, the
378 |                     # set operations destroy that order. So use LooseVersion()
379 |                     # from distutils to sort
380 |                     best_v = sorted(vx,
381 |                                     key=lambda x: LooseVersion(x.ver_str),
382 |                                     reverse=True)
383 |
384 |                     logger.debug("%s", k)
385 |                     for v in best_v:
386 |                         logger.debug("  %s", v.ver_str)
387 |
388 |                     best_v = best_v[0]
389 |                     logger.debug("best candidate is %s", best_v)
390 |                     candidate_versions.append(best_v)
391 |                 logger.debug("done iterating group")
392 |
393 |                 # Determine whether any of the candidates are already installed
394 |                 installed = False
395 |                 for v in candidate_versions:
396 |                     dep_pkg = v.parent_pkg
397 |                     if dep_pkg.id in [xpkg.id for xpkg in backtrace]:
398 |                         installed = True
399 |                         break
400 |                     if dep_pkg.current_state != apt_pkg.CURSTATE_NOT_INSTALLED:
401 |                         installed = True
402 |                         break
403 |                     if self._depcache.marked_install(dep_pkg):
404 |                         installed = True
405 |                         break
406 |
407 |                 # If dependent package is not installed, then select the first
408 |                 # (we don't have a mechanism to indicate a preference), then
409 |                 # recurse.
410 |                 if not installed:
411 |                     v = candidate_versions[0]
412 |
413 |                     logger.debug("\t will fetch %s %s",
414 |                                  v.parent_pkg.name, v.ver_str)
415 |
416 |                     self._depcache.set_candidate_ver(dep_pkg, v._ver)
417 |                     self._fetch_package(dep_pkg, backtrace=[pkg]+backtrace)
418 |
419 |         logger.debug("marking %s for install", pkg)
420 |
421 |         try:
422 |             self._depcache.mark_install(pkg, False, from_user)
423 |         except SystemError as ex:
424 |             raise OpxPackagesError, OpxPackagesError(ex), sys.exc_info()[2]
425 |
426 |     def fetch(self, names):
427 |         """
428 |         Fetch packages
429 |
430 |         Fetch specified and all dependent packages.
431 |         """
432 |
433 |         # There may be more than one revision specification for a package.
434 |         # We store them in a list for each package, and we store each list
435 |         # in an ordered dict indexed by the package name. An ordered dict is
436 |         # used to ensure the packages are processed in the specified order.
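        # For example (illustrative):
        #   fetch(["foo (>= 1.0)", "foo (<< 2.0)", "bar"])
        # yields depends == {"foo": [("foo", "1.0", ">="), ("foo", "2.0", "<<")],
        #                    "bar": [("bar", "", "")]}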
437 | 438 | depends = collections.OrderedDict() 439 | for package_name in names: 440 | pkg = apt_pkg.parse_depends(package_name)[0][0] 441 | if pkg[0] not in depends: 442 | depends[pkg[0]] = [] 443 | depends[pkg[0]].append(pkg) 444 | 445 | for package_name in depends.keys(): 446 | try: 447 | pkg = self._cache[package_name] 448 | except KeyError: 449 | msg = "Can't find %s in package cache" % package_name 450 | raise OpxPackagesError, OpxPackagesError(msg), sys.exc_info()[2] 451 | 452 | # find a version that satisfies the revision specification 453 | found = False 454 | for v in pkg.versions: 455 | satisfied = True 456 | 457 | for dep in depends[package_name]: 458 | dep_version = dep[1] 459 | dep_relation = dep[2] 460 | 461 | if not apt_pkg.check_dep(v.version, 462 | dep_relation, 463 | dep_version): 464 | satisfied = False 465 | break 466 | 467 | if satisfied: 468 | found = True 469 | 470 | pkg.candidate = v 471 | if self._default_solver: 472 | # Use default apt_pkg solver 473 | try: 474 | pkg.mark_install(auto_inst=True, 475 | auto_fix=True, 476 | from_user=False) 477 | except SystemError as ex: 478 | raise OpxPackagesError, OpxPackagesError(ex), sys.exc_info()[2] 479 | 480 | if pkg.marked_keep and not pkg.is_installed: 481 | self._dump_package(pkg._pkg) 482 | msg = "Could not install %s due to version conflicts" % package_name 483 | raise OpxPackagesError(msg) 484 | else: 485 | # Use modified solver for handling semantic versioning 486 | self._fetch_package(pkg._pkg) 487 | 488 | break 489 | 490 | if not found: 491 | raise OpxPackagesError("Failed to locate %s that satisfies revision specifications" % package_name) 492 | 493 | if self._depcache.broken_count: 494 | logger.info("Attempting to fix %s broken packages", 495 | self._depcache.broken_count) 496 | try: 497 | self._depcache.fix_broken() 498 | except SystemError: 499 | raise OpxPackagesError("We have broken dependencies") 500 | 501 | # Fetch packages 502 | try: 503 | self._cache.fetch_archives() 504 | except apt.cache.FetchFailedException as ex: 505 | # re-raise exception 506 | msg = "Fetch failed\n{}".format(ex) 507 | raise OpxPackagesError, OpxPackagesError(msg), sys.exc_info()[2] 508 | except apt.cache.FetchCancelledException as ex: 509 | # re-raise exception 510 | msg = "Fetch cancelled" 511 | raise OpxPackagesError, OpxPackagesError(msg), sys.exc_info()[2] 512 | 513 | def install(self): 514 | """ 515 | Install packages 516 | 517 | Install packages in the package cache. 518 | """ 519 | for debfile in [os.path.join(self._apt_cache, f) 520 | for f in os.listdir(self._apt_cache) 521 | if f.endswith('.deb')]: 522 | 523 | l = ["dpkg", "-x", debfile, self._folder] 524 | print(l) 525 | try: 526 | subprocess.check_call(l) 527 | except subprocess.CalledProcessError as ex: 528 | logger.error("dpkg -x %s failed", debfile) 529 | logger.exception(ex) 530 | 531 | def clean(self): 532 | """ 533 | Remove files from package cache 534 | """ 535 | for debfile in [os.path.join(self._apt_cache, f) 536 | for f in os.listdir(self._apt_cache) 537 | if f.endswith('.deb')]: 538 | os.remove(debfile) 539 | 540 | 541 | def main(): 542 | """ Command line class instantiation 543 | the class instance is created based on defaults 544 | and any command line requested options 545 | """ 546 | 547 | # parse command line arguments. 548 | # 549 | # _distribution_ and _component_ select the hierarchy within the 550 | # package repository. 
In an official Debian OS repository, 551 | # _distribution_ names a archive type/state like "unstable" or 552 | # "testing" or a OS codename like "jessie"; and _component_ is 553 | # "main", "contrib", or "non-free". Other package repositories 554 | # use different naming conventions. 555 | 556 | parser = argparse.ArgumentParser() 557 | parser.add_argument('--debug', 558 | help=argparse.SUPPRESS, 559 | action='store_true') 560 | parser.add_argument('--install-recommends', 561 | dest='install_recommends', 562 | help='Consider recommended packages as dependencies for installing', 563 | action='store_true') 564 | parser.add_argument('--no-install-recommends', 565 | dest='install_recommends', 566 | help=argparse.SUPPRESS, 567 | action='store_false') 568 | parser.add_argument('--install-suggests', 569 | dest='install_suggests', 570 | help='Consider suggested packages as dependencies for installing', 571 | action='store_true') 572 | parser.add_argument('--no-install-suggests', 573 | dest='install_suggests', 574 | help=argparse.SUPPRESS, 575 | action='store_false') 576 | parser.add_argument('--download-only', '-d', 577 | help='Download packages, but do not unpack or install them', 578 | action='store_true') 579 | parser.add_argument('-l', '--sysroot', 580 | help="specify system root directory", 581 | default=DEFAULT_SYSROOT) 582 | parser.add_argument('-L', '--sysrootdev', 583 | help="specify development system root directory", 584 | default=DEFAULT_SYSROOTDEV) 585 | parser.add_argument('-u', '--url', 586 | help="package repository URL", 587 | default=DEFAULT_PKG_URL) 588 | parser.add_argument('--distribution', 589 | help="package distribution", 590 | default=DEFAULT_PKG_DISTRIBUTION) 591 | parser.add_argument('--component', 592 | help="package component", 593 | default=DEFAULT_PKG_COMPONENT) 594 | parser.add_argument('-p', '--package_list', 595 | help="comma separated list of packages") 596 | parser.add_argument('--default_solver', action='store_true', 597 | help="Use standard solver to resolve package dependencies") 598 | 599 | args = parser.parse_args() 600 | 601 | if args.debug: 602 | loglevel = logging.DEBUG 603 | else: 604 | loglevel = logging.WARNING 605 | 606 | logging.basicConfig(level=loglevel) 607 | 608 | # instantiate this OpxPackage instance 609 | try: 610 | with OpxPackages(sysroot=args.sysroot, 611 | pkg_sources=[ 612 | OpxPackageSource(args.url, 613 | args.distribution, 614 | args.component), 615 | ], 616 | default_solver=args.default_solver, 617 | sysrootdev=args.sysrootdev, 618 | install_recommends=args.install_recommends, 619 | install_suggests=args.install_suggests) as ar: 620 | 621 | if args.package_list: 622 | ar.fetch(names=args.package_list.split(',')) 623 | if not args.download_only: 624 | ar.install() 625 | else: 626 | ar.list_packages() 627 | 628 | except OpxPackagesError as ex: 629 | print(ex) 630 | return 1 631 | 632 | return 0 633 | 634 | if __name__ == "__main__": 635 | sys.exit(main()) 636 | 637 | # Local Variables: 638 | # tab-width:4 639 | # indent-tabs-mode:nil 640 | # End: 641 | # vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4 : 642 | -------------------------------------------------------------------------------- /scripts/opx_rel_pkgasm.py: -------------------------------------------------------------------------------- 1 | #! 
2 | """
3 | opx_rel_pkgasm.py -- assemble release object from packages
4 | 
5 | Assemble an OPX release from packages
6 | """
7 | 
8 | from __future__ import print_function
9 | 
10 | import argparse
11 | import collections
12 | import datetime
13 | import errno
14 | import fileinput
15 | import glob
16 | import hashlib
17 | import jinja2
18 | import json
19 | import logging
20 | import os
21 | import re
22 | import shutil
23 | import stat
24 | import subprocess
25 | import sys
26 | import time
27 | 
28 | from lxml import etree
29 | from lxml.builder import E
30 | 
31 | import opx_bld_basics
32 | import opx_get_packages
33 | import opx_rootfs
34 | 
35 | build_num = 99999
36 | build_suffix = ""
37 | verbosity = 0
38 | 
39 | def _str2bool(s):
40 | """
41 | Convert string to boolean
42 | 
43 | Used by XML serialization
44 | """
45 | 
46 | s = s.strip().lower()
47 | if s in ["1", "true"]:
48 | return True
49 | if s in ["0", "false"]:
50 | return False
51 | 
52 | raise ValueError("Invalid boolean value %r" % (s))
53 | 
54 | 
55 | def _bool2str(b):
56 | """
57 | Convert boolean to string
58 | 
59 | Used by XML serialization
60 | """
61 | 
62 | return "true" if b else "false"
63 | 
64 | 
65 | def art8601_format(dt):
66 | """
67 | Format datetime object in ISO 8601 format suitable for Artifactory.
68 | 
69 | Artifactory's ISO 8601 timestamp parser is strict. It only accepts
70 | 3 significant digits of sub-second precision (milliseconds) instead
71 | of the 6 significant digits (microseconds) in datetime.isoformat()
72 | output.
73 | 
74 | I've raised a support ticket asking JFrog to consider relaxing
75 | their parser.
76 | 
77 | Code adapted from standard python library.
78 | """
79 | 
80 | s = '%04d-%02d-%02dT%02d:%02d:%02d.%03d' % (
81 | dt.year,
82 | dt.month,
83 | dt.day,
84 | dt.hour,
85 | dt.minute,
86 | dt.second,
87 | dt.microsecond / 1000)
88 | 
89 | utc_offset = dt.utcoffset()
90 | if utc_offset is not None:
91 | if utc_offset.days < 0:
92 | sign = '-'
93 | utc_offset = - utc_offset
94 | else:
95 | sign = '+'
96 | 
97 | hh, mm = divmod(utc_offset.seconds, 3600)
98 | mm //= 60
99 | 
100 | s += "%s%02d%02d" % (sign, hh, mm)
101 | else:
102 | s += "Z"
103 | 
104 | return s
105 | 
106 | 
107 | class OpxRelPackageRestriction(object):
108 | """
109 | Represents a package version restriction.
110 | 
111 | Loosely based on Maven's Restriction object API.
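For illustration only (hypothetical values, not from a real blueprint):
the range notation "[1.2,2.0)" becomes lower_bound="1.2" (inclusive)
and upper_bound="2.0" (exclusive), which toDebian() below renders as
[">=1.2", "<<2.0"].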
112 | """ 113 | def __init__(self, lower_bound, 114 | lower_bound_inclusive, 115 | upper_bound, 116 | upper_bound_inclusive): 117 | self.lower_bound = lower_bound 118 | self.lower_bound_inclusive = lower_bound_inclusive 119 | self.upper_bound = upper_bound 120 | self.upper_bound_inclusive = upper_bound_inclusive 121 | 122 | def toDebian(self): 123 | """ 124 | Return list of version restrictions in Debian format 125 | :returns: list of version specifications 126 | """ 127 | # special case equality 128 | if (self.lower_bound_inclusive and 129 | self.lower_bound == self.upper_bound and 130 | self.upper_bound_inclusive): 131 | return ['=' + self.lower_bound] 132 | 133 | # special case inequality 134 | if (not self.lower_bound_inclusive and 135 | self.lower_bound == self.upper_bound and 136 | not self.upper_bound_inclusive): 137 | return ['!=' + self.lower_bound] 138 | 139 | restrictions = list() 140 | if self.lower_bound is not None: 141 | if self.lower_bound_inclusive: 142 | restrictions.append('>=' + self.lower_bound) 143 | else: 144 | restrictions.append('>>' + self.lower_bound) 145 | if self.upper_bound is not None: 146 | if self.upper_bound_inclusive: 147 | restrictions.append('<=' + self.upper_bound) 148 | else: 149 | restrictions.append('<<' + self.upper_bound) 150 | 151 | return restrictions 152 | 153 | def __str__(self): 154 | """ 155 | Override str method for a pretty format of the data members. 156 | """ 157 | # special case equality 158 | if (self.lower_bound_inclusive and 159 | self.lower_bound == self.upper_bound and 160 | self.upper_bound_inclusive): 161 | return '[' + self.lower_bound + ']' 162 | 163 | # special case inequality 164 | if (not self.lower_bound_inclusive and 165 | self.lower_bound == self.upper_bound and 166 | not self.upper_bound_inclusive): 167 | return '(' + self.lower_bound + ')' 168 | 169 | s = '[' if self.lower_bound_inclusive else '(' 170 | if self.lower_bound is not None: 171 | s += self.lower_bound 172 | s += ',' 173 | if self.upper_bound is not None: 174 | s += self.upper_bound 175 | s += ']' if self.upper_bound_inclusive else ')' 176 | 177 | return s 178 | 179 | 180 | class OpxRelPackage(object): 181 | """ 182 | Defines a package in a :class:`OpxRelPackageSet`. 183 | """ 184 | 185 | def __init__(self, name, restriction): 186 | self.name = name 187 | self.restriction = restriction 188 | 189 | @classmethod 190 | def fromElement(cls, elem): 191 | """ 192 | Construct :class:`OpxRelPackage` object from :class:`etree.Element` 193 | """ 194 | 195 | # Legacy blueprints define the package name and revision 196 | # specification in the text field of the package element. 197 | # Current blueprints use name and version attributes. 198 | # 199 | # .. note:: 200 | # There was some debate whether name and version should 201 | # be attributes or elements in their own right. We used 202 | # attributes for now, but if it turns out we made the 203 | # wrong choice, it's easy enough to change. 
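# Illustrative examples of the two formats (hypothetical package
# name and versions):
#
#   legacy:   <package>opx-foo (>= 1.0.0)</package>
#   current:  <package name="opx-foo" version="[1.0.0,)"/>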
204 | 
205 | if elem.text:
206 | match = re.match(r'\A([a-zA-Z0-9][a-zA-Z0-9+.-]+)\s*(?:\(\s*(<<|<=|!=|=|>=|>>)\s*([0-9][a-z0-9+.:~-]+)\s*\))?\s*\Z', elem.text)
207 | if not match:
208 | raise ValueError("Can't parse version: ->%s<-" % elem.text)
209 | 
210 | name = match.group(1)
211 | relation = match.group(2)
212 | version = match.group(3)
213 | 
214 | restriction = None
215 | 
216 | if relation:
217 | if relation == '<<':
218 | lower_bound = None
219 | lower_bound_inclusive = False
220 | upper_bound = version
221 | upper_bound_inclusive = False
222 | elif relation == '<=':
223 | lower_bound = None
224 | lower_bound_inclusive = False
225 | upper_bound = version
226 | upper_bound_inclusive = True
227 | elif relation == '!=':
228 | lower_bound = version
229 | lower_bound_inclusive = False
230 | upper_bound = version
231 | upper_bound_inclusive = False
232 | elif relation == '=':
233 | lower_bound = version
234 | lower_bound_inclusive = True
235 | upper_bound = version
236 | upper_bound_inclusive = True
237 | elif relation == '>=':
238 | lower_bound = version
239 | lower_bound_inclusive = True
240 | upper_bound = None
241 | upper_bound_inclusive = False
242 | elif relation == '>>':
243 | lower_bound = version
244 | lower_bound_inclusive = False
245 | upper_bound = None
246 | upper_bound_inclusive = False
247 | 
248 | restriction = OpxRelPackageRestriction(
249 | lower_bound,
250 | lower_bound_inclusive,
251 | upper_bound,
252 | upper_bound_inclusive)
253 | 
254 | return OpxRelPackage(name, restriction)
255 | 
256 | name = elem.get('name')
257 | version = elem.get('version')
258 | if not version:
259 | return OpxRelPackage(name, None)
260 | 
261 | match = re.match(r'\A([[(])([0-9][a-z0-9+.:~-]+)?,([0-9][a-z0-9+.:~-]+)?([])])\Z', version)
262 | if match:
263 | restriction = OpxRelPackageRestriction(
264 | match.group(2),
265 | match.group(1) == '[',
266 | match.group(3),
267 | match.group(4) == ']')
268 | return OpxRelPackage(name, restriction)
269 | 
270 | # special case equality
271 | match = re.match(r'\A\[([0-9][a-z0-9+.:~-]+)\]\Z', version)
272 | if match:
273 | restriction = OpxRelPackageRestriction(
274 | match.group(1),
275 | True,
276 | match.group(1),
277 | True)
278 | return OpxRelPackage(name, restriction)
279 | 
280 | # special case inequality
281 | match = re.match(r'\A\(([0-9][a-z0-9+.:~-]+)\)\Z', version)
282 | if match:
283 | restriction = OpxRelPackageRestriction(
284 | match.group(1),
285 | False,
286 | match.group(1),
287 | False)
288 | return OpxRelPackage(name, restriction)
289 | 
290 | raise ValueError("Can't parse version: ->%s<-" % version)
291 | 
292 | def toElement(self):
293 | """
294 | Return :class:`etree.Element` representing :class:`OpxRelPackage`
295 | :returns: :class:`etree.Element`
296 | """
297 | 
298 | attributes = collections.OrderedDict()
299 | attributes['name'] = self.name
300 | if self.restriction:
301 | attributes['version'] = str(self.restriction)
302 | 
303 | return E.package(attributes)
304 | 
305 | def toDebian(self):
306 | """
307 | Return list of package name+version restrictions in Debian format
308 | :returns: list of version specifications for this package
309 | """
310 | if self.restriction is not None:
311 | return ["{}({})".format(self.name, x)
312 | for x in self.restriction.toDebian()]
313 | else:
314 | return [self.name]
315 | 
316 | def __str__(self):
317 | """
318 | Override str method for a pretty format of the data members.
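Illustrative output (hypothetical name and versions): a package
"opx-foo" restricted to >= 1.0 and << 2.0 formats as "opx-foo [1.0,2.0)".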
319 | """ 320 | s = self.name 321 | if self.restriction is not None: 322 | s += " " 323 | s += str(self.restriction) 324 | return s 325 | 326 | 327 | class OpxRelPackageList(object): 328 | """ 329 | Defines a list of packages, each one being an :class:`OpxRelPackage` 330 | """ 331 | def __init__(self, package_list, no_package_filter=False): 332 | self.packages = package_list 333 | self.no_package_filter = no_package_filter 334 | 335 | @classmethod 336 | def fromElement(cls, element): 337 | """ 338 | Construct :class:`OpxRelPackageList` object from :class:`etree.Element` 339 | """ 340 | # no_package_filter is local as this is a classmethod 341 | if element.find('no_package_filter') is not None: 342 | no_package_filter = True 343 | else: 344 | no_package_filter = False 345 | 346 | package_list = [] 347 | for package_elem in element.findall('package'): 348 | package_list.append(OpxRelPackage.fromElement(package_elem)) 349 | 350 | return OpxRelPackageList(package_list, no_package_filter) 351 | 352 | def toElement(self): 353 | """ 354 | Return :class:`etree.Element` representing :class:`OpxRelPackageList` 355 | :returns: :class:`etree.Element` 356 | """ 357 | elem = E.package_list() 358 | 359 | if self.no_package_filter: 360 | elem.append(E.no_package_filter()) 361 | 362 | for package in self.packages: 363 | elem.append(package.toElement()) 364 | 365 | return elem 366 | 367 | 368 | class OpxRelPackageSet(object): 369 | """ 370 | Defines a package set, including a list of packages, 371 | and where to find/get them. 372 | """ 373 | def __init__(self, name, kind, default_solver, platform, flavor, 374 | package_sources, package_lists): 375 | self.name = name 376 | self.kind = kind 377 | self.default_solver = default_solver 378 | self.platform = platform 379 | self.flavor = flavor 380 | self.package_sources = package_sources 381 | self.package_lists = package_lists 382 | 383 | @classmethod 384 | def fromElement(cls, elem): 385 | """ 386 | Construct :class:`OpxRelPackageSet` object from :class:`etree.Element` 387 | """ 388 | 389 | name = elem.find('name').text 390 | kind = elem.find('type').text 391 | 392 | if elem.find('default_solver') is not None: 393 | default_solver = True 394 | else: 395 | default_solver = False 396 | 397 | _tmp = elem.find('platform') 398 | if _tmp is not None: 399 | platform = _tmp.text 400 | else: 401 | platform = None 402 | 403 | _tmp = elem.find('flavor') 404 | if _tmp is not None: 405 | flavor = _tmp.text 406 | else: 407 | flavor = None 408 | 409 | package_sources = [] 410 | for package_desc_elem in elem.findall('package_desc'): 411 | package_sources.append( 412 | opx_get_packages.OpxPackageSource( 413 | package_desc_elem.find('url').text, 414 | package_desc_elem.find('distribution').text, 415 | package_desc_elem.find('component').text, 416 | ) 417 | ) 418 | 419 | package_lists = [] 420 | for package_list_elem in elem.findall('package_list'): 421 | package_lists.append(OpxRelPackageList.fromElement( 422 | package_list_elem)) 423 | 424 | return OpxRelPackageSet(name, kind, default_solver, platform, 425 | flavor, package_sources, package_lists) 426 | 427 | def toElement(self): 428 | """ 429 | Return :class:`etree.Element` representing :class:`OpxRelPackageSet` 430 | :returns: :class:`etree.Element` 431 | """ 432 | 433 | elem = E.package_set( 434 | E.name(self.name), 435 | E.type(self.kind) 436 | ) 437 | 438 | if self.default_solver: 439 | elem.append(E.default_solver()) 440 | 441 | if self.platform is not None: 442 | elem.append(E.platform(self.platform)) 443 | 444 | if 
self.flavor is not None:
445 | elem.append(E.flavor(self.flavor))
446 | 
447 | for package_source in self.package_sources:
448 | elem.append(
449 | E.package_desc(
450 | E.url(package_source.url),
451 | E.distribution(package_source.distribution),
452 | E.component(package_source.component),
453 | )
454 | )
455 | 
456 | elem.extend([package_list.toElement()
457 | for package_list in self.package_lists])
458 | 
459 | return elem
460 | 
461 | def __str__(self):
462 | """
463 | Override str method for a pretty format of the data members.
464 | 
465 | """
466 | mstr = "\n" + self.__class__.__name__
467 | mstr += " is an OpxRelPackageSet() instance\n"
468 | mstr += "\t" + self.name + "\n"
469 | mstr += "\twhich is a " + self.kind + "\n"
470 | mstr += "\tsources:\n"
471 | for src in self.package_sources:
472 | mstr += "\t\t%s [%s,%s]\n" % (
473 | src.url,
474 | src.distribution,
475 | src.component
476 | )
477 | mstr += "\tpackages:\n"
478 | for pkg_list in self.package_lists:
479 | for pkg in pkg_list.packages:
480 | mstr += "\t\t" + str(pkg) + "\n"
481 | mstr += "\n"
482 | 
483 | return mstr
484 | 
485 | 
486 | class OpxRelInstHook(object):
487 | """
488 | Installation hook file in an OPX release
489 | """
490 | def __init__(self, hook_file):
491 | hook_file_path = os.path.join('opx-onie-installer', 'inst-hooks',
492 | hook_file)
493 | if not os.path.exists(hook_file_path):
494 | print("Hook file %s does not exist" % hook_file_path,
495 | file=sys.stderr)
496 | sys.exit(1)
497 | 
498 | if not os.access(hook_file_path, os.X_OK):
499 | print("Hook file %s is not executable" % hook_file_path,
500 | file=sys.stderr)
501 | sys.exit(1)
502 | 
503 | self.hook_file = hook_file
504 | self.hook_file_path = hook_file_path
505 | 
506 | @classmethod
507 | def fromElement(cls, elem):
508 | """
509 | Construct :class:`OpxRelInstHook` from :class:`etree.Element`
510 | """
511 | hook_file = elem.text
512 | return OpxRelInstHook(hook_file)
513 | 
514 | def toElement(self):
515 | """
516 | Return :class:`etree.Element` representing :class:`OpxRelInstHook`
517 | :returns: :class:`etree.Element`
518 | """
519 | return E.inst_hook(self.hook_file)
520 | 
521 | 
522 | class OpxRelBlueprint(object):
523 | """
524 | Blueprint to create an OPX release
525 | """
526 | def __init__(self, description, package_type,
527 | platform, architecture, installer_suffix, version,
528 | rootfs, output_format, package_sets, inst_hooks):
529 | 
530 | self.description = description
531 | self.package_type = package_type
532 | self.platform = platform
533 | self.architecture = architecture
534 | self.installer_suffix = installer_suffix
535 | self.version = version
536 | self.rootfs = rootfs
537 | self.output_format = output_format
538 | self.package_sets = package_sets
539 | self.inst_hooks = inst_hooks
540 | self.validate()
541 | 
542 | def validate(self):
543 | """
544 | Validate OpxRelBlueprint object
545 | 
546 | Currently prints an error and exits on an invalid object. Should
547 | probably throw an exception.
548 | """
549 | # Ensure that only one of the ONIE outputs is selected as they
550 | # are mutually exclusive ...
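# (i.e. at most one of output_format['ONIE_pkg'] and
# output_format['ONIE_image'] may be true)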
551 | if self.output_format['ONIE_pkg'] and self.output_format['ONIE_image']:
552 | print("ONIE pkg and image mutually exclusive - both true",
553 | file=sys.stderr)
554 | sys.exit(1)
555 | 
556 | @classmethod
557 | def fromElement(cls, elem, dist):
558 | """
559 | Construct :class:`OpxRelBlueprint` object from :class:`etree.Element`
560 | """
561 | 
562 | try:
563 | description = elem.find('description').text
564 | package_type = elem.find('package_type').text
565 | platform = elem.find('platform').text
566 | architecture = elem.find('architecture').text
567 | installer_suffix = elem.find('installer_suffix').text
568 | version = elem.find('version').text
569 | except AttributeError:
570 | print("We were unable to find a required value in the XML. Verify the blueprint XML provides the following "
571 | "values:\n- description\n- package_type\n- platform\n- architecture\n- installer_suffix\n- version")
572 | sys.exit(1)
573 | 
574 | rootfs_elem = elem.find('rootfs')
575 | rootfs = {
576 | 'tar_name': rootfs_elem.find('tar_name').text,
577 | 'source': rootfs_elem.find('source').text,
578 | 'location': rootfs_elem.find('location').text,
579 | 'url': os.path.join(rootfs_elem.find('source').text,
580 | rootfs_elem.find('tar_name').text),
581 | }
582 | 
583 | rootfs_md5_elem = rootfs_elem.find('md5')
584 | if rootfs_md5_elem is not None:
585 | rootfs['md5'] = rootfs_md5_elem.text
586 | else:
587 | rootfs['md5'] = None
588 | 
589 | rootfs_sha1_elem = rootfs_elem.find('sha1')
590 | if rootfs_sha1_elem is not None:
591 | rootfs['sha1'] = rootfs_sha1_elem.text
592 | else:
593 | rootfs['sha1'] = None
594 | 
595 | output_elem = elem.find('output_format')
596 | output_format = {
597 | 'name': output_elem.find('name').text,
598 | 'version': output_elem.find('version').text,
599 | 'tar_archive': _str2bool(output_elem.find('tar_archive').text),
600 | 'ONIE_image': _str2bool(output_elem.find('ONIE_image').text),
601 | 'ONIE_pkg': _str2bool(output_elem.find('ONIE_pkg').text),
602 | 'package_cache': _str2bool(output_elem.find('package_cache').text),
603 | }
604 | 
605 | package_sets = []
606 | for package_set_elem in elem.findall('package_set'):
607 | package_sets.append(OpxRelPackageSet.fromElement(package_set_elem))
608 | 
609 | for p in package_sets:
610 | for s in p.package_sources:
611 | if ('copy:/mnt' in s.url or ('openswitch.net' in s.url and
612 | 'contrib' not in s.url)):
613 | s.distribution = dist
614 | 
615 | inst_hooks = []
616 | for hook_elem in elem.findall('inst_hook'):
617 | inst_hooks.append(OpxRelInstHook.fromElement(hook_elem))
618 | 
619 | return OpxRelBlueprint(description, package_type,
620 | platform, architecture, installer_suffix, version,
621 | rootfs, output_format, package_sets,
622 | inst_hooks)
623 | 
624 | def toElement(self):
625 | """
626 | Return :class:`etree.Element` representing :class:`OpxRelBlueprint`
627 | :returns: :class:`etree.Element`
628 | """
629 | 
630 | elem = E.blueprint(
631 | E.description(self.description),
632 | E.package_type(self.package_type),
633 | E.platform(self.platform),
634 | E.architecture(self.architecture),
635 | E.installer_suffix(self.installer_suffix),
636 | E.version(self.version),
637 | E.rootfs(
638 | E.tar_name(self.rootfs['tar_name']),
639 | E.source(self.rootfs['source']),
640 | E.location(self.rootfs['location'])
641 | ),
642 | E.output_format(
643 | E.name(self.output_format['name']),
644 | E.version(self.output_format['version']),
645 | E.tar_archive(_bool2str(self.output_format['tar_archive'])),
646 | E.ONIE_image(_bool2str(self.output_format['ONIE_image'])),
647 | E.ONIE_pkg(_bool2str(self.output_format['ONIE_pkg'])),
648 | E.package_cache(_bool2str(self.output_format['package_cache']))
649 | ),
650 | *[s.toElement() for s in self.package_sets]
651 | )
652 | 
653 | elem.extend([s.toElement() for s in self.inst_hooks])
654 | 
655 | return elem
656 | 
657 | @classmethod
658 | def load_xml(cls, fd_, dist):
659 | tree = etree.parse(fd_)
660 | tree.xinclude()
661 | root = tree.getroot()
662 | return OpxRelBlueprint.fromElement(root, dist)
663 | 
664 | def dumps_xml(self):
665 | root = etree.Element('blueprint',
666 | nsmap={'xi': 'http://www.w3.org/2001/XInclude'})
667 | root.extend(self.toElement())
668 | 
669 | return etree.tostring(root, xml_declaration=True, pretty_print=True)
670 | 
671 | def dump_xml(self, fd_):
672 | root = etree.Element('blueprint',
673 | nsmap={'xi': 'http://www.w3.org/2001/XInclude'})
674 | root.extend(self.toElement())
675 | 
676 | tree = etree.ElementTree(root)
677 | tree.write(fd_, xml_declaration=True, pretty_print=True)
678 | 
679 | # consider adding iterator class extensions so we can iterate
680 | # through a set to get a coherent set of releases, that is,
681 | # create a set of release plans that can be executed in order
682 | # with a single set of blueprints
683 | def __str__(self):
684 | """
685 | Override the str method to get formatted output,
686 | possibly to dump information into a formal log
687 | """
688 | mstr = self.__class__.__name__
689 | mstr += " is an OpxRelBlueprint()\n"
690 | mstr += self.description + "\n"
691 | mstr += "a collection of " + self.package_type + " packages\n"
692 | mstr += "Version:" + self.version + "\n"
693 | 
694 | mstr += "root file system descriptor:\n"
695 | mstr += "\turl = %s\n" % (self.rootfs['url'])
696 | if self.rootfs['md5']:
697 | mstr += "\tmd5 = %s\n" % (self.rootfs['md5'])
698 | if self.rootfs['sha1']:
699 | mstr += "\tsha1 = %s\n" % (self.rootfs['sha1'])
700 | mstr += "\tlocation = %s\n" % (self.rootfs['location'])
701 | 
702 | # print in order of creation by make_output
703 | name = '{}-{}{}{}'.format(
704 | self.output_format['name'],
705 | self.output_format['version'],
706 | '.{}'.format(build_num) if build_num != 0 else '',
707 | build_suffix,
708 | )
709 | 
710 | mstr += "creates:\n"
711 | if self.output_format['package_cache']:
712 | mstr += "\t" + name + "--pkg_cache.tgz\n"
713 | if self.output_format['ONIE_image'] or self.output_format['ONIE_pkg']:
714 | mstr += "\t" + name + "-installer-.bin\n"
715 | if self.output_format['tar_archive']:
716 | mstr += "\t" + name + "--rootfs.tgz\n"
717 | 
718 | for p in self.package_sets:
719 | mstr += p.__str__()
720 | 
721 | return mstr
722 | 
723 | 
724 | class OpxRelPackageAssembler(object):
725 | """
726 | Create images as directed by blueprint
727 | """
728 | 
729 | def __init__(self, blueprint):
730 | self._blueprint = blueprint
731 | self.artifacts = []
732 | self.dependencies = []
733 | 
734 | # .. todo:: Need to assert current directory is ${PROJROOT} and
735 | # the opx-onie-installer repository is present
736 | 
737 | # Create rootfs object
738 | #
739 | # .. todo:: Need to add a SHA or MD5 entry to the blueprint,
740 | # so we can validate the integrity of the rootfs tar archive.
741 | 742 | self._root_obj = opx_rootfs.Opxrootfs( 743 | rootfs_path=None, 744 | rootfs_url=self._blueprint.rootfs['url'], 745 | rootfs_md5=self._blueprint.rootfs['md5'], 746 | rootfs_sha1=self._blueprint.rootfs['sha1']) 747 | 748 | if verbosity >= 2: 749 | pathname = self._root_obj.rootpath('etc', 'passwd') 750 | try: 751 | with open(pathname, 'r') as pwd: 752 | print("") 753 | print("AFTER: %s:" % pathname) 754 | for line in pwd.readlines(): 755 | print(line.strip()) 756 | print("") 757 | except: 758 | print("WARNING: AFTER, Can't read %s" % pathname) 759 | 760 | def get_version_info(self): 761 | """ 762 | Determine the version based on Bamboo environment variables. 763 | """ 764 | 765 | current_time = time.time() 766 | try: 767 | build_date = os.environ['bamboo_buildTimeStamp'] 768 | except KeyError: 769 | build_date = time.strftime('%FT%T%z') 770 | 771 | current_localtime = time.localtime(current_time) 772 | current_year = current_localtime.tm_year 773 | copyright_string = "Copyright (c) 1999-%4d by Dell EMC Inc. All Rights Reserved." \ 774 | % (current_year) 775 | 776 | version_info = {} 777 | version_info['name'] = self._blueprint.output_format['name'] 778 | version_info['version'] = self._blueprint.output_format['version'] 779 | version_info['build_num'] = build_num 780 | version_info['build_suffix'] = build_suffix 781 | version_info['platform'] = self._blueprint.platform 782 | version_info['architecture'] = self._blueprint.architecture 783 | version_info['bp_description'] = self._blueprint.description 784 | version_info['bp_version'] = self._blueprint.version 785 | version_info['build_date'] = build_date 786 | version_info['copyright'] = copyright_string 787 | 788 | return version_info 789 | 790 | def determine_version_info(self): 791 | """ 792 | Determine the version based on Bamboo environment variables. 793 | """ 794 | 795 | version_data = self.get_version_info() 796 | 797 | version_info = list() 798 | version_info.append('COPYRIGHT="%s"' % (version_data['copyright'])) 799 | version_info.append('OS_NAME="Dell EMC Networking %s"' 800 | % (version_data['name'])) 801 | version_info.append('OS_VERSION="%s"' % (version_data['version'])) 802 | version_info.append('PLATFORM="%s"' % (version_data['platform'])) 803 | version_info.append('ARCHITECTURE="%s"' 804 | % (version_data['architecture'])) 805 | version_info.append('INTERNAL_BUILD_ID="%s %s"' 806 | % (version_data['bp_description'], 807 | version_data['bp_version'])) 808 | version_info.append('BUILD_VERSION="%s(%d)%s"' % ( 809 | version_data['version'], 810 | version_data['build_num'], 811 | version_data['build_suffix'], 812 | ) 813 | ) 814 | version_info.append('BUILD_DATE="%s"' % (version_data['build_date'])) 815 | 816 | return version_info 817 | 818 | def set_installer_version_env(self, version_info): 819 | """ 820 | Set environment variables used by onie-mk-opx.sh. 821 | """ 822 | for line in version_info: 823 | (name, val) = re.split(r'=', line, maxsplit=1) 824 | os.environ['INSTALLER_%s' % (name)] = val 825 | sys.stderr.write("INFO: Set os.environ['INSTALLER_%s']=%s.\n" 826 | % (name, val)) 827 | 828 | def write_etc_version_file(self, version_info): 829 | """ 830 | Write /etc/OPX-release-version file. 
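Illustrative contents (hypothetical values; the real lines come from
determine_version_info() and the blueprint):

    OS_NAME="Dell EMC Networking OPX"
    OS_VERSION="3.0.0"
    BUILD_VERSION="3.0.0(99999)"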
831 | """ 832 | 833 | ar_v_filename = self._root_obj.rootpath("etc", "OPX-release-version") 834 | try: 835 | with open(ar_v_filename, 'w') as ar_v_file: 836 | for line in version_info: 837 | ar_v_file.write("%s\n" % (line)) 838 | os.fchmod(ar_v_file.fileno(), 0644) 839 | subprocess.call(['/bin/ls', '-l', ar_v_filename]) 840 | except IOError, msg: 841 | print("WARNING: Can't write '%s' : %s, in %s" 842 | % (ar_v_filename, msg, os.getcwd())) 843 | 844 | def write_installer_file(self): 845 | """ 846 | Write /root/install_opx.sh file. 847 | """ 848 | print("write_installer_file(self)") 849 | 850 | opx_install_file = self._root_obj.rootpath("root", "install_opx.sh") 851 | 852 | # Create jinja2 environment, used for template expansion 853 | env = jinja2.Environment(loader=jinja2.FileSystemLoader( 854 | os.path.join(os.path.dirname(__file__), 'templates'))) 855 | template = env.get_template('install_opx_sh') 856 | 857 | template_params = {} 858 | template_params['package_sets'] = [] 859 | for pks in self._blueprint.package_sets: 860 | pks_params = {} 861 | pks_params['name'] = pks.name 862 | pks_params['platform'] = pks.platform 863 | pks_params['flavor'] = pks.flavor 864 | 865 | packages = [] 866 | for pkg_list in pks.package_lists: 867 | for pkg in pkg_list.packages: 868 | packages.append(pkg.name) 869 | 870 | pks_params['packages'] = packages 871 | 872 | template_params['package_sets'].append(pks_params) 873 | 874 | # Save version info into template 875 | template_params['release'] = self.get_version_info() 876 | 877 | try: 878 | with open(opx_install_file, 'w') as fd_: 879 | fd_.write(template.render(template_params)) 880 | 881 | os.chmod(opx_install_file, stat.S_IRWXU | stat.S_IRWXG) 882 | 883 | except IOError, msg: 884 | print("WARNING: Can't write '%s' : %s, in %s" 885 | % (opx_install_file, msg, os.getcwd())) 886 | 887 | def filter_packages(self): 888 | """ 889 | filter_packages() check the list of packages against the installed 890 | packages in the rootfs, remove any that are already present 891 | """ 892 | print("filter_packages(self)") 893 | 894 | rootfs_package_list = self._root_obj.installed_packages() 895 | 896 | for pks in self._blueprint.package_sets: 897 | for pkg_list in pks.package_lists: 898 | # Filter out packages that are already in rootfs 899 | pkg_list.packages = [package for package in pkg_list.packages 900 | if pkg_list.no_package_filter 901 | or package.name not in rootfs_package_list] 902 | 903 | def update_rootfs(self): 904 | """ 905 | update_rootfs() updates the package list from the upstream 906 | and upgrades the packages to the latest available versions, 907 | in order to address upgrades/security patches issued since 908 | the rootfs was originally created. 
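The actual work is delegated to the templates/do_apt_upgrade_sh
script, which is executed inside the rootfs via do_chroot() below.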
909 | """ 910 | print("update_rootfs(self)") 911 | 912 | script_nm = os.path.join(os.path.dirname(__file__), 913 | 'templates', 'do_apt_upgrade_sh') 914 | self._root_obj.do_chroot(script_nm) 915 | 916 | 917 | def add_packages(self): 918 | """ 919 | add_packages() add packages selected to the package cache 920 | iterates through the sets of packages 921 | """ 922 | print("add_packages(self)") 923 | 924 | for pks in self._blueprint.package_sets: 925 | deb_package_list = [] 926 | for pkg_list in pks.package_lists: 927 | for package in pkg_list.packages: 928 | deb_package_list.extend(package.toDebian()) 929 | 930 | if verbosity > 1: 931 | print('Load %s of %s' % (pks.name, pks.kind)) 932 | 933 | for package_source in pks.package_sources: 934 | print('from %s [%s,%s] %s' % (package_source.url, 935 | package_source.distribution, 936 | package_source.component, 937 | '(default solver)' 938 | if pks.default_solver else '')) 939 | 940 | print('Loading') 941 | print(deb_package_list) 942 | 943 | # fetch the packages from this package set 944 | with opx_get_packages.OpxPackages( 945 | sysroot=self._root_obj.rootpath(), 946 | pkg_sources=pks.package_sources, 947 | default_solver=pks.default_solver) \ 948 | as packer: 949 | packer.fetch(names=deb_package_list) 950 | 951 | # list all packages that have been fetched 952 | if verbosity > 2: 953 | for mfn in self._root_obj.listdir( 954 | os.path.join("var", "cache", "apt", "archives")): 955 | print(mfn) 956 | 957 | def verify_packages(self): 958 | """ 959 | verify_packages() checks that all the packages listed in the 960 | blueprint are present in the rootfs 961 | """ 962 | print("verify_packages(self)") 963 | 964 | # Populate the package list with the packages from the blueprint 965 | deb_package_list = set() 966 | for pks in self._blueprint.package_sets: 967 | for pkg_list in pks.package_lists: 968 | for package in pkg_list.packages: 969 | deb_package_list.add(package.name) 970 | 971 | if verbosity > 2: 972 | print("Expected package list: %s" % sorted(list(deb_package_list))) 973 | 974 | # Get the list of packages in the rootfs 975 | # This assumes that the packages are available in 976 | # /var/cache/apt/archives/ 977 | packages_path = self._root_obj.rootpath('var', 'cache', 'apt', 978 | 'archives') 979 | rootfs_package_list = set(os.path.basename(pkg).split('_')[0] for pkg in 980 | glob.glob(os.path.join(packages_path, '*.deb'))) 981 | 982 | if verbosity > 2: 983 | print("Downloaded package list: %s" 984 | % sorted(list(rootfs_package_list))) 985 | 986 | # Check if the rootfs_package_list is a superset of deb_package_list 987 | if not rootfs_package_list.issuperset(deb_package_list): 988 | print('Missing packages %s' % 989 | list(deb_package_list.difference(rootfs_package_list))) 990 | raise ValueError('Could not find all packages') 991 | 992 | def install_packages(self): 993 | """ 994 | install_packages() -- install packages from package 995 | cache created above 996 | """ 997 | print("install_packages(self)") 998 | if verbosity > 1: 999 | print("Create the script") 1000 | 1001 | script_nm = os.path.join(os.path.dirname(__file__), 1002 | 'templates', 1003 | 'do_dpkg_sh') 1004 | 1005 | # We don't install packages if we are creating 1006 | # the ONIE installer with package cache payload 1007 | if not self._blueprint.output_format['ONIE_pkg']: 1008 | self._root_obj.do_chroot(script_nm) 1009 | version_info = self.determine_version_info() 1010 | self.write_etc_version_file(version_info) 1011 | self.set_installer_version_env(version_info) 1012 | 1013 | def 
add_artifact(self, pathname):
1014 | '''
1015 | Record artifact for Artifactory build-info metadata
1016 | '''
1017 | with open(pathname, 'rb') as f:
1018 | # Compute hashes
1019 | h_md5 = hashlib.md5()
1020 | h_sha1 = hashlib.sha1()
1021 | buf = f.read(8192)
1022 | while len(buf) > 0:
1023 | h_md5.update(buf)
1024 | h_sha1.update(buf)
1025 | buf = f.read(8192)
1026 | 
1027 | artifact = {
1028 | 'name': os.path.basename(pathname),
1029 | 'md5': h_md5.hexdigest(),
1030 | 'sha1': h_sha1.hexdigest(),
1031 | }
1032 | 
1033 | if pathname.endswith('.bin'):
1034 | artifact['type'] = 'bin'
1035 | elif pathname.endswith(('.tgz', '.tar.gz')):
1036 | artifact['type'] = 'tgz'
1037 | 
1038 | self.artifacts.append(artifact)
1039 | 
1040 | def add_dependency(self, pathname):
1041 | '''
1042 | Record dependency for Artifactory build-info metadata
1043 | '''
1044 | 
1045 | h_md5 = self._root_obj.compute_md5(pathname)
1046 | h_sha1 = self._root_obj.compute_sha1(pathname)
1047 | 
1048 | dependency = {
1049 | 'id': os.path.basename(pathname),
1050 | 'md5': h_md5.hexdigest(),
1051 | 'sha1': h_sha1.hexdigest(),
1052 | }
1053 | 
1054 | if pathname.endswith('.deb'):
1055 | dependency['type'] = 'deb'
1056 | elif pathname.endswith(('.tgz', '.tar.gz')):
1057 | dependency['type'] = 'tgz'
1058 | 
1059 | self.dependencies.append(dependency)
1060 | 
1061 | def copy_inst_hooks(self, dist):
1062 | """
1063 | copy_inst_hooks() -- copy the postinst hooks from
1064 | the blueprint folder to the rootfs
1065 | """
1066 | print("copy_inst_hooks(self)")
1067 | 
1068 | destpath = self._root_obj.rootpath('root', 'hooks')
1069 | 
1070 | # Create the directory
1071 | try:
1072 | os.makedirs(destpath)
1073 | except OSError as exc:
1074 | if exc.errno == errno.EEXIST and os.path.isdir(destpath):
1075 | pass
1076 | else:
1077 | raise
1078 | 
1079 | for hook in self._blueprint.inst_hooks:
1080 | shutil.copy(hook.hook_file_path, destpath)
1081 | 
1082 | # Change distribution in apt inst-hook
1083 | # TODO: Do this in python, not sed
1084 | if hook.hook_file == '98-set-apt-sources.postinst.sh':
1085 | new_hook = "{}/{}".format(destpath, hook.hook_file)
1086 | subprocess.check_call([
1087 | "/bin/sed",
1088 | "-i",
1089 | "s#openswitch.net/jessie unstable#openswitch.net/jessie {}#g".format(dist),
1090 | new_hook,
1091 | ])
1092 | if hook.hook_file == '98-set-apt-sources.stretch.postinst.sh':
1093 | new_hook = "{}/{}".format(destpath, hook.hook_file)
1094 | subprocess.check_call([
1095 | "/bin/sed",
1096 | "-i",
1097 | "s#openswitch.net/stretch unstable#openswitch.net/stretch {}#g".format(dist),
1098 | new_hook,
1099 | ])
1100 | 
1101 | 
1102 | def make_output(self):
1103 | """
1104 | make_output() -- create requested output
1105 | depends on the plan, tests its request
1106 | as True or False for package cache archive,
1107 | ONIE installer, or tar gzip archive of rootfs
1108 | """
1109 | 
1110 | print("make_output(self)")
1111 | 
1112 | nm_prefix = '{}{}-{}{}{}'.format(
1113 | 'PKGS_' if self._blueprint.output_format['ONIE_pkg'] else '',
1114 | self._blueprint.output_format['name'],
1115 | self._blueprint.output_format['version'],
1116 | '.{}'.format(build_num) if build_num != 0 else '',
1117 | build_suffix
1118 | )
1119 | 
1120 | nm_suffix = self._blueprint.installer_suffix
1121 | 
1122 | pkgcache_path = "%s-%s-pkg_cache.tgz" % (nm_prefix, nm_suffix)
1123 | 
1124 | rootfs_path = "%s-%s-rootfs.tgz" % (nm_prefix, nm_suffix)
1125 | 
1126 | image_path = "%s-installer-%s.bin" % (nm_prefix, nm_suffix)
1127 | 
1128 | # need to make a list of dir entries that end with .deb
1129 | path = os.path.join('var', 'cache', 'apt', 'archives')
1130 | flist = [fnm for fnm in self._root_obj.listdir(path)
1131 | if fnm.endswith('.deb')]
1132 | if verbosity > 2:
1133 | print(flist)
1134 | 
1135 | if flist:
1136 | for f in flist:
1137 | self.add_dependency(os.path.join(path, f))
1138 | 
1139 | if self._blueprint.output_format['package_cache']:
1140 | if verbosity > 1:
1141 | print("INFO: creating %s\n" % pkgcache_path)
1142 | sys.stdout.flush()
1143 | 
1144 | try:
1145 | self._root_obj.tar_out(pkgcache_path,
1146 | directory=path, files=flist)
1147 | except opx_rootfs.OpxrootfsError as ex:
1148 | print("ERROR: package cache creation failed: %s" % (ex))
1149 | raise
1150 | 
1151 | self.add_artifact(pkgcache_path)
1152 | 
1153 | # clean out the debian package cache before we
1154 | # build the other (possibly) requested items
1155 | if not self._blueprint.output_format['ONIE_pkg']:
1156 | if verbosity > 1:
1157 | print("INFO: removing files from the package cache")
1158 | sys.stdout.flush()
1159 | 
1160 | for debfn in flist:
1161 | try:
1162 | self._root_obj.remove(os.path.join(path, debfn))
1163 | except opx_rootfs.OpxrootfsError as ex:
1164 | print("WARNING: for Opxrootfs.remove(%s), ignoring %s."
1165 | % (path, ex))
1166 | else:
1167 | rootpath = self._root_obj.rootpath(path)
1168 | opx_bld_basics.gen_package_list(rootpath)
1169 | 
1170 | # add my ONIE installer package repository to sources.list
1171 | # over-writes the existing one, with the expectation that
1172 | # the installer can put the saved version back in place
1173 | fnm = self._root_obj.rootpath('etc', 'apt', 'sources.list.d',
1174 | 'installer.list')
1175 | try:
1176 | with open(fnm, 'w+') as fd_:
1177 | # Marker so it can be easily removed
1178 | fd_.write('#-ONIE REMOVE START\n')
1179 | fd_.write('# Added for special installer use\n')
1180 | fd_.write('deb file:/var/cache/apt/archives ./\n')
1181 | except IOError as ex:
1182 | print(ex)
1183 | raise
1184 | 
1185 | # Write the installer file
1186 | self.write_installer_file()
1187 | 
1188 | # Clean out apt state
1189 | # -- rootfs should not reference our package sources.
1190 | for path in [
1191 | os.path.join('etc', 'apt', 'sources.list'),
1192 | os.path.join('etc', 'apt', 'sources.list.save'),
1193 | os.path.join('var', 'cache', 'apt', 'pkgcache.bin'),
1194 | ]:
1195 | if self._root_obj.exists(path):
1196 | if verbosity > 1:
1197 | print("INFO: removing %s" % path)
1198 | sys.stdout.flush()
1199 | 
1200 | try:
1201 | self._root_obj.remove(path)
1202 | except opx_rootfs.OpxrootfsError as ex:
1203 | print("WARNING: for Opxrootfs.remove(%s), ignoring %s."
1204 | % (path, ex))
1205 | 
1206 | # Remove any artifacts left in the rootfs image /tmp directory
1207 | for fnm in self._root_obj.listdir('/tmp'):
1208 | path = os.path.join('/tmp', fnm)
1209 | 
1210 | if verbosity > 1:
1211 | print("INFO: removing %s" % path)
1212 | sys.stdout.flush()
1213 | 
1214 | if self._root_obj.isdir(path):
1215 | try:
1216 | self._root_obj.rmtree(path)
1217 | except opx_rootfs.OpxrootfsError as ex:
1218 | print("WARNING: for Opxrootfs.rmtree(%s), ignoring %s."
1219 | % (path, ex))
1220 | else:
1221 | try:
1222 | self._root_obj.remove(path)
1223 | except opx_rootfs.OpxrootfsError as ex:
1224 | print("WARNING: for Opxrootfs.remove(%s), ignoring %s."
1225 | % (path, ex)) 1226 | 1227 | # these output formats use a rootfs tar gzipped archive 1228 | # so build it here, package cache just pulls its contents 1229 | # out, but doesn't need to archive the root image 1230 | if (self._blueprint.output_format['ONIE_image'] 1231 | or self._blueprint.output_format['ONIE_pkg'] 1232 | or self._blueprint.output_format['tar_archive']): 1233 | if verbosity > 1: 1234 | print("INFO: creating %s\n" % (rootfs_path)) 1235 | sys.stdout.flush() 1236 | 1237 | try: 1238 | self._root_obj.tar_out(rootfs_path) 1239 | except opx_rootfs.OpxrootfsError as ex: 1240 | # Try to address the 'too many levels of symbolic links' 1241 | # errors, which seem to happen randomly. 1242 | print("WARNING: First attempt to create tar file failed: %s" 1243 | % (ex)) 1244 | try: 1245 | self._root_obj.tar_out(rootfs_path) 1246 | except opx_rootfs.OpxrootfsError as ex: 1247 | print("ERROR: Second attempt to create tar file failed: %s" 1248 | % (ex)) 1249 | raise 1250 | 1251 | # to create the ONIE image, we use the current sysroot 1252 | # archive to create the ONIE installer, create what 1253 | # would be the output of the open-source-rootfs build 1254 | # and use ngos.sh to build the ONIE installer image 1255 | if (self._blueprint.output_format['ONIE_image'] 1256 | or self._blueprint.output_format['ONIE_pkg']): 1257 | if verbosity > 1: 1258 | print("creating %s\n" % (image_path)) 1259 | sys.stdout.flush() 1260 | 1261 | cmd = ['opx-onie-installer/onie/onie-mk-opx.sh', 1262 | self._blueprint.architecture, 1263 | image_path, 1264 | rootfs_path 1265 | ] 1266 | if verbosity > 2: 1267 | print(cmd) 1268 | 1269 | try: 1270 | subprocess.check_call(cmd) 1271 | except subprocess.CalledProcessError as ex: 1272 | print("ERROR: image creation failed: %s" % (ex)) 1273 | raise 1274 | 1275 | self.add_artifact(image_path) 1276 | 1277 | if self._blueprint.output_format['tar_archive']: 1278 | self.add_artifact(rootfs_path) 1279 | else: 1280 | # If blueprint was not set to generate rootfs tarball, 1281 | # should we remove it? 
1282 | pass 1283 | 1284 | 1285 | def index_local_packages(dist): 1286 | """Run the idx-pkgs script from opx-build/scripts with the correct dist.""" 1287 | cmd = '{}/idx-pkgs'.format(os.path.dirname(__file__)) 1288 | try: 1289 | subprocess.check_call([cmd, dist]) 1290 | except subprocess.CalledProcessError as ex: 1291 | print("ERROR: indexing local packages failed: %s" % (ex)) 1292 | raise 1293 | 1294 | 1295 | 1296 | def main(): 1297 | """ 1298 | command line method to assemble a release from packages 1299 | """ 1300 | 1301 | start_timestamp = datetime.datetime.now() 1302 | 1303 | parser = argparse.ArgumentParser() 1304 | parser.add_argument('--debug', 1305 | help=argparse.SUPPRESS, 1306 | action='store_true') 1307 | parser.add_argument('-b', help="specify location of release blue-print", 1308 | required=True) 1309 | parser.add_argument('-n', help="specify build number of release", 1310 | type=int, default=9999) 1311 | parser.add_argument('-s', help="specify release number suffix", 1312 | type=str, default="") 1313 | parser.add_argument('-v', help="specify verbosity level", 1314 | type=int, default=0) 1315 | parser.add_argument('--build-info', 1316 | help="specify location of build-info json output") 1317 | parser.add_argument('--build-url') 1318 | parser.add_argument('--vcs-url') 1319 | parser.add_argument('--vcs-revision') 1320 | parser.add_argument( 1321 | '-d', '--dist', 1322 | help="Distribution to build", 1323 | default='unstable' 1324 | ) 1325 | 1326 | 1327 | args = parser.parse_args() 1328 | 1329 | if args.debug: 1330 | loglevel = logging.DEBUG 1331 | else: 1332 | loglevel = logging.WARNING 1333 | 1334 | logging.basicConfig(level=loglevel) 1335 | 1336 | # set the verboseness of the instance based on 1337 | # either the default or command line input 1338 | # could also give meaning to numeric levels here ... 1339 | global verbosity 1340 | verbosity = args.v 1341 | 1342 | global build_num, build_suffix 1343 | build_num = args.n 1344 | build_suffix = args.s if args.s == "" else ("-" + args.s) 1345 | 1346 | with open(args.b, 'r') as fd_: 1347 | rel_blueprint = OpxRelBlueprint.load_xml(fd_, args.dist) 1348 | 1349 | if verbosity > 0: 1350 | print(rel_blueprint) 1351 | 1352 | rel_plan = OpxRelPackageAssembler(rel_blueprint) 1353 | rel_plan.update_rootfs() 1354 | rel_plan.filter_packages() 1355 | index_local_packages(args.dist) 1356 | rel_plan.add_packages() 1357 | rel_plan.verify_packages() 1358 | rel_plan.install_packages() 1359 | rel_plan.copy_inst_hooks(args.dist) 1360 | rel_plan.make_output() 1361 | 1362 | end_timestamp = datetime.datetime.now() 1363 | duration = end_timestamp - start_timestamp 1364 | 1365 | if args.build_info: 1366 | build_name = 'OPX' 1367 | build_number = '{}{}'.format( 1368 | rel_blueprint.output_format['version'], 1369 | '.{}'.format(build_num) if build_num != 0 else '', 1370 | ) 1371 | 1372 | build_info = { 1373 | "version": '1.0.1', 1374 | "name": build_name, 1375 | "number": build_number, 1376 | "suffix": build_suffix, 1377 | "type": 'GENERIC', 1378 | "started": art8601_format(start_timestamp), 1379 | "durationMillis": int(duration.total_seconds() * 1000), 1380 | 'modules': [ 1381 | { 1382 | 'id': os.path.basename(args.b), 1383 | 'artifacts': rel_plan.artifacts, 1384 | 'dependencies': rel_plan.dependencies, 1385 | }, 1386 | ], 1387 | 'properties': { 1388 | "buildInfo.env." 
+ key: val for key, val in os.environ.items() 1389 | }, 1390 | } 1391 | 1392 | if args.build_url is not None: 1393 | build_info['url'] = args.build_url 1394 | if args.vcs_url is not None: 1395 | build_info['vcsUrl'] = args.vcs_url 1396 | if args.vcs_revision is not None: 1397 | build_info['vcsRevision'] = args.vcs_revision 1398 | 1399 | with open(args.build_info, 'w') as f: 1400 | json.dump(build_info, f, indent=4) 1401 | 1402 | return 0 1403 | 1404 | 1405 | if __name__ == "__main__": 1406 | sys.exit(main()) 1407 | 1408 | # Local Variables: 1409 | # tab-width:4 1410 | # indent-tabs-mode:nil 1411 | # End: 1412 | # vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4 : 1413 | -------------------------------------------------------------------------------- /scripts/opx_rootfs.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python 2 | 3 | """Create/update root file system image 4 | 5 | A utility class used to create and update a root file system image, 6 | using a previously created tar archive as a base. 7 | 8 | Ideally we would create the root file system image from scratch. But 9 | it's created using debootstrap in a docker container, which we can't 10 | (reasonably) do here. It's worth investigating whether we can get the 11 | same results using a different method. 12 | 13 | See the open-source-rootfs repository for details. 14 | """ 15 | 16 | from __future__ import print_function 17 | import hashlib 18 | import sys 19 | import os 20 | import stat 21 | import shutil 22 | import subprocess 23 | import requests 24 | import requests_file 25 | import tempfile 26 | 27 | verbosity = 1 28 | 29 | FAKECHROOT = 'fakechroot' 30 | FAKEROOT = 'fakeroot' 31 | 32 | class TemporaryDirectory(object): 33 | """ 34 | Context Manager for managing lifetime of a temporary directory 35 | 36 | This was inspired by Python 3's tempfile.TemporaryDirectory 37 | class. 38 | """ 39 | def __init__(self, suffix="", prefix="tmp", dir=None): 40 | self.closed = False 41 | self.name = tempfile.mkdtemp(suffix, prefix, dir) 42 | 43 | def __enter__(self): 44 | return self.name 45 | 46 | def __exit__(self, *args): 47 | self.cleanup() 48 | 49 | def __del__(self): 50 | self.cleanup() 51 | 52 | def cleanup(self): 53 | if not self.closed: 54 | try: 55 | shutil.rmtree(self.name) 56 | except: 57 | pass 58 | self.closed = True 59 | 60 | class OpxrootfsError(Exception): 61 | pass 62 | 63 | class Opxrootfs(object): 64 | """ 65 | OPX root file system class 66 | 67 | Allows creation of a rootfs image from a tar archive, which can 68 | then be manipulated with various methods inspired by libguestfs. 
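Illustrative usage (hypothetical URL and script path, shown only as
a sketch):

    rootfs = Opxrootfs(None, 'file:///tmp/base-rootfs.tgz')
    rootfs.do_chroot('/path/to/setup-script')   # run a script inside
    rootfs.tar_out('new-rootfs.tgz')            # archive the result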
69 | """
70 | @staticmethod
71 | def _my_mkdir(path):
72 | """
73 | Local make dir that tolerates the directory already existing.
74 | If the path exists but is not a directory, an error is raised.
75 | Elevated verbosity puts informational messages on stderr.
76 | 
77 | """
78 | if not os.path.isdir(path):
79 | if os.path.exists(path):
80 | if verbosity > 0:
81 | print(path + " exists -- aborting", file=sys.stderr)
82 | raise OpxrootfsError(path + " exists and is not a directory")
83 | try:
84 | os.makedirs(path)
85 | except OSError as exception:
86 | if exception.errno != os.errno.EEXIST:
87 | raise
88 | elif verbosity > 0:
89 | print(path + " already exists", file=sys.stderr)
90 | 
91 | def __init__(self, rootfs_path, rootfs_url, rootfs_sha1=None, rootfs_md5=None):
92 | """
93 | Initialize the rootfs instance
94 | Creates a root file system in the specified directory,
95 | with the appropriate properties for the work at hand
96 | 
97 | :param:`rootfs_path`
98 | location of rootfs
99 | :param:`rootfs_url`
100 | url to initial rootfs location
101 | :param:`rootfs_sha1`
102 | SHA1 digest of rootfs tarball
103 | :param:`rootfs_md5`
104 | MD5 digest of rootfs tarball
105 | """
106 | 
107 | # Create temporary file for fakeroot state
108 | self._fakeroot_state = tempfile.NamedTemporaryFile()
109 | 
110 | # Create temporary directory for rootfs
111 | # if rootfs_path is None, use a temporary directory;
112 | # otherwise use the supplied path.
113 | if rootfs_path is None:
114 | self._rootfs_tmpdir = TemporaryDirectory()
115 | self._rootpath = self._rootfs_tmpdir.name
116 | else:
117 | self._rootpath = rootfs_path
118 | shutil.rmtree(self._rootpath, ignore_errors=True)
119 | self._my_mkdir(self._rootpath)
120 | 
121 | # Use temporary file to hold incoming rootfs tar
122 | with tempfile.NamedTemporaryFile() as fd_:
123 | # request the specified archive
124 | print("fetching %s ..." % rootfs_url)
125 | 
126 | s = requests.Session()
127 | s.mount('file://', requests_file.FileAdapter())
128 | 
129 | resp = s.get(rootfs_url, stream=True)
130 | if not resp.status_code == requests.codes.ok:
131 | print(".remote fetch failed for %s : %d."
132 | % (rootfs_url, resp.status_code),
133 | file=sys.stderr)
134 | print(resp.headers['status'], file=sys.stderr)
135 | resp.raise_for_status()
136 | 
137 | chunk_size = 4096
138 | md5 = hashlib.md5()
139 | sha1 = hashlib.sha1()
140 | for chunk in resp.iter_content(chunk_size):
141 | md5.update(chunk)
142 | sha1.update(chunk)
143 | fd_.write(chunk)
144 | fd_.flush()
145 | 
146 | # Validate MD5 digest
147 | if rootfs_md5:
148 | if rootfs_md5 != md5.hexdigest():
149 | raise OpxrootfsError("MD5 validation failed: got %s, expected %s"
150 | % (md5.hexdigest(), rootfs_md5))
151 | 
152 | # Validate SHA1 digest
153 | if rootfs_sha1:
154 | if rootfs_sha1 != sha1.hexdigest():
155 | raise OpxrootfsError("SHA1 validation failed: got %s, expected %s"
156 | % (sha1.hexdigest(), rootfs_sha1))
157 | 
158 | # load the initial file system
159 | self.tar_in(fd_.name)
160 | 
161 | def rootpath(self, *args):
162 | """
163 | Return host path to the rootfs path :param:`path`.
164 | """
165 | 
166 | path = self._rootpath
167 | for x in args:
168 | if not path.endswith('/') and not x.startswith('/'):
169 | path += '/'
170 | path += x
171 | 
172 | return path
173 | 
174 | def exists(self, path):
175 | """
176 | Returns true if :param:`path` exists
177 | 
178 | .. note::
179 | 
180 | since this only determines whether the file exists, we should not
181 | have to do this under fakeroot.
182 | """
183 | return os.path.exists(self.rootpath(path))
184 | 
185 | def isfile(self, path):
186 | """
187 | Returns true if :param:`path` is a regular file.
188 | 
189 | .. note::
190 | 
191 | since this only determines the file type, we should not have
192 | to do this under fakeroot.
193 | """
194 | return os.path.isfile(self.rootpath(path))
195 | 
196 | def isdir(self, path):
197 | """
198 | Returns true if :param:`path` is a directory.
199 | 
200 | .. note::
201 | 
202 | since this only determines the file type, we should not have
203 | to do this under fakeroot.
204 | """
205 | return os.path.isdir(self.rootpath(path))
206 | 
207 | def listdir(self, path):
208 | """
209 | Returns a list of the names of the directory entries in the
210 | directory given by path.
211 | 
212 | .. note::
213 | since this only returns names, we should not have to do
214 | this under fakeroot.
215 | """
216 | return os.listdir(self.rootpath(path))
217 | 
218 | def compute_md5(self, path):
219 | """
220 | Returns hashlib.md5 object of the file at rootfs path :param:`path`.
221 | 
222 | 
223 | .. note::
224 | since this only accesses file contents, we should not
225 | have to do this under fakeroot.
226 | """
227 | with open(self.rootpath(path), 'rb') as f:
228 | md5 = hashlib.md5()
229 | buf = f.read(8192)
230 | while len(buf) > 0:
231 | md5.update(buf)
232 | buf = f.read(8192)
233 | 
234 | return md5
235 | 
236 | def compute_sha1(self, path):
237 | """
238 | Returns hashlib.sha1 object of the file at rootfs path :param:`path`.
239 | 
240 | 
241 | .. note::
242 | since this only accesses file contents, we should not
243 | have to do this under fakeroot.
244 | """
245 | with open(self.rootpath(path), 'rb') as f:
246 | sha1 = hashlib.sha1()
247 | buf = f.read(8192)
248 | while len(buf) > 0:
249 | sha1.update(buf)
250 | buf = f.read(8192)
251 | 
252 | return sha1
253 | 
254 | def remove(self, path):
255 | """
256 | Removes file or directory :param:`path`.
257 | 
258 | .. note::
259 | Run under fakeroot to keep database coherent.
260 | """
261 | cmd = [FAKEROOT,
262 | '-i', self._fakeroot_state.name,
263 | '-s', self._fakeroot_state.name,
264 | 'rm', '-f', self.rootpath(path)
265 | ]
266 | 
267 | try:
268 | subprocess.check_call(cmd)
269 | except subprocess.CalledProcessError as ex:
270 | if verbosity > 0:
271 | print(ex)
272 | raise OpxrootfsError("Can't remove(%s)" % path)
273 | 
274 | def rename(self, src, dst):
275 | """
276 | Rename file or directory :param:`src` to :param:`dst`.
277 | 
278 | .. note::
279 | Run under fakeroot to keep database coherent.
280 | """
281 | cmd = [FAKEROOT,
282 | '-i', self._fakeroot_state.name,
283 | '-s', self._fakeroot_state.name,
284 | 'mv', '-f', self.rootpath(src), self.rootpath(dst)
285 | ]
286 | 
287 | try:
288 | subprocess.check_call(cmd)
289 | except subprocess.CalledProcessError as ex:
290 | if verbosity > 0:
291 | print(ex)
292 | raise OpxrootfsError("Can't rename(%s,%s)" % (src, dst))
293 | 
294 | def rmtree(self, path):
295 | """
296 | Recursively remove directory :param:`path` and its contents.
297 | 
298 | .. note::
299 | Run under fakeroot to keep database coherent.
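Roughly equivalent to the following shell command, where STATE
stands in for the fakeroot state file (illustrative only):

    fakeroot -i STATE -s STATE rm -rf <rootpath>/<path>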
300 | """ 301 | cmd = [FAKEROOT, 302 | '-i', self._fakeroot_state.name, 303 | '-s', self._fakeroot_state.name, 304 | 'rm', '-rf', self.rootpath(path) 305 | ] 306 | 307 | try: 308 | subprocess.check_call(cmd) 309 | except subprocess.CalledProcessError as ex: 310 | if verbosity > 0: 311 | print(ex) 312 | raise OpxrootfsError("Can't rmtree(%s)" % path) 313 | 314 | def do_chroot(self, op_path): 315 | """ 316 | Execute file specified under fakechroot in this 317 | rootfs instance, ... 318 | """ 319 | print("do_chroot(self, %s)" % op_path) 320 | 321 | # do we need to verify the path, as in a file 322 | # referenced by the path 323 | # copy the file, and insure execute permission 324 | _target = os.path.join(self._rootpath, os.path.basename(op_path)) 325 | shutil.copyfile(op_path, _target) 326 | os.chmod(_target, (stat.S_IXUSR | stat.S_IRUSR 327 | | stat.S_IXGRP | stat.S_IRGRP 328 | | stat.S_IXOTH | stat.S_IROTH)) 329 | 330 | # build up the fakeroot/fakechroot wrapper for the command 331 | # assumes the command is in the root directory, and 332 | # thus executes it there. 333 | cmd = [FAKECHROOT] 334 | cmd += [FAKEROOT, 335 | '-i', self._fakeroot_state.name, 336 | '-s', self._fakeroot_state.name] 337 | cmd += ['/usr/sbin/chroot', self._rootpath] 338 | cmd += [os.path.sep + os.path.basename(op_path)] 339 | 340 | if verbosity > 0: 341 | print("do_chroot") 342 | print(cmd) 343 | 344 | # execute the command in the fakechroot environment 345 | try: 346 | subprocess.check_call(cmd) 347 | except subprocess.CalledProcessError as ex: 348 | if verbosity > 0: 349 | print(ex) 350 | raise OpxrootfsError("Can't run script") 351 | 352 | # collect status for return/display 353 | # need to remove the command executed from the sysroot 354 | os.remove(os.path.join(self._rootpath, os.path.basename(op_path))) 355 | 356 | def installed_packages(self): 357 | """ 358 | Returns a list of installed packages within the rootfs 359 | """ 360 | print("installed_packages(self)") 361 | 362 | # Build up the fakeroot/fakechroot wrapper for the apt list command 363 | cmd = [FAKECHROOT, FAKEROOT, '/usr/sbin/chroot'] 364 | cmd += [self._rootpath, 'apt', 'list', '--installed'] 365 | 366 | if verbosity > 0: 367 | print("installed_packages") 368 | print(cmd) 369 | 370 | package_list = [] 371 | # execute the command in the fakechroot environment and get the output 372 | try: 373 | for pkg_full in subprocess.check_output(cmd).split('\n'): 374 | # An installed package has the form 375 | # util-linux/stable,now 2.25.2-6 amd64 [installed] 376 | # We only care about the package name (before the /) 377 | pkg = pkg_full.split('/')[0] 378 | package_list.append(pkg) 379 | except subprocess.CalledProcessError as ex: 380 | if verbosity > 0: 381 | print(ex) 382 | raise OpxrootfsError("Error running apt list") 383 | 384 | # Return the installed package list to the caller 385 | return package_list 386 | 387 | def tar_in(self, tarfile, directory='/', compress=True): 388 | """ 389 | Extract a compressed tar archive to the rootfs 390 | 391 | Extracts a tar archive from local file :param:`tarfile` into 392 | rootfs directory :param:`directory`. 
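Roughly equivalent to the following pipeline, where STATE stands in
for the fakeroot state file (illustrative only; compression and
verbosity flags vary with the arguments):

    fakeroot -i STATE -s STATE tar -C <rootpath> -xzf - < tarfile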
393 | """ 394 | if verbosity > 0: 395 | print("tar_in(self, %s, %s)" % (tarfile, directory)) 396 | 397 | tar_cmd = ['tar', '-C', self.rootpath(directory), '-x', '-f', '-'] 398 | 399 | if compress: 400 | tar_cmd += ['-z'] 401 | 402 | if verbosity > 1: 403 | tar_cmd += ['-v'] 404 | 405 | tar_cmd += ['--numeric-owner', '--preserve-permissions'] 406 | 407 | with open(tarfile, 'r') as fd_: 408 | cmd = [FAKEROOT, 409 | '-i', self._fakeroot_state.name, 410 | '-s', self._fakeroot_state.name] 411 | cmd += tar_cmd 412 | 413 | if verbosity > 0: 414 | print("tar_in(%s)" % cmd) 415 | 416 | try: 417 | subprocess.check_call(cmd, stdin=fd_) 418 | except subprocess.CalledProcessError as ex: 419 | if verbosity > 0: 420 | print(ex) 421 | raise OpxrootfsError("Can't extract tarball") 422 | 423 | def tar_out(self, tarfile, directory='/', compress=True, files=['.']): 424 | """ 425 | Create a compressed tar archive from the rootfs 426 | 427 | Creates a tar archive with the contents of :param:`directory` 428 | and writes it to local file :param:`tarfile`. 429 | """ 430 | if verbosity > 0: 431 | print("tar_out(self, %s, %s)" % (tarfile, directory)) 432 | 433 | tar_cmd = ['tar', '-C', directory, '-c', '-f', '-'] 434 | 435 | if compress: 436 | tar_cmd += ['-z'] 437 | 438 | if verbosity > 1: 439 | tar_cmd += ['-v'] 440 | 441 | tar_cmd += files 442 | 443 | with open(tarfile, 'w') as fd_: 444 | cmd = [FAKECHROOT, '-e', 'none'] 445 | cmd += [FAKEROOT, 446 | '-i', self._fakeroot_state.name, 447 | '-s', self._fakeroot_state.name] 448 | cmd += ['/usr/sbin/chroot', self._rootpath] 449 | cmd += tar_cmd 450 | 451 | if verbosity > 0: 452 | print("tar_out(%s)" % cmd) 453 | 454 | try: 455 | subprocess.check_call(cmd, stdout=fd_) 456 | except subprocess.CalledProcessError as ex: 457 | if verbosity > 0: 458 | print(ex) 459 | raise OpxrootfsError("Can't create tarball") 460 | 461 | # Local Variables: 462 | # tab-width:4 463 | # indent-tabs-mode:nil 464 | # End: 465 | # vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4 : 466 | -------------------------------------------------------------------------------- /scripts/opx_run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # available options 4 | export OPX_GIT_TAG="${OPX_GIT_TAG:-no}" 5 | 6 | # package distribution 7 | export OPX_RELEASE="${OPX_RELEASE:-unstable}" 8 | # currently tracked release 9 | export DIST="${DIST:-stretch}" 10 | export ARCH="${ARCH:-amd64}" 11 | 12 | export CUSTOM_SOURCES="${CUSTOM_SOURCES:-}" 13 | 14 | # docker image name 15 | IMAGE="opxhub/build" 16 | # docker image tag 17 | VERSION="${VERSION:-latest}" 18 | 19 | interactive="-i" 20 | if [ -t 1 ]; then 21 | # STDOUT is attached to TTY 22 | interactive="-it" 23 | fi 24 | 25 | read -d '' opx_docker_command <<- EOF 26 | docker run 27 | --rm 28 | --name ${USER}_$(basename $PWD)_$$ 29 | --privileged 30 | -e LOCAL_UID=$(id -u ${USER}) 31 | -e LOCAL_GID=$(id -g ${USER}) 32 | -v ${PWD}:/mnt 33 | -v $HOME/.gitconfig:/home/opx/.gitconfig 34 | -v /etc/localtime:/etc/localtime:ro 35 | -e ARCH 36 | -e DIST 37 | -e OPX_RELEASE 38 | -e OPX_GIT_TAG 39 | -e CUSTOM_SOURCES 40 | ${interactive} 41 | ${IMAGE}:${VERSION} 42 | EOF 43 | 44 | if [[ $# -gt 0 ]]; then 45 | # run command directly 46 | # not using bash because tar fails to complete 47 | # root cause unknown (see opx_rel_pkgasm.py:tar_in) 48 | $opx_docker_command sh -l -c "$*" 49 | else 50 | # launch interactive shell 51 | # using bash here because tar does not fail in an interactive shell 52 | 
49 | if [[ $# -gt 0 ]]; then
50 |     # run command directly
51 |     # not using bash because tar fails to complete
52 |     # root cause unknown (see opx_rel_pkgasm.py:tar_in)
53 |     $opx_docker_command sh -l -c "$*"
54 | else
55 |     # launch interactive shell
56 |     # using bash here because tar does not fail in an interactive shell
57 |     $opx_docker_command bash -l
58 | fi
59 | 
--------------------------------------------------------------------------------
/scripts/templates/do_apt_upgrade_sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | 
3 | # Work around a dpkg bug (fixed in later dpkg versions; the exact
4 | # version is still to be identified, possibly 1.15.6) by setting
5 | # the statoverride file aside for the duration of this script
6 | mv /var/lib/dpkg/statoverride /var/lib/dpkg/stato
7 | touch /var/lib/dpkg/statoverride
8 | 
9 | # specify non-interactive operation required
10 | export DEBIAN_FRONTEND=noninteractive
11 | 
12 | # prevent package installations from starting the daemons they
13 | # ship -- invoke-rc.d treats exit status 101 as "action forbidden"
14 | cat > /usr/sbin/policy-rc.d << __EOF__
15 | #! /bin/sh
16 | 
17 | exit 101
18 | __EOF__
19 | 
20 | # policy-rc.d only takes effect if it is executable (thanks, JT)
21 | chmod a+x /usr/sbin/policy-rc.d
22 | 
23 | apt-get update
24 | apt-get install -y curl dirmngr
25 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys AD5073F1 # already root in the chroot; no sudo needed
26 | apt-get upgrade -y
27 | 
28 | rm -f /usr/sbin/policy-rc.d
29 | 
30 | # restore the original statoverride file that was set aside
31 | # above
32 | rm -f /var/lib/dpkg/statoverride
33 | mv /var/lib/dpkg/stato /var/lib/dpkg/statoverride
34 | 
35 | exit 0
36 | 
--------------------------------------------------------------------------------
/scripts/templates/do_dpkg_sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | 
3 | # Work around a dpkg bug (fixed in later dpkg versions; the exact
4 | # version is still to be identified, possibly 1.15.6) by setting
5 | # the statoverride file aside for the duration of this script
6 | mv /var/lib/dpkg/statoverride /var/lib/dpkg/stato
7 | touch /var/lib/dpkg/statoverride
8 | 
9 | # specify non-interactive operation required
10 | export DEBIAN_FRONTEND=noninteractive
11 | 
12 | # prevent package installations from starting the daemons they
13 | # ship -- invoke-rc.d treats exit status 101 as "action forbidden"
14 | cat > /usr/sbin/policy-rc.d << __EOF__
15 | #! /bin/sh
16 | 
17 | exit 101
18 | __EOF__
19 | 
20 | # policy-rc.d only takes effect if it is executable (thanks, JT)
21 | chmod a+x /usr/sbin/policy-rc.d
22 | 
23 | dpkg -R --install --force-all --pre-invoke='echo PRE: ${DPKG_HOOK_ACTION}' --post-invoke='echo POST: ${DPKG_HOOK_ACTION}' /var/cache/apt/archives
24 | 
25 | rm -f /usr/sbin/policy-rc.d
26 | 
27 | # restore the original statoverride file that was set aside
28 | # above
29 | rm -f /var/lib/dpkg/statoverride
30 | mv /var/lib/dpkg/stato /var/lib/dpkg/statoverride
31 | 
32 | exit 0
33 | 
--------------------------------------------------------------------------------
/scripts/templates/do_insrtpkgs_sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | 
3 | # Work around a dpkg bug (fixed in later dpkg versions; the exact
4 | # version is still to be identified, possibly 1.15.6) by setting
5 | # the statoverride file aside for the duration of this script
6 | mv /var/lib/dpkg/statoverride /var/lib/dpkg/stato
7 | touch /var/lib/dpkg/statoverride
8 | 
9 | # specify non-interactive operation required
10 | export DEBIAN_FRONTEND=noninteractive
11 | 
12 | # prevent package installations from starting the daemons they
13 | # ship -- invoke-rc.d treats exit status 101 as "action forbidden"
14 | cat > /usr/sbin/policy-rc.d << __EOF__
15 | #! /bin/sh
16 | 
17 | exit 101
18 | __EOF__
19 | 
20 | # policy-rc.d only takes effect if it is executable (thanks, JT)
21 | chmod a+x /usr/sbin/policy-rc.d
22 | 
23 | 
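24 | # Install every .deb found under the archives tree in one pass.
25 | # --force-all pushes past dependency-ordering errors (assumed safe
26 | # here); the pre/post invoke hooks merely log each dpkg action.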
27 | dpkg -R --install --force-all --pre-invoke='echo PRE: ${DPKG_HOOK_ACTION}' --post-invoke='echo POST: ${DPKG_HOOK_ACTION}' /var/cache/apt/archives
28 | 
29 | # delete the policy-rc.d script created above
30 | rm -f /usr/sbin/policy-rc.d
31 | 
32 | # restore the original statoverride file that was set aside
33 | # above
34 | rm -f /var/lib/dpkg/statoverride
35 | mv /var/lib/dpkg/stato /var/lib/dpkg/statoverride
36 | 
37 | exit 0
38 | 
--------------------------------------------------------------------------------
/scripts/templates/install_opx_sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 | # Install OPX for the given platform
3 | # Usage: $0 <platform> [vm-flavor]
4 | # This must be run within a chroot in order to ensure that the package
5 | # installs are limited to the chroot
6 | 
7 | OPX_PLATFORM="$1"
8 | OPX_VM_FLAVOR="$2"
9 | 
10 | if [ -z "$OPX_PLATFORM" ]
11 | then
12 |     echo "ERROR: Must specify platform for installation!" >&2
13 |     exit 1
14 | fi
15 | 
16 | # # Save stdout and stderr file descriptors
17 | # exec 6>&1
18 | # exec 7>&2
19 | 
20 | # # Redirect stdout and stderr to /root/install.log
21 | # exec 1>/root/install.log
22 | # exec 2>/root/install.log
23 | 
24 | # # Restore stdout and stderr on exit
25 | # trap 'exec 1>&6 ; exec 2>&7' EXIT
26 | 
27 | # Run hooks for installation
28 | HOOKSDIR=/root/hooks
29 | run_hooks()
30 | {
31 |     local stage=$1
32 |     shopt -s nullglob
33 |     for hook in $HOOKSDIR/*.${stage}.sh
34 |     do
35 |         [ -x "$hook" ] && "$hook"
36 |     done
37 |     shopt -u nullglob
38 | }
39 | 
40 | # specify non-interactive operation required
41 | export DEBIAN_FRONTEND=noninteractive
42 | 
43 | # Prevent package installations from starting the daemons they ship
44 | cat > /usr/sbin/policy-rc.d << EOF
45 | #!/bin/sh
46 | 
47 | exit 101
48 | EOF
49 | 
50 | chmod +x /usr/sbin/policy-rc.d
51 | 
52 | # Create the /proc and /sys directories
53 | # This will prevent any "chdir /proc failed" style messages
54 | mkdir -p /proc
55 | mkdir -p /sys
56 | 
57 | # Run any pre install hooks
58 | run_hooks preinst
59 | 
60 | # For every installed package in the system, run dpkg-reconfigure
61 | #   dpkg -l -- list all packages in the system (typically installed)
62 | #   awk    -- keep all lines beginning with ii (installed)
63 | #             and print the package name
64 | #   sed    -- strip off any architecture component
65 | #   xargs  -- call dpkg-reconfigure on each package individually
66 | dpkg -l | awk '/^ii/ { print $2 }' | sed 's/:.*$//' | xargs -n1 dpkg-reconfigure
67 | 
68 | # Update the APT database
69 | apt-get update
70 | 
71 | # Abort in case of any errors during installation
72 | set -e
73 | 
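74 | # Each package_sets entry is expected to carry the fields used below:
75 | #   name     -- descriptive name of the set
76 | #   platform -- optional platform gate
77 | #   flavor   -- optional VM-flavor gate (only meaningful with platform)
78 | #   packages -- list of Debian package names to install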
>&7 86 | exit 1 87 | fi 88 | if [ "$OPX_VM_FLAVOR" == "{{ pks.flavor }}" ] 89 | then 90 | {%- endif %} 91 | {%- endif %} 92 | 93 | apt-get install -y --force-yes {{ pks.packages | join(' ') }} 94 | 95 | {%- if pks.platform %} 96 | {%- if pks.flavor %} 97 | fi 98 | {%- endif %} 99 | fi 100 | {%- endif %} 101 | {%- endfor %} 102 | 103 | # Create OPX Version file 104 | cat > /etc/OPX-release-version << EOF 105 | OS_NAME="{{ release.name }}" 106 | OS_VERSION="{{ release.version }}" 107 | PLATFORM="$OPX_PLATFORM_DISPLAY_NAME" 108 | ARCHITECTURE="{{ release.architecture }}" 109 | INTERNAL_BUILD_ID="{{ release.bp_description }} {{ release.bp_version }}" 110 | BUILD_VERSION="{{release.version}}.{{release.build_num}}{{release.build_suffix}}" 111 | BUILD_DATE="{{ release.build_date }}" 112 | INSTALL_DATE="$(date -Isec)" 113 | EOF 114 | 115 | # Run any post install hooks 116 | run_hooks postinst 117 | rm -rf $HOOKSDIR 118 | 119 | # Finished installation, remove all deb files and installer remnants 120 | set +e 121 | 122 | rm -f /usr/sbin/policy-rc.d 123 | rm -f /etc/apt/sources.list.d/installer.list 124 | 125 | apt-get clean 126 | 127 | # Protect the install log 128 | chmod 400 /root/install.log 129 | 130 | rm -f $0 131 | exit 0 132 | 133 | --------------------------------------------------------------------------------