├── tests
│   ├── acceptance
│   │   ├── requirements.txt
│   │   ├── fixtures
│   │   │   └── test-job.xml
│   │   └── test_jenkins.py
│   └── scale
│       ├── config.py
│       ├── test_quota.py
│       └── test_scale_utils.py
├── jenkins-agent-images
│   └── linux
│       └── dcos-jenkins-dind-agent
│           ├── .dockerignore
│           ├── wrapper.sh
│           ├── LICENSE
│           ├── Dockerfile.ubuntu
│           ├── scripts
│           │   └── build_images.sh
│           ├── Dockerfile.alpine
│           └── README.md
├── testing
│   ├── security
│   │   ├── keytab-validator
│   │   │   ├── .gitignore
│   │   │   ├── src
│   │   │   │   ├── META-INF
│   │   │   │   │   └── MANIFEST.MF
│   │   │   │   └── Main.java
│   │   │   ├── known_bad.keytab
│   │   │   ├── known_good.keytab
│   │   │   ├── keytab-validator.jar
│   │   │   └── README.MD
│   │   ├── __init__.py
│   │   ├── kerberos.py
│   │   └── transport_encryption.py
│   ├── README.md
│   ├── sdk_dcos.py
│   ├── testData
│   │   ├── test-job.xml
│   │   └── gen-job.xml
│   ├── sdk_fault_domain.py
│   ├── sdk_networks.py
│   ├── sdk_quota.py
│   ├── sdk_hosts.py
│   ├── sdk_repository.py
│   ├── sdk_jobs.py
│   ├── sdk_utils.py
│   └── sdk_metrics.py
├── tools
│   ├── pip
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── build.sh
│   │   ├── cmd_wrapper
│   │   │   └── __init__.py
│   │   └── setup.py
│   ├── Dockerfile
│   ├── kdc
│   │   ├── run.sh
│   │   ├── Dockerfile
│   │   ├── kdc.json
│   │   ├── README.md
│   │   ├── kdc.py
│   │   └── kdc.conf
│   ├── universe
│   │   ├── __init__.py
│   │   ├── resources
│   │   │   └── template
│   │   │       └── package.json
│   │   ├── test_package_builder.py
│   │   ├── s3_uploader.py
│   │   ├── test_package.py
│   │   ├── package.py
│   │   ├── test_package_manager.py
│   │   └── package_manager.py
│   ├── vagrant
│   │   ├── vbox-network.sh
│   │   ├── metadata.json
│   │   └── Vagrantfile
│   ├── create_service_account.sh
│   ├── save_properties.py
│   ├── ci
│   │   ├── launch_cluster.sh
│   │   └── test_runner.sh
│   ├── build_framework.sh
│   ├── create_testing_volumes.py
│   ├── print_package_tag.py
│   ├── build_package.sh
│   ├── update_config_json.py
│   ├── dcos_login.py
│   ├── distribution
│   │   ├── init
│   │   └── UPDATING.md
│   ├── airgap_linter.py
│   └── build_go_exe.sh
├── .dockerignore
├── conf
│   ├── jenkins
│   │   ├── jenkins.model.JenkinsLocationConfiguration.xml
│   │   ├── nodeMonitors.xml
│   │   └── configuration.yaml
│   └── nginx
│       ├── nginx.conf.template
│       └── nginx.conf
├── .gitignore
├── universe
│   ├── resource.json
│   ├── package.json
│   └── marathon.json.mustache
├── LICENSE
├── scripts
│   ├── dcos-write-known-hosts-file.sh
│   ├── release.sh
│   ├── dcos-framework-dns-name.sh
│   ├── export-libssl.sh
│   ├── plugin-list.py
│   ├── dcos-quota.sh
│   ├── init.groovy.d
│   │   └── mesos-auth.groovy
│   ├── run.sh
│   └── test.sh
├── plugins.conf
├── README.md
├── Dockerfile
└── test-runner.sh
/tests/acceptance/requirements.txt:
--------------------------------------------------------------------------------
1 | shakedown
2 |
--------------------------------------------------------------------------------
/jenkins-agent-images/linux/dcos-jenkins-dind-agent/.dockerignore:
--------------------------------------------------------------------------------
1 | scripts/*
2 |
--------------------------------------------------------------------------------
/testing/security/keytab-validator/.gitignore:
--------------------------------------------------------------------------------
1 | Main.class
2 | .DS_Store
3 |
4 |
--------------------------------------------------------------------------------
/tests/scale/config.py:
--------------------------------------------------------------------------------
1 | PACKAGE_NAME = 'jenkins'
2 | SERVICE_NAME = PACKAGE_NAME
3 |
--------------------------------------------------------------------------------
/tools/pip/.gitignore:
--------------------------------------------------------------------------------
1 | *.egg-info/
2 | *.pyc
3 | *.whl
4 | dist/
5 | build/
6 | venv/
7 |
--------------------------------------------------------------------------------
/tools/Dockerfile:
--------------------------------------------------------------------------------
1 | # Dockerfile for the KDC image
2 |
3 | FROM nvaziri/kdc:dev_0
4 |
5 | CMD /run.sh
6 |
--------------------------------------------------------------------------------
/testing/security/keytab-validator/src/META-INF/MANIFEST.MF:
--------------------------------------------------------------------------------
1 | Manifest-Version: 1.0
2 | Main-Class: Main
3 |
4 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | .gitignore
3 | README.md
4 | demo/
5 | docs/
6 | scripts/release.sh
7 | scripts/test.sh
8 | tests/
9 |
--------------------------------------------------------------------------------
/testing/security/__init__.py:
--------------------------------------------------------------------------------
1 | from . import kerberos
2 | from . import transport_encryption
3 |
4 |
 5 | __all__ = ["kerberos", "transport_encryption"]
6 |
--------------------------------------------------------------------------------
/testing/security/keytab-validator/known_bad.keytab:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/d2iq-archive/dcos-jenkins-service/HEAD/testing/security/keytab-validator/known_bad.keytab
--------------------------------------------------------------------------------
/testing/security/keytab-validator/known_good.keytab:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/d2iq-archive/dcos-jenkins-service/HEAD/testing/security/keytab-validator/known_good.keytab
--------------------------------------------------------------------------------
/testing/security/keytab-validator/keytab-validator.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/d2iq-archive/dcos-jenkins-service/HEAD/testing/security/keytab-validator/keytab-validator.jar
--------------------------------------------------------------------------------
/tools/kdc/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | echo
4 | echo KDC LOGS
5 | echo ========
6 | exec /usr/lib/heimdal-servers/kdc --config-file=/etc/heimdal-kdc/kdc.conf --ports=$PORT_KDC
7 |
--------------------------------------------------------------------------------
/conf/jenkins/jenkins.model.JenkinsLocationConfiguration.xml:
--------------------------------------------------------------------------------
 1 | <?xml version='1.0' encoding='UTF-8'?>
 2 | <jenkins.model.JenkinsLocationConfiguration>
 3 |   <adminAddress>address not configured yet &lt;nobody@nowhere&gt;</adminAddress>
 4 | </jenkins.model.JenkinsLocationConfiguration>
 5 |
--------------------------------------------------------------------------------
/tools/kdc/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mesosphere/kdc:latest
2 |
3 | RUN mkdir /kdc
4 | COPY run.sh /kdc/run.sh
5 | COPY kdc.conf /etc/heimdal-kdc/kdc.conf
6 | RUN chown -R nobody:nogroup /kdc
7 | RUN chmod -R 744 /var/lib/heimdal-kdc/
8 | RUN chmod -R 744 /etc/heimdal-kdc/
9 | RUN chmod -R 744 /kdc
10 |
11 | CMD /kdc/run.sh
12 |
--------------------------------------------------------------------------------
/testing/README.md:
--------------------------------------------------------------------------------
1 | # Testing utils
2 |
3 | Common python code used for integration testing of services in this repository. By convention the libraries in this directory tend to start with `sdk_`, making them a little easier to identify in individual tests.
4 |
 5 | Each service has its own tests which use these common utilities. For example, see the [hello-world service tests](../frameworks/helloworld/tests/).
6 |
--------------------------------------------------------------------------------
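For illustration, a test module built on these utilities might look like the following sketch (assumes `testing/` is on `PYTHONPATH` and a cluster is attached; `sdk_quota` is one of the modules listed above, and the quota values are illustrative):

```python
# Hedged sketch only: exercises sdk_quota the same way tests/scale/test_quota.py does.
import sdk_quota


def test_quota_roundtrip():
    sdk_quota.create_quota("jenkins", cpus=1.0, mem=1024)
    try:
        roles = [q["role"] for q in sdk_quota.list_quotas().get("infos", [])]
        assert "jenkins" in roles
    finally:
        sdk_quota.remove_quota("jenkins")
```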
/tools/pip/README.md:
--------------------------------------------------------------------------------
1 | # .whl tooling
2 |
3 | Tooling to release the contents of `tools/` and `testing/` as `.whl` packages. These are uploaded in SDK releases from early 2018 onwards.
4 |
 5 | To build, run `build.sh <version>`. The output will include two `.whl` packages with the provided version, containing the respective content of `tools/` and `testing/`. See the output from `build.sh` for example usage of the built artifacts.
6 |
--------------------------------------------------------------------------------
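Once the `testing` wheel is installed, the library-style usage that `build.sh` prints boils down to this sketch:

```python
# Sketch: import the installed "testing" wheel as a library.
# The sdk_* modules sit next to testing/__init__.py, so extend the import path first.
import os.path
import sys

import testing

sys.path.append(os.path.dirname(testing.__file__))

import sdk_utils  # noqa: E402  -- any sdk_* module shipped in the wheel
```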
/jenkins-agent-images/linux/dcos-jenkins-dind-agent/wrapper.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | echo "==> Launching the Docker daemon..."
5 | dind dockerd --storage-driver=overlay2 --experimental $DOCKER_EXTRA_OPTS &
6 |
 7 | while (! docker info > /dev/null 2>&1); do
 8 |     echo "==> Waiting for the Docker daemon to come online..."
 9 |     sleep 1
10 | done
11 | echo "==> Docker Daemon is up and running!"
12 |
13 | /bin/sh -c "$@"
14 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.hpi
2 | __pycache__/
3 | .pytest_cache/*
4 | .cache/
5 | target/
6 | tmp/
7 | .idea/
8 | tests/scale/*.pyc
9 |
10 | # leftovers from running tests/tools directly
11 | tools/env/
12 | tools/cluster-*.properties
13 | tools/*.pem
14 | tools/*.pyc
15 | tools/dcostests_env/
16 | tools/shakedown_env/
17 | tools/*/shakedown_env/
18 | stub-universe
19 |
20 | # vscode
21 | .vscode
22 |
23 | # vim
24 | *.swp
25 |
26 | # mac
27 | .DS_Store
28 |
--------------------------------------------------------------------------------
/tools/universe/__init__.py:
--------------------------------------------------------------------------------
1 | from .s3_uploader import S3Uploader
2 | from .package import Package
3 | from .package_builder import UniversePackageBuilder
4 | from .package_manager import PackageManager
5 | from .package_publisher import UniversePackagePublisher
6 |
 7 | __all__ = [
 8 |     "S3Uploader",
 9 |     "Package",
10 |     "PackageManager",
11 |     "UniversePackageBuilder",
12 |     "UniversePackagePublisher",
13 | ]
14 |
--------------------------------------------------------------------------------
/testing/security/keytab-validator/README.MD:
--------------------------------------------------------------------------------
1 | This is a simple keytab validator.
2 |
3 | The files known_good.keytab and known_bad.keytab can be used to test the validator if you need to make any changes.
4 |
 5 | A copy of the validator is checked in at keytab-validator.jar in this directory. If you make changes, update this
 6 | checked-in file :).
7 |
8 | To build the validator:
9 | ```
10 | # From /keytab-validator:
11 | javac -d . src/Main.java && jar -cvfe keytab-validator.jar Main Main.class
12 | ```
13 |
--------------------------------------------------------------------------------
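A hedged sketch of driving the validator from Python (assumes `java` is on `PATH` and that it is run from this directory; exit codes follow `src/Main.java`):

```python
# Sketch: wrap the checked-in jar; Main.java exits 0 for a valid keytab, 1 otherwise.
import subprocess


def keytab_is_valid(path: str) -> bool:
    result = subprocess.run(["java", "-jar", "keytab-validator.jar", path])
    return result.returncode == 0


if __name__ == "__main__":
    assert keytab_is_valid("known_good.keytab")
    assert not keytab_is_valid("known_bad.keytab")
```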
/tools/kdc/kdc.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "id": "/kdc",
 3 |   "instances": 1,
 4 |   "cpus": 1,
 5 |   "mem": 512,
 6 |   "user": "nobody",
 7 |   "container": {
 8 |     "type": "MESOS",
 9 |     "docker": {
10 |       "image": "nvaziri/kdc:mesosphere",
11 |       "forcePullImage": true
12 |     }
13 |   },
14 |   "networks": [
15 |     {
16 |       "mode": "host"
17 |     }
18 |   ],
19 |   "portDefinitions": [
20 |     {
21 |       "port": 2500,
22 |       "name": "kdc"
23 |     }
24 |   ],
25 |   "requirePorts": true
26 | }
27 |
--------------------------------------------------------------------------------
/universe/resource.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "images": {
 3 |     "icon-small": "https://downloads.mesosphere.com/assets/universe/000/jenkins-icon-small.png",
 4 |     "icon-medium": "https://downloads.mesosphere.com/assets/universe/000/jenkins-icon-medium.png",
 5 |     "icon-large": "https://downloads.mesosphere.com/assets/universe/000/jenkins-icon-large.png"
 6 |   },
 7 |   "assets": {
 8 |     "container": {
 9 |       "docker": {
10 |         "jenkins": "{{docker-image}}"
11 |       }
12 |     }
13 |   }
14 | }
15 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2015 Mesosphere, Inc.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/scripts/dcos-write-known-hosts-file.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script writes out the SSH known hosts file which has the hosts sourced from
4 | # the SSH_KNOWN_HOSTS env-var which is in-turn specified via the service config settings.
5 |
6 | arg=$SSH_KNOWN_HOSTS
7 | lstrip_space=${arg#' '}
8 | rstrip_space=${lstrip_space%' '}
9 | SSH_KEYSCAN_ARGS=$rstrip_space
10 |
11 | SSH_KNOWN_HOSTS_DIR="$JENKINS_HOME/.ssh/"
12 | SSH_KNOWN_HOSTS_FILE="$JENKINS_HOME/.ssh/ssh_known_hosts"
13 |
14 | # Create the directory if it doesn't exist.
15 | mkdir -p $SSH_KNOWN_HOSTS_DIR
16 |
17 | ssh-keyscan $SSH_KEYSCAN_ARGS > $SSH_KNOWN_HOSTS_FILE
18 |
--------------------------------------------------------------------------------
/tools/vagrant/vbox-network.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
 7 | if VBoxManage list hostonlyifs | grep 'IPAddress:.*192.168.65.1' -q; then
 8 |     # compatible VirtualBox network found
 9 |     exit 0
10 | fi
11 |
12 | echo ">>> Creating VirtualBox Network"
13 | network_name="$(VBoxManage hostonlyif create | grep "successfully created" | sed "s/Interface '\(.*\)' was successfully created/\1/")"
14 | echo "Network Created: '${network_name}'"
15 |
16 | echo ">>> Configuring network '${network_name}' to use 192.168.65.0\24"
17 | VBoxManage hostonlyif ipconfig --ip 192.168.65.1 "${network_name}"
18 |
--------------------------------------------------------------------------------
/tools/universe/resources/template/package.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "packagingVersion": "4.0",
 3 |   "upgradesFrom": ["{{upgrades-from}}"],
 4 |   "downgradesTo": ["{{downgrades-to}}"],
 5 |   "minDcosReleaseVersion": "1.9",
 6 |   "name": "template",
 7 |   "version": "{{package-version}}",
 8 |   "maintainer": "support@YOURNAMEHERE.COM",
 9 |   "description": "YOURNAMEHERE on DC/OS",
10 |   "selected": false,
11 |   "framework": true,
12 |   "tags": ["template"],
13 |   "postInstallNotes": "DC/OS YOURNAMEHERE is being installed!\n\n\tDocumentation: {{documentation-path}}\n\tIssues: {{issues-path}}",
14 |   "postUninstallNotes": "DC/OS YOURNAMEHERE is being uninstalled."
15 | }
16 |
--------------------------------------------------------------------------------
/jenkins-agent-images/linux/dcos-jenkins-dind-agent/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2015-2016 Mesosphere, Inc.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/tools/vagrant/metadata.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "name": "mesosphere/dcos-docker-sdk",
 3 |   "description": "DC/OS framework development environment, containing a cluster and dev tools",
 4 |   "versions": [
 5 |     {
 6 |       "version": "20161123.025803",
 7 |       "status": "active",
 8 |       "description_markdown": "initial release, needs size optimizations and auto-start",
 9 |       "providers": [
10 |         {
11 |           "name": "virtualbox",
12 |           "url": "https://downloads.mesosphere.com/dcos-docker-sdk/dcos-docker-sdk-20161123.025803.box",
13 |           "checksum_type": "sha1",
14 |           "checksum": "16a6388b3b90c6509b0b8b2de181e2bc799e869c"
15 |         }
16 |       ]
17 |     }
18 |   ]
19 | }
20 |
--------------------------------------------------------------------------------
/scripts/release.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -o errexit -o nounset -o pipefail
3 |
4 | # vars
5 | readonly DOCKER_IMAGE="mesosphere/jenkins"
6 | readonly TAG=${GIT_BRANCH##*/}
7 | readonly TAR_FILE="velocity-${TAG}.tar"
8 |
9 | # docker pieces
10 | docker login --email="$DOCKER_HUB_EMAIL" --username="$DOCKER_HUB_USERNAME" --password="$DOCKER_HUB_PASSWORD"
11 | docker build -t $DOCKER_IMAGE:$TAG .
12 | docker push $DOCKER_IMAGE:$TAG
13 | docker save --output="${TAR_FILE}" $DOCKER_IMAGE:$TAG
14 |
15 | # check integrity
16 | tar tf $TAR_FILE > /dev/null
17 | # gzip with best compression
18 | gzip -9 $TAR_FILE
19 |
20 | # generate sigs
21 | openssl md5 $TAR_FILE.gz >> jenkins-$TAG.checksums
22 | openssl sha1 $TAR_FILE.gz >> jenkins-$TAG.checksums
23 |
--------------------------------------------------------------------------------
/scripts/dcos-framework-dns-name.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #This script will produce valid DNS scheme to be used for Mesos DNS resolution.
4 |
5 | #Strip out leading and trailing slashes.
6 | arg=$JENKINS_FRAMEWORK_NAME
7 | lstrip_slash=${arg#'/'}
8 | rstrip_slash=${lstrip_slash%'/'}
9 |
10 | #Tokenize path
11 | IFS='/' read -r -a tokenize <<< "$rstrip_slash"
12 |
13 | #Reverse the path to conform with DNS lookups.
14 | FRAMEWORK_DNS_NAME=""
15 |
16 | min=0
17 | max=$(( ${#tokenize[@]} -1 ))
18 | while [[ max -ge min ]]
19 | do
20 |     # Note the '-' at the end; we will strip it out later.
21 |     FRAMEWORK_DNS_NAME+="${tokenize[$max]}-"
22 |     (( max-- ))
23 | done
24 |
25 | #Strip out trailing '-' from earlier.
26 | JENKINS_FRAMEWORK_NAME=${FRAMEWORK_DNS_NAME%'-'}
27 |
28 | #Export out for consumption.
29 | export JENKINS_FRAMEWORK_NAME
30 |
--------------------------------------------------------------------------------
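The transformation the script performs, restated as a Python sketch: a Marathon path such as `/folder/jenkins` becomes the reversed, dash-joined name `jenkins-folder`.

```python
# Sketch of dcos-framework-dns-name.sh: strip slashes, tokenize on '/',
# reverse the tokens, and join with '-' to form a Mesos-DNS-friendly name.
def framework_dns_name(framework_name: str) -> str:
    tokens = framework_name.strip("/").split("/")
    return "-".join(reversed(tokens))


assert framework_dns_name("/folder/jenkins") == "jenkins-folder"
assert framework_dns_name("jenkins") == "jenkins"
```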
/scripts/export-libssl.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # Older versions of mesos or DC/OS send relative paths
4 | # for the LIBPROCESS_SSL_ environment variables. This
5 | # script tries to detect that and prepend the env vars
6 | # with the MESOS_SANDBOX value. This results in an
7 | # absolute path to the needed SSL certificates.
8 |
9 | # check if the SSL file exists and if not assume it needs
10 | # the MESOS_SANDBOX prefix.
11 | fix_ssl_var()
12 | {
13 |     local var_name=\$"$1"
14 |     local var_val=`eval "expr \"$var_name\""`
15 |     if [ ! -f $var_val ]; then
16 |         eval "$1=\"$MESOS_SANDBOX/$var_val\""
17 |     fi
18 | }
19 |
20 | fix_ssl_var LIBPROCESS_SSL_CA_FILE
21 | fix_ssl_var LIBPROCESS_SSL_CERT_FILE
22 | fix_ssl_var LIBPROCESS_SSL_KEY_FILE
23 |
24 | # pass on new values
25 | export LIBPROCESS_SSL_CA_FILE LIBPROCESS_SSL_CERT_FILE LIBPROCESS_SSL_KEY_FILE
26 |
27 |
--------------------------------------------------------------------------------
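The same fix-up expressed in Python, as a sketch (assumes the `MESOS_SANDBOX` and `LIBPROCESS_SSL_*` environment variables of a Mesos task):

```python
# Sketch of export-libssl.sh: if a LIBPROCESS_SSL_* path does not exist as given,
# assume it is relative and prefix it with the MESOS_SANDBOX directory.
import os


def fix_ssl_var(name: str) -> None:
    val = os.environ.get(name, "")
    if val and not os.path.isfile(val):
        os.environ[name] = os.path.join(os.environ.get("MESOS_SANDBOX", ""), val)


for var in ("LIBPROCESS_SSL_CA_FILE", "LIBPROCESS_SSL_CERT_FILE", "LIBPROCESS_SSL_KEY_FILE"):
    fix_ssl_var(var)
```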
/tools/kdc/README.md:
--------------------------------------------------------------------------------
 1 | This directory contains all relevant config files and scripts to enable the deployment and teardown of a
 2 | Kerberos Key Distribution Center (KDC) server.
3 |
4 | The `kdc.json` config is used by the `sdk_auth` testing module to configure a KDC in the integration test environment.
5 |
 6 | The Dockerfile is used to maintain/build a docker image which serves the KDC. The `run.sh` and `kdc.conf` files facilitate the bootstrap of
 7 | said server, as they're copied into the image via the Dockerfile recipe.
8 |
9 | The `kdc.py` script is an ad-hoc tool to deploy/teardown a KDC instance outside of the testing environment. Its usage
10 | from the root of the repo would look something like:
11 | ```
12 | PYTHONPATH=testing ./tools/kdc/kdc.py deploy principals.txt
13 | ```
14 |
15 | where `principals.txt` is a file of newline-separated principals.
16 |
--------------------------------------------------------------------------------
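A sketch of the same deployment step driven from Python rather than the shell (the principal below is illustrative):

```python
# Sketch: write a newline-separated principals file, then invoke kdc.py
# exactly as the shell one-liner above does.
import os
import subprocess

with open("principals.txt", "w") as f:
    f.write("service/host.example.com\n")  # illustrative principal

env = dict(os.environ, PYTHONPATH="testing")
subprocess.run(["./tools/kdc/kdc.py", "deploy", "principals.txt"], env=env, check=True)
```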
/scripts/plugin-list.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Parses pom.xml and outputs a Markdown-friendly list of plugins and their
3 | versions.
4 | """
5 | import os
6 | import xml.etree.ElementTree as ET
7 |
8 |
 9 | def main():
10 |     pom = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../pom.xml')
11 |     tree = ET.parse(pom)
12 |     props = tree.find("{http://maven.apache.org/POM/4.0.0}properties")
13 |     plugins = []
14 |
15 |     for plugin in props:  # direct iteration; getchildren() was removed in Python 3.9
16 |         name = plugin.tag.split('}', 1)[1].split('.', 1)[0]
17 |         ver = plugin.text
18 |         plugins.append({'name': name, 'version': ver})
19 |
20 |     print('Jenkins plugins:')
21 |     for plugin in sorted(plugins, key=lambda k: k['name']):
22 |         print(" * {} v{}".format(plugin['name'], plugin['version']))
23 |
24 |
25 | if __name__ == '__main__':
26 |     main()
27 |
--------------------------------------------------------------------------------
/testing/sdk_dcos.py:
--------------------------------------------------------------------------------
1 | '''Utilities relating to getting information about DC/OS itself
2 |
3 | ************************************************************************
4 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
5 | SHOULD ALSO BE APPLIED TO sdk_dcos IN ANY OTHER PARTNER REPOS
6 | ************************************************************************
7 | '''
8 | from enum import Enum
9 |
10 | import sdk_cmd
11 |
12 |
13 | class DCOS_SECURITY(Enum):
14 |     disabled = 1
15 |     permissive = 2
16 |     strict = 3
17 |
18 |
19 | def get_metadata():
20 |     return sdk_cmd.cluster_request('GET',
21 |                                    'dcos-metadata/bootstrap-config.json',
22 |                                    retry=False)
23 |
24 |
25 | def get_security_mode() -> DCOS_SECURITY:
26 |     r = get_metadata().json()
27 |     mode = r['security']
28 |     return DCOS_SECURITY[mode]
29 |
--------------------------------------------------------------------------------
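A usage sketch (assumes a cluster is attached and `sdk_cmd` is importable, as the module itself requires):

```python
# Sketch: branch test behaviour on the cluster's security mode.
import sdk_dcos
from sdk_dcos import DCOS_SECURITY

mode = sdk_dcos.get_security_mode()
if mode in (DCOS_SECURITY.permissive, DCOS_SECURITY.strict):
    print("security enabled: {}".format(mode.name))
else:
    print("security disabled")
```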
/conf/jenkins/nodeMonitors.xml:
--------------------------------------------------------------------------------
 1 | <?xml version='1.0' encoding='UTF-8'?>
 2 | <monitors>
 3 |   <hudson.node_monitors.ArchitectureMonitor>
 4 |     <ignored>true</ignored>
 5 |   </hudson.node_monitors.ArchitectureMonitor>
 6 |   <hudson.node_monitors.ClockMonitor>
 7 |     <ignored>true</ignored>
 8 |   </hudson.node_monitors.ClockMonitor>
 9 |   <hudson.node_monitors.DiskSpaceMonitor>
10 |     <ignored>true</ignored>
11 |     <freeSpaceThreshold>1GB</freeSpaceThreshold>
12 |   </hudson.node_monitors.DiskSpaceMonitor>
13 |   <hudson.node_monitors.ResponseTimeMonitor>
14 |     <ignored>true</ignored>
15 |   </hudson.node_monitors.ResponseTimeMonitor>
16 |   <hudson.node_monitors.TemporarySpaceMonitor>
17 |     <ignored>true</ignored>
18 |     <freeSpaceThreshold>1GB</freeSpaceThreshold>
19 |   </hudson.node_monitors.TemporarySpaceMonitor>
20 |   <hudson.node_monitors.SwapSpaceMonitor>
21 |     <ignored>true</ignored>
22 |   </hudson.node_monitors.SwapSpaceMonitor>
23 | </monitors>
24 |
--------------------------------------------------------------------------------
/plugins.conf:
--------------------------------------------------------------------------------
1 | blueocean-bitbucket-pipeline:${BLUEOCEAN_VERSION}
2 | blueocean-commons:${BLUEOCEAN_VERSION}
3 | blueocean-config:${BLUEOCEAN_VERSION}
4 | blueocean-dashboard:${BLUEOCEAN_VERSION}
5 | blueocean-events:${BLUEOCEAN_VERSION}
6 | blueocean-git-pipeline:${BLUEOCEAN_VERSION}
7 | blueocean-github-pipeline:${BLUEOCEAN_VERSION}
8 | blueocean-i18n:${BLUEOCEAN_VERSION}
9 | blueocean-jwt:${BLUEOCEAN_VERSION}
10 | blueocean-jira:${BLUEOCEAN_VERSION}
11 | blueocean-personalization:${BLUEOCEAN_VERSION}
12 | blueocean-pipeline-api-impl:${BLUEOCEAN_VERSION}
13 | blueocean-pipeline-editor:${BLUEOCEAN_VERSION}
14 | blueocean-pipeline-scm-api:${BLUEOCEAN_VERSION}
15 | blueocean-rest-impl:${BLUEOCEAN_VERSION}
16 | blueocean-rest:${BLUEOCEAN_VERSION}
17 | blueocean-web:${BLUEOCEAN_VERSION}
18 | blueocean:${BLUEOCEAN_VERSION}
19 | metrics:4.0.2.6
20 | job-dsl:1.77
21 | configuration-as-code:1.36
22 | mesos:2.0
23 |
--------------------------------------------------------------------------------
/tools/pip/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | # This script builds .whl files for the tools/ and testing/ directories, to be published as part of an SDK release.
5 |
 6 | if [ $# -ne 1 ]; then
 7 |     echo "Syntax: $0 <version>"
 8 |     exit 1
 9 | fi
10 | # convert snapshot releases from e.g. '1.2.3-SNAPSHOT' to '1.2.3+snapshot' to keep python happy. see also PEP-440.
11 | export VERSION=$(echo $1 | sed 's/-/+/g' | tr '[:upper:]' '[:lower:]')
12 |
13 | THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
14 | cd $THIS_DIR
15 |
16 | build_dir() {
17 |     rm -rf dist/ binaries/
18 |     OUTPUT_NAME=$1 INPUT_DIR=$2 python3 setup.py -q bdist_wheel
19 | }
20 |
21 | build_dir tools ..
22 | build_dir testing ../../testing
23 |
24 | cat <<EOF
36 |   <command> [args ...]
37 |
38 | Or import testing as a library with:
39 |
40 |   python3
41 |   import sys, os.path, testing
42 |   sys.path.append(os.path.dirname(testing.__file__))
43 |   import <module>
44 | EOF
45 |
--------------------------------------------------------------------------------
/jenkins-agent-images/linux/dcos-jenkins-dind-agent/Dockerfile.ubuntu:
--------------------------------------------------------------------------------
1 | FROM ubuntu:20.04
2 |
3 | ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
4 |
 5 | RUN apt-get update -y \
 6 |     && apt-get upgrade -y \
 7 |     && apt-get install -y \
 8 |         apt-transport-https \
 9 |         build-essential \
10 |         bzip2 \
11 |         ca-certificates \
12 |         curl \
13 |         git \
14 |         iptables \
15 |         jq \
16 |         lvm2 \
17 |         lxc \
18 |         openjdk-8-jdk-headless \
19 |         unzip \
20 |         zip
21 |
22 | # links to commit hashes are listed inside posted Dockerfiles https://hub.docker.com/r/library/docker/
23 | # NOTE: must match engine version that is directly pulled from Alpine's Dockerfile
24 | ENV DIND_COMMIT 37498f009d8bf25fbb6199e8ccd34bed84f2874b
25 | # docker
26 | RUN curl -sSL https://get.docker.com | sh
27 | # fetch DIND script
28 | RUN curl -sSL https://raw.githubusercontent.com/docker/docker/${DIND_COMMIT}/hack/dind -o /usr/local/bin/dind \
29 | && chmod a+x /usr/local/bin/dind
30 |
31 | COPY ./wrapper.sh /usr/local/bin/wrapper.sh
32 | RUN chmod a+x /usr/local/bin/wrapper.sh
33 |
34 | VOLUME /var/lib/docker
35 | ENTRYPOINT ["wrapper.sh"]
36 | CMD []
37 |
--------------------------------------------------------------------------------
/testing/security/keytab-validator/src/Main.java:
--------------------------------------------------------------------------------
1 |
2 |
3 | import sun.security.krb5.internal.ktab.KeyTab;
4 |
5 | import java.io.File;
6 | import java.util.Arrays;
7 | import java.util.List;
8 |
 9 | public class Main {
10 |     public static void main(String[] args) {
11 |         List<String> arguments = Arrays.asList(args);
12 |
13 |         if (arguments.size() == 0 ||
14 |                 arguments.contains("-h") ||
15 |                 arguments.contains("--help") ||
16 |                 arguments.contains("help")) {
17 |             printHelp();
18 |         }
19 |
20 |         if (arguments.size() > 1) {
21 |             printHelp();
22 |         }
23 |
24 |         File raw = new File(arguments.get(0));
25 |         if (!raw.exists()) {
26 |             System.out.println("Supplied file does not exist!");
27 |             System.exit(1);
28 |         }
29 |
30 |         KeyTab keyTab = KeyTab.getInstance(raw);
31 |
32 |         if (keyTab.isValid()) {
33 |             System.out.println("This keytab is a-ok");
34 |             System.exit(0);
35 |         } else {
36 |             System.out.println("Keytab not valid :(");
37 |             System.exit(1);
38 |         }
39 |     }
40 |
41 |     public static void printHelp() {
42 |         System.out.println("Usage: keytab-validator <keytab file>");
43 |         System.exit(1);
44 |     }
45 | }
46 |
47 |
48 |
--------------------------------------------------------------------------------
/tests/acceptance/fixtures/test-job.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | This is an acceptance test job for Jenkins. The test that creates
5 | and triggers this job will assert that it completes successfully
6 | within a given amount of time. This job runs a shell script that
7 | sleeps for 30 seconds before exiting with a status code of 0
8 | (success).
9 |
10 | false
11 |
12 |
13 | false
14 | false
15 |
16 |
17 |
18 | true
19 | false
20 | false
21 | false
22 |
23 | false
24 |
25 |
26 | sleep 30; exit 0
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/tools/universe/test_package_builder.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from .package_builder import UniversePackageBuilder
3 | from .package import Package
4 |
5 |
 6 | def test_non_existent_input_dir_raises_exception():
 7 |     with pytest.raises(Exception) as e:
 8 |         UniversePackageBuilder(None, None, '__SHOULD_NOT_EXIST__', '.', [])
 9 |
10 |     assert "Provided package path is not a directory: __SHOULD_NOT_EXIST__" in str(
11 |         e.value)
12 |
13 |
14 | def test_empty_input_dir_raises_exception():
15 |     with pytest.raises(Exception) as e:
16 |         UniversePackageBuilder(None, None, 'resources/empty', '.', [])
17 |
18 |     assert "Provided package path does not contain the expected package files: resources/empty" in str(
19 |         e.value)
20 |
21 |
22 | def test_template_service(mocker):
23 |
24 |     package_json = {
25 |         'name': 'template',
26 |         'version': '1.2.3',
27 |         'releaseVersion': 0
28 |     }
29 |     package = Package("template", "stub-universe")
30 |     package_manager = mocker.Mock()
31 |
32 |     package_manager.get_latest = mocker.MagicMock(return_value=Package.from_json(package_json))
33 |
34 |     upb = UniversePackageBuilder(package, package_manager, 'resources/template', '.', [])
35 |
36 |     template_mapping = upb._get_template_mapping_for_content("")
37 |     assert 'upgrades-from' in template_mapping
38 |     assert template_mapping['upgrades-from'] == "1.2.3"
39 |
--------------------------------------------------------------------------------
/testing/testData/test-job.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | created from tests
4 | false
5 |
6 |
7 |
8 |
9 |
10 | false
11 | false
12 |
13 |
14 |
15 | false
16 | false
17 | null
18 | false
19 | false
20 |
21 |
22 | * * * * *
23 |
24 |
25 | false
26 |
27 |
28 | echo "Hello World"; sleep 30
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/tools/vagrant/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | $dcos_box = ENV.fetch('DCOS_BOX', 'mesosphere/dcos-docker-sdk')
5 | # TODO: when box isn't being upgraded regularly, switch from the raw URL to the (commented-out) cached URL:
6 | #$dcos_box_url = ENV.fetch('DCOS_BOX_URL', 'https://downloads.mesosphere.com/dcos-docker-sdk/metadata.json')
7 | $dcos_box_url = ENV.fetch('DCOS_BOX_URL', 'https://s3.amazonaws.com/downloads.mesosphere.io/dcos-docker-sdk/metadata.json')
8 |
9 | $dcos_cpus = ENV.fetch('DCOS_CPUS', 2)
10 | $dcos_mem = ENV.fetch('DCOS_MEM', 6144) # 6GB
11 |
12 | # configure vbox host-only network
13 | system('./vbox-network.sh')
14 |
15 | Vagrant.configure(2) do |config|
16 |   # configure vagrant-vbguest plugin
17 |   if Vagrant.has_plugin?('vagrant-vbguest')
18 |     config.vbguest.auto_update = true
19 |   end
20 |
21 |   config.vm.define 'dcos-docker-sdk' do |vm_cfg|
22 |     vm_cfg.vm.box = $dcos_box
23 |     vm_cfg.vm.box_url = $dcos_box_url
24 |
25 |     vm_cfg.vm.hostname = 'dcos-docker-sdk'
26 |     vm_cfg.vm.network :private_network, ip: '192.168.65.50'
27 |     config.vm.synced_folder '.', '/vagrant', type: :virtualbox
28 |     config.vm.synced_folder '../../', '/dcos-commons', type: :virtualbox
29 |
30 |     vm_cfg.vm.provider :virtualbox do |v|
31 |       v.name = vm_cfg.vm.hostname
32 |       v.cpus = $dcos_cpus
33 |       v.memory = $dcos_mem
34 |       # configure guest to use host DNS resolver
35 |       v.customize ['modifyvm', :id, '--natdnshostresolver1', 'on']
36 |     end
37 |   end
38 | end
39 |
--------------------------------------------------------------------------------
/tools/universe/s3_uploader.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import logging
4 | import os
5 | import os.path
6 | import subprocess
7 |
8 | log = logging.getLogger(__name__)
9 | logging.basicConfig(level=logging.DEBUG, format="%(message)s")
10 |
11 |
12 | class S3Uploader(object):
13 |     def __init__(self, s3_directory, dry_run=False):
14 |         # check if aws cli tools are installed
15 |         if not subprocess.run("aws --version".split()).returncode == 0:
16 |             raise Exception('Required "aws" command is not installed.')
17 |
18 |         self._s3_directory = s3_directory
19 |         self._aws_region = os.environ.get('AWS_UPLOAD_REGION', '')
20 |         self._dry_run = dry_run
21 |
22 |     def get_s3_directory(self):
23 |         return self._s3_directory
24 |
25 |     def upload(self, filepath, content_type=None):
26 |         filename = os.path.basename(filepath)
27 |         cmdlist = ['aws s3']
28 |         if self._aws_region:
29 |             cmdlist.append('--region={}'.format(self._aws_region))
30 |         cmdlist.append('cp --acl public-read')
31 |         if self._dry_run:
32 |             cmdlist.append('--dryrun')
33 |         if content_type is not None:
34 |             cmdlist.append('--content-type "{}"'.format(content_type))
35 |         dest_url = '{}/{}'.format(self._s3_directory, filename)
36 |         cmdlist.append('{} {} 1>&2'.format(filepath, dest_url))
37 |         cmd = ' '.join(cmdlist)
38 |         log.info(cmd)
39 |         ret = os.system(cmd)
40 |         if not ret == 0:
41 |             raise Exception('Failed to upload {} to {}'.format(filepath, dest_url))
42 |
--------------------------------------------------------------------------------
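A usage sketch (assumes `tools/` is on `PYTHONPATH`; the bucket URI and filename are illustrative, and the constructor itself verifies that the `aws` CLI is installed):

```python
# Sketch: upload a file publicly to an S3 "directory", with --dryrun enabled.
from universe import S3Uploader  # exported via tools/universe/__init__.py

uploader = S3Uploader("s3://example-bucket/jenkins", dry_run=True)  # illustrative URI
uploader.upload("stub-universe.json", content_type="application/json")
```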
/tests/scale/test_quota.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import pytest
3 | import uuid
4 |
5 | import config
6 | import sdk_install
7 | import sdk_quota
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | @pytest.fixture(scope='module', autouse=True)
13 | def configure_package():
14 |     try:
15 |         sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
16 |         sdk_install.install(config.PACKAGE_NAME,
17 |                             config.SERVICE_NAME, 0, wait_for_deployment=False,
18 |                             additional_options={
19 |                                 "roles": {
20 |                                     "jenkins-agent-role": "jenkins"
21 |                                 }
22 |                             })
23 |
24 |         yield  # let the test session execute
25 |     finally:
26 |         sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
27 |
28 |
29 | @pytest.mark.sanity
30 | def test_create_quota():
31 |     try:
32 |         sdk_quota.create_quota("jenkins", cpus=4.0, mem=4000)
33 |         quotas = sdk_quota.list_quotas()
34 |
35 |         present = False
36 |         for quota in quotas['infos']:
37 |             if quota['role'] == "jenkins":
38 |                 present = True
39 |                 break
40 |
41 |         assert present, "There was no quota present for the jenkins role"
42 |
43 |     finally:
44 |         sdk_quota.remove_quota("jenkins")
45 |
46 |         quotas = sdk_quota.list_quotas()
47 |         if 'infos' in quotas:
48 |             for quota in quotas['infos']:
49 |                 assert quota['role'] != "jenkins"
50 |
--------------------------------------------------------------------------------
/scripts/dcos-quota.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
 3 | # This script enables multi-tenancy features available from DC/OS 2.0 and later.
 4 | # It forces $JENKINS_AGENT_ROLE to match $MESOS_ALLOCATION_ROLE,
 5 | # but only when $MARATHON_APP_ENFORCE_GROUP_ROLE is set.
 6 |
 7 | IS_ENFORCE_GROUP_ROLE=""
 8 | _str2bool() {
 9 |     local test_condition=${1,,}
10 |     case $test_condition in
11 |         "yes" | "true" | "t" | "y" | "1")
12 |             IS_ENFORCE_GROUP_ROLE="True"
13 |             ;;
14 |         "no" | "false" | "f" | "n" | "0")
15 |             IS_ENFORCE_GROUP_ROLE=""
16 |             ;;
17 |         *)
18 |             IS_ENFORCE_GROUP_ROLE=""
19 |     esac
20 | }
21 |
22 | if [ -z "$MESOS_ALLOCATION_ROLE" ] || [ -z "$MARATHON_APP_ENFORCE_GROUP_ROLE" ]; then
23 |     echo "INFO: This cluster does not have multi-tenant features. Using legacy, non-quota-aware semantics."
24 |     exit 0
25 | fi
26 |
27 | # Filter all variants of true here.
28 | _str2bool $MARATHON_APP_ENFORCE_GROUP_ROLE
29 |
30 | if [ ! -z "$IS_ENFORCE_GROUP_ROLE" ]; then
31 |     if [ "$MESOS_ALLOCATION_ROLE" != "$JENKINS_AGENT_ROLE" ]; then
32 |         echo "WARN: JENKINS_AGENT_ROLE:'$JENKINS_AGENT_ROLE' not the same as MESOS_ALLOCATION_ROLE:'$MESOS_ALLOCATION_ROLE'."
33 |         echo "enforceRole detected on top-level group, using '$MESOS_ALLOCATION_ROLE' as agent-role."
34 |     fi
35 |     JENKINS_AGENT_ROLE=$MESOS_ALLOCATION_ROLE
36 |     export JENKINS_AGENT_ROLE
37 |     echo "INFO: using enforced group role '$JENKINS_AGENT_ROLE' for agents."
38 | else
39 |     echo "INFO: using non-enforced group role '$JENKINS_AGENT_ROLE' for agents."
40 | fi
41 |
--------------------------------------------------------------------------------
/testing/security/kerberos.py:
--------------------------------------------------------------------------------
1 | """
2 | A set of Kerberos utilities
3 | """
4 | import itertools
5 | import logging
6 |
7 | import sdk_cmd
8 |
9 |
10 | log = logging.getLogger(__name__)
11 |
12 |
13 | def generate_principal(primary: str, instance: str, realm: str) -> str:
14 |     """
15 |     Generate a Kerberos principal from the three different components.
16 |     """
17 |     if instance:
18 |         principal = "{}/{}".format(primary, instance)
19 |     else:
20 |         principal = primary
21 |
22 |     return "{}@{}".format(principal, realm.upper())
23 |
24 |
25 | def generate_principal_list(primaries: list, instances: list, realm: str) -> list:
26 |     principals = []
27 |     for (primary, instance) in itertools.product(primaries, instances):
28 |         principals.append(generate_principal(primary, instance, realm))
29 |
30 |     return principals
31 |
32 |
33 | def write_krb5_config_file(task: str, filename: str, krb5: object) -> str:
34 |     """
35 |     Generate a Kerberos config file.
36 |     """
37 |     output_file = filename
38 |
39 |     log.info("Generating %s", output_file)
40 |     krb5_file_contents = ['[libdefaults]',
41 |                           'default_realm = {}'.format(krb5.get_realm()),
42 |                           '',
43 |                           '[realms]',
44 |                           '  {realm} = {{'.format(realm=krb5.get_realm()),
45 |                           '    kdc = {}'.format(krb5.get_kdc_address()),
46 |                           '  }', ]
47 |     log.info("%s", krb5_file_contents)
48 |
49 |     output = sdk_cmd.create_task_text_file(task, output_file, krb5_file_contents)
50 |     log.info(output)
51 |
52 |     return output_file
53 |
--------------------------------------------------------------------------------
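For example, the cross-product helper above expands two primaries over two hosts into four principals (a sketch; assumes `testing/` is on `PYTHONPATH`, and the primaries and hostnames are illustrative):

```python
# Sketch: generate a principal for every (primary, instance) pair.
from security import kerberos

principals = kerberos.generate_principal_list(
    primaries=["hdfs", "HTTP"],  # illustrative primaries
    instances=["node-0.example.com", "node-1.example.com"],  # illustrative hosts
    realm="LOCAL",
)
assert len(principals) == 4
assert "hdfs/node-0.example.com@LOCAL" in principals
```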
/tools/create_service_account.sh:
--------------------------------------------------------------------------------
1 | set -x
2 | MODE=
3 |
4 | # Otherwise, Python will complain.
5 | export LC_ALL=en_US.UTF-8
6 | export LANG=en_US.UTF-8
7 |
8 | SERVICE_ACCOUNT_NAME=${1:-service-acct}
9 | SECRET_NAME=${2:-secret}
10 |
11 | while [ ! $# -eq 0 ]
12 | do
13 |     case "$1" in
14 |         --strict | -s)
15 |             MODE="--strict"
16 |             ;;
17 |     esac
18 |     shift
19 | done
20 |
21 | echo Creating service account for account=$SERVICE_ACCOUNT_NAME secret=$SECRET_NAME mode=$MODE
22 |
23 | echo Install cli necessary for security...
24 | if ! dcos package install dcos-enterprise-cli --yes; then
25 |     echo "Failed to install dcos-enterprise cli extension" >&2
26 |     exit 1
27 | fi
28 |
29 | echo Create keypair...
30 | if ! dcos security org service-accounts keypair private-key.pem public-key.pem; then
31 |     echo "Failed to create keypair for testing service account" >&2
32 |     exit 1
33 | fi
34 |
35 | echo Create service account...
36 | dcos security org service-accounts delete "${SERVICE_ACCOUNT_NAME}" &> /dev/null
37 | if ! dcos security org service-accounts create -p public-key.pem -d "My service account" "${SERVICE_ACCOUNT_NAME}"; then
38 |     echo "Failed to create service account '${SERVICE_ACCOUNT_NAME}'" >&2
39 |     exit 1
40 | fi
41 |
42 | echo Create secret...
43 | dcos security secrets delete "${SECRET_NAME}" &> /dev/null
44 | if ! dcos security secrets create-sa-secret ${MODE} private-key.pem "${SERVICE_ACCOUNT_NAME}" "${SECRET_NAME}"; then
45 |     echo "Failed to create secret '${SECRET_NAME}' for service account '${SERVICE_ACCOUNT_NAME}'" >&2
46 |     exit 1
47 | fi
48 |
49 | echo Service account created for account=$SERVICE_ACCOUNT_NAME secret=$SECRET_NAME
50 |
--------------------------------------------------------------------------------
/tools/save_properties.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Saves the stub-universe.properties file by uploading it to S3.
4 | #
5 | # Assumption: this script is called from Jenkins where $WORKSPACE is defined.
6 |
7 | import logging
8 | import os
9 | import sys
10 |
11 | logger = logging.getLogger(__name__)
12 | logging.basicConfig(level=logging.DEBUG, format="%(message)s")
13 |
14 | PROPERTIES_FILE_NAME='stub-universe.properties'
15 |
16 |
17 | def upload_to_s3(s3_dir_uri):
18 |     jenkins_workspace_path = os.environ.get('WORKSPACE', '')
19 |     properties_file_path = "{}/{}".format(jenkins_workspace_path, PROPERTIES_FILE_NAME)
20 |     if not os.path.isfile(properties_file_path):
21 |         err = 'Could not find properties file: {}'.format(properties_file_path)
22 |         raise Exception(err)
23 |
24 |     # check if aws cli tools are installed
25 |     cmd = "aws --version"
26 |     ret = os.system(cmd)
27 |     if not ret == 0:
28 |         err = 'Required AWS cli tools not installed.'
29 |         raise Exception(err)
30 |
31 |     filename = os.path.basename(properties_file_path)
32 |     cmd = 'aws s3 cp --acl public-read {} {}/{} 1>&2'.format(
33 |         properties_file_path, s3_dir_uri, filename)
34 |     logger.info(cmd)
35 |     ret = os.system(cmd)
36 |     if not ret == 0:
37 |         err = 'Failed to upload {} to S3'.format(filename)
38 |         raise Exception(err)
39 |
40 |
41 | def main(argv):
42 |     if len(argv) != 2:
43 |         logger.error('Syntax: {} <s3-directory-uri>'.format(argv[0]))
44 |         logger.error('Received arguments {}'.format(str(argv)))
45 |         return 1
46 |     upload_to_s3(argv[1])
47 |
48 |     return 0
49 |
50 |
51 | if __name__ == '__main__':
52 |     sys.exit(main(sys.argv))
53 |
--------------------------------------------------------------------------------
/tools/ci/launch_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | LAUNCH_CONFIG_FILE=${1:-/build/config.yaml}
4 | CLUSTER_INFO_FILE=${2:-/build/cluster_info.json}
5 |
6 | set -e
7 | LAUNCH_SUCCESS="False"
8 | RETRY_LAUNCH="True"
9 |
10 | while [ x"${LAUNCH_SUCCESS}" == x"False" ]; do
11 |     dcos-launch create --config-path=${LAUNCH_CONFIG_FILE} --info-path=${CLUSTER_INFO_FILE}
12 |     if [ x"$RETRY_LAUNCH" == x"True" ]; then
13 |         set +e
14 |     else
15 |         set -e
16 |     fi
17 |     dcos-launch wait --info-path=${CLUSTER_INFO_FILE} 2>&1 | tee dcos-launch-wait-output.stdout
18 |
19 |     # Grep exits with an exit code of 1 if no lines are matched. We thus need to
20 |     # disable exit on errors.
21 |     set +e
22 |     ROLLBACK_FOUND=$(grep -o "Exception: StackStatus changed unexpectedly to: ROLLBACK_IN_PROGRESS" dcos-launch-wait-output.stdout)
23 |     if [ -n "${ROLLBACK_FOUND}" ]; then
24 |
25 |         if [ x"${RETRY_LAUNCH}" == x"False" ]; then
26 |             set -e
27 |             echo "Cluster launch failed"
28 |             exit 1
29 |         fi
30 |         # TODO: This would be a good place to add some form of alerting!
31 |         # We could add a cluster_failure.sh callback, for example.
32 |
33 |         # We only retry once!
34 |         RETRY_LAUNCH="False"
35 |         set -e
36 |
37 |         # We need to wait for the current stack to be deleted
38 |         dcos-launch delete --info-path=${CLUSTER_INFO_FILE}
39 |         rm -f ${CLUSTER_INFO_FILE}
40 |         echo "Cluster creation failed. Retrying after 30 seconds"
41 |         sleep 30
42 |     else
43 |         LAUNCH_SUCCESS="True"
44 |     fi
45 | done
46 | set -e
47 |
48 | # Print the cluster info.
49 | echo "Printing ${CLUSTER_INFO_FILE}..."
50 | cat ${CLUSTER_INFO_FILE}
51 |
--------------------------------------------------------------------------------
/conf/jenkins/configuration.yaml:
--------------------------------------------------------------------------------
 1 | jenkins:
 2 |   agentProtocols:
 3 |     - "JNLP4-connect"
 4 |     - "Ping"
 5 |   numExecutors: 0
 6 |   clouds:
 7 |     - mesos:
 8 |         agentUser: "${JENKINS_AGENT_USER:-nobody}"
 9 |         frameworkName: "${JENKINS_FRAMEWORK_NAME:-Jenkins Scheduler}"
10 |         jenkinsURL: "http://${JENKINS_FRAMEWORK_NAME:-jenkins}.${MARATHON_NAME:-marathon}.mesos:${PORT0:-8080}"
11 |         mesosAgentSpecTemplates:
12 |           - label: "${JENKINS_LINUX_AGENT_LABEL:-linux}"
13 |             agentAttributes: "${JENKINS_LINUX_AGENT_OFFER_SELECTION_ATTRIBUTES:-}"
14 |             agentCommandStyle: Linux
15 |             containerInfo:
16 |               dockerForcePullImage: false
17 |               dockerImage: "${JENKINS_LINUX_AGENT_IMAGE:-mesosphere/jenkins-dind:0.9.0}"
18 |               dockerPrivilegedMode: true
19 |               isDind: true
20 |               networking: HOST
21 |               type: "DOCKER"
22 |             cpus: ${JENKINS_LINUX_AGENT_CPUS:-0.1}
23 |             disk: ${JENKINS_LINUX_AGENT_DISK:-0.0}
24 |             domainFilterModel: "home"
25 |             idleTerminationMinutes: ${JENKINS_LINUX_AGENT_IDLE_TERMINATION_MINUTES:-3}
26 |             jnlpArgs: "${JENKINS_LINUX_AGENT_JNLP_ARGS:--noReconnect}"
27 |             maxExecutors: ${JENKINS_LINUX_AGENT_MAX_EXECUTORS:-1}
28 |             mem: ${JENKINS_LINUX_AGENT_MEM:-512}
29 |             minExecutors: ${JENKINS_LINUX_AGENT_MIN_EXECUTORS:-1}
30 |             mode: EXCLUSIVE
31 |         mesosMasterUrl: "${JENKINS_MESOS_MASTER:-http://leader.mesos:5050}"
32 |         role: "${JENKINS_AGENT_ROLE:-*}"
33 | unclassified:
34 |   location:
35 |     adminAddress: "address not configured yet <nobody@nowhere>"
36 |     url: "http://${JENKINS_FRAMEWORK_NAME:-jenkins}.${MARATHON_NAME:-marathon}.mesos:${PORT0:-8080}/"
37 |
--------------------------------------------------------------------------------
/universe/package.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "packagingVersion": "3.0",
 3 |   "name": "jenkins",
 4 |   "version": "{{package-version}}",
 5 |   "minDcosReleaseVersion": "1.11",
 6 |   "scm": "https://github.com/mesosphere/dcos-jenkins-service.git",
 7 |   "maintainer": "support@mesosphere.io",
 8 |   "website": "https://jenkins.io",
 9 |   "framework": true,
10 |   "description": "Jenkins is an award-winning, cross-platform, continuous integration and continuous delivery application that increases your productivity. Use Jenkins to build and test your software projects continuously making it easier for developers to integrate changes to the project, and making it easier for users to obtain a fresh build. It also allows you to continuously deliver your software by providing powerful ways to define your build pipelines and integrating with a large number of testing and deployment technologies.",
11 |   "tags": ["continuous-integration", "ci", "jenkins"],
12 |   "preInstallNotes": "WARNING: If you didn't provide a value for `storage.host-volume` (either using the CLI or via the Advanced Install dialog),\nYOUR DATA WILL NOT BE SAVED IN ANY WAY.\n",
13 |   "postInstallNotes": "Jenkins has been installed.",
14 |   "postUninstallNotes": "Jenkins has been uninstalled. Note that any data persisted to a NFS share still exists and will need to be manually removed.",
15 |   "licenses": [
16 |     {
17 |       "name": "Apache License Version 2.0",
18 |       "url": "https://github.com/mesosphere/dcos-jenkins-service/blob/master/LICENSE"
19 |     },
20 |     {
21 |       "name": "Apache License Version 2.0",
22 |       "url": "https://github.com/jenkinsci/mesos-plugin/blob/master/LICENSE"
23 |     },
24 |     {
25 |       "name": "MIT License",
26 |       "url": "https://github.com/jenkinsci/jenkins/blob/master/LICENSE.txt"
27 |     }
28 |   ],
29 |   "selected": true
30 | }
31 |
--------------------------------------------------------------------------------
/testing/sdk_fault_domain.py:
--------------------------------------------------------------------------------
1 | '''Utilities related to fault domain information from cloud providers.
2 |
3 | ************************************************************************
4 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
5 | SHOULD ALSO BE APPLIED TO ANY OTHER PARTNER REPOS
6 | ************************************************************************
7 | '''
8 | import logging
9 |
10 |
11 | log = logging.getLogger(__name__)
12 |
13 | # TODO: use cloud provider library APIs to get list of regions.
14 | AWS_REGIONS = [
15 |     'ap-northeast-1',
16 |     'ap-northeast-2',
17 |     'ap-south-1',
18 |     'ap-southeast-1',
19 |     'ap-southeast-2',
20 |     'ca-central-1',
21 |     'eu-central-1',
22 |     'eu-west-1',
23 |     'eu-west-2',
24 |     'sa-east-1',
25 |     'us-east-1',
26 |     'us-east-2',
27 |     'us-west-1',
28 |     'us-west-2'
29 | ]
30 |
31 | # TODO: use cloud provider library APIs to get list of zones.
32 | # a through h
33 | AWS_ZONE_SUFFIXES = [chr(i) for i in range(ord('a'), ord('h')+1)]
34 |
35 |
36 | # expect e.g. "aws/us-west-2" or "ca-central-1"
37 | def is_valid_aws_region(region: str):
38 |     if region.startswith('aws/'):
39 |         # trim leading 'aws/' if present
40 |         region = region[len('aws/'):]
41 |     return region in AWS_REGIONS
42 |
43 |
44 | # expect e.g. "aws/us-west-2c" or "ca-central-1h"
45 | def is_valid_aws_zone(zone: str):
46 |     region = zone[:-1]  # all except last character
47 |     zone_suffix = zone[-1:]  # last character
48 |     return is_valid_aws_region(region) and zone_suffix in AWS_ZONE_SUFFIXES
49 |
50 |
51 | # TODO: handle multiple cloud providers.
52 | def is_valid_region(region: str):
53 |     return is_valid_aws_region(region)
54 |
55 |
56 | # TODO: handle multiple cloud providers.
57 | def is_valid_zone(zone: str):
58 |     # e.g. "aws/us-west-2c"
59 |     return is_valid_aws_zone(zone)
60 |
--------------------------------------------------------------------------------
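A quick sketch of the formats these helpers accept:

```python
# Sketch: both bare and "aws/"-prefixed names validate; zone suffixes run a-h.
import sdk_fault_domain

assert sdk_fault_domain.is_valid_region("aws/us-west-2")
assert sdk_fault_domain.is_valid_region("ca-central-1")
assert sdk_fault_domain.is_valid_zone("aws/us-west-2c")
assert not sdk_fault_domain.is_valid_zone("aws/us-west-2z")
```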
/jenkins-agent-images/linux/dcos-jenkins-dind-agent/scripts/build_images.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
3 | TAG=${GIT_BRANCH#*/}
4 |
 5 | function usage() {
 6 |     echo "$0 <image> [yes] [tag prefix]"
 7 |     echo "build Jenkins DIND Agent docker images."
 8 |     echo ""
 9 |     echo "<image> is the image name, with prefix."
10 |     echo "[yes] is optional; include to also push images to docker hub."
11 |     echo "[tag prefix] is optional; include to override the tag derived from the git branch."
12 |     echo ""
13 |     exit 1
14 | }
15 |
16 | function preflight {
17 |     if [ ! -d ".git" ]
18 |     then
19 |         echo "This directory does not contain a .git directory."
20 |         echo "Please run this script from the jenkins-dind-agent root directory."
21 |         echo ""
22 |         exit 2
23 |     fi
24 | }
25 |
26 | function dock_build {
27 |     local image=$1
28 |     local tag=$2
29 |     local file=$3
30 |
31 |     echo "Building $image:$tag ..."
32 |     docker build -t $image:$tag -f $file .
33 | }
34 |
35 | function dock_push {
36 |     local image=$1
37 |     local tag=$2
38 |
39 |     echo "Pushing $image:$tag ..."
40 |     docker push $image:$tag
41 | }
42 |
43 | function build_images() {
44 |     if [ "x$1" == "x" ]
45 |     then
46 |         usage
47 |     else
48 |         image_name="$1"
49 |     fi
50 |
51 |     if [ "x$3" != "x" ]
52 |     then
53 |         TAG=$3
54 |     fi
55 |
56 |     preflight
57 |
58 |     for i in Dockerfile*
59 |     do
60 |         local possible_tag=${i#*.}
61 |         local tag_suffix=""
62 |         if [ "$possible_tag" != "Dockerfile" ]
63 |         then
64 |             tag_suffix="-${possible_tag}"
65 |         fi
66 |
67 |         final_tag="$TAG${tag_suffix}"
68 |
69 |         dock_build "$image_name" "$final_tag" "$i"
70 |
71 |         if [ "x$2" == "xyes" ]
72 |         then
73 |             dock_push "$image_name" "$final_tag"
74 |         fi
75 |     done
76 | }
77 |
78 | build_images $1 $2 $3
79 |
--------------------------------------------------------------------------------
/tools/build_framework.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
 3 | cat <<EOF
--------------------------------------------------------------------------------
/tools/universe/test_package.py:
--------------------------------------------------------------------------------
 1 | from .package import Package
 2 | from .package import Version
 3 |
 4 |
 5 | def test_version_comparison():
12 |     assert Version(1, "1.2.3") > Version(0, "1.2.4")
13 |     assert Version(10, "1.2.4") > Version(0, "1.2.5")
14 |     assert Version(0, "1.2.4") == Version(0, "1.2.3")
15 |
16 |
17 | def test_package_from_json():
18 |     package_json = {
19 |         'name': 'package',
20 |         'version': '1.2.3',
21 |         'releaseVersion': 10
22 |     }
23 |     p = Package.from_json(package_json)
24 |
25 |     assert p.get_name() == package_json['name']
26 |     assert p.get_version() == Version(10, '1.2.3')
27 |
28 |
29 | def test_package_starts_with_beta_is_beta():
30 |     p = Package('beta-package', None)
31 |
32 |     assert p.is_beta()
33 |
34 |
35 | def test_normal_package_is_not_beta():
36 |
37 |     p = Package('package', None)
38 |
39 |     assert not p.is_beta()
40 |
41 |
42 | def test_non_beta_package_beta_name_is_name():
43 |
44 |     p = Package('package', None)
45 |
46 |     assert p.get_name() == p.get_non_beta_name()
47 |
48 |
49 | def test_beta_package_beta_name():
50 |     p = Package('beta-package', None)
51 |
52 |     assert p.get_non_beta_name() == 'package'
53 |
54 |
55 | def test_elastic_ordering():
56 |     p7 = Package.from_json({
57 |         "name": "beta-elastic",
58 |         "version": "1.0.16-5.5.1-beta",
59 |         "releaseVersion": 7
60 |     })
61 |     p0 = Package.from_json({
62 |         "name": "beta-elastic",
63 |         "version": "1.0.9-5.3.0-beta",
64 |         "releaseVersion": 0
65 |     })
66 |     p1 = Package.from_json({
67 |         "name": "beta-elastic",
68 |         "version": "1.0.10-5.3.0-beta",
69 |         "releaseVersion": 1
70 |     })
71 |
72 |     assert p0 < p1
73 |     assert p7 > p0
74 |
--------------------------------------------------------------------------------
/tools/pip/cmd_wrapper/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import os
4 | import os.path
5 | import subprocess
6 | import sys
7 |
8 | __PARENT_DIR_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
9 | __PARENT_DIR_NAME = os.path.basename(__PARENT_DIR_PATH)
10 |
11 | def __log(msg):
12 |     sys.stderr.write(msg + '\n')
13 |
14 |
15 | def __get_file_error(file_path):
16 |     if not os.path.exists(file_path):
17 |         return 'Path does not exist: {}'.format(file_path)
18 |     if not os.path.isfile(file_path):
19 |         return 'Path is not a regular file: {}'.format(file_path)
20 |     if not os.access(file_path, os.X_OK):
21 |         return 'Path is not executable: {}'.format(file_path)
22 |     return None
23 |
24 |
25 | def __syntax():
26 |     available_files = []
27 |     for root, dirs, files in os.walk(__PARENT_DIR_PATH):
28 |         for f in files:
29 |             file_path = os.path.join(root, f)
30 |             # ignore self:
31 |             if file_path == os.path.join(__PARENT_DIR_PATH, '__init__.py'):
32 |                 continue
33 |             # ignore files that aren't regular+executable:
34 |             file_error = __get_file_error(file_path)
35 |             if file_error:
36 |                 continue
37 |             # get path relative to this dir:
38 |             available_files.append(file_path[len(__PARENT_DIR_PATH) + 1:])
39 |     available_files.sort()
40 |
41 |     __log('''Syntax: {} <file> [args]
42 | {} executable files in {}:
43 |   {}'''.format(__PARENT_DIR_NAME, len(available_files), __PARENT_DIR_PATH, '\n  '.join(available_files)))
44 |
45 |
46 | def main():
47 |     if len(sys.argv) < 2:
48 |         __syntax()
49 |         return 1
50 |
51 |     file_to_run = sys.argv[1]
52 |     path_to_run = os.path.join(__PARENT_DIR_PATH, file_to_run)
53 |     file_error = __get_file_error(path_to_run)
54 |     if file_error:
55 |         __log(file_error)
56 |         __syntax()
57 |         return 1
58 |
59 |     return subprocess.call([path_to_run] + sys.argv[2:])
60 |
61 |
62 | if __name__ == '__main__':
63 |     sys.exit(main())
64 |
--------------------------------------------------------------------------------
/scripts/init.groovy.d/mesos-auth.groovy:
--------------------------------------------------------------------------------
1 | import com.cloudbees.plugins.credentials.*
2 | import com.cloudbees.plugins.credentials.domains.*
3 | import com.cloudbees.plugins.credentials.impl.*
4 | import hudson.tasks.*
5 | import jenkins.model.*
6 | import org.jenkinsci.plugins.mesos.MesosCloud
7 |
 8 | def changePassword = { userName ->
 9 |     def cloud = MesosCloud.get()
10 |     def credentialsId = cloud.getCredentialsId()
11 |     def credId = "mesos-${userName}"
12 |
13 |     if (credentialsId && credentialsId == credId) {
14 |         // do nothing if credential already exists
15 |         println "--> [mesos] credentials already selected"
16 |     } else {
17 |         // create a new credential with an expected ID
18 |         println "--> [mesos] creating new credentials"
19 |         String randomPwd = org.apache.commons.lang.RandomStringUtils.random(9, true, true)
20 |
21 |         mesosFrameworkCreds = new UsernamePasswordCredentialsImpl(
22 |             CredentialsScope.GLOBAL,
23 |             "mesos-${userName}",
24 |             "mesos authentication",
25 |             userName, randomPwd)
26 |         SystemCredentialsProvider.getInstance().getStore().addCredentials(Domain.global(), mesosFrameworkCreds)
27 |         cloud.setCredentialsId(mesosFrameworkCreds.getId())
28 |         Jenkins.getInstance().save()
29 |         cloud.restartMesos()
30 |
31 |         println "--> [mesos] creating new credentials... done"
32 |     }
33 | }
34 |
35 | // the env var is set by DCOS when using a service account to run Jenkins
36 | def accountCreds = System.getenv("DCOS_SERVICE_ACCOUNT_CREDENTIAL")
37 | if (accountCreds) {
38 |     Thread.start {
39 |         // wait 30s, this gives the mesos plugin time to start
40 |         sleep 30000
41 |         def credURL = new URL(accountCreds)
42 |         def credFile = new File(credURL.toURI())
43 |         def credJSON = new groovy.json.JsonSlurper().parseText(credFile.text)
44 |         if (credJSON && credJSON.uid) {
45 |             changePassword(credJSON.uid)
46 |         } else {
47 |             println "--> [mesos] Failed to read principal from credentials file"
48 |         }
49 |     }
50 | } else {
51 |     println "--> [mesos] No DC/OS account detected; skipping mesos auth"
52 | }
53 |
--------------------------------------------------------------------------------
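
For reference, a sketch of the credential file the Groovy hook above consumes; the hook only reads `uid` (the Mesos principal). The other fields shown here are an assumption about DC/OS service-account credentials, not something the script depends on.

```python
import json

# Hypothetical DC/OS service-account credential file contents.
cred_text = '{"uid": "jenkins-principal", "scheme": "RS256", "private_key": "..."}'
cred = json.loads(cred_text)
assert cred["uid"] == "jenkins-principal"  # the only field mesos-auth.groovy uses
```
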
/scripts/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [[ -n "$JENKINS_OPT_ADDITIONAL_PLUGINS" ]]; then
4 | echo "Installing additional plugins..."
5 | export REF=/var/jenkins_home
6 | export JENKINS_WAR=${JENKINS_FOLDER}/jenkins.war
7 | CURL_OPTIONS="-fkL" /usr/local/bin/install-plugins.sh $JENKINS_OPT_ADDITIONAL_PLUGINS 2>&1
8 | echo "Completed installing additional plugins..."
9 | else
10 | echo "No additional plugins requested for installation..."
11 | fi
12 |
13 | # Run setup scripts.
14 | . /usr/local/jenkins/bin/export-libssl.sh
15 | . /usr/local/jenkins/bin/dcos-account.sh
16 | . /usr/local/jenkins/bin/dcos-quota.sh
17 | . /usr/local/jenkins/bin/dcos-framework-dns-name.sh
18 | . /usr/local/jenkins/bin/dcos-write-known-hosts-file.sh
19 |
20 | # Set Nginx parameters
21 | envsubst '\$PORT0 \$PORT1 \$JENKINS_CONTEXT' < /var/nginx/nginx.conf.template > /var/nginx/nginx.conf
22 |
23 | # Remove any previous jenkins-mesos-plugin builds directly injected into the build path.
24 | # This was done in versions 3.5.4-2.150.1 and prior; the practice is now deprecated.
25 | rm -f /var/jenkins_home/plugins/mesos.hpi
26 |
27 | nginx -c /var/nginx/nginx.conf \
28 | && java ${JVM_OPTS} \
29 | -Duser.home="${MESOS_SANDBOX}" \
30 | -Dhudson.model.DirectoryBrowserSupport.CSP="${JENKINS_CSP_OPTS}" \
31 | -Dhudson.udp=-1 \
32 | -Djava.awt.headless=true \
33 | -Dhudson.DNSMultiCast.disabled=true \
34 | -Djenkins.install.runSetupWizard=false \
35 | -Djenkins.model.Jenkins.slaveAgentPort=${PORT2} \
36 | -Djenkins.model.Jenkins.slaveAgentPortEnforce=true \
37 | -jar ${JENKINS_FOLDER}/jenkins.war \
38 | ${JENKINS_OPTS} \
39 | --httpPort=${PORT1} \
40 | --webroot=${JENKINS_FOLDER}/war \
41 | --ajp13Port=-1 \
42 | --httpListenAddress=127.0.0.1 \
43 | --ajp13ListenAddress=127.0.0.1 \
44 | --prefix=${JENKINS_CONTEXT}
45 |
--------------------------------------------------------------------------------
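
The `envsubst` call in run.sh whitelists `$PORT0`, `$PORT1`, and `$JENKINS_CONTEXT`, so nginx's own runtime variables (`$http_host`, `$remote_addr`, and so on) survive the substitution. A small sketch of the same idea in Python, with hypothetical values:

```python
import re

env = {"PORT0": "10000", "PORT1": "10001", "JENKINS_CONTEXT": "/service/jenkins"}
template = "listen $PORT0 default_server;\nproxy_set_header Host $http_host;"

# Replace only the whitelisted variables, as envsubst does with its argument.
rendered = re.sub(r"\$(PORT0|PORT1|JENKINS_CONTEXT)",
                  lambda m: env[m.group(1)], template)
print(rendered)  # $PORT0 becomes 10000; $http_host is left alone for nginx
```
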
/tools/universe/package.py:
--------------------------------------------------------------------------------
1 | import json
2 | import functools
3 |
4 | @functools.total_ordering
5 | class Package:
6 | @staticmethod
7 | def from_json(pkg_json):
8 | """Construct a Package object from a JSON definition"""
9 | return Package(pkg_json['name'],
10 | Version(pkg_json['releaseVersion'], pkg_json['version']))
11 |
12 | def __init__(self, name, version, raw_data=None):
13 | self._name = name
14 | self._version = version
15 | self._raw_data = raw_data if raw_data is not None else {}
16 |
17 | def __eq__(self, other):
18 | if self.get_name() != other.get_name():
19 | return False
20 |
21 | return self.get_version() == other.get_version()
22 |
23 | def __lt__(self, other):
24 | if self.get_name() != other.get_name():
25 | return self.get_name() < other.get_name()
26 |
27 | return self.get_version() < other.get_version()
28 |
29 | def __str__(self):
30 | return json.dumps({
31 | 'name': self.get_name(),
32 | 'version': self._version.package_version,
33 | 'releaseVersion': self._version.release_version,
34 | })
35 |
36 | def is_beta(self):
37 | return self._name.startswith('beta-')
38 |
39 | def get_name(self):
40 | return self._name
41 |
42 | def get_non_beta_name(self):
43 | if self.is_beta():
44 | return self._name[5:]
45 |
46 | return self._name
47 |
48 | def get_version(self):
49 | return self._version
50 |
51 |
52 | @functools.total_ordering
53 | class Version:
54 | """Encapsulates the releases-package version pair."""
55 |
56 | def __init__(self, release_version, package_version):
57 | self.release_version = int(release_version)
58 | self.package_version = package_version
59 |
60 | def __eq__(self, other):
61 | return self.release_version == other.release_version
62 |
63 | def __lt__(self, other):
64 | return self.release_version < other.release_version
65 |
66 | def __str__(self):
67 | return str(self.package_version)
68 |
69 | def to_json(self):
70 | return {
71 | 'release_version': self.release_version,
72 | 'package_version': self.package_version,
73 | }
74 |
--------------------------------------------------------------------------------
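
A short sketch of the ordering these classes define (hypothetical versions; assumes the module is importable as `package`): `functools.total_ordering` derives the remaining comparisons from `__eq__` and `__lt__`, and packages sort by name first, then by release version.

```python
from package import Package, Version  # assumes tools/universe is on sys.path

older = Package('jenkins', Version(1, '3.5.0-2.150.1'))  # hypothetical versions
newer = Package('jenkins', Version(2, '3.5.4-2.150.1'))
assert older < newer <= newer
print(newer)  # {"name": "jenkins", "version": "3.5.4-2.150.1", "releaseVersion": 2}
```
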
/tools/create_testing_volumes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import json
3 | import os
4 | import time
5 |
6 | import dcos_launch
7 | from dcos_test_utils import logger, helpers, ssh_client
8 |
9 |
10 | def mount_volumes():
11 | """ Will create 200MB partions on clusters launched by dcos-launch
12 | """
13 | script = """
14 | #!/bin/bash
15 | sudo systemctl stop dcos-mesos-slave.service
16 | sudo rm -f /var/lib/dcos/mesos-resources
17 | sudo rm -f /var/lib/mesos/slave/meta/slaves/latest
18 | """
19 | for i in range(2):
20 | script += """
21 | sudo mkdir -p /dcos/volume{idx}
22 | sudo dd if=/dev/zero of=/root/volume{idx}.img bs=1M count={size}
23 | sudo losetup /dev/loop{idx} /root/volume{idx}.img
24 | sudo mkfs -t ext4 /dev/loop{idx}
25 | sudo losetup -d /dev/loop{idx}
26 | echo "/root/volume{idx}.img /dcos/volume{idx} auto loop 0 2" | sudo tee -a /etc/fstab
27 | sudo mount /dcos/volume{idx}
28 | """.format(idx=i, size=200)
29 |
30 | script += """
31 | sudo systemctl restart dcos-mesos-slave.service
32 | """
33 |
34 | cluster_info_path = os.getenv('CLUSTER_INFO_PATH', 'cluster_info.json')
35 | if not os.path.exists(cluster_info_path):
36 | raise Exception('No cluster info to work with!')
37 | cluster_info_json = json.load(open(cluster_info_path))
38 | launcher = dcos_launch.get_launcher(cluster_info_json)
39 | description = launcher.describe()
40 | ssh = launcher.get_ssh_client()
41 | with ssh.tunnel(description['masters'][0]['public_ip']) as t:
42 | t.copy_file(helpers.session_tempfile(ssh.key), 'ssh_key')
43 | t.copy_file(helpers.session_tempfile(script), 'volume_script.sh')
44 | t.command(['chmod', '600', 'ssh_key'])
45 | ssh_command = ['ssh', '-i', 'ssh_key'] + ssh_client.SHARED_SSH_OPTS
46 | scp_command = ['scp', '-i', 'ssh_key'] + ssh_client.SHARED_SSH_OPTS
47 | for private_agent in description['private_agents']:
48 | target = '{}@{}'.format(ssh.user, private_agent['private_ip'])
49 | t.command(scp_command + ['volume_script.sh', target + ':~/volume_script.sh'])
50 | t.command(ssh_command + [target, 'bash', 'volume_script.sh'])
51 | # nasty hack until we add a better post-flight
52 | time.sleep(60)
53 |
54 |
55 | if __name__ == '__main__':
56 | logger.setup(os.getenv('LOG_LEVEL', 'DEBUG'))
57 | mount_volumes()
58 |
--------------------------------------------------------------------------------
/conf/nginx/nginx.conf.template:
--------------------------------------------------------------------------------
1 | error_log stderr;
2 | pid /var/nginx/run.pid;
3 |
4 | events {
5 | worker_connections 1024;
6 | }
7 |
8 | http {
9 | client_max_body_size 1024M;
10 | server {
11 | listen $PORT0 default_server;
12 |
13 | access_log /var/log/nginx/jenkins/access.log;
14 | error_log /var/log/nginx/jenkins/error.log;
15 |
16 | location ^~ $JENKINS_CONTEXT {
17 | proxy_pass http://127.0.0.1:$PORT1;
18 | proxy_set_header Host $http_host;
19 | proxy_set_header X-Real-IP $remote_addr;
20 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
21 | proxy_set_header Connection "";
22 | proxy_max_temp_file_size 0;
23 |
24 | # Based on https://wiki.jenkins-ci.org/display/JENKINS/Jenkins+behind+an+NGinX+reverse+proxy
25 | client_body_buffer_size 128k;
26 |
27 | proxy_connect_timeout 600;
28 | proxy_send_timeout 600;
29 | proxy_read_timeout 600;
30 | send_timeout 600;
31 |
32 | proxy_buffer_size 4k;
33 | proxy_buffers 4 32k;
34 | proxy_busy_buffers_size 64k;
35 | proxy_temp_file_write_size 64k;
36 | }
37 |
38 | location ~ ^/(?<url>.*)$ {
39 | rewrite ^/(?<url>.*)$ $JENKINS_CONTEXT/$url break;
40 | proxy_pass http://127.0.0.1:$PORT1;
41 | proxy_set_header Host $http_host;
42 | proxy_set_header X-Real-IP $remote_addr;
43 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
44 | proxy_set_header Connection "";
45 | proxy_max_temp_file_size 0;
46 |
47 | # Based on https://wiki.jenkins-ci.org/display/JENKINS/Jenkins+behind+an+NGinX+reverse+proxy
48 | client_body_buffer_size 128k;
49 |
50 | proxy_connect_timeout 600;
51 | proxy_send_timeout 600;
52 | proxy_read_timeout 600;
53 | send_timeout 600;
54 |
55 | proxy_buffer_size 4k;
56 | proxy_buffers 4 32k;
57 | proxy_busy_buffers_size 64k;
58 | proxy_temp_file_write_size 64k;
59 | }
60 |
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/conf/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | error_log stderr;
2 | pid /var/nginx/run.pid;
3 |
4 | events {
5 | worker_connections 1024;
6 | }
7 |
8 | http {
9 | client_max_body_size 1024M;
10 | server {
11 | listen _XNGINX_PORT default_server;
12 |
13 | access_log /var/log/nginx/jenkins/access.log;
14 | error_log /var/log/nginx/jenkins/error.log;
15 |
16 | location ^~ _XJENKINS_CONTEXT {
17 | proxy_pass http://127.0.0.1:_XJENKINS_PORT;
18 | proxy_set_header Host $http_host;
19 | proxy_set_header X-Real-IP $remote_addr;
20 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
21 | proxy_set_header Connection "";
22 | proxy_max_temp_file_size 0;
23 |
24 | # Based on https://wiki.jenkins-ci.org/display/JENKINS/Jenkins+behind+an+NGinX+reverse+proxy
25 | client_body_buffer_size 128k;
26 |
27 | proxy_connect_timeout 600;
28 | proxy_send_timeout 600;
29 | proxy_read_timeout 600;
30 | send_timeout 600;
31 |
32 | proxy_buffer_size 4k;
33 | proxy_buffers 4 32k;
34 | proxy_busy_buffers_size 64k;
35 | proxy_temp_file_write_size 64k;
36 | }
37 |
38 | location ~ ^/(?<url>.*)$ {
39 | rewrite ^/(?<url>.*)$ _XJENKINS_CONTEXT/$url break;
40 | proxy_pass http://127.0.0.1:_XJENKINS_PORT;
41 | proxy_set_header Host $http_host;
42 | proxy_set_header X-Real-IP $remote_addr;
43 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
44 | proxy_set_header Connection "";
45 | proxy_max_temp_file_size 0;
46 |
47 | # Based on https://wiki.jenkins-ci.org/display/JENKINS/Jenkins+behind+an+NGinX+reverse+proxy
48 | client_body_buffer_size 128k;
49 |
50 | proxy_connect_timeout 600;
51 | proxy_send_timeout 600;
52 | proxy_read_timeout 600;
53 | send_timeout 600;
54 |
55 | proxy_buffer_size 4k;
56 | proxy_buffers 4 32k;
57 | proxy_busy_buffers_size 64k;
58 | proxy_temp_file_write_size 64k;
59 | }
60 |
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/jenkins-agent-images/linux/dcos-jenkins-dind-agent/Dockerfile.alpine:
--------------------------------------------------------------------------------
1 | FROM docker:19.03-dind
2 |
3 | LABEL maintainer="Mesosphere Support"
4 |
5 | # http://bugs.python.org/issue19846
6 | # > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK.
7 | ENV ALPINE_EDGE_COMMUNITY_REPO=http://dl-cdn.alpinelinux.org/alpine/edge/community \
8 | ALPINE_GLIBC_BASE_URL=https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.23-r2 \
9 | ALPINE_GLIBC_PACKAGE=glibc-2.23-r2.apk \
10 | ALPINE_GLIBC_BIN_PACKAGE=glibc-bin-2.23-r2.apk \
11 | ALPINE_GLIBC_I18N_PACKAGE=glibc-i18n-2.23-r2.apk \
12 | ALPINE_GLIBC_RSA_PUB_URL=https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.23-r2/sgerrand.rsa.pub \
13 | JAVA_HOME=/usr/lib/jvm/default-jvm \
14 | LANG=en_US.UTF-8 \
15 | LANGUAGE=en_US.UTF-8 \
16 | LC_ALL=en_US.UTF-8 \
17 | SSH_KNOWN_HOSTS=github.com
18 |
19 | ENV PATH=${PATH}:${JAVA_HOME}/bin
20 |
21 | # Please keep each package list in alphabetical order
22 | RUN apk --update add \
23 | bash \
24 | bzip2 \
25 | ca-certificates \
26 | git \
27 | glib \
28 | jq \
29 | less \
30 | libsm \
31 | libstdc++ \
32 | make \
33 | nss \
34 | openjdk8 \
35 | openssh-client \
36 | openssl \
37 | perl \
38 | py-pip \
39 | python \
40 | python3 \
41 | tar \
42 | unzip \
43 | && cd /tmp \
44 | && pip install --upgrade \
45 | pip \
46 | setuptools \
47 | virtualenv \
48 | wheel \
49 | && apk add --update --repository ${ALPINE_EDGE_COMMUNITY_REPO} tini \
50 | && wget -q -O /etc/apk/keys/sgerrand.rsa.pub "${ALPINE_GLIBC_RSA_PUB_URL}" \
51 | && wget -q "${ALPINE_GLIBC_BASE_URL}/${ALPINE_GLIBC_PACKAGE}" \
52 | "${ALPINE_GLIBC_BASE_URL}/${ALPINE_GLIBC_BIN_PACKAGE}" \
53 | "${ALPINE_GLIBC_BASE_URL}/${ALPINE_GLIBC_I18N_PACKAGE}" \
54 | && apk add ${ALPINE_GLIBC_PACKAGE} ${ALPINE_GLIBC_BIN_PACKAGE} ${ALPINE_GLIBC_I18N_PACKAGE} \
55 | && cd \
56 | && rm -rf /tmp/* /var/cache/apk/* \
57 | && /usr/glibc-compat/bin/localedef -i en_US -f UTF-8 en_US.UTF-8 \
58 | && echo 'export PATH=$PATH:${JAVA_HOME}/bin' >> /etc/profile.d/java.sh \
59 | && ssh-keyscan $SSH_KNOWN_HOSTS | tee /etc/ssh/ssh_known_hosts \
60 | && echo 'Done'
61 |
62 | COPY wrapper.sh /usr/local/bin/
63 | RUN chmod +x /usr/local/bin/wrapper.sh
64 |
65 | ENTRYPOINT ["wrapper.sh"]
66 | CMD []
67 |
--------------------------------------------------------------------------------
/scripts/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set +x -e -o pipefail
3 | declare -i poll_period=10
4 | declare -i seconds_until_timeout=$((60 * 30))
5 |
6 | CLUSTER_ID=$(http \
7 | --ignore-stdin \
8 | "https://ccm.mesosphere.com/api/cluster/" \
9 | "Authorization:Token ${CCM_AUTH_TOKEN}" \
10 | "name=${JOB_NAME##*/}-${BUILD_NUMBER}" \
11 | "cluster_desc=${JOB_NAME##*/} ${BUILD_NUMBER}" \
12 | time=60 \
13 | cloud_provider=0 \
14 | region=us-west-2 \
15 | channel=testing/master \
16 | template=ee.single-master.cloudformation.json \
17 | adminlocation=0.0.0.0/0 \
18 | public_agents=0 \
19 | private_agents=1 \
20 | | jq ".id"
21 | )
22 |
23 | echo "Waiting for DC/OS cluster to form... (ID: ${CLUSTER_ID})"
24 |
25 | while (("$seconds_until_timeout" >= "0")); do
26 | STATUS=$(http \
27 | --ignore-stdin \
28 | "https://ccm.mesosphere.com/api/cluster/${CLUSTER_ID}/" \
29 | "Authorization:Token ${CCM_AUTH_TOKEN}" \
30 | | jq ".status"
31 | )
32 |
33 | if [[ ${STATUS} -eq 0 ]]; then
34 | break
35 | elif [[ ${STATUS} -eq 7 ]]; then
36 | echo "ERROR: cluster creation failed."
37 | exit 7
38 | fi
39 |
40 | sleep $poll_period
41 | let "seconds_until_timeout -= $poll_period"
42 | done
43 |
44 | if (("$seconds_until_timeout" <= "0")); then
45 | echo "ERROR: timed out waiting for cluster."
46 | exit 2
47 | fi
48 |
49 | CLUSTER_INFO=$(http \
50 | --ignore-stdin \
51 | "https://ccm.mesosphere.com/api/cluster/${CLUSTER_ID}/" \
52 | "Authorization:Token ${CCM_AUTH_TOKEN}" \
53 | | jq -r ".cluster_info"
54 | )
55 |
56 | DCOS_URL="http://$(echo "${CLUSTER_INFO}" | jq -r ".DnsAddress")"
57 | echo "DCOS_URL is '${DCOS_URL}'"
58 |
59 | ln -s "${DOT_SHAKEDOWN}" ~/.shakedown
60 | TERM=velocity shakedown --stdout all --ssh-key-file "${CLI_TEST_SSH_KEY}" --dcos-url "${DCOS_URL}"
61 |
62 | http \
63 | --ignore-stdin \
64 | DELETE \
65 | "https://ccm.mesosphere.com/api/cluster/${CLUSTER_ID}/" \
66 | "Authorization:Token ${CCM_AUTH_TOKEN}"
67 |
--------------------------------------------------------------------------------
/jenkins-agent-images/linux/dcos-jenkins-dind-agent/README.md:
--------------------------------------------------------------------------------
1 | # Jenkins Docker-in-Docker Agent
2 | [![Docker Stars](https://img.shields.io/docker/stars/mesosphere/jenkins-dind.svg)][docker-hub]
3 | [![Docker Pulls](https://img.shields.io/docker/pulls/mesosphere/jenkins-dind.svg)][docker-hub]
4 | [![Image Layers](https://images.microbadger.com/badges/image/mesosphere/jenkins-dind.svg)](http://microbadger.com/images/mesosphere/jenkins-dind "Get your own image badge on microbadger.com")
5 |
6 | A simple Docker image for running a Jenkins agent alongside its very
7 | own Docker daemon. This is useful if you're trying to run Jenkins agents on a
8 | Mesos cluster, and you also want to build and push Docker images using your
9 | CI system.
10 |
11 | For full documentation on how to use this Docker image, please refer to the
12 | [DC/OS Jenkins service documentation](https://docs.mesosphere.com/service-docs/jenkins/quickstart/).
13 |
14 | ## Usage
15 | ### Command line
16 | Try it out locally by running the following command:
17 |
18 | ```bash
19 | docker run --privileged mesosphere/jenkins-dind:0.6.0-alpine \
20 | wrapper.sh "java -version && docker run hello-world"
21 | ```
22 |
23 | ### Jenkins
24 | You'll need to configure the Mesos plugin on your Jenkins master to use this
25 | image. You'll probably also want to give it a special slave label, so that you
26 | don't unnecessarily run builds using the dind image. A relevant snippet of the
27 | Mesos plugin within the Jenkins master's `config.xml` follows:
28 |
29 | ```xml
30 | <org.jenkinsci.plugins.mesos.MesosSlaveInfo>
31 | <slaveCpus>0.1</slaveCpus>
32 | <slaveMem>512</slaveMem>
33 | <executorCpus>0.1</executorCpus>
34 | <maxExecutors>2</maxExecutors>
35 | <executorMem>128</executorMem>
36 | <remoteFSRoot>jenkins</remoteFSRoot>
37 | <idleTerminationMinutes>3</idleTerminationMinutes>
38 | <slaveAttributes/>
39 | <jvmArgs>-Xms16m -XX:+UseConcMarkSweepGC -Djava.net.preferIPv4Stack=true</jvmArgs>
40 | <jnlpArgs/>
41 | <defaultSlave/>
42 | <containerInfo>
43 | <type>DOCKER</type>
44 | <dockerImage>mesosphere/jenkins-dind:0.6.0-alpine</dockerImage>
45 | <networking>BRIDGE</networking>
46 | <useCustomDockerCommandShell>true</useCustomDockerCommandShell>
47 | <customDockerCommandShell>wrapper.sh</customDockerCommandShell>
48 | <dockerPrivilegedMode>true</dockerPrivilegedMode>
49 | <dockerForcePullImage>false</dockerForcePullImage>
50 | </containerInfo>
51 | <mode>NORMAL</mode>
52 | <labelString>dind</labelString>
53 | </org.jenkinsci.plugins.mesos.MesosSlaveInfo>
54 | ```
55 |
56 | [docker-hub]: https://hub.docker.com/r/mesosphere/jenkins-dind
57 |
58 |
59 | ### Older Archived Repository
60 | This image was previously housed at https://github.com/mesosphere/dcos-jenkins-dind-agent.
61 |
62 | Older branches and tags can be found there.
63 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Jenkins on DC/OS
2 | [![Build Status](https://jenkins.mesosphere.com/service/jenkins/view/Velocity/job/Jenkins/job/public-jenkins-dcos-master/badge/icon)](https://jenkins.mesosphere.com/service/jenkins/view/Velocity/job/Jenkins/job/public-jenkins-dcos-master/)
3 | [![Docker Stars](https://img.shields.io/docker/stars/mesosphere/jenkins.svg)][docker-hub]
4 | [![Docker Pulls](https://img.shields.io/docker/pulls/mesosphere/jenkins.svg)][docker-hub]
5 | [![Image Layers](https://images.microbadger.com/badges/image/mesosphere/jenkins.svg)](http://microbadger.com/images/mesosphere/jenkins "Get your own image badge on microbadger.com")
6 |
7 | Run a Jenkins master on DC/OS, using Docker and Nginx. This Jenkins instance is pre-configured to autoscale build agents onto the DC/OS cluster using the [Jenkins Mesos plugin][mesos-plugin].
8 |
9 | ## Overview
10 | This repo contains a [Dockerfile](Dockerfile) that runs Jenkins inside a Docker
11 | container and uses [Nginx][nginx-home] as a reverse proxy. It also provides
12 | several Jenkins plugins and a basic Jenkins configuration in order to get you
13 | up and running quickly with Jenkins on DC/OS.
14 |
15 | ## Reporting issues
16 |
17 | Please report issues and submit feature requests for Jenkins on DC/OS by [creating an issue in the DC/OS JIRA][dcos-jira] (JIRA account required).
18 |
19 | ## Included in this repo
20 | Base packages:
21 | * [Jenkins][jenkins-home] 2.204.6 (LTS)
22 | * [Nginx][nginx-home] 1.10.1
23 |
24 | Jenkins plugins: see [plugins.conf](plugins.conf).
25 |
26 | ## Packaging
27 | Jenkins is available as a package in the [Mesosphere Universe][universe].
28 | To make changes to the Jenkins package, submit a pull request against the
29 | Universe.
30 |
31 | ## Installation
32 |
33 | To install Jenkins on DC/OS, run `dcos package install jenkins` or install it via the Universe page in the DC/OS UI.
34 |
35 | Jenkins should now be available at `<dcos-url>/service/jenkins`.
36 | See [Getting Started][getting-started] for more in-depth instructions and
37 | configuration options.
38 |
39 | ## Releasing
40 | To release a new version of this package:
41 |
42 | 1. Update [the Jenkins conf][jenkins-conf] to reference the current release of
43 | the [jenkins-dind][jenkins-dind] Docker image (if needed).
44 | 2. Add some release notes to [CHANGELOG.md](CHANGELOG.md).
45 | 3. Tag the commit on master that you want to be released.
46 | 4. Once [the build][jenkins-build] has successfully completed, submit a new
47 | pull request against [the Universe][universe] referencing the new tag.
48 |
49 | [dcos-jira]: https://jira.mesosphere.com/secure/CreateIssueDetails!init.jspa?pid=14110&issuetype=3
50 | [docker-hub]: https://hub.docker.com/r/mesosphere/jenkins
51 | [getting-started]: https://docs.mesosphere.com/service-docs/jenkins/quickstart/
52 | [jenkins-conf]: /conf/jenkins/configuration.yaml
53 | [jenkins-dind]: /jenkins-agent-images/linux/dcos-jenkins-dind-agent
54 | [jenkins-home]: https://jenkins-ci.org/
55 | [mesos-plugin]: https://github.com/jenkinsci/mesos-plugin
56 | [nginx-home]: http://nginx.org/en/
57 | [jenkins-build]: https://jenkins.mesosphere.com/service/jenkins/job/public-jenkins-dcos-master/
58 | [universe]: https://github.com/mesosphere/universe
59 |
--------------------------------------------------------------------------------
/testing/sdk_networks.py:
--------------------------------------------------------------------------------
1 | '''
2 | ************************************************************************
3 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
4 | SHOULD ALSO BE APPLIED TO sdk_networks IN ANY OTHER PARTNER REPOS
5 | ************************************************************************
6 | '''
7 | import logging
8 | import shakedown
9 | import sdk_cmd
10 |
11 | log = logging.getLogger(__name__)
12 |
13 | ENABLE_VIRTUAL_NETWORKS_OPTIONS = {'service': {'virtual_network_enabled': True}}
14 |
15 |
16 | def check_task_network(task_name, expected_network_name="dcos"):
17 | """Tests whether a task (and it's parent pod) is on a given network
18 | """
19 | _task = shakedown.get_task(task_id=task_name, completed=False)
20 |
21 | assert _task is not None, "Unable to find task named {}".format(task_name)
22 | if isinstance(_task, (list, tuple)):
23 | assert len(_task) == 1, "Found too many tasks matching {}, got {}"\
24 | .format(task_name, _task)
25 | _task = _task[0]
26 |
27 | for status in _task["statuses"]:
28 | if status["state"] == "TASK_RUNNING":
29 | for network_info in status["container_status"]["network_infos"]:
30 | if expected_network_name is not None:
31 | assert "name" in network_info, \
32 | "Didn't find network name in NetworkInfo for task {task} with " \
33 | "status:{status}".format(task=task_name, status=status)
34 | assert network_info["name"] == expected_network_name, \
35 | "Expected network name:{expected} found:{observed}" \
36 | .format(expected=expected_network_name, observed=network_info["name"])
37 | else:
38 | assert "name" not in network_info, \
39 | "Task {task} has network name when it shouldn't has status:{status}" \
40 | .format(task=task_name, status=status)
41 |
42 |
43 | def get_and_test_endpoints(package_name, service_name, endpoint_to_get, correct_count):
44 | """Gets the endpoints for a service or the specified 'endpoint_to_get' similar to running
45 | $ docs endpoints
46 | or
47 | $ dcos endpoints
48 | Checks that there is the correct number of endpoints"""
49 | endpoints = sdk_cmd.svc_cli(package_name, service_name, "endpoints {}".format(endpoint_to_get), json=True)
50 | assert len(endpoints) == correct_count, "Wrong number of endpoints, got {} should be {}" \
51 | .format(len(endpoints), correct_count)
52 | return endpoints
53 |
54 |
55 | def check_endpoints_on_overlay(endpoints):
56 | def check_ip_addresses_on_overlay():
57 | # the overlay IP address should not contain any agent IPs
58 | return len(set(ip_addresses).intersection(set(shakedown.get_agents()))) == 0
59 |
60 | assert "address" in endpoints, "endpoints: {} missing 'address' key".format(endpoints)
61 | assert "dns" in endpoints, "endpoints: {} missing 'dns' key".format(endpoints)
62 |
63 | # endpoints should have the format <ip>:port
64 | ip_addresses = [e.split(":")[0] for e in endpoints["address"]]
65 | assert check_ip_addresses_on_overlay(), \
66 | "IP addresses for this service should not contain agent IPs, IPs were {}".format(ip_addresses)
67 |
68 | for dns in endpoints["dns"]:
69 | assert "autoip.dcos.thisdcos.directory" in dns, \
70 | "DNS {} is incorrect should have autoip.dcos.thisdcos.directory".format(dns)
71 |
--------------------------------------------------------------------------------
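
Hypothetical usage of the helpers above inside a test, assuming a running cluster with a `jenkins` service attached to the default `dcos` overlay network and a single endpoint:

```python
import sdk_networks

# Verify the scheduler task landed on the overlay network.
sdk_networks.check_task_network("jenkins", expected_network_name="dcos")

# Fetch the endpoints and confirm none of them resolve to agent IPs.
endpoints = sdk_networks.get_and_test_endpoints("jenkins", "jenkins", "", 1)
sdk_networks.check_endpoints_on_overlay(endpoints)
```
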
/tools/print_package_tag.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Retrieves package version from a cluster using the CLI (must be configured/logged in),
4 | # then determines what SHA that package has in the provided repo (must be locally checked out)
5 | #
6 | # On success: Prints package version string and zero is returned
7 | # On failure: non-zero is returned
8 |
9 | import json
10 | import logging
11 | import os.path
12 | import subprocess
13 | import sys
14 |
15 | logger = logging.getLogger(__name__)
16 | logging.basicConfig(level=logging.DEBUG, format="%(message)s")
17 |
18 |
19 | class PackageVersion(object):
20 |
21 | def __init__(self, package_name):
22 | self._package_name = package_name
23 |
24 |
25 | def get_version(self):
26 | response_raw = self._get_cmd_stdout('dcos package describe {}'.format(self._package_name))
27 | try:
28 | return json.loads(response_raw)['version']
29 | except Exception:
30 | logger.error('Failed to parse version from "dcos package describe" output: {}'.format(response_raw))
31 | raise
32 |
33 |
34 | def get_version_sha_for_path(self, repo_path):
35 | version_tag = self.get_version()
36 | try:
37 | # ensure the tag is visible in the repo copy:
38 | repo_dotgit_path = os.path.join(repo_path, '.git')
39 | self._get_cmd_stdout('git --git-dir={} fetch origin --tags'.format(repo_dotgit_path))
40 | # get the rev for the tag. use % instead of .format() to preserve a literal '{}':
41 | return self._get_cmd_stdout('git --git-dir=%s rev-parse %s^{}' % (repo_dotgit_path, version_tag))
42 | except Exception:
43 | logger.error('Failed to retrieve SHA1 from git for tag "{}"'.format(version_tag))
44 | raise
45 |
46 |
47 | def get_version_sha_for_url(self, repo_url):
48 | version_tag = self.get_version()
49 | try:
50 | # get the rev for the remote tag. use % instead of .format() to preserve a literal '{}':
51 | rev = self._get_cmd_stdout('git ls-remote --tags %s refs/tags/%s^{}' % (repo_url, version_tag))
52 | if len(rev) == 0:
53 | # no tag with '^{}' suffix was found. retry without the suffix:
54 | rev = self._get_cmd_stdout('git ls-remote --tags {} refs/tags/{}'.format(repo_url, version_tag))
55 | # output format: '<sha>\t<tag-ref>'
56 | return rev.split()[0]
57 | except Exception:
58 | logger.error('Failed to retrieve SHA1 from git for tag "{}"'.format(version_tag))
59 | raise
60 |
61 |
62 | def _get_cmd_stdout(self, cmd):
63 | try:
64 | logger.info("CMD: {}".format(cmd))
65 | output = subprocess.check_output(cmd.split(' ')).decode('utf-8').strip()
66 | logger.info("Output ({}b):\n{}".format(len(output), output))
67 | return output
68 | except Exception:
69 | logger.error('Failed to run command: "{}"'.format(cmd))
70 | raise
71 |
72 |
73 | def main(argv):
74 | if len(argv) != 2 and len(argv) != 3:
75 | logger.error('Syntax: {} <package-name> [/local/repo/path or git@host.com:remote/repo]'.format(argv[0]))
76 | logger.error('Received arguments {}'.format(str(argv)))
77 | return 1
78 | if len(argv) == 2:
79 | print(PackageVersion(argv[1]).get_version())
80 | else:
81 | if os.path.isdir(argv[2]):
82 | print(PackageVersion(argv[1]).get_version_sha_for_path(argv[2]))
83 | else:
84 | print(PackageVersion(argv[1]).get_version_sha_for_url(argv[2]))
85 |
86 | return 0
87 |
88 | if __name__ == '__main__':
89 | sys.exit(main(sys.argv))
90 |
--------------------------------------------------------------------------------
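
Hypothetical invocations of the script above (they require a logged-in `dcos` CLI, and for the SHA variants a git checkout or reachable remote):

```python
from print_package_tag import PackageVersion  # assumes tools/ is on sys.path

# Equivalent to: ./print_package_tag.py jenkins
print(PackageVersion('jenkins').get_version())

# Equivalent to: ./print_package_tag.py jenkins /path/to/local/checkout
print(PackageVersion('jenkins').get_version_sha_for_path('/path/to/local/checkout'))
```
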
/testing/sdk_quota.py:
--------------------------------------------------------------------------------
1 | """Utilities relating to interaction with Mesos quota
2 |
3 | ************************************************************************
4 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
5 | SHOULD ALSO BE APPLIED TO sdk_quota IN ANY OTHER PARTNER REPOS
6 | ************************************************************************
7 | """
9 | import logging
10 | import json
11 |
12 | import sdk_cmd
13 |
14 | from enum import Enum
15 |
16 | log = logging.getLogger(__name__)
17 |
18 |
19 | class QuotaType(Enum):
20 | GUARANTEE = 1
21 | LIMIT = 2
22 |
23 |
24 | def create_quota(role: str, cpus=0.0, mem=0, gpus=0, quota_type=QuotaType.GUARANTEE):
25 | """ Used to create a Mesos quota for the specified role.
26 |
27 | :param role: the role to create quota for
28 | :param cpus: the amount of cpus
29 | :param mem: the amount of memory
30 | :param gpus: the amount of GPUs
31 | :param quota_type: whether to create a quota guarantee or a quota limit
32 |
33 | Raises an exception on failure
34 | """
35 | if cpus == 0.0 and mem == 0.0 and gpus == 0.0:
36 | log.info(
37 | "create_quota called with cpus, mem, and gpus all 0.0. Not creating any quota."
38 | )
39 | return
38 | if quota_type is QuotaType.GUARANTEE:
39 | guarantee = []
40 | if cpus != 0.0:
41 | guarantee.append(_create_guarantee("cpus", cpus))
42 | if mem != 0:
43 | guarantee.append(_create_guarantee("mem", mem))
44 | if gpus != 0:
45 | guarantee.append(_create_guarantee("gpus", gpus))
46 |
47 | log.info(
48 | "Creating quota guarantee for role %s. cpus: %s mem: %s gpus: %s",
49 | role,
50 | cpus,
51 | mem,
52 | gpus,
53 | )
54 | sdk_cmd.cluster_request(
55 | "POST", "/mesos/quota", json=_create_quota_request(role, guarantee)
56 | )
57 |
58 | else:
59 | # Limits currently go through the mesos/api/v1 endpoint until
60 | # the mesos/quota endpoint supports them. This code should
61 | # be restructured to use the /quota endpoint when ready.
62 | log.info(
63 | "Creating quota limit for role %s. cpus: %s mem: %s gpus: %s",
64 | role,
65 | cpus,
66 | mem,
67 | gpus,
68 | )
69 | sdk_cmd.cluster_request(
70 | "POST",
71 | "/mesos/api/v1",
72 | json=_create_quota_limit_request(role, cpus, mem, gpus),
73 | )
74 | log.info("Quota created for role %s", role)
75 |
76 |
77 | def remove_quota(role: str):
78 | """ Removes any quota for the specified role
79 |
80 | :param role: the role to remove quota from
81 | """
82 | log.info("Removing quota for role %s", role)
83 | sdk_cmd.cluster_request("DELETE", "/mesos/quota/{}".format(role))
84 | log.info("Quota removed for role %s", role)
85 |
86 |
87 | def list_quotas() -> dict:
88 | log.info("Listing all quota")
89 | return json.loads(sdk_cmd.cluster_request("GET", "/mesos/quota").text)
90 |
91 |
92 | def _create_quota_request(role: str, guarantee: list):
93 | return {"role": role, "guarantee": guarantee}
94 |
95 |
96 | def _create_guarantee(name: str, value: float):
97 | return {"name": name, "type": "SCALAR", "scalar": {"value": value}}
98 |
99 |
100 | def _create_quota_limit_request(role: str, cpus: float, mem: float, gpus: float):
101 | limits = {}
102 | if cpus != 0.0:
103 | limits["cpus"] = {"value": cpus}
104 | if mem != 0:
105 | limits["mem"] = {"value": mem}
106 | if gpus != 0:
107 | limits["gpus"] = {"value": gpus}
108 |
109 | return {
110 | "type": "UPDATE_QUOTA",
111 | "update_quota": {"quota_configs": [{"role": role, "limits": limits}]},
112 | }
113 |
--------------------------------------------------------------------------------
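
A hypothetical end-to-end use of the helpers above, assuming a configured cluster and a role named `jenkins-role`:

```python
import sdk_quota

# Guarantee 1 CPU and 1024 MB for the role, list what is registered, then clean up.
sdk_quota.create_quota("jenkins-role", cpus=1.0, mem=1024)
print(sdk_quota.list_quotas())
sdk_quota.remove_quota("jenkins-role")
```
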
/tools/pip/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import os
4 | import os.path
5 | import setuptools
6 | import shutil
7 | import subprocess
8 | import sys
9 |
10 | def syntax():
11 | print('Syntax: OUTPUT_NAME=foo INPUT_DIR=path/to/foo VERSION=0.123.0 {} -q bdist_wheel'.format(sys.argv[0]))
12 |
13 | def main():
14 | output_name = os.getenv('OUTPUT_NAME')
15 | input_dir_path = os.getenv('INPUT_DIR')
16 | version = os.getenv('VERSION')
17 | if not output_name or not input_dir_path or not version:
18 | print('Missing OUTPUT_NAME, INPUT_DIR, or VERSION envvars.')
19 | syntax()
20 | return 1
21 |
22 | if not os.path.isdir(input_dir_path):
23 | print('Provided input path is not a directory: {}'.format(input_dir_path))
24 | syntax()
25 | return 1
26 |
27 | # only include files that are tracked by git
28 | input_relative_file_paths = subprocess.check_output(['git', 'ls-files'], cwd=input_dir_path).decode('utf-8').split()
29 | print('Packing {} files from {} into {}-{}:\n {}'.format(
30 | len(input_relative_file_paths), input_dir_path, output_name, version, '\n '.join(input_relative_file_paths)))
31 |
32 | # wipe/recreate output directory
33 | script_dir = os.path.abspath(os.path.dirname(__file__))
34 | output_dir_path = os.path.join(script_dir, output_name)
35 | if os.path.exists(output_dir_path):
36 | shutil.rmtree(output_dir_path)
37 | os.makedirs(output_dir_path)
38 | build_dir_path = os.path.join(script_dir, 'build')
39 | if os.path.exists(build_dir_path):
40 | shutil.rmtree(build_dir_path)
41 | # copy all input files to ./<output_name>/...
42 | for input_relative_file_path in input_relative_file_paths:
43 | src_path = os.path.join(input_dir_path, input_relative_file_path)
44 | dest_path = os.path.join(output_dir_path, input_relative_file_path)
45 | dest_dir = os.path.dirname(dest_path)
46 | if not os.path.isdir(dest_dir):
47 | os.makedirs(dest_dir)
48 | shutil.copy(src_path, dest_path)
49 |
50 | init_filename = '__init__.py'
51 |
52 | # ensure that a root-level ./<output_name>/__init__.py exists, so that the python module has a __file__ attribute
53 | open(os.path.join(output_dir_path, init_filename), 'a').close()
54 |
55 | # copy cmd_wrapper entrypoint into ./<output_name>/cmd_wrapper/ as well
56 | entrypoint_package = 'cmd_wrapper'
57 | endpoint_output_dir = os.path.join(output_dir_path, entrypoint_package)
58 | os.makedirs(endpoint_output_dir)
59 | shutil.copy(
60 | os.path.join(script_dir, entrypoint_package, init_filename),
61 | os.path.join(endpoint_output_dir, init_filename))
62 | input_relative_file_paths.append(os.path.join(entrypoint_package, init_filename))
63 |
64 | # run setup with list of files to include
65 | setuptools.setup(
66 | name=output_name,
67 | version=version,
68 | url='http://github.com/mesosphere/dcos-commons',
69 | packages=[output_name],
70 | entry_points={ 'console_scripts': [
71 | '{} = {}.{}:main'.format(output_name, output_name, entrypoint_package) ] },
72 | package_data={ output_name: input_relative_file_paths })
73 |
74 | # clean up build detritus:
75 | shutil.rmtree(build_dir_path)
76 | shutil.rmtree(os.path.join(script_dir, '{}.egg-info'.format(output_name)))
77 | shutil.rmtree(output_dir_path)
78 | # move whl file into script dir:
79 | output_file = '{}-{}-py3-none-any.whl'.format(output_name, version)
80 | output_path = os.path.join(script_dir, output_file)
81 | os.rename(os.path.join(script_dir, 'dist', output_file), output_path)
82 | shutil.rmtree(os.path.join(script_dir, 'dist'))
83 |
84 | print('''Built {}-{}: {}'''.format(output_name, version, output_path))
85 | return 0
86 |
87 | if __name__ == '__main__':
88 | sys.exit(main())
89 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM jenkins/jenkins:2.204.6
2 | WORKDIR /tmp
3 |
4 | # Environment variables used throughout this Dockerfile
5 | #
6 | # $JENKINS_HOME will be the final destination that Jenkins will use as its
7 | # data directory. This cannot be populated before Marathon
8 | # has a chance to create the host-container volume mapping.
9 | #
10 | ENV JENKINS_FOLDER /usr/share/jenkins
11 |
12 | # Build Args
13 | ARG BLUEOCEAN_VERSION=1.22.0
14 | ARG JENKINS_STAGING=/usr/share/jenkins/ref/
15 | ARG PROMETHEUS_PLUG_HASH=61ea0cd0bb26d937c8f4df00c7e226c0b51c7b50
16 | ARG STATSD_PLUG_HASH=929d4a6cb3d3ce5f1e03af73075b13687d4879c8
17 | ARG JENKINS_DCOS_HOME=/var/jenkinsdcos_home
18 | ARG user=nobody
19 | ARG uid=99
20 | ARG gid=99
21 |
22 | ENV JENKINS_HOME $JENKINS_DCOS_HOME
23 | # Default policy according to https://wiki.jenkins.io/display/JENKINS/Configuring+Content+Security+Policy
24 | ENV JENKINS_CSP_OPTS="sandbox; default-src 'none'; img-src 'self'; style-src 'self';"
25 |
26 | USER root
27 |
28 | # install dependencies
29 | RUN apt-get update && apt-get install -y nginx python zip jq gettext-base
30 | # update to newer git version
31 | RUN echo "deb http://ftp.debian.org/debian testing main" >> /etc/apt/sources.list \
32 | && apt-get update && apt-get -t testing install -y git
33 |
34 | RUN mkdir -p "${JENKINS_HOME}" "${JENKINS_FOLDER}/war"
35 |
36 | # Override the default property for DNS lookup caching
37 | RUN echo 'networkaddress.cache.ttl=60' >> ${JAVA_HOME}/jre/lib/security/java.security
38 |
39 | # bootstrap scripts and needed dir setup
40 | COPY scripts/export-libssl.sh /usr/local/jenkins/bin/export-libssl.sh
41 | COPY scripts/dcos-quota.sh /usr/local/jenkins/bin/dcos-quota.sh
42 | COPY scripts/dcos-framework-dns-name.sh /usr/local/jenkins/bin/dcos-framework-dns-name.sh
43 | COPY scripts/dcos-write-known-hosts-file.sh /usr/local/jenkins/bin/dcos-write-known-hosts-file.sh
44 | COPY scripts/run.sh /usr/local/jenkins/bin/run.sh
45 |
46 | # nginx setup
47 | RUN mkdir -p /var/log/nginx/jenkins /var/nginx/
48 | COPY conf/nginx/nginx.conf.template /var/nginx/nginx.conf.template
49 |
50 | # jenkins setup
51 | ENV CASC_JENKINS_CONFIG /usr/local/jenkins/jenkins.yaml
52 | COPY conf/jenkins/configuration.yaml "${CASC_JENKINS_CONFIG}"
53 | COPY conf/jenkins/jenkins.model.JenkinsLocationConfiguration.xml "${JENKINS_STAGING}/jenkins.model.JenkinsLocationConfiguration.xml"
54 | COPY conf/jenkins/nodeMonitors.xml "${JENKINS_STAGING}/nodeMonitors.xml"
55 | COPY scripts/init.groovy.d/mesos-auth.groovy "${JENKINS_STAGING}/init.groovy.d/mesos-auth.groovy"
56 |
57 | # add plugins
58 | COPY plugins.conf /tmp/
59 | RUN sed -i "s/\${BLUEOCEAN_VERSION}/${BLUEOCEAN_VERSION}/g" /tmp/plugins.conf
60 | RUN /usr/local/bin/install-plugins.sh < /tmp/plugins.conf
61 |
62 | # Note: There is a cleaner way of accomplishing the following which is documented in https://jira.d2iq.com/browse/DCOS_OSS-5906
63 | ADD https://infinity-artifacts.s3.amazonaws.com/prometheus-jenkins/prometheus.hpi-${PROMETHEUS_PLUG_HASH} "${JENKINS_STAGING}/plugins/prometheus.hpi"
64 | ADD https://infinity-artifacts.s3.amazonaws.com/statsd-jenkins/metrics-graphite.hpi-${STATSD_PLUG_HASH} "${JENKINS_STAGING}/plugins/metrics-graphite.hpi"
65 |
66 | # Note: For development purposes, the developer can COPY a pre-release mesos.hpi file into JENKINS_STAGING/plugins/mesos.jpi
67 | # The new naming convention is to use *.jpi files instead of *.hpi files.
68 |
69 | RUN chmod -R ugo+rw "$JENKINS_HOME" "${JENKINS_FOLDER}" \
70 | && chmod -R ugo+r "${JENKINS_STAGING}" \
71 | && chmod -R ugo+rx /usr/local/jenkins/bin/ \
72 | && chmod -R ugo+rw /var/jenkins_home/ \
73 | && chmod -R ugo+rw /var/lib/nginx/ /var/nginx/ /var/log/nginx \
74 | && chmod ugo+rx /usr/local/jenkins/bin/*
75 |
76 | USER ${user}
77 |
78 | # disable first-run wizard
79 | RUN echo 2.0 > /usr/share/jenkins/ref/jenkins.install.UpgradeWizard.state
80 |
81 | CMD /usr/local/jenkins/bin/run.sh
82 |
--------------------------------------------------------------------------------
/tools/build_package.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e -x
4 |
5 | user_usage() {
6 | # This script is generally called by an upstream 'build.sh' which would be invoked directly by users.
7 | # This function returns the syntax expected to be used by that upstream 'build.sh'
8 | echo "Syntax: build.sh [-h|--help] [aws|local]"
9 | }
10 |
11 | dev_usage() {
12 | # Called when a syntax error appears to be an error on the part of the developer.
13 | echo "Developer syntax: build_package.sh [-a 'path1' -a 'path2' ...] [aws|local]"
14 | }
15 |
16 | # Optional envvars:
17 | # REPO_ROOT_DIR: path to root of source repository (default: parent directory of this file)
18 | # REPO_NAME: name of the source repository (default: directory name of REPO_ROOT_DIR)
19 | # UNIVERSE_DIR: path to universe packaging (default: <FRAMEWORK_DIR>/universe/)
20 |
21 | if [ $# -lt 2 ]; then
22 | dev_usage
23 | exit 1
24 | fi
25 |
26 | # required args:
27 | FRAMEWORK_NAME=$1
28 | shift
29 | FRAMEWORK_DIR=$1
30 | shift
31 |
32 | echo "Building $FRAMEWORK_NAME in $FRAMEWORK_DIR:"
33 |
34 | # optional args, currently just used for providing paths to service artifacts:
35 | custom_artifacts=
36 | while getopts 'a:' opt; do
37 | case $opt in
38 | a)
39 | custom_artifacts="$custom_artifacts $OPTARG"
40 | ;;
41 | \?)
42 | dev_usage
43 | exit 1
44 | ;;
45 | esac
46 | done
47 | shift $((OPTIND-1))
48 |
49 | # optional publish method should come after any args:
50 | publish_method="no"
51 | case $1 in
52 | aws)
53 | publish_method="aws"
54 | shift
55 | ;;
56 | local)
57 | publish_method="local"
58 | shift
59 | ;;
60 | .dcos)
61 | publish_method=".dcos"
62 | shift
63 | ;;
64 | "")
65 | # no publish method specified
66 | ;;
67 | *)
68 | # unknown verb
69 | user_usage
70 | exit 1
71 | ;;
72 | esac
73 |
74 | TOOLS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
75 | export REPO_ROOT_DIR=${REPO_ROOT_DIR:=$(dirname $TOOLS_DIR)} # default to parent of this script's dir
76 | export REPO_NAME=${REPO_NAME:=$(basename $REPO_ROOT_DIR)} # default to name of REPO_ROOT_DIR
77 |
78 | UNIVERSE_DIR=${UNIVERSE_DIR:=${FRAMEWORK_DIR}/universe} # default to 'universe' directory in framework dir
79 | echo "- Universe: $UNIVERSE_DIR"
80 | echo "- Artifacts:$custom_artifacts"
81 | echo "- Publish: $publish_method"
82 | echo "---"
83 |
84 | # Verify airgap (except for hello world)
85 | if [ $FRAMEWORK_NAME != "hello-world" ]; then
86 | ${TOOLS_DIR}/airgap_linter.py ${FRAMEWORK_DIR}
87 | fi
88 |
89 | # Upload using requested method
90 | case "$publish_method" in
91 | local)
92 | echo "Launching HTTP artifact server"
93 | PUBLISH_SCRIPT=${TOOLS_DIR}/publish_http.py
94 | ;;
95 | aws)
96 | echo "Uploading to S3"
97 | PUBLISH_SCRIPT=${TOOLS_DIR}/publish_aws.py
98 | ;;
99 | .dcos)
100 | echo "Uploading .dcos files to S3"
101 | PUBLISH_SCRIPT=${TOOLS_DIR}/publish_dcos_file.py
102 | ;;
103 | *)
104 | echo "---"
105 | echo "Build complete, skipping publish step."
106 | echo "Use one of the following additional arguments to get something that runs on a cluster:"
107 | echo "- 'local': Host the build in a local HTTP server for use by a DC/OS Vagrant cluster."
108 | echo "- 'aws': Upload the build to S3."
109 | ;;
110 | esac
111 |
112 | PACKAGE_VERSION=${1:-"stub-universe"}
113 |
114 | if [ -n "$PUBLISH_SCRIPT" ]; then
115 | # All the scripts use the same argument format:
116 | $PUBLISH_SCRIPT "${FRAMEWORK_NAME}" "${PACKAGE_VERSION}" "${UNIVERSE_DIR}" ${custom_artifacts}
117 | fi
118 |
--------------------------------------------------------------------------------
/tests/scale/test_scale_utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import uuid
3 |
4 | import config
5 | import jenkins
6 | import jenkins_remote_access
7 | import pytest
8 | import retrying
9 | import sdk_install
10 | import sdk_utils
11 | import shakedown
12 |
13 | log = logging.getLogger(__name__)
14 |
15 |
16 | @pytest.fixture(scope='module', autouse=True)
17 | def configure_package():
18 | try:
19 | sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
20 | sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME, 0, wait_for_deployment=False)
21 |
22 | yield # let the test session execute
23 | finally:
25 | sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
26 |
27 |
28 | @pytest.fixture
29 | def create_slave() -> str:
30 | label = sdk_utils.random_string()
32 | r = jenkins.create_mesos_slave_node(label, service_name=config.SERVICE_NAME)
33 | assert r.status_code == 200, 'create_mesos_slave_node failed : {}'.format(
34 | r.status_code
35 | )
36 | assert label in r.text, 'Label {} missing from {}'.format(label, r.text)
37 | log.info("Set of labels is now: %s", r.text)
38 | yield label
39 | log.info("Removing label %s", label)
40 | r = jenkins_remote_access.remove_slave_info(
41 | label, service_name=config.SERVICE_NAME
42 | )
43 | assert r.status_code == 200, 'remove_slave_info failed : {}'.format(
44 | r.status_code
45 | )
46 | assert label not in r.text, 'Label {} still present in {}'.format(
47 | label, r.text
48 | )
49 | log.info("Set of labels is now: %s", r.text)
50 |
51 |
52 | @pytest.mark.sanity
53 | def test_create_job():
54 | test_job_name = get_test_job_name()
55 | jenkins.create_job(config.SERVICE_NAME, test_job_name, "echo \"test command\";", 5)
56 | job = jenkins.get_job(config.SERVICE_NAME, test_job_name)
57 |
58 | assert test_job_name == job['name']
59 |
60 |
61 | @pytest.mark.sanity
62 | def test_label_create_job(create_slave):
63 | test_job_name = get_test_job_name()
64 | jenkins.create_job(config.SERVICE_NAME, test_job_name, "echo \"test command\";", 1, create_slave)
65 | job = jenkins.get_job(config.SERVICE_NAME, test_job_name)
66 |
67 | assert test_job_name == job['name']
68 | wait_until_job_run(config.SERVICE_NAME, test_job_name)
69 |
70 |
71 | @pytest.mark.sanity
72 | def test_install_custom_name():
73 | svc_name = 'jenkins-custom'
74 | test_job_name = get_test_job_name()
75 |
76 | sdk_install.uninstall(config.PACKAGE_NAME, svc_name)
77 |
78 | try:
79 | client = shakedown.marathon.create_client()
80 | jenkins.install(svc_name, client)
81 | jenkins.create_job(svc_name, test_job_name)
82 | job = jenkins.get_job(svc_name, test_job_name)
83 | assert test_job_name == job['name']
84 | finally:
85 | sdk_install.uninstall(config.PACKAGE_NAME, svc_name)
86 |
87 |
88 | @pytest.mark.sanity
89 | def test_get_job_failures():
90 | r = jenkins_remote_access.get_job_failures(config.SERVICE_NAME)
91 | assert r.status_code == 200
92 |
93 |
94 | @pytest.mark.sanity
95 | def test_change_mesos_creds():
96 | r = jenkins_remote_access.change_mesos_creds('myusername',
97 | config.SERVICE_NAME)
98 | assert r.status_code == 200
99 | assert "changed jenkins creds" in r.text
100 |
101 |
102 | def get_test_job_name():
103 | return 'test-job-{}'.format(uuid.uuid4())
104 |
105 |
106 | @retrying.retry(
107 | stop_max_delay=5 * 60 * 1000,
108 | wait_fixed=20 * 1000
109 | )
110 | def wait_until_job_run(service_name: str, job_name: str) -> None:
111 | last_build = jenkins.get_last_build(service_name, job_name)
112 | log.info(last_build)
113 | # TODO make this check strong if needed.
114 | assert last_build
115 |
--------------------------------------------------------------------------------
/testing/sdk_hosts.py:
--------------------------------------------------------------------------------
1 | '''Utilities relating to mapping tasks and services to hostnames
2 |
3 | ************************************************************************
4 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
5 | SHOULD ALSO BE APPLIED TO sdk_hosts IN ANY OTHER PARTNER REPOS
6 | ************************************************************************
7 | '''
8 | import json
9 | import retrying
10 |
11 | import sdk_cmd
12 | import sdk_utils
13 |
14 |
15 | SYSTEM_HOST_SUFFIX = 'mesos'
16 | AUTOIP_HOST_SUFFIX = 'autoip.dcos.thisdcos.directory'
17 | VIP_HOST_SUFFIX = 'l4lb.thisdcos.directory'
18 |
19 |
20 | def system_host(service_name, task_name, port=-1):
21 | '''Returns the mesos DNS name for the host machine of a given task, with handling of foldered services.
22 | This maps to the host IP, which may be different from the container IP if CNI is enabled.
23 |
24 | service=marathon task=/path/to/scheduler => scheduler-to-path.marathon.mesos
25 | service=/path/to/scheduler task=node-0 => node-0.pathtoscheduler.mesos
26 |
27 | See also: https://dcos.io/docs/1.8/usage/service-discovery/dns-overview/'''
28 | return _to_host(
29 | _safe_mesos_dns_taskname(task_name),
30 | _safe_name(service_name),
31 | SYSTEM_HOST_SUFFIX,
32 | port)
33 |
34 |
35 | def autoip_host(service_name, task_name, port=-1):
36 | '''Returns the autoip hostname for the container of a given task, with handling of foldered services.
37 | In CNI cases, this may vary from the host of the agent system.'''
38 | return _to_host(
39 | _safe_name(task_name),
40 | _safe_name(service_name),
41 | AUTOIP_HOST_SUFFIX,
42 | port)
43 |
44 |
45 | def custom_host(service_name, task_name, custom_domain, port=-1):
46 | """
47 | Returns a properly constructed hostname for the container of the given task using the
48 | supplied custom domain.
49 | """
50 | return _to_host(
51 | _safe_name(task_name),
52 | _safe_name(service_name),
53 | custom_domain,
54 | port)
55 |
56 |
57 | def vip_host(service_name, vip_name, port=-1):
58 | '''Returns the hostname of a specified service VIP, with handling of foldered services.'''
59 | return _to_host(
60 | _safe_name(vip_name),
61 | _safe_name(service_name),
62 | VIP_HOST_SUFFIX,
63 | port)
64 |
65 |
66 | def _safe_name(name):
67 | '''Converts a potentially slash-delimited name to one that works for 'thisdcos.directory'
68 | hostnames used by autoip and vips. In both cases the slashes may just be stripped out.'''
69 | return name.replace('/', '')
70 |
71 |
72 | def _safe_mesos_dns_taskname(task_name):
73 | '''Converts a potentially slash-delimited task name to one that works for '.mesos' task names
74 | Mesos DNS task names handle folders like this: /path/to/myservice => myservice-to-path'''
75 | elems = task_name.strip('/').split('/')
76 | elems.reverse()
77 | return '-'.join(elems)
78 |
79 |
80 | def _to_host(host_first, host_second, host_third, port):
81 | host = '{}.{}.{}'.format(host_first, host_second, host_third)
82 | if port != -1:
83 | return '{}:{}'.format(host, port)
84 | return host
85 |
86 |
87 | def get_foldered_dns_name(service_name):
88 | if sdk_utils.dcos_version_less_than('1.10'):
89 | return service_name
90 | return sdk_utils.get_foldered_name(service_name).replace("/", "")
91 |
92 |
93 | @retrying.retry(
94 | wait_fixed=2000,
95 | stop_max_delay=5*60*1000)
96 | def get_crypto_id_domain():
97 | """
98 | Returns the cluster cryptographic ID equivalent of autoip.dcos.thisdcos.directory.
99 |
100 | These addresses are routable within the cluster but can be used to test setting a custom
101 | service domain.
102 | """
103 | ok, lashup_response = sdk_cmd.master_ssh("curl localhost:62080/lashup/key/")
104 | assert ok
105 |
106 | crypto_id = json.loads(lashup_response.strip())["zbase32_public_key"]
107 |
108 | return "autoip.dcos.{}.dcos.directory".format(crypto_id)
109 |
--------------------------------------------------------------------------------
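
The helpers above are pure string construction, so their behavior is easy to pin down with a few examples (hypothetical service and task names):

```python
import sdk_hosts

sdk_hosts.autoip_host("jenkins", "jenkins-0", 8080)
# -> 'jenkins-0.jenkins.autoip.dcos.thisdcos.directory:8080'

sdk_hosts.system_host("marathon", "/path/to/scheduler")
# -> 'scheduler-to-path.marathon.mesos' (folders are reversed for Mesos DNS)

sdk_hosts.vip_host("/folder/jenkins", "api", 443)
# -> 'api.folderjenkins.l4lb.thisdcos.directory:443' (slashes stripped)
```
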
/testing/sdk_repository.py:
--------------------------------------------------------------------------------
1 | '''
2 | ************************************************************************
3 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
4 | SHOULD ALSO BE APPLIED TO sdk_repository IN ANY OTHER PARTNER REPOS
5 | ************************************************************************
6 | '''
7 | import json
8 | import logging
9 | import os
10 | from itertools import chain
11 | from typing import List
12 |
13 | import sdk_cmd
14 | import sdk_utils
15 |
16 | log = logging.getLogger(__name__)
17 |
18 |
19 | def flatmap(f, items):
20 | """
21 | lines = ["one,two", "three", "four,five"]
22 | f = lambda s: s.split(",")
23 |
24 | >>> list(map(f, lines))
25 | [['one', 'two'], ['three'], ['four', 'five']]
26 |
27 | >>> list(flatmap(f, lines))
28 | ['one', 'two', 'three', 'four', 'five']
29 | """
30 | return chain.from_iterable(map(f, items))
31 |
32 |
33 | def parse_stub_universe_url_string(stub_universe_url_string):
34 | """Handles newline- and comma-separated strings."""
35 | lines = stub_universe_url_string.split("\n")
36 | return list(filter(None, flatmap(lambda s: s.split(","), lines)))
37 |
38 |
39 | def get_universe_repos() -> List:
40 | # prepare needed universe repositories
41 | stub_universe_url_string = os.environ.get('STUB_UNIVERSE_URL', '')
42 | return parse_stub_universe_url_string(stub_universe_url_string)
43 |
44 |
45 | def add_stub_universe_urls(stub_universe_urls: list) -> dict:
46 | stub_urls = {}
47 |
48 | if not stub_universe_urls:
49 | return stub_urls
50 |
51 | log.info('Adding stub URLs: {}'.format(stub_universe_urls))
52 | for idx, url in enumerate(stub_universe_urls):
53 | log.info('URL {}: {}'.format(idx, repr(url)))
54 | package_name = 'testpkg-{}'.format(sdk_utils.random_string())
55 | stub_urls[package_name] = url
56 |
57 | # clean up any duplicate repositories
58 | current_universes = sdk_cmd.run_cli('package repo list --json')
59 | for repo in json.loads(current_universes)['repositories']:
60 | if repo['uri'] in stub_urls.values():
61 | log.info('Removing duplicate stub URL: {}'.format(repo['uri']))
62 | sdk_cmd.run_cli('package repo remove {}'.format(repo['name']))
63 |
64 | # add the needed universe repositories
65 | for name, url in stub_urls.items():
66 | log.info('Adding stub repo {} URL: {}'.format(name, url))
67 | rc, stdout, stderr = sdk_cmd.run_raw_cli('package repo add --index=0 {} {}'.format(name, url))
68 | if rc != 0 or stderr:
69 | raise Exception(
70 | 'Failed to add stub repo {} ({}): stdout=[{}], stderr=[{}]'.format(
71 | name, url, stdout, stderr))
72 |
73 | log.info('Finished adding universe repos')
74 |
75 | return stub_urls
76 |
77 |
78 | def remove_universe_repos(stub_urls):
79 | log.info('Removing universe repos')
80 |
81 | # clear out the added universe repositories at testing end
82 | for name, url in stub_urls.items():
83 | log.info('Removing stub URL: {}'.format(url))
84 | rc, stdout, stderr = sdk_cmd.run_raw_cli('package repo remove {}'.format(name))
85 | if rc != 0 or stderr:
86 | if stderr.endswith('is not present in the list'):
87 | # tried to remove something that wasn't there, move on.
88 | pass
89 | else:
90 | raise Exception('Failed to remove stub repo: stdout=[{}], stderr=[{}]'.format(stdout, stderr))
91 |
92 | log.info('Finished removing universe repos')
93 |
94 |
95 | def universe_session():
96 | """Add the universe package repositories defined in $STUB_UNIVERSE_URL.
97 |
98 | This should generally be used as a fixture in a framework's conftest.py:
99 |
100 | @pytest.fixture(scope='session')
101 | def configure_universe():
102 | yield from sdk_repository.universe_session()
103 | """
104 | stub_urls = {}
105 | try:
106 | stub_urls = add_stub_universe_urls(get_universe_repos())
107 | yield
108 | finally:
109 | remove_universe_repos(stub_urls)
110 |
--------------------------------------------------------------------------------
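
The URL parser above accepts both newline- and comma-separated lists, which is easy to confirm directly (hypothetical URLs):

```python
import sdk_repository

urls = sdk_repository.parse_stub_universe_url_string(
    "https://example.com/stub-a.json\n"
    "https://example.com/stub-b.json,https://example.com/stub-c.json")
assert len(urls) == 3  # empty fragments are filtered out as well
```
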
/tools/update_config_json.py:
--------------------------------------------------------------------------------
1 | """
2 | A simple script to ensure that the ordering of the "service" section in config.json is standardized.
3 | NOTE: This overwrites the files.
4 |
5 | Usage: from the `dcos-commons` root:
6 |
7 | $ python tools/update_config_json.py
8 |
9 | """
10 | import collections
11 | import json
12 | import logging
13 | import sys
14 | import difflib
15 |
16 |
17 | logging.basicConfig(
18 | format='[%(asctime)s|%(name)s|%(levelname)s]: %(message)s',
19 | level="INFO",
20 | stream=sys.stdout)
21 |
22 | LOG = logging.getLogger(__name__)
23 |
24 |
25 | def read_file(file_path: str) -> str:
26 | LOG.info("Reading from %s", file_path)
27 | with open(file_path, "r") as handle:
28 | return handle.read()
29 |
30 |
31 | def read_json_file(file_path: str) -> collections.OrderedDict:
32 | return json.loads(read_file(file_path), object_pairs_hook=collections.OrderedDict)
33 |
34 |
35 | def write_file(file_path: str, content: str) -> None:
36 | LOG.info("Writing to %s", file_path)
37 | with open(file_path, "w") as handle:
38 | handle.write(content)
39 | if not content.endswith("\n"):
40 | handle.write("\n")
41 |
42 |
43 | def write_json_file(file_path: str, content: collections.OrderedDict):
44 | write_file(file_path, json.dumps(content, indent=2))
45 |
46 |
47 | def reorder(original: collections.OrderedDict, head: list=[], tail: list=[],
48 | mapper=lambda x: x) -> collections.OrderedDict:
49 | remaining = []
50 |
51 | if not isinstance(original, dict):
52 | return original
53 |
54 | for p in original.keys():
55 | if p in tail:
56 | continue
57 | if p in head:
58 | continue
59 | remaining.append(p)
60 |
61 | reordered = collections.OrderedDict()
62 | for p in head:
63 | if p in original:
64 | reordered[p] = mapper(original[p])
65 |
66 | for p in remaining:
67 | reordered[p] = mapper(original[p])
68 |
69 | for p in tail:
70 | if p in original:
71 | reordered[p] = mapper(original[p])
72 |
73 | return reordered
74 |
75 |
76 | def reorder_property(schema: collections.OrderedDict) -> collections.OrderedDict:
77 | return reorder(schema, head=["description", "type", "enum", "default", ], tail=["properties", ])
78 |
79 |
80 | def reorder_service(service_properties: collections.OrderedDict) -> collections.OrderedDict:
81 | expected_order_head = ["name",
82 | "user",
83 | "service_account",
84 | "service_account_secret",
85 | "virtual_network_enabled",
86 | "virtual_network_name",
87 | "virtual_network_plugin_labels",
88 | "mesos_api_version",
89 | "log_level", ]
90 |
91 | expected_order_tail = ["security", ]
92 |
93 | return reorder(service_properties,
94 | expected_order_head, expected_order_tail,
95 | reorder_property)
96 |
97 |
98 | def print_diff(original: collections.OrderedDict, new: collections.OrderedDict):
99 | o = json.dumps(original, indent=2)
100 | c = json.dumps(new, indent=2)
101 |
102 | diff = difflib.unified_diff(o.split("\n"), c.split("\n"))
103 |
104 | LOG.info("\n".join(diff))
105 |
106 |
107 | def process(filename: str):
108 | contents = read_json_file(filename)
109 | original = read_json_file(filename)
110 |
111 | reordered = reorder_service(contents["properties"]["service"]["properties"])
112 | contents["properties"]["service"]["properties"] = reordered
113 |
114 | print_diff(original, contents)
115 |
116 | write_json_file(filename, contents)
117 |
118 |
119 | if __name__ == "__main__":
120 | files = [
121 | "frameworks/cassandra/universe/config.json",
122 | "frameworks/elastic/universe-kibana/config.json",
123 | "frameworks/elastic/universe/config.json",
124 | "frameworks/hdfs/universe/config.json",
125 | "frameworks/helloworld/universe/config.json",
126 | "frameworks/kafka/universe/config.json",
127 | "frameworks/template/universe/config.json",
128 | ]
129 |
130 | for f in files:
131 | process(f)
132 |
--------------------------------------------------------------------------------
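To make the `reorder()` contract concrete, a small standalone sketch (assuming it is run from the `dcos-commons` root so the module is importable):

```python
import collections
import sys

sys.path.insert(0, "tools")  # assumes we're running from the dcos-commons root
from update_config_json import reorder

original = collections.OrderedDict(
    [("log_level", "INFO"), ("name", "svc"), ("security", {}), ("user", "nobody")])
result = reorder(original, head=["name", "user"], tail=["security"])

# Keys in `head` are pulled to the front, `tail` keys go last, and the
# remaining keys keep their original relative order:
assert list(result.keys()) == ["name", "user", "log_level", "security"]
```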
/test-runner.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Build a framework, package, upload it, and then run its integration tests.
4 | # (Or all frameworks depending on arguments.) Expected to be called by test.sh
5 |
6 | # Exit immediately on errors
7 | set -e -x
8 |
9 | # Export the required environment variables:
10 | export DCOS_ENTERPRISE
11 | export PYTHONUNBUFFERED=1
12 | export SECURITY
13 |
14 |
15 | REPO_ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
16 |
17 | if [ "$FRAMEWORK" = "all" ]; then
18 | if [ -n "$STUB_UNIVERSE_URL" ]; then
19 | echo "Cannot set \$STUB_UNIVERSE_URL when building all frameworks"
20 | exit 1
21 | fi
22 | # randomize the FRAMEWORK_LIST
23 | FRAMEWORK_LIST=$(ls $REPO_ROOT_DIR/frameworks | while read -r fw; do printf "%05d %s\n" "$RANDOM" "$fw"; done | sort -n | cut -c7- )
24 | else
25 | FRAMEWORK_LIST=$FRAMEWORK
26 | fi
27 |
28 | echo "Beginning integration tests at "`date`
29 |
30 | # Strip the quotes from the -k and -m options to pytest
31 | PYTEST_K_FROM_PYTEST_ARGS=`echo "$PYTEST_ARGS" \
32 | | sed -e "s#.*-k [\'\"]\([^\'\"]*\)['\"].*#\1#"`
33 | if [ "$PYTEST_K_FROM_PYTEST_ARGS" == "$PYTEST_ARGS" ]; then
34 | PYTEST_K_FROM_PYTEST_ARGS=
35 | else
36 | if [ -n "$PYTEST_K" ]; then
37 | PYTEST_K="$PYTEST_K "
38 | fi
39 | PYTEST_K="${PYTEST_K}${PYTEST_K_FROM_PYTEST_ARGS}"
40 | PYTEST_ARGS=`echo "$PYTEST_ARGS" \
41 | | sed -e "s#-k [\'\"]\([^\'\"]*\)['\"]##"`
42 | fi
43 |
44 | PYTEST_M_FROM_PYTEST_ARGS=`echo "$PYTEST_ARGS" \
45 | | sed -e "s#.*-m [\'\"]\([^\'\"]*\)['\"].*#\1#"`
46 | if [ "$PYTEST_M_FROM_PYTEST_ARGS" == "$PYTEST_ARGS" ]; then
47 | PYTEST_M_FROM_PYTEST_ARGS=
48 | else
49 | if [ -n "$PYTEST_M" ]; then
50 | PYTEST_M="$PYTEST_M "
51 | fi
52 | PYTEST_M="${PYTEST_M}${PYTEST_M_FROM_PYTEST_ARGS}"
53 | PYTEST_ARGS=`echo "$PYTEST_ARGS" \
54 | | sed -e "s#-m [\'\"]\([^\'\"]*\)['\"]##"`
55 | fi
56 |
57 |
58 | pytest_args=()
59 |
60 | # PYTEST_K and PYTEST_M are treated as single strings, and should thus be added
61 | # to the pytest_args array in quotes.
62 | if [ -n "$PYTEST_K" ]; then
63 | pytest_args+=(-k "$PYTEST_K")
64 | fi
65 |
66 | if [ -n "$PYTEST_M" ]; then
67 | pytest_args+=(-m "$PYTEST_M")
68 | fi
69 |
70 | # Each of the space-separated parts of PYTEST_ARGS are treated separately.
71 | if [ -n "$PYTEST_ARGS" ]; then
72 | pytest_args+=($PYTEST_ARGS)
73 | fi
74 |
75 | if [ -f /ssh/key ]; then
76 | eval "$(ssh-agent -s)"
77 | ssh-add /ssh/key
78 | fi
79 |
80 |
81 | if [ -z "$PYTHONPATH" ]; then
82 | if [ -d ${REPO_ROOT_DIR}/testing ]; then
83 | export PYTHONPATH=${REPO_ROOT_DIR}/testing
84 | fi
85 | fi
86 |
87 | for framework in $FRAMEWORK_LIST; do
88 | echo "STARTING: $framework"
89 | FRAMEWORK_DIR=$REPO_ROOT_DIR/frameworks/${framework}
90 |
91 | if [ ! -d ${FRAMEWORK_DIR} -a "${FRAMEWORK}" != "all" ]; then
92 | echo "FRAMEWORK_DIR=${FRAMEWORK_DIR} does not exist."
93 | echo "Assuming single framework in ${REPO_ROOT}."
94 | FRAMEWORK_DIR=${REPO_ROOT_DIR}
95 | fi
96 |
97 | if [ -z "$CLUSTER_URL" ]; then
98 |
99 | echo "No DC/OS cluster specified. Attempting to create one now"
100 | dcos-launch create -c /build/config.yaml
101 | dcos-launch wait
102 |
103 | # configure the dcos-cli/shakedown backend
104 | export CLUSTER_URL=https://`dcos-launch describe | jq -r .masters[0].public_ip`
105 | CLUSTER_WAS_CREATED=True
106 | fi
107 |
108 | echo "Configuring dcoscli for cluster: $CLUSTER_URL"
109 |     echo -e "\tDCOS_ENTERPRISE=$DCOS_ENTERPRISE"
110 | /build/tools/dcos_login.py
111 |
112 | if [ -f cluster_info.json ]; then
113 | if [ `cat cluster_info.json | jq .key_helper` == 'true' ]; then
114 | cat cluster_info.json | jq -r .ssh_private_key > /root/.ssh/id_rsa
115 | chmod 600 /root/.ssh/id_rsa
116 | fi
117 | fi
118 |
119 | echo "Starting test for $framework at "`date`
120 |     exit_code=0
121 |     py.test -vv -s "${pytest_args[@]}" ${FRAMEWORK_DIR}/tests || exit_code=$?
122 | echo "Finished test for $framework at "`date`
123 | done
124 |
125 | echo "Finished integration tests at "`date`
126 |
127 | if [ -n "$CLUSTER_WAS_CREATED" ]; then
128 | echo "The DC/OS cluster $CLUSTER_URL was created. Please run"
129 |     echo -e "\t\$ dcos-launch delete"
130 | echo "to remove the cluster."
131 | fi
132 |
133 | exit $exit_code
134 |
--------------------------------------------------------------------------------
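The sed gymnastics around PYTEST_K/PYTEST_M above are easier to follow in isolation. A rough Python sketch of the same idea (a hypothetical helper, not used by the script): pull a quoted `-k` or `-m` expression out of a PYTEST_ARGS string and merge it into the corresponding standalone variable:

```python
import re

def merge_pytest_expr(pytest_args: str, existing: str, flag: str):
    """Extract a quoted -k/-m expression from pytest_args and append it to
    any existing PYTEST_K/PYTEST_M value, mirroring the sed logic above."""
    match = re.search(r"-%s ['\"]([^'\"]*)['\"]" % flag, pytest_args)
    if not match:
        return pytest_args, existing
    merged = "{} {}".format(existing, match.group(1)).strip()
    remaining = (pytest_args[:match.start()] + pytest_args[match.end():]).strip()
    return remaining, merged

args, k_expr = merge_pytest_expr("-vv -k 'sanity and not azure'", "", "k")
assert (args, k_expr) == ("-vv", "sanity and not azure")
```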
/testing/sdk_jobs.py:
--------------------------------------------------------------------------------
1 | '''Utilities relating to creation and verification of Metronome jobs
2 |
3 | ************************************************************************
4 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
5 | SHOULD ALSO BE APPLIED TO sdk_jobs IN ANY OTHER PARTNER REPOS
6 | ************************************************************************
7 | '''
8 | import json
9 | import logging
10 | import traceback
11 |
12 | import retrying
13 |
14 | import sdk_cmd
15 |
16 | log = logging.getLogger(__name__)
17 |
18 |
19 | # --- Install/uninstall jobs to the cluster
20 |
21 |
22 | def install_job(job_dict):
23 | job_name = job_dict['id']
24 |
25 | # attempt to delete current job, if any:
26 | _remove_job_by_name(job_name)
27 |
28 | log.info('Adding job {}:\n{}'.format(job_name, json.dumps(job_dict)))
29 | sdk_cmd.service_request('POST', 'metronome', '/v1/jobs', json=job_dict)
30 |
31 |
32 | def remove_job(job_dict):
33 | _remove_job_by_name(job_dict['id'])
34 |
35 |
36 | def _remove_job_by_name(job_name):
37 | try:
38 | # Metronome doesn't understand 'True' -- only 'true' will do.
39 | sdk_cmd.service_request(
40 | 'DELETE', 'metronome', '/v1/jobs/{}'.format(job_name),
41 | retry=False,
42 | params={'stopCurrentJobRuns': 'true'})
43 |     except Exception:
44 | log.info('Failed to remove any existing job named {} (this is likely as expected):\n{}'.format(
45 | job_name, traceback.format_exc()))
46 |
47 |
48 | class InstallJobContext(object):
49 | """Context manager for temporarily installing and removing metronome jobs."""
50 |
51 | def __init__(self, jobs):
52 | self.job_dicts = jobs
53 |
54 | def __enter__(self):
55 | for j in self.job_dicts:
56 | install_job(j)
57 |
58 | def __exit__(self, *args):
59 | for j in self.job_dicts:
60 | remove_job(j)
61 |
62 |
63 | # --- Run jobs and check their outcomes
64 |
65 |
66 | def run_job(job_dict, timeout_seconds=600, raise_on_failure=True):
67 | job_name = job_dict['id']
68 |
69 | # Start job run, get run ID to poll against:
70 | run_id = sdk_cmd.service_request('POST', 'metronome', '/v1/jobs/{}/runs'.format(job_name), log_args=False).json()['id']
71 | log.info('Started job {}: run id {}'.format(job_name, run_id))
72 |
73 | # Wait for run to succeed, throw if run fails:
74 | @retrying.retry(
75 | wait_fixed=1000,
76 | stop_max_delay=timeout_seconds*1000,
77 | retry_on_result=lambda res: not res)
78 | def wait():
79 |         # Note: We COULD directly query the run here via /v1/jobs/<job_id>/runs/<run_id>, but that
80 | # only works for active runs -- for whatever reason the run will disappear after it's done.
81 | # Therefore we have to query the full run history from the parent job and find our run_id there.
82 | run_history = sdk_cmd.service_request(
83 | 'GET', 'metronome', '/v1/jobs/{}'.format(job_name),
84 | retry=False,
85 | params={'embed': 'history'}).json()['history']
86 |
87 | successful_run_ids = [run['id'] for run in run_history['successfulFinishedRuns']]
88 | failed_run_ids = [run['id'] for run in run_history['failedFinishedRuns']]
89 |
90 | log.info('Job {} run history (waiting for successful {}): successful={} failed={}'.format(
91 | job_name, run_id, successful_run_ids, failed_run_ids))
92 |
93 | # Note: If a job has restart.policy=ON_FAILURE, it won't show up in failed_run_ids even when it fails.
94 | # Instead it will just keep restarting automatically until it succeeds or is deleted.
95 | if raise_on_failure and run_id in failed_run_ids:
96 | raise Exception('Job {} with id {} has failed, exiting early'.format(job_name, run_id))
97 |
98 | return run_id in successful_run_ids
99 |
100 | wait()
101 |
102 | return run_id
103 |
104 |
105 | class RunJobContext(object):
106 | """Context manager for running different named jobs at startup/shutdown."""
107 |
108 | def __init__(self, before_jobs=[], after_jobs=[], timeout_seconds=600):
109 | self.before_job_dicts = before_jobs
110 | self.after_job_dicts = after_jobs
111 | self.timeout_seconds = timeout_seconds
112 |
113 | def __enter__(self):
114 | for j in self.before_job_dicts:
115 | run_job(j, timeout_seconds=self.timeout_seconds)
116 |
117 | def __exit__(self, *args):
118 | for j in self.after_job_dicts:
119 | run_job(j, timeout_seconds=self.timeout_seconds)
120 |
--------------------------------------------------------------------------------
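Taken together, a typical caller looks roughly like this (a hypothetical sketch; the job definitions are simplified Metronome job JSON, which in practice also declares resources):

```python
import sdk_jobs  # assumes testing/ is on PYTHONPATH

# Simplified, hypothetical job definitions:
write_job = {'id': 'write-data', 'run': {'cmd': 'echo writing'}}
verify_job = {'id': 'verify-data', 'run': {'cmd': 'echo verifying'}}

with sdk_jobs.InstallJobContext([write_job, verify_job]):
    # Jobs exist in Metronome only for the duration of this block.
    with sdk_jobs.RunJobContext(before_jobs=[write_job],
                                after_jobs=[verify_job],
                                timeout_seconds=300):
        pass  # ...exercise the service while the test data is in place...
```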
/tests/acceptance/test_jenkins.py:
--------------------------------------------------------------------------------
1 | """Jenkins acceptance tests for DC/OS."""
2 |
3 | from shakedown import *
4 |
5 | PACKAGE_NAME = 'jenkins'
6 | JOB_NAME = 'jenkins-acceptance-test-job'
7 | DCOS_SERVICE_URL = dcos_service_url(PACKAGE_NAME)
8 | WAIT_TIME_IN_SECS = 300
9 |
10 |
11 | def test_install_jenkins():
12 | """Install the Jenkins package for DC/OS.
13 | """
14 | client = shakedown.marathon.create_client()
15 | install_package_and_wait(PACKAGE_NAME, client)
16 | assert package_installed(PACKAGE_NAME), 'Package failed to install'
17 |
18 | end_time = time.time() + WAIT_TIME_IN_SECS
19 | found = False
20 | while time.time() < end_time:
21 | found = get_service(PACKAGE_NAME) is not None
22 | if found and service_healthy(PACKAGE_NAME):
23 | break
24 | time.sleep(1)
25 |
26 |     assert found, 'Service did not register with DC/OS'
27 |
28 |
29 | def test_jenkins_is_alive():
30 | """Ensure Jenkins is alive by attempting to get the version from the
31 | Jenkins master, which is present in the HTTP headers.
32 | """
33 | end_time = time.time() + WAIT_TIME_IN_SECS
34 | while time.time() < end_time:
35 | try:
36 | # This can except on a 500 until the Jenkins app has started
37 | r = http.get(DCOS_SERVICE_URL)
38 | if r.status_code < 500:
39 | assert r.status_code == 200, 'Did not receive HTTP 200 OK status code'
40 | assert 'X-Jenkins' in r.headers, 'Missing the X-Jenkins HTTP header.'
41 | break
42 |         except Exception:
43 | pass
44 | time.sleep(1)
45 |
46 |
47 | def test_create_a_job():
48 | """Create a new Jenkins job.
49 | """
50 | here = os.path.dirname(__file__)
51 | headers = {'Content-Type': 'application/xml'}
52 | job_config = ''
53 | url = "{}/createItem?name={}".format(DCOS_SERVICE_URL, JOB_NAME)
54 |
55 | with open(os.path.join(here, 'fixtures', 'test-job.xml')) as test_job:
56 | job_config = test_job.read()
57 |
58 | r = http.post(url, headers=headers, data=job_config)
59 | assert r.status_code == 200, 'Failed to create test job.'
60 |
61 |
62 | def test_trigger_a_job_build():
63 | """Build the new Jenkins job we've created.
64 | """
65 | url = "{}/job/{}/build".format(DCOS_SERVICE_URL, JOB_NAME)
66 | r = http.post(url)
67 |
68 |     assert r.status_code == 201, 'Failed to build test job.'
69 |
70 |
71 | def test_wait_for_jenkins_build_agent():
72 | """A dynamic build agent needs to connect before the build can kick off.
73 | """
74 | success = False
75 | end_time = time.time() + WAIT_TIME_IN_SECS
76 | while time.time() < end_time:
77 | if get_service_tasks('jenkins'):
78 | success = True
79 | break
80 | time.sleep(5)
81 |
82 | assert success, 'Agent did not connect within allowed time.'
83 |
84 |
85 | def test_wait_for_build_to_start():
86 | """Wait for the job we triggered to start.
87 | """
88 | success = False
89 | url = "{}/job/{}/1/api/json".format(DCOS_SERVICE_URL, JOB_NAME)
90 | end_time = time.time() + WAIT_TIME_IN_SECS
91 | while time.time() < end_time:
92 | try:
93 | # This can except on a 404 until the job is created on the build agent
94 | r = http.get(url)
95 | success = True
96 | break
97 |         except Exception:
98 | pass
99 | time.sleep(5)
100 |
101 | assert success, 'Build did not start within allowed time.'
102 |
103 |
104 | def test_wait_for_build_to_finish():
105 | """Wait for the job we kicked off to finish.
106 | """
107 | success = False
108 | url = "{}/job/{}/1/api/json".format(DCOS_SERVICE_URL, JOB_NAME)
109 | end_time = time.time() + WAIT_TIME_IN_SECS
110 | while time.time() < end_time:
111 | r = http.get(url)
112 |
113 | if r.status_code == 200:
114 | data = r.json()
115 | if data['result'] in ('SUCCESS', 'FAILURE'):
116 | print(data['result'])
117 | success = True
118 | break
119 | time.sleep(5)
120 |
121 | assert success, 'Build did not finish within allowed time.'
122 |
123 |
124 | def test_delete_a_job():
125 | """Delete our test job.
126 | """
127 | url = "{}/job/{}/doDelete".format(DCOS_SERVICE_URL, JOB_NAME)
128 | r = http.post(url)
129 |
130 | assert r.status_code == 200, 'Failed to delete test job.'
131 |
132 |
133 | def test_uninstall_jenkins():
134 | """Uninstall the Jenkins package for DC/OS.
135 | """
136 | uninstall_package_and_wait(PACKAGE_NAME)
137 | assert not package_installed(PACKAGE_NAME), 'Package failed to uninstall'
138 |
--------------------------------------------------------------------------------
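The suite repeats the same poll-until-deadline loop several times. A small helper along these lines (hypothetical, not part of the suite) would collapse them:

```python
import time


def wait_for(predicate, timeout_seconds=300, interval_seconds=5):
    """Poll `predicate` until it returns something truthy or the deadline
    passes; returns the last result (falsy on timeout)."""
    end_time = time.time() + timeout_seconds
    while time.time() < end_time:
        result = predicate()
        if result:
            return result
        time.sleep(interval_seconds)
    return False


# e.g. test_wait_for_jenkins_build_agent() could reduce to:
#     assert wait_for(lambda: get_service_tasks('jenkins')), \
#         'Agent did not connect within allowed time.'
```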
/tools/dcos_login.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import logging
3 | import os
4 |
5 | import dcos.cluster
6 | import requests
7 | import retrying
8 |
9 | from dcos_test_utils import logger
10 |
11 | __CLI_LOGIN_OPEN_TOKEN = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6Ik9UQkVOakZFTWtWQ09VRTRPRVpGTlRNMFJrWXlRa015Tnprd1JrSkVRemRCTWpBM1FqYzVOZyJ9.eyJlbWFpbCI6ImFsYmVydEBiZWtzdGlsLm5ldCIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJpc3MiOiJodHRwczovL2Rjb3MuYXV0aDAuY29tLyIsInN1YiI6Imdvb2dsZS1vYXV0aDJ8MTA5OTY0NDk5MDExMTA4OTA1MDUwIiwiYXVkIjoiM3lGNVRPU3pkbEk0NVExeHNweHplb0dCZTlmTnhtOW0iLCJleHAiOjIwOTA4ODQ5NzQsImlhdCI6MTQ2MDE2NDk3NH0.OxcoJJp06L1z2_41_p65FriEGkPzwFB_0pA9ULCvwvzJ8pJXw9hLbmsx-23aY2f-ydwJ7LSibL9i5NbQSR2riJWTcW4N7tLLCCMeFXKEK4hErN2hyxz71Fl765EjQSO5KD1A-HsOPr3ZZPoGTBjE0-EFtmXkSlHb1T2zd0Z8T5Z2-q96WkFoT6PiEdbrDA-e47LKtRmqsddnPZnp0xmMQdTr2MjpVgvqG7TlRvxDcYc-62rkwQXDNSWsW61FcKfQ-TRIZSf2GS9F9esDF4b5tRtrXcBNaorYa9ql0XAWH5W_ct4ylRNl3vwkYKWa4cmPvOqT5Wlj9Tf0af4lNO40PQ' # noqa
12 | __CLI_LOGIN_EE_USERNAME = 'bootstrapuser'
13 | __CLI_LOGIN_EE_PASSWORD = 'deleteme'
14 |
15 | log = logging.getLogger(__name__)
16 |
17 |
18 | @retrying.retry(wait_fixed=2000, stop_max_delay=120 * 1000)
19 | def login(dcosurl: str, username: str, password: str, is_enterprise: bool) -> str:
20 | if is_enterprise:
21 | log.info('logging into {} as {}'.format(dcosurl, username))
22 | payload = {'uid': username, 'password': password}
23 | else:
24 | log.info('logging into {} with default open token'.format(dcosurl))
25 | payload = {'token': __CLI_LOGIN_OPEN_TOKEN}
26 |
27 | headers = {'Content-Type': 'application/json'}
28 | login_endpoint = '{dcosurl}/acs/api/v1/auth/login'.format(dcosurl=dcosurl)
29 | r = requests.post(login_endpoint, headers=headers, json=payload, verify=False)
30 | assert r.status_code == 200, '{} failed {}: {}'.format(login_endpoint, r.status_code, r.text)
31 | log.info('Login was successful!')
32 |
33 | return r.json()['token']
34 |
35 |
36 | def _netloc(url: str):
37 |     return url.split('//')[-1]  # strip the scheme so URLs compare by host
38 |
39 |
40 | def configure_cli(dcosurl: str, token: str) -> None:
41 | for cluster in dcos.cluster.get_clusters():
42 | # check to see if the target cluster has been configured
43 | if _netloc(cluster.get_url()) == _netloc(dcosurl):
44 | dcos.cluster.set_attached(cluster.cluster_path)
45 | log.info('Attached to already setup cluster: ' + cluster.cluster_id)
46 | # cluster attached successfully, can begin using CLI/tests
47 | return
48 | log.warning('Target cluster has not been setup yet. Performing setup...')
49 | with dcos.cluster.setup_directory() as temp_path:
50 | dcos.cluster.set_attached(temp_path)
51 | dcos.config.set_val('core.dcos_url', dcosurl)
52 | dcos.config.set_val('core.ssl_verify', 'False')
53 | dcos.config.set_val('core.dcos_acs_token', token)
54 | dcos.cluster.setup_cluster_config(dcosurl, temp_path, False)
55 |
56 |
57 | def logout(dcosurl: str):
58 | pass
59 |
60 |
61 | def login_session() -> None:
62 | """Login to DC/OS.
63 |
64 | Behavior is determined by the following environment variables:
65 | CLUSTER_URL: full URL to the test cluster
66 | DCOS_LOGIN_USERNAME: the EE user (defaults to bootstrapuser)
67 | DCOS_LOGIN_PASSWORD: the EE password (defaults to deleteme)
68 | DCOS_ENTERPRISE: determine how to authenticate (defaults to false)
69 | DCOS_ACS_TOKEN: bypass auth and use the user supplied token
70 | """
71 | cluster_url = os.environ.get('CLUSTER_URL')
72 | if not cluster_url:
73 | raise Exception('Must have CLUSTER_URL set in environment!')
74 |
75 | def ignore_empty(envvar, default):
76 | # Ignore the user passing in empty ENVVARs.
77 | value = os.environ.get(envvar, "").strip()
78 | if not value:
79 | return default
80 |
81 | return value
82 |
83 | dcos_login_username = ignore_empty('DCOS_LOGIN_USERNAME', __CLI_LOGIN_EE_USERNAME)
84 | dcos_login_password = ignore_empty('DCOS_LOGIN_PASSWORD', __CLI_LOGIN_EE_PASSWORD)
85 | dcos_enterprise = ignore_empty('DCOS_ENTERPRISE', 'true').lower() == 'true'
86 | dcos_acs_token = os.environ.get('DCOS_ACS_TOKEN')
87 | if not dcos_acs_token:
88 | log.info('No ACS token provided, logging in...')
89 | dcos_acs_token = login(
90 | dcosurl=cluster_url,
91 | username=dcos_login_username,
92 | password=dcos_login_password,
93 | is_enterprise=dcos_enterprise)
94 | configure_cli(dcosurl=cluster_url, token=dcos_acs_token)
95 |
96 |
97 | if __name__ == '__main__':
98 | logger.setup(os.getenv('TEST_LOG_LEVEL', 'INFO'))
99 | login_session()
100 |
--------------------------------------------------------------------------------
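Besides being run as a script, the module can be driven from other tooling; a minimal sketch, assuming the dcos CLI packages used above are installed and CLUSTER_URL points at a reachable cluster:

```python
import os
import sys

sys.path.insert(0, "tools")  # assumes we're at the dcos-commons root
import dcos_login

# CLUSTER_URL must already be exported; the remaining variables fall back
# to the defaults documented in login_session().
os.environ.setdefault('DCOS_ENTERPRISE', 'false')
dcos_login.login_session()
```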
/tools/universe/test_package_manager.py:
--------------------------------------------------------------------------------
1 | from .package_manager import PackageManager
2 | from .package import Package
3 | from .package import Version
4 |
5 |
6 | def create_package_manager(mocker, packages):
7 | """A utility function to create a package manager that returns the specified list
8 | of packages"""
9 | pm = PackageManager()
10 | pm._get_packages = mocker.MagicMock(return_value=packages)
11 |
12 | return pm
13 |
14 |
15 | def test_no_packages(mocker):
16 | dummy_packages = []
17 | pm = create_package_manager(mocker, dummy_packages)
18 | assert pm.get_package_versions("package") == []
19 |
20 |
21 | def test_single_package_single_version(mocker):
22 |
23 | dummy_packages = [
24 | {
25 | "name": "package",
26 | "version": "1.2.3",
27 | "releaseVersion": 0,
28 | },
29 | ]
30 |
31 | pm = create_package_manager(mocker, dummy_packages)
32 |
33 | print(Package.from_json(dummy_packages[0]))
34 | print(Package("package", Version(0, "1.2.3")))
35 |
36 | assert pm.get_package_versions("package") == [
37 | Package("package", Version(0, "1.2.3")),
38 | ]
39 |
40 |
41 | def test_single_package_multiple_versions(mocker):
42 |
43 | dummy_packages = [
44 | {
45 | "name": "package",
46 | "version": "1.2.3",
47 | "releaseVersion": 0,
48 | },
49 | {
50 | "name": "package",
51 | "version": "1.2.4",
52 | "releaseVersion": 0,
53 | },
54 | ]
55 |
56 | pm = create_package_manager(mocker, dummy_packages)
57 | versions = pm.get_package_versions("package")
58 | assert [p.get_version() for p in versions] == [
59 | Version(0, "1.2.3"),
60 | Version(0, "1.2.4"),
61 | ]
62 |
63 |
64 | def test_multiple_packages_single_versions(mocker):
65 |
66 | dummy_packages = [
67 | {
68 | "name": "package1",
69 | "version": "1.2.3",
70 | "releaseVersion": 0,
71 | },
72 | {
73 | "name": "package2",
74 | "version": "1.2.4",
75 | "releaseVersion": 0,
76 | },
77 | ]
78 |
79 | pm = create_package_manager(mocker, dummy_packages)
80 |
81 | versions = pm.get_package_versions("package1")
82 | assert versions == [
83 | Package("package1", Version(0, "1.2.3")),
84 | ]
85 | versions = pm.get_package_versions("package2")
86 | assert versions == [
87 | Package("package2", Version(0, "1.2.4")),
88 | ]
89 |
90 |
91 | def test_multiple_packages_multiple_versions(mocker):
92 |
93 | dummy_packages = [
94 | {
95 | "name": "package1",
96 | "version": "1.2.3",
97 | "releaseVersion": 0,
98 | },
99 | {
100 | "name": "package2",
101 | "version": "1.2.4",
102 | "releaseVersion": 0,
103 | },
104 | {
105 | "name": "package1",
106 | "version": "1.2.5",
107 | "releaseVersion": 0,
108 | },
109 | ]
110 |
111 | pm = create_package_manager(mocker, dummy_packages)
112 |
113 | versions = pm.get_package_versions("package1")
114 | assert [p.get_version() for p in versions] == [
115 | Version(0, "1.2.3"),
116 | Version(0, "1.2.5"),
117 | ]
118 | versions = pm.get_package_versions("package2")
119 | assert [p.get_version() for p in versions] == [
120 | Version(0, "1.2.4"),
121 | ]
122 |
123 |
124 | def test_version_for_specified_package_not_found(mocker):
125 | dummy_packages = [
126 | {
127 | "name": "package1",
128 | "version": "1.2.3",
129 | "releaseVersion": 0,
130 | },
131 | {
132 | "name": "package2",
133 | "version": "1.2.4",
134 | "releaseVersion": 0,
135 | },
136 | {
137 | "name": "package1",
138 | "version": "1.2.5",
139 | "releaseVersion": 0,
140 | },
141 | ]
142 |
143 | pm = create_package_manager(mocker, dummy_packages)
144 |
145 | versions = pm.get_package_versions(package_name="package_not_found")
146 | assert versions == []
147 |
148 |
149 | def test_latest_version(mocker):
150 |
151 | dummy_packages = [
152 | {
153 | "name": "package",
154 | "version": "1.2.3",
155 | "releaseVersion": 0,
156 | },
157 | {
158 | "name": "package",
159 | "version": "1.2.4",
160 | "releaseVersion": 10,
161 | },
162 | ]
163 |
164 | pm = create_package_manager(mocker, dummy_packages)
165 |
166 | assert pm.get_latest(package_name="package").get_version() == Version(
167 | 10, "1.2.4")
168 |
--------------------------------------------------------------------------------
/tools/universe/package_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | A simple package manager for Universe packages.
3 |
4 | The PackageManager class can also be used to determine the latest version of a particular package
5 | in the Universe.
6 | """
7 | import logging
8 | import subprocess
9 | import json
10 | import os
11 | import tempfile
12 |
13 | from . import package
14 | try:
15 | import requests
16 | _HAS_REQUESTS = True
17 | except ImportError:
18 | _HAS_REQUESTS = False
19 |
20 |
21 | LOGGER = logging.getLogger(__name__)
22 |
23 |
24 | class PackageManager:
25 | """A simple package manager for retrieving universe packages"""
26 | def __init__(self, universe_url="https://universe.mesosphere.com/repo",
27 | dcos_version="1.10",
28 | package_version="4",
29 | dry_run=False):
30 |
31 | self._dry_run = dry_run
32 | self._universe_url = universe_url
33 | self._headers = {
34 | "User-Agent": "dcos/{}".format(dcos_version),
35 | "Accept": "application/vnd.dcos.universe.repo+json;"
36 | "charset=utf-8;version=v{}".format(package_version),
37 | }
38 |
39 | self.__package_cache = None
40 |
41 | if _HAS_REQUESTS:
42 | self._get_packages = _get_packages_with_requests
43 | else:
44 | self._get_packages = _get_packages_with_curl
45 |
46 | def get_package_versions(self, package_name):
47 | """Get all versions for a specified package"""
48 |
49 | packages = self.get_packages()
50 |
51 | return packages.get(package_name, [])
52 |
53 | def get_latest(self, package_name):
54 | if isinstance(package_name, package.Package):
55 | package_name = package_name.get_name()
56 |
57 | all_package_versions = self.get_package_versions(package_name)
58 |
59 | if all_package_versions:
60 | return all_package_versions[-1]
61 |
62 | return None
63 |
64 | def get_packages(self):
65 | """Query the uninverse to get a list of packages"""
66 | if self._dry_run:
67 | return DryRunPackages()
68 |
69 | if not self.__package_cache:
70 | LOGGER.info("Package cache is empty. Retrieving package information")
71 | raw_package_list = self._get_packages(self._universe_url, self._headers)
72 |
73 | package_dict = {}
74 | for p in raw_package_list:
75 | package_name = p['name']
76 | package_object = package.Package.from_json(p)
77 |
78 | if package_name in package_dict:
79 | package_dict[package_name].append(package_object)
80 | else:
81 | package_dict[package_name] = [package_object, ]
82 |
83 | self.__package_cache = {}
84 | for p, packages in package_dict.items():
85 | self.__package_cache[p] = sorted(packages)
86 |
87 | return self.__package_cache
88 |
89 |
90 | def _get_packages_with_curl(universe_url, headers):
91 | """Use curl to download the packages from the universe"""
92 | with tempfile.TemporaryDirectory() as tmp_dir:
93 | tmp_filename = os.path.join(tmp_dir, 'packages.json')
94 |
95 | cmd = ["curl",
96 | "--write-out", "%{http_code}",
97 | "--silent",
98 | "-L",
99 | "--max-time", "5",
100 | "-X", "GET",
101 | "-o", tmp_filename, ]
102 | for k, header in headers.items():
103 | cmd.extend(["-H", "{}: {}".format(k, header)])
104 |
105 | cmd.append(universe_url)
106 |
107 | try:
108 | output = subprocess.check_output(cmd)
109 | status_code = int(output)
110 |
111 | if status_code != 200:
112 | raise Exception("Curl returned status code {}".format(status_code))
113 |
114 | with open(tmp_filename, "r") as f:
115 | packages = json.load(f)['packages']
116 | except Exception as e:
117 | LOGGER.error("Retrieving packages with curl failed. %s", e)
118 | packages = []
119 |
120 | return packages
121 |
122 |
123 | def _get_packages_with_requests(universe_url, headers):
124 | """Use the requests module to get the packages from the universe"""
125 | try:
126 | response = requests.get(universe_url, headers=headers)
127 | response.raise_for_status()
128 | packages = response.json()['packages']
129 | except Exception as e:
130 | LOGGER.error("Retrieving packages with requests failed. %s", e)
131 | packages = []
132 |
133 | return packages
134 |
135 |
136 | class DryRunPackages:
137 | def get(self, package_name, default):
138 | return [package.Package(package_name,
139 | package.Version(0, "DRY_RUN_VERSION")), ]
140 |
--------------------------------------------------------------------------------
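Typical usage, as a sketch (assumes it is run from the repo root with `tools/` on `sys.path`; the live lookup needs network access to the universe):

```python
import sys

sys.path.insert(0, "tools")  # assumes we're at the dcos-commons root
from universe.package_manager import PackageManager

# Live lookup against the public universe:
pm = PackageManager()
latest = pm.get_latest("jenkins")
if latest is not None:
    print(latest.get_version())

# Offline mode: every package resolves to a placeholder version.
dry = PackageManager(dry_run=True)
assert dry.get_latest("jenkins") is not None
```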
/tools/distribution/init:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import logging
4 | import argparse
5 | import subprocess
6 | import os
7 | import re
8 |
9 | logging.basicConfig(level="INFO")
10 |
11 | LOGGER = logging.getLogger(__name__)
12 |
13 | DCOS_COMMONS_DIST_ROOT = os.environ.get("DCOS_COMMONS_DIST_ROOT", "/dcos-commons-dist")
14 | DCOS_SDK_VERSION = "0.40.2"
15 |
16 |
17 | def get_sdk_version():
18 |     # Use a local; assigning DCOS_SDK_VERSION here would make it an unbound local.
19 |     sdk_version = DCOS_SDK_VERSION
20 |     version_path = os.path.join(DCOS_COMMONS_DIST_ROOT, ".version")
21 |     if os.path.exists(version_path):
22 |         with open(version_path) as f:
23 |             version = f.read().strip().strip("'")
24 |         LOGGER.info("Read version: %s", version)
25 |         if version:
26 |             sdk_version = version
27 |     return os.environ.get("DCOS_SDK_VERSION", sdk_version)
28 |
29 |
30 | def read_file(file_path: str) -> str:
31 | LOGGER.info("Reading from %s", file_path)
32 | with open(file_path, "r") as handle:
33 | return handle.read()
34 |
35 |
36 | def write_file(file_path: str, content: str) -> None:
37 | LOGGER.info("Writing to %s", file_path)
38 | with open(file_path, "w") as handle:
39 | handle.write(content)
40 |
41 |
42 | def copy_dist_file(filename: str, output_path: str):
43 | """Copy a distribution file to the specified output path"""
44 | source_file = os.path.join(DCOS_COMMONS_DIST_ROOT, filename)
45 |
46 | LOGGER.info("Copying %s to %s", source_file, output_path)
47 | subprocess.check_output(["cp", source_file, output_path])
48 |
49 |
50 | def copy_dist_folder(folder: str, output_path: str, exclude: list=[]):
51 | """Copy a distribution folder to the specified ouput path"""
52 | source_folder = os.path.join(DCOS_COMMONS_DIST_ROOT, folder.rstrip("/"))
53 |
54 | LOGGER.info("Copying %s to %s", source_folder, output_path)
55 | cmd = ["rsync", "-avz", "--delete", ]
56 |
57 | if exclude:
58 | for e in exclude:
59 | cmd.extend(["--exclude={}".format(e)])
60 |
61 | cmd.extend([source_folder, output_path])
62 |
63 | subprocess.check_output(cmd)
64 |
65 |
66 | def distribute_test_utils(output_path: str):
67 | """Copies the required files into the target folders"""
68 |
69 | output_path = output_path.rstrip("/") + "/"
70 |
71 | files = ["conftest.py",
72 | "test.sh",
73 | "TESTING.md",
74 | "UPDATING.md", ]
75 |
76 | for f in files:
77 | copy_dist_file(f, output_path)
78 |
79 | folders = [("tools", ["tools/ci", "tools/distribution"]), ("testing", []), ]
80 | for f in folders:
81 | copy_dist_folder(f[0], output_path, f[1])
82 |
83 |
84 | def update_sdk(output_path: str, target_version: str):
85 | build_gradle_path = os.path.join(output_path, "build.gradle")
86 |
87 | gradle_file_contents = read_file(build_gradle_path)
88 |
89 | gradle_file_contents = re.sub('dcosSDKVer = ".*?"',
90 | 'dcosSDKVer = "{}"'.format(target_version),
91 | gradle_file_contents)
92 |
93 |     gradle_file_contents = re.sub(r'compile "mesosphere:scheduler:[\d\w\.\-]+"',
94 |                                   'compile "mesosphere:scheduler:{}"'.format(target_version),
95 |                                   gradle_file_contents)
96 |     gradle_file_contents = re.sub(r'compile "mesosphere:executor:[\d\w\.\-]+"',
97 |                                   'compile "mesosphere:executor:{}"'.format(target_version),
98 |                                   gradle_file_contents)
99 |     gradle_file_contents = re.sub(r'testCompile "mesosphere:testing:[\d\w\.\-]+"',
100 |                                   'testCompile "mesosphere:testing:{}"'.format(target_version),
101 |                                   gradle_file_contents)
102 |
103 | write_file(build_gradle_path, gradle_file_contents)
104 |
105 | package_builder_path = os.path.join(output_path, "tools", "universe", "package_builder.py")
106 |
107 | package_builder_contents = read_file(package_builder_path)
108 |
109 | package_builder_contents = re.sub('_dcos_sdk_version = [\'"].*?[\'"]',
110 | '_dcos_sdk_version = "{}"'.format(target_version),
111 | package_builder_contents)
112 | write_file(package_builder_path, package_builder_contents)
113 |
114 | LOGGER.info("Updated to SDK version %s", target_version)
115 |
116 |
117 | def parse_args():
118 | parser = argparse.ArgumentParser(description="Init DC/OS test environment")
119 | parser.add_argument('output_path', type=str,
120 | help='The absolute path where the testing tools should be created')
121 | parser.add_argument("--update-sdk", type=str,
122 | help="Update the SDK in the target framework.")
123 | return parser.parse_args()
124 |
125 |
126 | def main():
127 | args = parse_args()
128 | distribute_test_utils(args.output_path)
129 |
130 |     if args.update_sdk:
131 | update_sdk(args.output_path, args.update_sdk)
132 |
133 |
134 | if __name__ == "__main__":
135 | main()
136 |
--------------------------------------------------------------------------------
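The SDK bump in `update_sdk()` is just regex substitution; a self-contained sketch of the build.gradle rewrite it performs:

```python
import re

gradle = 'ext { dcosSDKVer = "0.40.1" }'

# The same substitution update_sdk() applies to build.gradle:
bumped = re.sub('dcosSDKVer = ".*?"', 'dcosSDKVer = "0.40.2"', gradle)
assert bumped == 'ext { dcosSDKVer = "0.40.2" }'
```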
/tools/airgap_linter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Validates that the supplied framework (identified by the directory passed in) will function, to the best
4 | # of our knowledge, in an airgapped cluster.
5 | #
6 | # The checks are:
7 | # - No external URIs defined in anything but resource.json
8 | # - No images defined in anything but resource.json (and templated into the svc.yml)
9 | #
10 |
11 | import re
12 | import sys
13 | import os
14 |
15 |
16 | def extract_uris(file_name):
17 | with open(file_name, "r") as file:
18 | lines = file.readlines()
19 |
20 |     matcher = re.compile(r".*https?://([^?\s]*)", re.IGNORECASE)
21 | matches = []
22 | for line in lines:
23 | line = line.strip()
24 | # Do not grab comments
25 | if line.startswith("*") or line.startswith("#") or line.startswith("//"):
26 | continue
27 | # Do not grab "id" lines
28 | if '"id":' in line:
29 | continue
30 |
31 | match = matcher.match(line)
32 | if match:
33 | matches.append(match.group(1))
34 |
35 | return matches
36 |
37 |
38 | def validate_uris_in(file_name):
39 | uris = extract_uris(file_name)
40 |
41 | bad_uri = False
42 | for uri in uris:
43 | if is_bad_uri(uri, file_name):
44 | bad_uri = True
45 |
46 | return not bad_uri
47 |
48 |
49 | def is_bad_uri(uri, file_name):
50 | # A FQDN is a valid cluster internal FQDN if it contains one of the listed exceptions
51 | exceptions = [
52 | ".thisdcos",
53 | ".mesos:",
54 | ".mesos/",
55 | "$MESOS_CONTAINER_IP",
56 | "${MESOS_CONTAINER_IP}",
57 | "{{FRAMEWORK_HOST}}",
58 | "$FRAMEWORK_HOST",
59 | "${FRAMEWORK_HOST}",
60 | ]
61 |
62 | # Are any of the exceptions present?
63 | for exception in exceptions:
64 | if exception in uri:
65 | return False
66 |
67 | print("Found a bad URI:", uri, "in:", file_name,
68 | "Export URIs to resource.json to allow packaging for airgapped clusters.")
69 |
70 | return True
71 |
72 | def get_files_to_check_for_uris(framework_directory):
73 | # There's a set of files that will always be present.
74 | files = [os.path.join(framework_directory, "universe", "config.json"),
75 | os.path.join(framework_directory, "universe", "marathon.json.mustache")]
76 |
77 | # Always check every file in the `dist` directory of the scheduler.
78 | dist_dir = os.path.join(framework_directory, "src", "main", "dist")
79 |
80 | for dp, dn, filenames in os.walk(dist_dir):
81 | for file in filenames:
82 | files.append(os.path.join(dp, file))
83 |
84 | return files
85 |
86 |
87 | def validate_all_uris(framework_directory):
88 | bad_file = False
89 | files = get_files_to_check_for_uris(framework_directory)
90 | for file in files:
91 | if not validate_uris_in(file):
92 | bad_file = True
93 |
94 | return not bad_file
95 |
96 |
97 | def validate_images(framework_directory):
98 |     files = get_files_to_check_for_uris(framework_directory)
99 |     image_matcher = re.compile(r"image:\s?(.*)$", re.IGNORECASE)
100 |     env_var_matcher = re.compile(r"\{\{[A-Z0-9_]*\}\}")
101 |     bad_image = False  # accumulate across *all* files; previously reset per file
102 |     for file_name in files:
103 |         with open(file_name, "r") as f:
104 |             lines = f.readlines()
105 |         for line in lines:
106 |             line = line.strip()
107 |             if "image:" not in line:
108 |                 continue
109 |             match = image_matcher.match(line)
110 |             if not match:
111 |                 continue
112 |             image_path = match.group(1)
113 |             if not env_var_matcher.match(image_path):
114 |                 print("""Bad image found in {}. It is a direct reference instead of a templated reference: {}
115 | Export images to resource.json to allow packaging for airgapped clusters.""".format(file_name, image_path))
116 |                 bad_image = True
117 |     return not bad_image
118 |
119 |
120 | def print_help():
121 | print("""Scans a framework for any airgap issues. Checks all files for external URIs,
122 | and docker images for direct references
123 |
124 | usage: python airgap_linter.py <framework-directory>""")
125 |
126 |
127 | def main(argv):
128 | if len(argv) < 2:
129 | print_help()
130 | sys.exit(0)
131 |
132 | framework_directory = argv[1]
133 |
134 |     if not os.path.isdir(framework_directory):
135 |         sys.exit("Supplied framework directory {} does not exist or is not a directory.".format(framework_directory))
136 |
137 | uris_valid = validate_all_uris(framework_directory)
138 | images_valid = validate_images(framework_directory)
139 |
140 | invalid = False
141 | if not uris_valid:
142 | invalid = True
143 |
144 | if not images_valid:
145 | invalid = True
146 |
147 | if invalid:
148 | print("Airgap check FAILED. This framework will NOT work in an airgap. Fix the detected issues.")
149 | sys.exit(1)
150 |
151 | print("Airgap check complete. This framework will probably work in an airgapped cluster, but for the love of everything test that.")
152 | sys.exit(0)
153 |
154 |
155 | if __name__ == '__main__':
156 | sys.exit(main(sys.argv))
157 |
--------------------------------------------------------------------------------
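The URI check boils down to `is_bad_uri()`; a quick sketch of how it classifies addresses (assuming `tools/` is on `sys.path` so the module imports directly):

```python
import sys

sys.path.insert(0, "tools")  # assumes we're at the dcos-commons root
from airgap_linter import is_bad_uri

# Cluster-internal FQDNs pass (".thisdcos" is in the exception list):
assert not is_bad_uri("kdc.marathon.autoip.dcos.thisdcos.directory", "svc.yml")

# External hosts are flagged (returns True and prints the warning):
assert is_bad_uri("example.com/artifact.tgz", "svc.yml")
```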
/tools/kdc/kdc.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | """
4 | Wrapper script around sdk_auth.py used to set up and tear down a KDC environment ad hoc.
5 |
6 | This assumes there will be only one KDC in the cluster at any time, and thus that the single
7 | instance of the KDC will be aptly named `kdc`.
8 |
9 | In order to run the script from the `dcos-commons` repo root, the `PYTHONPATH` environment
10 | variable must also be set:
11 | ```bash
12 | $ PYTHONPATH=testing ./tools/kdc/kdc.py SUBCOMMAND
13 | ```
14 |
15 | ## Deploying KDC
16 |
17 | This tool can be used to deploy a KDC application for testing.
18 |
19 | First create a principals file containing a newline-separated list of principals.
20 | As an example, a file (`kafka-principals.txt`) for Apache Kafka would contain:
21 | ```
22 | kafka/kafka-0-broker.kafka.autoip.dcos.thisdcos.directory@LOCAL
23 | kafka/kafka-1-broker.kafka.autoip.dcos.thisdcos.directory@LOCAL
24 | kafka/kafka-2-broker.kafka.autoip.dcos.thisdcos.directory@LOCAL
25 | client@LOCAL
26 | ```
27 | (assuming three Kafka brokers and a single client principal)
28 |
29 | Running this utility as follows:
30 | ```bash
31 | $ PYTHONPATH=testing ./tools/kdc/kdc.py deploy kafka-principals.txt
32 | ```
33 | will perform the following actions:
34 | 1. Deploys a KDC Marathon application named `kdc` as defined in `tools/kdc/kdc.json`
35 | 2. Adds the principals in `kafka-principals.txt` to the KDC store
36 | 3. Saves the generated keytab as the DC/OS secret `__dcos_base64___keytab`
37 |
38 | ## Removing KDC
39 |
40 | This tool can be used to remove an existing KDC deployment.
41 |
42 | Running this utility as follows:
43 | ```bash
44 | $ PYTHONPATH=testing ./tools/kdc/kdc.py teardown
45 | ```
46 | will perform the following actions:
47 | 1. Remove the KDC Marathon application named `kdc`
48 | 2. Remove the DC/OS secret `__dcos_base64___keytab`
49 |
50 |
51 | Note: The KDC this tool launches uses the following encryption key types:
52 | - aes256-cts-hmac-sha1-96
53 | - des3-cbc-sha1
54 | - arcfour-hmac-md5
55 | """
56 | import argparse
57 | import logging
58 | import os
59 |
60 | import sdk_auth
61 | import sdk_cmd
62 | import sdk_security
63 |
64 |
65 | logging.basicConfig(
66 | format='[%(asctime)s|%(name)s|%(levelname)s]: %(message)s',
67 | level=logging.INFO)
68 |
69 | log = logging.getLogger(__name__)
70 |
71 |
72 | def parse_principals(principals_file: str) -> list:
73 | """
74 | Parses the given file and extracts the list of principals.
75 | :param principals_file: The file to extract principals from.
76 |     :return (list): List of principals.
77 | """
78 | if not os.path.exists(principals_file):
79 | print(principals_file)
80 | raise RuntimeError("The provided principal file path is invalid")
81 |
82 | with open(principals_file) as f:
83 | principals = [principal.strip() for principal in f.readlines()]
84 |
85 | print("Successfully parsed principals")
86 | for principal in principals:
87 | print(principal)
88 |
89 | return principals
90 |
91 |
92 | def deploy(args: argparse.Namespace):
93 | log.info("Deploying KDC")
94 |
95 | kerberos = sdk_auth.KerberosEnvironment()
96 |
97 | if args.principals_file:
98 | create_keytab_secret(args, kerberos)
99 |
100 | log.info("KDC cluster successfully deployed")
101 |
102 |
103 | def create_keytab_secret(args: argparse.Namespace, kerberos=None):
104 |
105 | if not kerberos:
106 | kerberos = sdk_auth.KerberosEnvironment()
107 |
108 | principals = parse_principals(args.principals_file)
109 | kerberos.add_principals(principals)
110 |
111 | if args.secret_name:
112 | kerberos.set_keytab_path(args.secret_name, args.binary_secret)
113 |
114 | kerberos.finalize()
115 |
116 |     log.info("Keytab secret successfully created")
117 |
118 |
119 | def teardown(args: argparse.Namespace):
120 | log.info("Tearing down KDC")
121 |
122 | sdk_cmd.run_cli(" ".join(["marathon", "app", "remove", "kdc"]))
123 |
124 | sdk_security.install_enterprise_cli()
125 | if args.binary_secret:
126 | sdk_security.delete_secret(args.secret_name)
127 | else:
128 | sdk_security.delete_secret('__dcos_base64__{}'.format(args.secret_name))
129 |
130 | log.info("KDC cluster successfully torn down")
131 |
132 |
133 | def parse_args():
134 | parser = argparse.ArgumentParser(description='Manage a KDC instance')
135 |
136 | parser.add_argument('--secret-name', type=str, required=False,
137 | default=None,
138 | help='The secret name to use for the generated keytab')
139 | parser.add_argument('--binary-secret', action='store_true',
140 | help='The secret should be stored as a binary secret')
141 |     subparsers = parser.add_subparsers(help='sub-command help')
142 |
143 | deploy_parser = subparsers.add_parser('deploy', help='deploy help')
144 | deploy_parser.add_argument('principals_file', type=str, default=None,
145 | help='Path to a file listing the principals as newline-separated strings')
146 | deploy_parser.set_defaults(func=deploy)
147 |
148 |     teardown_parser = subparsers.add_parser('teardown', help='teardown help')
149 | teardown_parser.set_defaults(func=teardown)
150 |
151 | return parser.parse_args()
152 |
153 |
154 | def main():
155 | args = parse_args()
156 | args.func(args)
157 |
158 |
159 | if __name__ == "__main__":
160 | main()
161 |
--------------------------------------------------------------------------------
/universe/marathon.json.mustache:
--------------------------------------------------------------------------------
1 | {
2 | "id": "{{service.name}}",
3 | "cpus": {{jenkins-master.cpus}},
4 | "mem": {{jenkins-master.mem}},
5 | "instances": 1,
6 | {{#service.user}}
7 | "user":"{{service.user}}",
8 | {{/service.user}}
9 | {{#service.security.strict-mode}}
10 | "secrets": {
11 | "private_key": {
12 | "source": "{{service.security.secret-name}}"
13 | }
14 | },
15 | {{/service.security.strict-mode}}
16 | "env": {
17 | "JENKINS_AGENT_ROLE": "{{service.roles.jenkins-agent-role}}",
18 | "JENKINS_AGENT_USER": "{{jenkins-agent.jenkins-agent-user}}",
19 | "JENKINS_FRAMEWORK_NAME": "{{service.name}}",
20 | "MARATHON_NAME": "{{service.marathon-name}}",
21 | {{#service.security.strict-mode}}
22 | "DCOS_STRICT_SECURITY_ENABLED": "{{service.security.strict-mode}}",
23 | "DCOS_SERVICE_ACCOUNT_PRIVATE_KEY": { "secret": "private_key" },
24 | "DCOS_SERVICE_ACCOUNT": "{{service.security.service-account}}",
25 | {{/service.security.strict-mode}}
26 | "JENKINS_CONTEXT": "/service/{{service.name}}",
27 | "JENKINS_MESOS_MASTER": "{{service.mesos-master}}",
28 | "JENKINS_HOME": "/var/jenkins_home",
29 | {{#service.marathon-lb.virtual-host}}
30 | "JENKINS_ROOT_URL": "{{#service.marathon-lb.https-redirect}}https://{{/service.marathon-lb.https-redirect}}{{^service.marathon-lb.https-redirect}}http://{{/service.marathon-lb.https-redirect}}{{service.marathon-lb.virtual-host}}/service/{{service.name}}",
31 | {{/service.marathon-lb.virtual-host}}
32 | "JVM_OPTS": "{{jenkins-master.jvm-opts}}",
33 | "JENKINS_OPTS": "{{jenkins-master.jenkins-opts}}",
34 | "PROMETHEUS_ENDPOINT": "{{service.prometheus-endpoint}}",
35 | {{#jenkins-master.additional-plugins}}
36 | "JENKINS_OPT_ADDITIONAL_PLUGINS": "{{jenkins-master.additional-plugins}}",
37 | {{/jenkins-master.additional-plugins}}
38 | "SSH_KNOWN_HOSTS": "{{jenkins-master.known-hosts}}",
39 |
40 | "JENKINS_LINUX_AGENT_LABEL": "{{jenkins-agent.linux-agent.label}}",
41 | "JENKINS_LINUX_AGENT_CPUS": "{{jenkins-agent.linux-agent.cpus}}",
42 | "JENKINS_LINUX_AGENT_MEM": "{{jenkins-agent.linux-agent.mem}}",
43 | "JENKINS_LINUX_AGENT_DISK": "{{jenkins-agent.linux-agent.disk}}",
44 | "JENKINS_LINUX_AGENT_MAX_EXECUTORS": "{{jenkins-agent.linux-agent.max-executors}}",
45 | "JENKINS_LINUX_AGENT_MIN_EXECUTORS": "{{jenkins-agent.linux-agent.min-executors}}",
46 | "JENKINS_LINUX_AGENT_IDLE_TERMINATION_MINUTES": "{{jenkins-agent.linux-agent.idle-termination-minutes}}",
47 | "JENKINS_LINUX_AGENT_OFFER_SELECTION_ATTRIBUTES": "{{jenkins-agent.linux-agent.offer-selection-attributes}}",
48 | "JENKINS_LINUX_AGENT_JNLP_ARGS": "{{jenkins-agent.linux-agent.jnlp-args}}",
49 | "JENKINS_LINUX_AGENT_IMAGE": "{{jenkins-agent.linux-agent.image}}"
50 | },
51 | "portDefinitions": [
52 | {"port": 0, "protocol": "tcp", "name": "nginx"},
53 | {"port": 0, "protocol": "tcp", "name": "jenkins"},
54 | {"port": {{jenkins-master.agent-port}}, "protocol": "tcp", "name": "agent"}
55 | ],
56 | "requirePorts": true,
57 | "container": {
58 | "type": "{{service.containerizer}}",
59 | "docker": {
60 | {{#service.docker-image}}
61 | "image": "{{service.docker-image}}"
62 | {{/service.docker-image}}
63 | {{^service.docker-image}}
64 | "image": "{{resource.assets.container.docker.jenkins}}"
65 | {{/service.docker-image}}
66 | },
67 | "volumes": [
68 | {{^service.storage.host-volume}}
69 | {
70 | "containerPath": "jenkins_home",
71 | "persistent": {
72 | "type": "root",
73 | "size": 2048,
74 | "constraints": []
75 | },
76 | "mode": "RW"
77 | },
78 | {{/service.storage.host-volume}}
79 | {
80 | "containerPath": "/var/jenkins_home",
81 | {{#service.storage.host-volume}}
82 | "hostPath": "{{service.storage.host-volume}}/{{service.name}}",
83 | {{/service.storage.host-volume}}
84 | {{^service.storage.host-volume}}
85 | "hostPath": "jenkins_home",
86 | {{/service.storage.host-volume}}
87 | "mode": "RW"
88 | }
89 | ]
90 | },
91 | {{#service.docker-credentials-uri}}
92 | "fetch": [
93 | {
94 | "uri": "{{service.docker-credentials-uri}}",
95 | "executable": false,
96 | "extract": true
97 | }
98 | ],
99 | {{/service.docker-credentials-uri}}
100 | "role": "{{service.roles.jenkins-master-role}}",
101 | "healthChecks": [
102 | {
103 | "path": "/service/{{service.name}}/login",
104 | "portIndex": 0,
105 | "protocol": "MESOS_HTTP",
106 | "gracePeriodSeconds": {{health-checks.grace-period}},
107 | "intervalSeconds": {{health-checks.interval}},
108 | "timeoutSeconds": {{health-checks.timeout}},
109 | "maxConsecutiveFailures": {{health-checks.max-consecutive-failures}}
110 | }
111 | ],
112 | "labels": {
113 | {{#service.marathon-lb.virtual-host}}
114 | "HAPROXY_GROUP":"external",
115 | "HAPROXY_0_VHOST":"{{service.marathon-lb.virtual-host}}",
116 | "HAPROXY_0_REDIRECT_TO_HTTPS": "{{service.marathon-lb.https-redirect}}",
117 | {{/service.marathon-lb.virtual-host}}
118 | "MARATHON_SINGLE_INSTANCE_APP": "true",
119 | "DCOS_PACKAGE_FRAMEWORK_NAME": "{{service.name}}",
120 | "DCOS_SERVICE_NAME": "{{service.name}}",
121 | "DCOS_SERVICE_PORT_INDEX": "0",
122 | "DCOS_SERVICE_SCHEME": "http"
123 | },
124 | {{#service.storage.pinned-hostname}}
125 | "constraints": [["hostname", "CLUSTER", "{{service.storage.pinned-hostname}}"]],
126 | {{/service.storage.pinned-hostname}}
127 | "upgradeStrategy":{
128 | "minimumHealthCapacity": 0,
129 | "maximumOverCapacity": 0
130 | }
131 | }
132 |
--------------------------------------------------------------------------------
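The `{{#section}}...{{/section}}` blocks above render their contents only when the corresponding option is set. A tiny illustration of that mustache behavior, using the third-party pystache renderer purely for demonstration:

```python
import pystache  # third-party mustache renderer, used here only to illustrate

template = '{{#strict}}"DCOS_STRICT_SECURITY_ENABLED": "{{strict}}",{{/strict}}'

# The section renders when the key is present and truthy...
assert pystache.render(template, {'strict': 'true'}) == \
    '"DCOS_STRICT_SECURITY_ENABLED": "true",'
# ...and disappears entirely when it is absent:
assert pystache.render(template, {}) == ''
```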
/testing/testData/gen-job.xml:
--------------------------------------------------------------------------------
1 | <?xml version='1.0' encoding='UTF-8'?>
2 | <project>
3 |   <actions/>
4 |   <description></description>
5 |   <keepDependencies>false</keepDependencies>
6 |   <properties>
7 |     <hudson.model.ParametersDefinitionProperty>
8 |       <parameterDefinitions>
9 |         <hudson.model.StringParameterDefinition>
10 |           <name>SINGLE_USE</name>
11 |           <description>Percentage of jobs that should be Single Use mesos agent.</description>
12 |           <defaultValue>100</defaultValue>
13 |           <trim>true</trim>
14 |         </hudson.model.StringParameterDefinition>
15 |         <hudson.model.StringParameterDefinition>
16 |           <name>AGENT_LABEL</name>
17 |           <description>Job runs restricted to this label.</description>
18 |           <defaultValue></defaultValue>
19 |           <trim>true</trim>
20 |         </hudson.model.StringParameterDefinition>
21 |         <hudson.model.StringParameterDefinition>
22 |           <name>JOBCOUNT</name>
23 |           <description>The number of jobs to create and manage.</description>
24 |           <defaultValue>5</defaultValue>
25 |           <trim>true</trim>
26 |         </hudson.model.StringParameterDefinition>
27 |         <hudson.model.StringParameterDefinition>
28 |           <name>EVERY_XMIN</name>
29 |           <description>Every X minutes run this job.</description>
30 |           <defaultValue>5</defaultValue>
31 |           <trim>false</trim>
32 |         </hudson.model.StringParameterDefinition>
33 |         <hudson.model.StringParameterDefinition>
34 |           <name>SLEEP_DURATION</name>
35 |           <description>Job sleeps for this many seconds.</description>
36 |           <defaultValue>600</defaultValue>
37 |           <trim>false</trim>
38 |         </hudson.model.StringParameterDefinition>
39 |         <hudson.model.StringParameterDefinition>
40 |           <name>SCENARIO</name>
41 |           <description>Test scenario to run.</description>
42 |           <defaultValue>sleep</defaultValue>
43 |           <trim>true</trim>
44 |         </hudson.model.StringParameterDefinition>
45 |       </parameterDefinitions>
46 |     </hudson.model.ParametersDefinitionProperty>
47 |   </properties>
48 |   <scm class="hudson.scm.NullSCM"/>
49 |   <assignedNode>linux</assignedNode>
50 |   <canRoam>false</canRoam>
51 |   <disabled>false</disabled>
52 |   <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
53 |   <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
54 |   <triggers/>
55 |   <concurrentBuild>false</concurrentBuild>
56 |   <builders>
57 |     <javaposse.jobdsl.plugin.ExecuteDslScripts>
58 |       <scriptText>def singleP = "${SINGLE_USE}" as Integer
59 | def jobCount = "${JOBCOUNT}" as Integer
60 | def agentLabel = "${AGENT_LABEL}" as String
61 | def sleepDur = "${SLEEP_DURATION}" as Integer
62 | def everyMin = "${EVERY_XMIN}" as Integer
63 |
64 | Random random = new Random()
65 |
66 | (1..jobCount).each { c ->
67 | def jobName = "test-job-${c}"
68 |
69 | def j = job(jobName) {
70 | if (agentLabel) {
71 | label(agentLabel)
72 | }
73 |
74 | triggers {
75 | cron("*/${EVERY_XMIN} * * * *")
76 | }
77 |
78 | wrappers {
79 | if (singleP == 100 || (singleP != 0 &amp;&amp; Math.abs(random.nextInt() % 100) + 1 &lt;= singleP)) {
80 | configure { node ->
81 | node / 'buildWrappers' / 'org.jenkinsci.plugins.mesos.MesosSingleUseSlave'()
82 | }
83 | }
84 | }
85 |
86 | if (SCENARIO == "buildmarathon") {
87 | scm {
88 | git {
89 | remote {
90 | name('origin')
91 | url('https://github.com/mesosphere/marathon.git')
92 | }
93 | branches('v1.6.352')
94 | }
95 | }
96 | }
97 |
98 | steps {
99 | if (SCENARIO == "buildmarathon") {
100 | shell(' export SBT_OPTS="-Xmx750M -Xms750M -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -Xss2M"; curl -LO https://piccolo.link/sbt-1.1.2.tgz; tar -zxf sbt*.tgz; sbt/bin/sbt compile; ')
101 | } else {
102 | shell("echo 'hello, world'; sleep ${sleepDur}")
103 | }
104 | }
105 |
106 | }
107 | queue(j)
108 |
109 | }</scriptText>
110 |       <usingScriptText>true</usingScriptText>
111 |       <sandbox>false</sandbox>
112 |       <ignoreExisting>false</ignoreExisting>
113 |       <ignoreMissingFiles>false</ignoreMissingFiles>
114 |       <failOnMissingPlugin>false</failOnMissingPlugin>
115 |       <unstableOnDeprecation>false</unstableOnDeprecation>
116 |       <removedJobAction>DELETE</removedJobAction>
117 |       <removedViewAction>DELETE</removedViewAction>
118 |       <removedConfigFilesAction>DELETE</removedConfigFilesAction>
119 |       <lookupStrategy>JENKINS_ROOT</lookupStrategy>
120 |     </javaposse.jobdsl.plugin.ExecuteDslScripts>
121 |   </builders>
122 |   <publishers/>
123 |   <buildWrappers/>
124 | </project>
125 |
--------------------------------------------------------------------------------
/testing/security/transport_encryption.py:
--------------------------------------------------------------------------------
1 | """
2 | A collection of utilities used for SSL tests.
3 | """
4 | import json
5 | import logging
6 |
7 |
8 | import sdk_cmd
9 | import sdk_security
10 | import sdk_utils
11 |
12 | log = logging.getLogger(__name__)
13 |
14 |
15 | def setup_service_account(service_name: str,
16 | service_account_secret: str=None) -> dict:
17 | """
18 | Setup the service account for TLS. If the account or secret of the specified
19 | name already exists, these are deleted.
20 | """
21 |
22 | if sdk_utils.is_open_dcos():
23 | log.error("The setup of a service account requires DC/OS EE. service_name=%s", service_name)
24 | raise Exception("The setup of a service account requires DC/OS EE")
25 |
26 | name = service_name
27 | secret = name if service_account_secret is None else service_account_secret
28 |
29 | service_account_info = sdk_security.setup_security(service_name,
30 | service_account=name,
31 | service_account_secret=secret)
32 |
33 | log.info("Adding permissions required for TLS.")
34 | if sdk_utils.dcos_version_less_than("1.11"):
35 | sdk_cmd.run_cli("security org groups add_user superusers {name}".format(name=name))
36 | else:
37 | acls = [
38 | {"rid": "dcos:secrets:default:/{}/*".format(service_name), "action": "full"},
39 | {"rid": "dcos:secrets:list:default:/{}".format(service_name), "action": "read"},
40 | {"rid": "dcos:adminrouter:ops:ca:rw", "action": "full"},
41 | {"rid": "dcos:adminrouter:ops:ca:ro", "action": "full"},
42 | ]
43 |
44 | for acl in acls:
45 | cmd_list = ["security", "org", "users", "grant",
46 | "--description", "\"Allow provisioning TLS certificates\"",
47 | name, acl["rid"], acl["action"]
48 | ]
49 |
50 | sdk_cmd.run_cli(" ".join(cmd_list))
51 |
52 | return service_account_info
53 |
54 |
55 | def cleanup_service_account(service_name: str, service_account_info: dict):
56 | """
57 | Clean up the specified service account.
58 |
59 | Ideally, this service account was created using the setup_service_account function.
60 | """
61 | if isinstance(service_account_info, str):
62 | service_account_info = {"name": service_account_info}
63 |
64 | name = service_account_info["name"]
65 | secret = service_account_info["secret"] if "secret" in service_account_info else name
66 |
67 | sdk_security.cleanup_security(service_name,
68 | service_account=name,
69 | service_account_secret=secret)
70 |
71 |
72 | def fetch_dcos_ca_bundle(marathon_task: str) -> str:
73 | """Fetch the DC/OS CA bundle from the leading Mesos master"""
74 | local_bundle_file = "dcos-ca.crt"
75 |
76 | cmd = ["curl", "-L", "--insecure", "-v",
77 | "leader.mesos/ca/dcos-ca.crt",
78 | "-o", local_bundle_file]
79 |
80 | sdk_cmd.marathon_task_exec(marathon_task, " ".join(cmd))
81 |
82 | return local_bundle_file
83 |
84 |
85 | def create_tls_artifacts(cn: str, marathon_task: str) -> str:
86 | pub_path = "{}_pub.crt".format(cn)
87 | priv_path = "{}_priv.key".format(cn)
88 | log.info("Generating certificate. cn={}, task={}".format(cn, marathon_task))
89 |
90 | output = sdk_cmd.marathon_task_exec(
91 | marathon_task,
92 | 'openssl req -nodes -newkey rsa:2048 -keyout {} -out request.csr '
93 | '-subj "/C=US/ST=CA/L=SF/O=Mesosphere/OU=Mesosphere/CN={}"'.format(priv_path, cn))
94 |     assert output[0] == 0
95 |
96 | rc, raw_csr, _ = sdk_cmd.marathon_task_exec(marathon_task, 'cat request.csr')
97 |     assert rc == 0
98 | request = {
99 | "certificate_request": raw_csr
100 | }
101 |
102 | token = sdk_cmd.run_cli("config show core.dcos_acs_token")
103 |
104 | output = sdk_cmd.marathon_task_exec(
105 | marathon_task,
106 | "curl --insecure -L -X POST "
107 | "-H 'Authorization: token={}' "
108 | "leader.mesos/ca/api/v2/sign "
109 | "-d '{}'".format(token, json.dumps(request)))
110 |     assert output[0] == 0
111 |
112 | # Write the public cert to the client
113 | certificate = json.loads(output[1])["result"]["certificate"]
114 | output = sdk_cmd.marathon_task_exec(marathon_task, "bash -c \"echo '{}' > {}\"".format(certificate, pub_path))
115 |     assert output[0] == 0
116 |
117 | _create_keystore_truststore(cn, marathon_task)
118 | return "CN={},OU=Mesosphere,O=Mesosphere,L=SF,ST=CA,C=US".format(cn)
119 |
120 |
121 | def _create_keystore_truststore(cn: str, marathon_task: str):
122 | pub_path = "{}_pub.crt".format(cn)
123 | priv_path = "{}_priv.key".format(cn)
124 | keystore_path = "{}_keystore.jks".format(cn)
125 | truststore_path = "{}_truststore.jks".format(cn)
126 |
127 | log.info("Generating keystore and truststore, task:{}".format(marathon_task))
128 | dcos_ca_bundle = fetch_dcos_ca_bundle(marathon_task)
129 |
130 | # Convert to a PKCS12 key
131 | output = sdk_cmd.marathon_task_exec(
132 | marathon_task,
133 | 'bash -c "export RANDFILE=/mnt/mesos/sandbox/.rnd && '
134 | 'openssl pkcs12 -export -in {} -inkey {} '
135 | '-out keypair.p12 -name keypair -passout pass:export '
136 | '-CAfile {} -caname root"'.format(pub_path, priv_path, dcos_ca_bundle))
137 |     assert output[0] == 0
138 |
139 | log.info("Generating certificate: importing into keystore and truststore")
140 | # Import into the keystore and truststore
141 | output = sdk_cmd.marathon_task_exec(
142 | marathon_task,
143 | "keytool -importkeystore "
144 | "-deststorepass changeit -destkeypass changeit -destkeystore {} "
145 | "-srckeystore keypair.p12 -srcstoretype PKCS12 -srcstorepass export "
146 | "-alias keypair".format(keystore_path))
147 |     assert output[0] == 0
148 |
149 | output = sdk_cmd.marathon_task_exec(
150 | marathon_task,
151 | "keytool -import -trustcacerts -noprompt "
152 | "-file {} -storepass changeit "
153 | "-keystore {}".format(dcos_ca_bundle, truststore_path))
154 |     assert output[0] == 0
155 |
--------------------------------------------------------------------------------
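End to end, a test needing TLS material inside a task would use the helpers above roughly as follows (a sketch; "my-service" and the task name are placeholders, and this requires a DC/OS EE cluster):

```python
# Assumes testing/ is on PYTHONPATH, so the module imports as security.*:
from security import transport_encryption

service_account_info = transport_encryption.setup_service_account("my-service")
try:
    # Creates <cn>_pub.crt / <cn>_priv.key plus JKS keystore/truststore
    # inside the named Marathon task, returning the distinguished name:
    dn = transport_encryption.create_tls_artifacts(
        cn="client", marathon_task="my-service-node")
    print(dn)  # CN=client,OU=Mesosphere,O=Mesosphere,L=SF,ST=CA,C=US
finally:
    transport_encryption.cleanup_service_account("my-service", service_account_info)
```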
/tools/build_go_exe.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # exit immediately on failure
4 | set -e
5 |
6 | syntax() {
7 | echo "Syntax: $0 [platform2 platform3 ...]"
8 | echo "Platforms: 'linux', 'darwin', and/or 'windows'. Must specify at least one."
9 | echo "Required envvars:"
10 | echo "- REPO_ROOT_DIR: Path to root of repository"
11 | echo "- REPO_NAME: Name of repository"
12 | echo "Optional envvars:"
13 | echo "- GOPATH_REPO_ORG: Path within GOPATH/src/ under which REPO_NAME resides. GOPATH/src///... (default: 'github.com/mesosphere')"
14 | echo "- SKIP_UPX: If non-empty, disables UPX compression of binaries"
15 | }
16 |
17 | if [ $# -lt 3 ]; then
18 | syntax
19 | exit 1
20 | fi
21 |
22 | RELATIVE_EXE_DIR=$1
23 | shift
24 | EXE_BASE_NAME=$1
25 | shift
26 | PLATFORMS=$@
27 | if [ -z "$RELATIVE_EXE_DIR" -o -z "$EXE_BASE_NAME" -o -z "$PLATFORMS" ]; then
28 | syntax
29 | exit 1
30 | fi
31 | echo "Building $EXE_BASE_NAME for [$PLATFORMS] in $RELATIVE_EXE_DIR"
32 |
33 | if [ -z "$(which go)" ]; then
34 | echo "Missing 'go' executable. Please download Go 1.8+ from golang.org, and add 'go' to your PATH."
35 | syntax
36 | exit 1
37 | fi
38 |
39 | if [ -z "$REPO_ROOT_DIR" -o -z "$REPO_NAME" ]; then
40 | echo "Missing REPO_ROOT_DIR or REPO_NAME environment variables."
41 | syntax
42 | exit 1
43 | fi
44 |
45 | # Detect Go version to determine if the user has a compatible Go version or not.
46 | GO_VERSION=$(go version | awk '{print $3}')
47 | # Note, UPX only works on binaries produced by Go 1.7+. However, we require Go 1.8+
48 | UPX_BINARY="$(which upx || which upx-ucl || echo '')"
49 | # For dev iteration; upx takes a long time; can set env var
50 | if [ -n "$SKIP_UPX" ]; then
51 | UPX_BINARY=
52 | fi
53 | case "$GO_VERSION" in
54 | go1.[8-9]*|go1.1[0-9]*|go[2-9]*) # go1.8+, go2+ (must come before go1.0-go1.7: support e.g. go1.10)
55 | ;;
56 | go0.*|go1.[0-7]*) # go0.*, go1.0-go1.7
57 | echo "Detected Go <=1.7. This is too old, please install Go 1.8+: $(which go) $GO_VERSION"
58 | exit 1
59 | ;;
60 | *) # ???
61 | echo "Unrecognized go version: $(which go) $GO_VERSION"
62 | exit 1
63 | ;;
64 | esac
65 |
66 | # create a fake gopath structure within the repo at ${REPO}/.gopath/
67 | export GOPATH=${REPO_ROOT_DIR}/.gopath
68 |
69 | GOPATH_REPO_ORG=${GOPATH_REPO_ORG:=github.com/mesosphere}
70 | # ex: ${REPO_ROOT_DIR}/.gopath/src/github.com/mesosphere
71 | GOPATH_REPO_ORG_DIR=${GOPATH}/src/${GOPATH_REPO_ORG}
72 | # ex: ${REPO_ROOT_DIR}/.gopath/src/github.com/mesosphere/dcos-commons/sdk/cli
73 | GOPATH_EXE_DIR="$GOPATH_REPO_ORG_DIR/$REPO_NAME/$RELATIVE_EXE_DIR"
74 |
75 | # Add symlink from GOPATH which points into the repository directory, if necessary:
76 | SYMLINK_LOCATION="$GOPATH_REPO_ORG_DIR/$REPO_NAME"
77 | if [ ! -h "$SYMLINK_LOCATION" -o "$(readlink $SYMLINK_LOCATION)" != "$REPO_ROOT_DIR" ] && [ ! -d "$SYMLINK_LOCATION" -o "$SYMLINK_LOCATION" != "$REPO_ROOT_DIR" ]; then
78 | echo "Creating symlink from GOPATH=$SYMLINK_LOCATION to REPOPATH=$REPO_ROOT_DIR"
79 | rm -rf "$SYMLINK_LOCATION"
80 | mkdir -p "$GOPATH_REPO_ORG_DIR"
81 | cd $GOPATH_REPO_ORG_DIR
82 | ln -s "$REPO_ROOT_DIR" $REPO_NAME
83 | fi
84 |
85 | # Run 'go test'/'go build' from within GOPATH:
86 | cd $GOPATH_EXE_DIR
87 |
88 | # optimization: build a native version of the executable and check if the sha1 matches a
89 | # previous native build. if the sha1 matches, then we can skip the rebuild.
90 | NATIVE_FILENAME=".native-${EXE_BASE_NAME}"
91 | NATIVE_SHA1SUM_FILENAME="${NATIVE_FILENAME}.sha1sum"
92 | go build -o $NATIVE_FILENAME
93 | # 'shasum' is available on OSX as well as (most?) Linuxes:
94 | NATIVE_SHA1SUM=$(shasum $NATIVE_FILENAME | awk '{print $1}')
95 |
96 | set_platform_filename() {
97 | # if only one platform is being built, don't append a suffix:
98 | if [ $(echo $PLATFORMS | wc -w) == "1" ]; then
99 | PLATFORM_FILENAME=${EXE_BASE_NAME}
100 | return
101 | fi
102 | case $1 in
103 | linux)
104 | PLATFORM_FILENAME=${EXE_BASE_NAME}-linux
105 | ;;
106 | darwin)
107 | PLATFORM_FILENAME=${EXE_BASE_NAME}-darwin
108 | ;;
109 | windows)
110 | PLATFORM_FILENAME=${EXE_BASE_NAME}.exe
111 | ;;
112 | *)
113 | echo "Unknown platform: $1"
114 | exit 1
115 | ;;
116 | esac
117 | }
118 |
119 | ALL_PLATFORM_FILES_EXIST="y"
120 | for PLATFORM in $PLATFORMS; do
121 | set_platform_filename $PLATFORM
122 | if [ ! -f $PLATFORM_FILENAME ]; then
123 | ALL_PLATFORM_FILES_EXIST=""
124 | break
125 | fi
126 | done
127 |
128 | if [ -f $NATIVE_SHA1SUM_FILENAME -a -n "$ALL_PLATFORM_FILES_EXIST" -a "$NATIVE_SHA1SUM" = "$(cat $NATIVE_SHA1SUM_FILENAME 2>&1)" ]; then
129 | # build output hasn't changed. skip.
130 | echo "Up to date, skipping build"
131 | else
132 | # build output is missing, or native build changed. test and build.
133 |
134 | # Run unit tests, if any '_test.go' files exist and GO_TESTS is not manually disabled:
135 | if [ x"${GO_TESTS:-true}" == x"true" ]; then
136 | if [ -n "$(find . -iname '*_test.go')" ]; then
137 | go test -v
138 | else
139 | echo "No unit tests found in $GOPATH_EXE_DIR"
140 | fi
141 | fi
142 |
143 | for PLATFORM in $PLATFORMS; do
144 | set_platform_filename $PLATFORM
145 |
146 | # available GOOS/GOARCH permutations are listed at:
147 | # https://golang.org/doc/install/source#environment
148 | CGO_ENABLED=0 GOOS=$PLATFORM GOARCH=386 go build -ldflags="-s -w" -o $PLATFORM_FILENAME
149 |
150 | # use upx if:
151 | # - upx is installed
152 | # - golang is recent enough to be compatible with upx
153 | # - the target OS isn't darwin: compressed darwin builds immediately fail with "Killed: 9"
154 | if [ -n "$UPX_BINARY" -a "$PLATFORM" != "darwin" ]; then
155 | $UPX_BINARY -q --best $PLATFORM_FILENAME
156 | else
157 | echo "Skipping UPX compression of $PLATFORM_FILENAME"
158 | fi
159 | done
160 |
161 | # avoid mistakenly marking old builds as good: update sha1sum AFTER successfully building binaries
162 | echo $NATIVE_SHA1SUM > $NATIVE_SHA1SUM_FILENAME
163 | fi
164 |
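165 |
166 | # Illustrative invocation (paths and names are examples, not requirements):
167 | #
168 | #   REPO_ROOT_DIR=$(pwd) REPO_NAME=dcos-commons \
169 | #     ./tools/build_go_exe.sh cli/dcos-service-cli dcos-service-cli linux darwin windows
170 | #
171 | # This would emit dcos-service-cli-linux, dcos-service-cli-darwin and
172 | # dcos-service-cli.exe under cli/dcos-service-cli/, and skip the rebuild when
173 | # the native binary's sha1 is unchanged from the previous run.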
--------------------------------------------------------------------------------
/testing/sdk_utils.py:
--------------------------------------------------------------------------------
1 | '''
2 | ************************************************************************
3 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
4 | SHOULD ALSO BE APPLIED TO sdk_utils IN ANY OTHER PARTNER REPOS
5 | ************************************************************************
6 | '''
7 | import functools
8 | import logging
9 | import operator
10 | import random
11 | import string
12 | from distutils.version import LooseVersion
13 |
14 | import dcos
15 | import shakedown
16 | import pytest
17 | import os
18 | import os.path
19 |
20 | log = logging.getLogger(__name__)
21 |
22 |
23 | def is_env_var_set(key: str, default: str) -> bool:
24 | return str(os.environ.get(key, default)).lower() in ["true", "1"]
25 |
26 |
27 | def get_package_name(default: str) -> str:
28 | return os.environ.get("INTEGRATION_TEST__PACKAGE_NAME") or default
29 |
30 |
31 | def get_service_name(default: str) -> str:
32 | return os.environ.get("INTEGRATION_TEST__SERVICE_NAME") or default
33 |
34 |
35 | def list_reserved_resources():
36 | '''Displays the currently reserved resources on all agents via state.json;
37 | Currently for INFINITY-1881, where we believe uninstall may not always be
38 | doing its job correctly.'''
39 | state_json_slaveinfo = dcos.mesos.DCOSClient().get_state_summary()['slaves']
40 |
41 | for slave in state_json_slaveinfo:
42 | reserved_resources = slave['reserved_resources']
43 | if reserved_resources == {}:
44 | continue
45 | msg = 'on slaveid=%s hostname=%s reserved resources: %s'
46 | log.info(msg % (slave['id'], slave['hostname'], reserved_resources))
47 |
48 |
49 | def get_foldered_name(service_name):
50 | # DC/OS 1.9 & earlier don't support "foldered" service names, aka marathon
51 | # group names.
52 | if dcos_version_less_than('1.10'):
53 | return service_name
54 | return '/test/integration/' + service_name
55 |
56 |
57 | def get_task_id_service_name(service_name):
58 | '''Converts the provided service name to a sanitized name as used in task ids.
59 |
60 | For example: /test/integration/foo => test.integration.foo'''
61 | return service_name.lstrip('/').replace('/', '.')
62 |
63 |
64 | def get_task_id_prefix(service_name, task_name):
65 | '''Returns the TaskID prefix to be used for the provided service name and task name.
66 | The full TaskID would consist of this prefix, plus two underscores and a UUID.
67 |
68 | For example: /test/integration/foo + hello-0-server => test.integration.foo__hello-0-server'''
69 | return '{}__{}'.format(get_task_id_service_name(service_name), task_name)
70 |
71 |
72 | def get_deslashed_service_name(service_name):
73 | # Foldered services have slashes removed: '/test/integration/foo' => 'test__integration__foo'.
74 | return service_name.lstrip('/').replace('/', '__')
75 |
76 |
77 | def get_zk_path(service_name):
78 | return 'dcos-service-{}'.format(get_deslashed_service_name(service_name))
79 |
80 |
81 | @functools.lru_cache()
82 | def dcos_version():
83 | return shakedown.dcos_version()
84 |
85 |
86 | @functools.lru_cache()
87 | def dcos_version_less_than(version):
88 | return shakedown.dcos_version_less_than(version)
89 |
90 |
91 | def dcos_version_at_least(version):
92 | return not dcos_version_less_than(version)
93 |
94 |
95 | def check_dcos_min_version_mark(item: pytest.Item):
96 | '''Enforces the dcos_min_version pytest annotation, which should be used like this:
97 |
98 | @pytest.mark.dcos_min_version('1.10')
99 | def your_test_here(): ...
100 |
101 | In order for this annotation to take effect, this function must be called by a pytest_runtest_setup() hook.
102 | '''
103 | min_version_mark = item.get_marker('dcos_min_version')
104 | if min_version_mark:
105 | min_version = min_version_mark.args[0]
106 | message = 'Feature only supported in DC/OS {} and up'.format(min_version)
107 | if 'reason' in min_version_mark.kwargs:
108 | message += ': {}'.format(min_version_mark.kwargs['reason'])
109 | if dcos_version_less_than(min_version):
110 | pytest.skip(message)
111 |
112 |
113 | def is_open_dcos():
114 | '''Determine if the tests are being run against open DC/OS. This is presently done by
115 | checking the envvar DCOS_ENTERPRISE.'''
116 | return not (os.environ.get('DCOS_ENTERPRISE', 'true').lower() == 'true')
117 |
118 |
119 | def is_strict_mode():
120 | '''Determine if the tests are being run on a strict mode cluster.'''
121 | return os.environ.get('SECURITY', '') == 'strict'
122 |
123 |
124 | def random_string(length=8):
125 | return ''.join(
126 | random.choice(
127 | string.ascii_lowercase +
128 | string.digits
129 | ) for _ in range(length)
130 | )
131 |
132 |
133 | dcos_ee_only = pytest.mark.skipif(
134 | is_open_dcos(),
135 | reason="Feature only supported in DC/OS EE.")
136 |
137 |
138 | # Pretty much https://github.com/pytoolz/toolz/blob/a8cd0adb5f12ec5b9541d6c2ef5a23072e1b11a3/toolz/dicttoolz.py#L279
139 | def get_in(keys, coll, default=None):
140 | """ Reaches into nested associative data structures. Returns the value for path ``keys``.
141 |
142 | If the path doesn't exist returns ``default``.
143 |
144 | >>> transaction = {'name': 'Alice',
145 | ... 'purchase': {'items': ['Apple', 'Orange'],
146 | ... 'costs': [0.50, 1.25]},
147 | ... 'credit card': '5555-1234-1234-1234'}
148 | >>> get_in(['purchase', 'items', 0], transaction)
149 | 'Apple'
150 | >>> get_in(['name'], transaction)
151 | 'Alice'
152 | >>> get_in(['purchase', 'total'], transaction)
153 | >>> get_in(['purchase', 'items', 'apple'], transaction)
154 | >>> get_in(['purchase', 'items', 10], transaction)
155 | >>> get_in(['purchase', 'total'], transaction, 0)
156 | 0
157 | """
158 | try:
159 | return functools.reduce(operator.getitem, keys, coll)
160 | except (KeyError, IndexError, TypeError):
161 | return default
162 |
163 |
164 | def sort(coll):
165 | """ Sorts a collection and returns it. """
166 | coll.sort()
167 | return coll
168 |
169 |
170 | def invert_dict(d: dict) -> dict:
171 | """ Returns a dictionary with its values being its keys and vice-versa. """
172 | return dict((v, k) for k, v in d.items())
173 |
--------------------------------------------------------------------------------
/tools/ci/test_runner.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Exit immediately on errors
3 | set -e -x
4 |
5 | # Export the required environment variables:
6 | export DCOS_ENTERPRISE
7 | export PYTHONUNBUFFERED=1
8 | export SECURITY
9 | export PACKAGE_REGISTRY_ENABLED
10 | export PACKAGE_REGISTRY_STUB_URL
11 | export DCOS_FILES_PATH
12 |
13 | BUILD_TOOL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
14 | REPO_ROOT_DIR="${REPO_ROOT:-$1}"
15 |
16 | SINGLE_FRAMEWORK="True"
17 | # Determine the list of frameworks if it is not specified
18 | if [ -z "${FRAMEWORK}" -o x"${AUTO_DETECT_FRAMEWORKS}" == x"True" -o x"$FRAMEWORK" == x"all" ]; then
19 | if [ -d "$REPO_ROOT_DIR/frameworks" ]; then
20 | FRAMEWORK_LIST=$(ls $REPO_ROOT_DIR/frameworks)
21 | SINGLE_FRAMEWORK="False"
22 | else
23 | FRAMEWORK_LIST=$(basename ${REPO_ROOT_DIR})
24 | fi
25 | else
26 | FRAMEWORK_LIST=$FRAMEWORK
27 | fi
28 |
29 | # First we need to build the framework(s)
30 | echo "Using FRAMEWORK_LIST:\n${FRAMEWORK_LIST}"
31 | echo "PACKAGE_REGISTRY_ENABLED ${PACKAGE_REGISTRY_ENABLED}"
32 | echo "PACKAGE_REGISTRY_STUB_URL ${PACKAGE_REGISTRY_STUB_URL}"
33 | echo "DCOS_FILES_PATH ${DCOS_FILES_PATH}"
34 |
35 | if [ -n "$STUB_UNIVERSE_URL" ]; then
36 | if [ x"$SINGLE_FRAMEWORK" == x"False" ]; then
37 | echo "\$STUB_UNIVERSE_URL can only be set when building single frameworks"
38 | exit 1
39 | fi
40 | echo "Using provided STUB_UNIVERSE_URL: $STUB_UNIVERSE_URL"
41 | else
42 | for framework in $FRAMEWORK_LIST; do
43 | FRAMEWORK_DIR=$REPO_ROOT_DIR/frameworks/${framework}
44 | if [ ! -d ${FRAMEWORK_DIR} ]; then
45 | echo "FRAMEWORK_DIR=${FRAMEWORK_DIR} does not exist."
46 | echo "Assuming single framework in ${REPO_ROOT}."
47 | FRAMEWORK_DIR=${REPO_ROOT_DIR}
48 | fi
49 |
50 | echo "Starting build for $framework at "`date`
51 | export UNIVERSE_URL_PATH=${FRAMEWORK_DIR}/${framework}-universe-url
52 | ${FRAMEWORK_DIR}/build.sh aws
53 | if [ ! -f "$UNIVERSE_URL_PATH" ]; then
54 | echo "Missing universe URL file: $UNIVERSE_URL_PATH"
55 | exit 1
56 | fi
57 | if [ -z ${STUB_UNIVERSE_LIST} ]; then
58 | STUB_UNIVERSE_LIST=$(cat ${UNIVERSE_URL_PATH})
59 | else
60 | STUB_UNIVERSE_LIST="${STUB_UNIVERSE_LIST},$(cat ${UNIVERSE_URL_PATH})"
61 | fi
62 | echo "Finished build for $framework at "`date`
63 | done
64 | export STUB_UNIVERSE_URL=${STUB_UNIVERSE_LIST}
65 | echo "Using STUB_UNIVERSE_URL: $STUB_UNIVERSE_URL"
66 | fi
67 |
68 |
69 | # Now create a cluster if it doesn't exist.
70 | if [ -z "$CLUSTER_URL" ]; then
71 | echo "No DC/OS cluster specified. Attempting to create one now"
72 |
73 | ${BUILD_TOOL_DIR}/launch_cluster.sh ${REPO_ROOT_DIR}/config.yaml ${REPO_ROOT_DIR}/cluster_info.json
74 |
75 | if [ -f ${REPO_ROOT_DIR}/cluster_info.json ]; then
76 | export CLUSTER_URL=https://$(dcos-launch describe --info-path=${REPO_ROOT_DIR}/cluster_info.json | jq -r .masters[0].public_ip)
77 | if [ -z $CLUSTER_URL ]; then
78 | echo "Could not determine CLUSTER_URL"
79 | exit 1
80 | fi
81 | CLUSTER_WAS_CREATED="True"
82 | else
83 | echo "Error creating cluster"
84 | exit 1
85 | fi
86 | fi
87 |
88 | echo "Configuring dcoscli for cluster: $CLUSTER_URL"
89 | echo "\tDCOS_ENTERPRISE=$DCOS_ENTERPRISE"
90 | ${REPO_ROOT_DIR}/tools/dcos_login.py
91 |
92 | # Ensure that the ssh-agent is running:
93 | eval "$(ssh-agent -s)"
94 | if [ -f /ssh/key ]; then
95 | ssh-add /ssh/key
96 | fi
97 |
98 | if [ -f ${REPO_ROOT_DIR}/cluster_info.json ]; then
99 | if [ `cat ${REPO_ROOT_DIR}/cluster_info.json | jq .key_helper` == 'true' ]; then
100 | cat ${REPO_ROOT_DIR}/cluster_info.json | jq -r .ssh_private_key > /root/.ssh/id_rsa
101 | chmod 600 /root/.ssh/id_rsa
102 | ssh-add /root/.ssh/id_rsa
103 | fi
104 | fi
105 |
106 |
107 | # Determine the pytest args
108 | pytest_args=()
109 |
110 | # PYTEST_K and PYTEST_M are treated as single strings, and should thus be added
111 | # to the pytest_args array in quotes.
112 |
113 | PYTEST_K=`echo "$PYTEST_ARGS" \
114 | | sed -e "s#.*-k [\'\"]\([^\'\"]*\)['\"].*#\1#"`
115 | if [ "$PYTEST_K" != "$PYTEST_ARGS" ]; then
116 | if [ -n "$PYTEST_K" ]; then
117 | pytest_args+=(-k "$PYTEST_K")
118 | fi
119 | PYTEST_ARGS=`echo "$PYTEST_ARGS" \
120 | | sed -e "s#-k [\'\"]\([^\'\"]*\)['\"]##"`
121 | fi
122 |
123 | PYTEST_M=`echo "$PYTEST_ARGS" \
124 | | sed -e "s#.*-m [\'\"]\([^\'\"]*\)['\"].*#\1#"`
125 | if [ "$PYTEST_M" != "$PYTEST_ARGS" ]; then
126 | if [ -n "$PYTEST_M" ]; then
127 | pytest_args+=(-m "$PYTEST_M")
128 | fi
129 | PYTEST_ARGS=`echo "$PYTEST_ARGS" \
130 | | sed -e "s#-m [\'\"]\([^\'\"]*\)['\"]##"`
131 | fi
132 |
133 | # Each of the space-separated parts of PYTEST_ARGS is treated separately.
134 | if [ -n "$PYTEST_ARGS" ]; then
135 | pytest_args+=($PYTEST_ARGS)
136 | fi
137 |
138 | # If not already set, ensure that the PYTHONPATH is correct
139 | if [ -z "$PYTHONPATH" ]; then
140 | if [ -d ${REPO_ROOT_DIR}/testing ]; then
141 | export PYTHONPATH=${REPO_ROOT_DIR}/testing
142 | fi
143 | fi
144 |
145 | # Now run the tests:
146 | # First in the root.
147 | if [ -d ${REPO_ROOT_DIR}/tests ]; then
148 | FRAMEWORK_TESTS_DIR=${REPO_ROOT_DIR}/tests
149 | echo "Starting test for $FRAMEWORK_TESTS_DIR at "`date`
150 | py.test -vv -s "${pytest_args[@]}" ${FRAMEWORK_TESTS_DIR}
151 | exit_code=$?
152 | echo "Finished test for $FRAMEWORK_TESTS_DIR at "`date`
153 | fi
154 |
155 | # Now each of the selected frameworks:
156 | for framework in $FRAMEWORK_LIST; do
157 | echo "Checking framework ${framework}"
158 | FRAMEWORK_TESTS_DIR=$REPO_ROOT_DIR/frameworks/${framework}/tests
159 | if [ ! -d ${FRAMEWORK_TESTS_DIR} ]; then
160 | echo "No tests found for ${framework} at ${FRAMEWORK_TESTS_DIR}"
161 | else
162 | echo "Starting test for $FRAMEWORK_TESTS_DIR at "`date`
163 | py.test -vv -s "${pytest_args[@]}" ${FRAMEWORK_TESTS_DIR}
164 | exit_code=$?
165 | echo "Finished test for $FRAMEWORK_TESTS_DIR at "`date`
166 | fi
167 | done
168 |
169 | echo "Finished integration tests at "`date`
170 |
171 | if [ -n "$CLUSTER_WAS_CREATED" ]; then
172 | echo "The DC/OS cluster $CLUSTER_URL was created. Please run"
173 | echo "\t\$ dcos-launch delete --info-path=${CLUSTER_INFO_FILE}"
174 | echo "to remove the cluster."
175 | fi
176 |
177 | exit $exit_code
178 |
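179 | # Illustrative invocation (values are examples): run only tests marked 'sanity'
180 | # for a single framework against an existing cluster:
181 | #
182 | #   CLUSTER_URL=https://my-cluster.example.com \
183 | #     FRAMEWORK=hello-world PYTEST_ARGS="-m 'sanity'" \
184 | #     ./tools/ci/test_runner.sh /path/to/repo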
--------------------------------------------------------------------------------
/tools/distribution/UPDATING.md:
--------------------------------------------------------------------------------
1 | # Updating this repository
2 |
3 | This framework is built using the [DC/OS Commons SDK](https://github.com/mesosphere/dcos-commons), and in order to make use of new features in the SDK or consume bugfixes it should be updated regularly.
4 |
5 | The parts of the SDK consumed consist of:
6 | * The SDK Java libraries including:
7 | * scheduler libraries
8 | * executor libraries
9 | * testing libraries
10 | * SDK artefacts including:
11 | * The custom executor for use on DC/OS 1.9 clusters
12 | * The `bootstrap` utility
13 | * CLI binaries for the three supported platforms
14 | * Build tooling
15 | * Testing utilities
16 |
17 | ## Preparation
18 |
19 | If this repository has never been updated in this way, then the following changes may be required:
20 |
21 | ### Check `build.gradle`
22 |
23 | Check that `build.gradle` in the project root contains the following dependencies in addition to any others required:
24 | ```
25 | dependencies {
26 | compile "mesosphere:scheduler:${dcosSDKVer}"
27 | compile "mesosphere:executor:${dcosSDKVer}"
28 | testCompile "mesosphere:testing:${dcosSDKVer}"
29 | }
30 | ```
31 | as well as the following entry in the `ext` specification:
32 | ```
33 | ext {
34 | dcosSDKVer = "<sdk-version>"
35 | }
36 | ```
37 | (where `<sdk-version>` represents a version string such as `0.30.1`)
38 |
39 | Older versions of `build.gradle` contained the following dependencies and no entry in the `ext` specification:
40 | * `compile "mesosphere:scheduler:<sdk-version>"`
41 | * `compile "mesosphere:executor:<sdk-version>"`
42 | * `testCompile "mesosphere:testing:<sdk-version>"`
43 |
44 | Although this is supported in the current upgrade path, it is recommended that these are changed to match the dependencies at the start of this section, as this will result in a single-line diff in the `build.gradle` file on update.
45 |
46 | ### Check the `universe/resource.json` file
47 |
48 | #### URIs
49 | In order to facilitate upgrades, the `universe/resource.json` file should contain the following entries in the `"uris"` section:
50 | ```json
51 | "uris": {
52 | "...": "...",
53 | "bootstrap-zip": "https://downloads.mesosphere.com/dcos-commons/artifacts/{{dcos-sdk-version}}/bootstrap.zip",
54 | "executor-zip": "http://downloads.mesosphere.com/dcos-commons/artifacts/{{dcos-sdk-version}}/executor.zip",
55 | "...": "..."
56 | }
57 | ```
58 | Note the use of the `{{dcos-sdk-version}}` mustache template to replace an explicit version specification.
59 |
60 | #### CLIs
61 |
62 | In addition, if no custom CLI commands are required, the `"cli"` section in the `universe/resource.json` can be replaced by:
63 | ```json
64 | "cli":{
65 | "binaries":{
66 | "darwin":{
67 | "x86-64":{
68 | "contentHash":[ { "algo":"sha256", "value":"{{sha256:dcos-service-cli-darwin@https://downloads.mesosphere.com/dcos-commons/artifacts/{{dcos-sdk-version}}/SHA256SUMS}}" } ],
69 | "kind":"executable",
70 | "url":"https://downloads.mesosphere.com/dcos-commons/artifacts/{{dcos-sdk-version}}/dcos-service-cli-darwin"
71 | }
72 | },
73 | "linux":{
74 | "x86-64":{
75 | "contentHash":[ { "algo":"sha256", "value":"{{sha256:dcos-service-cli-linux@https://downloads.mesosphere.com/dcos-commons/artifacts/{{dcos-sdk-version}}/SHA256SUMS}}" } ],
76 | "kind":"executable",
77 | "url":"https://downloads.mesosphere.com/dcos-commons/artifacts/{{dcos-sdk-version}}/dcos-service-cli-linux"
78 | }
79 | },
80 | "windows":{
81 | "x86-64":{
82 | "contentHash":[ { "algo":"sha256", "value":"{{sha256:dcos-service-cli.exe@https://downloads.mesosphere.com/dcos-commons/artifacts/{{dcos-sdk-version}}/SHA256SUMS}}" } ],
83 | "kind":"executable",
84 | "url":"https://downloads.mesosphere.com/dcos-commons/artifacts/{{dcos-sdk-version}}/dcos-service-cli.exe"
85 | }
86 | }
87 | }
88 | }
89 | ```
90 | This means that the prebuilt CLIs for the templated `{{dcos-sdk-version}}` are used directly instead of being built separately.
91 |
92 | ## Updating
93 |
94 | ### Clean the current working directory
95 |
96 | It is recommended that the update be performed in a **clean git repository**. Running the following commands should ensure this:
97 |
98 | **NOTE**: This is a destructive operation
99 |
100 | ```bash
101 | $ git checkout -b update-sdk-version-to-<sdk-version>
102 | $ git reset --hard HEAD
103 | $ git clean -fdx
104 | ```
105 |
106 | Now running `git status` should yield:
107 | ```bash
108 | $ git status
109 | On branch update-sdk-version-to-<sdk-version>
110 | nothing to commit, working tree clean
111 | ```
112 |
113 | ### Perform the update
114 |
115 | Assuming the `build.gradle` and `resource.json` files have been updated accordingly, the update to a specific version of the SDK can be performed as follows:
116 | ```bash
117 | $ docker pull mesosphere/dcos-commons:latest
118 | $ docker run --rm -ti -v $(pwd):$(pwd) mesosphere/dcos-commons:latest init $(pwd) --update-sdk
119 | ```
120 |
121 | Running a `git status` after this process should show something like:
122 | ```bash
123 | $ git status
124 | On branch update-sdk-version-to-0.41.0
125 | Changes not staged for commit:
126 | (use "git add ..." to update what will be committed)
127 | (use "git checkout -- ..." to discard changes in working directory)
128 |
129 | modified: build.gradle
130 | modified: testing/sdk_auth.py
131 | modified: testing/sdk_cmd.py
132 | modified: testing/sdk_hosts.py
133 | modified: testing/sdk_install.py
134 | modified: testing/sdk_marathon.py
135 | modified: testing/sdk_repository.py
136 | modified: testing/sdk_security.py
137 | modified: testing/sdk_upgrade.py
138 | modified: testing/sdk_utils.py
139 | modified: testing/security/transport_encryption.py
140 | modified: tools/ci/init
141 | modified: tools/release_builder.py
142 | modified: tools/universe/package_builder.py
143 | modified: tools/universe/package_manager.py
144 |
145 | no changes added to commit (use "git add" and/or "git commit -a")
146 | ```
147 | Note that the update procedure could also *delete* unneeded files.
148 |
149 | Check the differences in `build.gradle` and `tools/release_builder.py` to ensure that the new `<sdk-version>` is present in both files.
150 |
151 | Now add the changes to version control using the required git commands (`git add`, `git rm`).
152 |
153 | ## Further steps
154 |
155 | * See the SDK release notes for any changes required when consuming the SDK.
156 | * If the build process is heavily customized, it may be that additional changes will be required to the `build.sh` file in the repo.
157 | * The API of the testing tools in `testing` could have changed, and any integration tests may need to be updated. Run `git diff testing` to check for any relevant changes.
158 |
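159 | As a final sanity check (illustrative; substitute the version you updated to for `0.41.0`), confirm the new version string appears where expected:
160 | ```bash
161 | $ grep "0.41.0" build.gradle tools/release_builder.py
162 | ```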
--------------------------------------------------------------------------------
/tools/kdc/kdc.conf:
--------------------------------------------------------------------------------
1 | [libdefaults]
2 | default_tkt_enctypes = aes256-cts des3-cbc-sha1 des-cbc-md5 des-cbc-crc
3 | default_tgs_enctypes = aes256-cts des3-cbc-sha1 des-cbc-md5 des-cbc-crc
4 | permitted_enctypes = aes256-cts des3-cbc-sha1 des-cbc-md5 des-cbc-crc
5 |
6 | [logging]
7 | # Specifies that entity should use the specified
8 | # destination for logging.
9 | #
10 | # Format of the destinations:
11 | #
12 | # STDERR
13 | # This logs to the program's stderr.
14 | #
15 | # FILE:/file
16 | # FILE=/file
17 | # Log to the specified file. The form using a colon appends to the file,
18 | # the form with an equal truncates the file. The truncating form keeps the
19 | # file open, while the appending form closes it after each log message
20 | # (which makes it possible to rotate logs). The truncating form is mainly
21 | # for compatibility with the MIT libkrb5.
22 | #
23 | # DEVICE=/device
24 | # This logs to the specified device, at present this is the same as
25 | # FILE:/device.
26 | #
27 | # CONSOLE
28 | # Log to the console, this is the same as DEVICE=/dev/console.
29 |
30 | # SYSLOG[:priority[:facility]]
31 | # Send messages to the syslog system, using priority, and facility. To get
32 | # the name for one of these, you take the name of the macro passed to
33 | # syslog(3), and remove the leading LOG_ (LOG_NOTICE becomes NOTICE). The
34 | # default values (as well as the values used for unrecognised values), are ERR,
35 | # and AUTH, respectively. See syslog(3) for a list of priorities and
36 | # facilities.
37 | #
38 | # Each destination may optionally be prepended with a range of logging levels,
39 | # specified as min-max/. Either of the min and max values may be omitted, in
40 | # this case min is assumed to be zero, and max is assumed to be infinity. If
41 | # you don't include a dash, both min and max get set to the specified value.
42 | # If no range is specified, all messages get logged.
43 | #
44 | kdc = STDERR
45 | kdc = SYSLOG:LOG_DEBUG
46 |
47 | [kdc]
48 |
49 | # database = {
50 | #
51 | # dbname = DATABASENAME
52 | # Use this database for this realm. See
53 | # the info documentation on how to configure
54 | # different database backends.
55 | #
56 | # realm = REALM
57 | # Specifies the realm that will be stored
58 | # in this database. If realm isn't set,
59 | # it will be used as the default database;
60 | # there can only be one entry that doesn't
61 | # have a realm stanza.
62 | #
63 | # mkey_file = FILENAME
64 | # Use this keytab file for the master key
65 | # of this database. If not specified
66 | # DATABASENAME.mkey will be used.
67 | #
68 | # acl_file = FILENAME
69 | # Use this file for the ACL list of this
70 | # database.
71 | #
72 | # log_file = FILENAME
73 | # Use this file as the log of changes per-
74 | # formed to the database. This file is
75 | # used by ipropd-master for propagating
76 | # changes to slaves.
77 | #
78 | # }
79 |
80 | database = {
81 | dbname = /var/lib/heimdal-kdc/heimdal
82 | acl_file = /etc/heimdal-kdc/kadmind.acl
83 | }
84 |
85 | # Maximum size of a kdc request.
86 | #
87 | # max-request = SIZE
88 |
89 | # If set pre-authentication is required. Since krb4
90 | # requests are not pre-authenticated they will be
91 | # rejected.
92 | #
93 | # require-preauth = BOOL
94 |
95 | # List of ports the kdc should listen to.
96 | #
97 | # ports = list of ports
98 |
99 | # List of addresses the kdc should bind to.
100 | #
101 | # addresses = list of interfaces
102 |
103 | # Should the kdc answer kdc-requests over http.
104 | #
105 | # enable-http = BOOL
106 |
107 | # If this kdc should emulate the AFS kaserver.
108 | #
109 | # enable-kaserver = BOOL
110 |
111 | # Verify the addresses in the tickets used in tgs
112 | # requests.
113 | #
114 | # check-ticket-addresses = BOOL
115 |
116 | # Allow address-less tickets.
117 | #
118 | # allow-null-ticket-addresses = BOOL
119 |
120 | # If the kdc is allowed to hand out anonymous tick-
121 | # ets.
122 | #
123 | # allow-anonymous = BOOL
124 |
125 | # Encode as-rep as tgs-rep to be compatible with mis-
126 | # takes older DCE secd did.
127 | # encode_as_rep_as_tgs_rep = BOOL
128 |
129 | # The time before expiration that the user should be
130 | # warned that her password is about to expire.
131 | #
132 | # kdc_warn_pwexpire = TIME
133 |
134 | # What type of logging the kdc should use, see also
135 | # [logging]/kdc.
136 | #
137 |
138 | # use_2b = {
139 | #
140 | # principal = BOOL
141 | # boolean value if the 524 daemon should
142 | # return AFS 2b tokens for principal.
143 | #
144 | # ...
145 | #
146 | # }
147 |
148 | # If the LDAP backend is used for storing principals,
149 | # this is the structural object that will be used
150 | # when creating and when reading objects. The
151 | # default value is account.
152 | #
153 | # hdb-ldap-structural-object structural object
154 |
155 | # is the dn that will be appended to the principal
156 | # when creating entries. Default value is the search
157 | # dn.
158 | #
159 | # hdb-ldap-create-base creation dn
160 |
161 | [kadmin]
162 |
163 | # If pre-authentication is required to talk to the
164 | # kadmin server.
165 | #
166 | # require-preauth = BOOL
167 |
168 | # If a principal already have its password set for
169 | # expiration, this is the time it will be valid for
170 | # after a change.
171 | #
172 | # password_lifetime = time
173 |
174 | # For each entry in default_keys try to parse it as a
175 | # sequence of etype:salttype:salt, the syntax of this is
176 | # something like:
177 | #
178 | # [(des|des3|etype):](pw-salt|afs3-salt)[:string]
179 | #
180 | # If etype is omitted it means everything, and if
181 | # string is omitted it means the default salt string
182 | # (for that principal and encryption type). Addi-
183 | # tional special values of keytypes are:
184 | #
185 | # v5 The Kerberos 5 salt pw-salt
186 | #
187 | # v4 The Kerberos 4 salt des:pw-salt:
188 | #
189 | # default_keys = keytypes...
190 |
191 | # When true, this is the same as
192 | #
193 | # default_keys = des3:pw-salt v4
194 | #
195 | # and is only left for backwards compatibility.
196 | # use_v4_salt = BOOL
197 |
198 | [password_quality]
199 | # Check the Password quality assurance in the info documentation
200 | # for more information.
201 |
202 | # Library name that contains the password check_func-
203 | # tion
204 | #
205 | # check_library = library-name
206 |
207 | # Function name for checking passwords in
208 | # check_library
209 | #
210 | # check_function = function-name
211 |
212 | # List of libraries that can do password policy
213 | # checks
214 | #
215 | # policy_libraries = library1 ... libraryN
216 |
217 | # List of policy names to apply to the password.
218 | # Builtin policies are among other minimum-length,
219 | # character-class, external-check.
220 | #
221 | # policies = policy1 ... policyN
222 |
223 |
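224 | # Example (commented out): a [logging] entry that, in addition to stderr,
225 | # appends KDC messages to a file. The colon form appends and closes the file
226 | # after each message, so external log rotation is safe:
227 | #
228 | # kdc = FILE:/var/log/heimdal-kdc.log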
--------------------------------------------------------------------------------
/testing/sdk_metrics.py:
--------------------------------------------------------------------------------
1 | '''
2 | Utilities relating to verifying the metrics functionality as reported
3 | by the DC/OS metrics component.
4 |
5 | ************************************************************************
6 | FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
7 | SHOULD ALSO BE APPLIED TO sdk_metrics IN ANY OTHER PARTNER REPOS
8 | ************************************************************************
9 | '''
10 | import json
11 | import logging
12 |
13 | import retrying
14 | import shakedown
15 |
16 | import sdk_cmd
17 |
18 | log = logging.getLogger(__name__)
19 |
20 |
21 | def get_scheduler_metrics(service_name, timeout_seconds=15*60):
22 | """Returns a dict tree of Scheduler metrics fetched directly from the scheduler.
23 | Returned data will match the content of /service/<service_name>/v1/metrics.
24 | """
25 | return sdk_cmd.service_request('GET', service_name, '/v1/metrics').json()
26 |
27 |
28 | def get_scheduler_counter(service_name, counter_name, timeout_seconds=15*60):
29 | """Waits for and returns the specified counter value from the scheduler"""
30 | @retrying.retry(
31 | wait_fixed=1000,
32 | stop_max_delay=timeout_seconds*1000,
33 | retry_on_result=lambda res: not res)
34 | def check_for_value():
35 | try:
36 | sched_metrics = get_scheduler_metrics(service_name)
37 | if 'counters' not in sched_metrics:
38 | log.info("No counters present for service {}. Types were: {}".format(
39 | service_name, sched_metrics.keys()))
40 | return None
41 | sched_counters = sched_metrics['counters']
42 | if counter_name not in sched_counters:
43 | log.info("No counter named '{}' was found for service {}. Counters were: {}".format(
44 | counter_name, service_name, sched_counters.keys()))
45 | return None
46 | value = sched_counters[counter_name]['count']
47 | log.info("{} metric counter: {}={}".format(service_name, counter_name, value))
48 | return value
49 | except Exception as e:
50 | log.error("Caught exception trying to get metrics: {}".format(e))
51 | return None
52 |
53 | return check_for_value()
54 |
55 |
56 | def wait_for_scheduler_counter_value(service_name, counter_name, min_value, timeout_seconds=15*60):
57 | """Waits for the specified counter value to be reached by the scheduler
58 | For example, check that `offers.processed` is greater than or equal to 1."""
59 | @retrying.retry(
60 | wait_fixed=1000,
61 | stop_max_delay=timeout_seconds*1000,
62 | retry_on_result=lambda res: not res)
63 | def check_for_value():
64 | value = get_scheduler_counter(service_name, counter_name, timeout_seconds)
65 | return value >= min_value
66 |
67 | return check_for_value()
68 |
69 |
70 | def get_metrics(package_name, service_name, task_name):
71 | """Return a list of DC/OS metrics datapoints.
72 |
73 | Keyword arguments:
74 | package_name -- the name of the package the service is using
75 | service_name -- the name of the service to get metrics for
76 | task_name -- the name of the task whose agent to run metrics commands from
77 | """
78 | tasks = shakedown.get_service_tasks(service_name)
79 | task_to_check = None
80 | for task in tasks:
81 | if task['name'] == task_name:
82 | task_to_check = task
83 | if task_to_check is None:
84 | raise Exception("Could not find task")
85 |
86 | agent_id = task_to_check['slave_id']
87 | executor_id = task_to_check['executor_id']
88 |
89 | pod_name = '-'.join(task_name.split("-")[:2])
90 | pod_info = sdk_cmd.svc_cli(package_name, service_name, "pod info {}".format(pod_name), json=True)
91 | task_info = None
92 | for task in pod_info:
93 | if task["info"]["name"] == task_name:
94 | task_info = task
95 | break
96 |
97 | if not task_info:
98 | return []
99 |
100 | task_container_id = task_info["status"]["containerStatus"]["containerId"]["value"]
101 |
102 | # Not related to functionality but consuming this
103 | # endpoint to verify downstream integrity
104 | containers_response = sdk_cmd.cluster_request(
105 | "GET", "/system/v1/agent/{}/metrics/v0/containers".format(agent_id), retry=False)
106 | reported_container_ids = json.loads(containers_response.text)
107 |
108 | container_id_reported = False
109 | for container_id in reported_container_ids:
110 | if container_id == task_container_id:
111 | container_id_reported = True
112 |
113 | if not container_id_reported:
114 | raise ValueError("The metrics /container endpoint returned {}, expecting {} to be returned as well".format(
115 | reported_container_ids, task_container_id))
116 |
117 | app_response = sdk_cmd.cluster_request(
118 | "GET", "/system/v1/agent/{}/metrics/v0/containers/{}/app".format(agent_id, task_container_id), retry=False)
119 | app_json = json.loads(app_response.text)
120 | if app_json['dimensions']['executor_id'] == executor_id:
121 | return app_json['datapoints']
122 |
123 | raise Exception("No metrics found")
124 |
125 |
126 | def check_metrics_presence(emitted_metrics, expected_metrics):
127 | metrics_exist = True
128 | for metric in expected_metrics:
129 | if metric not in emitted_metrics:
130 | metrics_exist = False
131 | log.error("Unable to find metric {}".format(metric))
132 | # don't short-circuit to log if multiple metrics are missing
133 |
134 | if not metrics_exist:
135 | log.info("Metrics emitted: {},\nMetrics expected: {}".format(emitted_metrics, expected_metrics))
136 |
137 | log.info("Expected metrics exist: {}".format(metrics_exist))
138 | return metrics_exist
139 |
140 |
141 | def wait_for_service_metrics(package_name, service_name, task_name, timeout, expected_metrics_exist):
142 | """Checks that the service is emitting the expected values into DC/OS Metrics.
143 | The assumption is that if the expected metrics are being emitted then so
144 | are the rest of the metrics.
145 |
146 | Arguments:
147 | package_name -- the name of the package the service is using
148 | service_name -- the name of the service to get metrics for
149 | task_name -- the name of the task whose agent to run metrics commands from
150 | expected_metrics_exist -- service-specific callback that checks for service-specific metrics
151 | """
152 | @retrying.retry(
153 | wait_fixed=1000,
154 | stop_max_delay=timeout*1000,
155 | retry_on_result=lambda res: not res)
156 | def check_for_service_metrics():
157 | try:
158 | log.info("verifying metrics exist for {}".format(service_name))
159 | service_metrics = get_metrics(package_name, service_name, task_name)
160 | emitted_metric_names = [metric["name"] for metric in service_metrics]
161 | return expected_metrics_exist(emitted_metric_names)
162 |
163 | except Exception as e:
164 | log.error("Caught exception trying to get metrics: {}".format(e))
165 | return False
166 |
167 | check_for_service_metrics()
168 |
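169 |
170 | # Illustrative usage (service and metric names are hypothetical): block until a
171 | # Kafka broker task reports its heap metric, failing after 10 minutes:
172 | #
173 | #   def expected(emitted_metric_names):
174 | #       return check_metrics_presence(emitted_metric_names, ["jvm.memory.heap.used"])
175 | #
176 | #   wait_for_service_metrics("kafka", "kafka", "kafka-0-broker", 10 * 60, expected)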
--------------------------------------------------------------------------------