├── .circle ├── buildenv_common.sh ├── buildenv_st2.sh ├── configure-rabbitmq.sh ├── configure-services.sh ├── docker-compose.sh ├── docker-compose2.sh ├── fix-cache-permissions.sh ├── packagecloud.sh └── save_payload.py ├── .circleci └── config.yml ├── .github ├── FUNDING.yml └── ISSUE_TEMPLATE.md ├── .gitignore ├── .rspec ├── CHANGELOG.rst ├── Gemfile ├── Makefile ├── README.md ├── Rakefile ├── Vagrantfile ├── docker-compose.circle.yml ├── docker-compose.circle2.yml ├── docker-compose.override.yml ├── docker-compose.yml ├── packages └── st2 │ ├── Makefile │ ├── bin │ └── runners.sh │ ├── component.makefile │ ├── debian │ ├── README.Debian │ ├── README.source │ ├── changelog │ ├── compat │ ├── control │ ├── copyright │ ├── docs │ ├── install │ ├── postinst │ ├── postrm │ ├── preinst │ ├── rules │ ├── source │ │ └── format │ ├── st2.dirs │ ├── st2.links │ ├── st2actionrunner.service │ ├── st2actionrunner@.service │ ├── st2api-generator │ ├── st2api.service │ ├── st2auth-generator │ ├── st2auth.service │ ├── st2garbagecollector.service │ ├── st2notifier.service │ ├── st2rulesengine.service │ ├── st2scheduler.service │ ├── st2sensorcontainer.service │ ├── st2stream-generator │ ├── st2stream.service │ ├── st2timersengine.service │ └── st2workflowengine.service │ ├── dist_utils.py │ ├── in-requirements.txt │ ├── rpm │ ├── postinst_script.spec │ ├── preinst_script.spec │ ├── st2.spec │ ├── st2actionrunner.service │ ├── st2actionrunner@.service │ ├── st2api.service │ ├── st2auth.service │ ├── st2garbagecollector.service │ ├── st2notifier.service │ ├── st2rulesengine.service │ ├── st2scheduler.service │ ├── st2sensorcontainer.service │ ├── st2stream.service │ ├── st2timersengine.service │ └── st2workflowengine.service │ ├── setup.py │ └── st2 │ └── __init__.py ├── rake ├── build │ ├── environment.rb │ ├── package_st2.rake │ ├── setup.rake │ └── upload_checkout.rake ├── formatter.rb ├── pipeline.rb ├── pipeline_options.rb ├── remote.rb ├── shellout.rb └── spec │ ├── 
default │ ├── 10-package_st2-consistency_spec.rb │ ├── 10-package_st2-logfiles_spec.rb │ ├── 50-installed-cli-versions_spec.rb │ ├── 60-st2_all-services-ok_spec.rb │ ├── 70-st2_actions-integrity_spec.rb │ └── 99-uninstall-system-packages_spec.rb │ ├── examples │ └── show-service-log-on-failure.rb │ ├── remote_helpers.rb │ ├── spec_helper.rb │ └── spec_package_iterables.rb ├── rpmspec ├── helpers.spec ├── package_venv.spec └── st2pkg_toptags.spec ├── scripts ├── build_os_package.sh ├── generate_st2_config.sh ├── includes │ ├── common.sh │ └── rhel.sh ├── install_os_packages.sh ├── platform_major_version.py ├── setup-vagrant.sh ├── st2_bootstrap.sh ├── st2bootstrap-deb.sh ├── st2bootstrap-deb.template.sh ├── st2bootstrap-el8.sh ├── st2bootstrap-el8.template.sh ├── st2bootstrap-el9.sh └── st2bootstrap-el9.template.sh └── tools └── generate_final_installer_scripts.py /.circle/buildenv_common.sh: -------------------------------------------------------------------------------- 1 | # Write export lines into ~/.buildenv and also source it in ~/.circlerc 2 | write_env() { 3 | for e in $*; do 4 | eval "value=\$$e" 5 | [ -z "$value" ] || echo "export $e=$value" >> ~/.buildenv 6 | done 7 | echo ". ~/.buildenv" >> ~/.circlerc 8 | } 9 | 10 | distros=($DISTROS) 11 | DISTRO=${distros[$CIRCLE_NODE_INDEX]} 12 | 13 | ST2_PACKAGES="st2" 14 | 15 | write_env ST2_PACKAGES 16 | 17 | cat ~/.buildenv 18 | -------------------------------------------------------------------------------- /.circle/buildenv_st2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is maintained in the st2 and st2-packages github repository. Keeping them as consistent 4 | # as possible will help avoid inconsistent behaviour in CircleCI pipeline. 5 | set -e 6 | set +x 7 | 8 | my_dir="$(dirname "$0")" 9 | source "$my_dir/buildenv_common.sh" 10 | 11 | # DISTROS environment is set from the CircleCI pipeline. 
12 | distros=($DISTROS) 13 | DISTRO=${distros[$CIRCLE_NODE_INDEX]} 14 | 15 | echo "Using distro: ${DISTRO}" 16 | echo "Using Python: $(python --version 2>&1)" 17 | 18 | fetch_version() { 19 | if [ -f ../st2/st2common/st2common/__init__.py ]; then 20 | # Get st2 version based on hardcoded string in st2common 21 | # build takes place in `st2` repo 22 | python -c 'exec(open("../st2/st2common/st2common/__init__.py").read()); print(__version__)' 23 | else 24 | # build takes place in `st2-packages` repo 25 | curl -sSL -o /tmp/st2_version.py ${ST2_GITURL}/raw/${ST2_GITREV}/st2common/st2common/__init__.py 26 | python -c 'exec(open("/tmp/st2_version.py").read()); print(__version__)' 27 | fi 28 | } 29 | 30 | # Resolve the st2 repo URL to build from: when CircleCI runs a pull request from a fork, CIRCLE_PR_* vars are set and we must clone the fork; otherwise use the project repo. 31 | st2_giturl() { 32 | # Handle pull requests properly 33 | if [ -z "$CIRCLE_PR_REPONAME" ]; then 34 | echo "https://github.com/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}" 35 | else 36 | echo "https://github.com/${CIRCLE_PR_USERNAME}/${CIRCLE_PR_REPONAME}" 37 | fi 38 | } 39 | 40 | # --- 41 | # ST2_GITURL - st2 GitHub repository (ex: https://github.com/StackStorm/st2) 42 | # ST2_GITREV - st2 branch name (ex: master, v1.2.1). 
This will be used to determine correct Docker Tag: `latest`, `1.2.1` 43 | # ST2PKG_VERSION - st2 version, will be reused in Docker image metadata (ex: 1.2dev) 44 | # ST2PKG_RELEASE - Release number aka revision number for `st2` package, will be reused in Docker metadata (ex: 4) 45 | 46 | ST2_GITURL=${ST2_GITURL:-https://github.com/StackStorm/st2} 47 | ST2_GITREV=${ST2_GITREV:-master} 48 | ST2PKG_VERSION=$(fetch_version) 49 | 50 | # for PackageCloud 51 | if [ -n "$PACKAGECLOUD_TOKEN" ]; then 52 | ST2PKG_RELEASE=$(.circle/packagecloud.sh next-revision ${DISTRO} ${ST2PKG_VERSION} st2) 53 | else 54 | # is fork 55 | ST2PKG_RELEASE=1 56 | fi 57 | 58 | re="\\b$DISTRO\\b" 59 | [[ "$NOTESTS" =~ $re ]] && TESTING=0 60 | 61 | # Used by docker compose when run from CircleCI 62 | ST2_CIRCLE_URL=${CIRCLE_BUILD_URL} 63 | 64 | write_env ST2_GITURL ST2_GITREV ST2PKG_VERSION ST2PKG_RELEASE DISTRO TESTING ST2_CIRCLE_URL 65 | cat ~/.buildenv 66 | -------------------------------------------------------------------------------- /.circle/configure-rabbitmq.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Enable remote guest access 4 | CONFIG=$(cat <<EOF 5 | [{rabbit, [{loopback_users, []}]}]. 6 | EOF 7 | ) 8 | 9 | # NOTE(review): heredoc body reconstructed -- the original lines were eaten by markup-stripping; clearing loopback_users is the standard way to allow remote guest logins. Confirm against upstream st2-packages. 10 | echo "$CONFIG" > /etc/rabbitmq/rabbitmq.config 11 | service rabbitmq-server start 12 | -------------------------------------------------------------------------------- /.circle/configure-services.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # change into script directory 5 | cd $(dirname `readlink -f $0`) 6 | 7 | set -x 8 | sudo ./configure-rabbitmq.sh 9 | -------------------------------------------------------------------------------- /.circle/docker-compose.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: docker-compose.sh OPERATION 3 | # This script is a st2 packages build pipeline invocation wrapper. 
4 | # 5 | # Operations: 6 | # pull, build and test operations are available. Which pull containers, 7 | # build and test packages respectivly. 8 | # 9 | 10 | set -e 11 | # Source the build environment defintion (details in buildenv.sh) 12 | . ~/.buildenv 13 | 14 | # Used for `RABBITMQHOST`, `MONGODBHOST`, and `REDISHOST` see docker-compose.override.yml 15 | HOST_IP=$(ifconfig docker0 | grep 'inet addr' | awk -F: '{print $2}' | awk '{print $1}') 16 | 17 | set -x 18 | case "$1" in 19 | # Perform fake command invocation, technically provides images "pull" phase. 20 | pull) 21 | echo Pulling dependent Docker images for $2 ... 22 | docker-compose -f docker-compose.circle.yml run \ 23 | -e ST2_GITURL=${ST2_GITURL} \ 24 | -e ST2_GITREV=${ST2_GITREV} \ 25 | -e ST2PKG_VERSION=${ST2PKG_VERSION} \ 26 | -e ST2PKG_RELEASE=${ST2PKG_RELEASE} \ 27 | -e RABBITMQHOST=${HOST_IP} \ 28 | -e MONGODBHOST=${HOST_IP} \ 29 | -e REDISHOST=${HOST_IP} \ 30 | -e ST2_CIRCLE_URL=${CIRCLE_BUILD_URL} \ 31 | $2 /bin/true 32 | ;; 33 | build) 34 | echo Starting Packages Build for $2 ... 35 | docker-compose -f docker-compose.circle.yml run \ 36 | -e ST2_GITURL=${ST2_GITURL} \ 37 | -e ST2_GITREV=${ST2_GITREV} \ 38 | -e ST2PKG_VERSION=${ST2PKG_VERSION} \ 39 | -e ST2PKG_RELEASE=${ST2PKG_RELEASE} \ 40 | -e RABBITMQHOST=${HOST_IP} \ 41 | -e MONGODBHOST=${HOST_IP} \ 42 | -e REDISHOST=${HOST_IP} \ 43 | -e ST2_PACKAGES="${ST2_PACKAGES}" \ 44 | -e ST2_CIRCLE_URL=${CIRCLE_BUILD_URL} \ 45 | $2 build 46 | ;; 47 | test) 48 | [ "$TESTING" = 0 ] && { echo "Omitting Tests for $2 ..." ; exit 0; } 49 | echo Starting Tests for $2 ... 
50 | docker-compose -f docker-compose.circle.yml run \ 51 | -e ST2_GITURL=${ST2_GITURL} \ 52 | -e ST2_GITREV=${ST2_GITREV} \ 53 | -e ST2PKG_VERSION=${ST2PKG_VERSION} \ 54 | -e ST2PKG_RELEASE=${ST2PKG_RELEASE} \ 55 | -e RABBITMQHOST=${HOST_IP} \ 56 | -e MONGODBHOST=${HOST_IP} \ 57 | -e REDISHOST=${HOST_IP} \ 58 | -e ST2_PACKAGES="${ST2_PACKAGES}" \ 59 | -e ST2_CIRCLE_URL=${CIRCLE_BUILD_URL} \ 60 | $2 test 61 | ;; 62 | esac 63 | 64 | -------------------------------------------------------------------------------- /.circle/docker-compose2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: docker-compose.sh OPERATION 3 | # This script is a st2 packages build pipeline invocation wrapper. 4 | # 5 | # Operations: 6 | # pull, build and test operations are available. Which pull containers, 7 | # build and test packages respectivly. 8 | # 9 | 10 | set -e 11 | # Source the build environment defintion (details in buildenv.sh) 12 | # shellcheck disable=SC1090 13 | . ~/.buildenv 14 | 15 | set -x 16 | case "$1" in 17 | # Clean up cached Docker containers from the previous CircleCI build 18 | # With https://circleci.com/docs/2.0/docker-layer-caching/ and 'reusable: true' we may see 19 | # containers running from the previous cached build 20 | clean) 21 | echo Cleaning cached Docker containers which could be there from the previous build ... 22 | docker compose -f docker-compose.circle2.yml -f docker-compose.override.yml rm -v --stop --force || true 23 | ;; 24 | # Perform fake command invocation, technically provides images "pull" phase. 25 | pull) 26 | echo Pulling dependent Docker images for $2 ... 27 | docker compose -f docker-compose.circle2.yml -f docker-compose.override.yml pull --include-deps $2 28 | ;; 29 | build) 30 | echo Starting Packages Build for $2 ... 
31 | docker compose -f docker-compose.circle2.yml -f docker-compose.override.yml run \ 32 | -e ST2_CHECKOUT=${ST2_CHECKOUT} \ 33 | -e ST2_GITURL=${ST2_GITURL} \ 34 | -e ST2_GITREV=${ST2_GITREV} \ 35 | -e ST2_GITDIR=${ST2_GITDIR} \ 36 | -e ST2PKG_VERSION=${ST2PKG_VERSION} \ 37 | -e ST2PKG_RELEASE=${ST2PKG_RELEASE} \ 38 | -e ST2_PACKAGES="${ST2_PACKAGES}" \ 39 | -e ST2_CIRCLE_URL="${CIRCLE_BUILD_URL}" \ 40 | $2 build 41 | ;; 42 | test) 43 | [ "$TESTING" = 0 ] && { echo "Omitting Tests for $2 ..." ; exit 0; } 44 | echo Starting Tests for $2 ... 45 | docker compose -f docker-compose.circle2.yml -f docker-compose.override.yml run \ 46 | -e ST2_PACKAGES="${ST2_PACKAGES}" \ 47 | "$2" test 48 | ;; 49 | esac 50 | -------------------------------------------------------------------------------- /.circle/fix-cache-permissions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script is run at dependencies:pre stage, thus 3 | # it's invoked after cache restore. 4 | set -e 5 | set -x 6 | 7 | # Make ubuntu's .cache/pip and wheelhouse owned by root 8 | mkdir -p ~/.cache/pip && sudo chown -R root:root ~/.cache/pip 9 | mkdir -p ~/wheelhouse && sudo chown -R root:root ~/wheelhouse 10 | -------------------------------------------------------------------------------- /.circle/packagecloud.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # For script debug set -x or +x to disable 4 | set +x 5 | 6 | # Requires: `jq` binary 7 | # Requires: `package_cloud` gem 8 | 9 | # Pass these ENV Variables 10 | # PACKAGECLOUD_ORGANIZATION - Packagecloud organization (default is stackstorm) 11 | # PACKAGECLOUD_TOKEN - act as a password for REST authentication 12 | # IS_PRODUCTION - whether packages are for production repo (default is 0, eg. staging repo will be used) 13 | # IS_ENTERPRISE - whether packages are for enterprise repo (default is 0, eg. 
community repo will be used) 14 | 15 | # Number of latest revisions to keep for package version 16 | # Ex: With `MAX_REVISIONS=10`, after uploading `1.3dev-20`, `1.3dev-10` will be deleted during the same run 17 | MAX_REVISIONS=5 18 | 19 | # Usage: 20 | # packagecloud.sh deploy el8 /tmp/st2-packages 21 | # IS_ENTERPRISE=1 packagecloud.sh deploy trusty /tmp/st2-packages 22 | # packagecloud.sh next-revision trusty 0.14dev st2 23 | # packagecloud.sh next-revision el8 1.3.1 st2web 24 | function main() { 25 | : ${PACKAGECLOUD_ORGANIZATION:=stackstorm} 26 | : ${PACKAGECLOUD_TOKEN:? PACKAGECLOUD_TOKEN env is required} 27 | : ${IS_PRODUCTION:=0} 28 | : ${IS_ENTERPRISE:=0} 29 | 30 | case "$1" in 31 | deploy) 32 | deploy "$2" "$3" 33 | ;; 34 | next-revision) 35 | LATEST_REVISION=$(latest_revision "$2" "$3" "$4") || exit $? 36 | debug "Latest version detected: '$LATEST_REVISION'" 37 | if [ -n "${LATEST_REVISION}" ]; then 38 | echo $((LATEST_REVISION+1)) 39 | else 40 | echo 1 41 | fi 42 | ;; 43 | *) 44 | echo $"Usage: deploy {focal|jammy|el8|el9} /tmp/st2-packages" 45 | echo $"Usage: next-revision {focal|jammy|el8|el9} 3.9dev st2" 46 | exit 1 47 | esac 48 | } 49 | 50 | # Get PackageCloud repo name depending on environment 51 | # 52 | ### Community: 53 | # https://packagecloud.io/stackstorm/stable 54 | # https://packagecloud.io/stackstorm/unstable 55 | # https://packagecloud.io/stackstorm/staging-stable 56 | # https://packagecloud.io/stackstorm/staging-unstable 57 | ### Enterprise: 58 | # https://packagecloud.io/stackstorm/enterprise 59 | # https://packagecloud.io/stackstorm/enterprise-unstable 60 | # https://packagecloud.io/stackstorm/staging-enterprise 61 | # https://packagecloud.io/stackstorm/staging-enterprise-unstable 62 | function get_repo_name() { 63 | if [ ${IS_ENTERPRISE} -eq 0 ]; then 64 | if [ ${PKG_IS_UNSTABLE} -eq 0 ]; then 65 | PACKAGECLOUD_REPO=stable 66 | else 67 | PACKAGECLOUD_REPO=unstable 68 | fi 69 | 70 | if [ ${IS_PRODUCTION} -eq 0 ]; then 71 | 
PACKAGECLOUD_REPO="staging-${PACKAGECLOUD_REPO}" 72 | fi 73 | else 74 | if [ ${IS_PRODUCTION} -eq 1 ]; then 75 | PACKAGECLOUD_REPO=enterprise 76 | else 77 | PACKAGECLOUD_REPO=staging-enterprise 78 | fi 79 | 80 | if [ ${PKG_IS_UNSTABLE} -eq 1 ]; then 81 | PACKAGECLOUD_REPO="${PACKAGECLOUD_REPO}-unstable" 82 | fi 83 | fi 84 | 85 | } 86 | 87 | # Arguments 88 | # $1 PKG_OS - distribution the package is built for 89 | # $2 PKG_DIR - directory with packages to upload 90 | function deploy() { 91 | : ${DEPLOY_PACKAGES:=1} 92 | if [ ${DEPLOY_PACKAGES} -eq 0 ]; then 93 | echo 'Skipping Deploy because DEPLOY_PACKAGES=0' 94 | exit 95 | fi 96 | 97 | PKG_OS=$1 98 | PKG_DIR=$2 99 | : ${PKG_OS:? os (first arg) is required} 100 | : ${PKG_DIR:? dir (second arg) is required} 101 | 102 | if [ ! -d "$PKG_DIR" ]; then 103 | echo "No directory $PKG_DIR, aborting..." 104 | exit 1 105 | fi 106 | 107 | for PKG_PATH in ${PKG_DIR}/*.{deb,rpm}; do 108 | if grep -q '*' <<< "${PKG_PATH}"; then continue; fi 109 | 110 | # Package name 111 | PKG=`basename ${PKG_PATH}` 112 | # deb or rpm 113 | PKG_TYPE=${PKG##*.} 114 | # Parse package metadata 115 | parse_${PKG_TYPE} 116 | # Get repo name depending on env 117 | get_repo_name 118 | # Version of the distro 119 | PKG_PATH_BASE=`basename $PKG_DIR` 120 | # Get package OS in format, suited for Packagecloud 121 | get_pkg_os "$PKG_OS" 122 | 123 | if [ -z "$PKG_NAME" ] || [ -z "$PKG_VERSION" ] || [ -z "$PKG_RELEASE" ]; then 124 | echo "$PKG_PATH doesn't look like package, skipping..." 
125 | continue 126 | fi 127 | 128 | debug "PACKAGECLOUD_ORGANIZATION: ${PACKAGECLOUD_ORGANIZATION}" 129 | debug "PACKAGECLOUD_REPO: ${PACKAGECLOUD_REPO}" 130 | debug "PKG_PATH: ${PKG_PATH}" 131 | debug "PKG: ${PKG}" 132 | debug "PKG_NAME: ${PKG_NAME}" 133 | debug "PKG_VERSION: ${PKG_VERSION}" 134 | debug "PKG_RELEASE: ${PKG_RELEASE}" 135 | debug "PKG_ARCH: ${PKG_ARCH}" 136 | debug "PKG_TYPE: ${PKG_TYPE}" 137 | debug "PKG_OS_NAME: ${PKG_OS_NAME}" 138 | debug "PKG_OS_VERSION: ${PKG_OS_VERSION}" 139 | debug "PKG_IS_UNSTABLE: ${PKG_IS_UNSTABLE}" 140 | 141 | publish 142 | prune_old_revision 143 | done 144 | } 145 | 146 | function debug() { 147 | echo "$(date -Is) [${PACKAGECLOUD_REPO} ${PKG}] $1" >&2 148 | } 149 | 150 | # Parse DEB metadata from package file name `st2api_1.2dev-20_amd64.deb` 151 | function parse_deb() { 152 | METAPKG=( $(sed -r 's@^([^_]+)_([^-]+)-([^_]+)_([^.]+)\.deb$@\1 \2 \3 \4@g' <<<$PKG) ) 153 | if [[ 4 -ne ${#METAPKG[@]} ]]; then 154 | echo "Failed to extract package metadata from filename ${PKG}." 155 | exit 1 156 | fi 157 | # st2api 158 | PKG_NAME=${METAPKG[0]} 159 | # 1.2dev 160 | PKG_VERSION=${METAPKG[1]} 161 | # 20 162 | PKG_RELEASE=${METAPKG[2]} 163 | # amd64 164 | PKG_ARCH=${METAPKG[3]} 165 | # stable/unstable 166 | PKG_IS_UNSTABLE=$(grep -qv dev <<<${PKG_VERSION}; echo $?) 167 | } 168 | 169 | # Parse RPM metadata from package file name `st2api-1.2dev-20.x86_64.rpm` 170 | # https://fedoraproject.org/wiki/Packaging:NamingGuidelines 171 | function parse_rpm() { 172 | METAPKG=( $(sed -r 's@^([^-]+)-([^-]+)-([^.]+)\.([^.]+)\.rpm$@\1 \2 \3 \4@g' <<<$PKG) ) 173 | if [[ 4 -ne ${#METAPKG[@]} ]]; then 174 | echo "Failed to extract package metadata from filename ${PKG}." 
175 | exit 1 176 | fi 177 | # st2api 178 | PKG_NAME=${METAPKG[0]} 179 | # 1.2dev 180 | PKG_VERSION=${METAPKG[1]} 181 | # 20 182 | PKG_RELEASE=${METAPKG[2]} 183 | # x86_64 184 | PKG_ARCH=${METAPKG[3]} 185 | # stable/unstable 186 | PKG_IS_UNSTABLE=$(grep -qv dev <<<${PKG_VERSION}; echo $?) 187 | } 188 | 189 | function publish() { 190 | debug "Publishing ${PKG_PATH}..." 191 | package_cloud push ${PACKAGECLOUD_ORGANIZATION}/${PACKAGECLOUD_REPO}/${PKG_OS_NAME}/${PKG_OS_VERSION} ${PKG_PATH} || exit 1 192 | } 193 | 194 | function prune_old_revision() { 195 | if [ "$PKG_RELEASE" -gt "$MAX_REVISIONS" ]; then 196 | RELEASE_TO_DELETE=$((PKG_RELEASE-MAX_REVISIONS)) 197 | PKG_TO_DELETE=${PKG/$PKG_VERSION-$PKG_RELEASE/$PKG_VERSION-$RELEASE_TO_DELETE} 198 | debug "Pruning obsolete revision ${PKG_VERSION}-${RELEASE_TO_DELETE} ..." 199 | package_cloud yank ${PACKAGECLOUD_ORGANIZATION}/${PACKAGECLOUD_REPO}/${PKG_OS_NAME}/${PKG_OS_VERSION} ${PKG_TO_DELETE} 200 | RET=$? 201 | if [[ $RET -eq 0 ]]; then 202 | debug "${PKG_VERSION}-${RELEASE_TO_DELETE} deleted" 203 | else 204 | debug "Unable to delete ${PKG_VERSION}-${RELEASE_TO_DELETE} (Error: ${RET})" 205 | fi 206 | fi 207 | } 208 | 209 | # Arguments: 210 | # $1 PKG_OS - distribution the package is built for 211 | # $2 PKG_VERSION - Target package version to find latest revision for (1.1, 1.2dev) 212 | # $3 PKG_NAME - Target package name to find latest revision for (st2, st2web) 213 | function latest_revision() { 214 | PKG_OS=$1 215 | PKG_VERSION=$2 216 | PKG_NAME=$3 217 | : ${PKG_OS:? OS (first arg) is required} 218 | : ${PKG_VERSION:? version (second arg) is required} 219 | : ${PKG_NAME:? name (third arg) is required} 220 | debug "Find latest revision using ${PKG_OS} ${PKG_VERSION} ${PKG_NAME}" 221 | PKG_IS_UNSTABLE=$(echo ${PKG_VERSION} | grep -qv 'dev'; echo $?) 
222 | get_repo_name 223 | get_pkg_os "$PKG_OS" 224 | 225 | get_revision 226 | } 227 | 228 | # TODO: Check if CURL response code was successful 229 | function get_versions_url() { 230 | REPO_HOST="packagecloud.io" 231 | REPO_PATH="/api/v1/repos/${PACKAGECLOUD_ORGANIZATION}/${PACKAGECLOUD_REPO}/packages/${PKG_TYPE}/${PKG_OS_NAME}/${PKG_OS_VERSION}/${PKG_NAME}.json" 232 | REPOMETA_FILE="/tmp/${PKG_TYPE}_${PKG_OS_NAME}_${PKG_OS_VERSION}_${PKG_NAME}.json" 233 | 234 | curl -Ss -q "https://${PACKAGECLOUD_TOKEN}@${REPO_HOST}${REPO_PATH}" > "${REPOMETA_FILE}" 235 | jq -r .[0].versions_url "${REPOMETA_FILE}" 236 | } 237 | 238 | # TODO: Check if CURL response code was successful 239 | function get_revision() { 240 | VERSION_URL="$(get_versions_url)?per_page=1000" 241 | VERSIONS_FILE="/tmp/${PKG_TYPE}_${PKG_OS_NAME}_${PKG_OS_VERSION}_versions.json" 242 | if [[ "${VERSION_URL}" == /* ]]; then 243 | curl -Ss -q "https://${PACKAGECLOUD_TOKEN}@packagecloud.io${VERSION_URL}" > "${VERSIONS_FILE}" 244 | egrep -q xtrace <<<"$SHELLOPTS" && jq . "$VERSIONS_FILE" >&2 245 | # A regex is used to match .version to workaround packagecloud metadata differing between rpm & deb. 246 | jq -r "[.[] | select(.version | test(\"^${PKG_VERSION}(-[0-9]+)?$\")) | .release | tonumber] | max" "${VERSIONS_FILE}" 247 | fi 248 | } 249 | 250 | # Arguments: 251 | # $1 PKG_OS - OS codename 252 | function get_pkg_os() { 253 | case "$1" in 254 | buster|bullseye|bookworm) 255 | PKG_OS_NAME=debian 256 | PKG_OS_VERSION=$PKG_OS 257 | PKG_TYPE="deb" 258 | ;; 259 | bionic|focal|jammy|noble) 260 | PKG_OS_NAME=ubuntu 261 | PKG_OS_VERSION=$PKG_OS 262 | PKG_TYPE="deb" 263 | ;; 264 | el8|el9) 265 | PKG_OS_NAME=el 266 | PKG_OS_VERSION=${PKG_OS//[^0-9]/} 267 | PKG_TYPE="rpm" 268 | ;; 269 | *) 270 | echo "Unknown distribution '$1', aborting..." 
271 | exit 1 272 | esac 273 | } 274 | 275 | main "$@" 276 | -------------------------------------------------------------------------------- /.circle/save_payload.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import glob 5 | import os 6 | import sys 7 | import json 8 | import re 9 | from itertools import islice 10 | 11 | DISTROS = (os.environ.get('DISTROS') or sys.exit('DISTROS env variable is required!')).split(' ') 12 | CIRCLE_NODE_TOTAL = int(os.environ.get('CIRCLE_NODE_TOTAL')) or sys.exit('CIRCLE_NODE_TOTAL env variable is required!') 13 | 14 | 15 | def env(var, default=''): 16 | """ 17 | Shortcut to get ENV variable value 18 | :param var: Input environment variable name 19 | :type var: ``str`` 20 | :return: 21 | :rtype: ``str`` 22 | """ 23 | return os.environ.get(var, default) 24 | 25 | 26 | class Payload(object): 27 | """ 28 | Representation of data to be send to ST2 via Web Hook 29 | """ 30 | data = { 31 | 'success': True, 32 | 'reason': [], 33 | 'circle': { 34 | 'doc': 'https://circleci.com/docs/environment-variables', 35 | 'CIRCLE_PROJECT_USERNAME': env('CIRCLE_PROJECT_USERNAME'), 36 | 'CIRCLE_PROJECT_REPONAME': env('CIRCLE_PROJECT_REPONAME'), 37 | 'CIRCLE_BRANCH': env('CIRCLE_BRANCH'), 38 | 'CIRCLE_SHA1': env('CIRCLE_SHA1'), 39 | 'CIRCLE_COMPARE_URL': env('CIRCLE_COMPARE_URL'), 40 | 'CIRCLE_BUILD_NUM': env('CIRCLE_BUILD_NUM'), 41 | 'CIRCLE_PREVIOUS_BUILD_NUM': env('CIRCLE_PREVIOUS_BUILD_NUM'), 42 | 'CI_PULL_REQUESTS': env('CI_PULL_REQUESTS'), 43 | 'CI_PULL_REQUEST': env('CI_PULL_REQUEST'), 44 | 'CIRCLE_USERNAME': env('CIRCLE_USERNAME'), 45 | 'CIRCLE_PR_USERNAME': env('CIRCLE_PR_USERNAME'), 46 | 'CIRCLE_PR_REPONAME': env('CIRCLE_PR_REPONAME'), 47 | 'CIRCLE_PR_NUMBER': env('CIRCLE_PR_NUMBER'), 48 | 'CIRCLE_NODE_TOTAL': env('CIRCLE_NODE_TOTAL'), 49 | }, 50 | 'build': { 51 | 'ST2_GITURL': env('ST2_GITURL'), 52 | 'ST2_GITREV': env('ST2_GITREV'), 53 | 
'DEPLOY_PACKAGES': env('DEPLOY_PACKAGES'), 54 | 'DISTROS': env('DISTROS'), 55 | 'NOTESTS': env('NOTESTS'), 56 | }, 57 | 'packages': [], 58 | } 59 | 60 | 61 | class BasePackageParse(object): 62 | """ 63 | Base class for Package name parsers 64 | """ 65 | 66 | def __init__(self, package_file): 67 | self.package = os.path.basename(package_file) 68 | 69 | match = self.PATTERN.match(self.package) 70 | if not match: 71 | raise ValueError("'{0}' naming doesn't looks like package".format(self.package)) 72 | 73 | self.name = match.group('name') 74 | self.version = match.group('version') 75 | self.revision = match.group('revision') 76 | self.architecture = match.group('architecture') 77 | 78 | 79 | class DebParse(BasePackageParse): 80 | """ 81 | Parse metadata from .deb file name like: version number, revision number, architecture. 82 | Ex: st2api_1.2dev-20_amd64.deb 83 | """ 84 | PATTERN = re.compile('^(?P[^\/\n_]*)_(?P[^_-]*)-(?P[^_-]*)_(?P[^_]*)\.deb$') 85 | 86 | 87 | class RpmParse(BasePackageParse): 88 | """ 89 | Parse metadata from .deb file name like: version number, revision number, architecture. 
90 | Ex: st2api-1.2dev-20.x86_64.rpm 91 | """ 92 | PATTERN = re.compile('^(?P[^\/]*)-(?P[^-]*)-(?P[^-]*)\.(?P[^-]*)\.rpm$') 93 | 94 | 95 | if __name__ == '__main__': 96 | parser = argparse.ArgumentParser(description="Send Web Hook with build results to StackStorm") 97 | parser.add_argument('dir', help='directory tree with created packages') 98 | args = parser.parse_args() 99 | 100 | if int(env('DEPLOY_PACKAGES', '1')): 101 | for distro in islice(DISTROS, CIRCLE_NODE_TOTAL): 102 | try: 103 | filename = (glob.glob(os.path.join(args.dir, distro, 'st2*.deb')) + glob.glob(os.path.join(args.dir, distro, 'st2*.rpm')))[0] 104 | if filename.endswith('.deb'): 105 | package = DebParse(filename) 106 | elif filename.endswith('.rpm'): 107 | package = RpmParse(filename) 108 | 109 | Payload.data['packages'].append({ 110 | 'distro': distro, 111 | 'version': package.version, 112 | 'revision': package.revision 113 | }) 114 | except IndexError: 115 | Payload.data['success'] = False 116 | Payload.data['reason'].append("CircleCI build produced no packages for '{0}'".format(distro)) 117 | 118 | payload_file = os.path.abspath(os.path.join(args.dir, 'payload.json')) 119 | with open(payload_file, 'w') as f: 120 | f.write(json.dumps(Payload.data)) 121 | 122 | print('Build metadata will be available via URL:') 123 | print('https://circle-artifacts.com/gh/{0}/{1}/{2}/artifacts/0{3}'.format( 124 | os.environ.get('CIRCLE_PR_USERNAME') or env('CIRCLE_PROJECT_USERNAME'), 125 | os.environ.get('CIRCLE_PR_REPONAME') or env('CIRCLE_PROJECT_REPONAME'), 126 | env('CIRCLE_BUILD_NUM')), 127 | payload_file 128 | ) 129 | print("") 130 | print(json.dumps(Payload.data)) 131 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | # Setup in CircleCI account the following ENV variables: 2 | # IS_PRODUCTION (default: 0) 3 | # IS_ENTERPRISE (default: 0) 4 | # 
PACKAGECLOUD_ORGANIZATION (default: stackstorm) 5 | # PACKAGECLOUD_TOKEN 6 | 7 | version: 2 8 | jobs: 9 | # Build & Test st2 packages 10 | packages: 11 | parallelism: 4 12 | # 4CPUs & 8GB RAM CircleCI machine 13 | # sadly, it doesn't work with 'setup_remote_docker' 14 | resource_class: large 15 | docker: 16 | # The primary container is an instance of the first list image listed. Your build commands run in this container. 17 | - image: circleci/python:3.8 18 | working_directory: ~/st2-packages 19 | environment: 20 | DISTROS: "focal jammy el8 el9" 21 | BASH_ENV: ~/.buildenv 22 | # These should be set to an empty string, so that st2cd prep tasks are able to replace these 23 | # with real gitrevs during releases. Note that they are commented out, so that they do not interfere 24 | # with build parameters. st2cd prep tasks will uncomment these on a branch, and replace with proper 25 | # gitrefs. 26 | # ST2_GITREV: "" 27 | steps: 28 | - checkout 29 | - run: 30 | name: Install latest Docker Compose V2 31 | command: | 32 | set -x 33 | export CODENAME=$(source /etc/os-release && echo "$VERSION_CODENAME") 34 | export DISTRO=$(source /etc/os-release && echo "$ID") 35 | export ARCH=$(dpkg --print-architecture) 36 | # get gpg key for download.docker 37 | curl -fsSL https://download.docker.com/linux/${DISTRO}/gpg | sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/download.docker.gpg 38 | # set source list 39 | sudo tee <<<"deb [arch=${ARCH}] https://download.docker.com/linux/${DISTRO} ${CODENAME} stable" /etc/apt/sources.list.d/download.docker.list 40 | # update package list 41 | sudo apt update 42 | # install docker CLI and Docker Compose v2 43 | sudo apt install docker-ce-cli docker-compose-plugin 44 | - setup_remote_docker: 45 | reusable: true # default - false 46 | exclusive: true # default - true 47 | version: docker24 48 | - run: 49 | name: Ensure installation scripts are synced with their templates 50 | command: make .generated-files-check 51 | - run: 52 | name: Docker 
version 53 | command: | 54 | set -x 55 | docker --version 56 | docker compose version 57 | - run: 58 | name: Initialize packages Build Environment 59 | command: | 60 | set -x 61 | .circle/buildenv_st2.sh 62 | # Verify that Docker environment is properly cleaned up and there is nothing left from the previous build 63 | # See issue: https://discuss.circleci.com/t/no-space-left-on-device-while-creating-mongo/11532/13 64 | - run: 65 | name: Pre Docker cleanup 66 | command: | 67 | set -x 68 | # Clean-up running containers 69 | .circle/docker-compose2.sh clean 70 | # Remove st2-packages-vol container 71 | docker rm -v --force st2-packages-vol || true 72 | # Clean-up any created volumes 73 | docker volume prune --force 74 | - run: 75 | # Workaround for CircleCI docker-compose limtation where volumes don't work 76 | # See detailed explanation: https://circleci.com/docs/2.0/building-docker-images/#mounting-folders 77 | name: Copy st2-packages files to build containers 78 | command: | 79 | # creating dummy container which will hold a volume with data files 80 | docker create -v /root/st2-packages -v /root/build -v /var/log/st2 -v /root/.cache/pip -v /tmp/wheelhouse --name st2-packages-vol alpine:3.4 /bin/true 81 | # copy st2-packages data files into this volume 82 | docker cp ~/st2-packages st2-packages-vol:/root 83 | - run: 84 | name: Pull dependent Docker Images 85 | command: .circle/docker-compose2.sh pull ${DISTRO} || .circle/docker-compose2.sh pull ${DISTRO} 86 | - run: 87 | name: Build the ${DISTRO} Packages 88 | command: | 89 | # Create necessary directories 90 | mkdir -p ~/st2-packages/build/${DISTRO}/log/ 91 | 92 | # Run the build 93 | .circle/docker-compose2.sh build ${DISTRO} 94 | 95 | # Once build container finishes we can copy packages directly from it 96 | docker cp st2-packages-vol:/root/build/. 
~/st2-packages/build/${DISTRO} 97 | - run: 98 | name: Test the Packages 99 | command: .circle/docker-compose2.sh test ${DISTRO} 100 | - run: 101 | when: always 102 | name: Grab the st2 logs 103 | command: docker cp st2-packages-vol:/var/log/st2 ~/st2-packages/build/${DISTRO}/log/st2 104 | - store_artifacts: 105 | path: ~/st2-packages/build 106 | destination: packages 107 | - persist_to_workspace: 108 | root: ~/st2-packages 109 | paths: 110 | - .circle/packagecloud.sh 111 | - build 112 | # Verify that Docker environment is properly cleaned up, and there is nothing left for the next build 113 | # See issue: https://discuss.circleci.com/t/no-space-left-on-device-while-creating-mongo/11532/13 114 | - run: 115 | name: Post Docker cleanup 116 | # don't cleanup resources on error since failed container might be used for SSH debug 117 | when: on_success 118 | command: | 119 | set -x 120 | # Clean-up running containers 121 | .circle/docker-compose2.sh clean 122 | # Remove st2-packages-vol container 123 | docker rm -v --force st2-packages-vol || true 124 | # Clean-up any created volumes 125 | docker volume prune --force 126 | 127 | # Deploy produced deb/rpm packages to PackageCloud staging 128 | deploy: 129 | docker: 130 | # The primary container is an instance of the first list image listed. Your build commands run in this container. 131 | - image: circleci/ruby:2.7 132 | working_directory: /tmp/deploy 133 | environment: 134 | - DISTROS: "focal jammy el8 el9" 135 | steps: 136 | - attach_workspace: 137 | at: . 138 | - run: 139 | name: List workspace files 140 | command: find . 
| sed 's|[^/]*/| |g' 141 | - run: 142 | name: Install dependencies 143 | command: | 144 | set -x 145 | sudo apt-get -y install parallel jq 146 | gem install package_cloud 147 | - run: 148 | name: Deploy deb/rpm packages to PackageCloud 149 | command: "parallel -v -j0 --line-buffer .circle/packagecloud.sh deploy {} build/{} ::: ${DISTROS}" 150 | 151 | # TODO: Return to workflows when "Auto-cancel redundant builds” feature is implemented for Workflows: https://discuss.circleci.com/t/auto-cancel-redundant-builds-not-working-for-workflow/13852 152 | # Putting everything together 153 | workflows: 154 | version: 2 155 | package-test-deploy: 156 | jobs: 157 | - packages 158 | - deploy: 159 | requires: 160 | - packages 161 | filters: 162 | branches: 163 | only: 164 | - master 165 | - /v[0-9]+\.[0-9]+/ 166 | - feature/circleci 167 | 168 | experimental: 169 | notify: 170 | branches: 171 | only: 172 | - master 173 | - /v[0-9]+\.[0-9]+/ 174 | 175 | notify: 176 | webhooks: 177 | - url: https://ci-webhooks.stackstorm.com/webhooks/build/events 178 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # https://stackstorm.com/2020/06/12/sponsoring-stackstorm/ 2 | # FAQ: https://stackstorm.com/donate/ 3 | # Expenses: https://github.com/StackStorm/discussions/issues/36 4 | community_bridge: stackstorm 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | If you find an issue in packages, please file an issue and we'll have a look as soon as we can. 2 | In order to expedite the process, it would be helpful to follow this checklist and provide 3 | relevant information. 
4 | 5 | * [ ] Operating system: `uname -a`, `./etc/lsb_release` or `cat /etc/redhat-release` 6 | * [ ] StackStorm version: `st2 --version` 7 | * [ ] Actual package versions of all packages (st2, st2web, st2chatops, nginx, mongo, rabbitmq-server) 8 | DEB: apt-cache policy ${PACKAGE_NAME} will give you the version of the package. 9 | RPM: yum info ${PACKAGE_NAME} will give you the version of the package. 10 | Note the exact name of mongo, nginx, rabbitmq changes based on OS. 11 | * [ ] Contents of /etc/st2/st2.conf 12 | * [ ] Output of st2ctl status 13 | * [ ] Optional - Details about target box. E.g. vagrant box link or AWS AMI link. 14 | 15 | # Issue details 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | *.sqlite 3 | *.swp 4 | *.log 5 | */requirements.txt 6 | !st2client/requirements.txt 7 | .stamp* 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Packages 13 | *.egg 14 | *.egg-info 15 | dist 16 | build 17 | !rake/build/ 18 | .venv 19 | eggs 20 | parts 21 | #bin 22 | var 23 | sdist 24 | develop-eggs 25 | .installed.cfg 26 | lib64 27 | virtualenv 28 | 29 | # Installer logs 30 | pip-log.txt 31 | 32 | # Unit test / coverage reports 33 | .coverage 34 | .coverage.integration.* 35 | .coverage.integration 36 | .coverage.unit 37 | cover 38 | coverage*.xml 39 | .tox 40 | nosetests.xml 41 | htmlcov 42 | 43 | # Mr Developer 44 | .idea 45 | .DS_Store 46 | ._* 47 | .vscode 48 | *.sublime-project 49 | *.sublime-workspace 50 | 51 | # Editor Saves 52 | *~ 53 | \#*\# 54 | 55 | # Cut out Gemfile*, since they are built into packagingrunner container. 56 | # Gemfile* will be copied at runtime. 
57 | 58 | # Need Gemfile for EL8 59 | # Gemfile 60 | Gemfile.lock 61 | .vagrant 62 | # Custom pypi creds 63 | *pip.conf 64 | -------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --color 2 | --format documentation 3 | --default-path rake/spec 4 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | in development 5 | -------------- 6 | 7 | Added 8 | ~~~~~ 9 | * Add Ubuntu Jammy packaging 10 | Contributed by @mamercad 11 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gem 'serverspec' 4 | gem 'hashie' 5 | gem 'slop' 6 | gem 'sshkit', git: 'https://github.com/capistrano/sshkit.git', ref: 'acfa0e221f499da01b2cbba097acdb37a71555fa' 7 | gem 'bintray', git:'https://github.com/stefanozanella/bintray.git' 8 | gem 'net-ssh', '<5.0' 9 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: scriptsgen 2 | scriptsgen: 3 | @echo 4 | @echo "================== scripts gen ====================" 5 | @echo 6 | /usr/bin/env python3 tools/generate_final_installer_scripts.py 7 | 8 | .PHONY: .generated-files-check 9 | .generated-files-check: 10 | # Verify that all the files which are automatically generated have indeed been re-generated and 11 | # committed 12 | @echo "==================== generated-files-check ====================" 13 | 14 | # 1. 
Sample config - conf/st2.conf.sample 15 | cp scripts/st2bootstrap-deb.sh /tmp/st2bootstrap-deb.sh.upstream 16 | cp scripts/st2bootstrap-el8.sh /tmp/st2bootstrap-el8.sh.upstream 17 | cp scripts/st2bootstrap-el9.sh /tmp/st2bootstrap-el9.sh.upstream 18 | 19 | make scriptsgen 20 | 21 | diff scripts/st2bootstrap-deb.sh /tmp/st2bootstrap-deb.sh.upstream || (echo "scripts/st2bootstrap-deb.sh hasn't been re-generated and committed. Please run \"make scriptsgen\" and include and commit the generated file." && exit 1) 22 | diff scripts/st2bootstrap-el8.sh /tmp/st2bootstrap-el8.sh.upstream || (echo "scripts/st2bootstrap-el8.sh hasn't been re-generated and committed. Please run \"make scriptsgen\" and include and commit the generated file." && exit 1) 23 | diff scripts/st2bootstrap-el9.sh /tmp/st2bootstrap-el9.sh.upstream || (echo "scripts/st2bootstrap-el9.sh hasn't been re-generated and committed. Please run \"make scriptsgen\" and include and commit the generated file." && exit 1) 24 | 25 | @echo "All automatically generated files are up to date." 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Stackstorm packages build environment 2 | 3 | [![Circle CI Build Status](https://circleci.com/gh/StackStorm/st2-packages/tree/master.svg?style=shield)](https://circleci.com/gh/StackStorm/st2-packages) 4 | [![Go to Docker Hub](https://img.shields.io/badge/Docker%20Hub-%E2%86%92-blue.svg)](https://hub.docker.com/r/stackstorm/) 5 | [![Download deb/rpm](https://img.shields.io/badge/Download-deb/rpm-blue.svg)](https://packagecloud.io/StackStorm/) 6 | 7 | ## Highlights 8 | 9 | - **Docker based**. Leveraging docker it's possible to deliver packages for any OS distro in a fast and reliable way. 
*Use the latest Docker version with a Docker Compose plugin that supports V2 syntax.* 10 | - [Rake](https://github.com/ruby/rake) + [sshkit](https://github.com/capistrano/sshkit)-based execution enables easy configuration via **simple DSL** and brings **parallel task processing** out of the box. 11 | - **Test-driven workflow**. Artifacts built are not only available for any enabled OS distro but at the same time tested on a bunch of platforms, providing feedback such as can be installed, services can start up, operations can be executed etc. 12 | 13 | ## Overview 14 | 15 | Packages build environment is a _multi-container docker_ application defined and managed with [docker-compose](https://github.com/docker/compose). It consists of four types of containers: 16 | 17 | - **Packaging runner** (https://quay.io/stackstorm/packagingrunner) - the main entry point, package build and test processing controller container. 18 | - **Packaging build** (https://hub.docker.com/r/stackstorm/packagingbuild/) - container where actual `.deb`/`.rpm` artifacts build takes place. It's used to bring up the build environment specific for OS distro. This means that different containers are available such as _packagingbuild:rocky8_, _packagingbuild:focal_, _packagingbuild:jammy_, correspondingly for RockyLinux 8, Ubuntu Focal, and Ubuntu Jammy. 19 | - **Packaging test** (https://hub.docker.com/r/stackstorm/packagingtest/) - containers where built artifacts are tested, i.e. _artifacts are installed, configuration is written and tests are performed_. 20 | - **Services** - these are different containers required for testing such as _rabbitmq and mongodb_ 21 | 22 | `Dockerfiles` sources are available at [StackStorm/st2-dockerfiles](https://github.com/stackstorm/st2-dockerfiles). 23 | 24 | The Packages build environment compose application brings a self-sufficient pipeline to deliver ready to use packages. 25 | 26 | # Usage 27 | 28 | It's very simple to invoke the whole build-test pipeline. 
First just make sure that [docker-compose.yml](docker-compose.yml) has your distro specification, after that issue the following commands: 29 | 30 | ```shell 31 | # (Optional) First clean out previous build containers 32 | docker compose kill 33 | docker compose rm -f 34 | 35 | # To build packages for ubuntu focal (--rm will wipe packaging runner container. All others will remain active). 36 | docker compose run --rm focal 37 | ``` 38 | 39 | Execution takes a while, so grab a cup of tea or coffee and wait until it finishes. When build and test processes succeed, you'll find the StackStorm packages in `/tmp/st2-packages` on your host machine: 40 | 41 | ```shell 42 | ls -l1 | grep ".deb$" 43 | -rw-r--r-- 1 root root 30872652 Feb 9 18:32 st2_1.4dev-1_amd64.deb 44 | ``` 45 | 46 | ## Manual testing inside the docker environment 47 | 48 | After the build and test stages are finished all docker containers remain active, so you are welcome to do more in-depth testing if desired. To do so simply run: 49 | 50 | ```shell 51 | docker ps 52 | # Find the required testing container 53 | # In our case it will be st2packages_jammytest_1 54 | 55 | # Simply exec to docker 56 | docker exec -it st2packages_jammytest_1 bash 57 | ``` 58 | 59 | Once done, you are inside the testing environment where all services are up and running. Don't forget to do (after exec): 60 | 61 | ```shell 62 | export TERM=xterm 63 | ``` 64 | 65 | At this point you can do any manual testing which is required. 66 | 67 | # Vagrant based build and test 68 | 69 | In order to build, package, install and test ST2 in an isolated Vagrant VM, run the following: 70 | 71 | ```shell 72 | vagrant up $TARGET 73 | ``` 74 | 75 | Where `$TARGET` is one of `focal`, `jammy`, `el8` or `el9`. If you are using `el8`, comment 76 | out the `vm_config.vm.provision :docker` line in the Vagrantfile. There is logic in `setup-vagrant.sh` 77 | to install docker in `el8`. 
78 | 79 | The following steps are run while provisioning the Vagrant VM: 80 | 81 | 1. Install `docker` that includes `docker compose` V2. 82 | 2. Run `docker compose run --rm $TARGET` to build, test and package ST2 as described in prior 83 | sections. 84 | 3. Install the packages built in step 2, unless the host `$ST2_INSTALL` environment variable is set to 85 | a value other than `yes`. 86 | 4. Execute the `st2-self-check` script, unless the host `$ST2_VERIFY` environment variable is set to 87 | a value other than `yes`. 88 | 89 | As currently implemented, it is not possible to bypass steps 1 and 2. In the future, we may want to 90 | consider allowing the host to provide existing ST2 packages, and install/self-check those in the 91 | Vagrant VM. 92 | 93 | To specify the ST2 source URL and REV (i.e., branch), use `ST2_GITURL` and `ST2_GITREV` environment 94 | variables on the host prior to provisioning the VM. 95 | 96 | Prior to running `st2-self-check`, the required auth token is generated using `st2 auth`. If necessary, 97 | you can change the default username and password passed to `st2 auth`. To do this, set the `ST2USER` 98 | and `ST2PASSWORD` environment variables on the host prior to provisioning the VM. The default values 99 | are `st2admin` and `Ch@ngeMe` respectively. 100 | 101 | # Installation 102 | 103 | Current community packages are hosted on https://packagecloud.io/StackStorm. 
For detailed instructions on how to install st2 and perform basic configuration, follow these instructions: 104 | 105 | - [Ubuntu/Debian](https://docs.stackstorm.com/install/deb.html) 106 | - [RHEL8/RockyLinux8](https://docs.stackstorm.com/install/rhel8.html) 107 | 108 | ## Adding Support For a New Distribution 109 | 110 | If you are adding support for a new distribution for which `packagingbuild` and `packagingtest` 111 | images are not yet published to Docker Hub and you want to test the build pipeline locally, you 112 | need to update `docker-compose.yml` file to use locally built Docker images. 113 | 114 | For example: 115 | 116 | ```yaml 117 | services: 118 | ... 119 | jammy: 120 | ... 121 | image: quay.io/stackstorm/packagingrunner 122 | ... 123 | ... 124 | jammybuild: 125 | ... 126 | image: jammybuild 127 | ... 128 | ... 129 | jammytest: 130 | ... 131 | image: jammytest 132 | ... 133 | ``` 134 | 135 | NOTE: Main ``distro`` definition (e.g. ``focal``, ``el8`` etc.) needs to use packaging runner image. 136 | 137 | As you can see, `image` attribute references local image tagged `jammybuild` instead of a 138 | remote image (e.g. `stackstorm/packagingbuild:jammy` or similar). 139 | 140 | Before that will work, you of course also need to build those images locally. 141 | 142 | For example: 143 | 144 | ```bash 145 | cd ~/st2packaging-dockerfiles/packagingbuild/jammy 146 | docker build -t jammybuild . 147 | 148 | cd ~/st2packaging-dockerfiles/packagingtest/jammy/systemd 149 | docker build -t jammytest . 
150 | ``` 151 | 152 | # License and Authors 153 | 154 | - Author:: StackStorm (st2-dev) () 155 | - Author:: Denis Baryshev () 156 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # 3 | require 'rspec/core/rake_task' 4 | require './rake/pipeline' 5 | require './rake/build/environment' 6 | 7 | # Import build tasks 8 | build_files = Dir.glob('rake/build/*.rake') 9 | build_files.each { |file| import file } 10 | 11 | task :default => ['build:all', 'setup:all'] 12 | 13 | # Hopefully it will speed up make calls from pip 14 | desc 'Store build node nproc value' 15 | task :nproc do 16 | pipeline do 17 | run hostname: opts[:buildnode] do 18 | capture(:nproc).strip rescue nil 19 | end 20 | end.tap do |nproc| 21 | pipeopts { build_nproc nproc } 22 | end 23 | end 24 | 25 | namespace :build do 26 | ## Default build task, triggers the whole build task pipeline. 27 | # 28 | task :all => [:nproc, 'upload:to_buildnode', 'upload:checkout', 'build:packages'] do 29 | pipeline do 30 | run(:local) {|o| execute :ls, "-l #{o[:artifact_dir]}", verbosity: :debug} 31 | end 32 | end 33 | 34 | ## Packages task and build multitask (which invokes builds concurrently) 35 | # 36 | task :packages => [:prebuild, :build] 37 | multitask :build => pipeopts.packages.map {|p| "package:#{p}"} 38 | 39 | ## Prebuild task invokes all packages prebuild tasks. 40 | # These task are executed sequentially (we require this not to mess up pip)! 41 | task :prebuild do 42 | pipeopts.packages.each do |p| 43 | task = "package:prebuild_#{p}" 44 | if Rake::Task.task_defined?(task) 45 | Rake::Task[task].invoke 46 | end 47 | end 48 | end 49 | end 50 | 51 | # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 52 | # SPECS SHOULD BE REWRITEN COMPLETLY THEY ARE SO BAD AND UGLY. 
53 | # But the are left for now since the do its work and we need 54 | # to ship packages faster. 55 | # 56 | namespace :spec do 57 | targets = [] 58 | 59 | Dir.glob('./rake/spec/*').each do |dir| 60 | next unless File.directory?(dir) 61 | targets << File.basename(dir) 62 | end 63 | 64 | task :all => targets 65 | 66 | targets.each do |target| 67 | RSpec::Core::RakeTask.new(target.to_sym) do |t| 68 | t.pattern = "./rake/spec/#{target}/*_spec.rb" 69 | end 70 | end 71 | end 72 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 5 | VAGRANTFILE_API_VERSION = "2" 6 | 7 | VIRTUAL_MACHINES = { 8 | :jammy => { 9 | :hostname => 'st2-packages-jammy', 10 | :box => 'ubuntu/jammy64', 11 | :ip => '192.168.16.27', 12 | }, 13 | :focal => { 14 | :hostname => 'st2-packages-focal', 15 | :box => 'ubuntu/focal64', 16 | :ip => '192.168.16.25', 17 | }, 18 | :el8 => { 19 | :hostname => 'st2-packages-el8', 20 | :box => 'rockylinux/8', 21 | :ip => '192.168.16.24', 22 | }, 23 | :el9 => { 24 | :hostname => 'st2-packages-el9', 25 | :box => 'rockylinux/9', 26 | :ip => '192.168.16.26', 27 | }, 28 | } 29 | 30 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 31 | VIRTUAL_MACHINES.each do |name, cfg| 32 | config.vm.define name do |vm_config| 33 | vm_config.vm.hostname = cfg[:hostname] 34 | vm_config.vm.box = cfg[:box] 35 | 36 | # Give VM access to all CPU cores on the host 37 | # docker-compose & rake build can benefit from more CPUs 38 | host_os = RbConfig::CONFIG['host_os'] 39 | if host_os =~ /darwin/ 40 | cpus = `sysctl -n hw.ncpu`.to_i 41 | elsif host_os =~ /linux/ 42 | cpus = `nproc`.to_i 43 | else # sorry Windows folks, I can't help you 44 | cpus = 2 45 | end 46 | 47 | # Box Specifications 48 | vm_config.vm.provider 
:virtualbox do |vb| 49 | vb.name = "#{cfg[:hostname]}" 50 | # NOTE: With 2048MB, system slows due to kswapd. Recommend at least 4096MB. 51 | vb.customize ['modifyvm', :id, '--memory', '4096'] 52 | vb.customize ["modifyvm", :id, "--cpus", cpus] 53 | 54 | # Docker connection fix 55 | vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 56 | vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 57 | vb.customize ["modifyvm", :id, "--nictype1", "virtio"] 58 | end 59 | 60 | # Sync folder using NFS 61 | # vm_config.vm.synced_folder '.', '/vagrant', nfs: true 62 | 63 | # Configure a private network 64 | vm_config.vm.network "private_network", ip: "#{cfg[:ip]}" 65 | 66 | # Public (bridged) network may come handy for external access to VM (e.g. sensor development) 67 | # See https://www.vagrantup.com/docs/networking/public_network.html 68 | # st2.vm.network "public_network", bridge: 'en0: Wi-Fi (AirPort)' 69 | 70 | # Install docker-engine 71 | vm_config.vm.provision :docker 72 | 73 | vm_config.vm.provision 'shell', path: 'scripts/setup-vagrant.sh', privileged: false, env: { 74 | "ST2_TARGET" => "#{name}", 75 | "ST2_USER" => ENV['ST2USER'] ? ENV['ST2USER'] : 'st2admin', 76 | "ST2_PASSWORD" => ENV['ST2PASSWORD'] ? ENV['ST2PASSWORD'] : 'st2admin', 77 | "ST2_PACKAGES" => ENV['ST2_PACKAGES'] ? ENV['ST2_PACKAGES'] : 'st2', 78 | "ST2_INSTALL" => ENV['ST2_INSTALL'] ? ENV['ST2_INSTALL'] : 'yes', 79 | "ST2_VERIFY" => ENV['ST2_VERIFY'] ? 
ENV['ST2_VERIFY'] : 'yes', 80 | "ST2_GITURL" => ENV['ST2_GITURL'], 81 | "ST2_GITREV" => ENV['ST2_GITREV'], 82 | } 83 | end 84 | end 85 | end 86 | -------------------------------------------------------------------------------- /docker-compose.circle.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | services: 3 | focal: 4 | image: quay.io/stackstorm/packagingrunner 5 | extends: 6 | file: docker-compose.override.yml 7 | service: suite-compose 8 | environment: 9 | - BUILDNODE=focalbuild 10 | - TESTNODE=focaltest 11 | - ST2_PACKAGES=st2 12 | links: 13 | - focalbuild 14 | - focaltest 15 | - rabbitmq 16 | - mongodb 17 | - redis 18 | 19 | jammy: 20 | image: quay.io/stackstorm/packagingrunner 21 | extends: 22 | file: docker-compose.override.yml 23 | service: suite-compose 24 | environment: 25 | - BUILDNODE=jammybuild 26 | - TESTNODE=jammytest 27 | - ST2_PACKAGES=st2 28 | links: 29 | - jammybuild 30 | - jammytest 31 | - rabbitmq 32 | - mongodb 33 | - redis 34 | 35 | el8: 36 | image: quay.io/stackstorm/packagingrunner 37 | extends: 38 | file: docker-compose.override.yml 39 | service: suite-compose 40 | environment: 41 | - BUILDNODE=rockylinux8build 42 | - TESTNODE=rockylinux8test 43 | - ST2_PACKAGES=st2 44 | links: 45 | - rockylinux8build 46 | - rockylinux8test 47 | - rabbitmq 48 | - mongodb 49 | - redis 50 | 51 | el9: 52 | image: quay.io/stackstorm/packagingrunner 53 | extends: 54 | file: docker-compose.override.yml 55 | service: suite-compose 56 | environment: 57 | - BUILDNODE=rockylinux9build 58 | - TESTNODE=rockylinux9test 59 | - ST2_PACKAGES=st2 60 | links: 61 | - rockylinux9build 62 | - rockylinux9test 63 | - rabbitmq 64 | - mongodb 65 | - redis 66 | 67 | ## Package build nodes 68 | # 69 | focalbuild: 70 | image: stackstorm/packagingbuild:focal 71 | volumes_from: 72 | - st2-packages-vol 73 | 74 | jammybuild: 75 | image: stackstorm/packagingbuild:jammy 76 | volumes_from: 77 | - st2-packages-vol 78 | 79 | 
rockylinux8build: 80 | image: stackstorm/packagingbuild:rockylinux8 81 | extends: 82 | file: docker-compose.override.yml 83 | service: volumes-compose 84 | 85 | rockylinux9build: 86 | image: stackstorm/packagingbuild:rockylinux9 87 | extends: 88 | file: docker-compose.override.yml 89 | service: volumes-compose 90 | 91 | ## Package testing nodes 92 | # 93 | focaltest: 94 | image: stackstorm/packagingtest:focal-systemd 95 | privileged: true 96 | volumes_from: 97 | - st2-packages-vol 98 | volumes: 99 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 100 | 101 | jammytest: 102 | image: stackstorm/packagingtest:jammy-systemd 103 | privileged: true 104 | volumes_from: 105 | - st2-packages-vol 106 | volumes: 107 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 108 | 109 | rockylinux8test: 110 | image: stackstorm/packagingtest:rockylinux8-systemd 111 | privileged: true 112 | cap_add: 113 | - SYS_ADMIN 114 | security_opt: 115 | - seccomp:unconfined 116 | volumes: 117 | - /sys/fs/cgroup:/sys/fs/cgroup 118 | 119 | rockylinux9test: 120 | image: stackstorm/packagingtest:rockylinux9-systemd 121 | privileged: true 122 | cap_add: 123 | - SYS_ADMIN 124 | security_opt: 125 | - seccomp:unconfined 126 | volumes: 127 | - /sys/fs/cgroup:/sys/fs/cgroup 128 | 129 | rabbitmq: 130 | image: rabbitmq:3.12-management 131 | privileged: true 132 | hostname: rabbit 133 | ports: 134 | - "15672:15672" # management plugin port 135 | - "5672:5672" 136 | 137 | mongodb: 138 | image: mongo:4.4 139 | ports: 140 | - "27017:27017" 141 | 142 | redis: 143 | image: redis:7.2.4 144 | ports: 145 | - "6379:6379" 146 | -------------------------------------------------------------------------------- /docker-compose.circle2.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | focal: 5 | image: quay.io/stackstorm/packagingrunner 6 | working_dir: /root/st2-packages 7 | volumes_from: 8 | - container:st2-packages-vol 9 | environment: 10 | - BUILDNODE=focalbuild 11 | - 
TESTNODE=focaltest 12 | - ST2_PACKAGES=st2 13 | links: 14 | - focalbuild 15 | - focaltest 16 | - rabbitmq 17 | - mongodb 18 | - redis 19 | 20 | jammy: 21 | image: quay.io/stackstorm/packagingrunner 22 | working_dir: /root/st2-packages 23 | volumes_from: 24 | - container:st2-packages-vol 25 | environment: 26 | - BUILDNODE=jammybuild 27 | - TESTNODE=jammytest 28 | - ST2_PACKAGES=st2 29 | links: 30 | - jammybuild 31 | - jammytest 32 | - rabbitmq 33 | - mongodb 34 | - redis 35 | 36 | el8: 37 | image: quay.io/stackstorm/packagingrunner 38 | working_dir: /root/st2-packages 39 | volumes_from: 40 | - container:st2-packages-vol 41 | environment: 42 | - BUILDNODE=rockylinux8build 43 | - TESTNODE=rockylinux8test 44 | links: 45 | - rockylinux8build 46 | - rockylinux8test 47 | - rabbitmq 48 | - mongodb 49 | - redis 50 | 51 | el9: 52 | image: quay.io/stackstorm/packagingrunner 53 | working_dir: /root/st2-packages 54 | volumes_from: 55 | - container:st2-packages-vol 56 | environment: 57 | - BUILDNODE=rockylinux9build 58 | - TESTNODE=rockylinux9test 59 | links: 60 | - rockylinux9build 61 | - rockylinux9test 62 | - rabbitmq 63 | - mongodb 64 | - redis 65 | 66 | ## Package build nodes 67 | # 68 | focalbuild: 69 | image: stackstorm/packagingbuild:focal 70 | volumes_from: 71 | - container:st2-packages-vol 72 | 73 | jammybuild: 74 | image: stackstorm/packagingbuild:jammy 75 | volumes_from: 76 | - container:st2-packages-vol 77 | 78 | rockylinux8build: 79 | image: stackstorm/packagingbuild:rockylinux8 80 | volumes_from: 81 | - container:st2-packages-vol 82 | 83 | rockylinux9build: 84 | image: stackstorm/packagingbuild:rockylinux9 85 | volumes_from: 86 | - container:st2-packages-vol 87 | 88 | ## Package testing nodes 89 | # 90 | focaltest: 91 | image: stackstorm/packagingtest:focal-systemd 92 | privileged: true 93 | cgroup: host 94 | volumes_from: 95 | - container:st2-packages-vol 96 | volumes: 97 | - /sys/fs/cgroup:/sys/fs/cgroup 98 | 99 | jammytest: 100 | image: 
stackstorm/packagingtest:jammy-systemd 101 | privileged: true 102 | cgroup: host 103 | volumes_from: 104 | - container:st2-packages-vol 105 | volumes: 106 | - /sys/fs/cgroup:/sys/fs/cgroup 107 | 108 | rockylinux8test: 109 | image: stackstorm/packagingtest:rockylinux8-systemd 110 | privileged: true 111 | cgroup: host 112 | cap_add: 113 | - SYS_ADMIN 114 | security_opt: 115 | - seccomp:unconfined 116 | volumes_from: 117 | - container:st2-packages-vol 118 | volumes: 119 | - /sys/fs/cgroup:/sys/fs/cgroup 120 | 121 | rockylinux9test: 122 | image: stackstorm/packagingtest:rockylinux9-systemd 123 | privileged: true 124 | cgroup: host 125 | cap_add: 126 | - SYS_ADMIN 127 | security_opt: 128 | - seccomp:unconfined 129 | volumes_from: 130 | - container:st2-packages-vol 131 | volumes: 132 | - /sys/fs/cgroup:/sys/fs/cgroup 133 | 134 | rabbitmq: 135 | image: rabbitmq:3.12-management 136 | privileged: true 137 | hostname: rabbit 138 | ports: 139 | - "15672:15672" # managment plugin port 140 | - "5672:5672" 141 | 142 | mongodb: 143 | image: mongo:4.4 144 | ports: 145 | - "27017:27017" 146 | 147 | redis: 148 | image: redis:7.2.4 149 | ports: 150 | - "6379:6379" 151 | -------------------------------------------------------------------------------- /docker-compose.override.yml: -------------------------------------------------------------------------------- 1 | ## Suites configuration 2 | # 3 | version: "3.8" 4 | 5 | services: 6 | suite: 7 | image: fake 8 | working_dir: /root/st2-packages 9 | environment: 10 | - DEBUG_LEVEL=0 11 | volumes: 12 | - .:/root/st2-packages 13 | - /tmp/st2-packages:/root/build 14 | 15 | suite-compose: 16 | image: fake 17 | extends: 18 | file: docker-compose.override.yml 19 | service: suite 20 | 21 | suite-circle: 22 | image: fake 23 | command: build 24 | extends: 25 | file: docker-compose.override.yml 26 | service: suite 27 | 28 | ## Volumes passed during compose or ci builds 29 | # 30 | volumes-compose: 31 | image: fake 32 | volumes: 33 | - 
/tmp/st2-packages:/root/build 34 | 35 | volumes-circle: 36 | image: fake 37 | volumes: 38 | - /tmp/st2-packages:/root/build 39 | - /tmp/st2-packages/log/st2:/var/log/st2 40 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | focal: 5 | image: quay.io/stackstorm/packagingrunner 6 | extends: 7 | file: docker-compose.override.yml 8 | service: suite-compose 9 | environment: 10 | - BUILDNODE=focalbuild 11 | - TESTNODE=focaltest 12 | - ST2_PACKAGES=st2 13 | links: 14 | - focalbuild 15 | - focaltest 16 | - rabbitmq 17 | - mongodb 18 | - redis 19 | 20 | jammy: 21 | image: quay.io/stackstorm/packagingrunner 22 | extends: 23 | file: docker-compose.override.yml 24 | service: suite-compose 25 | environment: 26 | - BUILDNODE=jammybuild 27 | - TESTNODE=jammytest 28 | - ST2_PACKAGES=st2 29 | links: 30 | - jammybuild 31 | - jammytest 32 | - rabbitmq 33 | - mongodb 34 | - redis 35 | 36 | el8: 37 | image: quay.io/stackstorm/packagingrunner 38 | extends: 39 | file: docker-compose.override.yml 40 | service: suite-compose 41 | environment: 42 | - BUILDNODE=rockylinux8build 43 | - TESTNODE=rockylinux8test 44 | - ST2_PACKAGES=st2 45 | links: 46 | - rockylinux8build 47 | - rockylinux8test 48 | - rabbitmq 49 | - mongodb 50 | - redis 51 | 52 | el9: 53 | image: quay.io/stackstorm/packagingrunner 54 | extends: 55 | file: docker-compose.override.yml 56 | service: suite-compose 57 | environment: 58 | - BUILDNODE=rockylinux9build 59 | - TESTNODE=rockylinux9test 60 | - ST2_PACKAGES=st2 61 | links: 62 | - rockylinux9build 63 | - rockylinux9test 64 | - rabbitmq 65 | - mongodb 66 | - redis 67 | 68 | ## Package build nodes 69 | # 70 | focalbuild: 71 | image: stackstorm/packagingbuild:focal 72 | extends: 73 | file: docker-compose.override.yml 74 | service: volumes-compose 75 | 76 | jammybuild: 77 | image: 
stackstorm/packagingbuild:jammy 78 | extends: 79 | file: docker-compose.override.yml 80 | service: volumes-compose 81 | 82 | rockylinux8build: 83 | image: stackstorm/packagingbuild:rockylinux8 84 | extends: 85 | file: docker-compose.override.yml 86 | service: volumes-compose 87 | 88 | rockylinux9build: 89 | image: stackstorm/packagingbuild:rockylinux9 90 | extends: 91 | file: docker-compose.override.yml 92 | service: volumes-compose 93 | 94 | ## Package testing nodes 95 | # 96 | focaltest: 97 | image: stackstorm/packagingtest:focal-systemd 98 | privileged: true 99 | cgroup: host 100 | extends: 101 | file: docker-compose.override.yml 102 | service: volumes-compose 103 | volumes: 104 | - /sys/fs/cgroup:/sys/fs/cgroup 105 | 106 | jammytest: 107 | image: stackstorm/packagingtest:jammy-systemd 108 | privileged: true 109 | cgroup: host 110 | extends: 111 | file: docker-compose.override.yml 112 | service: volumes-compose 113 | volumes: 114 | - /sys/fs/cgroup:/sys/fs/cgroup 115 | 116 | rockylinux8test: 117 | image: stackstorm/packagingtest:rockylinux8-systemd 118 | privileged: true 119 | cgroup: host 120 | cap_add: 121 | - SYS_ADMIN 122 | security_opt: 123 | - seccomp:unconfined 124 | extends: 125 | file: docker-compose.override.yml 126 | service: volumes-compose 127 | volumes: 128 | - /sys/fs/cgroup:/sys/fs/cgroup 129 | 130 | rockylinux9test: 131 | image: stackstorm/packagingtest:rockylinux9-systemd 132 | privileged: true 133 | cgroup: host 134 | cap_add: 135 | - SYS_ADMIN 136 | security_opt: 137 | - seccomp:unconfined 138 | extends: 139 | file: docker-compose.override.yml 140 | service: volumes-compose 141 | volumes: 142 | - /sys/fs/cgroup:/sys/fs/cgroup 143 | 144 | rabbitmq: 145 | image: rabbitmq:3.12-management 146 | privileged: true 147 | hostname: rabbit 148 | ports: 149 | - "15672:15672" # managment plugin port 150 | - "5672:5672" 151 | 152 | mongodb: 153 | image: mongo:4.4 154 | ports: 155 | - "27017:27017" 156 | 157 | redis: 158 | image: redis:7.2.4 159 | ports: 
160 | - "6379:6379" 161 | -------------------------------------------------------------------------------- /packages/st2/Makefile: -------------------------------------------------------------------------------- 1 | WHEELDIR ?= /tmp/wheelhouse 2 | ST2_COMPONENT := $(notdir $(CURDIR)) 3 | ST2PKG_RELEASE ?= 1 4 | CHANGELOG_COMMENT ?= "automated build, version: $(ST2PKG_VERSION)" 5 | 6 | ifneq (,$(wildcard /etc/debian_version)) 7 | DEBIAN := 1 8 | #DEB_EPOCH := $(shell echo $(ST2PKG_VERSION) | grep -q dev || echo '1') 9 | DEB_DISTRO := $(shell lsb_release -cs) 10 | else ifneq (,$(wildcard /etc/rocky-release)) 11 | EL_DISTRO:=rocky 12 | EL_VERSION := $(shell cat /etc/rocky-release | grep -oP '(?<= )[0-9]+(?=\.)') 13 | REDHAT := 1 14 | else ifneq (,$(wildcard /etc/redhat-release)) 15 | EL_DISTRO:=redhat 16 | EL_VERSION := $(shell cat /etc/redhat-release | grep -oP '(?<= )[0-9]+(?=\.)') 17 | REDHAT := 1 18 | else 19 | REDHAT := 1 20 | DEB_DISTRO := unstable 21 | endif 22 | 23 | ifeq ($(DEB_DISTRO),focal) 24 | PYTHON_BINARY := /usr/bin/python3 25 | PIP_BINARY := /usr/local/bin/pip3.8 26 | else ifeq ($(EL_VERSION),8) 27 | PYTHON_BINARY := /usr/bin/python3.8 28 | PIP_BINARY := /usr/local/bin/pip3.8 29 | PYTHON_ALT_BINARY := python3.8 30 | else 31 | PYTHON_BINARY := /usr/bin/python3 32 | PIP_BINARY := pip3 33 | PYTHON_ALT_BINARY := python3 34 | endif 35 | 36 | RUNNERS := $(shell ls ../contrib/runners) 37 | 38 | # Moved from top of file to handle when only py2 or py3 available 39 | ST2PKG_VERSION ?= $(shell $(PYTHON_BINARY) -c "from $(ST2_COMPONENT) import __version__; print(__version__),") 40 | 41 | # Note: We dynamically obtain the version, this is required because dev 42 | # build versions don't store correct version identifier in __init__.py 43 | # and we need setup.py to normalize it (e.g. 
1.4dev -> 1.4.dev0) 44 | ST2PKG_NORMALIZED_VERSION ?= $(shell $(PYTHON_BINARY) setup.py --version || echo "failed_to_retrieve_version") 45 | 46 | # Makefile function to retry the failed command 'N' times 47 | # Make sure you use it for nothing but networking stuff (think about race conditons) 48 | # Example: $(call retry,3,some_script.sh) 49 | retry = $(2) $(foreach t,$(shell seq 1 ${1}),|| (echo -e "\033[33m Failed ($$?): '$(2)'\n Retrying $t ... \033[0m"; $(2))) 50 | 51 | .PHONY: all install wheelhouse 52 | all: info install 53 | 54 | .PHONY: info 55 | info: 56 | @echo "DEBIAN=$(DEBIAN)" 57 | @echo "REDHAT=$(REDHAT)" 58 | @echo "EL_DISTRO=$(EL_DISTRO)" 59 | @echo "EL_VERSION=$(EL_VERSION)" 60 | @echo "DEB_DISTRO=$(DEB_DISTRO)" 61 | @echo "PYTHON_BINARY=$(PYTHON_BINARY)" 62 | @echo "PIP_BINARY=$(PIP_BINARY)" 63 | 64 | install: wheelhouse changelog 65 | 66 | post_install: 67 | # post_install is triggered from debian/rules file. 68 | # Don't call it from makefile install target! 69 | install -m640 /dev/null $(DESTDIR)/etc/st2/htpasswd 70 | install -m644 ../conf/st2.package.conf $(DESTDIR)/etc/st2/st2.conf 71 | install -m644 ../conf/logrotate.conf $(DESTDIR)/etc/logrotate.d/st2 72 | install -m644 ../st2actions/conf/logging.conf $(DESTDIR)/etc/st2/logging.actionrunner.conf 73 | install -m644 ../st2actions/conf/syslog.conf $(DESTDIR)/etc/st2/syslog.actionrunner.conf 74 | install -m644 ../st2actions/conf/logging.notifier.conf $(DESTDIR)/etc/st2/logging.notifier.conf 75 | install -m644 ../st2actions/conf/syslog.notifier.conf $(DESTDIR)/etc/st2/syslog.notifier.conf 76 | install -m644 ../st2actions/conf/logging.workflowengine.conf $(DESTDIR)/etc/st2/logging.workflowengine.conf 77 | install -m644 ../st2actions/conf/syslog.workflowengine.conf $(DESTDIR)/etc/st2/syslog.workflowengine.conf 78 | install -m644 ../st2api/conf/logging.conf $(DESTDIR)/etc/st2/logging.api.conf 79 | install -m644 ../st2api/conf/logging.gunicorn.conf $(DESTDIR)/etc/st2/logging.api.gunicorn.conf 80 | 
install -m644 ../st2api/conf/syslog.conf $(DESTDIR)/etc/st2/syslog.api.conf 81 | install -m644 ../st2stream/conf/logging.conf $(DESTDIR)/etc/st2/logging.stream.conf 82 | install -m644 ../st2stream/conf/logging.gunicorn.conf $(DESTDIR)/etc/st2/logging.stream.gunicorn.conf 83 | install -m644 ../st2stream/conf/syslog.conf $(DESTDIR)/etc/st2/syslog.stream.conf 84 | install -m644 ../st2auth/conf/logging.conf $(DESTDIR)/etc/st2/logging.auth.conf 85 | install -m644 ../st2auth/conf/logging.gunicorn.conf $(DESTDIR)/etc/st2/logging.auth.gunicorn.conf 86 | install -m644 ../st2auth/conf/syslog.conf $(DESTDIR)/etc/st2/syslog.auth.conf 87 | install -m644 ../st2reactor/conf/logging.timersengine.conf $(DESTDIR)/etc/st2/logging.timersengine.conf 88 | install -m644 ../st2reactor/conf/syslog.timersengine.conf $(DESTDIR)/etc/st2/syslog.timersengine.conf 89 | install -m644 ../st2reactor/conf/logging.sensorcontainer.conf $(DESTDIR)/etc/st2/logging.sensorcontainer.conf 90 | install -m644 ../st2reactor/conf/syslog.sensorcontainer.conf $(DESTDIR)/etc/st2/syslog.sensorcontainer.conf 91 | install -m644 ../st2reactor/conf/logging.rulesengine.conf $(DESTDIR)/etc/st2/logging.rulesengine.conf 92 | install -m644 ../st2reactor/conf/syslog.rulesengine.conf $(DESTDIR)/etc/st2/syslog.rulesengine.conf 93 | install -m644 ../st2reactor/conf/logging.garbagecollector.conf $(DESTDIR)/etc/st2/logging.garbagecollector.conf 94 | install -m644 ../st2reactor/conf/syslog.garbagecollector.conf $(DESTDIR)/etc/st2/syslog.garbagecollector.conf 95 | install -m644 ../st2actions/conf/logging.scheduler.conf $(DESTDIR)/etc/st2/logging.scheduler.conf 96 | install -m644 ../st2actions/conf/syslog.scheduler.conf $(DESTDIR)/etc/st2/syslog.scheduler.conf 97 | install -m755 ../st2client/conf/st2.complete.sh $(DESTDIR)/etc/bash_completion.d/st2 98 | install -m755 bin/runners.sh $(DESTDIR)/opt/stackstorm/st2/bin/runners.sh 99 | sed -i -r "/args\s*=\s*/s%logs%/var/log/st2%g" $(DESTDIR)/etc/st2/logging.*conf 100 | sed -i 
"/\[logger_root\]/,/\[.*\]\|\s*$$/ {s/level=DEBUG/level=INFO/}" $(DESTDIR)/etc/st2/logging.*conf 101 | sed -i "/\[logger_root\]/,/\[.*\]\|\s*$$/ {s/level=DEBUG/level=INFO/}" $(DESTDIR)/etc/st2/syslog.*conf 102 | ifeq ($(REDHAT),1) 103 | if [[ `$(PYTHON_ALT_BINARY) /root/scripts/platform_major_version.py` = 8 ]]; then sed -i -r 's/virtualenv_opts\s*=.*/virtualenv_opts =/' $(DESTDIR)/etc/st2/st2.conf; fi 104 | endif 105 | 106 | populate_version: .stamp-populate_version 107 | .stamp-populate_version: 108 | # populate version should be run before any pip/setup.py works 109 | sh ../scripts/populate-version.sh 110 | touch $@ 111 | 112 | requirements: 113 | $(PYTHON_BINARY) ../scripts/fixate-requirements.py -s ../st2*/in-requirements.txt ../contrib/runners/*/in-requirements.txt -f ../fixed-requirements.txt 114 | cat requirements.txt 115 | 116 | changelog: populate_version 117 | ifeq ($(DEBIAN),1) 118 | [ -z "$(DEB_EPOCH)" ] && _epoch="" || _epoch="$(DEB_EPOCH):"; \ 119 | dch -m --force-distribution -v$${_epoch}$(ST2PKG_VERSION)-$(ST2PKG_RELEASE) -D$(DEB_DISTRO) $(CHANGELOG_COMMENT) 120 | endif 121 | 122 | wheelhouse: .build-runners .stamp-wheelhouse 123 | .stamp-wheelhouse: | populate_version requirements inject-deps 124 | cat requirements.txt 125 | # Try to install wheels 2x in case the first one fails 126 | $(PIP_BINARY) --use-deprecated=legacy-resolver wheel --wheel-dir=$(WHEELDIR) --find-links=$(WHEELDIR) -r requirements.txt || \ 127 | $(PIP_BINARY) --use-deprecated=legacy-resolver wheel --wheel-dir=$(WHEELDIR) --find-links=$(WHEELDIR) -r requirements.txt 128 | touch $@ 129 | 130 | .build-runners: 131 | @echo "##########################################" 132 | @echo "# Building Runners #" 133 | @echo "##########################################" 134 | @echo $(RUNNERS) 135 | @for runner in $(RUNNERS); do \ 136 | echo "Installing $$runner"; \ 137 | $(PIP_BINARY) wheel --wheel-dir=$(WHEELDIR) --find-links=$(WHEELDIR) -r ../contrib/runners/$$runner/requirements.txt 
../contrib/runners/$$runner; \ 138 | done 139 | touch $@ 140 | 141 | # Note: We want to dynamically inject "st2client" dependency. This way we can 142 | # pin it to the version we build so the requirement is satisfied by locally 143 | # built wheel and not version from PyPi 144 | inject-deps: .stamp-inject-deps 145 | .stamp-inject-deps: 146 | echo "st2client==$(ST2PKG_NORMALIZED_VERSION)" >> requirements.txt 147 | touch $@ 148 | -------------------------------------------------------------------------------- /packages/st2/bin/runners.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Default number of workers 4 | WORKERS="${WORKERS:-10}" 5 | 6 | # Choose init system to perform actions with a service. 7 | choose_sysinit() { 8 | local service="$1" svinit="sysv" 9 | if [ -d /run/systemd/system ]; then 10 | svinit=systemd 11 | else 12 | if [ ! -x /etc/init.d/${service} ]; then 13 | >&2 echo "Supported init systems: systemd and sysv" 14 | >&2 echo "/etc/init.d/${service} not found or disabled" 15 | exit 99 16 | fi 17 | fi 18 | echo $svinit 19 | } 20 | 21 | # Perform service action over the given number of workers. 22 | spawn_workers() { 23 | local action=$1 init= seq= 24 | seq=$(bash -c "printf '%g\\n' {1..$WORKERS}") 25 | 26 | # Choose init system and exit if it's not supported. 27 | init=$(choose_sysinit st2actionrunner) 28 | [ $? -gt 0 ] && exit $? 29 | 30 | case $init in 31 | systemd) 32 | echo "$seq" | xargs -I{} /bin/systemctl $action \ 33 | st2actionrunner@{} 34 | ;; 35 | sysv) 36 | echo "$seq" | xargs -I{} /bin/sh -c \ 37 | "WORKERID={} /etc/init.d/st2actionrunner-worker $action" 38 | ;; 39 | esac 40 | # return 1 in case if xargs failed any invoked commands. 
41 | retval=$?; [ $retval -ge 123 ] && return 1 || return $retval 42 | } 43 | 44 | # Perform service action on all actionrunners 45 | if [ -z "$1" ]; then 46 | echo >&2 "Usage: $0 action" 47 | exit 99 48 | fi 49 | 50 | spawn_workers $1 51 | -------------------------------------------------------------------------------- /packages/st2/component.makefile: -------------------------------------------------------------------------------- 1 | WHEELDIR ?= /tmp/wheelhouse 2 | ST2_COMPONENT := $(notdir $(CURDIR)) 3 | ST2PKG_RELEASE ?= 1 4 | 5 | ifneq (,$(wildcard /etc/debian_version)) 6 | DEBIAN := 1 7 | DEB_DISTRO := $(shell lsb_release -cs) 8 | DESTDIR ?= $(CURDIR)/debian/$(ST2_COMPONENT) 9 | else ifneq (,$(wildcard /etc/rocky-release)) 10 | EL_DISTRO := rocky 11 | EL_VERSION := $(shell cat /etc/rocky-release | grep -oP '(?<= )[0-9]+(?=\.)') 12 | REDHAT := 1 13 | else ifneq (,$(wildcard /etc/redhat-release)) 14 | EL_DISTRO := redhat 15 | EL_VERSION := $(shell cat /etc/redhat-release | grep -oP '(?<= )[0-9]+(?=\.)') 16 | REDHAT := 1 17 | else 18 | REDHAT := 1 19 | DEB_DISTRO := unstable 20 | endif 21 | 22 | ifeq ($(DEB_DISTRO),focal) 23 | PYTHON_BINARY := /usr/bin/python3 24 | PIP_BINARY := /usr/local/bin/pip3.8 25 | else ifeq ($(EL_VERSION),8) 26 | PYTHON_BINARY := /usr/bin/python3.8 27 | PIP_BINARY := /usr/local/bin/pip3.8 28 | else 29 | PYTHON_BINARY := /usr/bin/python3 30 | PIP_BINARY := pip3 31 | PYTHON_ALT_BINARY := python3 32 | endif 33 | 34 | # Moved from top of file to handle when only py2 or py3 available 35 | ST2PKG_VERSION ?= $(shell $(PYTHON_BINARY) -c "from $(ST2_COMPONENT) import __version__; print(__version__),") 36 | 37 | # Note: We dynamically obtain the version, this is required because dev 38 | # build versions don't store correct version identifier in __init__.py 39 | # and we need setup.py to normalize it (e.g. 
1.4dev -> 1.4.dev0) 40 | ST2PKG_NORMALIZED_VERSION ?= $(shell $(PYTHON_BINARY) setup.py --version || echo "failed_to_retrieve_version") 41 | 42 | .PHONY: info 43 | info: 44 | @echo "DEBIAN=$(DEBIAN)" 45 | @echo "REDHAT=$(REDHAT)" 46 | @echo "DEB_DISTRO=$(DEB_DISTRO)" 47 | @echo "PYTHON_BINARY=$(PYTHON_BINARY)" 48 | @echo "PIP_BINARY=$(PIP_BINARY)" 49 | @echo "EL_VERSION=$(EL_VERSION)" 50 | @echo "EL_DISTRO=$(EL_DISTRO)" 51 | $(PIP_BINARY) --version 52 | 53 | .PHONY: populate_version requirements wheelhouse bdist_wheel 54 | all: info populate_version requirements bdist_wheel 55 | 56 | populate_version: .stamp-populate_version 57 | .stamp-populate_version: 58 | # populate version should be run before any pip/setup.py works 59 | sh ../scripts/populate-version.sh 60 | touch $@ 61 | 62 | requirements: .stamp-requirements 63 | .stamp-requirements: 64 | $(PYTHON_BINARY) ../scripts/fixate-requirements.py -s in-requirements.txt -f ../fixed-requirements.txt 65 | cat requirements.txt 66 | 67 | wheelhouse: .stamp-wheelhouse 68 | .stamp-wheelhouse: | populate_version requirements 69 | # Install wheels into shared location 70 | cat requirements.txt 71 | # Try to install wheels 2x in case the first one fails 72 | $(PIP_BINARY) --use-deprecated=legacy-resolver wheel --wheel-dir=$(WHEELDIR) --find-links=$(WHEELDIR) -r requirements.txt || \ 73 | $(PIP_BINARY) --use-deprecated=legacy-resolver wheel --wheel-dir=$(WHEELDIR) --find-links=$(WHEELDIR) -r requirements.txt 74 | touch $@ 75 | 76 | bdist_wheel: .stamp-bdist_wheel 77 | .stamp-bdist_wheel: | populate_version requirements inject-deps 78 | cat requirements.txt 79 | # We need to install these python packages to handle rpmbuild 4.14 in EL8 80 | ifeq ($(EL_VERSION),8) 81 | $(PIP_BINARY) install wheel setuptools virtualenv 82 | $(PIP_BINARY) install cryptography 83 | endif 84 | $(PYTHON_BINARY) setup.py bdist_wheel -d $(WHEELDIR) || \ 85 | $(PYTHON_BINARY) setup.py bdist_wheel -d $(WHEELDIR) 86 | touch $@ 87 | 88 | # Note: We want to 
dynamically inject "st2client" dependency. This way we can 89 | # pin it to the version we build so the requirement is satisfied by locally 90 | # built wheel and not version from PyPi 91 | inject-deps: .stamp-inject-deps 92 | .stamp-inject-deps: 93 | echo "st2client==$(ST2PKG_NORMALIZED_VERSION)" >> requirements.txt 94 | touch $@ 95 | -------------------------------------------------------------------------------- /packages/st2/debian/README.Debian: -------------------------------------------------------------------------------- 1 | st2 for Debian 2 | -------------------- 3 | 4 | 5 | 6 | -- StackStorm Engineering Tue, 30 Jun 2015 11:56:21 +0000 7 | -------------------------------------------------------------------------------- /packages/st2/debian/README.source: -------------------------------------------------------------------------------- 1 | st2 for Debian 2 | -------------------- 3 | 4 | 6 | 7 | 8 | 9 | -- StackStorm Engineering Tue, 30 Jun 2015 11:56:21 +0000 10 | 11 | -------------------------------------------------------------------------------- /packages/st2/debian/changelog: -------------------------------------------------------------------------------- 1 | st2 (0.10.dev0-1) unstable; urgency=low 2 | 3 | * Initial release (Closes: #nnnn) 4 | 5 | -- StackStorm Engineering Tue, 30 Jun 2015 11:56:21 +0000 6 | -------------------------------------------------------------------------------- /packages/st2/debian/compat: -------------------------------------------------------------------------------- 1 | 10 -------------------------------------------------------------------------------- /packages/st2/debian/control: -------------------------------------------------------------------------------- 1 | Source: st2 2 | Section: Python 3 | Priority: optional 4 | Maintainer: StackStorm Engineering 5 | Build-Depends: debhelper (>= 9), 6 | python3, 7 | dh-virtualenv (>= 0.8), 8 | libldap2-dev, 9 | libsasl2-dev 10 | Standards-Version: 3.9.5 11 | Homepage: 
http://stackstorm.com/ 12 | Vcs-Git: git://github.com/stackstorm/st2.git 13 | Vcs-Browser: https://github.com/stackstorm/st2 14 | 15 | Package: st2 16 | Architecture: any 17 | Pre-Depends: dpkg (>= 1.16.16), ${pre:Depends}, ${misc:Pre-Depends}, adduser 18 | Depends: ${shlibs:Depends}, ${misc:Depends}, sudo, adduser, ${Depends}, libssl-dev, libffi-dev, git, libpam0g, openssh-server, openssh-client, bash, netbase 19 | Conflicts: st2common 20 | Description: StackStorm Event-driven automation 21 | Package is full standalone st2 installation including 22 | all components 23 | -------------------------------------------------------------------------------- /packages/st2/debian/copyright: -------------------------------------------------------------------------------- 1 | Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Upstream-Name: st2 3 | Source: 4 | 5 | Files: * 6 | Copyright: 7 | 8 | License: Apache-2.0 9 | 10 | Files: debian/* 11 | Copyright: 2015 StackStorm Engineering 12 | License: Apache-2.0 13 | 14 | License: Apache-2.0 15 | Licensed under the Apache License, Version 2.0 (the "License"); 16 | you may not use this file except in compliance with the License. 17 | You may obtain a copy of the License at 18 | . 19 | http://www.apache.org/licenses/LICENSE-2.0 20 | . 21 | Unless required by applicable law or agreed to in writing, software 22 | distributed under the License is distributed on an "AS IS" BASIS, 23 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 24 | See the License for the specific language governing permissions and 25 | limitations under the License. 26 | . 27 | On Debian systems, the complete text of the Apache version 2.0 license 28 | can be found in "/usr/share/common-licenses/Apache-2.0". 29 | 30 | # Please also look if there are files or directories which have a 31 | # different copyright/license attached and list them here. 
32 | # Please avoid to pick license terms that are more restrictive than the 33 | # packaged work, as it may make Debian's contributions unacceptable upstream. 34 | -------------------------------------------------------------------------------- /packages/st2/debian/docs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StackStorm/st2-packages/d4d2d8dfdf1c88412e5d58635adb87da9c671952/packages/st2/debian/docs -------------------------------------------------------------------------------- /packages/st2/debian/install: -------------------------------------------------------------------------------- 1 | ../contrib/core opt/stackstorm/packs/ 2 | ../contrib/packs opt/stackstorm/packs/ 3 | ../contrib/linux opt/stackstorm/packs/ 4 | ../contrib/chatops opt/stackstorm/packs/ 5 | ../contrib/default opt/stackstorm/packs/ 6 | ../contrib/examples usr/share/doc/st2/ 7 | ../conf/nginx usr/share/doc/st2/conf/ 8 | ../st2actions/bin/runners.sh opt/stackstorm/st2/bin/ 9 | ../package.meta opt/stackstorm/st2/ 10 | debian/st2api-generator lib/systemd/system-generators/ 11 | debian/st2auth-generator lib/systemd/system-generators/ 12 | debian/st2stream-generator lib/systemd/system-generators/ 13 | -------------------------------------------------------------------------------- /packages/st2/debian/postinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # postinst script for st2 3 | # 4 | # see: dh_installdeb(1) 5 | 6 | set -e 7 | 8 | # summary of how this script can be called: 9 | # * `configure' 10 | # * `abort-upgrade' 11 | # * `abort-remove' `in-favour' 12 | # 13 | # * `abort-remove' 14 | # * `abort-deconfigure' `in-favour' 15 | # `removing' 16 | # 17 | # for details, see http://www.debian.org/doc/debian-policy/ or 18 | # the debian-policy package 19 | 20 | ST2_USER=st2 21 | PACKS_GROUP=st2packs 22 | ST2_UPGRADESTAMP="/tmp/.stamp-stackstorm-st2-deb-package" 23 | 
upgrading=0 24 | 25 | ## Permissions of files which should be set on install 26 | SET_PERMS=$(cat </dev/null 2>&1 || true 59 | ;; 60 | abort-upgrade|abort-remove|abort-deconfigure) 61 | ;; 62 | 63 | *) 64 | echo "postinst called with unknown argument \`$1'" >&2 65 | exit 1 66 | ;; 67 | esac 68 | 69 | # dh_installdeb will replace this with shell code automatically 70 | # generated by other debhelper scripts. 71 | 72 | #DEBHELPER# 73 | 74 | exit 0 75 | -------------------------------------------------------------------------------- /packages/st2/debian/postrm: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # postrm script for st2 3 | # 4 | # see: dh_installdeb(1) 5 | 6 | set -e 7 | 8 | # summary of how this script can be called: 9 | # * `remove' 10 | # * `purge' 11 | # * `upgrade' 12 | # * `failed-upgrade' 13 | # * `abort-install' 14 | # * `abort-install' 15 | # * `abort-upgrade' 16 | # * `disappear' 17 | # 18 | # for details, see http://www.debian.org/doc/debian-policy/ or 19 | # the debian-policy package 20 | 21 | ## Save st2 logrotate config on remove, but wipe it out on purge. 
22 | preserve_logrotate() { 23 | if [ "$1" = remove ]; then 24 | [ -f /etc/logrotate.d/st2 ] && mv /etc/logrotate.d/st2 /etc/logrotate.d/st2-pkgsaved.disabled 1>/dev/null 2>&1 || : 25 | elif [ "$1" = purge ]; then 26 | rm -f /etc/logrotate.d/st2-pkgsaved.disabled 1>/dev/null 2>&1 || : 27 | # Clean up other StackStorm related configs and directories 28 | rm -rf /etc/st2 1>/dev/null 2>&1 || : 29 | rm -rf /opt/stackstorm 1>/dev/null 2>&1 || : 30 | rm -rf /root/.st2 1>/dev/null 2>&1 || : 31 | rm -rf /var/log/st2 1>/dev/null 2>&1 || : 32 | rm -f /etc/sudoers.d/st2 1>/dev/null 2>&1 || : 33 | fi 34 | } 35 | 36 | case "$1" in 37 | remove|purge) 38 | preserve_logrotate "$1" 39 | ;; 40 | upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) 41 | ;; 42 | *) 43 | echo "postrm called with unknown argument \`$1'" >&2 44 | exit 1 45 | ;; 46 | esac 47 | 48 | # dh_installdeb will replace this with shell code automatically 49 | # generated by other debhelper scripts. 50 | 51 | #DEBHELPER# 52 | 53 | exit 0 54 | -------------------------------------------------------------------------------- /packages/st2/debian/preinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # preinst script for st2 3 | # 4 | # see: dh_installdeb(1) 5 | 6 | set -e 7 | 8 | # summary of how this script can be called: 9 | # * `install' 10 | # * `install' 11 | # * `upgrade' 12 | # * `abort-upgrade' 13 | # for details, see http://www.debian.org/doc/debian-policy/ or 14 | # the debian-policy package 15 | 16 | PACKS_GROUP=st2packs 17 | SYS_USER=stanley 18 | ST2_USER=st2 19 | ST2_CONFPATH="/etc/st2/st2.conf" 20 | ST2_UPGRADESTAMP="/tmp/.stamp-stackstorm-st2-deb-package" 21 | 22 | ## Permissions of directories which have to be reset on upgrade 23 | RESET_PERMS=$(cat </dev/null 2>&1) || 37 | adduser --group --disabled-password --no-create-home --system $ST2_USER 38 | 39 | # make st2 member of st2packs group 40 | (getent group $PACKS_GROUP 1>/dev/null 2>&1) || groupadd -r
$PACKS_GROUP 41 | (groups $ST2_USER 2>/dev/null | grep -q "\b${PACKS_GROUP}\b") || 42 | usermod -a -G $PACKS_GROUP $ST2_USER 43 | 44 | # create stanley user (for actionrunner service) 45 | if (! id $SYS_USER 1>/dev/null 2>&1); then 46 | adduser --group $SYS_USER 47 | adduser --disabled-password --gecos "" --ingroup $SYS_USER $SYS_USER 48 | fi 49 | } 50 | 51 | ## [NOT USED!] Get current system user from the st2.conf 52 | config_sysuser() { 53 | # exit hooked 54 | return 0 55 | local sysuser= 56 | if [ -f $ST2_CONFPATH ]; then 57 | sysuser=$(cat $ST2_CONFPATH | 58 | sed -n -e '/\[system_user\]/,/\[.*\]\|\$/ { /\[.*\]/d; /user\s*=/ { s/\s*user\s*=\s*//; p } }') 59 | fi 60 | echo $sysuser 61 | } 62 | 63 | ## Update logrotate configuration 64 | enable_logrotate() { 65 | [ -f /etc/logrotate.d/st2-pkgsaved.disabled ] && 66 | mv -f /etc/logrotate.d/st2-pkgsaved.disabled /etc/logrotate.d/st2 || : 67 | } 68 | 69 | ## Fix directories permissions on upgrade (different across maint scripts!) 70 | # NB! USED FOR COMPATIBILITY ON UPGRADE FROM PREVIOUS VERSIONS OF PACKAGES. 71 | # NB! In future package releases reseting permissions SHOULD BE REMOVED. 
72 | # 73 | set_permissions() { 74 | local fileperms="$1" mode= ownership= path= current_ownership= user= group= 75 | 76 | echo "$fileperms" | sed -e "s/_packsgroup/$PACKS_GROUP/g" -e "s/_st2user/$ST2_USER/g" | 77 | while read mode ownership path; do 78 | user=$(echo $ownership | cut -f1 -d:) 79 | group=$(echo $ownership | cut -f2 -d:) 80 | # set top level permissions whether it's a file or directory 81 | [ -e $path ] || continue 82 | chown $ownership $path && chmod $mode $path 83 | 84 | # recursively change permissions of children (since those are directories) 85 | find $path -mindepth 1 -maxdepth 1 -not \( -user $user -group $group \) | 86 | xargs -I {} sh -c "echo chown -R $ownership {} && echo chmod -R $mode {}" 87 | done 88 | } 89 | 90 | case "$1" in 91 | install) 92 | create_users 93 | enable_logrotate 94 | ;; 95 | upgrade) 96 | create_users 97 | enable_logrotate 98 | set_permissions "$RESET_PERMS" 99 | touch $ST2_UPGRADESTAMP 100 | ;; 101 | abort-upgrade) 102 | ;; 103 | *) 104 | echo "preinst called with unknown argument \`$1'" >&2 105 | exit 1 106 | ;; 107 | esac 108 | 109 | # dh_installdeb will replace this with shell code automatically 110 | # generated by other debhelper scripts. 111 | 112 | #DEBHELPER# 113 | 114 | exit 0 115 | -------------------------------------------------------------------------------- /packages/st2/debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | # DH_VERBOSE = 1 3 | 4 | # see EXAMPLES in dpkg-buildflags(1) and read /usr/share/dpkg/* 5 | #DPKG_EXPORT_BUILDFLAGS = 1 6 | #include /usr/share/dpkg/default.mk 7 | 8 | # Virtualenv and pip should be updated (better not to use the distro defaults). 9 | # Updated ones can be found under /usr/local.
10 | PATH = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 11 | WHEELDIR ?= /tmp/wheelhouse 12 | DH_VIRTUALENV_INSTALL_ROOT := /opt/stackstorm 13 | export DH_VIRTUALENV_INSTALL_ROOT 14 | PIP_VERSION = 20.3.3 15 | 16 | IS_SYSTEMD = $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo true) 17 | DEB_DISTRO := $(shell lsb_release -cs) 18 | 19 | BUILD_PRE_DEPENDS := python3 (>= 3.8) 20 | BUILD_DEPENDS := python3-distutils, python3-dev 21 | 22 | %: 23 | dh $@ --with python-virtualenv --python /usr/bin/python3 24 | 25 | override_dh_installdirs: 26 | dh_installdirs 27 | # Restore dh_auto_install behaviour, because it was removed by dh_virtualenv 28 | # dh_auto_install same with: 29 | $(MAKE) install 30 | 31 | override_dh_gencontrol: 32 | dh_gencontrol -- -Vpre:Depends="$(BUILD_PRE_DEPENDS)" -VDepends="$(BUILD_DEPENDS)" 33 | 34 | # Packaging recognizes only 1 {package_name} service file (http://manpages.ubuntu.com/manpages/xenial/man1/dh_installinit.1.html) 35 | # We have many, and so force it to install multiple service files 36 | override_dh_installinit: 37 | dh_systemd_enable --name=st2actionrunner st2actionrunner.service 38 | install -p -m644 debian/st2actionrunner@.service debian/st2/lib/systemd/system/st2actionrunner@.service 39 | dh_systemd_enable --name=st2api st2api.service 40 | dh_systemd_enable --name=st2stream st2stream.service 41 | dh_systemd_enable --name=st2auth st2auth.service 42 | dh_systemd_enable --name=st2notifier st2notifier.service 43 | dh_systemd_enable --name=st2rulesengine st2rulesengine.service 44 | dh_systemd_enable --name=st2sensorcontainer st2sensorcontainer.service 45 | dh_systemd_enable --name=st2garbagecollector st2garbagecollector.service 46 | dh_systemd_enable --name=st2timersengine st2timersengine.service 47 | dh_systemd_enable --name=st2workflowengine st2workflowengine.service 48 | dh_systemd_enable --name=st2scheduler st2scheduler.service 49 | 50 | override_dh_installdeb: 51 | DESTDIR=debian/st2 $(MAKE) 
post_install 52 | dh_installdeb 53 | 54 | override_dh_virtualenv: 55 | # NB! Wheels must be pre-populated by now, we use --no-index to skip 56 | # querying pypi and rely only on --find-links. 57 | # 58 | # NB! Use '--no-download' arg for 'virtualenv' is required, 59 | # otherwise it downloads latest PIP version instead of bundled/pinned one. 60 | # NB! Use '--copies' else /opt/stackstorm/st2/bin/python is symlink to 61 | # /usr/bin/pythonx rather than copy - and this breaks pack install 62 | # which follows the path to calculate the location of the bin directory 63 | 64 | dh_virtualenv --extra-virtualenv-arg='--copies' --extra-virtualenv-arg='--no-download' \ 65 | '--upgrade-pip-to=$(PIP_VERSION)' \ 66 | --extra-pip-arg '--find-links=$(WHEELDIR)' \ 67 | --extra-pip-arg '--no-index' --no-test 68 | 69 | override_dh_compress: 70 | dh_compress -X.conf 71 | -------------------------------------------------------------------------------- /packages/st2/debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (quilt) 2 | -------------------------------------------------------------------------------- /packages/st2/debian/st2.dirs: -------------------------------------------------------------------------------- 1 | /etc/bash_completion.d 2 | /etc/logrotate.d 3 | /etc/st2 4 | /opt/stackstorm/configs 5 | /opt/stackstorm/exports 6 | /opt/stackstorm/overrides 7 | /opt/stackstorm/virtualenvs 8 | /var/log/st2 9 | /var/run/st2 10 | -------------------------------------------------------------------------------- /packages/st2/debian/st2.links: -------------------------------------------------------------------------------- 1 | # Note: This file is used for both - debian and rpm packages 2 | opt/stackstorm/st2/bin/st2 usr/bin/st2 3 | opt/stackstorm/st2/bin/st2-trigger-refire usr/bin/st2-trigger-refire 4 | opt/stackstorm/st2/bin/st2-rule-tester usr/bin/st2-rule-tester 5 | opt/stackstorm/st2/bin/st2-run-pack-tests 
usr/bin/st2-run-pack-tests 6 | opt/stackstorm/st2/bin/st2-register-content usr/bin/st2-register-content 7 | opt/stackstorm/st2/bin/st2-apply-rbac-definitions usr/bin/st2-apply-rbac-definitions 8 | opt/stackstorm/st2/bin/st2-bootstrap-rmq usr/bin/st2-bootstrap-rmq 9 | opt/stackstorm/st2/bin/st2-generate-symmetric-crypto-key usr/bin/st2-generate-symmetric-crypto-key 10 | opt/stackstorm/st2/bin/st2-self-check usr/bin/st2-self-check 11 | opt/stackstorm/st2/bin/st2-track-result usr/bin/st2-track-result 12 | opt/stackstorm/st2/bin/st2-validate-pack-config usr/bin/st2-validate-pack-config 13 | opt/stackstorm/st2/bin/st2ctl usr/bin/st2ctl 14 | -------------------------------------------------------------------------------- /packages/st2/debian/st2actionrunner.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2actionrunner 3 | After=network.target 4 | 5 | [Service] 6 | Type=oneshot 7 | EnvironmentFile=-/etc/default/st2actionrunner 8 | ExecStart=/bin/bash /opt/stackstorm/st2/bin/runners.sh start 9 | ExecStop=/bin/bash /opt/stackstorm/st2/bin/runners.sh stop 10 | PrivateTmp=true 11 | RemainAfterExit=true 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /packages/st2/debian/st2actionrunner@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2actionrunner 3 | After=network.target 4 | JoinsNamespaceOf=st2actionrunner.service 5 | 6 | [Service] 7 | Type=simple 8 | User=root 9 | Group=st2packs 10 | UMask=002 11 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 12 | Environment="WORKERID=%i" 13 | EnvironmentFile=-/etc/default/st2actionrunner 14 | ExecStart=/opt/stackstorm/st2/bin/st2actionrunner $DAEMON_ARGS 15 | TimeoutSec=60 16 | PrivateTmp=true 17 | Restart=on-failure 18 | RestartSec=5 19 | 20 | [Install] 21 | 
WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /packages/st2/debian/st2api-generator: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import configparser 3 | import logging 4 | import time 5 | import sys 6 | 7 | ST2SVC = "st2api" 8 | DEFAULT_IP = "127.0.0.1" 9 | DEFAULT_PORT = "9101" 10 | ST2CFG = "/etc/st2/st2.conf" 11 | 12 | # Systemd passes 3 paths to a generator, normal_dir, early_dir, late_dir. 13 | default_paths = ["/tmp", "/tmp", "/tmp"] 14 | for i, p in enumerate(sys.argv[1:]): 15 | default_paths[i] = p 16 | EARLY_DIR, NORMAL_DIR, LATE_DIR = default_paths 17 | 18 | LOG_TO_DISK = True 19 | LOG_KW = { 20 | "level": logging.DEBUG, 21 | "format": "%(asctime)s - %(levelname)s - %(message)s", 22 | } 23 | if LOG_TO_DISK: 24 | LOG_KW["filename"] = f"{NORMAL_DIR}/{ST2SVC}_generator.log" 25 | 26 | logging.basicConfig(**LOG_KW) 27 | LOG = logging.getLogger() 28 | 29 | LOG.debug( 30 | f"Systemd directories: Early='{EARLY_DIR}' Normal='{NORMAL_DIR}' Late='{LATE_DIR}'" 31 | ) 32 | 33 | config = configparser.ConfigParser(strict=False) 34 | config.read(ST2CFG) 35 | 36 | section = ST2SVC[3:] 37 | bind_address = config[section].get("host", DEFAULT_IP) 38 | bind_port = config[section].get("port", DEFAULT_PORT) 39 | 40 | contents = f"""[Unit] 41 | # Generated by {sys.argv[0]} at {time.asctime(time.localtime())} 42 | Description=StackStorm {ST2SVC} Socket. 
43 | PartOf={ST2SVC}.service 44 | SourcePath={ST2CFG} 45 | 46 | [Socket] 47 | ListenStream={bind_address}:{bind_port} 48 | 49 | [Install] 50 | WantedBy=sockets.target 51 | """ 52 | 53 | with open(f"{NORMAL_DIR}/{ST2SVC}.socket", "w") as f: 54 | f.write(contents) 55 | 56 | LOG.info(f"{ST2SVC} generated.") 57 | -------------------------------------------------------------------------------- /packages/st2/debian/st2api.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2api 3 | After=network.target st2api.socket 4 | Requires=st2api.socket 5 | 6 | [Service] 7 | Type=simple 8 | User=st2 9 | Group=st2 10 | Environment="DAEMON_ARGS=-k eventlet -b 127.0.0.1:9101 --workers 1 --threads 1 --graceful-timeout 10 --timeout 30 --log-config /etc/st2/logging.api.gunicorn.conf --error-logfile /var/log/st2/st2api.log" 11 | EnvironmentFile=-/etc/default/st2api 12 | ExecStart=/opt/stackstorm/st2/bin/gunicorn st2api.wsgi:application $DAEMON_ARGS 13 | TimeoutSec=60 14 | PrivateTmp=true 15 | Restart=on-failure 16 | RestartSec=5 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /packages/st2/debian/st2auth-generator: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import configparser 3 | import logging 4 | import time 5 | import sys 6 | 7 | ST2SVC="st2auth" 8 | DEFAULT_IP="127.0.0.1" 9 | DEFAULT_PORT="9100" 10 | ST2CFG = "/etc/st2/st2.conf" 11 | 12 | # Systemd passes 3 paths to a generator, normal_dir, early_dir, late_dir. 
13 | default_paths = ["/tmp", "/tmp", "/tmp"] 14 | for i, p in enumerate(sys.argv[1:]): 15 | default_paths[i] = p 16 | EARLY_DIR, NORMAL_DIR, LATE_DIR = default_paths 17 | 18 | LOG_TO_DISK = True 19 | LOG_KW = { 20 | "level": logging.DEBUG, 21 | "format": "%(asctime)s - %(levelname)s - %(message)s", 22 | } 23 | if LOG_TO_DISK: 24 | LOG_KW["filename"] = f"{NORMAL_DIR}/{ST2SVC}_generator.log" 25 | 26 | logging.basicConfig(**LOG_KW) 27 | LOG = logging.getLogger() 28 | 29 | LOG.debug( 30 | f"Systemd directories: Early='{EARLY_DIR}' Normal='{NORMAL_DIR}' Late='{LATE_DIR}'" 31 | ) 32 | 33 | config = configparser.ConfigParser(strict=False) 34 | config.read(ST2CFG) 35 | 36 | section = ST2SVC[3:] 37 | bind_address = config[section].get("host", DEFAULT_IP) 38 | bind_port = config[section].get("port", DEFAULT_PORT) 39 | 40 | contents = f"""[Unit] 41 | # Generated by {sys.argv[0]} at {time.asctime(time.localtime())} 42 | Description=StackStorm {ST2SVC} Socket. 43 | PartOf={ST2SVC}.service 44 | SourcePath={ST2CFG} 45 | 46 | [Socket] 47 | ListenStream={bind_address}:{bind_port} 48 | 49 | [Install] 50 | WantedBy=sockets.target 51 | """ 52 | 53 | with open(f"{NORMAL_DIR}/{ST2SVC}.socket", "w") as f: 54 | f.write(contents) 55 | 56 | LOG.info(f"{ST2SVC} generated.") 57 | -------------------------------------------------------------------------------- /packages/st2/debian/st2auth.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2auth 3 | After=network.target st2auth.socket 4 | Requires=st2auth.socket 5 | 6 | [Service] 7 | Type=simple 8 | User=st2 9 | Group=st2 10 | Environment="DAEMON_ARGS=-k eventlet -b 127.0.0.1:9100 --workers 1 --threads 1 --graceful-timeout 10 --timeout 30 --log-config /etc/st2/logging.auth.gunicorn.conf --error-logfile /var/log/st2/st2auth.log" 11 | EnvironmentFile=-/etc/default/st2auth 12 | ExecStart=/opt/stackstorm/st2/bin/gunicorn st2auth.wsgi:application $DAEMON_ARGS 13 
| TimeoutSec=60 14 | PrivateTmp=true 15 | Restart=on-failure 16 | RestartSec=5 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /packages/st2/debian/st2garbagecollector.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2garbagecollector 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/default/st2garbagecollector 11 | ExecStart=/opt/stackstorm/st2/bin/st2garbagecollector $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/debian/st2notifier.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2notifier 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/default/st2notifier 11 | ExecStart=/opt/stackstorm/st2/bin/st2notifier $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/debian/st2rulesengine.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2rulesengine 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/default/st2rulesengine 11 | 
ExecStart=/opt/stackstorm/st2/bin/st2rulesengine $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/debian/st2scheduler.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2scheduler 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/default/st2scheduler 11 | ExecStart=/opt/stackstorm/st2/bin/st2scheduler $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/debian/st2sensorcontainer.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2sensorcontainer 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/default/st2sensorcontainer 11 | ExecStart=/opt/stackstorm/st2/bin/st2sensorcontainer $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/debian/st2stream-generator: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import configparser 3 | import logging 4 | import time 5 | import sys 6 | 7 | ST2SVC="st2stream" 8 | DEFAULT_IP="127.0.0.1" 9 | DEFAULT_PORT="9102" 10 | ST2CFG = "/etc/st2/st2.conf" 11 | 12 | # Systemd passes 3 
paths to a generator, normal_dir, early_dir, late_dir. 13 | default_paths = ["/tmp", "/tmp", "/tmp"] 14 | for i, p in enumerate(sys.argv[1:]): 15 | default_paths[i] = p 16 | EARLY_DIR, NORMAL_DIR, LATE_DIR = default_paths 17 | 18 | LOG_TO_DISK = True 19 | LOG_KW = { 20 | "level": logging.DEBUG, 21 | "format": "%(asctime)s - %(levelname)s - %(message)s", 22 | } 23 | if LOG_TO_DISK: 24 | LOG_KW["filename"] = f"{NORMAL_DIR}/{ST2SVC}_generator.log" 25 | 26 | logging.basicConfig(**LOG_KW) 27 | LOG = logging.getLogger() 28 | 29 | LOG.debug( 30 | f"Systemd directories: Early='{EARLY_DIR}' Normal='{NORMAL_DIR}' Late='{LATE_DIR}'" 31 | ) 32 | 33 | config = configparser.ConfigParser(strict=False) 34 | config.read(ST2CFG) 35 | 36 | section = ST2SVC[3:] 37 | bind_address = config[section].get("host", DEFAULT_IP) 38 | bind_port = config[section].get("port", DEFAULT_PORT) 39 | 40 | contents = f"""[Unit] 41 | # Generated by {sys.argv[0]} at {time.asctime(time.localtime())} 42 | Description=StackStorm {ST2SVC} Socket. 
43 | PartOf={ST2SVC}.service 44 | SourcePath={ST2CFG} 45 | 46 | [Socket] 47 | ListenStream={bind_address}:{bind_port} 48 | 49 | [Install] 50 | WantedBy=sockets.target 51 | """ 52 | 53 | with open(f"{NORMAL_DIR}/{ST2SVC}.socket", "w") as f: 54 | f.write(contents) 55 | 56 | LOG.info(f"{ST2SVC} generated.") 57 | -------------------------------------------------------------------------------- /packages/st2/debian/st2stream.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2stream 3 | After=network.target st2stream.socket 4 | Requires=st2stream.socket 5 | 6 | [Service] 7 | Type=simple 8 | User=st2 9 | Group=st2 10 | Environment="DAEMON_ARGS=-k eventlet -b 127.0.0.1:9102 --workers 1 --threads 10 --graceful-timeout 10 --timeout 30 --log-config /etc/st2/logging.stream.gunicorn.conf --error-logfile /var/log/st2/st2stream.log" 11 | EnvironmentFile=-/etc/default/st2stream 12 | ExecStart=/opt/stackstorm/st2/bin/gunicorn st2stream.wsgi:application $DAEMON_ARGS 13 | TimeoutSec=60 14 | PrivateTmp=true 15 | Restart=on-failure 16 | RestartSec=5 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /packages/st2/debian/st2timersengine.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2timersengine 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/default/st2timersengine 11 | ExecStart=/opt/stackstorm/st2/bin/st2timersengine $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/debian/st2workflowengine.service: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2workflowengine 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/default/st2workflowengine 11 | ExecStart=/opt/stackstorm/st2/bin/st2workflowengine $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/dist_utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # NOTE: This file is auto-generated - DO NOT EDIT MANUALLY 3 | # Instead copy from https://github.com/StackStorm/st2/blob/master/scripts/dist_utils.py 4 | 5 | # Copyright 2020 The StackStorm Authors. 6 | # Copyright 2019 Extreme Networks, Inc. 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | 20 | from __future__ import absolute_import 21 | 22 | import os 23 | import re 24 | import sys 25 | 26 | # NOTE: This script can't rely on any 3rd party dependency so we need to use this code here 27 | # 28 | # TODO: Why can't this script rely on 3rd party dependencies? Is it because it has to import 29 | # from pip? 
30 | # 31 | # TODO: Dear future developer, if you are back here fixing a bug with how we parse 32 | # requirements files, please look into using the packaging package on PyPI: 33 | # https://packaging.pypa.io/en/latest/requirements/ 34 | # and specifying that in the `setup_requires` argument to `setuptools.setup()` 35 | # for subpackages. 36 | # At the very least we can vendorize some of their code instead of reimplementing 37 | # each piece of their code every time our parsing breaks. 38 | PY3 = sys.version_info[0] == 3 39 | 40 | if PY3: 41 | text_type = str 42 | else: 43 | text_type = unicode # noqa # pylint: disable=E0602 44 | 45 | GET_PIP = "curl https://bootstrap.pypa.io/get-pip.py | python" 46 | 47 | __all__ = [ 48 | "fetch_requirements", 49 | "apply_vagrant_workaround", 50 | "get_version_string", 51 | "parse_version_string", 52 | ] 53 | 54 | 55 | def fetch_requirements(requirements_file_path): 56 | """ 57 | Return a list of requirements and links by parsing the provided requirements file. 
58 | """ 59 | links = [] 60 | reqs = [] 61 | 62 | def _get_link(line): 63 | vcs_prefixes = ["git+", "svn+", "hg+", "bzr+"] 64 | 65 | for vcs_prefix in vcs_prefixes: 66 | if line.startswith(vcs_prefix) or line.startswith("-e %s" % (vcs_prefix)): 67 | req_name = re.findall(".*#egg=(.+)([&|@]).*$", line) 68 | 69 | if not req_name: 70 | req_name = re.findall(".*#egg=(.+?)$", line) 71 | else: 72 | req_name = req_name[0] 73 | 74 | if not req_name: 75 | raise ValueError( 76 | 'Line "%s" is missing "#egg="' % (line) 77 | ) 78 | 79 | link = line.replace("-e ", "").strip() 80 | return link, req_name[0] 81 | elif vcs_prefix in line and line.count("@") == 2: 82 | # PEP 440 direct reference: @ @version 83 | req_name, link = line.split("@", 1) 84 | req_name = req_name.strip() 85 | link = f"{link.strip()}#egg={req_name}" 86 | return link, req_name 87 | 88 | return None, None 89 | 90 | with open(requirements_file_path, "r") as fp: 91 | for line in fp.readlines(): 92 | line = line.strip() 93 | 94 | if line.startswith("#") or not line: 95 | continue 96 | 97 | link, req_name = _get_link(line=line) 98 | 99 | if link: 100 | links.append(link) 101 | else: 102 | req_name = line 103 | 104 | reqs.append(req_name) 105 | 106 | return (reqs, links) 107 | 108 | 109 | def apply_vagrant_workaround(): 110 | """ 111 | Function which detects if the script is being executed inside vagrant and if it is, it deletes 112 | "os.link" attribute. 113 | Note: Without this workaround, setup.py sdist will fail when running inside a shared directory 114 | (nfs / virtualbox shared folders). 115 | """ 116 | if os.environ.get("USER", None) == "vagrant": 117 | del os.link 118 | 119 | 120 | def get_version_string(init_file): 121 | """ 122 | Read __version__ string for an init file. 
123 | """ 124 | 125 | with open(init_file, "r") as fp: 126 | content = fp.read() 127 | version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M) 128 | if version_match: 129 | return version_match.group(1) 130 | 131 | raise RuntimeError("Unable to find version string in %s." % (init_file)) 132 | 133 | 134 | # alias for get_version_string 135 | parse_version_string = get_version_string 136 | -------------------------------------------------------------------------------- /packages/st2/in-requirements.txt: -------------------------------------------------------------------------------- 1 | st2common 2 | st2actions 3 | st2api 4 | st2stream 5 | st2auth 6 | st2reactor 7 | st2tests 8 | git+https://github.com/StackStorm/st2-auth-backend-pam.git@master#egg=st2-auth-backend-pam 9 | stackstorm-runner-action-chain 10 | stackstorm-runner-announcement 11 | stackstorm-runner-http 12 | stackstorm-runner-inquirer 13 | stackstorm-runner-local 14 | stackstorm-runner-noop 15 | stackstorm-runner-orquesta 16 | stackstorm-runner-python 17 | stackstorm-runner-remote 18 | stackstorm-runner-winrm 19 | -------------------------------------------------------------------------------- /packages/st2/rpm/postinst_script.spec: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | # make sure that our socket generators run 4 | systemctl daemon-reload >/dev/null 2>&1 || true 5 | -------------------------------------------------------------------------------- /packages/st2/rpm/preinst_script.spec: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | PACKS_GROUP=%{packs_group} 4 | SYS_USER=%{stanley_user} 5 | ST2_USER=%{svc_user} 6 | 7 | ## Permissions of directories which has to be reset on upgrade 8 | RESET_PERMS=$(cat </dev/null 2>&1) || 22 | adduser --no-create-home --system --user-group $ST2_USER 23 | 24 | # make st2 member of st2packs group 25 | (getent group $PACKS_GROUP 
1>/dev/null 2>&1) || groupadd -r $PACKS_GROUP 26 | (groups $ST2_USER 2>/dev/null | grep -q "\b${PACKS_GROUP}\b") || 27 | usermod -a -G $PACKS_GROUP $ST2_USER 28 | 29 | # create stanley user (unprivileged action user, we don't ship sudoers.d config) 30 | (id $SYS_USER 1>/dev/null 2>&1) || 31 | adduser --user-group $SYS_USER 32 | } 33 | 34 | ## Fix directories permissions on upgrade (different across maint scripts!) 35 | # NB! USED FOR COMPATIBILITY ON UPGRADE FROM PREVIOUS VERSIONS OF PACKAGES. 36 | # NB! In future package releases resetting permissions SHOULD BE REMOVED. 37 | # 38 | set_permissions() { 39 | local fileperms="$1" mode= ownership= path= current_ownership= user= group= 40 | 41 | echo "$fileperms" | sed -e "s/_packsgroup/$PACKS_GROUP/g" -e "s/_st2user/$ST2_USER/g" | 42 | while read mode ownership path; do 43 | user=$(echo $ownership | cut -f1 -d:) 44 | group=$(echo $ownership | cut -f2 -d:) 45 | # set top level permissions whether it's a file or directory 46 | [ -e $path ] || continue 47 | chown $ownership $path && chmod $mode $path 48 | 49 | # recursively change permissions of children (since those are directories) 50 | find $path -mindepth 1 -maxdepth 1 -not \( -user $user -group $group \) | 51 | xargs -I {} sh -c "chown -R $ownership {} && chmod -R $mode {}" 52 | done 53 | } 54 | 55 | create_users 56 | 57 | # We perform upgrade (when install count > 1) 58 | if [ "$1" -gt 1 ]; then 59 | set_permissions "$RESET_PERMS" 60 | fi 61 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2.spec: -------------------------------------------------------------------------------- 1 | %define package st2 2 | %define venv_name st2 3 | %define svc_user st2 4 | %define stanley_user stanley 5 | %define packs_group st2packs 6 | 7 | %include ../rpmspec/st2pkg_toptags.spec 8 | 9 | %if 0%{?epoch} 10 | Epoch: %{epoch} 11 | %endif 12 | 13 | %if 0%{?rhel} == 8 14 | %global _build_id_links none 15 | %endif 16 | 17 | Requires:
openssl-devel, libffi-devel, git, pam, openssh-server, openssh-clients, bash, setup 18 | %if 0%{?rhel} == 8 19 | Requires: python38-devel 20 | %endif 21 | %if 0%{?rhel} == 9 22 | Requires: python3-devel 23 | %endif 24 | 25 | # EL8 requires a few python packages available within 'BUILDROOT' when outside venv 26 | # These are in the el8 packagingbuild dockerfile 27 | # Reference https://fossies.org/linux/ansible/packaging/rpm/ansible.spec 28 | %if 0%{?rhel} == 8 29 | # Will use the python3 stdlib venv 30 | BuildRequires: python38-devel 31 | BuildRequires: python38-setuptools 32 | %endif 33 | %if 0%{?rhel} == 9 34 | BuildRequires: python3-devel 35 | BuildRequires: python3-setuptools 36 | %endif 37 | 38 | # Apply this to both RHEL 8 and RHEL 9 39 | %if 0%{?rhel} > 7 40 | # By default the RPM helper scripts will try to generate Requires: section which lists every 41 | # Python dependencies. That process / script works by recursively scanning all the package Python 42 | # dependencies which is very slow (5-6 minutes). 43 | # Our package bundles virtualenv with all the dependendencies and doesn't rely on this metadata 44 | # so we skip that step to vastly speed up the build. 45 | # Technically we also don't Require or Provide any of those libraries auto-detected by that script 46 | # because those are only used internally inside a package specific virtual environment. 47 | # See https://github.com/StackStorm/st2-packages/pull/697#issuecomment-808971874 and that PR for 48 | # more details. 49 | # That issue was found by enabling rpmbuild -vv flag. 
50 | %undefine __pythondist_provides 51 | %undefine __pythondist_requires 52 | %undefine __python_provides 53 | %undefine __python_requires 54 | %endif 55 | 56 | Summary: StackStorm all components bundle 57 | Conflicts: st2common 58 | 59 | %description 60 | Package is full standalone stackstorm installation including 61 | all components 62 | 63 | # Define worker name 64 | %define worker_name st2actionrunner@ 65 | 66 | # WORKAROUND: RockyLinux9 doesn't have a python virtualenv rpm so it's installed during build as a dependency with pip. 67 | %if 0%{?rhel} == 9 68 | %build 69 | pip install virtualenv 70 | %endif 71 | 72 | %install 73 | %default_install 74 | %pip_install_venv 75 | %service_install st2actionrunner %{worker_name} st2api st2stream st2auth st2notifier st2workflowengine 76 | %service_install st2rulesengine st2timersengine st2sensorcontainer st2garbagecollector 77 | %service_install st2scheduler 78 | make post_install DESTDIR=%{buildroot} 79 | 80 | # We build cryptography for RHEL8/RHEL9, and this can contain buildroot path in the 81 | # built .so files. 
We use strip on these libraries so that there are no 82 | # references to the buildroot in the st2 rpm 83 | %cleanup_so_abspath 84 | %cleanup_python_abspath 85 | 86 | %prep 87 | rm -rf %{buildroot} 88 | mkdir -p %{buildroot} 89 | 90 | %clean 91 | rm -rf %{buildroot} 92 | 93 | %pre 94 | %include rpm/preinst_script.spec 95 | 96 | %post 97 | %service_post st2actionrunner st2api st2stream st2auth st2notifier st2workflowengine 98 | %service_post st2rulesengine st2timersengine st2sensorcontainer st2garbagecollector 99 | %service_post st2scheduler 100 | %include rpm/postinst_script.spec 101 | 102 | %preun 103 | %service_preun st2actionrunner %{worker_name} st2api st2stream st2auth st2notifier st2workflowengine 104 | %service_preun st2rulesengine st2timersengine st2sensorcontainer st2garbagecollector 105 | %service_preun st2scheduler 106 | 107 | %postun 108 | %service_postun st2actionrunner %{worker_name} st2api st2stream st2auth st2notifier st2workflowengine 109 | %service_postun st2rulesengine st2timersengine st2sensorcontainer st2garbagecollector 110 | %service_postun st2scheduler 111 | # Remove st2 logrotate config, since there's no analog of apt-get purge available 112 | if [ $1 -eq 0 ]; then 113 | rm -f /etc/logrotate.d/st2 114 | fi 115 | 116 | %files 117 | %defattr(-,root,root,-) 118 | /opt/stackstorm/%{venv_name} 119 | %{_bindir}/* 120 | %config %{_sysconfdir}/bash_completion.d/st2 121 | %config(noreplace) %{_sysconfdir}/logrotate.d/st2 122 | %config(noreplace) %attr(600, %{svc_user}, %{svc_user}) %{_sysconfdir}/st2/htpasswd 123 | %config(noreplace) %{_sysconfdir}/st2/* 124 | %{_datadir}/doc/st2 125 | %attr(755, %{svc_user}, root) /opt/stackstorm/configs 126 | %attr(755, %{svc_user}, root) /opt/stackstorm/exports 127 | %attr(755, %{svc_user}, root) /opt/stackstorm/overrides 128 | %attr(755, %{svc_user}, root) %{_localstatedir}/log/st2 129 | %attr(755, %{svc_user}, root) %{_localstatedir}/run/st2 130 | %attr(775, root, %{packs_group}) /opt/stackstorm/packs/* 131 | 
%attr(775, root, %{packs_group}) /usr/share/doc/st2/examples 132 | %attr(775, root, %{packs_group}) /opt/stackstorm/virtualenvs 133 | %{_unitdir}/st2actionrunner.service 134 | %{_unitdir}/%{worker_name}.service 135 | %{_unitdir}/st2api.service 136 | %{_unitdir}/st2stream.service 137 | %{_unitdir}/st2auth.service 138 | %{_unitdir}/st2notifier.service 139 | %{_unitdir}/st2rulesengine.service 140 | %{_unitdir}/st2sensorcontainer.service 141 | %{_unitdir}/st2garbagecollector.service 142 | %{_unitdir}/st2timersengine.service 143 | %{_unitdir}/st2workflowengine.service 144 | %{_unitdir}/st2scheduler.service 145 | /usr/lib/systemd/system-generators/st2api-generator 146 | /usr/lib/systemd/system-generators/st2auth-generator 147 | /usr/lib/systemd/system-generators/st2stream-generator 148 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2actionrunner.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2actionrunner 3 | After=network.target 4 | 5 | [Service] 6 | Type=oneshot 7 | EnvironmentFile=-/etc/sysconfig/st2actionrunner 8 | ExecStart=/bin/bash /opt/stackstorm/st2/bin/runners.sh start 9 | ExecStop=/bin/bash /opt/stackstorm/st2/bin/runners.sh stop 10 | PrivateTmp=true 11 | RemainAfterExit=true 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2actionrunner@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2actionrunner 3 | After=network.target 4 | JoinsNamespaceOf=st2actionrunner.service 5 | 6 | [Service] 7 | Type=simple 8 | User=root 9 | Group=st2packs 10 | UMask=002 11 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 12 | Environment="WORKERID=%i" 13 | EnvironmentFile=-/etc/sysconfig/st2actionrunner 14 | 
ExecStart=/opt/stackstorm/st2/bin/st2actionrunner $DAEMON_ARGS 15 | TimeoutSec=60 16 | PrivateTmp=true 17 | Restart=on-failure 18 | RestartSec=5 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2api.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2api 3 | After=network.target st2api.socket 4 | Requires=st2api.socket 5 | 6 | [Service] 7 | Type=simple 8 | User=st2 9 | Group=st2 10 | Environment="DAEMON_ARGS=-k eventlet -b 127.0.0.1:9101 --workers 1 --threads 1 --graceful-timeout 10 --timeout 30 --log-config /etc/st2/logging.api.gunicorn.conf --error-logfile /var/log/st2/st2api.log" 11 | EnvironmentFile=-/etc/sysconfig/st2api 12 | ExecStart=/opt/stackstorm/st2/bin/gunicorn st2api.wsgi:application $DAEMON_ARGS 13 | TimeoutSec=60 14 | PrivateTmp=true 15 | Restart=on-failure 16 | RestartSec=5 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2auth.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2auth 3 | After=network.target st2auth.socket 4 | Requires=st2auth.socket 5 | 6 | [Service] 7 | Type=simple 8 | User=st2 9 | Group=st2 10 | Environment="DAEMON_ARGS=-k eventlet -b 127.0.0.1:9100 --workers 1 --threads 1 --graceful-timeout 10 --timeout 30 --log-config /etc/st2/logging.auth.gunicorn.conf --error-logfile /var/log/st2/st2auth.log" 11 | EnvironmentFile=-/etc/sysconfig/st2auth 12 | ExecStart=/opt/stackstorm/st2/bin/gunicorn st2auth.wsgi:application $DAEMON_ARGS 13 | TimeoutSec=60 14 | PrivateTmp=true 15 | Restart=on-failure 16 | RestartSec=5 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- 
/packages/st2/rpm/st2garbagecollector.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2garbagecollector 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/sysconfig/st2garbagecollector 11 | ExecStart=/opt/stackstorm/st2/bin/st2garbagecollector $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2notifier.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2notifier 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/sysconfig/st2notifier 11 | ExecStart=/opt/stackstorm/st2/bin/st2notifier $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2rulesengine.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2rulesengine 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/sysconfig/st2rulesengine 11 | ExecStart=/opt/stackstorm/st2/bin/st2rulesengine $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | 
-------------------------------------------------------------------------------- /packages/st2/rpm/st2scheduler.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2scheduler 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/sysconfig/st2scheduler 11 | ExecStart=/opt/stackstorm/st2/bin/st2scheduler $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2sensorcontainer.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2sensorcontainer 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/sysconfig/st2sensorcontainer 11 | ExecStart=/opt/stackstorm/st2/bin/st2sensorcontainer $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2stream.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2stream 3 | After=network.target st2stream.socket 4 | Requires=st2stream.socket 5 | 6 | [Service] 7 | Type=simple 8 | User=st2 9 | Group=st2 10 | Environment="DAEMON_ARGS=-k eventlet -b 127.0.0.1:9102 --workers 1 --threads 10 --graceful-timeout 10 --timeout 30 --log-config /etc/st2/logging.stream.gunicorn.conf --error-logfile /var/log/st2/st2stream.log" 11 | EnvironmentFile=-/etc/sysconfig/st2stream 
12 | ExecStart=/opt/stackstorm/st2/bin/gunicorn st2stream.wsgi:application $DAEMON_ARGS 13 | TimeoutSec=60 14 | PrivateTmp=true 15 | Restart=on-failure 16 | RestartSec=5 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2timersengine.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2timersengine 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/sysconfig/st2timersengine 11 | ExecStart=/opt/stackstorm/st2/bin/st2timersengine $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/rpm/st2workflowengine.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StackStorm service st2workflowengine 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=st2 8 | Group=st2 9 | Environment="DAEMON_ARGS=--config-file /etc/st2/st2.conf" 10 | EnvironmentFile=-/etc/sysconfig/st2workflowengine 11 | ExecStart=/opt/stackstorm/st2/bin/st2workflowengine $DAEMON_ARGS 12 | TimeoutSec=60 13 | PrivateTmp=true 14 | Restart=on-failure 15 | RestartSec=5 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packages/st2/setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright 2019 Extreme Networks, Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | from __future__ import absolute_import 17 | import os.path 18 | 19 | from setuptools import find_packages 20 | from distutils.core import setup 21 | 22 | from dist_utils import fetch_requirements 23 | from dist_utils import apply_vagrant_workaround 24 | from st2 import __version__ 25 | 26 | ST2_COMPONENT = 'st2' 27 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 28 | REQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt') 29 | 30 | install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE) 31 | 32 | apply_vagrant_workaround() 33 | setup( 34 | name=ST2_COMPONENT, 35 | version=__version__, 36 | description='{} component'.format(ST2_COMPONENT), 37 | author='StackStorm', 38 | author_email='info@stackstorm.com', 39 | license='Apache License (2.0)', 40 | url='https://stackstorm.com/', 41 | install_requires=install_reqs, 42 | dependency_links=dep_links, 43 | test_suite=ST2_COMPONENT, 44 | zip_safe=False, 45 | include_package_data=True, 46 | setup_requires=['wheel'], 47 | packages=find_packages(exclude=['setuptools', 'tests']) 48 | ) 49 | -------------------------------------------------------------------------------- /packages/st2/st2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StackStorm/st2-packages/d4d2d8dfdf1c88412e5d58635adb87da9c671952/packages/st2/st2/__init__.py -------------------------------------------------------------------------------- /rake/build/environment.rb: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | require 'hashie' 3 | require 'resolv' 4 | 5 | ## Build pipeline environment configuration file 6 | # --------------------------------------------- 7 | 8 | # St2 components which are part of the bundle package 9 | ST2_COMPONENTS = %w( 10 | st2api st2stream st2actions st2common 11 | st2auth st2client 12 | st2reactor 13 | st2tests) 14 | 15 | # Default list of packages to build 16 | BUILDLIST = 'st2' 17 | 18 | ## Helper procs 19 | convert_to_ipaddr = ->(v) {(v !~ Resolv::AddressRegex) ? Resolv.getaddress(v) : v} 20 | convert_to_int = ->(v) {v.to_i} 21 | convert_to_array = ->(a) do 22 | if a.is_a? Array 23 | a 24 | else 25 | list = a.to_s.split(' ') 26 | list.empty? ? [] : list 27 | end 28 | end 29 | 30 | pipeopts do 31 | ssh_options({ 32 | keys: %w(/root/.ssh/busybee), 33 | auth_methods: %w(publickey) 34 | }) 35 | 36 | ## Attributes 37 | # buildnode - hostname or address of remote build node (where build is run) 38 | # testnode - hostname or address of remote test node (where tests are run) 39 | # package_list - a space separated list of packages to built, overrides BUILDLIST 40 | # 41 | env :buildnode 42 | env :testnode 43 | env :packages, BUILDLIST, from: 'ST2_PACKAGES', proc: convert_to_array 44 | env :package_list, BUILDLIST, from: 'ST2_PACKAGES' 45 | 46 | ## Envpass attributes 47 | # are fetch from environment variables, however they are also made 48 | # visible to remote nodes. 
49 | # 50 | # basedir - base directory (intermediate files are copied there) 51 | # artifact_directory - directory on the main node where artifacts are copied 52 | # wheeldir - directory where wheels are prefetched (cache directory) 53 | envpass :basedir, '/root' 54 | envpass :debug_level, 1, proc: convert_to_int 55 | envpass :artifact_dir, '/root/build' 56 | envpass :wheeldir, '/tmp/wheelhouse' 57 | 58 | # Default hostnames of dependat services (the value can take an address also) 59 | envpass :rabbitmqhost, 'rabbitmq', proc: convert_to_ipaddr 60 | envpass :mongodbhost, 'mongodb', proc: convert_to_ipaddr 61 | envpass :redishost, 'redis', proc: convert_to_ipaddr 62 | 63 | # upload_sources - a list of directories which should be propogated 64 | # to remote nodes. 65 | upload_sources 'packages', 'scripts', 'rpmspec' 66 | end 67 | 68 | pipeopts 'st2' do 69 | env :components, ST2_COMPONENTS, proc: convert_to_array 70 | envpass :checkout, 1, from: 'ST2_CHECKOUT', proc: convert_to_int 71 | envpass :giturl, 'https://github.com/StackStorm/st2', from: 'ST2_GITURL' 72 | envpass :gitrev, 'master', from: 'ST2_GITREV' 73 | envpass :gitdir, make_tmpname('st2-'), from: 'ST2_GITDIR' 74 | envpass :st2pkg_version 75 | envpass :st2pkg_release, 1 76 | envpass :st2_circle_url 77 | end 78 | -------------------------------------------------------------------------------- /rake/build/package_st2.rake: -------------------------------------------------------------------------------- 1 | namespace :package do 2 | 3 | ## Create wheels for components and write them to the wheelhouse directory. 
4 | # 5 | task :prebuild_st2 do 6 | pipeline 'st2' do 7 | run hostname: opts[:buildnode] do |opts| 8 | command show_uuid: false 9 | with opts.env do 10 | opts.components.each do |component| 11 | within ::File.join(opts.gitdir, component) do 12 | make :info, label: "make info" 13 | make :bdist_wheel, label: "bdist: #{component}" 14 | end 15 | end 16 | within ::File.join(opts.gitdir, 'st2') do 17 | make :wheelhouse, label: 'wheelhouse: st2' 18 | end 19 | end 20 | end 21 | end 22 | end 23 | 24 | ## Prepare st2 bundle package to be built 25 | # 26 | task :post_checkout_st2 do 27 | pipeline 'st2' do 28 | run hostname: opts[:buildnode] do |opts| 29 | command show_uuid: false, label: "checkout: update st2" 30 | with opts.env do 31 | # Update gitdir with rpmspecs and st2 packagedir 32 | within opts.basedir do 33 | execute :cp, '-r rpmspec/ packages/st2/ $GITDIR' 34 | opts.components.each do |component| 35 | execute :cp, "packages/st2/component.makefile ${GITDIR}/#{component}/Makefile" 36 | end 37 | execute :bash, '$GITDIR/scripts/populate-package-meta.sh' 38 | end 39 | end 40 | end 41 | end 42 | end 43 | 44 | ## Build st2 bundle package 45 | # 46 | task :st2 do 47 | pipeline 'st2' do 48 | run hostname: opts[:buildnode] do |opts| 49 | command label: 'package: st2', show_uuid: false 50 | with opts.env do 51 | within ::File.join(opts.gitdir, 'st2') do 52 | make :changelog 53 | execute :bash, '$BASEDIR/scripts/build_os_package.sh st2' 54 | end 55 | end 56 | end 57 | end 58 | end 59 | 60 | end 61 | -------------------------------------------------------------------------------- /rake/build/setup.rake: -------------------------------------------------------------------------------- 1 | # Depends on SPECS, so the code bellow just makes it work. 
2 | # 3 | # 4 | 5 | namespace :setup do 6 | task :all => [:install_artifacts, :configure] 7 | 8 | task :install_artifacts => ['upload:to_testnode'] do 9 | pipeline do 10 | run hostname: opts[:testnode] do |opts| 11 | with opts.env do 12 | within opts.artifact_dir do 13 | execute :bash, "$BASEDIR/scripts/install_os_packages.sh #{opts[:package_list]}" 14 | end 15 | end 16 | end 17 | end 18 | end 19 | 20 | task :configure do 21 | pipeline do 22 | run hostname: opts[:testnode] do |opts| 23 | with opts.env do 24 | if opts.packages.include? 'st2' 25 | execute :bash, "$BASEDIR/scripts/generate_st2_config.sh" 26 | end 27 | end 28 | end 29 | end 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /rake/build/upload_checkout.rake: -------------------------------------------------------------------------------- 1 | # Upload and checkout tasks 2 | # 3 | 4 | namespace :upload do 5 | 6 | ## Rule generates upload_to_* tasks (upload to remote nodes). 7 | # 8 | rule %r/^upload:to_/ do |task| 9 | nodename = task.short_name.sub(/^to_/, '') 10 | Rake::Task['upload:sources'].invoke(nodename) 11 | end 12 | 13 | ## Multitask which depends on parameterized upload_sources_* tasks. 14 | # Dependents are evaluated based on pipeopts.upload_sources list. 15 | # 16 | source_tasks = pipeopts.upload_sources.map {|s| :"%sources_#{s}" } 17 | multitask :sources, [:nodename] => source_tasks do |task| 18 | task.reenable 19 | end 20 | 21 | ## Rule generates %sources_* tasks. 22 | # Uploads particular source to a remote node passed as argument. 23 | # 24 | rule %r/^%sources_/, [:nodename] do |task, args| 25 | # Task is restartable, since it's can be invoked with different arguments. 26 | task.reenable 27 | source_path = task.short_name.sub(/^%sources_/, '') 28 | host = pipeopts.send(args[:nodename]).to_s 29 | # Perform only if remote node hostname is provided 30 | unless host.empty? 31 | pipeline do 32 | run hostname: host do |opts| 33 | upload! 
source_path, opts[:basedir], recursive: true 34 | end 35 | end 36 | end 37 | end 38 | 39 | ## Multitask checks out git source of a package (if pipopts.checkout == true) 40 | # 41 | package_list = pipeopts.packages.select {|p| defined?(pipeopts(p).checkout)} 42 | multitask :checkout => package_list.map {|p| :"%checkout_#{p}"} 43 | 44 | ## Rule generates %checkout_* tasks. 45 | # These tasks checkout required git sources. 46 | # 47 | rule %r/^%checkout_/ do |task| 48 | package = context = task.short_name.sub(/^%checkout_/, '') 49 | pipeline context do 50 | run hostname: opts[:buildnode] do |opts| 51 | command label: "checkout: #{package}", show_uuid: false 52 | with opts.env do 53 | execute :mkdir, '-p $ARTIFACT_DIR' 54 | within opts.basedir do 55 | if opts.checkout == 1 56 | execute :git, :clone, '--depth 1 -b $GITREV $GITURL $GITDIR' 57 | end 58 | end 59 | end 60 | end 61 | end 62 | # Invoke post checkout task if it's defined 63 | post_checkout = "package:post_checkout_#{package}" 64 | if Rake::Task.task_defined?(post_checkout) 65 | Rake::Task[post_checkout].invoke 66 | end 67 | end 68 | 69 | end 70 | -------------------------------------------------------------------------------- /rake/formatter.rb: -------------------------------------------------------------------------------- 1 | require 'thread' 2 | require './rake/shellout' 3 | 4 | module SSHKit 5 | module Formatter 6 | class ShellOut < Abstract 7 | HEADERS_LIST = [:label, :uuid, :debug].freeze 8 | HEADERS_COLORS = Hash[ HEADERS_LIST.zip([ 9 | :cyan, :magenta, :light_black 10 | ])].freeze 11 | 12 | Dispatcher = ::ShellOut 13 | attr_accessor :header_spacing, :command_spacing 14 | 15 | def initialize(output) 16 | super 17 | @header_spacing = 2 18 | @command_spacing = 4 19 | Dispatcher.add_header_proc(:info) do |lines, data| 20 | spaces = header_spacing 21 | inject_headers(data[:command], lines, spaces) 22 | end 23 | Dispatcher.add_header_proc(:command) do |lines, data| 24 | spaces = header_spacing + 
command_spacing 25 | inject_headers(data[:command], lines, spaces) 26 | end 27 | end 28 | 29 | def log_command_data(command, stream_type, stream_data) 30 | if [:stdout, :stderr].none? {|t| t == stream_type} 31 | raise "Unrecognised stream_type #{stream_type}, expected :stdout or :stderr" 32 | end 33 | unless command.finished? 34 | message, _, trailing = stream_data.rpartition("\n") 35 | write_command(command, message, trailing) 36 | end 37 | end 38 | 39 | def log_command_start(command) 40 | if command.options[:show_start_message] 41 | host_prefix = command.host.user ? "as #{colorize(command.host.user, :blue)}@" : 'on ' 42 | message = "Running #{colorize(command, :yellow, :bold)} #{host_prefix}#{colorize(command.host, :blue)}" 43 | write_message(command, message) 44 | end 45 | end 46 | 47 | def log_command_exit(command) 48 | if command.options[:show_exit_status] 49 | runtime = sprintf('%5.3f seconds', command.runtime) 50 | successful_or_failed = command.failure? ? colorize('failed', :red, :bold) : colorize('successful', :green, :bold) 51 | message = "Finished in #{runtime} with exit status #{command.exit_status} (#{successful_or_failed})." 52 | write_message(command, message) 53 | end 54 | end 55 | 56 | def write(_obj) 57 | # Nothing, nothing to do 58 | end 59 | 60 | private 61 | 62 | def logger(verbosity) 63 | verbosity.is_a?(Integer) ? 
verbosity : Logger.const_get(verbosity.upcase) 64 | end 65 | 66 | def write_message(command, message, trailing=nil) 67 | Dispatcher.update_data(command.uuid, {command: command}) 68 | Dispatcher << Dispatcher.message(message, trailing, 69 | id: command.uuid, 70 | header: :info) 71 | end 72 | 73 | def write_command(command, message, trailing=nil) 74 | Dispatcher.update_data(command.uuid, {command: command}) 75 | Dispatcher << Dispatcher.message(message, trailing, 76 | id: command.uuid, 77 | header: :command) 78 | end 79 | 80 | # Inject headers into each line of text 81 | def inject_headers(command, lines, spaces=2) 82 | headers = command_headers(command) 83 | spacing = headers.empty? ? 0 : spaces 84 | fullheader = ("%s%#{spacing}s") % [headers, ''] 85 | lines.gsub(/^/m, fullheader) 86 | end 87 | 88 | def command_headers(command) 89 | ShellOut::HEADERS_LIST.inject('') do |result, k| 90 | color = command.failure? ? [:red, :bold] : Array(ShellOut::HEADERS_COLORS[k]) 91 | header = send(:"header_#{k}", command) if respond_to?(:"header_#{k}", true) 92 | header ? 
result << '[%s] [%s]' % [colorize(header, *color), Time.at(Time.new).utc.strftime("%H:%M:%S")] : result 93 | end 94 | end 95 | 96 | def header_label(command) 97 | command.options[:label] and command.options[:label].to_s 98 | end 99 | 100 | def header_uuid(command) 101 | command.options[:show_uuid] and command.uuid and command.uuid.to_s 102 | end 103 | 104 | def header_debug(command) 105 | if logger(command.verbosity || Remote.output_verbosity) == Logger::DEBUG 106 | 'DEBUG' 107 | end 108 | end 109 | 110 | end 111 | end 112 | end 113 | -------------------------------------------------------------------------------- /rake/pipeline.rb: -------------------------------------------------------------------------------- 1 | require 'hashie' 2 | require 'logger' 3 | require 'rake' 4 | require './rake/pipeline_options' 5 | require './rake/remote' 6 | 7 | module Pipeline 8 | module Rake 9 | module TaskDSL 10 | def short_name 11 | name.split(':', 2).pop 12 | end 13 | end 14 | end 15 | end 16 | 17 | module Pipeline 18 | include Pipeline::Options 19 | 20 | # Invoke our sshkit remote wrapper with merged options 21 | def pipeline(context_name=nil, &block) 22 | if block 23 | options = context.fetch 24 | current_options = context.fetch(context_name) 25 | options.merge!(current_options) unless context_name.nil? 
26 | ssh_options = _ssh_options(options.ssh_options) 27 | sshkit_wrapper(options, ssh_options).instance_exec(&block) 28 | end 29 | end 30 | 31 | def self.included(includer) 32 | ::Rake::Task.send(:include, Rake::TaskDSL) 33 | end 34 | 35 | private 36 | 37 | # somehow bothering method missing, using dash 38 | def _ssh_options(mash) 39 | (mash || {}).inject({}) {|hash, (k, v)| hash[k.to_sym] = v; hash} 40 | end 41 | 42 | # Get our sshkit wrapper (sshkit methods are accessible in ssh method DSL) 43 | def sshkit_wrapper(options, ssh_options) 44 | Remote.new(options, ssh_options).tap do 45 | Remote.output_verbosity = logger(options.debug_level || :info) 46 | end 47 | end 48 | 49 | def logger(verbosity) 50 | if verbosity.is_a?(String) && verbosity.match(/^\d/) 51 | verbosity = verbosity.to_i 52 | end 53 | verbosity.is_a?(Integer) ? verbosity : Logger.const_get(verbosity.upcase) 54 | end 55 | end 56 | 57 | include Pipeline 58 | -------------------------------------------------------------------------------- /rake/pipeline_options.rb: -------------------------------------------------------------------------------- 1 | require 'tempfile' 2 | 3 | module Pipeline 4 | module Options 5 | 6 | # This class implements merged mash context. 7 | # Level 1 (named) context merged with level 0 (global). 8 | # 9 | class MergedContext 10 | def initialize 11 | @global ||= Hashie::Mash.new 12 | @named_context ||= Hash.new {|h, k| h[k] = Hashie::Mash.new} 13 | end 14 | 15 | # Lookup value in the MergedContext. 16 | # First try to fetch value from level 1 named context, then fallback 17 | # to level 0. 18 | def lookup_value(key, context=nil) 19 | [named_context[context] || {}, global].map {|c| c[key]}.compact.first 20 | end 21 | 22 | # Get global or named context 23 | def fetch(context=nil) 24 | context.nil? ? global : named_context[context] 25 | end 26 | 27 | private 28 | attr_reader :global, :named_context 29 | end 30 | 31 | # DSL to build up merged context from environment variables. 
32 | class EnvironmentDSL 33 | def initialize(context_name, merged_context) 34 | @context_name = context_name 35 | @context = merged_context 36 | @current = merged_context.fetch(context_name) 37 | end 38 | 39 | # Sets option from env 40 | def env(attribute, default=nil, opts={}) 41 | _, value, opts = parse_attribute(attribute, default, opts) 42 | convert_value(value, opts).tap do |v| 43 | current.assign_property(attribute, v) if v 44 | end 45 | end 46 | 47 | # Sets option from env as well as corresponding options env[attribute] 48 | def envpass(attribute, default=nil, opts={}) 49 | varname, value, opts = parse_attribute(attribute, default, opts) 50 | convert_value(value, opts).tap do |v| 51 | if v 52 | current.assign_property(attribute, v) 53 | current[:env] ||= Hashie::Mash.new 54 | current[:env].merge!({varname => v.to_s}) 55 | end 56 | end 57 | end 58 | 59 | def pipeopts(context_name=nil) 60 | context.fetch(context_name) 61 | end 62 | 63 | def make_tmpname(basename='', tmpdir=Dir.tmpdir) 64 | Dir::Tmpname.make_tmpname File.join(tmpdir, basename), nil 65 | end 66 | 67 | def method_missing(method_name, *args) 68 | if args.size == 0 69 | # act as getter ONLY IF variable was already assigned 70 | value = context.lookup_value(method_name, context_name) 71 | return value unless value.nil? 72 | end 73 | # Otherwise we act as setter (even if args.size == 0) 74 | value = args.size < 2 ? args.pop : args 75 | current.assign_property(method_name, value) 76 | value 77 | end 78 | 79 | private 80 | attr_reader :context_name, :context, :current 81 | 82 | 83 | # Parse attribute read configuration, to build up [varname, value, opts]. 84 | # During parsing we fetch values from ENV. 85 | def parse_attribute(attribute, default, opts) 86 | opts, default = default, nil if default.is_a?(Hash) 87 | # Merge-in default opts! 88 | opts = {upcase: true}.merge!(opts) 89 | caseattr = opts[:upcase] == true ? 
attribute.to_s.upcase : attribute.to_s 90 | value = ENV[opts[:from] || caseattr] 91 | if opts[:reset] 92 | # don't parse env variable, just set value 93 | [caseattr, default, opts] 94 | elsif value == "" 95 | # use default, when passed env is empty 96 | [caseattr, default, opts] 97 | else 98 | [caseattr, value || default, opts] 99 | end 100 | end 101 | 102 | # Convert value if proc option is provided. 103 | def convert_value(value, opts) 104 | if opts[:proc].is_a? Proc 105 | opts[:proc].(value) 106 | else 107 | value 108 | end 109 | end 110 | end 111 | 112 | # Store options into mash either global or context specific. 113 | def pipeopts(context_name=nil, &block) 114 | unless block.nil? 115 | # evaluate pipopts DSL 116 | EnvironmentDSL.new(context_name, context).instance_exec(&block) 117 | else 118 | # return current context 119 | context.fetch(context_name) 120 | end 121 | end 122 | 123 | private 124 | 125 | def context 126 | @context ||= MergedContext.new 127 | end 128 | 129 | end 130 | end 131 | -------------------------------------------------------------------------------- /rake/remote.rb: -------------------------------------------------------------------------------- 1 | require 'sshkit' 2 | require 'forwardable' 3 | require './rake/formatter' 4 | require './rake/shellout' 5 | 6 | class Remote 7 | # SSHkit mimic backend DSL with a bit reduced interface, 8 | # but also with a few enhancments. 9 | class SSHKitDSL 10 | extend Forwardable 11 | attr_reader :backend, :default_command_options 12 | 13 | # Methods which are passed as is 14 | def_delegators :backend, :as, :with, :within, :upload!, :download!, :debug, :info, :warn, :error, :fatal 15 | 16 | # Customized delegated methods, they automatically inject 17 | # options into the call argument list. 
18 | [:test, :capture, :execute].each do |method_name| 19 | define_method method_name do |*args| 20 | command_options = args.extract_options!.merge!(command) 21 | begin 22 | backend.send(method_name, *args, command_options) 23 | rescue SSHKit::Command::Failed 24 | ShellOut.flush 25 | Thread.main.send(:raise, SystemExit.new(false)) 26 | end 27 | end 28 | end 29 | 30 | def make(commands=[], options={}) 31 | execute :make, commands, options 32 | end 33 | 34 | def rake(commands=[], options={}) 35 | execute :rake, commands, options 36 | end 37 | 38 | def initialize(backend) 39 | @backend, @command = backend, default_command_options 40 | end 41 | 42 | def default_command_options 43 | @default_command_options ||= { 44 | show_exit_status: true, 45 | show_start_message: true, 46 | show_uuid: true 47 | } 48 | end 49 | 50 | # Command options settings passed through to sshkit DSL methods 51 | def command(*args) 52 | # Extract_options method pacthes array to return first hash 53 | # (patched in sshkit) 54 | command_options = args.extract_options! 55 | command_options.empty? ? 
@command : @command = default_command_options.merge(command_options) 56 | end 57 | end 58 | 59 | attr_reader :options, :ssh_options 60 | alias :opts :options 61 | 62 | # Cache for sshkit backends 63 | @@backend_cache = {} 64 | 65 | def initialize(options, ssh_options={}) 66 | @options, @ssh_options = options, ssh_options 67 | end 68 | 69 | # Perform operation on a remote node in SSHKitDSL wrapper 70 | def run(host_arg_or_hash, &block) 71 | wrapper = SSHKitDSL.new(fetch_backend(host_arg_or_hash)) 72 | wrapper.instance_exec(options, &block) if block 73 | end 74 | 75 | class << self 76 | extend Forwardable 77 | def_delegators :'SSHKit.config', :output_verbosity, :output_verbosity= 78 | end 79 | 80 | private 81 | 82 | # Retrieve backed for a given host from cache 83 | def fetch_backend(host_arg_or_hash) 84 | host = SSHKit::Host.new(host_arg_or_hash) 85 | if @@backend_cache[host] 86 | @@backend_cache[host] 87 | else 88 | SSHKit.config.use_format(:shellout) 89 | host.ssh_options = ssh_options 90 | host.user ||= 'root' # figure this out, current user? 91 | klass = host.local? ? :Local : :Netssh 92 | @@backend_cache[host] = SSHKit::Backend.const_get(klass).new(host) 93 | end 94 | end 95 | end 96 | -------------------------------------------------------------------------------- /rake/shellout.rb: -------------------------------------------------------------------------------- 1 | require 'thread' 2 | require 'ostruct' 3 | 4 | class ShellOut 5 | # 6 | # It respects ege case when a bunch of lines should be written out 7 | # however there's a trailing line which shouldn't be messed up! 8 | # 9 | class Writer 10 | attr_reader :id 11 | 12 | def initialize(id) 13 | @id = id 14 | @trailing = nil 15 | end 16 | 17 | def trailing=(value) 18 | unless value.to_s.empty? 19 | @trailing = value 20 | end 21 | end 22 | 23 | def trailing 24 | @trailing.to_s.empty? ? nil : @trailing 25 | end 26 | 27 | def trailing! 
28 | trailing.dup.tap { @trailing = nil } 29 | end 30 | 31 | # Indicates that a writer hasn't yet flushed its trailing message 32 | def pending? 33 | !trailing.nil? 34 | end 35 | end 36 | 37 | class << self 38 | attr_reader :thread 39 | 40 | def update_data(id, hash) 41 | if hash.is_a?(Hash) && !hash.empty? 42 | userdata[id] = userdata[id].merge(hash) 43 | else 44 | {} 45 | end 46 | end 47 | 48 | def output=(new_value) 49 | @output = new_value 50 | end 51 | 52 | def output 53 | @output ||= $stdout 54 | end 55 | 56 | def header_procs 57 | @header_procs ||= Hash.new 58 | end 59 | 60 | def add_header_proc(procid, &header_proc) 61 | header_procs[procid] = header_proc if header_proc 62 | end 63 | 64 | # Creates ShellOut message 65 | def message(message, trailing=nil, options={}) 66 | raise ArgumentError, "#message requires options[:id]" unless options[:id] 67 | OpenStruct.new( id: options[:id], 68 | message: message, 69 | trailing: trailing, 70 | options: options 71 | ) 72 | end 73 | 74 | # Enqueue messgage to ShellOut 75 | def <<(obj) 76 | if [:id, :message, :trailing].all? {|m| obj.respond_to?(m)} 77 | enqueue_message(obj) 78 | else 79 | raise ArgumentError, "#<< method requires argument responding to :id, :message, :trailing" 80 | end 81 | end 82 | 83 | def stop 84 | flush 85 | thread.exit 86 | end 87 | 88 | # Flush the output queue 89 | def flush 90 | Thread.exclusive do 91 | while !output_queue.empty? 
do 92 | write_message(output_queue.pop) 93 | end 94 | end 95 | end 96 | 97 | # Consume ShellOut output queue 98 | def run 99 | start_processing 100 | at_exit { ShellOut.stop } 101 | end 102 | 103 | private 104 | 105 | attr_reader :thread 106 | 107 | def start_processing 108 | return if @thread 109 | @thread = Thread.new do 110 | while true do 111 | write_message(output_queue.pop) 112 | end 113 | end 114 | end 115 | 116 | # Custom user data, for holding various user information 117 | def userdata 118 | @userdata ||= Hash.new({}) 119 | end 120 | 121 | def semaphore 122 | @semaphore ||= Mutex.new 123 | end 124 | 125 | # Enqueues a new message obj into the ShellOut queue 126 | def enqueue_message(obj) 127 | output_queue << obj 128 | end 129 | 130 | # Write trailing message returns a new message with 131 | # a clean out continuation. 132 | def write_trailing_message(obj) 133 | if writer_for(obj).pending? 134 | continuation, new_message = obj.message.split("\n", 2) 135 | trailing = writer_for(obj).trailing! 136 | output << "%s%s\n" % [trailing, continuation] 137 | new_message 138 | else 139 | obj.message 140 | end 141 | end 142 | 143 | # Atomic message output, we synchronize parallel threads 144 | def write_message(obj) 145 | semaphore.synchronize do 146 | new_message = write_trailing_message(obj) 147 | if new_message 148 | obj = obj.dup 149 | obj.message = new_message.chomp 150 | inject_headers!(obj) 151 | writer_for(obj).trailing = obj.trailing 152 | output << obj.message + "\n" 153 | output.flush 154 | end 155 | end 156 | end 157 | 158 | # Invoke header proc over a message object. 159 | def inject_headers!(obj) 160 | procid = obj[:options][:header] 161 | if procid && header_proc = header_procs[procid] 162 | data = userdata[obj.id] 163 | obj.trailing, trailing = nil, obj.trailing 164 | obj.message = header_proc.call(obj.message, data) 165 | obj.trailing = header_proc.call(trailing, data) if !trailing.to_s.empty? 
166 | end 167 | end 168 | 169 | def output_queue 170 | @output_queue ||= Queue.new 171 | end 172 | 173 | def writer_for(obj) 174 | writers[obj.id] 175 | end 176 | 177 | def writers 178 | @writers ||= Hash.new {|hash, id| hash[id] = Writer.new(id)} 179 | end 180 | end 181 | end 182 | 183 | 184 | # We have to run and finalize threaded output dispatcher. 185 | # Otherwise we won't see any output :) 186 | ShellOut.run 187 | -------------------------------------------------------------------------------- /rake/spec/default/10-package_st2-consistency_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | require 'spec_package_iterables' 3 | 4 | # OS package shared example group 5 | # 6 | shared_examples 'os package' do |name, _opts| 7 | extend SpecPackageIterables 8 | set_context_vars(name, _opts) 9 | 10 | describe package(name) do 11 | it { is_expected.to be_installed } 12 | end 13 | 14 | # Check for presence of users 15 | # 16 | context 'users' do 17 | set_context_vars(name, _opts) 18 | 19 | get_users do |u, opts| 20 | describe user(u) do 21 | it { is_expected.to exist } 22 | it { is_expected.to instance_eval(&opts[:example])} if opts[:example] 23 | end 24 | end 25 | end 26 | 27 | context 'files' do 28 | set_context_vars(name, _opts) 29 | 30 | # Check for presences of directories 31 | # 32 | get_directories do |path, opts| 33 | describe file(path) do 34 | it { is_expected.to be_directory } 35 | it { is_expected.to instance_eval(&opts[:example])} if opts[:example] 36 | end 37 | end 38 | 39 | # Check files 40 | # 41 | get_files do |path, opts| 42 | describe file(path) do 43 | it { is_expected.to be_file } 44 | it { is_expected.to instance_eval(&opts[:example])} if opts[:example] 45 | end 46 | end 47 | 48 | # Check binaries 49 | # 50 | get_binaries do |bin_name, opts| 51 | unless bin_name.start_with? 
'/' 52 | prefix = File.join(spec[:bin_prefix], '') 53 | end 54 | 55 | describe file("#{prefix}#{bin_name}") do 56 | it_behaves_like 'script or binary' 57 | it { is_expected.to instance_eval(&opts[:example])} if opts[:example] 58 | end 59 | end 60 | end 61 | 62 | context 'services' do 63 | set_context_vars(name, _opts) 64 | 65 | # Check services 66 | # 67 | get_services do |service_name, opts| 68 | binary_name = opts[:binary_name] || service_name 69 | describe file("/opt/stackstorm/#{venv_name}/bin/#{binary_name}") do 70 | it_behaves_like 'script or binary' 71 | end 72 | end 73 | end 74 | end 75 | 76 | # Main example group checking package consistency 77 | # 78 | describe 'packages consistency' do 79 | spec[:package_list].each do |pkg_name| 80 | it_behaves_like 'os package', pkg_name, spec[:package_opts][pkg_name] 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /rake/spec/default/10-package_st2-logfiles_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | module LogHelpers 4 | # Get package name of particular st2 service 5 | def package_name(svc_name) 6 | found = spec[:package_has_services].find do |(_, list)| 7 | list.include? svc_name 8 | end 9 | found ? 
found.first : svc_name 10 | end 11 | 12 | # Get config path of st2 service 13 | def config_path(svc_name) 14 | # strip st2 prefix 15 | noprefix_name = svc_name.sub(/^st2/, '') 16 | config_name = ['logging', noprefix_name, 'conf'].compact.join('.') 17 | File.join([spec[:conf_dir], config_name]) 18 | end 19 | 20 | # Get log destination regex list 21 | def dest_re_list(svc_name) 22 | pattern = spec[:logdest_pattern][svc_name] || svc_name 23 | [ 24 | /#{File.join(spec[:log_dir], pattern)}.log/, 25 | /#{File.join(spec[:log_dir], pattern)}.audit.log/ 26 | ] 27 | end 28 | end 29 | 30 | # Checking log configuration file if it has the correct output destination 31 | # 32 | describe 'logs configuration' do 33 | extend LogHelpers 34 | 35 | spec[:st2_services].each do |svc_name| 36 | # Don't test logging configuration for a service if its package is not installed 37 | next unless spec[:package_list].include?(package_name(svc_name)) 38 | 39 | describe file(config_path(svc_name)) do 40 | let(:content) { described_class.content } 41 | re_list = dest_re_list(svc_name) 42 | 43 | it { is_expected.to be_file } 44 | it "should match #{re_list.map(&:inspect).join(', ')}" do 45 | re_list.each { |re| expect(content.match(re)).not_to be_nil } 46 | end 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /rake/spec/default/50-installed-cli-versions_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | describe 'st2 cli version checks' do 4 | describe command("st2 --version") do 5 | its(:exit_status) { is_expected.to eq 0 } 6 | its(:stdout) { should match /on Python (3.8|3.9|3.10|3.11)/ } 7 | # show version number in Rspec output 8 | after(:all) do 9 | puts puts " " + described_class.stderr 10 | end 11 | end 12 | 13 | end 14 | -------------------------------------------------------------------------------- /rake/spec/default/60-st2_all-services-ok_spec.rb: 
-------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | require 'examples/show-service-log-on-failure' 3 | 4 | describe 'external services' do 5 | # Buggy buggy netcat vs serverspec :( 6 | # Hostnames have to priorly resolved as addresses. 7 | 8 | describe 'rabbitmq' do 9 | subject { host(spec[:rabbitmqhost]) } 10 | it { is_expected.to be_reachable.with :port => 5672, :timeout => 1 } 11 | end 12 | 13 | describe 'mongodb' do 14 | subject { host(spec[:mongodbhost]) } 15 | it { is_expected.to be_reachable.with :port => 27017, :timeout => 1 } 16 | end 17 | 18 | describe 'redis' do 19 | subject { host(spec[:redishost]) } 20 | it { is_expected.to be_reachable.with :port => 6379, :timeout => 1 } 21 | end 22 | 23 | end 24 | 25 | describe 'start st2 components and services' do 26 | before(:all) do 27 | puts "===> Starting st2 services #{spec[:service_list].join(', ')}..." 28 | remote_start_services(spec[:service_list]) 29 | puts 30 | end 31 | 32 | # Run register content 33 | describe command(spec[:register_content_command]) do 34 | its(:exit_status) { is_expected.to eq 0 } 35 | after(:all) do 36 | if described_class.exit_status > 0 37 | puts "Register content has failed!", '>>>>>', 38 | described_class.stderr 39 | puts 40 | end 41 | end 42 | end 43 | end 44 | 45 | # Check if component services are running/enabled 46 | describe 'st2 services' do 47 | include_examples 'show service log on failure' 48 | 49 | spec[:service_list].each do |name| 50 | describe service(name), prompt_on_failure: true do 51 | it { is_expected.to be_running } 52 | it { should be_enabled } 53 | end 54 | end 55 | 56 | describe 'st2auth', prompt_on_failure: true do 57 | subject { port(9100) } 58 | it { should be_listening } 59 | end 60 | 61 | describe 'st2api', prompt_on_failure: true do 62 | subject { port(9101) } 63 | it { should be_listening } 64 | end 65 | 66 | describe 'st2stream', prompt_on_failure: true do 67 | subject { port(9102) } 68 | it { 
should be_listening } 69 | end 70 | 71 | end 72 | 73 | # all st2 services should work immediately after restart 74 | describe 'st2 services availability after restart' do 75 | describe command("st2ctl restart && st2 action list") do 76 | its(:exit_status) { is_expected.to eq 0 } 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /rake/spec/default/70-st2_actions-integrity_spec.rb: -------------------------------------------------------------------------------- 1 | # Check that st2 run and st2 pack commands execute successfully 2 | describe 'st2 actions integrity checks' do 3 | describe command("st2 run core.local -- hostname") do 4 | its(:exit_status) { is_expected.to eq 0 } 5 | end 6 | 7 | describe command("st2 pack install hubot") do 8 | its(:exit_status) { is_expected.to eq 0 } 9 | end 10 | 11 | describe command("st2 run core.local cmd=locale") do 12 | its(:stdout) { should match /UTF-8/ } 13 | end 14 | 15 | describe command("st2 run core.local cmd=\"echo '¯\_(ツ)_/¯'\"") do 16 | its(:exit_status) { is_expected.to eq 0 } 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /rake/spec/default/99-uninstall-system-packages_spec.rb: -------------------------------------------------------------------------------- 1 | # Check that StackStorm packages can be uninstalled without errors 2 | describe 'st2 packages uninstall test' do 3 | spec[:package_list].each do |pkg_name| 4 | if os[:family] == 'redhat' 5 | describe command("sudo yum -y remove #{pkg_name}") do 6 | its(:exit_status) { is_expected.to eq 0 } 7 | end 8 | elsif os[:family] == 'ubuntu' 9 | describe command("sudo apt-get remove -y --purge #{pkg_name}") do 10 | its(:exit_status) { is_expected.to eq 0 } 11 | end 12 | end 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /rake/spec/examples/show-service-log-on-failure.rb: 
-------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | # Share example showing remote logs of failed to start services 4 | # 5 | shared_examples 'show service log on failure' do 6 | before(:all) { @failed_services = [] } 7 | 8 | after(:each, prompt_on_failure: true) do |example| 9 | @failed_services << example if example.exception 10 | end 11 | 12 | after(:all) do 13 | unless @failed_services.empty? 14 | puts '===> Showing output from log files of the failed services' 15 | @failed_services.each do |example| 16 | service = example.metadata[:described_class] 17 | lines_num = spec[:loglines_to_show] 18 | 19 | unless service.is_a? Serverspec::Type::Service 20 | fail 'Serverspec service is required to be described class!' 21 | end 22 | 23 | output = remote_grab_service_logs(service.name, lines_num) 24 | unless output.empty? 25 | puts "\nlast #{lines_num} lines from log file of service " \ 26 | "#{service.name}" 27 | puts '>>>', output 28 | end 29 | end 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /rake/spec/remote_helpers.rb: -------------------------------------------------------------------------------- 1 | module RemoteHelpers 2 | # Module provides helpers for various remote operations. 3 | 4 | # start service or a list of services 5 | def remote_start_services(sv_or_list) 6 | Array(sv_or_list).each do |sv| 7 | sv_start_cmd = spec.backend.command.get(:start_service, sv) 8 | spec.backend.run_command(sv_start_cmd) 9 | end 10 | end 11 | 12 | # Use different ways to grab logs on a remote spec instance. 13 | def remote_init_type 14 | probe_cmd = <<-EOS 15 | ls -1 /etc/debian_version 1>/dev/null 2>&1 && echo debian && exit 16 | ls -1 /usr/bin/systemctl 1>/dev/null 2>&1 && echo systemd && exit 17 | EOS 18 | svtype = spec.backend.run_command(probe_cmd).stdout 19 | svtype.empty? ? 
nil : svtype.strip.to_sym 20 | end 21 | 22 | # Grab remote service stdout logs 23 | def remote_grab_service_stdout(service_name, lines_num = 30) 24 | init_type = remote_init_type 25 | output = case init_type 26 | when :systemd 27 | spec.backend.run_command("journalctl -n #{lines_num} -u #{service_name}").stdout 28 | else 29 | '' 30 | end 31 | if output.empty? 32 | "!!! Couldn't locate #{service_name} #{init_type} service stdout logs" 33 | else 34 | output 35 | end 36 | end 37 | 38 | # Just tail latest remote log file 39 | def remote_tail_logfile(path, lines_num = 20) 40 | cat_cmd = <<-EOS 41 | file=$(ls -1t #{path}*.log \ 42 | 2>/dev/null | sed '1!d') 43 | [ -z "$file" ] || { cat "$file" | tail -n #{lines_num}; } 44 | EOS 45 | spec.backend.run_command(cat_cmd).stdout 46 | end 47 | 48 | # Grab remote logs, try logfile or try servicestdout logs 49 | def remote_grab_service_logs(service_name, lines_num = 20) 50 | path = File.join(spec[:log_dir], service_name) 51 | output = remote_tail_logfile(path, lines_num) 52 | if output.empty? 
53 | remote_grab_service_stdout(service_name, lines_num) 54 | else 55 | output 56 | end 57 | end 58 | end 59 | -------------------------------------------------------------------------------- /rake/spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 'hashie' 2 | require 'specinfra' 3 | require 'serverspec' 4 | require 'remote_helpers' 5 | require './rake/pipeline_options' 6 | 7 | 8 | SSH_OPTIONS = { 9 | user: 'root', 10 | keys: ['/root/.ssh/busybee'], 11 | keys_only: true 12 | } 13 | 14 | set :backend, :ssh 15 | set :host, ENV['TESTNODE'] 16 | set :ssh_options, SSH_OPTIONS 17 | set :env, LANG: 'en_US.UTF-8', LC_ALL: 'en_US.UTF-8' 18 | 19 | # ST2Spec 20 | class ST2Spec 21 | extend Pipeline::Options 22 | instance_eval(File.read('rake/build/environment.rb')) 23 | 24 | ST2_SERVICES = %w(st2api st2stream st2auth st2actionrunner st2notifier st2workflowengine 25 | st2rulesengine st2timersengine st2sensorcontainer st2garbagecollector st2scheduler) 26 | 27 | SPECCONF = { 28 | bin_prefix: '/usr/bin', 29 | conf_dir: '/etc/st2', 30 | log_dir: '/var/log/st2', 31 | package_list: pipeopts.packages, 32 | rabbitmqhost: pipeopts.rabbitmqhost, 33 | mongodbhost: pipeopts.mongodbhost, 34 | redishost: pipeopts.redishost, 35 | loglines_to_show: 100, 36 | logdest_pattern: { 37 | st2actionrunner: 'st2actionrunner.{pid}' 38 | }, 39 | register_content_command: '/usr/bin/st2-register-content' \ 40 | ' --register-fail-on-failure' \ 41 | ' --register-all' \ 42 | ' --config-dir /etc/st2', 43 | 44 | st2_services: ST2_SERVICES, 45 | package_opts: {}, 46 | 47 | package_has_services: { 48 | st2: ST2_SERVICES, 49 | }, 50 | 51 | package_has_binaries: { 52 | st2: %w(st2 st2ctl st2-bootstrap-rmq st2-register-content st2-rule-tester st2-run-pack-tests 53 | st2-trigger-refire st2 st2-self-check st2-track-result 54 | st2-validate-pack-config st2-generate-symmetric-crypto-key st2-apply-rbac-definitions), 55 | }, 56 | 57 | package_has_directories: 
{ 58 | st2: [ 59 | '/etc/st2', 60 | '/etc/logrotate.d', 61 | '/opt/stackstorm/packs', 62 | '/opt/stackstorm/overrides', 63 | [ '/var/log/st2', example: Proc.new {|_| be_writable.by('owner')} ] 64 | ], 65 | }, 66 | 67 | package_has_files: { 68 | st2: %w(/etc/st2/st2.conf /etc/logrotate.d/st2), 69 | }, 70 | 71 | package_has_users: { 72 | st2: [ 73 | 'st2', 74 | ['stanley', example: Proc.new {|_| have_home_directory '/home/stanley'} ] 75 | ], 76 | } 77 | } 78 | 79 | class << self 80 | ROUTED = [ 81 | :service_list 82 | ] 83 | 84 | # spec conf reader 85 | def [](key) 86 | if ROUTED.include? key.to_sym 87 | send(key) 88 | else 89 | spec[key] 90 | end 91 | end 92 | 93 | def service_list 94 | @services_available ||= begin 95 | list = ST2_SERVICES 96 | list 97 | end 98 | end 99 | 100 | def spec 101 | @spec ||= Hashie::Mash.new(SPECCONF) 102 | end 103 | 104 | def backend 105 | @backend ||= Specinfra::Backend::Ssh.new( 106 | host: ENV['TESTHOST'], 107 | ssh_options: ::SSH_OPTIONS 108 | ) 109 | end 110 | end 111 | 112 | module Mixin 113 | def spec 114 | ST2Spec 115 | end 116 | end 117 | end 118 | 119 | RSpec.configure do |c| 120 | [ST2Spec::Mixin, RemoteHelpers].each do |m| 121 | c.send(:include, m) 122 | c.send(:extend, m) 123 | end 124 | end 125 | 126 | # Tests binary or script, in later case checks interpreater. 
127 | shared_examples 'script or binary' do 128 | it { is_expected.to be_file & be_executable } 129 | 130 | shebang = /^#!(?.*?)$/m 131 | if described_class.content.match(shebang) 132 | # Note: We skip /usr/bin/env lines 133 | interpreter_path = Regexp.last_match[:interpreter] 134 | if not Regexp.last_match[:interpreter].start_with?("/usr/bin/env") 135 | 136 | describe file(interpreter_path) do 137 | it { is_expected.to be_file & be_executable } 138 | end 139 | end 140 | end 141 | end 142 | -------------------------------------------------------------------------------- /rake/spec/spec_package_iterables.rb: -------------------------------------------------------------------------------- 1 | module SpecPackageIterables 2 | attr_reader :name, :venv_name, :opts 3 | 4 | def set_context_vars(name, opts) 5 | @name = name 6 | @opts = Hashie::Mash.new.merge(opts || {}) 7 | # we use different venv name for st2 package (bundle) 8 | @venv_name = (name.to_s == 'st2' ? 'st2' : name).to_s 9 | end 10 | 11 | # Collection iterating methods over spec lists 12 | # opts[:files] + spec[:package_has_files], etc. 13 | %w( 14 | users 15 | files 16 | directories 17 | binaries 18 | services 19 | ).each do |collection| 20 | class_eval <<-"end_eval", __FILE__, __LINE__ 21 | def get_#{collection}(&block) 22 | list = Array(self.opts[:#{collection}]) + 23 | Array(self.spec[:package_has_#{collection}][name]) 24 | list.each do |v| 25 | # Invoke w/wo opts. For example if pair is given ['stanley', {'home'=>true}] 26 | # it's passed as is if just a value such as 'st2' it'll be passed as ['st2', {}]. 27 | v.is_a?(Array) ? block.call(v) : block.call([v, {}]) 28 | end 29 | end 30 | end_eval 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /rpmspec/helpers.spec: -------------------------------------------------------------------------------- 1 | # !!! Following list of variables must be defined before this file is included. 
2 | # - package 3 | 4 | # Cat debian/package.dirs, set buildroot prefix and create directories. 5 | %define debian_dirs cat debian/%{name}.dirs | grep -v '^\\s*#' | sed 's~^~%{buildroot}/~' | \ 6 | while read dir_path; do \ 7 | mkdir -p "${dir_path}" \ 8 | done \ 9 | %{nil} 10 | 11 | # Cat debian/package.links, set buildroot prefix and create symlinks. 12 | %define debian_links cat debian/%{name}.links | grep -v '^\\s*#' | \ 13 | sed -r -e 's~\\b~/~' -e 's~\\s+\\b~ %{buildroot}/~' | \ 14 | while read link_rule; do \ 15 | linkpath=$(echo "$link_rule" | cut -f2 -d' ') && [ -d $(dirname "$linkpath") ] || \ 16 | mkdir -p $(dirname "$linkpath") && ln -s $link_rule \ 17 | done \ 18 | %{nil} 19 | 20 | # Cat debian/install, set buildroot prefix and copy files. 21 | %define debian_install cat debian/install | grep -v '^\s*#' | sed -r -e 's~ lib/systemd~ usr/lib/systemd~' -e 's~ +~ %{buildroot}/~' | \ 22 | while read copy_rule; do \ 23 | parent=$(echo "$copy_rule" | cut -f2 -d' ') \ 24 | [ -d "$parent" ] || install -d "$parent" && cp -r $copy_rule \ 25 | done \ 26 | %{nil} 27 | 28 | # We hate duplication right :)?, so let's use debian files 29 | %define default_install \ 30 | %debian_dirs \ 31 | %debian_install \ 32 | %debian_links \ 33 | %make_install \ 34 | %{nil} 35 | 36 | # Find a supported version of Python. 37 | %define pyexecutable %(export PYEXEC=""; for pyv in 3.{11..8}; do PYEXEC=$(command -v python$pyv); test -n "$PYEXEC" && basename $PYEXEC && break; done) 38 | 39 | ## Clean up RECORD and some other files left by python, which may contain 40 | # absolute buildroot paths. 
41 | %define cleanup_python_abspath \ 42 | find %{buildroot} -name RECORD -o -name '*.egg-link' -o -name '*.pth' | \ 43 | xargs -I{} -n1 sed -i 's@%{buildroot}@@' {} \ 44 | %{nil} 45 | 46 | #Cleanup .so files that contain buildroot 47 | %define cleanup_so_abspath \ 48 | for f in `find %{venv_dir}/lib -type f -name "*.so" | \ 49 | xargs grep -l %{buildroot} `; do strip $f; done \ 50 | %{nil} 51 | 52 | # Define use_systemd to know if we on a systemd system 53 | # 54 | %if 0%{?_unitdir:1} 55 | %define use_systemd 1 56 | %endif 57 | 58 | ## St2 package version parsing 59 | # if package name starts with st2 then it's st2 component. 60 | # 61 | %if %(PKG=%{package}; [ "${PKG##st2}" != "$PKG" ] && echo 1 || echo 0 ) == 1 62 | %define st2pkg_version %(%{pyexecutable} -c "from %{package} import __version__; print(__version__),") 63 | # st2 package version parsing 64 | %endif 65 | 66 | # Redefine and to drop python brp bytecompile 67 | # 68 | %define __os_install_post() \ 69 | /usr/lib/rpm/brp-compress \ 70 | %{!?__debug_package:/usr/lib/rpm/brp-strip %{__strip}} \ 71 | /usr/lib/rpm/brp-strip-static-archive %{__strip} \ 72 | /usr/lib/rpm/brp-strip-comment-note %{__strip} %{__objdump} \ 73 | %{nil} 74 | 75 | # Install systemd service into the package 76 | # 77 | %define service_install() \ 78 | for svc in %{?*}; do \ 79 | install -D -p -m0644 %{SOURCE0}/rpm/$svc.service %{buildroot}%{_unitdir}/$svc.service \ 80 | [ -f %{SOURCE0}/rpm/$svc.socket ] && install -D -p -m0644 %{SOURCE0}/rpm/$svc.socket %{buildroot}%{_unitdir}/$svc.socket \ 81 | done \ 82 | %{nil} 83 | 84 | # Service post stage action 85 | # enables used to enforce the policy, which seems to be disabled by default 86 | # 87 | %define service_post() \ 88 | %{expand: %systemd_post %%{?*}} \ 89 | systemctl --no-reload enable %{?*} >/dev/null 2>&1 || : \ 90 | %{nil} 91 | 92 | # Service preun stage action 93 | # 94 | %define service_preun() \ 95 | %{expand: %systemd_preun %%{?*}} \ 96 | %{nil} 97 | 98 | # Service postun 
stage action 99 | # ($1 > 1 on package upgrade) 100 | # 101 | %define service_postun() \ 102 | %{expand: %systemd_postun_with_restart %%{?*}} \ 103 | %{nil} 104 | -------------------------------------------------------------------------------- /rpmspec/package_venv.spec: -------------------------------------------------------------------------------- 1 | %{!?venv_name: %define venv_name %{package}} 2 | %define div_links bin/st2-bootstrap-rmq bin/st2-register-content 3 | %define wheel_dir %(echo ${WHEELDIR:-/tmp/wheelhouse}) 4 | 5 | # virtualenv macros 6 | %define venv_install_dir opt/stackstorm/%{venv_name} 7 | %define venv_dir %{buildroot}/%{venv_install_dir} 8 | %define venv_bin %{venv_dir}/bin 9 | 10 | %if 0%{?rhel} == 8 11 | %define python_binname python3.8 12 | %define pip_binname pip3.8 13 | %else 14 | %define python_binname python3 15 | %define pip_binname pip3 16 | %endif 17 | 18 | %define venv_python %{venv_bin}/%{python_binname} 19 | # https://github.com/StackStorm/st2/wiki/Where-all-to-update-pip-and-or-virtualenv 20 | 21 | %define pin_pip %{venv_python} %{venv_bin}/%{pip_binname} install pip==24.2 22 | %define install_venvctrl %{python_binname} -m pip install venvctrl 23 | 24 | %if 0%{?rhel} == 8 25 | %define install_crypto %{venv_python} %{venv_bin}/pip3.8 install cryptography==43.0.1 26 | %else 27 | %define install_crypto %{nil} 28 | %endif 29 | 30 | %define venv_pip %{venv_python} %{venv_bin}/pip3 install --find-links=%{wheel_dir} --no-index 31 | 32 | # Change the virtualenv path to the target installation directory. 33 | # - Install dependencies 34 | # - Install package itself 35 | 36 | # EL8 requires crypto built locally. venvctrl must be available outside of venv. 37 | %define pip_install_venv \ 38 | %{python_binname} -m venv %{venv_dir} \ 39 | %{pin_pip} \ 40 | %{install_crypto} \ 41 | %{venv_pip} --use-deprecated=legacy-resolver -r requirements.txt \ 42 | %{venv_pip} --use-deprecated=legacy-resolver . 
\ 43 | %{install_venvctrl} \ 44 | venvctrl-relocate --source=%{venv_dir} --destination=/%{venv_install_dir} \ 45 | %{nil} 46 | -------------------------------------------------------------------------------- /rpmspec/st2pkg_toptags.spec: -------------------------------------------------------------------------------- 1 | # package must be defined before inclusion! 2 | 3 | %define _sourcedir ../ 4 | %define _builddir %{SOURCE0} 5 | 6 | %include ../rpmspec/helpers.spec 7 | %include ../rpmspec/package_venv.spec 8 | 9 | %define version %(echo "${ST2PKG_VERSION:-%{st2pkg_version}}") 10 | %define release %(echo "${ST2PKG_RELEASE:-1}") 11 | 12 | Name: %{package} 13 | Version: %{version} 14 | Release: %{release} 15 | Group: System/Management 16 | License: ASL 2.0 17 | Url: https://github.com/StackStorm/st2 18 | Source0: %{package} 19 | -------------------------------------------------------------------------------- /scripts/build_os_package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -o pipefail 4 | 5 | package_name="$1" 6 | artifact_dir="${ARTIFACT_DIR}" 7 | cores_num=$(/usr/bin/nproc) 8 | 9 | export WHEELDIR 10 | 11 | if [[ -z "$package_name" ]]; then 12 | echo "Usage: $0 package_name" 13 | exit 1; 14 | fi 15 | 16 | # Determine build target from available build software. 17 | # Caveat: The case of RPM systems with debian build software installed or vice-versa is not handled. 18 | if command -v dpkg-buildpackage; then 19 | export PKGTYPE=deb 20 | elif command -v rpmbuild; then 21 | export PKGTYPE=rpm 22 | else 23 | echo "Unable to build package because one of dpkg-buildpackage or rpmbuild wasn't found." 24 | echo "This means the build environment isn't setup correctly or the build system isn't supported." 
25 | exit 1 26 | fi 27 | 28 | # NOTE: If you want to troubleshoot rpmbuild, add -vv flag to enable debug mode 29 | build_rpm() { 30 | rpmbuild -bb --define '_topdir %(readlink -f build)' rpm/"$package_name".spec; 31 | } 32 | 33 | build_deb() { 34 | dpkg-buildpackage -b -uc -us -j"$cores_num" 35 | } 36 | 37 | copy_rpm() { 38 | sudo cp -v build/RPMS/*/$1*.rpm "$artifact_dir"; 39 | # Also print some package info for easier troubleshooting 40 | rpm -q --requires -p build/RPMS/*/"$1"*.rpm 41 | rpm -q --provides -p build/RPMS/*/"$1"*.rpm 42 | } 43 | 44 | copy_deb() { 45 | sudo cp -v ../"$package_name"*.deb "$artifact_dir" || { echo "Failed to copy .deb file into artifact directory \`$artifact_dir'" ; exit 1; } 46 | sudo cp -v ../"$package_name"{*.changes,*.dsc} "$artifact_dir" || :; 47 | } 48 | 49 | "build_${PKGTYPE}" 50 | "copy_${PKGTYPE}" 51 | -------------------------------------------------------------------------------- /scripts/generate_st2_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Config scripts mangles /etc/st2/st2.conf to substitute needed values into 4 | # the configuration file. 5 | # 6 | set -e 7 | 8 | # --- Go! 
9 | MONGOHOST="${MONGODBHOST:-mongodb}" 10 | RABBITMQHOST="${RABBITMQHOST:-rabbitmq}" 11 | REDISHOST="${REDISHOST:-redis}" 12 | 13 | CONF=/etc/st2/st2.conf 14 | AMQP="amqp://guest:guest@$RABBITMQHOST:5672/" 15 | MONGO=$(cat </dev/null) || echo "$MONGO" >> /etc/st2/st2.conf 32 | 33 | # Specify redis host 34 | sed -i "/\[coordination\]/,/\[.*\]\|url/ {n; s#url.*=.*#url = $REDIS#}" $CONF 35 | 36 | echo "Resulting $CONF >>>" "$(cat $CONF)" 37 | -------------------------------------------------------------------------------- /scripts/includes/common.sh: -------------------------------------------------------------------------------- 1 | function configure_proxy() { 2 | # Allow bypassing 'proxy' env vars via sudo 3 | local sudoers_proxy='Defaults env_keep += "http_proxy https_proxy no_proxy proxy_ca_bundle_path DEBIAN_FRONTEND"' 4 | if ! sudo grep -s -q ^"${sudoers_proxy}" /etc/sudoers.d/st2; then 5 | sudo sh -c "echo '${sudoers_proxy}' >> /etc/sudoers.d/st2" 6 | fi 7 | 8 | # Configure proxy env vars for 'st2api', 'st2actionrunner' and 'st2chatops' system configs 9 | # See: https://docs.stackstorm.com/packs.html#installing-packs-from-behind-a-proxy 10 | local service_config_path=$(hash apt-get >/dev/null 2>&1 && echo '/etc/default' || echo '/etc/sysconfig') 11 | for service in st2api st2actionrunner st2chatops; do 12 | service_config="${service_config_path}/${service}" 13 | # create file if doesn't exist yet 14 | sudo test -e ${service_config} || sudo touch ${service_config} 15 | for env_var in http_proxy https_proxy no_proxy proxy_ca_bundle_path; do 16 | # delete line from file if specific proxy env var is unset 17 | if sudo test -z "${!env_var:-}"; then 18 | sudo sed -i "/^${env_var}=/d" ${service_config} 19 | # add proxy env var if it doesn't exist yet 20 | elif ! sudo grep -s -q ^"${env_var}=" ${service_config}; then 21 | sudo sh -c "echo '${env_var}=${!env_var}' >> ${service_config}" 22 | # modify existing proxy env var value 23 | elif ! 
sudo grep -s -q ^"${env_var}=${!env_var}$" ${service_config}; then 24 | sudo sed -i "s#^${env_var}=.*#${env_var}=${!env_var}#" ${service_config} 25 | fi 26 | done 27 | done 28 | } 29 | 30 | function get_package_url() { 31 | # Retrieve direct package URL for the provided dev build, subtype and package name regex. 32 | DEV_BUILD=$1 # Repo name and build number - / (e.g. st2/5646) 33 | DISTRO=$2 # Distro name (e.g. focal,jammy,el8,el9) 34 | 35 | PACKAGE_NAME_REGEX=$3 36 | 37 | PACKAGES_METADATA=$(curl -sSL -q https://circleci.com/api/v1.1/project/github/StackStorm/${DEV_BUILD}/artifacts) 38 | 39 | if [ -z "${PACKAGES_METADATA}" ]; then 40 | echo "Failed to retrieve packages metadata from https://circleci.com/api/v1.1/project/github/StackStorm/${DEV_BUILD}/artifacts" 1>&2 41 | return 2 42 | fi 43 | 44 | PACKAGES_URLS="$(echo ${PACKAGES_METADATA} | jq -r '.[].url')" 45 | PACKAGE_URL=$(echo "${PACKAGES_URLS}" | egrep "${DISTRO}/${PACKAGE_NAME_REGEX}") 46 | 47 | if [ -z "${PACKAGE_URL}" ]; then 48 | echo "Failed to find url for ${DISTRO} package (${PACKAGE_NAME_REGEX})" 1>&2 49 | echo "Circle CI response: ${PACKAGES_METADATA}" 1>&2 50 | return 2 51 | fi 52 | 53 | echo ${PACKAGE_URL} 54 | } 55 | 56 | 57 | function port_status() { 58 | # If the specified tcp4 port is bound, then return the "port pid/procname", 59 | # else if a pipe command fails, return "Unbound", 60 | # else return "". 61 | # 62 | # Please note that all return values end with a newline. 63 | # 64 | # Use netstat and awk to get a list of all the tcp4 sockets that are in the LISTEN state, 65 | # matching the specified port. 
66 | # 67 | # The `netstat -tunlp --inet` command is assumed to output data in the following format: 68 | # Active Internet connections (only servers) 69 | # Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name 70 | # tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 7506/httpd 71 | # 72 | # The awk command prints the 4th and 7th columns of any line matching both the following criteria: 73 | # 1) The 4th column contains the port passed to port_status() (i.e., $1) 74 | # 2) The 6th column contains "LISTEN" 75 | # 76 | # Sample output: 77 | # 0.0.0.0:25000 7506/sshd 78 | ret=$(sudo netstat -tunlp --inet | awk -v port=":$1$" '$4 ~ port && $6 ~ /LISTEN/ { print $4 " " $7 }' || echo 'Unbound'); 79 | echo "$ret"; 80 | } 81 | 82 | 83 | check_st2_host_dependencies() { 84 | # CHECK 1: Determine which, if any, of the required ports are used by an existing process. 85 | 86 | # Abort the installation early if the following ports are being used by an existing process. 87 | # nginx (80, 443), mongodb (27017), rabbitmq (4369, 5672, 25672), redis (6379) 88 | # and st2 (9100-9102). 89 | 90 | declare -a ports=("80" "443" "4369" "5672" "6379" "9100" "9101" "9102" "25672" "27017") 91 | declare -a used=() 92 | 93 | for i in "${ports[@]}" 94 | do 95 | rv=$(port_status $i | sed 's/.*-$\|.*systemd\|.*beam.smp.*\|.*epmd\|.*st2.*\|.*nginx.*\|.*python.*\|.*postmaster.*\|.*mongod\|.*init//') 96 | if [ "$rv" != "Unbound" ] && [ "$rv" != "" ]; then 97 | used+=("$rv") 98 | fi 99 | done 100 | 101 | # If any used ports were found, display helpful message and exit 102 | if [ ${#used[@]} -gt 0 ]; then 103 | printf "\nNot all required TCP ports are available. 
ST2 and related services will fail to start.\n\n" 104 | echo "The following ports are in use by the specified pid/process and need to be stopped:" 105 | for port_pid_process in "${used[@]}" 106 | do 107 | echo " $port_pid_process" 108 | done 109 | echo "" 110 | exit 1 111 | fi 112 | 113 | # CHECK 2: Ensure there is enough space at /var/lib/mongodb 114 | VAR_SPACE=`df -Pk /var/lib | grep -vE '^Filesystem|tmpfs|cdrom' | awk '{print $4}'` 115 | if [ ${VAR_SPACE} -lt 358400 ]; then 116 | echo "" 117 | echo "MongoDB requires at least 350MB free in /var/lib/mongodb" 118 | echo "There is not enough space for MongoDB. It will fail to start." 119 | echo "Please, add some space to /var or clean it up." 120 | exit 1 121 | fi 122 | } 123 | 124 | 125 | generate_random_passwords() { 126 | # Generate random password used for MongoDB user authentication 127 | ST2_MONGODB_PASSWORD=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 24 ; echo '') 128 | # Generate random password used for RabbitMQ user authentication 129 | ST2_RABBITMQ_PASSWORD=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 24 ; echo '') 130 | } 131 | 132 | 133 | configure_st2_user () { 134 | # Create an SSH system user (default `stanley` user may be already created) 135 | if (! id stanley 2>/dev/null); then 136 | sudo useradd stanley 137 | fi 138 | 139 | SYSTEM_HOME=$(echo ~stanley) 140 | 141 | if [ ! -d "${SYSTEM_HOME}/.ssh" ]; then 142 | sudo mkdir ${SYSTEM_HOME}/.ssh 143 | sudo chmod 700 ${SYSTEM_HOME}/.ssh 144 | fi 145 | 146 | # Generate ssh keys on StackStorm box and copy over public key into remote box. 147 | # NOTE: If the file already exists and is non-empty, then assume the key does not need 148 | # to be generated again. 149 | if ! sudo test -s ${SYSTEM_HOME}/.ssh/stanley_rsa; then 150 | # added PEM to enforce PEM ssh key type in EL8 to maintain consistency 151 | sudo ssh-keygen -f ${SYSTEM_HOME}/.ssh/stanley_rsa -P "" -m PEM 152 | fi 153 | 154 | if ! 
sudo grep -s -q -f ${SYSTEM_HOME}/.ssh/stanley_rsa.pub ${SYSTEM_HOME}/.ssh/authorized_keys; 155 | then 156 | # Authorize key-base access 157 | sudo sh -c "cat ${SYSTEM_HOME}/.ssh/stanley_rsa.pub >> ${SYSTEM_HOME}/.ssh/authorized_keys" 158 | fi 159 | 160 | sudo chmod 0600 ${SYSTEM_HOME}/.ssh/authorized_keys 161 | sudo chmod 0700 ${SYSTEM_HOME}/.ssh 162 | sudo chown -R stanley:stanley ${SYSTEM_HOME} 163 | 164 | # Enable passwordless sudo 165 | local STANLEY_SUDOERS="stanley ALL=(ALL) NOPASSWD: SETENV: ALL" 166 | if ! sudo grep -s -q ^"${STANLEY_SUDOERS}" /etc/sudoers.d/st2; then 167 | sudo sh -c "echo '${STANLEY_SUDOERS}' >> /etc/sudoers.d/st2" 168 | fi 169 | 170 | sudo chmod 0440 /etc/sudoers.d/st2 171 | 172 | # Disable requiretty for all users 173 | sudo sed -i -r "s/^Defaults\s+\+?requiretty/# Defaults requiretty/g" /etc/sudoers 174 | } 175 | 176 | 177 | configure_st2_cli_config() { 178 | # Configure CLI config (write credentials for the root user and user which ran the script) 179 | ROOT_USER="root" 180 | CURRENT_USER=$(whoami) 181 | 182 | ROOT_HOME=$(eval echo ~${ROOT_USER}) 183 | : "${HOME:=$(eval echo ~${CURRENT_USER})}" 184 | 185 | ROOT_USER_CLI_CONFIG_DIRECTORY="${ROOT_HOME}/.st2" 186 | ROOT_USER_CLI_CONFIG_PATH="${ROOT_USER_CLI_CONFIG_DIRECTORY}/config" 187 | 188 | CURRENT_USER_CLI_CONFIG_DIRECTORY="${HOME}/.st2" 189 | CURRENT_USER_CLI_CONFIG_PATH="${CURRENT_USER_CLI_CONFIG_DIRECTORY}/config" 190 | 191 | if ! sudo test -d ${ROOT_USER_CLI_CONFIG_DIRECTORY}; then 192 | sudo mkdir -p ${ROOT_USER_CLI_CONFIG_DIRECTORY} 193 | fi 194 | 195 | sudo sh -c "cat < ${ROOT_USER_CLI_CONFIG_PATH} 196 | [credentials] 197 | username = ${USERNAME} 198 | password = ${PASSWORD} 199 | EOT" 200 | 201 | # Write config for root user 202 | if [ "${CURRENT_USER}" == "${ROOT_USER}" ]; then 203 | return 204 | fi 205 | 206 | # Write config for current user (in case current user != root) 207 | if [ ! 
-d ${CURRENT_USER_CLI_CONFIG_DIRECTORY} ]; then 208 | sudo mkdir -p ${CURRENT_USER_CLI_CONFIG_DIRECTORY} 209 | fi 210 | 211 | sudo sh -c "cat < ${CURRENT_USER_CLI_CONFIG_PATH} 212 | [credentials] 213 | username = ${USERNAME} 214 | password = ${PASSWORD} 215 | EOT" 216 | 217 | # Fix the permissions 218 | sudo chown -R ${CURRENT_USER}:${CURRENT_USER} ${CURRENT_USER_CLI_CONFIG_DIRECTORY} 219 | } 220 | 221 | 222 | generate_symmetric_crypto_key_for_datastore() { 223 | DATASTORE_ENCRYPTION_KEYS_DIRECTORY="/etc/st2/keys" 224 | DATASTORE_ENCRYPTION_KEY_PATH="${DATASTORE_ENCRYPTION_KEYS_DIRECTORY}/datastore_key.json" 225 | 226 | sudo mkdir -p ${DATASTORE_ENCRYPTION_KEYS_DIRECTORY} 227 | 228 | # If the file ${DATASTORE_ENCRYPTION_KEY_PATH} exists and is not empty, then do not generate 229 | # a new key. st2-generate-symmetric-crypto-key fails if the key file already exists. 230 | if ! sudo test -s ${DATASTORE_ENCRYPTION_KEY_PATH}; then 231 | sudo st2-generate-symmetric-crypto-key --key-path ${DATASTORE_ENCRYPTION_KEY_PATH} 232 | fi 233 | 234 | # Make sure only st2 user can read the file 235 | sudo chgrp st2 ${DATASTORE_ENCRYPTION_KEYS_DIRECTORY} 236 | sudo chmod o-r ${DATASTORE_ENCRYPTION_KEYS_DIRECTORY} 237 | sudo chgrp st2 ${DATASTORE_ENCRYPTION_KEY_PATH} 238 | sudo chmod o-r ${DATASTORE_ENCRYPTION_KEY_PATH} 239 | 240 | # set path to the key file in the config 241 | sudo crudini --set /etc/st2/st2.conf keyvalue encryption_key_path ${DATASTORE_ENCRYPTION_KEY_PATH} 242 | 243 | # NOTE: We need to restart all the affected services so they pick the key and load it in memory 244 | sudo st2ctl restart-component st2api 245 | sudo st2ctl restart-component st2sensorcontainer 246 | sudo st2ctl restart-component st2workflowengine 247 | sudo st2ctl restart-component st2actionrunner 248 | } 249 | 250 | 251 | verify_st2() { 252 | st2 --version 253 | st2 -h 254 | 255 | st2 auth $USERNAME -p $PASSWORD 256 | # A shortcut to authenticate and export the token 257 | export ST2_AUTH_TOKEN=$(st2 
auth $USERNAME -p $PASSWORD -t) 258 | 259 | # List the actions from a 'core' pack 260 | st2 action list --pack=core 261 | 262 | # Run a local shell command 263 | st2 run core.local -- date -R 264 | 265 | # See the execution results 266 | st2 execution list 267 | 268 | # Fire a remote comand via SSH (Requires passwordless SSH) 269 | st2 run core.remote hosts='127.0.0.1' -- uname -a 270 | 271 | # Install a pack 272 | st2 pack install st2 273 | } 274 | 275 | 276 | ok_message() { 277 | echo "" 278 | echo "" 279 | echo "███████╗████████╗██████╗ ██████╗ ██╗ ██╗"; 280 | echo "██╔════╝╚══██╔══╝╚════██╗ ██╔═══██╗██║ ██╔╝"; 281 | echo "███████╗ ██║ █████╔╝ ██║ ██║█████╔╝ "; 282 | echo "╚════██║ ██║ ██╔═══╝ ██║ ██║██╔═██╗ "; 283 | echo "███████║ ██║ ███████╗ ╚██████╔╝██║ ██╗"; 284 | echo "╚══════╝ ╚═╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═╝"; 285 | echo "" 286 | echo " st2 is installed and ready to use." 287 | echo "" 288 | echo "Head to https://YOUR_HOST_IP/ to access the WebUI" 289 | echo "" 290 | echo "Don't forget to dive into our documentation! Here are some resources" 291 | echo "for you:" 292 | echo "" 293 | echo "* Documentation - https://docs.stackstorm.com" 294 | echo "* Pack Exchange - https://exchange.stackstorm.org/" 295 | echo "" 296 | echo "Thanks for installing StackStorm! Come visit us in our Slack Channel" 297 | echo "and tell us how it's going. We'd love to hear from you!" 
298 | echo "http://stackstorm.com/community-signup" 299 | } 300 | 301 | 302 | fail() { 303 | echo "############### ERROR ###############" 304 | echo "# Failed on $STEP #" 305 | echo "#####################################" 306 | exit 2 307 | } 308 | -------------------------------------------------------------------------------- /scripts/includes/rhel.sh: -------------------------------------------------------------------------------- 1 | install_yum_utils() { 2 | # We need repoquery tool to get package_name-package_ver-package_rev in RPM based distros 3 | # if we don't want to construct this string manually using yum info --show-duplicates and 4 | # doing a bunch of sed awk magic. Problem is this is not installed by default on all images. 5 | sudo yum install -y yum-utils 6 | } 7 | 8 | 9 | get_full_pkg_versions() { 10 | if [ "$VERSION" != '' ]; 11 | then 12 | local RHMAJVER=`cat /etc/redhat-release | sed 's/[^0-9.]*\([0-9.]\).*/\1/'` 13 | local YES_FLAG="" 14 | if [ "$RHMAJVER" -ge "8" ]; then 15 | # RHEL 8 and newer, you need "-y" flag to avoid being prompted to confirm "yes" 16 | local YES_FLAG="-y" 17 | fi 18 | 19 | local ST2_VER=$(repoquery ${YES_FLAG} --nvr --show-duplicates st2 | grep -F st2-${VERSION} | sort --version-sort | tail -n 1) 20 | if [ -z "$ST2_VER" ]; then 21 | echo "Could not find requested version of st2!!!" 22 | sudo repoquery ${YES_FLAG} --nvr --show-duplicates st2 23 | exit 3 24 | fi 25 | ST2_PKG=${ST2_VER} 26 | 27 | local ST2WEB_VER=$(repoquery ${YES_FLAG} --nvr --show-duplicates st2web | grep -F st2web-${VERSION} | sort --version-sort | tail -n 1) 28 | if [ -z "$ST2WEB_VER" ]; then 29 | echo "Could not find requested version of st2web." 
30 | sudo repoquery ${YES_FLAG} --nvr --show-duplicates st2web 31 | exit 3 32 | fi 33 | ST2WEB_PKG=${ST2WEB_VER} 34 | 35 | local ST2CHATOPS_VER=$(repoquery ${YES_FLAG} --nvr --show-duplicates st2chatops | grep -F st2chatops-${VERSION} | sort --version-sort | tail -n 1) 36 | if [ -z "$ST2CHATOPS_VER" ]; then 37 | echo "Could not find requested version of st2chatops." 38 | sudo repoquery ${YES_FLAG} --nvr --show-duplicates st2chatops 39 | exit 3 40 | fi 41 | ST2CHATOPS_PKG=${ST2CHATOPS_VER} 42 | 43 | echo "##########################################################" 44 | echo "#### Following versions of packages will be installed ####" 45 | echo "${ST2_PKG}" 46 | echo "${ST2WEB_PKG}" 47 | echo "${ST2CHATOPS_PKG}" 48 | echo "##########################################################" 49 | fi 50 | } 51 | -------------------------------------------------------------------------------- /scripts/install_os_packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | platform() { 5 | [ -f /etc/debian_version ] && { echo 'deb'; return 0; } 6 | echo 'rpm' 7 | } 8 | 9 | version_delemiter() { 10 | [ "$(platform)" = "deb" ] && echo '_' || echo '-' 11 | } 12 | 13 | install_rpm() { 14 | sudo yum deplist $(lookup_fullnames $@) 15 | sudo yum -y install $(lookup_fullnames $@); 16 | } 17 | 18 | install_deb() { 19 | sudo apt-get -o Acquire::ForceIPv4=true update -y 20 | 21 | for fpath in $(lookup_fullnames $@); do 22 | echo "Installing package: $fpath" 23 | gdebi -o Acquire::ForceIPv4=true -o APT::Acquire::ForceIPv4=true -n "$fpath" 24 | done 25 | } 26 | 27 | lookup_fullnames() { 28 | list="" 29 | for name_or_path in "$@"; do 30 | path="" 31 | # If file exists and is readable 32 | if [[ -r "$name_or_path" ]]; then 33 | path="$name_or_path" 34 | # Try and find the st2* package file 35 | else 36 | regex="${name_or_path}$(version_delemiter)"'[0-9].*' 37 | path=$(ls -1 ${name_or_path}$(version_delemiter)*".$(platform)" | 
grep "$regex" | head -n1)
    fi
    [[ -z "$path" ]] && { echo "Couldn't find package: \`'$name_or_path'"; exit 1; }
    [[ -z "$list" ]] && list="$path" || list="$list $path"
  done
  echo $list
}

[[ $# -eq 0 ]] && { echo "usage: $0 (name | path) ..." && exit 1; }

# Dispatch to install_rpm or install_deb depending on the detected platform.
install_$(platform) "$@"
-------------------------------------------------------------------------------- /scripts/platform_major_version.py: --------------------------------------------------------------------------------
#!/usr/bin/env python
# Print the major version number of the host Linux distribution (e.g. "7" on CentOS 7).
import platform

try:
    # Python 2 / <= 3.7 path.
    version = platform.linux_distribution(full_distribution_name=1)[1]
except AttributeError:
    # platform.linux_distribution() was removed in Python 3.8; fall back to
    # parsing VERSION_ID from /etc/os-release, which yields the same value.
    version = ''
    with open('/etc/os-release') as fp:
        for line in fp:
            if line.startswith('VERSION_ID='):
                version = line.split('=', 1)[1].strip().strip('"')
                break

print(str.split(version, '.')[0])
-------------------------------------------------------------------------------- /scripts/setup-vagrant.sh: --------------------------------------------------------------------------------
#!/bin/bash

set -e

fail() {
  exit 2
}

# Any premature exit (including via set -e) is converted to exit code 2;
# the trap is cleared at the bottom of the script on success.
trap 'fail' EXIT

case $ST2_TARGET in
  "focal")
    DC_TARGET=$ST2_TARGET
    INSTALL_CMD="apt-get";;
  *)
    echo "[Error] Unknown target $ST2_TARGET"
    exit 1;;
esac

echo "[Install] dependencies"
sudo $INSTALL_CMD update
if [[ $ST2_TARGET == 'focal' ]]; then
  sudo apt-get -y autoremove
  sudo apt-get install -y gdebi-core
fi

sudo $INSTALL_CMD install -y git curl wget

# Install docker-compose
DC_BIN="/usr/local/bin/docker-compose"
DC_URL="https://github.com/docker/compose/releases/download/1.21.0/docker-compose-$(uname -s)-$(uname -m)"
if [[ !
-x $DC_BIN ]]; then
  echo "[Install] docker-compose $ST2_TARGET"
  sudo sh -c "curl -sL $DC_URL > $DC_BIN"
  sudo chmod +x $DC_BIN
fi

# Using docker-compose, 1) build packages, and 2) run quick tests
if [[ "${ST2_PACKAGES}" != "" ]]; then
  ST2PACKAGES="-e ST2_PACKAGES='$ST2_PACKAGES'"
fi
if [[ "${ST2_GITURL}" != "" ]]; then
  ST2URL="-e ST2_GITURL=$ST2_GITURL"
fi
if [[ "${ST2_GITREV}" != "" ]]; then
  ST2REV="-e ST2_GITREV=$ST2_GITREV"
fi
sudo sh -c "(cd /vagrant && $DC_BIN run $ST2PACKAGES $ST2URL $ST2REV --rm $ST2_TARGET)"

if [ "$ST2_INSTALL" = "yes" ]; then
  echo 'Install st2 packages'

  # Halt the docker test environment (otherwise, the subsequent self-verification will fail)
  sudo docker stop "vagrant_${DC_TARGET}test_1"

  # Install the packages we just built
  if [[ $ST2_TARGET == 'focal' ]]; then
    sudo /usr/bin/gdebi -n /tmp/st2-packages/st2_*.deb
  else
    sudo $INSTALL_CMD install -y /tmp/st2-packages/st2*.rpm
  fi

  # Setup SSH keys and sudo access for the "stanley" service user
  sudo mkdir -p /home/stanley/.ssh
  sudo chmod 0700 /home/stanley/.ssh

  sudo ssh-keygen -f /home/stanley/.ssh/stanley_rsa -P ""
  sudo sh -c 'cat /home/stanley/.ssh/stanley_rsa.pub >> /home/stanley/.ssh/authorized_keys'
  sudo chown -R stanley:stanley /home/stanley/.ssh

  sudo sh -c 'echo "stanley ALL=(ALL) NOPASSWD: SETENV: ALL" >> /etc/sudoers.d/st2'
  sudo chmod 0440 /etc/sudoers.d/st2

  sudo sed -i -r "s/^Defaults\s+\+?requiretty/# Defaults +requiretty/g" /etc/sudoers

  # Create htpasswd file
  HT='/usr/bin/htpasswd'
  if [[ ! -x "$HT" ]]; then
    if [[ $ST2_TARGET == 'focal' ]]; then
      sudo $INSTALL_CMD install -y apache2-utils
    else
      sudo $INSTALL_CMD install -y httpd-tools
    fi
  fi

  HP='/etc/st2/htpasswd'
  if [[ ! -f "$HP" ]]; then
    echo $ST2_PASSWORD | sudo htpasswd -c -i $HP $ST2_USER
  else
    echo $ST2_PASSWORD | sudo htpasswd -i $HP $ST2_USER
  fi

  # Setup datastore encryption
  # FIX: the previous "cat <> /etc/st2/st2.conf" opened st2.conf read-write
  # instead of appending a here-document; restored the intended <<EOF >> form.
  sudo sh -c 'cat <<EOF >> /etc/st2/st2.conf

[keyvalue]
encryption_key_path = /etc/st2/keys/datastore_key.json
EOF'

  sudo mkdir -p /etc/st2/keys
  sudo st2-generate-symmetric-crypto-key --key-path /etc/st2/keys/datastore_key.json

  # Start ST2 services
  sudo st2ctl start
  sudo st2ctl reload

  if [ "$ST2_VERIFY" = "yes" ]; then
    echo 'Running self-verification'
    # The command substitution runs in the outer shell before sudo, so the
    # token is obtained as the invoking user and exported inside the sh -c.
    sudo sh -c "export ST2_AUTH_TOKEN=$(st2 auth $ST2_USER -p $ST2_PASSWORD -t) && /usr/bin/st2-self-check"
  fi
fi

# Success: clear the error trap installed at the top of the script.
trap - EXIT

exit 0
-------------------------------------------------------------------------------- /scripts/st2_bootstrap.sh: --------------------------------------------------------------------------------
#!/bin/bash

BASE_PATH="https://raw.githubusercontent.com/StackStorm/st2-packages"
BOOTSTRAP_FILE='st2bootstrap.sh'

ARCH=$(arch)
DEBTEST=$(lsb_release -a 2> /dev/null | grep Distributor | awk '{print $3}')
RHTEST=$(cat /etc/redhat-release 2> /dev/null | sed -e "s~\(.*\)release.*~\1~g")
VERSION=''
RELEASE='stable'
REPO_TYPE=''
ST2_PKG_VERSION=''
DEV_BUILD=''
USERNAME=''
PASSWORD=''
EXTRA_OPTS=''

# Note: This variable needs to default to a branch of the latest stable release
BRANCH='v3.8'
FORCE_BRANCH=""

# Prefix every stdin line with a timestamp; used to timestamp the install log.
adddate() {
  while IFS= read -r line; do
    echo "$(date +%Y%m%dT%H%M%S%z) $line"
  done
}

# Parse command line options into the globals declared above.
setup_args() {
  for i in "$@"
  do
    case $i in
      -v=*|--version=*)
        VERSION="${i#*=}"
        shift
        ;;
      -s|--stable)
        RELEASE=stable
        shift
        ;;
      -u|--unstable)
        RELEASE=unstable
        shift
        ;;
      --staging)
        REPO_TYPE='staging'
        shift
47 | ;; 48 | # Used to install the packages from CircleCI build artifacts 49 | # Examples: 'st2/5017', 'mistral/1012', 'st2-packages/3021', 50 | # where first part is repository name, second is CircleCI build number. 51 | --dev=*) 52 | DEV_BUILD="${i#*=}" 53 | shift 54 | ;; 55 | --user=*) 56 | USERNAME="${i#*=}" 57 | shift 58 | ;; 59 | --password=*) 60 | PASSWORD="${i#*=}" 61 | shift 62 | ;; 63 | # Used to specify which branch of st2-packages repo to use. This comes handy when you 64 | # need to use a non-master branch of st2-package repo (e.g. when testing installer script 65 | # changes which are in a branch) 66 | --force-branch=*) 67 | FORCE_BRANCH="${i#*=}" 68 | shift 69 | ;; 70 | # Provide a flag to enable installing Python3 from 3rd party insecure PPA for Ubuntu Xenial 71 | # TODO: Remove once Ubuntu Xenial is dropped 72 | --u16-add-insecure-py3-ppa) 73 | EXTRA_OPTS="--u16-add-insecure-py3-ppa" 74 | shift 75 | ;; 76 | *) 77 | # unknown option 78 | ;; 79 | esac 80 | done 81 | 82 | if [[ "$VERSION" != '' ]]; then 83 | if [[ ! "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] && [[ ! "$VERSION" =~ ^[0-9]+\.[0-9]+dev$ ]]; then 84 | echo "$VERSION does not match supported formats x.y.z or x.ydev" 85 | exit 1 86 | fi 87 | 88 | if [[ "$VERSION" =~ ^[0-9]+\.[0-9]+dev$ ]]; then 89 | echo "You're requesting a dev version! Switching to unstable!" 90 | RELEASE='unstable' 91 | fi 92 | fi 93 | 94 | if [[ "$USERNAME" = '' || "$PASSWORD" = '' ]]; then 95 | USERNAME=${USERNAME:-st2admin} 96 | PASSWORD=${PASSWORD:-Ch@ngeMe} 97 | echo "You can use \"--user=\" and \"--password=\" to override following default st2 credentials." 98 | SLEEP_TIME=10 99 | echo "Username: ${USERNAME}" 100 | echo "Password: ${PASSWORD}" 101 | echo "Sleeping for ${SLEEP_TIME} seconds if you want to Ctrl + C now..." 102 | sleep ${SLEEP_TIME} 103 | echo "Resorting to default username and password... You have an option to change password later!" 
104 | fi 105 | } 106 | 107 | setup_args $@ 108 | 109 | # Note: If either --unstable or --staging flag is provided we default branch to master 110 | if [[ "$RELEASE" == 'unstable' ]]; then 111 | BRANCH="master" 112 | fi 113 | 114 | if [[ "$REPO_TYPE" == 'staging' ]]; then 115 | BRANCH="master" 116 | fi 117 | 118 | if [[ "$DEV_BUILD" != '' ]]; then 119 | BRANCH="master" 120 | fi 121 | 122 | get_version_branch() { 123 | if [[ "$RELEASE" == 'stable' ]]; then 124 | BRANCH="v$(echo ${VERSION} | awk 'BEGIN {FS="."}; {print $1 "." $2}')" 125 | fi 126 | } 127 | 128 | if [[ "$VERSION" != '' ]]; then 129 | get_version_branch $VERSION 130 | VERSION="--version=${VERSION}" 131 | fi 132 | 133 | if [[ "$RELEASE" != '' ]]; then 134 | RELEASE="--${RELEASE}" 135 | fi 136 | 137 | if [[ "$REPO_TYPE" == 'staging' ]]; then 138 | REPO_TYPE="--staging" 139 | fi 140 | 141 | if [[ "$DEV_BUILD" != '' ]]; then 142 | DEV_BUILD="--dev=${DEV_BUILD}" 143 | fi 144 | 145 | if [[ "${FORCE_BRANCH}" != "" ]]; then 146 | BRANCH=${FORCE_BRANCH} 147 | fi 148 | 149 | USERNAME="--user=${USERNAME}" 150 | PASSWORD="--password=${PASSWORD}" 151 | 152 | if [[ "$ARCH" != 'x86_64' ]]; then 153 | echo "Unsupported architecture. Please use a 64-bit OS! Aborting!" 154 | exit 2 155 | fi 156 | 157 | if [[ -n "$RHTEST" ]]; then 158 | TYPE="rpms" 159 | echo "*** Detected Distro is ${RHTEST} ***" 160 | RHMAJVER=`cat /etc/redhat-release | sed 's/[^0-9.]*\([0-9.]\).*/\1/'` 161 | echo "*** Detected distro version ${RHMAJVER} ***" 162 | if [[ "$RHMAJVER" != '6' && "$RHMAJVER" != '7' && "$RHMAJVER" != '8' ]]; then 163 | echo "Unsupported distro version $RHMAJVER! Aborting!" 
164 | exit 2 165 | fi 166 | ST2BOOTSTRAP="${BASE_PATH}/${BRANCH}/scripts/st2bootstrap-el${RHMAJVER}.sh" 167 | BOOTSTRAP_FILE="st2bootstrap-el${RHMAJVER}.sh" 168 | elif [[ -n "$DEBTEST" ]]; then 169 | TYPE="debs" 170 | echo "*** Detected Distro is ${DEBTEST} ***" 171 | SUBTYPE=`lsb_release -a 2>&1 | grep Codename | grep -v "LSB" | awk '{print $2}'` 172 | echo "*** Detected flavor ${SUBTYPE} ***" 173 | if [[ "$SUBTYPE" != 'focal' ]]; then 174 | echo "Unsupported ubuntu codename ${SUBTYPE}. Please use Ubuntu 20.04 (focal) as base system!" 175 | exit 2 176 | fi 177 | ST2BOOTSTRAP="${BASE_PATH}/${BRANCH}/scripts/st2bootstrap-deb.sh" 178 | BOOTSTRAP_FILE="st2bootstrap-deb.sh" 179 | else 180 | echo "Unknown Operating System" 181 | exit 2 182 | fi 183 | 184 | hash curl 2>/dev/null || { echo >&2 "'curl' is not installed. Aborting."; exit 1; } 185 | 186 | CURLTEST=`curl --output /dev/null --silent --head --fail ${ST2BOOTSTRAP}` 187 | if [ $? -ne 0 ]; then 188 | echo -e "Could not find file ${ST2BOOTSTRAP}" 189 | exit 2 190 | else 191 | echo "Downloading deployment script from: ${ST2BOOTSTRAP}..." 192 | # Make sure we are in a writable directory 193 | if [ ! -w $(pwd) ]; then 194 | echo "$(pwd) not writable, please cd to a different directory and try again." 195 | exit 2 196 | fi 197 | curl -sSL -k -o ${BOOTSTRAP_FILE} ${ST2BOOTSTRAP} 198 | chmod +x ${BOOTSTRAP_FILE} 199 | 200 | echo "Running deployment script for st2 ${VERSION}..." 
201 | echo "OS specific script cmd: bash ${BOOTSTRAP_FILE} ${VERSION} ${RELEASE} ${REPO_TYPE} ${DEV_BUILD} ${USERNAME} --password=****" 202 | TS=$(date +%Y%m%dT%H%M%S) 203 | sudo mkdir -p /var/log/st2 204 | bash ${BOOTSTRAP_FILE} ${VERSION} ${RELEASE} ${REPO_TYPE} ${DEV_BUILD} ${USERNAME} ${PASSWORD} ${EXTRA_OPTS} 2>&1 | adddate | sudo tee /var/log/st2/st2-install.${TS}.log 205 | exit ${PIPESTATUS[0]} 206 | fi 207 | -------------------------------------------------------------------------------- /tools/generate_final_installer_scripts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | 5 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 6 | SCRIPTS_PATH = os.path.abspath(os.path.join(BASE_DIR, '../scripts')) 7 | 8 | COMMON_INCLUDE_PATH = os.path.join(SCRIPTS_PATH, 'includes/common.sh') 9 | RHEL_INCLUDE_PATH = os.path.join(SCRIPTS_PATH, 'includes/rhel.sh') 10 | 11 | SCRIPT_FILES = [ 12 | 'st2bootstrap-deb.sh', 13 | 'st2bootstrap-el8.sh', 14 | 'st2bootstrap-el9.sh' 15 | ] 16 | 17 | HEADER_WARNING = """ 18 | #!/usr/bin/env bash 19 | # NOTE: This file is automatically generated by the tools/generate_final_installer_scripts.py 20 | # script using the template file and common include files in scripts/includes/*.sh. 21 | # 22 | # DO NOT EDIT MANUALLY. 23 | # 24 | # Please edit corresponding template file and include files. 
25 | """.strip() 26 | 27 | 28 | def main(): 29 | with open(COMMON_INCLUDE_PATH, 'r') as fp: 30 | common_script_content = fp.read() 31 | 32 | with open(RHEL_INCLUDE_PATH, 'r') as fp: 33 | rhel_common_script_content = fp.read() 34 | 35 | for script_filename in SCRIPT_FILES: 36 | script_file_path = os.path.join(SCRIPTS_PATH, script_filename) 37 | template_file_path = script_file_path.replace('.sh', '.template.sh') 38 | 39 | print('Generating script file "%s" -> "%s"' % (template_file_path, script_file_path)) 40 | 41 | with open(template_file_path, 'r') as fp: 42 | template_content = fp.read() 43 | 44 | result = '' 45 | result += HEADER_WARNING 46 | result += '\n\n' 47 | result += template_content 48 | 49 | # Add in content from includes/ files 50 | result = result.replace('# include:includes/common.sh', common_script_content) 51 | result = result.replace('# include:includes/rhel.sh', rhel_common_script_content) 52 | 53 | with open(script_file_path, 'w') as fp: 54 | fp.write(result) 55 | 56 | print('File "%s" has been generated.' % (script_file_path)) 57 | 58 | 59 | if __name__ == '__main__': 60 | main() 61 | --------------------------------------------------------------------------------