├── .gitignore ├── CODEOWNERS ├── README.md ├── SSO ├── Google_Oauth │ ├── README.md │ └── google_oauth_config.sh ├── LDAP │ ├── README.md │ ├── beta │ │ ├── README.md │ │ ├── api_user_creation.sh │ │ ├── mapping_config.sh │ │ ├── settings_mapping_hybrid.json │ │ └── settings_mapping_simple.json │ ├── login_config.sh │ ├── settings_login_group.json │ ├── settings_login_group_deprecated.json │ ├── settings_login_referral_follow.json │ ├── settings_login_simple.json │ └── verify_user.sh ├── OpenID_Connect │ ├── README.md │ └── oidc_config.sh ├── README.md ├── SAML │ ├── README.md │ └── saml_config.sh ├── env.sh └── utils.sh ├── agent_deploy ├── IBMCloud-Kubernetes-Service │ └── install-agent-k8s.sh ├── ibm-iks │ └── install-agent-k8s.sh ├── kubernetes │ ├── sysdig-agent-clusterrole.yaml │ ├── sysdig-agent-configmap.yaml │ ├── sysdig-agent-daemonset-v1.yaml │ ├── sysdig-agent-daemonset-v2.yaml │ ├── sysdig-agent-service.yaml │ ├── sysdig-agent-slim-daemonset-v1.yaml │ ├── sysdig-agent-slim-daemonset-v2.yaml │ ├── sysdig-benchmark-runner-configmap.yaml │ ├── sysdig-host-analyzer-configmap.yaml │ ├── sysdig-image-analyzer-configmap.yaml │ ├── sysdig-image-analyzer-daemonset.yaml │ ├── sysdig-kmod-thin-agent-slim-daemonset.yaml │ └── sysdig-node-analyzer-daemonset.yaml └── openshift │ ├── sysdig-agent-clusterrole.yaml │ ├── sysdig-agent-configmap.yaml │ ├── sysdig-agent-daemonset-redhat-openshift.yaml │ └── sysdig-agent-service.yaml ├── integrations └── slack │ └── sysdigbot │ ├── Dockerfile │ ├── LICENSE │ ├── README.md │ ├── bot.py │ ├── docker-entrypoint.sh │ └── requirements.txt ├── k8s_audit_config ├── README.md ├── apiserver-config.patch.sh ├── audit-policy-v2.yaml ├── audit-policy.yaml ├── audit-sink.yaml.in ├── enable-k8s-audit.sh └── webhook-config.yaml.in ├── onprem_deploy └── amazon_aws │ ├── all-in-a-box │ └── sysdigcloud-onprem-all-in-a-box.cftemplate │ └── full-distributed │ └── sysdigcloud-onprem-full-distributed.cftemplate ├── support_bundle ├── README.md 
└── get_support_bundle.sh └── user_creation ├── README.md ├── create_user.sh ├── env.sh └── update_default_user_role.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .idea 3 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # The CODEOWNERS file describes areas of ownership within the repo. 2 | # Owners are required to approve Pull Requests as part of the Software Development Lifecycle 3 | # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners 4 | # Right now there is a single Github team as root codeowner, but we can add granularity over time 5 | /agent_deploy/** @draios/agent-cloud-scripts 6 | /k8s_audit_config/** @draios/agent-cloud-scripts 7 | 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # sysdig-cloud-scripts 2 | This repository holds various scripts and templates that may be helpful for (among other things): 3 | 4 | - Deploying the Sysdig Cloud agent in various platforms: [/agent_deploy/](https://github.com/draios/sysdig-cloud-scripts/tree/master/agent_deploy) 5 | - Deploying Sysdig Enterprise on-premise: [/onprem_deploy/](https://github.com/draios/sysdig-cloud-scripts/tree/master/onprem_deploy) 6 | - Integrating Sysdig Cloud with other platforms: [/integrations/](https://github.com/draios/sysdig-cloud-scripts/tree/master/integrations) 7 | 8 | Please note that this functionality is different from the core [Sysdig Cloud application checks](http://support.sysdigcloud.com/hc/en-us/articles/205147903), which automatically collect metrics from a wide variety of applications and infrastructure components - these app checks should work out of the box with minimal configuration. 
9 | 10 | # Sysdig Cloud APIs 11 | You may also be interested in the [Sysdig Cloud python client](https://github.com/draios/python-sdc-client): a wrapper for the [Sysdig Cloud REST API](https://sysdig.gitbooks.io/sysdig-cloud-api/content/), which supports a bunch of cool functionality. 12 | 13 | # Requests and Contributions 14 | We love contributions! If you've got some code that might be helpful or interesting to other Sysdig Cloud users, please just send us a pull request. 15 | 16 | If you're interested in any new content not available here: please contact us at support@sysdig.com, and we'll do our best to help you out! 17 | -------------------------------------------------------------------------------- /SSO/Google_Oauth/README.md: -------------------------------------------------------------------------------- 1 | # Configure Google OAUTH 2 | 3 | Remember to fill out your environment URL and the Monitor or Secure API token at `../env.sh`. Depending on which API token you choose, the script will configure the settings for one or the other product. 
4 | 5 | ## Examples 6 | 7 | Show command help 8 | 9 | ``` 10 | ./google_oauth_config.sh -h 11 | ``` 12 | 13 | Get current configuration 14 | 15 | ``` 16 | ./google_oauth_config.sh 17 | ``` 18 | 19 | Configure some settings 20 | 21 | ``` 22 | ./google_oauth_config.sh -s -i foobar.apps.googleusercontent.com -e foobar -r https://sysdig.example.org:443/api/oauth/google/auth -a yourdomain.com,yourdomain.org 23 | 24 | ``` 25 | 26 | Delete current settings: 27 | 28 | ``` 29 | ./google_oauth_config.sh -d 30 | ``` 31 | -------------------------------------------------------------------------------- /SSO/Google_Oauth/google_oauth_config.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -uo pipefail 3 | 4 | ENV="../env.sh" 5 | UTILS="../utils.sh" 6 | 7 | SET=false 8 | DELETE=false 9 | HELP=false 10 | CLIENT_ID="" 11 | CLIENT_SECRET="" 12 | ALLOWED_DOMAINS="" 13 | REDIRECT_URL="" 14 | SSO_KEYWORD="google-oauth" 15 | SCRIPT_NAME=`basename "${0}"` 16 | 17 | function print_usage() { 18 | echo "Usage: ./${SCRIPT_NAME} [OPTIONS]" 19 | echo 20 | echo "Affect Google Oauth login settings for your Sysdig software platform installation" 21 | echo 22 | echo "If no OPTIONS are specified, the current login config settings are printed" 23 | echo 24 | echo "Options:" 25 | 26 | echo " -s Set the current Google Oauth configuration" 27 | echo " -i Client ID from Google config" 28 | echo " -e Client Secret from Google config" 29 | echo " -a [\"Comma\", \"separated\", \"list\"] of allowed domains" 30 | echo " -r Allowed redirect URL" 31 | echo " -d Delete the current Google Oauth login config" 32 | echo " -h Print this Usage output" 33 | exit 1 34 | } 35 | 36 | function check_provider_variables() { 37 | if [ -z "${CLIENT_ID}" -o -z "${CLIENT_SECRET}" ] ; then 38 | echo "To change settings, you must enter values for Client ID, and Client Secret" 39 | echo 40 | print_usage 41 | fi 42 | } 43 | 44 | function set_settings() { 45 | 
check_provider_variables 46 | get_settings_id 47 | PARSED_DOMAINS="[" 48 | for i in $(echo ${ALLOWED_DOMAINS} | tr "," "\n") ; do 49 | PARSED_DOMAINS="${PARSED_DOMAINS}\"${i}\"," 50 | done 51 | ALLOWED_DOMAINS="${PARSED_DOMAINS%?}]" 52 | 53 | if [[ -z "${SETTINGS_ID}" ]] ; then 54 | curl ${CURL_OPTS} \ 55 | -H "Authorization: Bearer ${API_TOKEN}" \ 56 | -H "Content-Type: application/json" \ 57 | -X POST \ 58 | -d '{ 59 | "authenticationSettings": { 60 | "type": "'"${SSO_KEYWORD}"'", 61 | "settings": { 62 | "redirectUrl":"'"${REDIRECT_URL}"'", 63 | "allowedDomains": '"${ALLOWED_DOMAINS}"', 64 | "clientId":"'"${CLIENT_ID}"'", 65 | "clientSecret":"'"${CLIENT_SECRET}"'"}}}' \ 66 | ${SETTINGS_ENDPOINT} | ${JSON_FILTER} 67 | else 68 | get_settings_version 69 | curl ${CURL_OPTS} \ 70 | -H "Authorization: Bearer ${API_TOKEN}" \ 71 | -H "Content-Type: application/json" \ 72 | -X PUT \ 73 | -d '{ 74 | "authenticationSettings": { 75 | "type": "'"${SSO_KEYWORD}"'", 76 | "version": "'"${VERSION}"'", 77 | "settings": { 78 | "redirectUrl":"'"${REDIRECT_URL}"'", 79 | "allowedDomains":'"${ALLOWED_DOMAINS}"', 80 | "clientId":"'"${CLIENT_ID}"'", 81 | "clientSecret":"'"${CLIENT_SECRET}"'"}}}' \ 82 | ${SETTINGS_ENDPOINT}/${SETTINGS_ID} | ${JSON_FILTER} 83 | fi 84 | set_as_active_setting 85 | } 86 | 87 | eval "set -- $(getopt sdhi:e:a:r: "$@")" 88 | while [[ $# -gt 0 ]] ; do 89 | case "${1}" in 90 | (-s) SET=true ;; 91 | (-d) DELETE=true ;; 92 | (-h) HELP=true ;; 93 | (-i) CLIENT_ID="${CLIENT_ID}$2"; shift;; 94 | (-e) CLIENT_SECRET="${CLIENT_SECRET}$2"; shift;; 95 | (-a) ALLOWED_DOMAINS="${ALLOWED_DOMAINS}$2"; shift;; 96 | (-r) REDIRECT_URL="${ALLOWED_DOMAINS}$2"; shift;; 97 | (--) shift; break;; 98 | (-*) echo "${0}: error - unrecognized option ${1}" 1>&2; exit 1;; 99 | (*) break;; 100 | esac 101 | shift 102 | done 103 | 104 | if [[ $HELP = true ]] ; then 105 | print_usage 106 | fi 107 | 108 | if [[ $# -gt 0 ]] ; then 109 | echo "Excess command-line arguments detected. Exiting." 
110 | echo 111 | print_usage 112 | fi 113 | 114 | if [[ -e "${ENV}" ]] ; then 115 | source "${ENV}" 116 | else 117 | echo "File not found: ${ENV}" 118 | echo "See the Google Oauth documentation for details on populating this file with your settings" 119 | exit 1 120 | fi 121 | 122 | if [[ -e "${UTILS}" ]] ; then 123 | source "${UTILS}" 124 | else 125 | echo "File not found: ${UTILS}" 126 | echo "See the Google Oauth documentation for details on populating this file with your settings" 127 | exit 1 128 | fi 129 | 130 | SETTINGS_ENDPOINT="${URL}/api/admin/auth/settings" 131 | ACTIVE_ENDPOINT="${URL}/api/auth/settings/active" 132 | 133 | if [[ ${SET} = true ]] ; then 134 | if [[ ${DELETE} = true ]] ; then 135 | print_usage 136 | fi 137 | set_settings 138 | elif [[ ${DELETE} = true ]] ; then 139 | if [[ ${SET} = true ]] ; then 140 | print_usage 141 | fi 142 | delete_settings 143 | else 144 | get_settings 145 | fi 146 | 147 | exit $? 148 | -------------------------------------------------------------------------------- /SSO/LDAP/README.md: -------------------------------------------------------------------------------- 1 | # Configure LDAP auth 2 | 3 | Remember to fill out your environment URL and the Monitor or Secure API token at `../env.sh`. Depending on which API token you choose, the script will configure the settings for one or the other product. 
4 | 5 | ## Examples 6 | 7 | Show command help 8 | 9 | ``` 10 | ./login_config.sh -h 11 | ``` 12 | 13 | Get current configuration 14 | 15 | ``` 16 | ./login_config.sh 17 | ``` 18 | 19 | Configure some default LDAP login following existing example 20 | 21 | ``` 22 | ./login_config.sh -s settings_login_simple.json 23 | 24 | ``` 25 | 26 | Delete current settings: 27 | 28 | ``` 29 | ./login_config.sh -d 30 | ``` 31 | 32 | Once settings have been set, information existing at LDAP tree regarding an existing user can be retrieved: 33 | 34 | Show command help 35 | 36 | ``` 37 | ./verify_user.sh -h 38 | ``` 39 | 40 | Get user information (test LDAP configuration) 41 | 42 | ``` 43 | ./verify_user.sh -u john.doe 44 | ``` 45 | -------------------------------------------------------------------------------- /SSO/LDAP/beta/README.md: -------------------------------------------------------------------------------- 1 | # Configure LDAP auth 2 | 3 | Remember to fill out your environment URL and the Monitor or Secure API token at `../../env.sh`. Depending on which API token you choose, the script will configure the settings for one or the other product. 
4 | 5 | ## Examples 6 | 7 | ### LDAP users/teams mapping 8 | 9 | Show command help 10 | 11 | ``` 12 | ./mapping_config.sh -h 13 | ``` 14 | 15 | Get current configuration 16 | 17 | ``` 18 | ./mapping_config.sh 19 | ``` 20 | 21 | Configure users/teams sync settings 22 | 23 | ``` 24 | ./mapping_config.sh -s settings_mapping_simple.json 25 | 26 | ``` 27 | 28 | Force synchronisation job 29 | 30 | ``` 31 | ./mapping_config.sh -f 32 | ``` 33 | 34 | Get last synchronisation report 35 | 36 | ``` 37 | ./mapping_config.sh -r 38 | ``` 39 | 40 | Configure users/teams sync settings and force synchronisation after that 41 | 42 | ``` 43 | ./mapping_config.sh -f -s settings_mapping_simple.json 44 | 45 | ``` 46 | 47 | Delete current settings: 48 | 49 | ``` 50 | ./mapping_config.sh -d 51 | ``` 52 | 53 | ### API user creation allowing configuration 54 | 55 | After enabling this LDAP feature disabling API user creation might be desired. 56 | 57 | Get current API user creation status 58 | 59 | ``` 60 | ./api_user_creation.sh 61 | ``` 62 | 63 | Disable user creation via API 64 | 65 | ``` 66 | ./api_user_creation.sh -d 67 | ``` 68 | 69 | Enable API user creation 70 | 71 | ``` 72 | ./api_user_creation.sh -e 73 | ``` 74 | -------------------------------------------------------------------------------- /SSO/LDAP/beta/api_user_creation.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -uo pipefail 3 | 4 | ENV="../../env.sh" 5 | 6 | ENABLE=false 7 | DISABLE=false 8 | HELP=false 9 | 10 | function print_usage() { 11 | echo "Usage: ./api_user_creation.sh [OPTION]" 12 | echo 13 | echo "Enables or disables API user creation for your Sysdig software platform installation" 14 | echo 15 | echo "If no OPTION is specified, current setting is printed" 16 | echo 17 | echo "Options:" 18 | echo " -e Enable API user creation" 19 | echo " -d Disable API user creation" 20 | echo " -h Print this Usage output" 21 | exit 1 22 | } 23 | 24 | eval 
"set -- $(getopt dhe "$@")" 25 | while [[ $# -gt 0 ]] ; do 26 | case "${1}" in 27 | (-e) ENABLE=true ;; 28 | (-d) DISABLE=true ;; 29 | (-h) HELP=true ;; 30 | (--) shift; break;; 31 | (-*) echo "${0}: error - unrecognized option ${1}" 1>&2; exit 1;; 32 | (*) break;; 33 | esac 34 | shift 35 | done 36 | 37 | if [[ $HELP = true ]] ; then 38 | print_usage 39 | fi 40 | 41 | if [[ $# -gt 0 ]] ; then 42 | echo "Excess command-line arguments detected. Exiting." 43 | echo 44 | print_usage 45 | fi 46 | 47 | if [[ -e "${ENV}" ]] ; then 48 | source "${ENV}" 49 | else 50 | echo "File not found: ${ENV}" 51 | echo "See the LDAP documentation for details on populating this file with your settings" 52 | exit 1 53 | fi 54 | 55 | API_USER_CREATION="${URL}/api/admin/customer/1/apiPermissionSettings" 56 | 57 | if [[ ${ENABLE} = true ]] ; then 58 | if [[ ${DISABLE} = true ]] ; then 59 | print_usage 60 | else 61 | curl ${CURL_OPTS} \ 62 | -H "Content-Type: application/json" \ 63 | -H "Authorization: Bearer ${API_TOKEN}" \ 64 | -X POST \ 65 | -d '{"allowApiUserCreation":true}' \ 66 | ${API_USER_CREATION} | ${JSON_FILTER} 67 | exit ${?} 68 | fi 69 | 70 | elif [[ ${DISABLE} = true ]] ; then 71 | if [[ ${ENABLE} = true ]] ; then 72 | print_usage 73 | else 74 | curl ${CURL_OPTS} \ 75 | -H "Content-Type: application/json" \ 76 | -H "Authorization: Bearer ${API_TOKEN}" \ 77 | -X POST \ 78 | -d '{"allowApiUserCreation":false}' \ 79 | ${API_USER_CREATION} | ${JSON_FILTER} 80 | exit ${?} 81 | fi 82 | 83 | else 84 | curl ${CURL_OPTS} \ 85 | -H "Authorization: Bearer ${API_TOKEN}" \ 86 | -X GET \ 87 | ${API_USER_CREATION} | ${JSON_FILTER} 88 | exit ${?} 89 | fi 90 | -------------------------------------------------------------------------------- /SSO/LDAP/beta/mapping_config.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -uo pipefail 3 | 4 | ENV="../../env.sh" 5 | 6 | SET=false 7 | SETTINGS_JSON="" 8 | FORCESYNC=false 9 | 
REPORT=false 10 | DELETE=false 11 | HELP=false 12 | 13 | function print_usage() { 14 | echo "Usage: ./mapping_config.sh [OPTION]" 15 | echo 16 | echo "Affect LDAP mapping settings for your Sysdig software platform installation" 17 | echo 18 | echo "If no OPTION is specified, the current mapping config settings are printed" 19 | echo 20 | echo "Options:" 21 | echo " -s JSON_FILE Set the current LDAP mapping config to the contents of JSON_FILE" 22 | echo " -f Force an immediate sync" 23 | echo " -r Print the report of the most recent sync operation" 24 | echo " -d Delete the current LDAP mapping config" 25 | echo " -h Print this Usage output" 26 | exit 1 27 | } 28 | 29 | eval "set -- $(getopt dhfrs: "$@")" 30 | while [[ $# -gt 0 ]] ; do 31 | case "${1}" in 32 | (-d) DELETE=true ;; 33 | (-h) HELP=true ;; 34 | (-f) FORCESYNC=true ;; 35 | (-r) REPORT=true ;; 36 | (-s) SET=true; SETTINGS_JSON="${SETTINGS_JSON}${2}"; shift;; 37 | (--) shift; break;; 38 | (-*) echo "${0}: error - unrecognized option ${1}" 1>&2; exit 1;; 39 | (*) break;; 40 | esac 41 | shift 42 | done 43 | 44 | if [[ $HELP = true ]] ; then 45 | print_usage 46 | fi 47 | 48 | if [[ $# -gt 0 ]] ; then 49 | echo "Excess command-line arguments detected. Exiting." 
50 | echo 51 | print_usage 52 | fi 53 | 54 | if [[ -e "${ENV}" ]] ; then 55 | source "${ENV}" 56 | else 57 | echo "File not found: ${ENV}" 58 | echo "See the LDAP documentation for details on populating this file with your settings" 59 | exit 1 60 | fi 61 | 62 | SYNC_LDAP_ENDPOINT="${URL}/api/admin/ldap/syncLdap" 63 | SYNC_SETTINGS_ENDPOINT="${URL}/api/admin/ldap/settings/sync" 64 | SYNC_REPORT_ENDPOINT="${URL}/api/admin/ldap/syncReport" 65 | 66 | function force_sync() { 67 | echo "Forcing sync" 68 | curl ${CURL_OPTS} \ 69 | -H "Authorization: Bearer ${API_TOKEN}" \ 70 | -X PUT \ 71 | ${SYNC_LDAP_ENDPOINT} 72 | exit ${?} 73 | } 74 | 75 | if [[ ${SET} = true ]] ; then 76 | if [[ ${DELETE} = true || ${REPORT} = true ]] ; then 77 | print_usage 78 | else 79 | if [[ ! -e ${SETTINGS_JSON} ]] ; then 80 | echo "Settings file \"${SETTINGS_JSON}\" does not exist. No settings were changed." 81 | exit 1 82 | fi 83 | if [[ ${?} -eq 0 ]] ; then 84 | echo "JSON checked successfully" 85 | curl ${CURL_OPTS} \ 86 | -H "Content-Type: application/json" \ 87 | -H "Authorization: Bearer ${API_TOKEN}" \ 88 | -X POST \ 89 | -d @${SETTINGS_JSON} \ 90 | ${URL}/api/admin/ldap/settings/sync | ${JSON_FILTER} 91 | if [[ ${?} -eq 0 ]] ; then 92 | if [[ ${FORCESYNC} = true ]] ; then 93 | force_sync 94 | else 95 | exit 0 96 | fi 97 | else 98 | exit ${?} 99 | fi 100 | else 101 | echo "\"${SETTINGS_JSON}\" contains invalid JSON. No settings were changed." 
102 | exit 1 103 | fi 104 | fi 105 | 106 | elif [[ ${DELETE} = true ]] ; then 107 | if [[ ${SET} = true || ${REPORT} = true ]] ; then 108 | print_usage 109 | else 110 | curl ${CURL_OPTS} \ 111 | -H "Authorization: Bearer ${API_TOKEN}" \ 112 | -X DELETE \ 113 | ${SYNC_SETTINGS_ENDPOINT} | ${JSON_FILTER} 114 | if [[ ${?} -eq 0 ]] ; then 115 | if [[ ${FORCESYNC} = true ]] ; then 116 | force_sync 117 | else 118 | exit 0 119 | fi 120 | else 121 | exit ${?} 122 | fi 123 | fi 124 | 125 | elif [[ ${REPORT} = true ]] ; then 126 | if [[ ${SET} = true || ${DELETE} = true || ${FORCESYNC} = true ]] ; then 127 | print_usage 128 | else 129 | curl ${CURL_OPTS} \ 130 | -H "Authorization: Bearer ${API_TOKEN}" \ 131 | -X GET \ 132 | ${SYNC_REPORT_ENDPOINT} | ${JSON_FILTER} 133 | exit ${?} 134 | fi 135 | 136 | elif [[ ${FORCESYNC} = true ]] ; then 137 | if [[ ${SET} = true || ${DELETE} = true || ${REPORT} = true ]] ; then 138 | print_usage 139 | else 140 | force_sync 141 | fi 142 | 143 | else 144 | curl ${CURL_OPTS} \ 145 | -H "Authorization: Bearer ${API_TOKEN}" \ 146 | -X GET \ 147 | ${SYNC_SETTINGS_ENDPOINT} | ${JSON_FILTER} 148 | exit ${?} 149 | fi 150 | -------------------------------------------------------------------------------- /SSO/LDAP/beta/settings_mapping_hybrid.json: -------------------------------------------------------------------------------- 1 | { 2 | "ldapTeamMapping": [ 3 | { 4 | "ldapFilterSettings": { 5 | "searchFilter": "(&(objectClass=organizationalPerson)(memberOf=CN=Sysdig Viewers,CN=Users,DC=example,DC=local)(sAMAccountName=*))", 6 | "searchBase": "cn=Users" 7 | }, 8 | "teams": [ 9 | "Viewers" 10 | ], 11 | "teamRole": "ROLE_TEAM_READ", 12 | "usernameAttribute": "mail" 13 | }, 14 | { 15 | "ldapFilterSettings": { 16 | "searchFilter": "(&(objectClass=organizationalPerson)(memberOf=CN=Sysdig Editors,CN=Users,DC=example,DC=local)(sAMAccountName=*))", 17 | "searchBase": "cn=Users" 18 | }, 19 | "teams": [ 20 | "Editors1", 21 | "Editors2" 22 | ], 23 | "teamRole": 
"ROLE_TEAM_EDIT", 24 | "usernameAttribute": "mail" 25 | }, 26 | { 27 | "ldapFilterSettings": { 28 | "searchFilter": "(&(objectClass=organizationalPerson)(givenName=Mary))", 29 | "searchBase": "cn=Users" 30 | }, 31 | "teams": [ 32 | "Mixed" 33 | ], 34 | "teamRole": "ROLE_TEAM_EDIT", 35 | "usernameAttribute": "mail" 36 | }, 37 | { 38 | "ldapFilterSettings": { 39 | "searchFilter": "(&(objectClass=organizationalPerson)(sAMAccountName=jdoe))", 40 | "searchBase": "cn=Users" 41 | }, 42 | "teams": [ 43 | "Mixed" 44 | ], 45 | "teamRole": "ROLE_TEAM_READ", 46 | "usernameAttribute": "mail" 47 | } 48 | ], 49 | "teamDefinitions": [ 50 | { 51 | "name": "Mixed", 52 | "show": "host", 53 | "products": ["SDC"] 54 | }, 55 | { 56 | "name": "Viewers", 57 | "show": "host", 58 | "products": ["SDC"] 59 | }, 60 | { 61 | "name": "Editors1", 62 | "show": "host", 63 | "products": ["SDC"] 64 | }, 65 | { 66 | "name": "Editors2", 67 | "theme": "#FF5C49", 68 | "show": "container", 69 | "filter": "container.image contains \"mysql\"", 70 | "canUseSysdigCapture": false, 71 | "canUseCustomEvents": false, 72 | "canUseAwsMetrics": false, 73 | "entryPoint": { 74 | "module": "Dashboards" 75 | }, 76 | "products": ["SDC"] 77 | } 78 | ], 79 | "dryRun": true 80 | } 81 | -------------------------------------------------------------------------------- /SSO/LDAP/beta/settings_mapping_simple.json: -------------------------------------------------------------------------------- 1 | { 2 | "ldapTeamMapping": [ 3 | { 4 | "ldapFilterSettings": { 5 | "searchFilter": "(&(objectClass=organizationalPerson)(memberOf=CN=Sysdig Viewers,CN=Users,DC=example,DC=local)(sAMAccountName=*))", 6 | "searchBase": "cn=Users" 7 | }, 8 | "teams": [ 9 | "Viewers" 10 | ], 11 | "teamRole": "ROLE_TEAM_READ", 12 | "usernameAttribute": "sAMAccountName" 13 | }, 14 | { 15 | "ldapFilterSettings": { 16 | "searchFilter": "(&(objectClass=organizationalPerson)(memberOf=CN=Sysdig Editors,CN=Users,DC=example,DC=local)(sAMAccountName=*))", 17 | 
"searchBase": "cn=Users" 18 | }, 19 | "teams": [ 20 | "Editors1", 21 | "Editors2" 22 | ], 23 | "teamRole": "ROLE_TEAM_EDIT", 24 | "usernameAttribute": "sAMAccountName" 25 | }, 26 | { 27 | "ldapFilterSettings": { 28 | "searchFilter": "(&(objectClass=organizationalPerson)(givenName=Mary))", 29 | "searchBase": "cn=Users" 30 | }, 31 | "teams": [ 32 | "Mixed" 33 | ], 34 | "teamRole": "ROLE_TEAM_EDIT", 35 | "usernameAttribute": "sAMAccountName" 36 | }, 37 | { 38 | "ldapFilterSettings": { 39 | "searchFilter": "(&(objectClass=organizationalPerson)(sAMAccountName=jdoe))", 40 | "searchBase": "cn=Users" 41 | }, 42 | "teams": [ 43 | "Mixed" 44 | ], 45 | "teamRole": "ROLE_TEAM_READ", 46 | "usernameAttribute": "sAMAccountName" 47 | } 48 | ], 49 | "teamDefinitions": [ 50 | { 51 | "name": "Mixed", 52 | "products": ["SDC"], 53 | "show": "host" 54 | }, 55 | { 56 | "name": "Viewers", 57 | "products": ["SDC"], 58 | "show": "host" 59 | }, 60 | { 61 | "name": "Editors1", 62 | "products": ["SDC"], 63 | "show": "host" 64 | }, 65 | { 66 | "name": "Editors2", 67 | "products": ["SDC"], 68 | "theme": "#FF5C49", 69 | "show": "container", 70 | "filter": "container.image contains \"mysql\"", 71 | "canUseSysdigCapture": false, 72 | "canUseCustomEvents": false, 73 | "canUseAwsMetrics": false, 74 | "entryPoint": { 75 | "module": "Dashboards" 76 | } 77 | } 78 | ], 79 | "dryRun": true 80 | } 81 | -------------------------------------------------------------------------------- /SSO/LDAP/login_config.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -uo pipefail 3 | 4 | ENV="../env.sh" 5 | UTILS="../utils.sh" 6 | 7 | SET=false 8 | SETTINGS_JSON="" 9 | DELETE=false 10 | HELP=false 11 | SSO_KEYWORD="ldap" 12 | SCRIPT_NAME=`basename "${0}"` 13 | 14 | function print_usage() { 15 | echo "Usage: ./${SCRIPT_NAME} [OPTION]" 16 | echo 17 | echo "Affect LDAP login settings for your Sysdig software platform installation" 18 | echo 19 | echo "If no 
OPTION is specified, the current login config settings are printed" 20 | echo 21 | echo "Options:" 22 | echo " -s JSON_FILE Set the current LDAP login config to the contents of JSON_FILE" 23 | echo " -d Delete the current LDAP login config" 24 | echo " -h Print this Usage output" 25 | exit 1 26 | } 27 | 28 | function set_settings() { 29 | get_settings_id 30 | if [[ -z "${SETTINGS_ID}" ]] ; then 31 | sed -i "s/\"version\".*$/\"version\": 1,/" ${SETTINGS_JSON} 32 | curl ${CURL_OPTS} \ 33 | -H "Authorization: Bearer ${API_TOKEN}" \ 34 | -H "Content-Type: application/json" \ 35 | -X POST \ 36 | -d @${SETTINGS_JSON} \ 37 | ${SETTINGS_ENDPOINT} | ${JSON_FILTER} 38 | else 39 | get_settings_version 40 | sed -i "s/\"version\".*$/\"version\": ${VERSION},/" ${SETTINGS_JSON} 41 | curl ${CURL_OPTS} \ 42 | -H "Authorization: Bearer ${API_TOKEN}" \ 43 | -H "Content-Type: application/json" \ 44 | -X PUT \ 45 | -d @${SETTINGS_JSON} \ 46 | ${SETTINGS_ENDPOINT}/${SETTINGS_ID} | ${JSON_FILTER} 47 | fi 48 | set_as_active_setting 49 | } 50 | 51 | eval "set -- $(getopt dhs: "$@")" 52 | while [[ $# -gt 0 ]] ; do 53 | case "${1}" in 54 | (-d) DELETE=true ;; 55 | (-h) HELP=true ;; 56 | (-s) SET=true; SETTINGS_JSON="${SETTINGS_JSON}${2}"; shift;; 57 | (--) shift; break;; 58 | (-*) echo "${0}: error - unrecognized option ${1}" 1>&2; exit 1;; 59 | (*) break;; 60 | esac 61 | shift 62 | done 63 | 64 | if [[ ${HELP} = true ]] ; then 65 | print_usage 66 | fi 67 | 68 | if [[ $# -gt 0 ]] ; then 69 | echo "Excess command-line arguments detected. Exiting." 
70 | echo 71 | print_usage 72 | fi 73 | 74 | if [[ -e "${ENV}" ]] ; then 75 | source "${ENV}" 76 | else 77 | echo "File not found: ${ENV}" 78 | echo "See the LDAP documentation for details on populating this file with your settings" 79 | exit 1 80 | fi 81 | 82 | if [[ -e "${UTILS}" ]] ; then 83 | source "${UTILS}" 84 | else 85 | echo "File not found: ${UTILS}" 86 | echo "See the LDAP documentation for details on populating this file with your settings" 87 | exit 1 88 | fi 89 | 90 | SETTINGS_ENDPOINT="${URL}/api/admin/auth/settings" 91 | ACTIVE_ENDPOINT="${URL}/api/auth/settings/active" 92 | 93 | if [[ ${SET} = true ]] ; then 94 | if [[ ${DELETE} = true ]] ; then 95 | print_usage 96 | fi 97 | set_settings 98 | elif [[ ${DELETE} = true ]] ; then 99 | if [[ ${SET} = true ]] ; then 100 | print_usage 101 | fi 102 | delete_settings 103 | else 104 | get_settings 105 | fi 106 | 107 | exit $? 108 | -------------------------------------------------------------------------------- /SSO/LDAP/settings_login_group.json: -------------------------------------------------------------------------------- 1 | { 2 | "authenticationSettings": { 3 | "settings": { 4 | "loginConnectionSettings": { 5 | "server": "ldap://172.16.0.1", 6 | "rootDn": "dc=example,dc=local", 7 | "managerDn": "cn=Administrator,cn=Users,dc=example,dc=local", 8 | "managerPassword": "myMgrPassword" 9 | }, 10 | "loginFilter": { 11 | "searchBase": "cn=Users", 12 | "searchFilter": "(&(memberOf=CN=Mars,OU=Planets,DC=example,DC=local)(objectClass=organizationalPerson)(sAMAccountName={0}))" 13 | } 14 | }, 15 | "version": 1, 16 | "type": "ldap" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /SSO/LDAP/settings_login_group_deprecated.json: -------------------------------------------------------------------------------- 1 | { 2 | "authenticationSettings": { 3 | "settings": { 4 | "loginConnectionSettings": { 5 | "server": "ldap://172.16.0.1", 6 | "rootDn": 
"dc=example,dc=local", 7 | "managerDn": "cn=Administrator,cn=Users,dc=example,dc=local", 8 | "managerPassword": "myMgrPassword" 9 | }, 10 | "loginFilter": { 11 | "searchBase": "cn=Users", 12 | "searchFilter": "(&(objectClass=organizationalPerson)(sAMAccountName={0}))", 13 | "groupSearchBase": "ou=Planets", 14 | "groupSearchFilter": "(&(cn=Mars)(objectclass=group))", 15 | "groupMembershipFilter": "" 16 | } 17 | }, 18 | "version": 1, 19 | "type": "ldap" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /SSO/LDAP/settings_login_referral_follow.json: -------------------------------------------------------------------------------- 1 | { 2 | "authenticationSettings": { 3 | "settings": { 4 | "loginConnectionSettings": { 5 | "server": "ldap://172.16.0.1", 6 | "managerDn": "cn=Administrator,cn=Users,dc=example,dc=local", 7 | "managerPassword": "myMgrPassword", 8 | "referral": "FOLLOW" 9 | }, 10 | "loginFilter": { 11 | "searchBase": "dc=example,dc=local", 12 | "searchFilter": "(&(objectClass=organizationalPerson)(sAMAccountName={0}))" 13 | } 14 | }, 15 | "version": 1, 16 | "type": "ldap" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /SSO/LDAP/settings_login_simple.json: -------------------------------------------------------------------------------- 1 | { 2 | "authenticationSettings": { 3 | "settings": { 4 | "loginConnectionSettings": { 5 | "server": "ldap://172.16.0.1", 6 | "rootDn": "dc=example,dc=local", 7 | "managerDn": "cn=Administrator,cn=Users,dc=example,dc=local", 8 | "managerPassword": "myMgrPassword" 9 | }, 10 | "loginFilter": { 11 | "searchBase": "cn=Users", 12 | "searchFilter": "(&(objectClass=organizationalPerson)(sAMAccountName={0}))" 13 | } 14 | }, 15 | "version": 1, 16 | "type": "ldap" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /SSO/LDAP/verify_user.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -uo pipefail 3 | 4 | ENV="../env.sh" 5 | USER_SPECIFIED=false 6 | USERNAME="" 7 | HELP=false 8 | 9 | function print_usage() { 10 | echo "Usage: ./verify_user -u USERNAME" 11 | echo 12 | echo "Verify a user could login via current LDAP Authentication configuration" 13 | echo 14 | echo "Options:" 15 | echo " -u USERNAME Name of the directory user to query via LDAP" 16 | echo " -h Print this Usage output" 17 | exit 1 18 | } 19 | 20 | eval "set -- $(getopt hu: "$@")" 21 | while [[ $# -gt 0 ]] ; do 22 | case "${1}" in 23 | (-h) HELP=true ;; 24 | (-u) USER_SPECIFIED=true; USERNAME="${2}"; shift;; 25 | (--) shift; break;; 26 | (-*) echo "${0}: error - unrecognized option ${1}" 1>&2; exit 1;; 27 | (*) break;; 28 | esac 29 | shift 30 | done 31 | 32 | if [[ ${HELP} = true ]] ; then 33 | print_usage 34 | fi 35 | 36 | if [[ $# -gt 0 ]] ; then 37 | echo "Excess command-line arguments detected. Exiting." 38 | echo 39 | print_usage 40 | fi 41 | 42 | if [[ -e "${ENV}" ]] ; then 43 | source "${ENV}" 44 | else 45 | echo "File not found: ${ENV}" 46 | echo "See the LDAP documentation for details on populating this file with your settings" 47 | exit 1 48 | fi 49 | 50 | if [[ ${USER_SPECIFIED} = true ]] ; then 51 | curl -f ${CURL_OPTS} \ 52 | -H "Authorization: Bearer ${API_TOKEN}" \ 53 | $URL/api/admin/ldap/settings/verify/"${USERNAME}" | ${JSON_FILTER} 54 | 55 | RET=$? 56 | if [[ ${RET} -ne 0 ]] ; then 57 | echo "Could not verify user \"${USERNAME}\". Check LDAP login config settings and/or system log." 
58 | exit ${RET} 59 | else 60 | exit 0 61 | fi 62 | 63 | else 64 | print_usage 65 | fi 66 | -------------------------------------------------------------------------------- /SSO/OpenID_Connect/README.md: -------------------------------------------------------------------------------- 1 | # Configure OpenID auth 2 | 3 | Remember to fill out your environment URL and the Monitor or Secure API token at `../env.sh`. Depending on which API token you choose, the script will configure the settings for one or the other product. 4 | 5 | ## Examples 6 | 7 | Show command help 8 | 9 | ``` 10 | ./oidc_config.sh -h 11 | ``` 12 | 13 | Get current configuration 14 | 15 | ``` 16 | ./oidc_config.sh 17 | ``` 18 | 19 | Configure some settings 20 | 21 | ``` 22 | ./oidc_config.sh -s -u https://foo.oktapreview.com -i foobar -e foobar 23 | 24 | ``` 25 | 26 | Delete current settings: 27 | 28 | ``` 29 | ./oidc_config.sh -d 30 | ``` 31 | -------------------------------------------------------------------------------- /SSO/OpenID_Connect/oidc_config.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -uo pipefail 3 | 4 | ENV="../env.sh" 5 | UTILS="../utils.sh" 6 | 7 | SET=false 8 | DELETE=false 9 | HELP=false 10 | ISSUER_URL="" 11 | CLIENT_ID="" 12 | CLIENT_SECRET="" 13 | SSO_KEYWORD="openid" 14 | SCRIPT_NAME=`basename "${0}"` 15 | 16 | function print_usage() { 17 | echo "Usage: ./${SCRIPT_NAME} [OPTIONS]" 18 | echo 19 | echo "Affect OpenID Connect login settings for your Sysdig software platform installation" 20 | echo 21 | echo "If no OPTIONS are specified, the current login config settings are printed" 22 | echo 23 | echo "Options:" 24 | 25 | echo " -s Set the current OpenID Connect login config" 26 | echo " -u Issuer URL from your OpenID Provider config" 27 | echo " -i Client ID from your OpenID Provider config" 28 | echo " -e Client Secret from your OpenID Provider config" 29 | echo " -d Delete the current OpenID Connect 
login config"
    echo " -h Print this Usage output"
    exit 1
}

# Require all three provider values before attempting any change.
function check_provider_variables() {
    if [[ -z "${ISSUER_URL}" || -z "${CLIENT_ID}" || -z "${CLIENT_SECRET}" ]] ; then
        echo "To change settings, you must enter values for Issuer URL, Client ID, and Client Secret"
        echo
        print_usage
    fi
}

# Create (POST) the OpenID settings if none exist, otherwise update (PUT)
# the existing record, then mark them as the active auth method.
function set_settings() {
    check_provider_variables
    get_settings_id
    if [[ -z "${SETTINGS_ID}" ]] ; then
        curl ${CURL_OPTS} \
            -H "Authorization: Bearer ${API_TOKEN}" \
            -H "Content-Type: application/json" \
            -X POST \
            -d '{
                "authenticationSettings": {
                  "type": "'"${SSO_KEYWORD}"'",
                  "settings": {
                    "issuer":"'"${ISSUER_URL}"'",
                    "clientId":"'"${CLIENT_ID}"'",
                    "clientSecret":"'"${CLIENT_SECRET}"'",
                    "metadataDiscovery":true}}}' \
            "${SETTINGS_ENDPOINT}" | ${JSON_FILTER}
    else
        # The API requires the current record version on update.
        get_settings_version
        curl ${CURL_OPTS} \
            -H "Authorization: Bearer ${API_TOKEN}" \
            -H "Content-Type: application/json" \
            -X PUT \
            -d '{
                "authenticationSettings": {
                  "type": "'"${SSO_KEYWORD}"'",
                  "version": "'"${VERSION}"'",
                  "settings": {
                    "issuer":"'"${ISSUER_URL}"'",
                    "clientId":"'"${CLIENT_ID}"'",
                    "clientSecret":"'"${CLIENT_SECRET}"'",
                    "metadataDiscovery":true}}}' \
            "${SETTINGS_ENDPOINT}/${SETTINGS_ID}" | ${JSON_FILTER}
    fi
    set_as_active_setting
}

eval "set -- $(getopt sdhu:i:e: "$@")"
while [[ $# -gt 0 ]] ; do
    case "${1}" in
        (-s) SET=true ;;
        (-d) DELETE=true ;;
        (-h) HELP=true ;;
        (-u) ISSUER_URL="${ISSUER_URL}${2}"; shift;;
        (-i) CLIENT_ID="${CLIENT_ID}${2}"; shift;;
        (-e) CLIENT_SECRET="${CLIENT_SECRET}${2}"; shift;;
        (--) shift; break;;
        (-*) echo "${0}: error - unrecognized option ${1}" 1>&2; exit 1;;
        (*) break;;
    esac
    shift
done

if [[ ${HELP} = true ]] ; then
    print_usage
fi

if [[ $# -gt 0 ]] ; then
    echo "Excess command-line arguments detected. Exiting."
    echo
    print_usage
fi

if [[ -e "${ENV}" ]] ; then
    source "${ENV}"
else
    echo "File not found: ${ENV}"
    echo "See the OpenID Connect documentation for details on populating this file with your settings"
    exit 1
fi

if [[ -e "${UTILS}" ]] ; then
    source "${UTILS}"
else
    echo "File not found: ${UTILS}"
    echo "See the OpenID Connect documentation for details on populating this file with your settings"
    exit 1
fi

SETTINGS_ENDPOINT="${URL}/api/admin/auth/settings"
ACTIVE_ENDPOINT="${URL}/api/auth/settings/active"

# -s and -d are mutually exclusive; with neither, print current settings.
if [[ ${SET} = true ]] ; then
    if [[ ${DELETE} = true ]] ; then
        print_usage
    fi
    set_settings
elif [[ ${DELETE} = true ]] ; then
    delete_settings
else
    get_settings
fi

exit $?
--------------------------------------------------------------------------------
/SSO/README.md:
--------------------------------------------------------------------------------
# SSO onprem helper scripts

Under every folder you will find a helper script to configure and enable the following login methods:

* SAML
* OpenId
* Google Oauth
* LDAP

Probably you reached this repo coming from our documentation. If not, [this link to the docs](https://docs.sysdig.com/en/docs/administration/on-premises-deployments/authentication-and-authorization-on-prem-options/) should be helpful.

## How to run the scripts

* Edit `env.sh` file with your onprem instance URL and the API_TOKEN of the "super user" obtained from Sysdig Monitor or Sysdig Secure product.
Depending on which token you take, the auth settings will be applied to Monitor or Secure (the exception is LDAP, as its configuration affects both products in the same way).
* Use the auth type of interest as the folder name (and take a look at the README file there)
* Run the script

```
cd SAML
./saml_config.sh -h
```
--------------------------------------------------------------------------------
/SSO/SAML/README.md:
--------------------------------------------------------------------------------
# Configure SAML auth

Remember to fill out your environment URL and the Monitor or Secure API token at `../env.sh`. Depending on which API token you choose, the script will configure the settings for one or the other product.

## Examples

Show command help

```
./saml_config.sh -h
```

Get current SAML configuration

```
./saml_config.sh
```

Set some OKTA settings

```
./saml_config.sh -s -i okta -m 'https://foo.oktapreview.com/app/bar/sso/saml/metadata'
```

Disable user autocreation after login succeeds at IDP

```
./saml_config.sh -s -n -i okta -m 'https://foo.oktapreview.com/app/bar/sso/saml/metadata'
```

Delete current settings:

```
./saml_config.sh -d
```
--------------------------------------------------------------------------------
/SSO/SAML/saml_config.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Get / set / delete the SAML login settings of a Sysdig onprem install,
# using the shared helpers in ../utils.sh and config in ../env.sh.
set -uo pipefail

ENV="../env.sh"
UTILS="../utils.sh"

SET=false
DELETE=false
HELP=false
IDP=""
AUTOCREATE=true
METADATA_URL=""
SSO_KEYWORD="saml"
SCRIPT_NAME=$(basename "${0}")

function print_usage() {
    echo "Usage: ./${SCRIPT_NAME} [OPTIONS]"
    echo
    echo "Affect SAML login settings for your Sysdig software platform installation"
    echo
    echo "If no OPTIONS are specified, the current login config settings are printed"
    echo
    echo "Options:"

    echo " -s Set the specified SAML login config"
    echo " -i Use SAML config options based on a supported IDP"
    echo " -m Metadata URL (provided from IDP-side configuration)"
    echo " -n Disable auto-creation of user records upon first successful auth"
    echo " -d Delete the current SAML login config"
    echo " -h Print this Usage output"
    exit 1
}

# Translate the -i IDP keyword into provider-specific SAML options and make
# sure a metadata URL was supplied. Supported IDPs: okta, onelogin.
function set_provider_variables() {
    if [[ -z "${IDP}" ]] ; then
        echo "IDP is unspecified. Contact Sysdig Support for assistance."
        echo
        print_usage
    fi

    if [[ ${IDP} = "okta" ]] ; then
        SIGNED_ASSERTION="true"
        VALIDATE_SIGNATURE="true"
        VERIFY_DESTINATION="true"
        EMAIL_PARAM="email"
    elif [[ ${IDP} = "onelogin" ]] ; then
        SIGNED_ASSERTION="false"
        VALIDATE_SIGNATURE="true"
        VERIFY_DESTINATION="true"
        EMAIL_PARAM="User.email"
    else
        echo "IDP is unknown. Contact Sysdig Support for assistance."
        exit 1
    fi

    if [[ -z "${METADATA_URL}" ]] ; then
        echo "Must specify a metadata URL (provided from IDP-side configuration)"
        echo
        print_usage
    fi
}

# Create (POST) the SAML settings if none exist, otherwise update (PUT) the
# existing record, then mark them as the active auth method.
function set_settings() {
    set_provider_variables
    get_settings_id
    if [[ -z "${SETTINGS_ID}" ]] ; then
        curl ${CURL_OPTS} \
            -H "Authorization: Bearer ${API_TOKEN}" \
            -H "Content-Type: application/json" \
            -X POST \
            -d '{
                "authenticationSettings": {
                  "type": "'"${SSO_KEYWORD}"'",
                  "settings": {
                    "metadataUrl": "'"${METADATA_URL}"'",
                    "enabled": "'"true"'",
                    "signedAssertion": "'"${SIGNED_ASSERTION}"'",
                    "validateSignature": "'"${VALIDATE_SIGNATURE}"'",
                    "verifyDestination": "'"${VERIFY_DESTINATION}"'",
                    "emailParameter": "'"${EMAIL_PARAM}"'",
                    "createUserOnLogin": "'"${AUTOCREATE}"'" }}}' \
            "${SETTINGS_ENDPOINT}" | ${JSON_FILTER}
    else
        # The API requires the current record version on update.
        get_settings_version
        curl ${CURL_OPTS} \
            -H "Authorization: Bearer ${API_TOKEN}" \
            -H "Content-Type: application/json" \
            -X PUT \
            -d '{
                "authenticationSettings": {
                  "type": "'"${SSO_KEYWORD}"'",
                  "version": "'"${VERSION}"'",
                  "settings": {
                    "metadataUrl": "'"${METADATA_URL}"'",
                    "enabled": "'"true"'",
                    "signedAssertion": "'"${SIGNED_ASSERTION}"'",
                    "validateSignature": "'"${VALIDATE_SIGNATURE}"'",
                    "verifyDestination": "'"${VERIFY_DESTINATION}"'",
                    "emailParameter": "'"${EMAIL_PARAM}"'",
                    "createUserOnLogin": "'"${AUTOCREATE}"'" }}}' \
            "${SETTINGS_ENDPOINT}/${SETTINGS_ID}" | ${JSON_FILTER}
    fi
    set_as_active_setting
}


# NOTE(review): legacy getopt mangles arguments containing whitespace; the
# metadata URL must therefore not contain spaces. Consider builtin getopts.
eval "set -- $(getopt sdhni:m: "$@")"
while [[ $# -gt 0 ]] ; do
    case "${1}" in
        (-s) SET=true ;;
        (-d) DELETE=true ;;
        (-h) HELP=true ;;
        (-n) AUTOCREATE="false" ;;
        (-i) IDP="${IDP}${2}"; shift;;
        (-m) METADATA_URL="${METADATA_URL}${2}"; shift;;
        (--) shift; break;;
        (-*) echo "${0}: error - unrecognized option ${1}" 1>&2; exit 1;;
        (*) break;;
    esac
    shift
done

if [[ ${HELP} = true ]] ; then
    print_usage
fi

if [[ $# -gt 0 ]] ; then
    echo "Excess command-line arguments detected. Exiting."
    echo
    print_usage
fi

if [[ -e "${ENV}" ]] ; then
    source "${ENV}"
else
    echo "File not found: ${ENV}"
    echo "See the SAML documentation for details on populating this file with your settings"
    exit 1
fi

if [[ -e "${UTILS}" ]] ; then
    source "${UTILS}"
else
    echo "File not found: ${UTILS}"
    echo "See the SAML documentation for details on populating this file with your settings"
    exit 1
fi

SETTINGS_ENDPOINT="${URL}/api/admin/auth/settings"
ACTIVE_ENDPOINT="${URL}/api/auth/settings/active"

# -s and -d are mutually exclusive; with neither, print current settings.
if [[ ${SET} = true ]] ; then
    if [[ ${DELETE} = true ]] ; then
        print_usage
    fi
    set_settings
elif [[ ${DELETE} = true ]] ; then
    delete_settings
else
    get_settings
fi

exit $?
--------------------------------------------------------------------------------
/SSO/env.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

#
# Set this to "Super" Admin User Sysdig Monitor or Sysdig Secure API Token value.
# You will find it at "User Profile" under "Settings" page. Depending on the token you
# type here Monitor or Secure settings will be changed.
#
export API_TOKEN="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"

#
# Set this to the URL through which you access your Sysdig application UI.
#
export URL="https://10.0.0.1"

#
# Set options used in other scripts that invoke curl.
# We've set these to what we think
# are sensible defaults:
#
# -s
#   Silent mode, to make outputs brief. If you're debugging and want verbose outputs,
#   you might want to change this to -v.
#
# -k
#   Leave this set to "-k" to allow curl to connect to your Sysdig API even if a self-
#   signed certificate is in use (the default in a Sysdig software platform install).
#
# -w \n
#   Print a newline after curl prints responses. This will make the Sysdig platform's
#   JSON responses easier to read.
#
export CURL_OPTS="-s -k -w \n"

#
# Install jq command line tool to run the script. This should be achieved by running:
# sudo apt install jq
# Or similar command, depending your OS
#

if hash jq 2>/dev/null ; then
    export JSON_FILTER="jq"
else
    echo "Please install jq tool to run this command"
    echo "This should be achieved by running 'sudo apt install jq' or similar command"
    echo
    exit 1
fi
--------------------------------------------------------------------------------
/SSO/utils.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Shared helpers for the SSO config scripts. Callers must have sourced env.sh
# (API_TOKEN, URL, CURL_OPTS, JSON_FILTER) and defined SSO_KEYWORD,
# SCRIPT_NAME, SETTINGS_ENDPOINT and ACTIVE_ENDPOINT before using these.
# CURL_OPTS is intentionally left unquoted below so it word-splits into options.

# Set SETTINGS_ID to the id of the stored settings matching SSO_KEYWORD
# (empty if none exist).
function get_settings_id() {
    SETTINGS_ID=$(curl ${CURL_OPTS} \
        -H "Authorization: Bearer ${API_TOKEN}" \
        -X GET \
        "${SETTINGS_ENDPOINT}" | jq '.authenticationSettings | .[] | select(.type=="'"${SSO_KEYWORD}"'") | .id')
}

# Should be run after get_settings_id so ${SETTINGS_ID} might be set
function exit_if_no_settings_id() {
    if [[ -z "${SETTINGS_ID}" ]] ; then
        echo "No ${SSO_KEYWORD} settings are set"
        echo "Run for further info: ./${SCRIPT_NAME} -h"
        echo
        exit 0
    fi
}

# Set ACTIVE_SETTINGS_TYPE to the type of the currently active auth settings.
function get_active_settings_type() {
    ACTIVE_SETTINGS_TYPE=$(curl ${CURL_OPTS} \
        -H "Authorization: Bearer ${API_TOKEN}" \
        -X GET \
        "${ACTIVE_ENDPOINT}" | jq '.activeSettings | .type')
}

# Set VERSION to the stored settings' record version (needed for PUT updates).
function get_settings_version() {
    VERSION=$(curl ${CURL_OPTS} \
        -H "Authorization: Bearer ${API_TOKEN}" \
        -X GET \
        "${SETTINGS_ENDPOINT}" | jq '.authenticationSettings | .[] | select(.type=="'"${SSO_KEYWORD}"'") | .version')
}

# Make the stored settings for SSO_KEYWORD the active auth method.
function set_as_active_setting() {
    get_settings_id
    curl ${CURL_OPTS} \
        -H "Authorization: Bearer ${API_TOKEN}" \
        -X PUT \
        "${ACTIVE_ENDPOINT}/${SETTINGS_ID}" | ${JSON_FILTER}
}

# Deactivate the current auth method if it is of our SSO_KEYWORD type.
function disable_current_sso_auth_if_needed() {
    if [[ ${ACTIVE_SETTINGS_TYPE} == *${SSO_KEYWORD}* ]]; then
        curl ${CURL_OPTS} \
            -H "Authorization: Bearer ${API_TOKEN}" \
            -X DELETE \
            "${ACTIVE_ENDPOINT}" | ${JSON_FILTER}
    fi
}

# Delete the stored settings, first deactivating them if they are active.
function delete_settings() {
    get_settings_id
    exit_if_no_settings_id
    get_active_settings_type
    disable_current_sso_auth_if_needed

    curl ${CURL_OPTS} \
        -H "Authorization: Bearer ${API_TOKEN}" \
        -X DELETE \
        "${SETTINGS_ENDPOINT}/${SETTINGS_ID}" | ${JSON_FILTER}
}

# Report whether this SSO type is the active auth method, then dump its settings.
function get_settings() {
    get_settings_id
    exit_if_no_settings_id

    get_active_settings_type
    if [[ ${ACTIVE_SETTINGS_TYPE} == *${SSO_KEYWORD}* ]]; then
        echo "${SSO_KEYWORD} is selected as auth method"
    else
        echo "${SSO_KEYWORD} is not selected as auth method"
    fi

    curl ${CURL_OPTS} \
        -H "Authorization: Bearer ${API_TOKEN}" \
        -X GET \
        "${SETTINGS_ENDPOINT}/${SETTINGS_ID}" | ${JSON_FILTER}
}
--------------------------------------------------------------------------------
/agent_deploy/ibm-iks/install-agent-k8s.sh:
--------------------------------------------------------------------------------
../IBMCloud-Kubernetes-Service/install-agent-k8s.sh
--------------------------------------------------------------------------------
/agent_deploy/kubernetes/sysdig-agent-clusterrole.yaml:
--------------------------------------------------------------------------------
# ClusterRole for the Sysdig agent: read-only access to workload, node and
# storage metadata used to enrich metrics/events, plus create/update on
# coordination leases and read access to the /metrics endpoint.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: sysdig-agent
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - replicationcontrollers
  - services
  - endpoints
  - events
  - limitranges
  - namespaces
  - nodes
  - nodes/metrics
  - nodes/proxy
  - resourcequotas
  - persistentvolumes
  - persistentvolumeclaims
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - daemonsets
  - deployments
  - replicasets
  - statefulsets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - replicasets
  verbs:
  - get
  - list
  - watch
# Leases need write verbs (create/update) for agent leader election.
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - list
  - create
  - update
  - watch
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - certificates.k8s.io
  resources:
  - certificatesigningrequests
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  verbs:
  - get
  - list
  - watch
- nonResourceURLs:
  - /metrics
  verbs:
  - get
--------------------------------------------------------------------------------
/agent_deploy/kubernetes/sysdig-agent-configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: sysdig-agent
data:
  # dragent.yaml is mounted into the agent container; uncomment entries below
  # to override the agent defaults (collector address, TLS, k8s naming, ...).
  dragent.yaml: |
    configmap: true
    ### Agent tags
    # tags: linux:ubuntu,dept:dev,local:nyc

    #### Sysdig Software related config ####

    # Sysdig collector address
    # collector: 192.168.1.1

    # Collector TCP port
    # collector_port: 6666

    # Whether collector accepts ssl
    # ssl: true

    # collector certificate validation
    # ssl_verify_certificate: true

    #######################################
    # new_k8s: true
    # k8s_cluster_name: production
--------------------------------------------------------------------------------
/agent_deploy/kubernetes/sysdig-agent-daemonset-v1.yaml:
--------------------------------------------------------------------------------
############################ DEPRECATION NOTICE ##########################
# This daemonset yaml configures everything as an environment variable.
3 | # We recommend to use v2 daemonset which instead leverages Kubernetes 4 | # best practices like Secrets and ConfigMaps 5 | ########################################################################## 6 | apiVersion: extensions/v1beta1 7 | kind: DaemonSet 8 | metadata: 9 | name: sysdig-agent 10 | labels: 11 | app: sysdig-agent 12 | spec: 13 | updateStrategy: 14 | type: RollingUpdate 15 | template: 16 | metadata: 17 | labels: 18 | app: sysdig-agent 19 | spec: 20 | volumes: 21 | - name: modprobe-d 22 | hostPath: 23 | path: /etc/modprobe.d 24 | ### uncomment for minikube 25 | # - name: etc-version 26 | # hostPath: 27 | # path: /etc/VERSION 28 | # type: FileOrCreate 29 | - name: dshm 30 | emptyDir: 31 | medium: Memory 32 | - name: dev-vol 33 | hostPath: 34 | path: /dev 35 | - name: proc-vol 36 | hostPath: 37 | path: /proc 38 | - name: boot-vol 39 | hostPath: 40 | path: /boot 41 | - name: modules-vol 42 | hostPath: 43 | path: /lib/modules 44 | - name: usr-vol 45 | hostPath: 46 | path: /usr 47 | - name: run-vol 48 | hostPath: 49 | path: /run 50 | - name: varrun-vol 51 | hostPath: 52 | path: /var/run 53 | - name: podinfo 54 | downwardAPI: 55 | defaultMode: 420 56 | items: 57 | - fieldRef: 58 | apiVersion: v1 59 | fieldPath: metadata.namespace 60 | path: namespace 61 | - fieldRef: 62 | apiVersion: v1 63 | fieldPath: metadata.name 64 | path: name 65 | hostNetwork: true 66 | dnsPolicy: ClusterFirstWithHostNet 67 | hostPID: true 68 | tolerations: 69 | - effect: NoSchedule 70 | key: node-role.kubernetes.io/master 71 | ### OPTIONAL: If using OpenShift or Kubernetes RBAC you need to uncomment the following line 72 | # serviceAccount: sysdig-agent 73 | terminationGracePeriodSeconds: 5 74 | containers: 75 | - name: sysdig-agent 76 | image: quay.io/sysdig/agent 77 | imagePullPolicy: Always 78 | securityContext: 79 | privileged: true 80 | resources: 81 | # Resources needed are subjective on the actual workload 82 | # please refer to Sysdig Support for more info about it 83 | 
requests: 84 | cpu: 100m 85 | memory: 512Mi 86 | limits: 87 | memory: 1024Mi 88 | readinessProbe: 89 | exec: 90 | command: [ "test", "-e", "/opt/draios/logs/running" ] 91 | initialDelaySeconds: 10 92 | env: 93 | ### REQUIRED: replace with your Sysdig Platform access key 94 | - name: ACCESS_KEY 95 | value: key 96 | ### OPTIONAL: add tags for this host 97 | # - name: TAGS 98 | # value: linux:ubuntu,dept:dev,local:nyc 99 | ### OPTIONAL: Needed to connect to a Sysdig On-Premises backend 100 | # - name: COLLECTOR_PORT 101 | # value: "6443" 102 | # - name: COLLECTOR 103 | # value: 192.168.1.200 104 | # - name: SECURE 105 | # value: "true" 106 | # - name: CHECK_CERTIFICATE 107 | # value: "false" 108 | ### OPTIONAL: Add additional parameters to the agent, refer to our Docs to know all options available 109 | # - name: ADDITIONAL_CONF 110 | # value: | 111 | # new_k8s: true 112 | # k8s_cluster_name: production 113 | volumeMounts: 114 | - mountPath: /etc/modprobe.d 115 | name: modprobe-d 116 | readOnly: true 117 | ### uncomment for minikube 118 | # - mountPath: /host/etc/VERSION 119 | # name: etc-version 120 | # readOnly: true 121 | - mountPath: /host/dev 122 | name: dev-vol 123 | readOnly: false 124 | - mountPath: /host/proc 125 | name: proc-vol 126 | readOnly: true 127 | - mountPath: /host/boot 128 | name: boot-vol 129 | readOnly: true 130 | - mountPath: /host/lib/modules 131 | name: modules-vol 132 | readOnly: true 133 | - mountPath: /host/usr 134 | name: usr-vol 135 | readOnly: true 136 | - mountPath: /host/run 137 | name: run-vol 138 | - mountPath: /host/var/run 139 | name: varrun-vol 140 | - mountPath: /dev/shm 141 | name: dshm 142 | - mountPath: /etc/podinfo 143 | name: podinfo 144 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-agent-daemonset-v2.yaml: -------------------------------------------------------------------------------- 1 | ### WARNING: this file is supported from Sysdig Agent 0.80.0 2 | # 
apiVersion: extensions/v1beta1 # If you are in Kubernetes version 1.8 or less please use this line instead of the following one 3 | apiVersion: apps/v1 4 | kind: DaemonSet 5 | metadata: 6 | name: sysdig-agent 7 | labels: 8 | app: sysdig-agent 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: sysdig-agent 13 | updateStrategy: 14 | type: RollingUpdate 15 | template: 16 | metadata: 17 | labels: 18 | app: sysdig-agent 19 | spec: 20 | volumes: 21 | - name: modprobe-d 22 | hostPath: 23 | path: /etc/modprobe.d 24 | ### uncomment for minikube 25 | # - name: etc-version 26 | # hostPath: 27 | # path: /etc/VERSION 28 | # type: FileOrCreate 29 | - name: dshm 30 | emptyDir: 31 | medium: Memory 32 | - name: etc-vol 33 | hostPath: 34 | path: /etc 35 | - name: dev-vol 36 | hostPath: 37 | path: /dev 38 | - name: proc-vol 39 | hostPath: 40 | path: /proc 41 | - name: boot-vol 42 | hostPath: 43 | path: /boot 44 | - name: modules-vol 45 | hostPath: 46 | path: /lib/modules 47 | - name: usr-vol 48 | hostPath: 49 | path: /usr 50 | - name: run-vol 51 | hostPath: 52 | path: /run 53 | - name: varrun-vol 54 | hostPath: 55 | path: /var/run 56 | ### Uncomment these lines if you'd like to map /root/ from the 57 | # host into the container. This can be useful to map 58 | # /root/.sysdig to pick up custom kernel modules. 59 | # - name: host-root-vol 60 | # hostPath: 61 | # path: /root 62 | - name: sysdig-agent-config 63 | configMap: 64 | name: sysdig-agent 65 | optional: true 66 | - name: sysdig-agent-secrets 67 | secret: 68 | secretName: sysdig-agent 69 | - name: podinfo 70 | downwardAPI: 71 | defaultMode: 420 72 | items: 73 | - fieldRef: 74 | apiVersion: v1 75 | fieldPath: metadata.namespace 76 | path: namespace 77 | - fieldRef: 78 | apiVersion: v1 79 | fieldPath: metadata.name 80 | path: name 81 | # This section is for eBPF support. Please refer to Sysdig Support before 82 | # uncommenting, as eBPF is recommended for only a few configurations. 
83 | #- name: sys-tracing 84 | # hostPath: 85 | # path: /sys/kernel/debug 86 | hostNetwork: true 87 | dnsPolicy: ClusterFirstWithHostNet 88 | hostPID: true 89 | tolerations: 90 | - effect: NoSchedule 91 | key: node-role.kubernetes.io/master 92 | - effect: NoSchedule 93 | key: node-role.kubernetes.io/control-plane 94 | - effect: NoSchedule 95 | key: node-role.kubernetes.io/controlplane 96 | operator: Equal 97 | value: "true" 98 | - effect: NoExecute 99 | key: node-role.kubernetes.io/etcd 100 | operator: Equal 101 | value: "true" 102 | # The following line is necessary for RBAC 103 | serviceAccount: sysdig-agent 104 | terminationGracePeriodSeconds: 5 105 | ### Uncomment following 2 lines to pull images from a private registry, 106 | ### replacing secret-name with your secret name (previously created) 107 | #imagePullSecrets: 108 | #- name: secret-name 109 | containers: 110 | - name: sysdig-agent 111 | image: quay.io/sysdig/agent 112 | imagePullPolicy: Always 113 | securityContext: 114 | privileged: true 115 | runAsUser: 0 116 | resources: 117 | # Resources needed are subjective to the actual workload. 118 | # Please refer to Sysdig Support for more info. 119 | # See also: https://docs.sysdig.com/en/tuning-sysdig-agent.html 120 | requests: 121 | cpu: 1000m 122 | memory: 1024Mi 123 | limits: 124 | cpu: 1000m 125 | memory: 1024Mi 126 | readinessProbe: 127 | exec: 128 | command: [ "test", "-e", "/opt/draios/logs/running" ] 129 | initialDelaySeconds: 10 130 | env: 131 | - name: K8S_NODE 132 | valueFrom: 133 | fieldRef: 134 | fieldPath: spec.nodeName 135 | # This section is for eBPF support. Please refer to Sysdig Support before 136 | # uncommenting, as eBPF is recommended for only a few configurations. 
137 | # - name: SYSDIG_BPF_PROBE 138 | # value: "" 139 | volumeMounts: 140 | - mountPath: /etc/modprobe.d 141 | name: modprobe-d 142 | readOnly: true 143 | ### uncomment for minikube 144 | # - mountPath: /host/etc/VERSION 145 | # name: etc-version 146 | # readOnly: true 147 | - mountPath: /host/etc 148 | name: etc-vol 149 | readOnly: true 150 | - mountPath: /host/dev 151 | name: dev-vol 152 | readOnly: false 153 | - mountPath: /host/proc 154 | name: proc-vol 155 | readOnly: true 156 | - mountPath: /host/boot 157 | name: boot-vol 158 | readOnly: true 159 | - mountPath: /host/lib/modules 160 | name: modules-vol 161 | readOnly: true 162 | - mountPath: /host/usr 163 | name: usr-vol 164 | readOnly: true 165 | - mountPath: /host/run 166 | name: run-vol 167 | - mountPath: /host/var/run 168 | name: varrun-vol 169 | - mountPath: /dev/shm 170 | name: dshm 171 | - mountPath: /opt/draios/etc/kubernetes/config 172 | name: sysdig-agent-config 173 | - mountPath: /opt/draios/etc/kubernetes/secrets 174 | name: sysdig-agent-secrets 175 | - mountPath: /etc/podinfo 176 | name: podinfo 177 | ### Uncomment these lines if you'd like to map /root/ from the 178 | # host into the container. This can be useful to map 179 | # /root/.sysdig to pick up custom kernel modules. 180 | # - mountPath: /root 181 | # name: host-root-vol 182 | # This section is for eBPF support. Please refer to Sysdig Support before 183 | # uncommenting, as eBPF is recommended for only a few configurations. 
184 | #- mountPath: /sys/kernel/debug 185 | # name: sys-tracing 186 | # readOnly: true 187 | 188 | 189 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-agent-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: sysdig-agent 5 | labels: 6 | app: sysdig-agent 7 | spec: 8 | selector: 9 | app: sysdig-agent 10 | ports: 11 | - protocol: TCP 12 | port: 7765 13 | targetPort: 7765 14 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-agent-slim-daemonset-v1.yaml: -------------------------------------------------------------------------------- 1 | ############################ DEPRECATION NOTICE ########################## 2 | # This daemonset yaml configures everything as an environment variable. 3 | # We recommend to use v2 daemonset which instead leverages Kubernetes 4 | # best practices like Secrets and ConfigMaps 5 | ########################################################################## 6 | apiVersion: extensions/v1beta1 7 | kind: DaemonSet 8 | metadata: 9 | name: sysdig-agent 10 | labels: 11 | app: sysdig-agent 12 | spec: 13 | updateStrategy: 14 | type: RollingUpdate 15 | template: 16 | metadata: 17 | labels: 18 | app: sysdig-agent 19 | spec: 20 | volumes: 21 | - name: modprobe-d 22 | hostPath: 23 | path: /etc/modprobe.d 24 | ### uncomment for minikube 25 | # - name: etc-version 26 | # hostPath: 27 | # path: /etc/VERSION 28 | # type: FileOrCreate 29 | - name: dshm 30 | emptyDir: 31 | medium: Memory 32 | - name: dev-vol 33 | hostPath: 34 | path: /dev 35 | - name: proc-vol 36 | hostPath: 37 | path: /proc 38 | - name: boot-vol 39 | hostPath: 40 | path: /boot 41 | - name: modules-vol 42 | hostPath: 43 | path: /lib/modules 44 | - name: usr-vol 45 | hostPath: 46 | path: /usr 47 | - name: run-vol 48 | hostPath: 49 | path: /run 50 | - name: 
varrun-vol 51 | hostPath: 52 | path: /var/run 53 | - name: podinfo 54 | downwardAPI: 55 | defaultMode: 420 56 | items: 57 | - fieldRef: 58 | apiVersion: v1 59 | fieldPath: metadata.namespace 60 | path: namespace 61 | - fieldRef: 62 | apiVersion: v1 63 | fieldPath: metadata.name 64 | path: name 65 | hostNetwork: true 66 | dnsPolicy: ClusterFirstWithHostNet 67 | hostPID: true 68 | tolerations: 69 | - effect: NoSchedule 70 | key: node-role.kubernetes.io/master 71 | ### OPTIONAL: If using OpenShift or Kubernetes RBAC you need to uncomment the following line 72 | # serviceAccount: sysdig-agent 73 | terminationGracePeriodSeconds: 5 74 | initContainers: 75 | - name: sysdig-agent-kmodule 76 | image: quay.io/sysdig/agent-kmodule 77 | imagePullPolicy: Always 78 | securityContext: 79 | privileged: true 80 | resources: 81 | # Resources needed are subjective on the actual workload 82 | # please refer to Sysdig Support for more info about it 83 | requests: 84 | cpu: 1 85 | memory: 384Mi 86 | limits: 87 | memory: 512Mi 88 | volumeMounts: 89 | ### uncomment for minikube 90 | # - mountPath: /host/etc/VERSION 91 | # name: etc-version 92 | # readOnly: true 93 | - mountPath: /host/boot 94 | name: boot-vol 95 | readOnly: true 96 | - mountPath: /host/lib/modules 97 | name: modules-vol 98 | readOnly: true 99 | - mountPath: /etc/modprobe.d 100 | name: modprobe-d 101 | readOnly: true 102 | - mountPath: /host/usr 103 | name: usr-vol 104 | readOnly: true 105 | containers: 106 | - name: sysdig-agent 107 | image: quay.io/sysdig/agent-slim 108 | imagePullPolicy: Always 109 | securityContext: 110 | privileged: true 111 | resources: 112 | # Resources needed are subjective on the actual workload 113 | # please refer to Sysdig Support for more info about it 114 | requests: 115 | cpu: 100m 116 | memory: 512Mi 117 | limits: 118 | memory: 1024Mi 119 | readinessProbe: 120 | exec: 121 | command: [ "test", "-e", "/opt/draios/logs/running" ] 122 | initialDelaySeconds: 10 123 | env: 124 | ### REQUIRED: 
replace with your Sysdig Platform access key 125 | - name: ACCESS_KEY 126 | value: key 127 | ### OPTIONAL: add tags for this host 128 | # - name: TAGS 129 | # value: linux:ubuntu,dept:dev,local:nyc 130 | ### OPTIONAL: Needed to connect to a Sysdig On-Premises backend 131 | # - name: COLLECTOR_PORT 132 | # value: "6443" 133 | # - name: COLLECTOR 134 | # value: 192.168.1.200 135 | # - name: SECURE 136 | # value: "true" 137 | # - name: CHECK_CERTIFICATE 138 | # value: "false" 139 | ### OPTIONAL: Add additional parameters to the agent, refer to our Docs to know all options available 140 | # - name: ADDITIONAL_CONF 141 | # value: | 142 | # new_k8s: true 143 | # k8s_cluster_name: production 144 | volumeMounts: 145 | - mountPath: /host/dev 146 | name: dev-vol 147 | readOnly: false 148 | - mountPath: /host/proc 149 | name: proc-vol 150 | readOnly: true 151 | - mountPath: /host/run 152 | name: run-vol 153 | - mountPath: /host/var/run 154 | name: varrun-vol 155 | - mountPath: /dev/shm 156 | name: dshm 157 | - mountPath: /etc/podinfo 158 | name: podinfo 159 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-agent-slim-daemonset-v2.yaml: -------------------------------------------------------------------------------- 1 | ### WARNING: this file is supported from Sysdig Agent 0.80.0 2 | # apiVersion: extensions/v1beta1 # If you are in Kubernetes version 1.8 or less please use this line instead of the following one 3 | apiVersion: apps/v1 4 | kind: DaemonSet 5 | metadata: 6 | name: sysdig-agent 7 | labels: 8 | app: sysdig-agent 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: sysdig-agent 13 | updateStrategy: 14 | type: RollingUpdate 15 | template: 16 | metadata: 17 | labels: 18 | app: sysdig-agent 19 | spec: 20 | volumes: 21 | - name: modprobe-d 22 | hostPath: 23 | path: /etc/modprobe.d 24 | ### uncomment for minikube 25 | # - name: etc-version 26 | # hostPath: 27 | # path: /etc/VERSION 28 | # type: FileOrCreate 
29 | - name: dshm 30 | emptyDir: 31 | medium: Memory 32 | - name: etc-vol 33 | hostPath: 34 | path: /etc 35 | - name: dev-vol 36 | hostPath: 37 | path: /dev 38 | - name: proc-vol 39 | hostPath: 40 | path: /proc 41 | - name: boot-vol 42 | hostPath: 43 | path: /boot 44 | - name: modules-vol 45 | hostPath: 46 | path: /lib/modules 47 | - name: usr-vol 48 | hostPath: 49 | path: /usr 50 | - name: run-vol 51 | hostPath: 52 | path: /run 53 | - name: varrun-vol 54 | hostPath: 55 | path: /var/run 56 | - name: sysdig-agent-config 57 | configMap: 58 | name: sysdig-agent 59 | optional: true 60 | - name: sysdig-agent-secrets 61 | secret: 62 | secretName: sysdig-agent 63 | - name: podinfo 64 | downwardAPI: 65 | defaultMode: 420 66 | items: 67 | - fieldRef: 68 | apiVersion: v1 69 | fieldPath: metadata.namespace 70 | path: namespace 71 | - fieldRef: 72 | apiVersion: v1 73 | fieldPath: metadata.name 74 | path: name 75 | # This section is for eBPF support. Please refer to Sysdig Support before 76 | # uncommenting, as eBPF is recommended for only a few configurations. 
77 | #- name: bpf-probes 78 | # emptyDir: {} 79 | #- name: sys-tracing 80 | # hostPath: 81 | # path: /sys/kernel/debug 82 | hostNetwork: true 83 | dnsPolicy: ClusterFirstWithHostNet 84 | hostPID: true 85 | tolerations: 86 | - effect: NoSchedule 87 | key: node-role.kubernetes.io/master 88 | - effect: NoSchedule 89 | key: node-role.kubernetes.io/control-plane 90 | - effect: NoSchedule 91 | key: node-role.kubernetes.io/controlplane 92 | operator: Equal 93 | value: "true" 94 | - effect: NoExecute 95 | key: node-role.kubernetes.io/etcd 96 | operator: Equal 97 | value: "true" 98 | # The following line is necessary for RBAC 99 | serviceAccount: sysdig-agent 100 | terminationGracePeriodSeconds: 5 101 | ### Uncomment following 2 lines to pull images from a private registry, 102 | ### replacing secret-name with your secret name (previously created) 103 | #imagePullSecrets: 104 | #- name: secret-name 105 | initContainers: 106 | - name: sysdig-agent-kmodule 107 | image: quay.io/sysdig/agent-kmodule 108 | imagePullPolicy: Always 109 | securityContext: 110 | privileged: true 111 | runAsUser: 0 112 | resources: 113 | requests: 114 | cpu: 1000m 115 | memory: 384Mi 116 | limits: 117 | memory: 512Mi 118 | env: 119 | - name: K8S_NODE 120 | valueFrom: 121 | fieldRef: 122 | fieldPath: spec.nodeName 123 | # This section is for eBPF support. Please refer to Sysdig Support before 124 | # uncommenting, as eBPF is recommended for only a few configurations. 
125 | # - name: SYSDIG_BPF_PROBE 126 | # value: "" 127 | volumeMounts: 128 | - mountPath: /host/etc 129 | name: etc-vol 130 | readOnly: true 131 | - mountPath: /etc/modprobe.d 132 | name: modprobe-d 133 | readOnly: true 134 | ### uncomment for minikube 135 | # - mountPath: /host/etc/VERSION 136 | # name: etc-version 137 | # readOnly: true 138 | - mountPath: /host/boot 139 | name: boot-vol 140 | readOnly: true 141 | - mountPath: /host/lib/modules 142 | name: modules-vol 143 | readOnly: true 144 | - mountPath: /host/usr 145 | name: usr-vol 146 | readOnly: true 147 | # This section is for eBPF support. Please refer to Sysdig Support before 148 | # uncommenting, as eBPF is recommended for only a few configurations. 149 | #- mountPath: /root/.sysdig 150 | # name: bpf-probes 151 | #- mountPath: /sys/kernel/debug 152 | # name: sys-tracing 153 | # readOnly: true 154 | containers: 155 | - name: sysdig-agent 156 | # WARNING: the agent-slim release is currently dependent on the above 157 | # initContainer and thus only functions correctly in a kubernetes cluster 158 | image: quay.io/sysdig/agent-slim 159 | imagePullPolicy: Always 160 | securityContext: 161 | privileged: true 162 | runAsUser: 0 163 | resources: 164 | # Resources needed are subjective to the actual workload. 165 | # Please refer to Sysdig Support for more info. 166 | # See also: https://docs.sysdig.com/en/tuning-sysdig-agent.html 167 | requests: 168 | cpu: 1000m 169 | memory: 1024Mi 170 | limits: 171 | cpu: 1000m 172 | memory: 1024Mi 173 | readinessProbe: 174 | exec: 175 | command: [ "test", "-e", "/opt/draios/logs/running" ] 176 | initialDelaySeconds: 10 177 | # This section is for eBPF support. Please refer to Sysdig Support before 178 | # uncommenting, as eBPF is recommended for only a few configurations. 
179 | #env: 180 | # - name: SYSDIG_BPF_PROBE 181 | # value: "" 182 | volumeMounts: 183 | - mountPath: /host/etc 184 | name: etc-vol 185 | readOnly: true 186 | - mountPath: /host/dev 187 | name: dev-vol 188 | readOnly: false 189 | - mountPath: /host/proc 190 | name: proc-vol 191 | readOnly: true 192 | - mountPath: /host/run 193 | name: run-vol 194 | - mountPath: /host/var/run 195 | name: varrun-vol 196 | - mountPath: /dev/shm 197 | name: dshm 198 | - mountPath: /opt/draios/etc/kubernetes/config 199 | name: sysdig-agent-config 200 | - mountPath: /opt/draios/etc/kubernetes/secrets 201 | name: sysdig-agent-secrets 202 | - mountPath: /etc/podinfo 203 | name: podinfo 204 | # This section is for eBPF support. Please refer to Sysdig Support before 205 | # uncommenting, as eBPF is recommended for only a few configurations. 206 | #- mountPath: /root/.sysdig 207 | # name: bpf-probes 208 | #- mountPath: /sys/kernel/debug 209 | # name: sys-tracing 210 | # readOnly: true 211 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-benchmark-runner-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: sysdig-benchmark-runner 5 | data: 6 | debug: "false" 7 | 8 | # Set and customize the following to enable proxy support 9 | # http_proxy: "http://proxy_server:8080" 10 | # https_proxy: "https://proxy_server:8080" 11 | # no_proxy: "127.0.0.1,localhost,192.168.0.0/16,172.16.0.0/12,10.0.0.0/8" 12 | 13 | # The endpoint to the Sysdig collector 14 | # Required: yes 15 | # collector_endpoint: https:// 16 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-host-analyzer-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: sysdig-host-analyzer 5 | data: 6 | debug: 
"false" 7 | 8 | # Set the following to choose your scanning schedule 9 | schedule: "@dailydefault" 10 | 11 | # Set and customize the following to enable proxy support 12 | # http_proxy: "http://proxy_server:8080" 13 | # https_proxy: "https://proxy_server:8080" 14 | # no_proxy: "127.0.0.1,localhost,192.168.0.0/16,172.16.0.0/12,10.0.0.0/8" 15 | 16 | # analyze_at_startup: "false" 17 | 18 | # The endpoint to the Scanning Analysis collector 19 | # Required: yes 20 | # collector_endpoint: "https:///internal/scanning/scanning-analysis-collector" 21 | 22 | # uncomment the following line to use a self-signed cert for backend communication 23 | # ssl_verify_certificate: "false" 24 | dirs_to_scan: "/etc,/var/lib/dpkg,/usr/local,/usr/lib/sysimage/rpm,/var/lib/rpm,/lib/apk/db" 25 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-image-analyzer-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: sysdig-image-analyzer 5 | data: 6 | debug: "false" 7 | 8 | # Set and customize the following to enable proxy support 9 | # http_proxy: "http://proxy_server:8080" 10 | # https_proxy: "https://proxy_server:8080" 11 | # no_proxy: "127.0.0.1,localhost,192.168.0.0/16,172.16.0.0/12,10.0.0.0/8" 12 | 13 | # The endpoint to the Scanning Analysis collector 14 | # Required: yes 15 | # collector_endpoint: https:///internal/scanning/scanning-analysis-collector 16 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-image-analyzer-daemonset.yaml: -------------------------------------------------------------------------------- 1 | # apiVersion: extensions/v1beta1 # If you are in Kubernetes version 1.8 or less please use this line instead of the following one 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: sysdig-image-analyzer 6 | labels: 7 | app: 
sysdig-image-analyzer 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: sysdig-image-analyzer 12 | updateStrategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | labels: 17 | app: sysdig-image-analyzer 18 | spec: 19 | volumes: 20 | # Needed for cri-o image inspection. 21 | # cri-o and especially OCP 4.x by default use containers/storage to handle images, and this makes sure that the 22 | # analyzer has access to the configuration. This file is mounted read-only. 23 | - name: etc-containers-storage-vol 24 | hostPath: 25 | path: /etc/containers/storage.conf 26 | # Needed for cri-o image inspection. 27 | # This is the directory where image data is stored by default when using cri-o and OCP 4.x and the analyzer 28 | # uses it to get the data to scan. This directory must be mounted r/w because proper access to its files through 29 | # the containers/storage library is always regulated with a lockfile. 30 | - name: var-lib-containers-vol 31 | hostPath: 32 | path: /var/lib/containers 33 | # Needed for some IBM OpenShift clusters which symlink /var/run/containers/storage to contents of /var/data by default 34 | - name: vardata-vol 35 | hostPath: 36 | path: /var/data 37 | # Needed for socket access 38 | - name: varrun-vol 39 | hostPath: 40 | path: /var/run 41 | # Add custom volume here 42 | - name: sysdig-image-analyzer-config 43 | configMap: 44 | name: sysdig-image-analyzer 45 | optional: true 46 | tolerations: 47 | - effect: NoSchedule 48 | key: node-role.kubernetes.io/master 49 | - effect: NoSchedule 50 | key: node-role.kubernetes.io/control-plane 51 | - effect: NoSchedule 52 | key: node-role.kubernetes.io/controlplane 53 | operator: Equal 54 | value: "true" 55 | - effect: NoExecute 56 | key: node-role.kubernetes.io/etcd 57 | operator: Equal 58 | value: "true" 59 | # The following line is necessary for RBAC 60 | serviceAccount: sysdig-agent 61 | terminationGracePeriodSeconds: 5 62 | containers: 63 | - name: sysdig-image-analyzer 64 | image: 
quay.io/sysdig/node-image-analyzer 65 | securityContext: 66 | # The privileged flag is necessary for OCP 4.x and other Kubernetes setups that deny host filesystem access to 67 | # running containers by default regardless of volume mounts. In those cases, access to the CRI socket would fail. 68 | privileged: true 69 | imagePullPolicy: Always 70 | resources: 71 | limits: 72 | cpu: 500m 73 | memory: 1536Mi 74 | requests: 75 | cpu: 250m 76 | memory: 512Mi 77 | volumeMounts: 78 | - mountPath: /var/run 79 | name: varrun-vol 80 | - mountPath: /etc/containers/storage.conf 81 | name: etc-containers-storage-vol 82 | readOnly: true 83 | - mountPath: /var/lib/containers 84 | name: var-lib-containers-vol 85 | - mountPath: /var/data 86 | name: vardata-vol 87 | # Add custom volume mount here 88 | env: 89 | - name: ACCESS_KEY 90 | valueFrom: 91 | secretKeyRef: 92 | name: sysdig-agent 93 | key: access-key 94 | - name: IMAGE_PERIOD 95 | valueFrom: 96 | configMapKeyRef: 97 | name: sysdig-image-analyzer 98 | key: image_period 99 | optional: true 100 | - name: IMAGE_CACHE_TTL 101 | valueFrom: 102 | configMapKeyRef: 103 | name: sysdig-image-analyzer 104 | key: image_cache_ttl 105 | optional: true 106 | - name: REPORT_PERIOD 107 | valueFrom: 108 | configMapKeyRef: 109 | name: sysdig-image-analyzer 110 | key: report_period 111 | optional: true 112 | - name: DOCKER_SOCKET_PATH 113 | valueFrom: 114 | configMapKeyRef: 115 | name: sysdig-image-analyzer 116 | key: docker_socket_path 117 | optional: true 118 | - name: CRI_SOCKET_PATH 119 | valueFrom: 120 | configMapKeyRef: 121 | name: sysdig-image-analyzer 122 | key: cri_socket_path 123 | optional: true 124 | - name: CONTAINERD_SOCKET_PATH 125 | valueFrom: 126 | configMapKeyRef: 127 | name: sysdig-image-analyzer 128 | key: containerd_socket_path 129 | optional: true 130 | - name: AM_COLLECTOR_ENDPOINT 131 | valueFrom: 132 | configMapKeyRef: 133 | name: sysdig-image-analyzer 134 | key: collector_endpoint 135 | optional: true 136 | - name: 
AM_COLLECTOR_TIMEOUT 137 | valueFrom: 138 | configMapKeyRef: 139 | name: sysdig-image-analyzer 140 | key: collector_timeout 141 | optional: true 142 | - name: VERIFY_CERTIFICATE 143 | valueFrom: 144 | configMapKeyRef: 145 | name: sysdig-image-analyzer 146 | key: ssl_verify_certificate 147 | optional: true 148 | - name: K8S_NODE_NAME 149 | valueFrom: 150 | fieldRef: 151 | fieldPath: spec.nodeName 152 | - name: K8S_POD_NAME 153 | valueFrom: 154 | fieldRef: 155 | fieldPath: metadata.name 156 | - name: K8S_POD_NAMESPACE 157 | valueFrom: 158 | fieldRef: 159 | fieldPath: metadata.namespace 160 | - name: DEBUG 161 | valueFrom: 162 | configMapKeyRef: 163 | name: sysdig-image-analyzer 164 | key: debug 165 | optional: true 166 | - name: HTTP_PROXY 167 | valueFrom: 168 | configMapKeyRef: 169 | key: http_proxy 170 | name: sysdig-image-analyzer 171 | optional: true 172 | - name: HTTPS_PROXY 173 | valueFrom: 174 | configMapKeyRef: 175 | key: https_proxy 176 | name: sysdig-image-analyzer 177 | optional: true 178 | - name: NO_PROXY 179 | valueFrom: 180 | configMapKeyRef: 181 | key: no_proxy 182 | name: sysdig-image-analyzer 183 | optional: true 184 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-kmod-thin-agent-slim-daemonset.yaml: -------------------------------------------------------------------------------- 1 | ### WARNING: this file is supported from Sysdig Agent 11.0.0 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: sysdig-agent 6 | labels: 7 | app: sysdig-agent 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: sysdig-agent 12 | updateStrategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | labels: 17 | app: sysdig-agent 18 | spec: 19 | volumes: 20 | - name: modprobe-d 21 | hostPath: 22 | path: /etc/modprobe.d 23 | - name: dshm 24 | emptyDir: 25 | medium: Memory 26 | - name: etc-vol 27 | hostPath: 28 | path: /etc 29 | - name: dev-vol 30 | hostPath: 31 | path: /dev 32 | - 
name: proc-vol 33 | hostPath: 34 | path: /proc 35 | - name: boot-vol 36 | hostPath: 37 | path: /boot 38 | - name: modules-vol 39 | hostPath: 40 | path: /lib/modules 41 | - name: usr-vol 42 | hostPath: 43 | path: /usr 44 | - name: run-vol 45 | hostPath: 46 | path: /run 47 | - name: varrun-vol 48 | hostPath: 49 | path: /var/run 50 | - name: sysdig-agent-config 51 | configMap: 52 | name: sysdig-agent 53 | optional: true 54 | - name: sysdig-agent-secrets 55 | secret: 56 | secretName: sysdig-agent 57 | - name: podinfo 58 | downwardAPI: 59 | defaultMode: 420 60 | items: 61 | - fieldRef: 62 | apiVersion: v1 63 | fieldPath: metadata.namespace 64 | path: namespace 65 | - fieldRef: 66 | apiVersion: v1 67 | fieldPath: metadata.name 68 | path: name 69 | # This section is for eBPF support. Please refer to Sysdig Support before 70 | # uncommenting, as eBPF is recommended for only a few configurations. 71 | #- name: bpf-probes 72 | # emptyDir: {} 73 | #- name: sys-tracing 74 | # hostPath: 75 | # path: /sys/kernel/debug 76 | hostNetwork: true 77 | dnsPolicy: ClusterFirstWithHostNet 78 | hostPID: true 79 | tolerations: 80 | - effect: NoSchedule 81 | key: node-role.kubernetes.io/master 82 | - effect: NoSchedule 83 | key: node-role.kubernetes.io/control-plane 84 | - effect: NoSchedule 85 | key: node-role.kubernetes.io/controlplane 86 | operator: Equal 87 | value: "true" 88 | - effect: NoExecute 89 | key: node-role.kubernetes.io/etcd 90 | operator: Equal 91 | value: "true" 92 | # The following line is necessary for RBAC 93 | serviceAccount: sysdig-agent 94 | terminationGracePeriodSeconds: 5 95 | ### Uncomment following 2 lines to pull images from a private registry, 96 | ### replacing secret-name with your secret name (previously created) 97 | #imagePullSecrets: 98 | #- name: secret-name 99 | initContainers: 100 | - name: sysdig-agent-kmodule 101 | image: quay.io/sysdig/agent-kmodule-thin 102 | imagePullPolicy: Always 103 | securityContext: 104 | privileged: true 105 | runAsUser: 0 
106 | resources: 107 | requests: 108 | cpu: 1000m 109 | memory: 384Mi 110 | limits: 111 | memory: 512Mi 112 | env: 113 | - name: K8S_NODE 114 | valueFrom: 115 | fieldRef: 116 | fieldPath: spec.nodeName 117 | # This section is for eBPF support. Please refer to Sysdig Support before 118 | # uncommenting, as eBPF is recommended for only a few configurations. 119 | # - name: SYSDIG_BPF_PROBE 120 | # value: "" 121 | volumeMounts: 122 | - mountPath: /host/etc 123 | name: etc-vol 124 | readOnly: true 125 | - mountPath: /etc/modprobe.d 126 | name: modprobe-d 127 | readOnly: true 128 | - mountPath: /host/boot 129 | name: boot-vol 130 | readOnly: true 131 | - mountPath: /host/lib/modules 132 | name: modules-vol 133 | readOnly: true 134 | - mountPath: /host/usr 135 | name: usr-vol 136 | readOnly: true 137 | # This section is for eBPF support. Please refer to Sysdig Support before 138 | # uncommenting, as eBPF is recommended for only a few configurations. 139 | #- mountPath: /root/.sysdig 140 | # name: bpf-probes 141 | #- mountPath: /sys/kernel/debug 142 | # name: sys-tracing 143 | # readOnly: true 144 | containers: 145 | - name: sysdig-agent 146 | # WARNING: the agent-slim release is currently dependent on the above 147 | # initContainer and thus only functions correctly in a kubernetes cluster 148 | image: quay.io/sysdig/agent-slim 149 | imagePullPolicy: Always 150 | securityContext: 151 | privileged: true 152 | runAsUser: 0 153 | resources: 154 | # Resources needed are subjective to the actual workload. 155 | # Please refer to Sysdig Support for more info. 156 | # See also: https://docs.sysdig.com/en/tuning-sysdig-agent.html 157 | requests: 158 | cpu: 1000m 159 | memory: 1024Mi 160 | limits: 161 | cpu: 1000m 162 | memory: 1024Mi 163 | readinessProbe: 164 | exec: 165 | command: [ "test", "-e", "/opt/draios/logs/running" ] 166 | initialDelaySeconds: 10 167 | # This section is for eBPF support. 
Please refer to Sysdig Support before 168 | # uncommenting, as eBPF is recommended for only a few configurations. 169 | #env: 170 | # - name: SYSDIG_BPF_PROBE 171 | # value: "" 172 | volumeMounts: 173 | - mountPath: /host/etc 174 | name: etc-vol 175 | readOnly: true 176 | - mountPath: /host/dev 177 | name: dev-vol 178 | readOnly: false 179 | - mountPath: /host/proc 180 | name: proc-vol 181 | readOnly: true 182 | - mountPath: /host/run 183 | name: run-vol 184 | - mountPath: /host/var/run 185 | name: varrun-vol 186 | - mountPath: /dev/shm 187 | name: dshm 188 | - mountPath: /opt/draios/etc/kubernetes/config 189 | name: sysdig-agent-config 190 | - mountPath: /opt/draios/etc/kubernetes/secrets 191 | name: sysdig-agent-secrets 192 | - mountPath: /etc/podinfo 193 | name: podinfo 194 | # This section is for eBPF support. Please refer to Sysdig Support before 195 | # uncommenting, as eBPF is recommended for only a few configurations. 196 | #- mountPath: /root/.sysdig 197 | # name: bpf-probes 198 | #- mountPath: /sys/kernel/debug 199 | # name: sys-tracing 200 | # readOnly: true 201 | -------------------------------------------------------------------------------- /agent_deploy/kubernetes/sysdig-node-analyzer-daemonset.yaml: -------------------------------------------------------------------------------- 1 | # apiVersion: extensions/v1beta1 # If you are in Kubernetes version 1.8 or less please use this line instead of the following one 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: sysdig-node-analyzer 6 | labels: 7 | app: sysdig-node-analyzer 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: sysdig-node-analyzer 12 | updateStrategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | labels: 17 | app: sysdig-node-analyzer 18 | spec: 19 | volumes: 20 | - name: sysdig-agent-config 21 | configMap: 22 | name: sysdig-agent 23 | optional: true 24 | # Needed for cri-o image inspection. 
25 | # cri-o and especially OCP 4.x by default use containers/storage to handle images, and this makes sure that the 26 | # analyzer has access to the configuration. This file is mounted read-only. 27 | - name: etc-containers-storage-vol 28 | hostPath: 29 | path: /etc/containers/storage.conf 30 | # Needed for cri-o image inspection. 31 | # This is the directory where image data is stored by default when using cri-o and OCP 4.x and the analyzer 32 | # uses it to get the data to scan. This directory must be mounted r/w because proper access to its files through 33 | # the containers/storage library is always regulated with a lockfile. 34 | - name: var-lib-containers-vol 35 | hostPath: 36 | path: /var/lib/containers 37 | # Needed for some IBM OpenShift clusters which symlink /var/run/containers/storage to contents of /var/data by default 38 | - name: vardata-vol 39 | hostPath: 40 | path: /var/data 41 | # Needed for socket access 42 | - name: varrun-vol 43 | hostPath: 44 | path: /var/run 45 | # Add custom volume here 46 | - name: sysdig-image-analyzer-config 47 | configMap: 48 | name: sysdig-image-analyzer 49 | optional: true 50 | # Needed to run Benchmarks. This mount is read-only. 51 | # Benchmarks include numerous checks that run tests against config files in the host filesystem. There are also 52 | # checks that test various host configurations such as loaded modules and enabled security features. 
53 | - name: root-vol 54 | hostPath: 55 | path: / 56 | - name: tmp-vol 57 | emptyDir: {} 58 | tolerations: 59 | - effect: NoSchedule 60 | key: node-role.kubernetes.io/master 61 | - effect: NoSchedule 62 | key: node-role.kubernetes.io/control-plane 63 | - effect: NoSchedule 64 | key: node-role.kubernetes.io/controlplane 65 | operator: Equal 66 | value: "true" 67 | - effect: NoExecute 68 | key: node-role.kubernetes.io/etcd 69 | operator: Equal 70 | value: "true" 71 | # The following line is necessary for RBAC 72 | serviceAccount: sysdig-agent 73 | terminationGracePeriodSeconds: 5 74 | # Use the Host Network Namespace. 75 | # This is required by the Benchmark container to determine the hostname and host mac address 76 | hostNetwork: true 77 | dnsPolicy: ClusterFirstWithHostNet 78 | # Use the Host PID namespace. 79 | # This is required for Kubernetes benchmarks, as they contain tests that check Kubernetes processes running on 80 | # the host 81 | hostPID: true 82 | containers: 83 | - name: sysdig-image-analyzer 84 | image: quay.io/sysdig/node-image-analyzer 85 | securityContext: 86 | # The privileged flag is necessary for OCP 4.x and other Kubernetes setups that deny host filesystem access to 87 | # running containers by default regardless of volume mounts. In those cases, access to the CRI socket would fail. 
88 | privileged: true 89 | imagePullPolicy: Always 90 | resources: 91 | limits: 92 | cpu: 500m 93 | memory: 1536Mi 94 | requests: 95 | cpu: 150m 96 | memory: 512Mi 97 | volumeMounts: 98 | - mountPath: /var/run 99 | name: varrun-vol 100 | - mountPath: /etc/containers/storage.conf 101 | name: etc-containers-storage-vol 102 | readOnly: true 103 | - mountPath: /var/lib/containers 104 | name: var-lib-containers-vol 105 | - mountPath: /var/data 106 | name: vardata-vol 107 | # Add custom volume mount here 108 | env: 109 | - name: ACCESS_KEY 110 | valueFrom: 111 | secretKeyRef: 112 | name: sysdig-agent 113 | key: access-key 114 | - name: IMAGE_PERIOD 115 | valueFrom: 116 | configMapKeyRef: 117 | name: sysdig-image-analyzer 118 | key: image_period 119 | optional: true 120 | - name: IMAGE_CACHE_TTL 121 | valueFrom: 122 | configMapKeyRef: 123 | name: sysdig-image-analyzer 124 | key: image_cache_ttl 125 | optional: true 126 | - name: REPORT_PERIOD 127 | valueFrom: 128 | configMapKeyRef: 129 | name: sysdig-image-analyzer 130 | key: report_period 131 | optional: true 132 | - name: DOCKER_SOCKET_PATH 133 | valueFrom: 134 | configMapKeyRef: 135 | name: sysdig-image-analyzer 136 | key: docker_socket_path 137 | optional: true 138 | - name: CRI_SOCKET_PATH 139 | valueFrom: 140 | configMapKeyRef: 141 | name: sysdig-image-analyzer 142 | key: cri_socket_path 143 | optional: true 144 | - name: CONTAINERD_SOCKET_PATH 145 | valueFrom: 146 | configMapKeyRef: 147 | name: sysdig-image-analyzer 148 | key: containerd_socket_path 149 | optional: true 150 | - name: AM_COLLECTOR_ENDPOINT 151 | valueFrom: 152 | configMapKeyRef: 153 | name: sysdig-image-analyzer 154 | key: collector_endpoint 155 | optional: true 156 | - name: AM_COLLECTOR_TIMEOUT 157 | valueFrom: 158 | configMapKeyRef: 159 | name: sysdig-image-analyzer 160 | key: collector_timeout 161 | optional: true 162 | - name: VERIFY_CERTIFICATE 163 | valueFrom: 164 | configMapKeyRef: 165 | name: sysdig-image-analyzer 166 | key: 
ssl_verify_certificate 167 | optional: true 168 | - name: K8S_NODE_NAME 169 | valueFrom: 170 | fieldRef: 171 | fieldPath: spec.nodeName 172 | - name: K8S_POD_NAME 173 | valueFrom: 174 | fieldRef: 175 | fieldPath: metadata.name 176 | - name: K8S_POD_NAMESPACE 177 | valueFrom: 178 | fieldRef: 179 | fieldPath: metadata.namespace 180 | - name: DEBUG 181 | valueFrom: 182 | configMapKeyRef: 183 | name: sysdig-image-analyzer 184 | key: debug 185 | optional: true 186 | - name: HTTP_PROXY 187 | valueFrom: 188 | configMapKeyRef: 189 | key: http_proxy 190 | name: sysdig-image-analyzer 191 | optional: true 192 | - name: HTTPS_PROXY 193 | valueFrom: 194 | configMapKeyRef: 195 | key: https_proxy 196 | name: sysdig-image-analyzer 197 | optional: true 198 | - name: NO_PROXY 199 | valueFrom: 200 | configMapKeyRef: 201 | key: no_proxy 202 | name: sysdig-image-analyzer 203 | optional: true 204 | - name: sysdig-host-analyzer 205 | image: quay.io/sysdig/host-analyzer:latest 206 | securityContext: 207 | # The privileged flag is necessary for OCP 4.x and other Kubernetes setups that deny host filesystem access to 208 | # running containers by default regardless of volume mounts. 
In those cases, access to any host related components 209 | # would fail 210 | privileged: true 211 | imagePullPolicy: Always 212 | resources: 213 | limits: 214 | cpu: 500m 215 | memory: 1536Mi 216 | requests: 217 | cpu: 150m 218 | memory: 512Mi 219 | volumeMounts: 220 | - mountPath: /host 221 | name: root-vol 222 | readOnly: true 223 | env: 224 | - name: ACCESS_KEY 225 | valueFrom: 226 | secretKeyRef: 227 | name: sysdig-agent 228 | key: access-key 229 | - name: AM_COLLECTOR_ENDPOINT 230 | valueFrom: 231 | configMapKeyRef: 232 | name: sysdig-host-analyzer 233 | key: collector_endpoint 234 | - name: AM_COLLECTOR_TIMEOUT 235 | valueFrom: 236 | configMapKeyRef: 237 | name: sysdig-host-analyzer 238 | key: collector_timeout 239 | optional: true 240 | - name: SCHEDULE 241 | valueFrom: 242 | configMapKeyRef: 243 | name: sysdig-host-analyzer 244 | key: schedule 245 | optional: true 246 | - name: ANALYZE_AT_STARTUP 247 | valueFrom: 248 | configMapKeyRef: 249 | name: sysdig-host-analyzer 250 | key: analyze_at_startup 251 | optional: true 252 | - name: HOST_BASE 253 | value: /host 254 | - name: DIRS_TO_SCAN 255 | valueFrom: 256 | configMapKeyRef: 257 | name: sysdig-host-analyzer 258 | key: dirs_to_scan 259 | optional: true 260 | - name: MAX_SEND_ATTEMPTS 261 | valueFrom: 262 | configMapKeyRef: 263 | name: sysdig-host-analyzer 264 | key: max_send_attempts 265 | optional: true 266 | - name: VERIFY_CERTIFICATE 267 | valueFrom: 268 | configMapKeyRef: 269 | name: sysdig-host-analyzer 270 | key: ssl_verify_certificate 271 | optional: true 272 | - name: DEBUG 273 | valueFrom: 274 | configMapKeyRef: 275 | name: sysdig-host-analyzer 276 | key: debug 277 | optional: true 278 | - name: HTTP_PROXY 279 | valueFrom: 280 | configMapKeyRef: 281 | key: http_proxy 282 | name: sysdig-host-analyzer 283 | optional: true 284 | - name: HTTPS_PROXY 285 | valueFrom: 286 | configMapKeyRef: 287 | key: https_proxy 288 | name: sysdig-host-analyzer 289 | optional: true 290 | - name: NO_PROXY 291 | 
valueFrom: 292 | configMapKeyRef: 293 | key: no_proxy 294 | name: sysdig-host-analyzer 295 | optional: true 296 | - name: sysdig-benchmark-runner 297 | image: quay.io/sysdig/compliance-benchmark-runner 298 | imagePullPolicy: Always 299 | securityContext: 300 | # The privileged flag is necessary for OCP 4.x and other Kubernetes setups that deny host filesystem access to 301 | # running containers by default regardless of volume mounts. In those cases, the benchmark process would fail. 302 | privileged: true 303 | resources: 304 | limits: 305 | cpu: 500m 306 | memory: 256Mi 307 | requests: 308 | cpu: 150m 309 | memory: 128Mi 310 | volumeMounts: 311 | - mountPath: /opt/draios/etc/kubernetes/config 312 | name: sysdig-agent-config 313 | - mountPath: /host 314 | name: root-vol 315 | readOnly: true 316 | - mountPath: /host/tmp 317 | name: tmp-vol 318 | env: 319 | - name: ACCESS_KEY 320 | valueFrom: 321 | secretKeyRef: 322 | name: sysdig-agent 323 | key: access-key 324 | - name: BACKEND_ENDPOINT 325 | valueFrom: 326 | configMapKeyRef: 327 | name: sysdig-benchmark-runner 328 | key: collector_endpoint 329 | - name: BACKEND_VERIFY_TLS 330 | valueFrom: 331 | configMapKeyRef: 332 | name: sysdig-benchmark-runner 333 | key: ssl_verify_certificate 334 | optional: true 335 | - name: KUBERNETES_NODE_NAME 336 | valueFrom: 337 | fieldRef: 338 | fieldPath: spec.nodeName 339 | - name: DEBUG 340 | valueFrom: 341 | configMapKeyRef: 342 | name: sysdig-benchmark-runner 343 | key: debug 344 | optional: true 345 | - name: HTTP_PROXY 346 | valueFrom: 347 | configMapKeyRef: 348 | key: http_proxy 349 | name: sysdig-benchmark-runner 350 | optional: true 351 | - name: HTTPS_PROXY 352 | valueFrom: 353 | configMapKeyRef: 354 | key: https_proxy 355 | name: sysdig-benchmark-runner 356 | optional: true 357 | - name: NO_PROXY 358 | valueFrom: 359 | configMapKeyRef: 360 | key: no_proxy 361 | name: sysdig-benchmark-runner 362 | optional: true 363 | 
-------------------------------------------------------------------------------- /agent_deploy/openshift/sysdig-agent-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: sysdig-agent 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - pods 10 | - replicationcontrollers 11 | - services 12 | - endpoints 13 | - events 14 | - limitranges 15 | - namespaces 16 | - nodes 17 | - resourcequotas 18 | - persistentvolumes 19 | - persistentvolumeclaims 20 | verbs: 21 | - get 22 | - list 23 | - watch 24 | - apiGroups: 25 | - apps 26 | resources: 27 | - daemonsets 28 | - deployments 29 | - replicasets 30 | - statefulsets 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiGroups: 36 | - autoscaling 37 | resources: 38 | - horizontalpodautoscalers 39 | verbs: 40 | - get 41 | - list 42 | - watch 43 | - apiGroups: 44 | - batch 45 | resources: 46 | - cronjobs 47 | - jobs 48 | verbs: 49 | - get 50 | - list 51 | - watch 52 | - apiGroups: 53 | - networking.k8s.io 54 | resources: 55 | - networkpolicies 56 | - ingresses 57 | verbs: 58 | - get 59 | - list 60 | - watch 61 | - apiGroups: 62 | - extensions 63 | resources: 64 | - daemonsets 65 | - deployments 66 | - replicasets 67 | verbs: 68 | - get 69 | - list 70 | - watch 71 | - apiGroups: 72 | - coordination.k8s.io 73 | resources: 74 | - leases 75 | verbs: 76 | - get 77 | - list 78 | - create 79 | - update 80 | - watch 81 | - apiGroups: 82 | - storage.k8s.io 83 | resources: 84 | - storageclasses 85 | verbs: 86 | - get 87 | - list 88 | - watch 89 | - apiGroups: 90 | - certificates.k8s.io 91 | resources: 92 | - certificatesigningrequests 93 | verbs: 94 | - get 95 | - list 96 | - watch 97 | - apiGroups: 98 | - policy 99 | resources: 100 | - poddisruptionbudgets 101 | verbs: 102 | - get 103 | - list 104 | - watch 105 | 
-------------------------------------------------------------------------------- /agent_deploy/openshift/sysdig-agent-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: sysdig-agent 5 | data: 6 | dragent.yaml: | 7 | ### Agent tags 8 | # tags: linux:ubuntu,dept:dev,local:nyc 9 | tags: cluster:openshift 10 | 11 | #### Sysdig Software related config #### 12 | 13 | # Sysdig collector address 14 | # collector: 192.168.1.1 15 | 16 | # Collector TCP port 17 | # collector_port: 6666 18 | 19 | # Whether collector accepts ssl 20 | # ssl: true 21 | 22 | # collector certificate validation 23 | # ssl_verify_certificate: true 24 | 25 | ####################################### 26 | new_k8s: true 27 | # k8s_cluster_name: production 28 | -------------------------------------------------------------------------------- /agent_deploy/openshift/sysdig-agent-daemonset-redhat-openshift.yaml: -------------------------------------------------------------------------------- 1 | ### WARNING: this file is supported from Sysdig Agent 0.80.0 2 | # apiVersion: extensions/v1beta1 # If you are in Kubernetes version 1.8 or less please use this line instead of the following one 3 | apiVersion: apps/v1 4 | kind: DaemonSet 5 | metadata: 6 | name: sysdig-agent 7 | labels: 8 | app: sysdig-agent 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: sysdig-agent 13 | updateStrategy: 14 | type: RollingUpdate 15 | template: 16 | metadata: 17 | labels: 18 | app: sysdig-agent 19 | spec: 20 | volumes: 21 | - name: modprobe-d 22 | hostPath: 23 | path: /etc/modprobe.d 24 | - name: dshm 25 | emptyDir: 26 | medium: Memory 27 | - name: etc-vol 28 | hostPath: 29 | path: /etc 30 | - name: dev-vol 31 | hostPath: 32 | path: /dev 33 | - name: proc-vol 34 | hostPath: 35 | path: /proc 36 | - name: boot-vol 37 | hostPath: 38 | path: /boot 39 | - name: modules-vol 40 | hostPath: 41 | path: /lib/modules 42 | - name: 
usr-vol 43 | hostPath: 44 | path: /usr 45 | - name: run-vol 46 | hostPath: 47 | path: /run 48 | - name: varrun-vol 49 | hostPath: 50 | path: /var/run 51 | ### Uncomment these lines if you'd like to map /root/ from the 52 | # host into the container. This can be useful to map 53 | # /root/.sysdig to pick up custom kernel modules. 54 | # - name: host-root-vol 55 | # hostPath: 56 | # path: /root 57 | - name: sysdig-agent-config 58 | configMap: 59 | name: sysdig-agent 60 | optional: true 61 | - name: sysdig-agent-secrets 62 | secret: 63 | secretName: sysdig-agent 64 | - name: podinfo 65 | downwardAPI: 66 | defaultMode: 420 67 | items: 68 | - fieldRef: 69 | apiVersion: v1 70 | fieldPath: metadata.namespace 71 | path: namespace 72 | - fieldRef: 73 | apiVersion: v1 74 | fieldPath: metadata.name 75 | path: name 76 | # This section is for eBPF support. Please refer to Sysdig Support before 77 | # uncommenting, as eBPF is recommended for only a few configurations. 78 | #- name: sys-tracing 79 | # hostPath: 80 | # path: /sys/kernel/debug 81 | hostNetwork: true 82 | dnsPolicy: ClusterFirstWithHostNet 83 | hostPID: true 84 | tolerations: 85 | - effect: NoSchedule 86 | key: node-role.kubernetes.io/master 87 | # The following line is necessary for RBAC 88 | serviceAccount: sysdig-agent 89 | terminationGracePeriodSeconds: 5 90 | ### Uncomment following 2 lines to pull images from a private registry, 91 | ### replacing secret-name with your secret name (previously created) 92 | #imagePullSecrets: 93 | #- name: secret-name 94 | containers: 95 | - name: sysdig-agent 96 | image: registry.connect.redhat.com/sysdig/agent:latest 97 | imagePullPolicy: Always 98 | securityContext: 99 | privileged: true 100 | runAsUser: 0 101 | resources: 102 | # Resources needed are subjective to the actual workload. 103 | # Please refer to Sysdig Support for more info. 
104 | # See also: https://docs.sysdig.com/en/tuning-sysdig-agent.html 105 | requests: 106 | cpu: 1000m 107 | memory: 1024Mi 108 | limits: 109 | cpu: 1000m 110 | memory: 1024Mi 111 | readinessProbe: 112 | exec: 113 | command: [ "test", "-e", "/opt/draios/logs/running" ] 114 | initialDelaySeconds: 10 115 | env: 116 | - name: K8S_NODE 117 | valueFrom: 118 | fieldRef: 119 | fieldPath: spec.nodeName 120 | # This section is for eBPF support. Please refer to Sysdig Support before 121 | # uncommenting, as eBPF is recommended for only a few configurations. 122 | # - name: SYSDIG_BPF_PROBE 123 | # value: "" 124 | volumeMounts: 125 | - mountPath: /etc/modprobe.d 126 | name: modprobe-d 127 | readOnly: true 128 | - mountPath: /host/etc 129 | name: etc-vol 130 | readOnly: true 131 | - mountPath: /host/dev 132 | name: dev-vol 133 | readOnly: false 134 | - mountPath: /host/proc 135 | name: proc-vol 136 | readOnly: true 137 | - mountPath: /host/boot 138 | name: boot-vol 139 | readOnly: true 140 | - mountPath: /host/lib/modules 141 | name: modules-vol 142 | readOnly: true 143 | - mountPath: /host/usr 144 | name: usr-vol 145 | readOnly: true 146 | - mountPath: /host/run 147 | name: run-vol 148 | readOnly: false 149 | - mountPath: /host/var/run 150 | name: varrun-vol 151 | readOnly: false 152 | - mountPath: /dev/shm 153 | name: dshm 154 | - mountPath: /opt/draios/etc/kubernetes/config 155 | name: sysdig-agent-config 156 | - mountPath: /opt/draios/etc/kubernetes/secrets 157 | name: sysdig-agent-secrets 158 | - mountPath: /etc/podinfo 159 | name: podinfo 160 | ### Uncomment these lines if you'd like to map /root/ from the 161 | # host into the container. This can be useful to map 162 | # /root/.sysdig to pick up custom kernel modules. 163 | # - mountPath: /root 164 | # name: host-root-vol 165 | # This section is for eBPF support. Please refer to Sysdig Support before 166 | # uncommenting, as eBPF is recommended for only a few configurations. 
167 | #- mountPath: /sys/kernel/debug 168 | # name: sys-tracing 169 | # readOnly: true -------------------------------------------------------------------------------- /agent_deploy/openshift/sysdig-agent-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: sysdig-agent 5 | labels: 6 | app: sysdig-agent 7 | spec: 8 | selector: 9 | app: sysdig-agent 10 | ports: 11 | - protocol: TCP 12 | port: 7765 13 | targetPort: 7765 14 | -------------------------------------------------------------------------------- /integrations/slack/sysdigbot/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:alpine 2 | MAINTAINER Sysdig 3 | 4 | WORKDIR /app 5 | ADD requirements.txt /app/ 6 | RUN pip install -r requirements.txt 7 | 8 | ADD bot.py /app 9 | ENTRYPOINT [ "python", "bot.py" ] 10 | -------------------------------------------------------------------------------- /integrations/slack/sysdigbot/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Draios, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /integrations/slack/sysdigbot/README.md: -------------------------------------------------------------------------------- 1 | # sysdigbot 2 | Use this python script to spin up a Sysdigbot, a chat bot that allows you to interact with Sysdig Cloud through Slack. 3 | 4 | Currently Sysdigbot allows you to post custom events directly to Sysdig Cloud through chats in Slack. These chats can come from you and your teammates, or from any other app that you have integrated with Slack (think: code deploys, support tickets, marketing events, etc.) 5 | 6 | Check out the Sysdigbot [launch blog post](https://sysdig.com/blog/universal-slack-event-router/) for more info. 7 | 8 | Note, this script utilizes the [Sysdig Cloud python client](https://github.com/draios/python-sdc-client), a wrapper for the Sysdig Cloud REST API. 9 | 10 | # Install Instructions 11 | 12 | 1. Create a [new Slack bot user](https://api.slack.com/apps?new_classic_app=1) called "Sysdigbot" (or whatever you want), and note your Slack API token. Since the bot uses the `RTM` protocol to communicate with Slack, notice that you are creating a classic-style app and not a new-style. 13 | 2. Go to the [User Settings page](https://app.sysdigcloud.com/#/settings/user) in Sysdig Cloud, and note your Sysdig Cloud API Token (which, to be clear, is different from your Access Key). 14 | 3. 
If you are not using the default SaaS instance of Sysdig ( app.sysdigcloud.com ) then you must explicitly add your endpoint as a parameter. 15 | 4. Pull and run the Sysdigbot container from Docker Hub: 16 | 17 | `docker run [-d] --name sysdig-bot -e SYSDIG_API_TOKEN= -e SLACK_TOKEN= [-e SDC_URL=] sysdig/sysdig-bot [--help] [--quiet] [--no-auto-events] [--log-level LOG_LEVEL]` 18 | 19 | ## Manual Installation 20 | 21 | 1. `pip install -r requirements.txt` 22 | 2. `python bot.py --sysdig-api-token --slack-token ` 23 | 24 | Alternatively you can use our provided Dockerfile: 25 | 26 | 1. `docker build -t sysdig-bot .` 27 | 2. `docker run [-d] --name sysdig-bot -e SYSDIG_API_TOKEN= -e SLACK_TOKEN= [-e SDC_URL=] sysdig/sysdig-bot [--help] [--quiet] [--no-auto-events] [--log-level LOG_LEVEL]` 28 | 29 | # Usage 30 | 31 | Sysdigbot will automatically translate each message it hears on Slack into a Sysdig Cloud event: 32 | `description [name=] [severity=<1 to 7>] [some_tag_key=some_tag_value]` 33 | 34 | You can send messages directly to Sysdigbot, or invite Sysdigbot to any Slack channel to listen in. This channel listening behavior can be handy if there are other bots in the channel that post automatic notifications. 35 | 36 | ## Available commands 37 | 38 | * `!help` - Shows this message. 39 | * `[!post_event] description [name=] [severity=<1 to 7>] [some_tag_key=some_tag_value]` - Sends a custom event to Sysdig Cloud. Note, the `!post_event` prefix is only necessary if you launch bot.py with the `--no-auto-events` parameter. 40 | 41 | ## Examples 42 | 43 | * `!post_event load balancer going down for maintenance` 44 | 45 | * `!post_event my test event name="test 1" severity=5` 46 | 47 | * `!post_event name="test 2" severity=1 tag=value` 48 | 49 | # Improvements 50 | 51 | If you like any of these ideas and want to see them, or if you have any cool ideas of your own, let us know at support@sysdig.com - thanks! 52 | 53 | - Add more commands. 
For example, it would be very cool to have a `!get_data` or `!get_alert_notification` that mimic the behavior of the Python client API 54 | - Better parsing from bot messages. For example, we can recognize when a github/jenkins bot posts a message, and automatically dissect the message into name/description/tags 55 | - Hosted Sysdigbot 56 | -------------------------------------------------------------------------------- /integrations/slack/sysdigbot/bot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf8 3 | # Copyright (c) 2016 Draios inc. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 
22 | 23 | import sys 24 | import time 25 | import re 26 | import argparse 27 | import logging 28 | import os 29 | 30 | from slackclient import SlackClient 31 | from sdcclient import SdcClient 32 | 33 | ############################################################################### 34 | # Basic slack interface class 35 | ############################################################################### 36 | class SlackWrapper(object): 37 | inputs = [] 38 | 39 | def __init__(self, slack_client, slack_id): 40 | self.slack_client = slack_client 41 | self.slack_id = slack_id 42 | 43 | self.slack_users = {} 44 | for user in self.slack_client.server.users: 45 | self.slack_users[user.id] = user.name 46 | self.resolved_channels = {} 47 | self.resolved_users = {} 48 | 49 | def resolve_channel(self, channel): 50 | channel_type = channel[0] 51 | if channel in self.resolved_channels: 52 | return self.resolved_channels[channel] 53 | elif channel_type == 'C': 54 | channel_info = self.slack_client.api_call("channels.info", channel=channel) 55 | logging.debug("channels.info channel=%s response=%s" % (channel, channel_info)) 56 | if channel_info["ok"]: 57 | self.resolved_channels[channel] = channel_info['channel']['name'] 58 | return self.resolved_channels[channel] 59 | else: 60 | return channel 61 | elif channel_type == 'G': 62 | group_info = self.slack_client.api_call("groups.info", channel=channel) 63 | logging.debug("groups.info channel=%s response=%s" % (channel, group_info)) 64 | if group_info["ok"]: 65 | self.resolved_channels[channel] = group_info['group']['name'] 66 | return self.resolved_channels[channel] 67 | else: 68 | return channel 69 | elif channel_type == 'D': 70 | return "Direct" 71 | else: 72 | return channel 73 | 74 | def resolve_user(self, user): 75 | user_type = user[0] 76 | if user in self.resolved_users: 77 | return self.resolved_users[user] 78 | elif user_type == 'U': 79 | user_info = self.slack_client.api_call("users.info", user=user) 80 | 
logging.debug("users.info user=%s response=%s" % (user, user_info)) 81 | if user_info["ok"]: 82 | self.resolved_users[user] = user_info["user"]["name"] 83 | return self.resolved_users[user] 84 | else: 85 | return user 86 | elif user_type == 'B': 87 | # Right now we are not able to resolve bots 88 | # see https://api.slack.com/methods/bots.info 89 | return "bot" 90 | else: 91 | return user 92 | 93 | def say(self, channelid, text): 94 | message_json = {'type': 'message', 'channel': channelid, 'text': text} 95 | self.slack_client.server.send_to_websocket(message_json) 96 | 97 | def listen(self): 98 | self.inputs = [] 99 | while True: 100 | try: 101 | rv = self.slack_client.rtm_read() 102 | time.sleep(.1) 103 | except KeyboardInterrupt: 104 | sys.exit(0) 105 | except Exception as ex: 106 | logging.warning("Error on Slack WebSocket: %s" % str(ex)) 107 | 108 | for t in [2**i for i in range(12)]: 109 | logging.info("Reconnecting to Slack in %d seconds..." % t) 110 | time.sleep(t) 111 | if self.slack_client.rtm_connect(): 112 | logging.info("Successfully reconnected to Slack") 113 | break 114 | else: 115 | logging.error("Cannot connect to Slack, terminating...") 116 | sys.exit(1) 117 | 118 | 119 | for reply in rv: 120 | logging.debug("Data from Slack: %s", repr(reply)) 121 | if 'type' not in reply: 122 | continue 123 | 124 | if reply['type'] != 'message': 125 | continue 126 | 127 | if 'subtype' in reply and reply['subtype'] not in ('bot_message'): 128 | continue 129 | 130 | if 'channel' not in reply: 131 | continue 132 | 133 | if 'user' in reply and reply['user'] == self.slack_id: 134 | continue 135 | 136 | if 'text' in reply and len(reply['text']) > 0: 137 | txt = reply['text'] 138 | elif 'attachments' in reply and 'fallback' in reply['attachments'][0]: 139 | txt = reply['attachments'][0]['fallback'] 140 | else: 141 | continue 142 | if 'user' in reply: 143 | user_id = reply['user'] 144 | elif 'bot_id' in reply: 145 | user_id = reply['bot_id'] 146 | else: 147 | user_id = 
None 148 | 149 | self.inputs.append((user_id, reply['channel'], txt.strip(' \t\n\r'))) 150 | 151 | if len(self.inputs) != 0: 152 | return 153 | 154 | ############################################################################### 155 | # Chat endpoint class 156 | ############################################################################### 157 | 158 | SLACK_BUDDY_HELP = """ 159 | *Usage*: 160 | 161 | Just type something, it will be automatically converted to a Sysdig Cloud event: 162 | 163 | _load balancer going down for maintenance_ 164 | 165 | You can customize all the event fields in this way: 166 | 167 | _Turning down API server severity=3_ 168 | _Turning down API server severity=3 name=manteinance_ 169 | 170 | or add custom tags: 171 | 172 | _Turning down API server severity=3 name=manteinance region=us-east-1_ 173 | 174 | *Available commands*: 175 | `!help` - Shows this message. 176 | `[!post_event] description [name=] [severity=<1 to 7>] [some_tag_key=some_tag_value]` - Sends a custom event to Sysdig Cloud. Note, the `!post_event` prefix is only necessary if bot.py was launched with the `--no-auto-events` parameter. 177 | 178 | """ 179 | 180 | class SlackBuddy(SlackWrapper): 181 | inputs = [] 182 | PARAMETER_MATCHER = re.compile(u"([a-z_]+) ?= ?(?:\u201c([^\u201c]*)\u201d|\"([^\"]*)\"|([^\s]+))") 183 | SLACK_LINK_MATCHER = re.compile('') 184 | 185 | def __init__(self, sdclient, slack_client, slack_id, quiet): 186 | self._sdclient = sdclient 187 | self._quiet = quiet 188 | self.auto_events_message_sent = set() 189 | super(SlackBuddy, self).__init__(slack_client, slack_id) 190 | 191 | 192 | def links_2_mkdown(self, str): 193 | res = str 194 | sllinks = re.finditer(self.SLACK_LINK_MATCHER, str) 195 | for l in sllinks: 196 | txt = l.group() 197 | span = l.span() 198 | if '|' in txt: 199 | # Link is in the format . 
Conver it to [desc](http(s)://xxx) 200 | components = txt[1:-1].split("|") 201 | newlink = '[%s](%s)' % (components[1], components[0]) 202 | res = res[:span[0]] + newlink + res[span[1]:] 203 | else: 204 | # Link is in the format . Just remove the '<' and '>' 205 | res = res[:span[0]] + txt[1:-1] + res[span[1]:] 206 | 207 | # Done converting the first link in the message. Recursively convert the following ones 208 | return self.links_2_mkdown(res) 209 | return res 210 | 211 | def handle_help(self, channel): 212 | self.say(channel, SLACK_BUDDY_HELP) 213 | 214 | def post_event(self, user, channel, evt): 215 | tags = evt.get('tags', {}) 216 | tags['channel'] = self.resolve_channel(channel) 217 | tags['user'] = self.resolve_user(user) 218 | tags['source'] = 'slack' 219 | evt['tags'] = tags 220 | 221 | logging.info("Posting event=%s channel=%s" % (repr(evt), channel)) 222 | return self._sdclient.post_event(**evt) 223 | 224 | def handle_post_event(self, user, channel, line, silent=False): 225 | line = self.links_2_mkdown(line) 226 | purged_line = re.sub(self.PARAMETER_MATCHER, "", line).strip(' \t\n\r?!.') 227 | event_from = self.resolve_channel(channel) 228 | if event_from == "Direct": 229 | event_from = self.resolve_user(user) 230 | else: 231 | if self._quiet: 232 | silent = True 233 | event = { 234 | "name": "Slack Event From " + event_from, 235 | "description": purged_line, 236 | "severity": 6, 237 | "tags": {} 238 | } 239 | for item in re.finditer(self.PARAMETER_MATCHER, line): 240 | key = item.group(1) 241 | value = item.group(2) 242 | if value is None: 243 | value = item.group(3) 244 | if value is None: 245 | value = item.group(4) 246 | if key in ("name", "description"): 247 | event[key] = value 248 | elif key == "severity": 249 | try: 250 | severity = int(value) 251 | except ValueError: 252 | severity = 0 253 | 254 | if severity >= 1 and severity <= 7: 255 | event[key] = int(value) 256 | else: 257 | self.say(channel, "invalid severity, it must be a number from 1 
(highest) to 7 (lowest)") 258 | return 259 | else: 260 | event["tags"][key] = value 261 | 262 | res, error = self.post_event(user, channel, event) 263 | if res: 264 | if not silent: 265 | self.say(channel, 'Event successfully posted to Sysdig Cloud.') 266 | else: 267 | self.say(channel, 'Error posting event to Sysdig Cloud: ' + error) 268 | logging.error('Error posting event: ' + error) 269 | 270 | def run(self): 271 | while True: 272 | self.listen() 273 | 274 | for user, channel, txt in self.inputs: 275 | channel_type = channel[0] 276 | logging.debug("Received message user=%s channel=%s line=%s" % (user, channel, txt)) 277 | if txt.startswith('!help'): 278 | self.handle_help(channel) 279 | elif txt.startswith('!post_event'): 280 | self.handle_post_event(user, channel, txt[len("!post_event"):].strip(' \t\n\r?!.')) 281 | elif self.auto_events: 282 | ch = self.resolve_channel(channel) 283 | if user in self.auto_events_message_sent: 284 | #not first message from user 285 | if ch != "Direct": 286 | # Post silently in channels after confirming once, to avoid noise 287 | self.handle_post_event(user, channel, txt, silent=True) 288 | else: 289 | self.handle_post_event(user, channel, txt) 290 | else: 291 | #first message from user 292 | self.handle_post_event(user, channel, txt) 293 | if ch == "Direct": 294 | self.say(channel, "By the way, you have the option to customize title, description, severity and other event properties. 
Type `!help` to learn how to do it.") 295 | elif (ch != "Direct" and not self._quiet): 296 | self.say(channel, "To reduce channel noise, Sysdigbot will now stop confirming events automatically posted on chats from this user.") 297 | self.auto_events_message_sent.add(user) 298 | elif channel_type == 'D': 299 | self.say(channel, "Unknown command!") 300 | self.handle_help(channel) 301 | 302 | def LogLevelFromString(level): 303 | return getattr(logging, level.upper()) 304 | 305 | ############################################################################### 306 | # Entry point 307 | ############################################################################### 308 | def init(): 309 | sdc_token = None 310 | try: 311 | sdc_token = os.environ["SYSDIG_API_TOKEN"] 312 | except KeyError: 313 | pass 314 | slack_token = None 315 | try: 316 | slack_token = os.environ["SLACK_TOKEN"] 317 | except KeyError: 318 | pass 319 | parser = argparse.ArgumentParser(description='Sysdigbot: the Sysdig Cloud Slack bot.') 320 | parser.add_argument('--sysdig-api-token', dest='sdc_token', required=(sdc_token is None), default=sdc_token, type=str, help='Sysdig API Token, you can use also SYSDIG_API_TOKEN environment variable to set it') 321 | parser.add_argument('--slack-token', dest='slack_token', required=(slack_token is None), default=slack_token, type=str, help='Slack Token, you can use also SLACK_TOKEN environment variable to set it') 322 | parser.add_argument('--quiet', dest='quiet', action='store_true', help='Prevents the bot from printing output on channels, which is useful to avoid any kind of channel pollution') 323 | parser.add_argument('--no-auto-events', dest='auto_events', action='store_false', help='By default Sysdigbot converts every message in a channel in a Sysdig Cloud event, this flag disables it') 324 | parser.add_argument('--log-level', dest='log_level', type=LogLevelFromString, help='Logging level, available values: debug, info, warning, error') 325 | args = 
parser.parse_args() 326 | 327 | logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=args.log_level) 328 | # requests generates too noise on information level 329 | logging.getLogger("requests").setLevel(logging.WARNING) 330 | logging.getLogger("urllib3").setLevel(logging.WARNING) 331 | logging.debug("Starting Sysdigbot, config=%s", repr(args)) 332 | 333 | # 334 | # Instantiate the SDC client and Retrieve the SDC user information to make sure we have a valid connection 335 | # 336 | sdclient = SdcClient(args.sdc_token) 337 | 338 | # 339 | # Make a connection to the slack API 340 | # 341 | sc = SlackClient(args.slack_token) 342 | sc.rtm_connect() 343 | 344 | sinfo = sc.api_call('auth.test') 345 | slack_id = sinfo['user_id'] 346 | 347 | # 348 | # Start talking! 349 | # 350 | dude = SlackBuddy(sdclient, sc, slack_id, args.quiet) 351 | dude.auto_events = args.auto_events 352 | dude.run() 353 | 354 | if __name__ == "__main__": 355 | init() 356 | -------------------------------------------------------------------------------- /integrations/slack/sysdigbot/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | exec python bot.py --sysdig-api-token $SYSDIG_API_TOKEN --slack-token $SLACK_TOKEN $* 3 | -------------------------------------------------------------------------------- /integrations/slack/sysdigbot/requirements.txt: -------------------------------------------------------------------------------- 1 | slackclient==1.0.0 2 | sdcclient==0.4.0 3 | -------------------------------------------------------------------------------- /k8s_audit_config/README.md: -------------------------------------------------------------------------------- 1 | Kubernetes audit log integration enables Sysdig Secure to use the Kubernetes log data for Falco rules, activity audit, and to test the impact of Pod Security Policies. 

For information on how to use these files and scripts to integrate Kubernetes audit log data with Sysdig on a variety of platforms and distributions, see https://docs.sysdig.com/en/kubernetes-audit-logging.html.
-------------------------------------------------------------------------------- /k8s_audit_config/apiserver-config.patch.sh: --------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Patch the API server configuration of a given platform VARIANT so that
# Kubernetes audit events are logged and forwarded (via webhook) to the
# Sysdig agent. Must run on the master/apiserver host, typically as root.
#
# Usage: apiserver-config.patch.sh <variant>
#   variant: openshift-3.11 | minishift-3.11 | minikube* | rke-1.13

set -euo pipefail

# Empty IFS keeps leading whitespace intact in the `read` loop below, which
# matters because we are rewriting an indentation-sensitive YAML manifest.
IFS=''
SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"

VARIANT=${1:-None}

# Print a message and abort the script.
function fatal() {
    MESSAGE=$1

    echo "${MESSAGE}"
    echo "Exiting."
    exit 1
}

# Append an auditConfig stanza to the OpenShift 3.11 master config, keeping a
# backup of the original next to this script.
function modify_openshift_master_yaml() {
    MASTER_CONFIG=$1
    ORIGFILE="${SCRIPTDIR}/master-config.yaml.original"

    cp "${MASTER_CONFIG}" "${ORIGFILE}"

    # NOTE(review): the body of this heredoc was lost when the file was
    # extracted; reconstructed from the guard above ("logFormat: json") and
    # the policy/webhook files copied into /etc/origin/master/ — verify
    # against the upstream script before relying on it.
    PATCH=$(
        cat <<EOF
auditConfig:
  enabled: true
  maximumFileSizeMegabytes: 10
  maximumRetainedFiles: 1
  logFormat: json
  policyFile: /etc/origin/master/audit-policy.yaml
  webHookMode: batch
  webHookKubeConfig: /etc/origin/master/webhook-config.yaml
EOF
    )

    # Appending a new top-level key to the original config is valid YAML as
    # long as no auditConfig stanza exists (checked by the caller).
    { cat "${ORIGFILE}"; echo "${PATCH}"; } >"${MASTER_CONFIG}"
}

echo "Updating API Server config files for variant ${VARIANT}:"

if [ "${VARIANT}" == "openshift-3.11" ]; then
    MASTER_CONFIG=/etc/origin/master/master-config.yaml

    if [ ! -f "${MASTER_CONFIG}" ]; then
        fatal "Could not locate openshift apiserver configuration file"
    fi

    # Refuse to patch twice: an existing "logFormat: json" means an audit
    # config is already present.
    if grep -q "logFormat: json" "${MASTER_CONFIG}"; then
        fatal "Existing audit config found. Remove that audit config before continuing."
    fi

    echo "Found openshift configuration file at ${MASTER_CONFIG}, modifying..."
    mkdir -p /etc/origin/master/
    cp "${SCRIPTDIR}/webhook-config.yaml" /etc/origin/master/webhook-config.yaml
    cp "${SCRIPTDIR}/audit-policy.yaml" /etc/origin/master/audit-policy.yaml
    modify_openshift_master_yaml "${MASTER_CONFIG}"

elif [ "${VARIANT}" == "minishift-3.11" ]; then
    # Only need to copy the webhook/audit policy files. The config patching
    # occurs in enable-k8s-audit.sh using "minishift openshift config set".
    echo "Copying webhook config/audit policy files to /var/lib/minishift/base/kube-apiserver/..."
    cp "${SCRIPTDIR}/webhook-config.yaml" /var/lib/minishift/base/kube-apiserver/webhook-config.yaml
    cp "${SCRIPTDIR}/audit-policy.yaml" /var/lib/minishift/base/kube-apiserver/audit-policy.yaml

elif [[ "${VARIANT}" == minikube* ]]; then

    sudo mkdir -p /var/lib/k8s_audit
    cp "${SCRIPTDIR}/webhook-config.yaml" /var/lib/k8s_audit/webhook-config.yaml
    # k8s 1.12 only supports the v1beta1 audit policy; later versions get v2.
    if [[ "${VARIANT}" == *1.12* ]]; then
        cp "${SCRIPTDIR}/audit-policy.yaml" /var/lib/k8s_audit/audit-policy.yaml
    else
        cp "${SCRIPTDIR}/audit-policy-v2.yaml" /var/lib/k8s_audit/audit-policy.yaml
    fi

    # NOTE(review): the exact leading whitespace in APISERVER_PREFIX and the
    # YAML snippets below was collapsed by extraction; indentation here is
    # reconstructed to match a standard static-pod manifest — verify upstream.
    APISERVER_PREFIX="    -"
    APISERVER_LINE="- kube-apiserver"
    MANIFEST="/etc/kubernetes/manifests/kube-apiserver.yaml"

    if grep -q audit-policy-file "$MANIFEST"; then
        fatal "apiserver config patch already applied."
    fi

    TMPFILE="$SCRIPTDIR/kube-apiserver.yaml.patched"
    rm -f "$TMPFILE"

    # Rewrite the manifest line by line, injecting audit flags after the
    # kube-apiserver command and the hostPath volume/mount where needed.
    while read -r LINE; do
        echo "$LINE" >>"$TMPFILE"
        case "$LINE" in
            *$APISERVER_LINE*)
                echo "$APISERVER_PREFIX --audit-log-path=/var/lib/k8s_audit/k8s_audit_events.log" >>"$TMPFILE"
                echo "$APISERVER_PREFIX --audit-policy-file=/var/lib/k8s_audit/audit-policy.yaml" >>"$TMPFILE"
                echo "$APISERVER_PREFIX --audit-log-maxbackup=1" >>"$TMPFILE"
                echo "$APISERVER_PREFIX --audit-log-maxsize=10" >>"$TMPFILE"
                echo "$APISERVER_PREFIX --audit-webhook-config-file=/var/lib/k8s_audit/webhook-config.yaml" >>"$TMPFILE"
                echo "$APISERVER_PREFIX --audit-webhook-batch-max-wait=5s" >>"$TMPFILE"
                ;;
            *"volumeMounts:"*)
                echo "    - mountPath: /var/lib/k8s_audit/" >>"$TMPFILE"
                echo "      name: data" >>"$TMPFILE"
                ;;
            *"volumes:"*)
                echo "  - hostPath:" >>"$TMPFILE"
                echo "      path: /var/lib/k8s_audit" >>"$TMPFILE"
                echo "    name: data" >>"$TMPFILE"
                ;;
        esac
    done <"$MANIFEST"

    # Keep a backup, then install the patched manifest (kubelet restarts the
    # apiserver automatically when the static-pod manifest changes).
    cp "$MANIFEST" "$SCRIPTDIR/kube-apiserver.yaml.original"
    cp "$TMPFILE" "$MANIFEST"
elif [[ "${VARIANT}" == "rke-1.13" ]]; then
    echo "Copying audit-policy.yaml to /var/lib/k8s_audit/audit-policy.yaml"
    sudo mkdir -p /var/lib/k8s_audit
    cp "$SCRIPTDIR"/audit-policy-v2.yaml /var/lib/k8s_audit/audit-policy.yaml
    cp "$SCRIPTDIR"/webhook-config.yaml /var/lib/k8s_audit/webhook-config.yaml
else
    fatal "Unknown variant $VARIANT"
fi
-------------------------------------------------------------------------------- /k8s_audit_config/audit-policy-v2.yaml: --------------------------------------------------------------------------------
apiVersion: audit.k8s.io/v1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
4 | omitStages: 5 | - "RequestReceived" 6 | rules: 7 | # Log pod changes at RequestResponse level 8 | - level: RequestResponse 9 | resources: 10 | - group: "" 11 | # Resource "pods" doesn't match requests to any subresource of pods, 12 | # which is consistent with the RBAC policy. 13 | resources: ["pods", "deployments"] 14 | 15 | - level: RequestResponse 16 | resources: 17 | - group: "rbac.authorization.k8s.io" 18 | # Resource "pods" doesn't match requests to any subresource of pods, 19 | # which is consistent with the RBAC policy. 20 | resources: ["clusterroles", "clusterrolebindings"] 21 | 22 | # Log "pods/log", "pods/status" at Metadata level 23 | - level: Metadata 24 | resources: 25 | - group: "" 26 | resources: ["pods/log", "pods/status"] 27 | 28 | # Don't log requests to a configmap called "controller-leader" 29 | - level: None 30 | resources: 31 | - group: "" 32 | resources: ["configmaps"] 33 | resourceNames: ["controller-leader"] 34 | 35 | # Don't log watch requests by the "system:kube-proxy" on endpoints or services 36 | - level: None 37 | users: ["system:kube-proxy"] 38 | verbs: ["watch"] 39 | resources: 40 | - group: "" # core API group 41 | resources: ["endpoints", "services"] 42 | 43 | # Don't log authenticated requests to certain non-resource URL paths. 44 | - level: None 45 | userGroups: ["system:authenticated"] 46 | nonResourceURLs: 47 | - "/api*" # Wildcard matching. 48 | - "/version" 49 | 50 | # Log the request body of configmap changes in kube-system. 51 | - level: Request 52 | resources: 53 | - group: "" # core API group 54 | resources: ["configmaps"] 55 | # This rule only applies to resources in the "kube-system" namespace. 56 | # The empty string "" can be used to select non-namespaced resources. 57 | namespaces: ["kube-system"] 58 | 59 | # Log configmap changes in all other namespaces at the RequestResponse level. 
60 | - level: RequestResponse 61 | resources: 62 | - group: "" # core API group 63 | resources: ["configmaps"] 64 | 65 | # Log secret changes in all other namespaces at the Metadata level. 66 | - level: Metadata 67 | resources: 68 | - group: "" # core API group 69 | resources: ["secrets"] 70 | 71 | # Log all other resources in core and extensions at the Request level. 72 | - level: Request 73 | resources: 74 | - group: "" # core API group 75 | - group: "extensions" # Version of group should NOT be included. 76 | 77 | # A catch-all rule to log all other requests at the Metadata level. 78 | - level: Metadata 79 | # Long-running requests like watches that fall under this rule will not 80 | # generate an audit event in RequestReceived. 81 | omitStages: 82 | - "RequestReceived" 83 | -------------------------------------------------------------------------------- /k8s_audit_config/audit-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: audit.k8s.io/v1beta1 # This is required. 2 | kind: Policy 3 | # Don't generate audit events for all requests in RequestReceived stage. 4 | omitStages: 5 | - "RequestReceived" 6 | rules: 7 | # Log pod changes at RequestResponse level 8 | - level: RequestResponse 9 | resources: 10 | - group: "" 11 | # Resource "pods" doesn't match requests to any subresource of pods, 12 | # which is consistent with the RBAC policy. 13 | resources: ["pods", "deployments"] 14 | 15 | - level: RequestResponse 16 | resources: 17 | - group: "rbac.authorization.k8s.io" 18 | # Resource "pods" doesn't match requests to any subresource of pods, 19 | # which is consistent with the RBAC policy. 
20 | resources: ["clusterroles", "clusterrolebindings"] 21 | 22 | # Log "pods/log", "pods/status" at Metadata level 23 | - level: Metadata 24 | resources: 25 | - group: "" 26 | resources: ["pods/log", "pods/status"] 27 | 28 | # Don't log requests to a configmap called "controller-leader" 29 | - level: None 30 | resources: 31 | - group: "" 32 | resources: ["configmaps"] 33 | resourceNames: ["controller-leader"] 34 | 35 | # Don't log watch requests by the "system:kube-proxy" on endpoints or services 36 | - level: None 37 | users: ["system:kube-proxy"] 38 | verbs: ["watch"] 39 | resources: 40 | - group: "" # core API group 41 | resources: ["endpoints", "services"] 42 | 43 | # Don't log authenticated requests to certain non-resource URL paths. 44 | - level: None 45 | userGroups: ["system:authenticated"] 46 | nonResourceURLs: 47 | - "/api*" # Wildcard matching. 48 | - "/version" 49 | 50 | # Log the request body of configmap changes in kube-system. 51 | - level: Request 52 | resources: 53 | - group: "" # core API group 54 | resources: ["configmaps"] 55 | # This rule only applies to resources in the "kube-system" namespace. 56 | # The empty string "" can be used to select non-namespaced resources. 57 | namespaces: ["kube-system"] 58 | 59 | # Log configmap changes in all other namespaces at the RequestResponse level. 60 | - level: RequestResponse 61 | resources: 62 | - group: "" # core API group 63 | resources: ["configmaps"] 64 | 65 | # Log secret changes in all other namespaces at the Metadata level. 66 | - level: Metadata 67 | resources: 68 | - group: "" # core API group 69 | resources: ["secrets"] 70 | 71 | # Log all other resources in core and extensions at the Request level. 72 | - level: Request 73 | resources: 74 | - group: "" # core API group 75 | - group: "extensions" # Version of group should NOT be included. 76 | 77 | # A catch-all rule to log all other requests at the Metadata level. 
78 | - level: Metadata 79 | # Long-running requests like watches that fall under this rule will not 80 | # generate an audit event in RequestReceived. 81 | omitStages: 82 | - "RequestReceived" 83 | -------------------------------------------------------------------------------- /k8s_audit_config/audit-sink.yaml.in: -------------------------------------------------------------------------------- 1 | apiVersion: auditregistration.k8s.io/v1alpha1 2 | kind: AuditSink 3 | metadata: 4 | name: sysdig-agent 5 | spec: 6 | policy: 7 | level: RequestResponse 8 | stages: 9 | - ResponseComplete 10 | - ResponseStarted 11 | webhook: 12 | throttle: 13 | qps: 10 14 | burst: 15 15 | clientConfig: 16 | url: "http://$AGENT_SERVICE_CLUSTERIP:7765/k8s_audit" 17 | -------------------------------------------------------------------------------- /k8s_audit_config/enable-k8s-audit.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | VARIANT=${1:-NONE} 6 | 7 | function fatal() { 8 | MESSAGE=$1 9 | 10 | echo "${MESSAGE}" 11 | echo "Exiting." 12 | exit 1 13 | } 14 | 15 | function copy_using_cmd() { 16 | COPY_CMD_TMPL=$1 17 | SOURCEFILE=$2 18 | DESTFILE=$3 19 | 20 | CMD=$(echo "${COPY_CMD_TMPL}" | SOURCEFILE=$SOURCEFILE DESTFILE=$DESTFILE envsubst) 21 | 22 | bash -c "${CMD}" 23 | } 24 | 25 | function prepare_webhook_config() { 26 | echo "***Creating suitable webhook configuration file from sysdig agent service cluster ip..." 27 | AGENT_SERVICE_CLUSTERIP="${AGENT_SERVICE_CLUSTERIP}" envsubst webhook-config.yaml 28 | } 29 | 30 | function prepare_audit_sink_config() { 31 | echo "***Creating suitable audit sink configurations files from sysdig agent service cluster ip..." 32 | AGENT_SERVICE_CLUSTERIP="${AGENT_SERVICE_CLUSTERIP}" envsubst audit-sink.yaml 33 | } 34 | 35 | echo "Checking for required commands..." 
36 | command -v yq || fatal "Could not find program \"yq\"" 37 | command -v jq || fatal "Could not find program \"jq\"" 38 | 39 | APISERVER=$(kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " " | cut -f2 -d: | cut -f3 -d/) 40 | AGENT_SERVICE_CLUSTERIP=$(kubectl get service sysdig-agent -o=jsonpath="{.spec.clusterIP}" -n sysdig-agent) 41 | 42 | if [[ "$VARIANT" == minikube* ]]; then 43 | SSH_CMD="ssh -i $(minikube ssh-key) docker@$(minikube ip)" 44 | COPY_CMD="scp -i $(minikube ssh-key) \$SOURCEFILE docker@$(minikube ip):\$DESTFILE" 45 | elif [ "$VARIANT" == "minishift-3.11" ]; then 46 | SSH_CMD="minishift ssh" 47 | COPY_CMD="(minishift ssh \"cat > \$DESTFILE\") < \$SOURCEFILE" 48 | elif [ "$VARIANT" == "openshift-3.11" ]; then 49 | SSH_CMD="ssh centos@$APISERVER" 50 | COPY_CMD="scp \$SOURCEFILE centos@$APISERVER:\$DESTFILE" 51 | elif [[ "$VARIANT" == "openshift-4.2" ]]; then 52 | echo "***Updating kube-apiserver configuration..." 53 | CUR_REVISION=$(oc get pod -l app=openshift-kube-apiserver -n openshift-kube-apiserver -o jsonpath={.items[0].metadata.labels.revision}) 54 | EXP_REVISION=$((CUR_REVISION+1)) 55 | oc patch kubeapiserver cluster --type=merge -p '{"spec":{"unsupportedConfigOverrides":{"apiServerArguments":{"audit-dynamic-configuration":["true"],"feature-gates":["DynamicAuditing=true"],"runtime-config":["auditregistration.k8s.io/v1alpha1=true"]}}}}' 56 | echo "Waiting for apiserver pod to restart..." 57 | POD_STATUS="N/A" 58 | ATTEMPT=0 59 | while [ "${POD_STATUS}" != "Running" ]; do 60 | if [ "$ATTEMPT" == 20 ]; then 61 | fatal "kube-apiserver pod not restarted after 10 minutes, not continuing." 62 | fi 63 | sleep 30 64 | ATTEMPT=$((ATTEMPT+1)) 65 | echo "Checking pod status (Attempt ${ATTEMPT})..." 66 | POD_STATUS=$(oc get pod -l revision=${EXP_REVISION},app=openshift-kube-apiserver -n openshift-kube-apiserver -o jsonpath={.items[0].status.phase} 2>&1 || true) 67 | done 68 | echo "Creating dynamic audit sink..." 
69 | prepare_audit_sink_config 70 | kubectl apply -f audit-sink.yaml 71 | elif [[ "$VARIANT" == "gke" ]]; then 72 | echo "Enter your gce project id: " 73 | read GCE_PROJECT_ID 74 | echo "Will use GCE Project Id: $GCE_PROJECT_ID" 75 | echo "Creating GCE Service Account that has the ability to read logs..." 76 | RET=$(gcloud iam service-accounts list --filter="name=projects/$GCE_PROJECT_ID/serviceAccounts/swb-logs-reader@$GCE_PROJECT_ID.iam.gserviceaccount.com" 2>&1) 77 | 78 | if [[ "$RET" != "Listed 0 items." ]]; then 79 | echo "Service account exists ($RET), not creating again" 80 | else 81 | gcloud iam service-accounts create swb-logs-reader --description "Service account used by stackdriver-webhook-bridge" --display-name "stackdriver-webhook-bridge logs reader" 82 | gcloud projects add-iam-policy-binding "$GCE_PROJECT_ID" --member serviceAccount:swb-logs-reader@"$GCE_PROJECT_ID".iam.gserviceaccount.com --role 'roles/logging.viewer' 83 | gcloud iam service-accounts keys create "$PWD"/swb-logs-reader-key.json --iam-account swb-logs-reader@"$GCE_PROJECT_ID".iam.gserviceaccount.com 84 | fi 85 | 86 | echo "Creating k8s secret containing service account keys..." 87 | kubectl delete secret stackdriver-webhook-bridge -n sysdig-agent|| true 88 | kubectl create secret generic stackdriver-webhook-bridge --from-file=key.json="$PWD"/swb-logs-reader-key.json -n sysdig-agent 89 | 90 | echo "Deploying stackdriver-webhook-bridge to sysdig-agent namespace..." 91 | curl -LO https://raw.githubusercontent.com/sysdiglabs/stackdriver-webhook-bridge/master/stackdriver-webhook-bridge.yaml 92 | kubectl apply -f stackdriver-webhook-bridge.yaml -n sysdig-agent 93 | 94 | echo "Done." 95 | 96 | exit 0 97 | 98 | elif [[ "$VARIANT" == "iks" ]]; then 99 | echo "Enter your IKS Cluster name/id: " 100 | read IKS_CLUSTER_NAME 101 | echo "Will use IKS cluster name: $IKS_CLUSTER_NAME" 102 | echo "Setting the cluster webhook backend url to the IP address of the sysdig-agent service..." 
103 | ibmcloud ks cluster master audit-webhook set --cluster "$IKS_CLUSTER_NAME" --remote-server http://$(kubectl get service sysdig-agent -o=jsonpath={.spec.clusterIP} -n sysdig-agent):7765/k8s_audit 104 | echo "IKS webhook now set to:" 105 | ibmcloud ks cluster master audit-webhook get --cluster "$IKS_CLUSTER_NAME" 106 | echo "Refreshing the cluster master. It might take several minutes for the master to refresh..." 107 | ibmcloud ks cluster master refresh --cluster "$IKS_CLUSTER_NAME" 108 | 109 | echo "Done." 110 | 111 | exit 0 112 | 113 | elif [[ "$VARIANT" == "rke-1.13" ]]; then 114 | echo "Path to RKE cluster.yml file: " 115 | read RKE_CLUSTER_YAML 116 | RKE_CLUSTER_YAML="${RKE_CLUSTER_YAML/#\~/$HOME}" 117 | echo "Will modify ${RKE_CLUSTER_YAML} to add audit policy/webhook configuration. Saving current version to ${RKE_CLUSTER_YAML}.old" 118 | cp "${RKE_CLUSTER_YAML}" "${RKE_CLUSTER_YAML}.old" 119 | 120 | APISERVER=$(yq r -j ${RKE_CLUSTER_YAML} | jq -r '.nodes | map(select(.role[] | contains ("controlplane")))| .[] .address') 121 | SSH_USER=$(yq r -j ${RKE_CLUSTER_YAML} | jq -r '.nodes | map(select(.role[] | contains ("controlplane")))| .[] .user') 122 | 123 | SSH_CMD="ssh -t $SSH_USER@$APISERVER" 124 | COPY_CMD="scp \$SOURCEFILE $SSH_USER@$APISERVER:\$DESTFILE" 125 | elif [[ "$VARIANT" == "kops" ]]; then 126 | 127 | prepare_webhook_config 128 | prepare_audit_sink_config 129 | 130 | if [ -z ${KOPS_CLUSTER_NAME+x} ]; then 131 | echo "Enter your kops cluster name: " 132 | read KOPS_CLUSTER_NAME 133 | fi 134 | 135 | echo "Fetching current kops cluster configuration..." 136 | kops get cluster $KOPS_CLUSTER_NAME -o yaml > cluster-current.yaml 137 | 138 | echo "Adding webhook configuration/audit policy to cluster configuration..." 
139 | 
140 |     # Build a kops cluster-spec fragment: ship the webhook config and audit
141 |     # policy to the masters as fileAssets and point the apiserver at them.
142 |     cat <<EOF > merge.yaml
143 | spec:
144 |   fileAssets:
145 |   - name: webhook-config
146 |     path: /var/lib/k8s_audit/webhook-config.yaml
147 |     roles: [Master]
148 |     content: |
149 | $(cat webhook-config.yaml | sed -e 's/^/      /')
150 |   - name: audit-policy
151 |     path: /var/lib/k8s_audit/audit-policy.yaml
152 |     roles: [Master]
153 |     content: |
154 | $(cat audit-policy-v2.yaml | sed -e 's/^/      /')
155 |   kubeAPIServer:
156 |     auditLogPath: /var/lib/k8s_audit/audit.log
157 |     auditLogMaxBackups: 1
158 |     auditLogMaxSize: 10
159 |     auditWebhookBatchMaxWait: 5s
160 |     auditPolicyFile: /var/lib/k8s_audit/audit-policy.yaml
161 |     auditWebhookConfigFile: /var/lib/k8s_audit/webhook-config.yaml
162 | EOF
163 | 
164 |     yq m -a=append cluster-current.yaml merge.yaml > cluster.yaml
165 | 
166 |     echo "Configuring kops with the new cluster configuration..."
167 |     kops replace -f cluster.yaml
168 | 
169 |     echo "Updating the cluster configuration to prepare changes to the cluster."
170 |     # Pass the cluster name explicitly: KOPS_CLUSTER_NAME may have been read
171 |     # interactively above and is not exported into kops' environment.
172 |     kops update cluster "$KOPS_CLUSTER_NAME" --yes
173 | 
174 |     echo "Performing a rolling update to redeploy the master nodes with the new files and apiserver configuration. . It make take several minutes for the rolling-update to complete..."
175 |     kops rolling-update cluster "$KOPS_CLUSTER_NAME" --yes
176 | 
177 |     echo "Done."
178 |     exit 0
179 | else
180 |     echo "Unknown K8s Distribution+version $VARIANT. Exiting."
181 |     exit 1
182 | fi
183 | 
184 | # If here, we need to manually copy files to the apiserver node and patch the config.
185 | prepare_webhook_config
186 | prepare_audit_sink_config
187 | 
188 | echo "***Copying apiserver config patch script/supporting files to apiserver..."
189 | $SSH_CMD "rm -rf /tmp/enable_k8s_audit && mkdir -p /tmp/enable_k8s_audit"
190 | 
191 | for f in apiserver-config.patch.sh audit-policy.yaml audit-policy-v2.yaml webhook-config.yaml; do
192 |     echo "   $f"
193 |     copy_using_cmd "${COPY_CMD}" $f /tmp/enable_k8s_audit/$f
194 | done
195 | 
196 | echo "***Modifying k8s apiserver config.."
193 | 194 | $SSH_CMD "sudo bash /tmp/enable_k8s_audit/apiserver-config.patch.sh $VARIANT" 195 | 196 | if [ "$VARIANT" == "minishift-3.11" ]; then 197 | 198 | # The documented instructions to enable audit logs for 3.11 don't work for minishift, 199 | # as changes to master-config.yaml are not properly converted to command line arguments 200 | # by "hypershift openshift-kube-apiserver". So instead, we modify the api server command 201 | # line arguments directly. 202 | 203 | DEL_PATCH=$(cat < "${RKE_CLUSTER_YAML}.new" 252 | mv "${RKE_CLUSTER_YAML}.new" "${RKE_CLUSTER_YAML}" 253 | rke up --config "${RKE_CLUSTER_YAML}" 254 | fi 255 | 256 | echo "***Done!" 257 | -------------------------------------------------------------------------------- /k8s_audit_config/webhook-config.yaml.in: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: falco 5 | cluster: 6 | server: http://$AGENT_SERVICE_CLUSTERIP:7765/k8s_audit 7 | contexts: 8 | - context: 9 | cluster: falco 10 | user: "" 11 | name: default-context 12 | current-context: default-context 13 | preferences: {} 14 | users: [] 15 | -------------------------------------------------------------------------------- /onprem_deploy/amazon_aws/all-in-a-box/sysdigcloud-onprem-all-in-a-box.cftemplate: -------------------------------------------------------------------------------- 1 | { 2 | "Mappings": { 3 | "Image2Region": { 4 | "ap-northeast-1": { 5 | "64": "ami-1a15c77b" 6 | }, 7 | "ap-northeast-2": { 8 | "64": "ami-a04297ce" 9 | }, 10 | "ap-south-1": { 11 | "64": "ami-cacbbea5" 12 | }, 13 | "ap-southeast-1": { 14 | "64": "ami-7243e611" 15 | }, 16 | "ap-southeast-2": { 17 | "64": "ami-55d4e436" 18 | }, 19 | "eu-central-1": { 20 | "64": "ami-0044b96f" 21 | }, 22 | "eu-west-1": { 23 | "64": "ami-d41d58a7" 24 | }, 25 | "sa-east-1": { 26 | "64": "ami-b777e4db" 27 | }, 28 | "us-east-1": { 29 | "64": "ami-c481fad3" 30 | }, 31 | "us-west-1": { 32 | 
"64": "ami-de347abe" 33 | }, 34 | "us-west-2": { 35 | "64": "ami-b04e92d0" 36 | } 37 | } 38 | }, 39 | "Outputs": { 40 | "AdministrationConsoleURL": { 41 | "Description": "Administration console", 42 | "Value": { 43 | "Fn::Join": [ 44 | "", 45 | [ 46 | "https://", 47 | { 48 | "Fn::GetAtt": [ 49 | "sysdigOnpremAllInAboxConsole", 50 | "PublicIp" 51 | ] 52 | }, 53 | ":8800/dashboard" 54 | ] 55 | ] 56 | } 57 | }, 58 | "SysdigcloudConsoleURL": { 59 | "Description": "Sysdigcloud console", 60 | "Value": { 61 | "Fn::Join": [ 62 | "", 63 | [ 64 | "https://", 65 | { 66 | "Fn::GetAtt": [ 67 | "sysdigOnpremAllInAboxConsole", 68 | "PublicIp" 69 | ] 70 | } 71 | ] 72 | ] 73 | } 74 | } 75 | }, 76 | "Parameters": { 77 | "AgentImageTag": { 78 | "Default": "latest", 79 | "Type": "String" 80 | }, 81 | "InfrastructureName": { 82 | "Default": "sysdigcloud-on-prem-all-in-a-box", 83 | "Type": "String" 84 | }, 85 | "InstanceType": { 86 | "Default": "m4.large", 87 | "Type": "String" 88 | }, 89 | "LicenseFileURL": { 90 | "Default": "", 91 | "Type": "String" 92 | }, 93 | "SSHKey": { 94 | "Type": "AWS::EC2::KeyPair::KeyName" 95 | }, 96 | "ServerDiskSize": { 97 | "Default": "100", 98 | "Type": "String" 99 | }, 100 | "SysdigcloudEmail": { 101 | "Default": "", 102 | "Type": "String" 103 | }, 104 | "SysdigcloudPassword": { 105 | "Default": "", 106 | "NoEcho": true, 107 | "Type": "String" 108 | } 109 | }, 110 | "Resources": { 111 | "InternetRoute": { 112 | "Properties": { 113 | "DestinationCidrBlock": "0.0.0.0/0", 114 | "GatewayId": { 115 | "Ref": "internetGW" 116 | }, 117 | "RouteTableId": { 118 | "Ref": "sysdigOnpremAllInAboxRoutingTable" 119 | } 120 | }, 121 | "Type": "AWS::EC2::Route" 122 | }, 123 | "SNSPolicy": { 124 | "Properties": { 125 | "PolicyDocument": { 126 | "Statement": [ 127 | { 128 | "Action": [ 129 | "sns:Publish" 130 | ], 131 | "Effect": "Allow", 132 | "Resource": [ 133 | "*" 134 | ] 135 | } 136 | ], 137 | "Version": "2012-10-17" 138 | }, 139 | "PolicyName": "SNSSendPolicy", 140 | 
"Roles": [ 141 | { 142 | "Ref": "sysdigOnpremAllInAboxServerIamRole" 143 | } 144 | ] 145 | }, 146 | "Type": "AWS::IAM::Policy" 147 | }, 148 | "SgRulealltraffic5f93c6df1cd4a78977b598b5e177fec6": { 149 | "Properties": { 150 | "FromPort": "0", 151 | "GroupId": { 152 | "Ref": "sysdigOnpremAllInAboxSg" 153 | }, 154 | "IpProtocol": "-1", 155 | "SourceSecurityGroupId": { 156 | "Ref": "sysdigOnpremAllInAboxSg" 157 | }, 158 | "ToPort": "65535" 159 | }, 160 | "Type": "AWS::EC2::SecurityGroupIngress" 161 | }, 162 | "SgRulereplicatedConsole68740a3b02c9e7767e419b117e1510b1": { 163 | "Properties": { 164 | "CidrIp": "0.0.0.0/0", 165 | "FromPort": "8800", 166 | "GroupId": { 167 | "Ref": "sysdigOnpremAllInAboxSg" 168 | }, 169 | "IpProtocol": "tcp", 170 | "ToPort": "8800" 171 | }, 172 | "Type": "AWS::EC2::SecurityGroupIngress" 173 | }, 174 | "SgRulesysdigcloudCollector9db8b2767db43dc8e9528bd2bc723ffa": { 175 | "Properties": { 176 | "CidrIp": "0.0.0.0/0", 177 | "FromPort": "6443", 178 | "GroupId": { 179 | "Ref": "sysdigOnpremAllInAboxSg" 180 | }, 181 | "IpProtocol": "tcp", 182 | "ToPort": "6443" 183 | }, 184 | "Type": "AWS::EC2::SecurityGroupIngress" 185 | }, 186 | "SgRulesysdigcloudConsolef410cf23e0bcdadfa8ebd94b6f168ab0": { 187 | "Properties": { 188 | "CidrIp": "0.0.0.0/0", 189 | "FromPort": "443", 190 | "GroupId": { 191 | "Ref": "sysdigOnpremAllInAboxSg" 192 | }, 193 | "IpProtocol": "tcp", 194 | "ToPort": "443" 195 | }, 196 | "Type": "AWS::EC2::SecurityGroupIngress" 197 | }, 198 | "VPCGWAttachement": { 199 | "Properties": { 200 | "InternetGatewayId": { 201 | "Ref": "internetGW" 202 | }, 203 | "VpcId": { 204 | "Ref": "Vpc" 205 | } 206 | }, 207 | "Type": "AWS::EC2::VPCGatewayAttachment" 208 | }, 209 | "Vpc": { 210 | "Properties": { 211 | "CidrBlock": "10.10.0.0/16", 212 | "EnableDnsHostnames": "true", 213 | "EnableDnsSupport": "true", 214 | "InstanceTenancy": "default", 215 | "Tags": [ 216 | { 217 | "Key": "Infrastructure", 218 | "Value": { 219 | "Ref": "InfrastructureName" 220 | } 
221 | }, 222 | { 223 | "Key": "Name", 224 | "Value": { 225 | "Fn::Join": [ 226 | "-", 227 | [ 228 | { 229 | "Ref": "InfrastructureName" 230 | }, 231 | "vpc" 232 | ] 233 | ] 234 | } 235 | } 236 | ] 237 | }, 238 | "Type": "AWS::EC2::VPC" 239 | }, 240 | "internetGW": { 241 | "Type": "AWS::EC2::InternetGateway" 242 | }, 243 | "sysdigOnpremAllInAboxConsole": { 244 | "Properties": { 245 | "BlockDeviceMappings": [ 246 | { 247 | "DeviceName": "/dev/xvda", 248 | "Ebs": { 249 | "DeleteOnTermination": "true", 250 | "VolumeSize": { 251 | "Ref": "ServerDiskSize" 252 | }, 253 | "VolumeType": "gp2" 254 | } 255 | } 256 | ], 257 | "IamInstanceProfile": { 258 | "Ref": "sysdigOnpremAllInAboxServerRole" 259 | }, 260 | "ImageId": { 261 | "Fn::FindInMap": [ 262 | "Image2Region", 263 | { 264 | "Ref": "AWS::Region" 265 | }, 266 | "64" 267 | ] 268 | }, 269 | "InstanceType": { 270 | "Ref": "InstanceType" 271 | }, 272 | "KeyName": { 273 | "Ref": "SSHKey" 274 | }, 275 | "NetworkInterfaces": [ 276 | { 277 | "AssociatePublicIpAddress": "true", 278 | "DeleteOnTermination": "true", 279 | "DeviceIndex": "0", 280 | "GroupSet": [ 281 | { 282 | "Ref": "sysdigOnpremAllInAboxSg" 283 | } 284 | ], 285 | "SubnetId": { 286 | "Ref": "sysdigOnpremAllInAboxSubnet0" 287 | } 288 | } 289 | ], 290 | "SourceDestCheck": "true", 291 | "Tags": [ 292 | { 293 | "Key": "Infrastructure", 294 | "Value": { 295 | "Ref": "InfrastructureName" 296 | } 297 | }, 298 | { 299 | "Key": "Name", 300 | "Value": "sysdigOnpremAllInAbox-console" 301 | } 302 | ], 303 | "UserData": { 304 | "Fn::Base64": { 305 | "Fn::Join": [ 306 | "", 307 | [ 308 | "#!/bin/bash\n", 309 | "set -exuo pipefail\n", 310 | "curl -sSL https://s3.amazonaws.com/draios-infrastructures/onprem/start-all-in-a-box.sh | bash -s -- ", 311 | { 312 | "Ref": "LicenseFileURL" 313 | }, 314 | " ", 315 | { 316 | "Ref": "SysdigcloudEmail" 317 | }, 318 | " ", 319 | { 320 | "Ref": "SysdigcloudPassword" 321 | }, 322 | " ", 323 | { 324 | "Ref": "AgentImageTag" 325 | }, 326 | "\n", 
327 | "exit 0" 328 | ] 329 | ] 330 | } 331 | } 332 | }, 333 | "Type": "AWS::EC2::Instance" 334 | }, 335 | "sysdigOnpremAllInAboxRoutingTable": { 336 | "Properties": { 337 | "Tags": [ 338 | { 339 | "Key": "Infrastructure", 340 | "Value": { 341 | "Ref": "InfrastructureName" 342 | } 343 | }, 344 | { 345 | "Key": "Name", 346 | "Value": { 347 | "Fn::Join": [ 348 | "-", 349 | [ 350 | { 351 | "Ref": "InfrastructureName" 352 | }, 353 | "route-table" 354 | ] 355 | ] 356 | } 357 | } 358 | ], 359 | "VpcId": { 360 | "Ref": "Vpc" 361 | } 362 | }, 363 | "Type": "AWS::EC2::RouteTable" 364 | }, 365 | "sysdigOnpremAllInAboxServerIamRole": { 366 | "Properties": { 367 | "AssumeRolePolicyDocument": { 368 | "Statement": [ 369 | { 370 | "Action": "sts:AssumeRole", 371 | "Effect": "Allow", 372 | "Principal": { 373 | "Service": "ec2.amazonaws.com" 374 | }, 375 | "Sid": "" 376 | } 377 | ], 378 | "Version": "2012-10-17" 379 | }, 380 | "Path": "/" 381 | }, 382 | "Type": "AWS::IAM::Role" 383 | }, 384 | "sysdigOnpremAllInAboxServerRole": { 385 | "Properties": { 386 | "Path": "/", 387 | "Roles": [ 388 | { 389 | "Ref": "sysdigOnpremAllInAboxServerIamRole" 390 | } 391 | ] 392 | }, 393 | "Type": "AWS::IAM::InstanceProfile" 394 | }, 395 | "sysdigOnpremAllInAboxSg": { 396 | "Properties": { 397 | "GroupDescription": { 398 | "Fn::Join": [ 399 | "-", 400 | [ 401 | { 402 | "Ref": "InfrastructureName" 403 | }, 404 | "security-group" 405 | ] 406 | ] 407 | }, 408 | "Tags": [ 409 | { 410 | "Key": "Infrastructure", 411 | "Value": { 412 | "Ref": "InfrastructureName" 413 | } 414 | }, 415 | { 416 | "Key": "Name", 417 | "Value": { 418 | "Fn::Join": [ 419 | "-", 420 | [ 421 | { 422 | "Ref": "InfrastructureName" 423 | }, 424 | "security-group" 425 | ] 426 | ] 427 | } 428 | } 429 | ], 430 | "VpcId": { 431 | "Ref": "Vpc" 432 | } 433 | }, 434 | "Type": "AWS::EC2::SecurityGroup" 435 | }, 436 | "sysdigOnpremAllInAboxSubnet0": { 437 | "Properties": { 438 | "AvailabilityZone": { 439 | "Fn::Select": [ 440 | "0", 441 | { 
442 | "Fn::GetAZs": "" 443 | } 444 | ] 445 | }, 446 | "CidrBlock": "10.10.0.0/26", 447 | "Tags": [ 448 | { 449 | "Key": "Infrastructure", 450 | "Value": { 451 | "Ref": "InfrastructureName" 452 | } 453 | }, 454 | { 455 | "Key": "Name", 456 | "Value": { 457 | "Fn::Join": [ 458 | "-", 459 | [ 460 | { 461 | "Ref": "InfrastructureName" 462 | }, 463 | "Subnet0" 464 | ] 465 | ] 466 | } 467 | } 468 | ], 469 | "VpcId": { 470 | "Ref": "Vpc" 471 | } 472 | }, 473 | "Type": "AWS::EC2::Subnet" 474 | }, 475 | "sysdigOnpremAllInAboxsubnet2RT0": { 476 | "Properties": { 477 | "RouteTableId": { 478 | "Ref": "sysdigOnpremAllInAboxRoutingTable" 479 | }, 480 | "SubnetId": { 481 | "Ref": "sysdigOnpremAllInAboxSubnet0" 482 | } 483 | }, 484 | "Type": "AWS::EC2::SubnetRouteTableAssociation" 485 | } 486 | } 487 | } -------------------------------------------------------------------------------- /support_bundle/README.md: -------------------------------------------------------------------------------- 1 | # On-Premise Support Bundle script 2 | 3 | ## Usage example 4 | Specify your current namespace with `-n` flag. 5 | 6 | ``` 7 | export API_TOKEN="xxxxx-xxxxx-xxxx-xxxxx" 8 | 9 | ./get_support_bundle.sh -a $API_TOKEN -n sysdigcloud 10 | ``` 11 | 12 | *NOTE:* For cases where the access to the API endpoint is limited/restricted use `-la` or `--local-api` flag. 13 | -------------------------------------------------------------------------------- /support_bundle/get_support_bundle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | trap 'catch' ERR 5 | catch() { 6 | echo "An error has occurred. Please check your input and try again. 
Run this script with the -d flag for debugging" 7 | } 8 | 9 | #generate sysdigcloud support bundle on kubernetes 10 | 11 | API_LOCAL="" 12 | LABELS="" 13 | CONTEXT="" 14 | CONTEXT_OPTS="" 15 | NAMESPACE="sysdig" 16 | LOG_DIR=$(mktemp -d sysdigcloud-support-bundle-XXXX) 17 | SINCE_OPTS="" 18 | SINCE="" 19 | API_KEY="" 20 | SECURE_API_KEY="" 21 | SKIP_LOGS="false" 22 | ELASTIC_CURL="" 23 | 24 | print_help() { 25 | printf 'Usage: %s [-a|--api-key ] [c|--context ] [-d|--debug] [-l|--labels ] [-n|--namespace ] [-s|--since ] [--skip-logs] [-h|--help]\n' "$0" 26 | printf "\t%s\n" "-a,--api-key: Provide the Superuser API key for advanced data collection" 27 | printf "\t%s\n" "-c,--context: Specify the kubectl context. If not set, the current context will be used." 28 | printf "\t%s\n" "-d,--debug: Enables Debug" 29 | printf "\t%s\n" "-l,--labels: Specify Sysdig pod role label to collect (e.g. api,collector,worker)" 30 | printf "\t%s\n" "-la,--local-api: Uses kubectl port-forward feature for being able to access APIs for advanced data collection (for env that cannot reach APIs via domain/FQDN)" 31 | printf "\t%s\n" "-n,--namespace: Specify the Sysdig namespace. (default: ${NAMESPACE})" 32 | printf "\t%s\n" "-s,--since: Specify the timeframe of logs to collect (e.g. -s 1h)" 33 | printf "\t%s\n" "-sa,--secure-api-key: Provide the Secure Superuser API key for advanced data collection" 34 | printf "\t%s\n" "--skip-logs: Skip all log collection. (default: ${SKIP_LOGS})" 35 | printf "\t%s\n" "-h,--help: Prints help" 36 | } 37 | 38 | parse_commandline() { 39 | while test $# -gt 0 40 | do 41 | _key="$1" 42 | case "$_key" in 43 | -a|--api-key) 44 | test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 45 | API_KEY="$2" 46 | shift 47 | ;; 48 | -c|--context) 49 | test $# -lt 2 && die "Missing value for the optional argument '$_key'." 
1 50 | CONTEXT="$2" 51 | shift 52 | ;; 53 | -d|--debug) 54 | set -x 55 | ;; 56 | -d*) 57 | set -x 58 | ;; 59 | -l|--labels) 60 | test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 61 | LABELS="$2" 62 | shift 63 | ;; 64 | -la|--local-api) 65 | API_LOCAL="true" 66 | ;; 67 | -n|--namespace) 68 | test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 69 | NAMESPACE="$2" 70 | shift 71 | ;; 72 | -s|--since) 73 | test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 74 | SINCE="$2" 75 | shift 76 | ;; 77 | --skip-logs) 78 | SKIP_LOGS="true" 79 | ;; 80 | -h|--help) 81 | print_help 82 | exit 0 83 | ;; 84 | -h*) 85 | print_help 86 | exit 0 87 | ;; 88 | -sa|--secure-api-key) 89 | test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 90 | SECURE_API_KEY="$2" 91 | shift 92 | ;; 93 | esac 94 | shift 95 | done 96 | } 97 | 98 | get_agent_version_metric_limits() { 99 | # function used to get metric JSON data for Agent versions and metric counts for each agent. 
100 | # This is taken from the Sysdig Agent and Health Status Dashboard 101 | # arguments: 102 | PARAMS=( 103 | -sk --location --request POST "${API_URL}/api/data/batch?metricCompatibilityValidation=true&emptyValuesAsNull=true" 104 | --header 'X-Sysdig-Product: SDC' 105 | --header "Authorization: Bearer ${API_KEY}" 106 | --header 'Content-Type: application/json' 107 | -d "{\"requests\":[{\"format\":{\"type\":\"data\"},\"time\":{\"from\":${FROM_EPOCH_TIME}000000,\"to\":${TO_EPOCH_TIME}000000,\"sampling\":600000000},\"metrics\":{\"v0\":\"agent.version\",\"v1\":\"agent.mode\",\"v2\":\"metricCount.statsd\",\"v3\":\"metricCount.prometheus\",\"v4\":\"metricCount.appCheck\",\"v5\":\"metricCount.jmx\",\"k1\":\"host.hostName\",\"k2\":\"host.mac\"},\"group\":{\"aggregations\":{\"v0\":\"concat\",\"v1\":\"concat\",\"v2\":\"max\",\"v3\":\"max\",\"v4\":\"avg\",\"v5\":\"avg\"},\"groupAggregations\":{\"v0\":\"concat\",\"v1\":\"concat\",\"v2\":\"sum\",\"v3\":\"sum\",\"v4\":\"avg\",\"v5\":\"avg\"},\"by\":[{\"metric\":\"k1\"},{\"metric\":\"k2\"}],\"configuration\":{\"groups\":[{\"groupBy\":[]}]}},\"paging\":{\"from\":0,\"to\":9999},\"sort\":[{\"v0\":\"desc\"},{\"v1\":\"desc\"},{\"v2\":\"desc\"},{\"v3\":\"desc\"},{\"v4\":\"desc\"},{\"v5\":\"desc\"}],\"scope\":null,\"compareTo\":null}]}" 108 | ) 109 | curl "${PARAMS[@]}" >${LOG_DIR}/metrics/agent_version_metric_limits.json || echo "Curl failed collecting agent_version_metric_limits.json data!" 
&& true
110 | }
111 | 
112 | get_metrics() {
113 |     # function used to get metric JSON data for particular metrics we are interested in from the agent
114 |     # arguments:
115 |     #   1 - metric_name
116 |     #   2 - segment_by
117 |     metric="${1}"
118 |     segment_by="${2}"
119 | 
120 |     PARAMS=(
121 |         -sk --location --request POST "${API_URL}/api/data/batch?metricCompatibilityValidation=true&emptyValuesAsNull=true"
122 |         --header 'X-Sysdig-Product: SDC'
123 |         --header "Authorization: Bearer ${API_KEY}"
124 |         --header 'Content-Type: application/json'
125 |         -d "{\"requests\":[{\"format\":{\"type\":\"data\"},\"time\":{\"from\":${FROM_EPOCH_TIME}000000,\"to\":${TO_EPOCH_TIME}000000,\"sampling\":600000000},\"metrics\":{\"v0\":\"${metric}\",\"k0\":\"timestamp\",\"k1\":\"${segment_by}\"},\"group\":{\"aggregations\":{\"v0\":\"avg\"},\"groupAggregations\":{\"v0\":\"avg\"},\"by\":[{\"metric\":\"k0\",\"value\":600000000},{\"metric\":\"k1\"}],\"configuration\":{\"groups\":[{\"groupBy\":[]}]}},\"paging\":{\"from\":0,\"to\":9999},\"sort\":[{\"v0\":\"desc\"}],\"scope\":null,\"compareTo\":null}]}"
126 |     )
127 |     curl "${PARAMS[@]}" >${LOG_DIR}/metrics/${metric}_${segment_by}.json || echo "Curl failed collecting ${metric}_${segment_by} data!" && true
128 | }
129 | 
130 | main() {
131 |     local error
132 |     local RETVAL
133 | 
134 |     if [[ ! -z ${CONTEXT} ]]; then
135 |         CONTEXT_OPTS="--context=${CONTEXT}"
136 |     fi
137 | 
138 |     if [[ ! -z ${SINCE} ]]; then
139 |         SINCE_OPTS="--since ${SINCE}"
140 |     fi
141 | 
142 |     # Set options for kubectl commands
143 |     KUBE_OPTS="--namespace ${NAMESPACE} ${CONTEXT_OPTS}"
144 | 
145 |     #verify that the provided namespace exists
146 |     KUBE_OUTPUT=$(kubectl ${CONTEXT_OPTS} get namespace ${NAMESPACE} --no-headers >/dev/null 2>&1) && RETVAL=$? && error=0 || { RETVAL=$? && error=1; }
147 | 
148 |     if [[ ${error} -eq 1 ]]; then
149 |         echo "We could not determine the namespace. Please check the spelling and try again.
Return Code: ${RETVAL}" 150 | echo "kubectl ${CONTEXT_OPTS} get ns | grep ${NAMESPACE}" 151 | exit 1 152 | fi 153 | 154 | echo "$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get deployment sysdigcloud-api -ojsonpath='{.spec.template.spec.containers[0].image}' | awk -F: '{ print $NF }')" > ${LOG_DIR}/backend_version.txt 155 | BACKEND_VERSION=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get deployment sysdigcloud-api -ojsonpath='{.spec.template.spec.containers[0].image}' | awk -F: '{ print $NF }' | awk -F. '{ print $1 }') || true 156 | 157 | # If API key is supplied, check the backend version, and send a GET to the relevant endpoints. 158 | if [[ ! -z ${API_KEY} ]]; then 159 | if [[ "$BACKEND_VERSION" =~ ^(7|6)$ ]]; then 160 | if [[ "$API_LOCAL" == "true" ]]; then 161 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} port-forward service/sysdigcloud-api 8080 > /dev/null 2>&1 & 162 | 163 | # Store the port-forward pid in order to kill the process once we finish 164 | pid=$! 165 | 166 | # kill the port-forward regardless of how this script exits 167 | trap '{ 168 | # echo killing $pid 169 | kill $pid 170 | }' EXIT 171 | 172 | # wait for port-forward to become available 173 | while ! curl -s localhost:8080 > /dev/null 2>&1 ; do 174 | sleep 0.2 175 | done 176 | API_URL="http://127.0.0.1:8080" 177 | else 178 | API_URL=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get cm sysdigcloud-collector-config -ojsonpath='{.data.collector-config\.conf}' | grep serverName | head -1 | awk '{print $3}' | sed 's/"//g') 179 | fi 180 | # Check that the API_KEY for the Super User is valid and exit 181 | CURL_OUT=$(curl -fks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/license" >/dev/null 2>&1) && RETVAL=$? && error=0 || { RETVAL=$? && error=1; } 182 | if [[ ${error} -eq 1 ]]; then 183 | echo "The API_KEY supplied is Unauthorized. Please check and try again. 
Return Code: ${RETVAL}" 184 | exit 1 185 | fi 186 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/customer/1/meerkatSettings" >> ${LOG_DIR}/meerkat_settings.json 187 | elif [[ "$BACKEND_VERSION" =~ ^(5|4|3)$ ]]; then 188 | if [[ "$API_LOCAL" == "true" ]]; then 189 | kubectl ${KUBE_OPTS} port-forward service/sysdigcloud-api 8080 > /dev/null 2>&1 & 190 | 191 | # Store the port-forward pid in order to kill the process once we finish 192 | pid=$! 193 | 194 | # kill the port-forward regardless of how this script exits 195 | trap '{ 196 | # echo killing $pid 197 | kill $pid 198 | }' EXIT 199 | 200 | # wait for port-forward to become available 201 | while ! curl -s localhost:8080 > /dev/null 2>&1 ; do 202 | sleep 0.2 203 | done 204 | API_URL="http://127.0.0.1:8080" 205 | else 206 | API_URL=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get cm sysdigcloud-config -o yaml | grep -i api.url: | head -1 | awk '{print $2}') 207 | fi 208 | # Check that the API_KEY for the Super User is valid and exit 209 | CURL_OUT=$(curl -fks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/license" >/dev/null 2>&1) && RETVAL=$? && error=0 || { RETVAL=$? && error=1; } 210 | if [[ ${error} -eq 1 ]]; then 211 | echo "The API_KEY supplied is Unauthorized. Please check and try again. Return Code: ${RETVAL}" 212 | exit 1 213 | fi 214 | 215 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/customer/1/fastPathSettings" >> ${LOG_DIR}/fastPath_settings.json 216 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/customer/1/indexSettings" >> ${LOG_DIR}/index_settings.json 217 | else 218 | echo "We could not determine the backend version. Exiting." 
219 | exit 1 220 | fi 221 | 222 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/license" >> ${LOG_DIR}/license.json 223 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/agents/connected?checkStatus=true" >> ${LOG_DIR}/agents_connected.json 224 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/customer/1/storageSettings" >> ${LOG_DIR}/storage_settings.json 225 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/customer/1/streamsnapSettings" >> ${LOG_DIR}/streamSnap_settings.json 226 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/customers/1/snapshotSettings" >> ${LOG_DIR}/snapshot_settings.json 227 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/customer/1/planSettings" >> ${LOG_DIR}/plan_settings.json 228 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/customer/1/dataRetentionSettings" >> ${LOG_DIR}/dataRetention_settings.json 229 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/v2/users/light" >> ${LOG_DIR}/users.json 230 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/v2/teams/light" >> ${LOG_DIR}/teams.json 231 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/admin/auth/settings" >> ${LOG_DIR}/sso_settings.json 232 | curl -ks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/alerts" >> ${LOG_DIR}/alerts.json 233 | 234 | # If Secure API key is supplied, collect settings 235 | if [[ ! 
-z ${SECURE_API_KEY} ]]; then 236 | if [[ "$BACKEND_VERSION" =~ ^(7|6)$ ]]; then 237 | if [[ "$API_LOCAL" == "true" ]]; then 238 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} port-forward service/sysdigcloud-api 8080 > /dev/null 2>&1 & 239 | 240 | # Store the port-forward pid in order to kill the process once we finish 241 | pid=$! 242 | 243 | # kill the port-forward regardless of how this script exits 244 | trap '{ 245 | # echo killing $pid 246 | kill $pid 247 | }' EXIT 248 | 249 | # wait for port-forward to become available 250 | while ! curl -s localhost:8080 > /dev/null 2>&1 ; do 251 | sleep 0.2 252 | done 253 | API_URL="http://127.0.0.1:8080" 254 | else 255 | API_URL=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get cm sysdigcloud-collector-config -ojsonpath='{.data.collector-config\.conf}' | grep serverName | head -1 | awk '{print $3}' | sed 's/"//g') 256 | fi 257 | # Check that the SECURE_API_KEY for the Super User is valid and exit 258 | CURL_OUT=$(curl -fks -H "Authorization: Bearer ${SECURE_API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/license" >/dev/null 2>&1) && RETVAL=$? && error=0 || { RETVAL=$? && error=1; } 259 | if [[ ${error} -eq 1 ]]; then 260 | echo "The SECURE_API_KEY supplied is Unauthorized. Please check and try again. Return Code: ${RETVAL}" 261 | exit 1 262 | fi 263 | elif [[ "$BACKEND_VERSION" =~ ^(5|4|3)$ ]]; then 264 | if [[ "$API_LOCAL" == "true" ]]; then 265 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} port-forward service/sysdigcloud-api 8080 > /dev/null 2>&1 & 266 | 267 | # Store the port-forward pid in order to kill the process once we finish 268 | pid=$! 269 | 270 | # kill the port-forward regardless of how this script exits 271 | trap '{ 272 | # echo killing $pid 273 | kill $pid 274 | }' EXIT 275 | 276 | # wait for port-forward to become available 277 | while ! 
curl -s localhost:8080 > /dev/null 2>&1 ; do 278 | sleep 0.2 279 | done 280 | API_URL="http://127.0.0.1:8080" 281 | else 282 | API_URL=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get cm sysdigcloud-config -o yaml | grep -i api.url: | head -1 | awk '{print $2}') 283 | fi 284 | # Check that the API_KEY for the Super User is valid and exit 285 | CURL_OUT=$(curl -fks -H "Authorization: Bearer ${API_KEY}" -H "Content-Type: application/json" "${API_URL}/api/license" >/dev/null 2>&1) && RETVAL=$? && error=0 || { RETVAL=$? && error=1; } 286 | if [[ ${error} -eq 1 ]]; then 287 | echo "The API_KEY supplied is Unauthorized. Please check and try again. Return Code: ${RETVAL}" 288 | exit 1 289 | fi 290 | else 291 | echo "We cannot determine the backend version. Exiting." 292 | exit 1 293 | fi 294 | 295 | # Check if ScanningV1 is enabled, and if so, do ... 296 | SCANNING_V1_ENABLED=$(curl -ks ${API_URL}/api/secure/customerSettings -H "Authorization: Bearer ${SECURE_API_KEY}" 2>&1 | grep -Eo "\"scanningV1Enabled\":true") || true 297 | if [[ ${SCANNING_V1_ENABLED} == "\"scanningV1Enabled\":true" ]]; then 298 | echo "Scanning v1 is enabled. Continuing..." 299 | # CURL COMMANDS GO HERE 300 | mkdir -p ${LOG_DIR}/scanning 301 | curl -ks ${API_URL}/api/scanning/v1/resultsDirect?limit=1 -H "Authorization: Bearer ${SECURE_API_KEY}" >> ${LOG_DIR}/scanning/scanningv1.txt 302 | else 303 | echo "Scanning V1 not detected. Continuing..." 304 | fi 305 | 306 | # Check if ScanningV2 is enabled, and if so, do ... 307 | SCANNING_V2_ENABLED=$(curl -ks ${API_URL}/api/secure/customerSettings -H "Authorization: Bearer ${SECURE_API_KEY}" 2>&1 | grep -Eo "\"scanningV2Enabled\":true") || true 308 | if [[ ${SCANNING_V2_ENABLED} == "\"scanningV2Enabled\":true" ]]; then 309 | echo "Scanning v2 is enabled. Continuing..." 
310 | curl -ks ${API_URL}/api/scanning/scanresults/v2/results -H "Authorization: Bearer ${SECURE_API_KEY}" >> ${LOG_DIR}/scanning/scanningv2.txt 311 | # CURL COMMANDS GO HERE 312 | else 313 | echo "Scanning V2 not detected. Continuing..." 314 | fi 315 | fi 316 | 317 | if [[ $OSTYPE == 'darwin'* ]]; then 318 | TO_EPOCH_TIME=$(date -jf "%H:%M:%S" $(date +%H):00:00 +%s) 319 | else 320 | TO_EPOCH_TIME=$(date -d "$(date +%H):00:00" +%s) 321 | fi 322 | FROM_EPOCH_TIME=$((TO_EPOCH_TIME-86400)) 323 | METRICS=("syscall.count" "dragent.analyzer.sr" "container.count" "dragent.analyzer.n_drops_buffer" "dragent.analyzer.n_evts") 324 | DEFAULT_SEGMENT="host.hostName" 325 | SYSCALL_SEGMENTS=("host.hostName" "proc.name") 326 | 327 | mkdir -p ${LOG_DIR}/metrics 328 | for metric in ${METRICS[@]}; do 329 | if [ "${metric}" == "syscall.count" ]; then 330 | for segment in ${SYSCALL_SEGMENTS[@]}; do 331 | get_metrics "${metric}" "${segment}" 332 | done 333 | else 334 | get_metrics "${metric}" "${DEFAULT_SEGMENT}" 335 | fi 336 | done 337 | 338 | get_agent_version_metric_limits 339 | fi 340 | 341 | # Configure kubectl command if labels are set 342 | if [[ -z ${LABELS} ]]; then 343 | SYSDIGCLOUD_PODS=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods --no-headers -o custom-columns=NAME:metadata.name) 344 | else 345 | SYSDIGCLOUD_PODS=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} -l "role in (${LABELS})" get pods --no-headers -o custom-columns=NAME:metadata.name) 346 | fi 347 | 348 | echo "Using namespace ${NAMESPACE}" 349 | echo "Using context ${CONTEXT}" 350 | 351 | # Collect kubectl cluster dump 352 | CLUSTER_DUMP_DIR="${LOG_DIR}/kubectl-cluster-dump" 353 | mkdir -p ${CLUSTER_DUMP_DIR} 354 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} cluster-info dump --output-directory=${CLUSTER_DUMP_DIR} 355 | 356 | # Collect container logs for each pod 357 | if [[ "${SKIP_LOGS}" == "false" ]]; then 358 | echo "Gathering Logs from ${NAMESPACE} pods" 359 | command='tar czf - /logs/ /opt/draios/ /var/log/sysdigcloud/ 
/var/log/cassandra/ /tmp/redis.log /var/log/redis-server/redis.log /var/log/mysql/error.log /opt/prod.conf 2>/dev/null || true' 360 | for pod in ${SYSDIGCLOUD_PODS}; do 361 | echo "Getting support logs for ${pod}" 362 | mkdir -p ${LOG_DIR}/pod_logs/${pod} 363 | containers=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pod ${pod} -o json | jq -r '.spec.containers[].name' || echo "") 364 | for container in ${containers}; do 365 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} logs ${pod} -c ${container} ${SINCE_OPTS} > ${LOG_DIR}/pod_logs/${pod}/${container}-kubectl-logs.txt || true 366 | echo "Execing into ${container}" 367 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c ${container} -- bash -c "echo" >/dev/null 2>&1 && RETVAL=$? || RETVAL=$? && true 368 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c ${container} -- sh -c "echo" >/dev/null 2>&1 && RETVAL1=$? || RETVAL1=$? && true 369 | if [ $RETVAL -eq 0 ]; then 370 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c ${container} -- bash -c "${command}" > ${LOG_DIR}/pod_logs/${pod}/${container}-support-files.tgz || true 371 | elif [ $RETVAL1 -eq 0 ]; then 372 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c ${container} -- sh -c "${command}" > ${LOG_DIR}/pod_logs/${pod}/${container}-support-files.tgz || true 373 | else 374 | echo "Skipping log gathering for ${pod}" 375 | fi 376 | done 377 | done 378 | fi 379 | 380 | echo "Gathering pod descriptions" 381 | for pod in ${SYSDIGCLOUD_PODS}; do 382 | echo "Getting pod description for ${pod}" 383 | mkdir -p ${LOG_DIR}/${pod} 384 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pod ${pod} -o json > ${LOG_DIR}/${pod}/kubectl-describe.json || true 385 | done 386 | 387 | #Collect Describe Node Output 388 | echo "Collecting node information" 389 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} describe nodes | tee -a ${LOG_DIR}/describe_node_output.log || echo "No permission to describe nodes!" 
390 | 391 | NODES=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get nodes --no-headers -o custom-columns=NAME:metadata.name) && RETVAL=0 || { RETVAL=$? && echo "No permission to get nodes!"; } 392 | if [[ "${RETVAL}" == "0" ]]; then 393 | mkdir -p ${LOG_DIR}/nodes 394 | for node in ${NODES[@]}; do 395 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get node ${node} -ojson > ${LOG_DIR}/nodes/${node}-kubectl.json 396 | done 397 | unset RETVAL 398 | fi 399 | 400 | #Collect PV info 401 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pv | grep sysdig | tee -a ${LOG_DIR}/pv_output.log || echo "No permission to get PersistentVolumes" 402 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pvc | grep sysdig | tee -a ${LOG_DIR}/pvc_output.log 403 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get storageclass | tee -a ${LOG_DIR}/sc_output.log || echo "No permission to get StorageClasses" 404 | 405 | # Get info on deployments, statefulsets, persistentVolumeClaims, daemonsets, ingresses, ocp routes and pod distruption budgets 406 | echo "Gathering Manifest Information" 407 | objects=("svc" "deployment" "sts" "pvc" "daemonset" "ingress" "replicaset" "networkpolicy" "cronjob" "configmap" "pdb") 408 | # Check within API server if "routes" api resource is available (in order to gather ingresses on OCP) 409 | if $(kubectl api-resources | grep -q "routes"); then 410 | objects+=("routes") 411 | fi 412 | for object in "${objects[@]}"; do 413 | items=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get ${object} -o jsonpath="{.items[*]['metadata.name']}") 414 | mkdir -p ${LOG_DIR}/${object} 415 | for item in ${items}; do 416 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get ${object} ${item} -o json > ${LOG_DIR}/${object}/${item}-kubectl.json 417 | done 418 | done 419 | 420 | # Fetch container density information 421 | num_nodes=0 422 | num_pods=0 423 | num_running_containers=0 424 | num_total_containers=0 425 | 426 | printf "%-30s %-10s %-10s %-10s %-10s\n" "Node" "Pods" "Running Containers" "Total Containers" >> 
${LOG_DIR}/container_density.txt 427 | for node in $(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get nodes --no-headers -o custom-columns=node:.metadata.name); do 428 | total_pods=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -A --no-headers -o wide | grep ${node} |wc -l |xargs) 429 | running_containers=$( kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -A --no-headers -o wide |grep ${node} |awk '{print $3}' |cut -f 1 -d/ | awk '{ SUM += $1} END { print SUM }' |xargs) 430 | total_containers=$( kubectl ${CONTEXT_OPTS} get ${KUBE_OPTS} pods -A --no-headers -o wide |grep ${node} |awk '{print $3}' |cut -f 2 -d/ | awk '{ SUM += $1} END { print SUM }' |xargs) 431 | printf "%-30s %-15s %-20s %-10s\n" "${node}" "${total_pods}" "${running_containers}" "${total_containers}" >> ${LOG_DIR}/container_density.txt 432 | num_nodes=$((num_nodes+1)) 433 | num_pods=$((num_pods+${total_pods})) 434 | num_running_containers=$((num_running_containers+${running_containers})) 435 | num_total_containers=$((num_total_containers+${total_containers})) 436 | done 437 | 438 | printf "\nTotals\n-----\n" >> ${LOG_DIR}/container_density.txt 439 | printf "Nodes: ${num_nodes}\n" >> ${LOG_DIR}/container_density.txt 440 | printf "Pods: ${num_pods}\n" >> ${LOG_DIR}/container_density.txt 441 | printf "Running Containers: ${num_running_containers}\n" >> ${LOG_DIR}/container_density.txt 442 | printf "Containers: ${num_total_containers}\n" >> ${LOG_DIR}/container_density.txt 443 | 444 | # Fetch Cassandra Nodetool output 445 | echo "Fetching Cassandra statistics" 446 | for pod in $(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pod -l role=cassandra --no-headers -o custom-columns=NAME:metadata.name) 447 | do 448 | mkdir -p ${LOG_DIR}/cassandra/${pod} 449 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- nodetool info | tee -a ${LOG_DIR}/cassandra/${pod}/nodetool_info.log 450 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- nodetool status | tee -a 
${LOG_DIR}/cassandra/${pod}/nodetool_status.log 451 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- nodetool getcompactionthroughput | tee -a ${LOG_DIR}/cassandra/${pod}/nodetool_getcompactionthroughput.log 452 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- nodetool cfstats | tee -a ${LOG_DIR}/cassandra/${pod}/nodetool_cfstats.log 453 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- nodetool cfhistograms draios message_data10 | tee -a ${LOG_DIR}/cassandra/${pod}/nodetool_cfhistograms.log 454 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- nodetool proxyhistograms | tee -a ${LOG_DIR}/cassandra/${pod}/nodetool_proxyhistograms.log 455 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- nodetool tpstats | tee -a ${LOG_DIR}/cassandra/${pod}/nodetool_tpstats.log 456 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- nodetool compactionstats | tee -a ${LOG_DIR}/cassandra/${pod}/nodetool_compactionstats.log 457 | done 458 | 459 | echo "Fetching Elasticsearch health info" 460 | # CHECK HERE IF THE TLS ENV VARIABLE IS SET IN ELASTICSEARCH, AND BUILD THE CURL COMMAND OUT 461 | ELASTIC_POD=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -l role=elasticsearch --no-headers -o custom-columns=NAME:metadata.name | head -1) || true 462 | 463 | if [ ! 
-z ${ELASTIC_POD} ]; then 464 | ELASTIC_IMAGE=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pod ${ELASTIC_POD} -ojsonpath='{.spec.containers[?(@.name == "elasticsearch")].image}' | awk -F '/' '{print $NF}' | cut -f 1 -d ':') || true 465 | 466 | if [[ ${ELASTIC_IMAGE} == "opensearch"* ]]; then 467 | CERTIFICATE_DIRECTORY="/usr/share/opensearch/config" 468 | ELASTIC_TLS="true" 469 | else 470 | CERTIFICATE_DIRECTORY="/usr/share/elasticsearch/config" 471 | ELASTIC_TLS=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${ELASTIC_POD} -c elasticsearch -- env | grep -i ELASTICSEARCH_TLS_ENCRYPTION) || true 472 | if [[ ${ELASTIC_TLS} == *"ELASTICSEARCH_TLS_ENCRYPTION=true"* ]]; then 473 | ELASTIC_TLS="true" 474 | fi 475 | fi 476 | 477 | if [[ ${ELASTIC_TLS} == "true" ]]; then 478 | ELASTIC_CURL="curl -s --cacert ${CERTIFICATE_DIRECTORY}/root-ca.pem https://\${ELASTICSEARCH_ADMINUSER}:\${ELASTICSEARCH_ADMIN_PASSWORD}@\$(hostname):9200" 479 | else 480 | ELASTIC_CURL='curl -s -k http://$(hostname):9200' 481 | fi 482 | 483 | for pod in $(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -l role=elasticsearch --no-headers -o custom-columns=NAME:metadata.name) 484 | do 485 | mkdir -p ${LOG_DIR}/elasticsearch/${pod} 486 | 487 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- /bin/bash -c "${ELASTIC_CURL}/_cat/health" | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_health.log || true 488 | 489 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- /bin/bash -c "${ELASTIC_CURL}/_cat/indices" | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_indices.log || true 490 | 491 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- /bin/bash -c "${ELASTIC_CURL}/_cat/nodes?v" | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_nodes.log || true 492 | 493 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- /bin/bash -c "${ELASTIC_CURL}/_cluster/allocation/explain?pretty" | tee -a 
${LOG_DIR}/elasticsearch/${pod}/elasticsearch_index_allocation.log || true 494 | 495 | echo "Fetching ElasticSearch SSL Certificate Expiration Dates" 496 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- openssl x509 -in ${CERTIFICATE_DIRECTORY}/node.pem -noout -enddate | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_node_pem_expiration.log || true 497 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- openssl x509 -in ${CERTIFICATE_DIRECTORY}/admin.pem -noout -enddate | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_admin_pem_expiration.log || true 498 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- openssl x509 -in ${CERTIFICATE_DIRECTORY}/root-ca.pem -noout -enddate | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_root_ca_pem_expiration.log || true 499 | 500 | 501 | echo "Fetching Elasticsearch Index Versions" 502 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- bash -c "${ELASTIC_CURL}/_all/_settings/index.version\*?pretty" | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_index_versions.log || true 503 | 504 | echo "Checking Used Elasticsearch Storage - ${pod}" 505 | mountpath=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get sts sysdigcloud-elasticsearch -ojsonpath='{.spec.template.spec.containers[].volumeMounts[?(@.name == "data")].mountPath}') 506 | if [ ! -z $mountpath ]; then 507 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec ${pod} -c elasticsearch -- du -ch ${mountpath} | grep -i total | awk '{printf "%-13s %10s\n",$1,$2}' | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_storage.log || true 508 | else 509 | printf "Error getting ElasticSearch ${pod} mount path\n" | tee -a ${LOG_DIR}/elasticsearch/${pod}/elasticsearch_storage.log 510 | fi 511 | done 512 | else 513 | echo "Unable to fetch ElasticSearch pod to gather health info!" 
514 | fi 515 | 516 | # Fetch Cassandra storage info 517 | for pod in $(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -l role=cassandra --no-headers -o custom-columns=NAME:metadata.name) 518 | do 519 | echo "Checking Used Cassandra Storage - ${pod}" 520 | mkdir -p ${LOG_DIR}/cassandra/${pod} 521 | printf "${pod}\n" | tee -a ${LOG_DIR}/cassandra/${pod}/cassandra_storage.log 522 | mountpath=$(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get sts sysdigcloud-cassandra -ojsonpath='{.spec.template.spec.containers[].volumeMounts[?(@.name == "data")].mountPath}') 523 | if [ ! -z $mountpath ]; then 524 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c cassandra -- du -ch ${mountpath} | grep -i total | awk '{printf "%-13s %10s\n",$1,$2}' | tee -a ${LOG_DIR}/cassandra/${pod}/cassandra_storage.log || true 525 | else 526 | printf "Error getting Cassandra ${pod} mount path\n" | tee -a ${LOG_DIR}/cassandra/${pod}/cassandra_storage.log 527 | fi 528 | done 529 | 530 | # Fetch postgresql storage info 531 | for pod in $(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -l role=postgresql --no-headers -o custom-columns=NAME:metadata.name) 532 | do 533 | echo "Checking Used PostgreSQL Storage - ${pod}" 534 | mkdir -p ${LOG_DIR}/postgresql/${pod} 535 | printf "${pod}\n" | tee -a ${LOG_DIR}/postgresql/${pod}/postgresql_storage.log 536 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c postgresql -- du -ch /var/lib/postgresql | grep -i total | awk '{printf "%-13s %10s\n",$1,$2}' | tee -a ${LOG_DIR}/postgresql/${pod}/postgresql_storage.log || true 537 | done 538 | 539 | # Fetch mysql storage info 540 | for pod in $(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -l role=mysql --no-headers -o custom-columns=NAME:metadata.name) 541 | do 542 | echo "Checking Used MySQL Storage - ${pod}" 543 | mkdir -p ${LOG_DIR}/mysql/${pod} 544 | printf "${pod}\n" | tee -a ${LOG_DIR}/mysql/${pod}/mysql_storage.log 545 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c mysql -- du -ch /var/lib/mysql | 
grep -i total | awk '{printf "%-13s %10s\n",$1,$2}' | tee -a ${LOG_DIR}/mysql/${pod}/mysql_storage.log || true 546 | done 547 | 548 | # Fetch kafka storage info 549 | for pod in $(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -l role=cp-kafka --no-headers -o custom-columns=NAME:metadata.name) 550 | do 551 | echo "Checking Used Kafka Storage - ${pod}" 552 | mkdir -p ${LOG_DIR}/kafka/${pod} 553 | printf "${pod}\n" | tee -a ${LOG_DIR}/kafka/${pod}/kafka_storage.log 554 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c broker -- du -ch /opt/kafka/data | grep -i total | awk '{printf "%-13s %10s\n",$1,$2}' | tee -a ${LOG_DIR}/kafka/${pod}/kafka_storage.log || true 555 | done 556 | 557 | # Fetch zookeeper storage info 558 | for pod in $(kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get pods -l role=zookeeper --no-headers -o custom-columns=NAME:metadata.name) 559 | do 560 | echo "Checking Used Zookeeper Storage - ${pod}" 561 | mkdir -p ${LOG_DIR}/zookeeper/${pod} 562 | printf "${pod}\n" | tee -a ${LOG_DIR}/zookeeper/${pod}/zookeeper_storage.log 563 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} exec -it ${pod} -c server -- du -ch /var/lib/zookeeper/data | grep -i total | awk '{printf "%-13s %10s\n",$1,$2}' | tee -a ${LOG_DIR}/zookeeper/${pod}/zookeeper_storage.log || true 564 | done 565 | 566 | # Collect the sysdigcloud-config configmap, and write to the log directory 567 | echo "Fetching the sysdigcloud-config ConfigMap" 568 | kubectl ${CONTEXT_OPTS} ${KUBE_OPTS} get configmap sysdigcloud-config -o yaml | grep -v password | grep -v apiVersion > ${LOG_DIR}/config.yaml || true 569 | 570 | # Generate the bundle name, create a tarball, and remove the temp log directory 571 | BUNDLE_NAME=$(date +%s)_sysdig_cloud_support_bundle.tgz 572 | echo "Creating the ${BUNDLE_NAME} archive now" 573 | tar czf ${BUNDLE_NAME} ${LOG_DIR} 574 | rm -rf ${LOG_DIR} 575 | 576 | echo "Support bundle generated:" ${BUNDLE_NAME} 577 | } 578 | 579 | parse_commandline "$@" 580 | main 581 | 
-------------------------------------------------------------------------------- /user_creation/README.md: -------------------------------------------------------------------------------- 1 | # User creation via API 2 | 3 | The typical workflow for creating users in the Sysdig platform is via [email invite](https://docs.sysdig.com/en/docs/administration/administration-settings/user-and-team-administration/manage-users/). However, [on-premises deployments](https://docs.sysdig.com/en/docs/administration/on-premises-deployments/) may also choose to use the Sysdig platform API to directly create user records and set an initial password. 4 | 5 | The `create_user.sh` helper script in this directory will assist you in hitting the correct API endpoints to: 6 | 7 | 1. Enable/disable the ability to create users via the API (this ability is enabled by default) 8 | 2. Create user records via the API 9 | 10 | Access to the API endpoints needed to run `create_user.sh` is only permitted by the ["super" Admin](https://docs.sysdig.com/en/docs/administration/on-premises-deployments/find-the-super-admin-credentials-and-api-token/) for your environment. To prepare, modify `env.sh` to set the required values for the `API_TOKEN` of the "super" Admin user, the URL for accessing the Sysdig platform API (which will be the same URL that your users access for the Sysdig Monitor application), and review the `CUSTOMER_ID` setting (which should be `1`, but confirm this via the steps described in [Find Your Customer Number](https://docs.sysdig.com/en/docs/administration/administration-settings/find-your-customer-id-and-name/)). 
11 | 12 | # Usage examples: 13 | 14 | To see usage information: 15 | 16 | ``` 17 | # ./create_user.sh --help 18 | Usage: ./create_user.sh [OPTION] 19 | 20 | Create a user record, or change permissions for API-based user creation 21 | 22 | If no OPTION is specified, the current API User Creation settings are printed 23 | 24 | General options: 25 | -h | --help Print this Usage output 26 | 27 | Options for changing permissions: 28 | -e | --enable Enable API-based user creation (it's enabled by default) 29 | -d | --disable Disable API-based user creation 30 | 31 | Options for creating a user record: 32 | -u | --username Username for the user record to create 33 | -p | --password Password for the user record to create 34 | -f | --firstname (optional) First name for the user record to create 35 | -l | --lastname (optional) Last name for the user record to create 36 | ``` 37 | 38 | To see whether user creation is currently enabled/disabled, invoke with no options: 39 | 40 | ``` 41 | # ./create_user.sh 42 | { 43 | "apiPermissionSettings": { 44 | "allowApiUserCreation": true, 45 | "version": 1 46 | } 47 | } 48 | ``` 49 | 50 | To create a user record, at minimum, you must specify a username and password. The username should be a valid email address, unless you have LDAP authentication enabled in which case a simple username is also permitted. If successful, the call to the API will echo back a JSON object for the successfully-created user. 51 | 52 | ``` 53 | # ./create_user.sh -u jdoe@example.local -p JoeInitPasswd 54 | {"user":{"termsAndConditions":true, ... ,"username":"jdoe@example.local","dateCreated":1536878606750,"status":"confirmed","systemRole":"ROLE_USER"}} 55 | ``` 56 | 57 | Optional parameters to specify a first name and/or last name for the user record are also available. 58 | 59 | ``` 60 | # ./create_user.sh -u msmith@example.local -p MsmithInitPasswd -f Mary -l Smith 61 | {"user":{"termsAndConditions":true, ... 
,"username":"msmith@example.local","dateCreated":1536878724933,"status":"confirmed","systemRole":"ROLE_USER","firstName":"Mary","lastName":"Smith"}} 62 | ``` 63 | 64 | To disable the ability to create users via the API: 65 | 66 | ``` 67 | # ./create_user.sh -d 68 | { 69 | "apiPermissionSettings": { 70 | "allowApiUserCreation": false, 71 | "version": 2 72 | } 73 | } 74 | 75 | # ./create_user.sh -u failure@example.local -p ThisWontWork 76 | {"errors":[{"reason":"Cannot add user","message":"User API creation is not enabled"}]} 77 | ``` 78 | 79 | To re-enable the ability to create users via the API: 80 | 81 | ``` 82 | # ./create_user.sh -e 83 | { 84 | "apiPermissionSettings": { 85 | "allowApiUserCreation": true, 86 | "version": 3 87 | } 88 | } 89 | ``` 90 | 91 | # Change Default User Role in Team via API 92 | 93 | The `update_default_user_role.sh` helper script in this directory will assist you in hitting the correct API endpoints to: 94 | 95 | 1. Display information about the available user roles and teams 96 | 2. Change the user role assigned by default to users in a team 97 | 98 | To prepare, modify `env.sh` to set the required values for the `API_TOKEN` of the Admin user, the URL for accessing the Sysdig platform API (which will be the same URL that your users access for the Sysdig Monitor application). 
99 | 100 | # Usage examples: 101 | 102 | To see usage information: 103 | 104 | ``` 105 | # ./update_default_user_role.sh --help 106 | Usage: ./update_default_user_role.sh [OPTIONS] 107 | 108 | Update the default user role for the specified team 109 | 110 | If no OPTION is specified, available user roles and teams are displayed 111 | 112 | General options: 113 | -h | --help Print this Usage output 114 | 115 | Options for updating a team: 116 | -t | --team Team name 117 | -r | --role Default user 118 | ``` 119 | 120 | To display information about user roles and teams: 121 | 122 | ``` 123 | # ./update_default_user_role.sh User roles: 124 | 125 | Team Manager ROLE_TEAM_MANAGER 126 | Advanced User ROLE_TEAM_EDIT 127 | Standard User ROLE_TEAM_STANDARD 128 | View only ROLE_TEAM_READ 129 | Service Manager ROLE_TEAM_SERVICE_MANAGER 130 | 131 | Team names and current default user roles: 132 | 133 | Monitor Operations ROLE_TEAM_EDIT 134 | Secure Operations ROLE_TEAM_EDIT 135 | Second team ROLE_TEAM_EDIT 136 | Third team ROLE_TEAM_EDIT 137 | ``` 138 | 139 | To update the default user role for a given team 140 | ``` 141 | # ./update_default_user_role.sh -t "Secure Operations" -r ROLE_TEAM_STANDARD 142 | { 143 | "team": { 144 | "userRoles": [], 145 | "version": 87, 146 | "origin": "SYSDIG", 147 | "description": "Immutable Secure team with full visibility", 148 | "show": "host", 149 | "customerId": 1, 150 | "theme": "#7BB0B2", 151 | "products": [ 152 | "SDS" 153 | ], 154 | "entryPoint": { 155 | "module": "Explore" 156 | }, 157 | "dateCreated": 1591191680000, 158 | "lastUpdated": 1591298554000, 159 | "defaultTeamRole": "ROLE_TEAM_STANDARD", 160 | "immutable": true, 161 | "canUseSysdigCapture": true, 162 | "canUseCustomEvents": true, 163 | "canUseAwsMetrics": true, 164 | "canUseBeaconMetrics": true, 165 | "userCount": 2, 166 | "name": "Secure Operations", 167 | "properties": {}, 168 | "id": 2, 169 | "default": true 170 | } 171 | } 172 | ``` 173 | 
-------------------------------------------------------------------------------- /user_creation/create_user.sh: --------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# create_user.sh - create Sysdig platform user records via the API, or toggle
# whether API-based user creation is permitted.
#
# Requires ./env.sh to define API_TOKEN, URL, CUSTOMER_ID, CURL_OPTS and
# JSON_FILTER (see the README in this directory).
set -uo pipefail

# Normalise short/long options up front with getopt.
OPTS=$(getopt -o edu:f:l:p:h --long enable,disable,username:,firstname:,lastname:,password:,help -n 'parse-options' -- "$@")
if [ $? != 0 ] ; then
  echo "Failed parsing options." >&2
  exit 1
fi

ENV="./env.sh"    # settings file sourced below
ENABLE=false      # -e | --enable
DISABLE=false     # -d | --disable
USERNAME=""       # -u | --username
FIRSTNAME=""      # -f | --firstname (optional)
LASTNAME=""       # -l | --lastname (optional)
PASSWORD=""       # -p | --password
HELP=false        # -h | --help

eval set -- "$OPTS"

# Print usage information and exit non-zero; also used as the response to any
# invalid option combination.
print_usage() {
  echo "Usage: ./create_user.sh [OPTION]"
  echo
  echo "Create a user record, or change permissions for API-based user creation"
  echo
  echo "If no OPTION is specified, the current API User Creation settings are printed"
  echo
  echo "General options:"
  echo "  -h | --help             Print this Usage output"
  echo
  echo "Options for changing permissions:"
  echo "  -e | --enable           Enable API-based user creation (it's enabled by default)"
  echo "  -d | --disable          Disable API-based user creation"
  echo
  echo "Options for creating a user record:"
  echo "  -u | --username         Username for the user record to create"
  echo "  -p | --password         Password for the user record to create"
  echo "  -f | --firstname        (optional) First name for the user record to create"
  echo "  -l | --lastname         (optional) Last name for the user record to create"
  exit 1
}

while true; do
  case "$1" in
    -e | --enable ) ENABLE=true; shift ;;
    -d | --disable ) DISABLE=true; shift ;;
    -u | --username ) USERNAME="$2"; shift; shift ;;
    -p | --password ) PASSWORD="$2"; shift; shift ;;
    -f | --firstname ) FIRSTNAME="$2"; shift; shift ;;
    -l | --lastname ) LASTNAME="$2"; shift; shift ;;
    -h | --help ) HELP=true; shift ;;
    -- ) shift; break ;;
    * ) break ;;
  esac
done

if [ "$HELP" = true ] ; then
  print_usage
fi

if [ $# -gt 0 ] ; then
  echo "Excess command-line arguments detected. Exiting."
  echo
  print_usage
fi

# Pull in API_TOKEN / URL / CUSTOMER_ID / CURL_OPTS / JSON_FILTER.
if [ -e "$ENV" ] ; then
  # shellcheck source=/dev/null
  source "$ENV"
else
  echo "File not found: $ENV"
  echo "See the README for details on populating this file with your settings"
  echo "(https://github.com/draios/sysdig-cloud-scripts/blob/master/user_creation/README.md)"
  exit 1
fi

if [[ "$ENABLE" = true || "$DISABLE" = true ]] ; then
  if [[ -n "$USERNAME" || -n "$PASSWORD" || -n "$FIRSTNAME" || -n "$LASTNAME" ]] ; then
    # Permission toggles cannot be combined with user-creation options.
    print_usage
  elif [[ "$ENABLE" = true && "$DISABLE" = true ]] ; then
    print_usage
  else
    if [[ "$ENABLE" = true ]] ; then
      VALUE="true"
    else
      VALUE="false"
    fi
    # CURL_OPTS and JSON_FILTER are intentionally unquoted: each carries
    # multiple words that must undergo word-splitting.
    curl ${CURL_OPTS} \
      -H "Authorization: Bearer $API_TOKEN" \
      -H "Content-Type: application/json; charset=UTF-8" \
      -X POST \
      --data-binary '{"allowApiUserCreation":"'"${VALUE}"'"}' \
      "$URL/api/admin/customer/${CUSTOMER_ID}/apiPermissionSettings" | ${JSON_FILTER}
    exit $?
  fi

elif [ -n "$USERNAME" ] ; then
  if [[ "$ENABLE" = true || "$DISABLE" = true || -z "$PASSWORD" ]] ; then
    # A password is mandatory when creating a user.
    print_usage
  else
    # Assemble the user-creation payload piecewise; first/last name are
    # optional fields.
    JSON='{"username": "'"${USERNAME}"'","password":"'"${PASSWORD}"'","customer":{"id":"'${CUSTOMER_ID}'"}'
    if [ -n "$FIRSTNAME" ] ; then
      JSON=${JSON}',"firstName": "'"${FIRSTNAME}"'"'
    fi
    if [ -n "$LASTNAME" ] ; then
      JSON=${JSON}',"lastName": "'"${LASTNAME}"'"'
    fi
    JSON=${JSON}'}'
    curl ${CURL_OPTS} \
      -H "Authorization: Bearer $API_TOKEN" \
      -H "Content-Type: application/json; charset=UTF-8" \
      -X POST \
      --data-binary "${JSON}" \
      "$URL/api/admin/user"
  fi

elif [[ -n "$PASSWORD" || -n "$FIRSTNAME" || -n "$LASTNAME" ]] ; then
  # Name/password options are meaningless without a username.
  print_usage

else
  # No options: print the current API user-creation permission settings.
  curl ${CURL_OPTS} \
    -H "Authorization: Bearer $API_TOKEN" \
    -X GET \
    "$URL/api/admin/customer/${CUSTOMER_ID}/apiPermissionSettings" | ${JSON_FILTER}
  exit $?
fi
-------------------------------------------------------------------------------- /user_creation/env.sh: --------------------------------------------------------------------------------
#!/usr/bin/env bash

#
# Set this to the Sysdig Monitor API Token value shown in the Sysdig UI under
# Settings->User Profile when logged in as the "Super" Admin User. For
# information on locating this user, see the following article:
#
# https://support.sysdig.com/hc/en-us/articles/115004951443-Locating-the-Super-Admin-User
#
export API_TOKEN="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"

#
# Set this to the URL through which you access your Sysdig application UI.
#
export URL="https://10.0.0.1"

#
# This should be set to your customer ID number, which will almost always be 1.
If 19 | # you are unsure, see the following article for instructions to verify: 20 | # https://support.sysdig.com/hc/en-us/articles/115005848823-Your-Customer-Number 21 | # 22 | export CUSTOMER_ID="1" 23 | 24 | # 25 | # Set options used in other scripts that invoke curl. We've set these to what we think 26 | # are sensible defaults: 27 | # 28 | # -s 29 | # Silent mode, to make outputs brief. If you're debugging and want verbose outputs, 30 | # you might want to change this to -v. 31 | # 32 | # -k 33 | # Leave this set to "-k" to allow curl to connect to your Sysdig API even if a self- 34 | # signed certificate is in use (the default in a Sysdig software platform install). 35 | # 36 | # -w \n 37 | # Print a newline after curl prints responses. This will make the Sysdig platform's 38 | # JSON responses easier to read. 39 | # 40 | export CURL_OPTS="-s -k -w \n" 41 | 42 | # 43 | # If Python is installed on the host where you're running these helper scripts, this 44 | # will enable some extra features like pretty printing of JSON output and checking if 45 | # JSON inputs are syntactically valid. If Python is not installed, it uses a no-op 46 | # "cat" instead and you'll get unformatted output and you'll get HTTP error codes 47 | # if you try to POST invalid JSON. 48 | # 49 | if hash python 2>/dev/null ; then 50 | export JSON_FILTER="python -m json.tool" 51 | else 52 | export JSON_FILTER="cat" 53 | fi 54 | -------------------------------------------------------------------------------- /user_creation/update_default_user_role.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -uo pipefail 3 | 4 | OPTS=$(getopt -o r:t:h --long role:,team:,help -n 'parse-options' -- "$@") 5 | 6 | if [ $? != 0 ] ; then 7 | echo "Failed parsing options." 
>&2 8 | exit 1 9 | fi 10 | 11 | ENV="./env.sh" 12 | #TODO enumerate values 13 | TEAM_NAME="" 14 | USER_ROLE="" 15 | HELP=false 16 | 17 | eval set -- "$OPTS" 18 | 19 | function print_usage() { 20 | echo "Usage: ./update_default_user_role.sh [OPTIONS]" 21 | echo 22 | echo "Update the default user role for the specified team" 23 | echo 24 | echo "If no OPTION is specified, available user roles and teams are displayed" 25 | echo 26 | echo "General options:" 27 | echo " -h | --help Print this Usage output" 28 | echo 29 | echo "Options for updating a team:" 30 | echo " -t | --team Team name" 31 | echo " -r | --role Default user" 32 | exit 1 33 | } 34 | 35 | while true; do 36 | case "$1" in 37 | -t | --team ) TEAM_NAME="$2"; shift; shift ;; 38 | -r | --role ) USER_ROLE="$2"; shift; shift ;; 39 | -h | --help ) HELP=true; shift ;; 40 | -- ) shift; break ;; 41 | * ) break ;; 42 | esac 43 | done 44 | 45 | if [ $HELP = true ] ; then 46 | print_usage 47 | fi 48 | 49 | if [ $# -gt 0 ] ; then 50 | echo "Excess command-line arguments detected. Exiting." 
51 | echo 52 | print_usage 53 | fi 54 | 55 | if [ -e "$ENV" ] ; then 56 | source "$ENV" 57 | else 58 | echo "File not found: $ENV" 59 | echo "See the README for details on populating this file with your settings" 60 | echo "(https://github.com/draios/sysdig-cloud-scripts/blob/master/user_creation/README.md)" 61 | exit 1 62 | fi 63 | 64 | function print_roles { 65 | INFO=$(curl ${CURL_OPTS} \ 66 | -H "Authorization: Bearer $API_TOKEN" \ 67 | -H "Content-Type: application/json; charset=UTF-8" \ 68 | -X GET \ 69 | "$URL/api/customer/roles" | jq -r '.[] | "\(.displayName), \(.role)"') 70 | echo -e "User roles:\n" 71 | column -t -s ',' <<< "${INFO}" 72 | } 73 | 74 | function print_info { 75 | print_roles 76 | echo "" 77 | INFO=$(curl ${CURL_OPTS} \ 78 | -H "Authorization: Bearer $API_TOKEN" \ 79 | -H "Content-Type: application/json; charset=UTF-8" \ 80 | -X GET \ 81 | "$URL/api/teams/light" | jq -r '.teams | .[] | "\(.name), \(.defaultTeamRole)"') 82 | echo -e "Team names and current default user roles:\n" 83 | column -t -s ',' <<< "${INFO}" 84 | 85 | } 86 | 87 | function get_team { 88 | TEAM_NAME="$1" 89 | curl ${CURL_OPTS} \ 90 | -H "Authorization: Bearer $API_TOKEN" \ 91 | -H "Content-Type: application/json; charset=UTF-8" \ 92 | -X GET \ 93 | "$URL/api/teams/light" | jq -r --arg TEAM_NAME "${TEAM_NAME}" \ 94 | '.teams | .[] | select(.name == $TEAM_NAME)' 95 | } 96 | 97 | function update_team { 98 | TEAM="$1" 99 | USER_ROLE="$2" 100 | 101 | TEAM_ID=$(jq '.id' <<< "${TEAM}") 102 | TEAM_UPDATED=$(jq --arg ROLE "${USER_ROLE}" '.defaultTeamRole |= $ROLE' <<< "${TEAM}") 103 | curl --fail ${CURL_OPTS} \ 104 | -H "Authorization: Bearer $API_TOKEN" \ 105 | -H "Content-Type: application/json; charset=UTF-8" \ 106 | -X PUT \ 107 | --data-binary "${TEAM_UPDATED}" \ 108 | "$URL/api/teams/${TEAM_ID}" | jq 109 | } 110 | 111 | if ! command -v column > /dev/null || ! 
command -v jq > /dev/null; then 112 | echo "This script requires 'jq' and 'column' commands to run properly" 113 | exit 1 114 | fi 115 | 116 | if [ -z "${TEAM_NAME}" ] ; then 117 | print_info 118 | exit 0 119 | fi 120 | 121 | if [ -z "${USER_ROLE}" ] ; then 122 | echo -e "Please specify a role with --role option.\n" 123 | print_roles 124 | exit 0 125 | fi 126 | 127 | TEAM=$(get_team "${TEAM_NAME}") 128 | update_team "${TEAM}" "${USER_ROLE}" 129 | exit $? 130 | --------------------------------------------------------------------------------