├── .github └── workflows │ ├── after-release.yml │ └── release.yml ├── .gitignore ├── BMCFootprintsV11 ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── native │ └── ogAfter.sh ├── opsgenie-bmcfootprintsv11 │ └── send2opsgenie.go └── scripts │ └── actionExecutor.py ├── BMCFootprintsV12 ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── native │ └── ogAfter.sh ├── opsgenie-bmcfootprintsv12 │ └── send2opsgenie.go └── scripts │ └── actionExecutor.py ├── BMCRemedy ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── opsgenie-bmcremedy │ ├── OpsGenieFilter.def │ └── send2opsgenie.go └── scripts │ └── actionExecutor.py ├── Cherwell ├── conf │ └── config.json ├── opsgenie.bp └── scripts │ └── actionExecutor.py ├── DynatraceAppMon ├── OpsGenie_Plugin │ ├── META-INF │ │ └── MANIFEST.MF │ ├── build.properties │ ├── lib │ │ └── json-simple-1.1.1.jar │ ├── plugin.xml │ └── src │ │ └── com │ │ └── dynatrace │ │ └── opsgenie │ │ └── OpsGeniePlugin.java ├── com.opsgenie.plugin_1.0.0.jar ├── conf │ └── config.json └── scripts │ └── actionExecutor.py ├── Icinga ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── native │ └── ogAfter.sh ├── opsgenie-icinga │ ├── opsgenie.cfg │ └── send2opsgenie.go └── scripts │ └── actionExecutor.py ├── Icinga2 ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── native │ └── ogAfter.sh ├── opsgenie-icinga2 │ ├── opsgenie.conf │ └── send2opsgenie.go └── scripts │ └── actionExecutor.py ├── Jira ├── JIRA_Lambda_Script │ └── ogLambdaJira.js ├── conf │ └── config.json └── scripts │ └── actionExecutor.py ├── JiraServiceDesk ├── conf │ └── config.json └── scripts │ └── actionExecutor.py ├── LICENSE ├── LibreNMS ├── conf │ └── config.json ├── scripts │ └── actionExecutor.py └── transport.opsgenie.php ├── Marid └── groovy-executer.zip ├── Nagios ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── native │ └── ogAfter.sh ├── opsgenie-nagios │ ├── opsgenie.cfg │ └── send2opsgenie.go └── 
scripts │ └── actionExecutor.py ├── NagiosXI ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── native │ └── ogAfter.sh ├── opsgenie-nagiosxi │ └── send2opsgenie.go └── scripts │ └── actionExecutor.py ├── OEC ├── conf │ └── config.json └── scripts │ ├── actionExecutor.py │ └── http.py ├── OP5 ├── conf │ ├── config.json │ └── opsgenie-integration.conf.part ├── native │ └── ogAfter.sh ├── opsgenie-op5 │ ├── opsgenie.cfg │ └── send2opsgenie.go └── scripts │ └── actionExecutor.py ├── PRTG ├── Postdata.txt ├── Postdatav2.txt ├── conf │ └── config.json └── scripts │ └── actionExecutor.py ├── README.md ├── Solarwinds ├── conf │ └── config.json ├── resetActionBody.txt ├── scripts │ └── actionExecutor.py └── triggerActionBody.txt ├── SolarwindsMSPNCentral ├── conf │ └── config.json └── scripts │ └── actionExecutor.py ├── SolarwindsWebHelpdesk ├── conf │ └── config.json └── scripts │ └── actionExecutor.py ├── Splunk ├── conf │ └── config.json └── scripts │ └── actionExecutor.py ├── Trackit ├── conf │ └── config.json └── scripts │ ├── actionExecutor.py │ └── createTrackitWorkflow.py ├── Xmpp ├── conf │ └── config.json └── scripts │ └── actionExecutor.py ├── Zabbix ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── native │ ├── ogAfter.sh │ └── ogBefore.sh ├── opsgenie-zabbix │ ├── actionCommand.txt │ └── send2opsgenie.go └── scripts │ ├── actionExecutor.py │ ├── actionExecutorForZabbix4.py │ └── actionExecutorForZabbix6.py ├── Zendesk ├── conf │ └── config.json ├── scripts │ └── actionExecutor.py └── triggerInstructions.txt ├── Zenoss ├── conf │ ├── config.json │ └── opsgenie-integration.conf ├── native │ └── ogAfter.sh ├── opsgenie-zenoss │ └── send2opsgenie.go └── scripts │ └── actionExecutor.py └── release └── oec-builder ├── oec-deb ├── DEBIAN │ ├── control │ ├── postinst │ └── preinst ├── Dockerfile ├── build └── etc │ └── systemd │ └── system │ └── oec.service ├── oec-linux ├── Dockerfile └── build ├── oec-rpm ├── Dockerfile ├── SPECS │ ├── 
oec-rhel6.spec │ └── oec.spec ├── build ├── oec.service └── rhel6-service │ └── oec ├── oec-win32 ├── Dockerfile ├── build └── oecService.json.example ├── oec-win64 ├── Dockerfile ├── build └── oecService.json.example ├── oecScriptsVersion.json └── readmeIterationCount /.github/workflows/after-release.yml: -------------------------------------------------------------------------------- 1 | name: Update README.md 2 | on: 3 | release: 4 | types: [published] 5 | jobs: 6 | update-readme: 7 | name: Update version on README.md 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Set name and version of release to env 11 | id: setup 12 | run: | 13 | IFS='-' read -r name version <<< "$(basename $GITHUB_REF)" 14 | echo ::set-output name=INTEGRATION_NAME::$name 15 | echo ::set-output name=INTEGRATION_VERSION::$version 16 | - name: Checkout the repository with branch 17 | uses: actions/checkout@v4 18 | - name: Set branch name to env 19 | id: prepare-branch-name 20 | run: | 21 | branchName=after-release-$(cat release/oec-builder/readmeIterationCount) 22 | echo branchName:$branchName 23 | echo ::set-output name=BRANCH_NAME::$(echo $branchName) 24 | - name: Commit and push changes 25 | env: 26 | BRANCH_NAME: ${{ steps.prepare-branch-name.outputs.branch_name }} 27 | INTEGRATION_NAME: ${{ steps.setup.outputs.integration_name }} 28 | INTEGRATION_VERSION: ${{ steps.setup.outputs.integration_version }} 29 | run: | 30 | git config --global user.name 'Github Actions' 31 | git config --global user.email 'support@atlassian.com' 32 | git checkout ${{ env.BRANCH_NAME }} 2>/dev/null || git checkout -b ${{ env.BRANCH_NAME }} 33 | if (($(git ls-remote origin ${{ env.BRANCH_NAME }} | wc -l) > 0)); 34 | then 35 | echo "Branch already exists will rebase..." 36 | git pull origin ${{ env.BRANCH_NAME }} --rebase 37 | else 38 | echo "Increase iteration count...." 
39 | iterationCount=$(($(cat release/oec-builder/readmeIterationCount) + 1)) 40 | echo $iterationCount > release/oec-builder/readmeIterationCount 41 | fi 42 | echo "Update readme file..." 43 | sed -i 's/\(tag\/${{ env.INTEGRATION_NAME }}-\).*)/\1${{ env.INTEGRATION_VERSION }})/gi' README.md 44 | grep --color -ie "${{ env.INTEGRATION_NAME }}" README.md 45 | git commit -am "Update README.md after release(s)" 46 | git push origin ${{ env.BRANCH_NAME }} 47 | - name: Create pull request if not exist 48 | env: 49 | BRANCH_NAME: ${{ steps.prepare-branch-name.outputs.branch_name }} 50 | uses: repo-sync/pull-request@v2 51 | with: 52 | source_branch: ${{ env.BRANCH_NAME }} 53 | pr_title: "Update README.md after release(s)" 54 | destination_branch: "master" 55 | pr_reviewer: ${{ github.actor }} 56 | github_token: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/* 2 | -------------------------------------------------------------------------------- /BMCFootprintsV11/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "url": "", 8 | "username": "", 9 | "password": "", 10 | "incidentWorkspaceId": "", 11 | "problemWorkspaceId": "" 12 | }, 13 | "actionMappings": { 14 | "createIncident": { 15 | "filepath": "", 16 | "sourceType": "", 17 | "env": [], 18 | "stdout": "" 19 | }, 20 | "createProblem": { 21 | "filepath": "", 22 | "sourceType": "", 23 | "env": [], 24 | "stdout": "" 25 | }, 26 | "updateDescription": { 27 | "filepath": "", 28 | "sourceType": "", 29 | "env": [], 30 | "stdout": "" 31 | }, 32 | "updatePriority": { 33 | "filepath": "", 34 | "sourceType": "", 35 | "env": [], 36 | "stdout": "" 37 | } 38 | }, 39 | "pollerConf": { 40 | 
"pollingWaitIntervalInMillis": 100, 41 | "visibilityTimeoutInSec": 30, 42 | "maxNumberOfMessages": 10 43 | }, 44 | "poolConf": { 45 | "maxNumberOfWorker": 12, 46 | "minNumberOfWorker": 4, 47 | "monitoringPeriodInMillis": 15000, 48 | "keepAliveTimeInMillis": 6000, 49 | "queueSize": 0 50 | } 51 | } -------------------------------------------------------------------------------- /BMCFootprintsV11/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | #################################### BMCFOOTPRINTS CONFIGURATION ################################### 2 | bmcFootPrints2opsgenie.logger=warning 3 | logPath=/var/log/opsgenie/send2opsgenie.log 4 | # If your OS is Windows then you can set logPath=C:\opsgenie-integration\bmcFootPrints2opsgenie.log 5 | 6 | ####################################### PROXY CONFIGURATION FOR BMC FOOTPRINTS ############################################ 7 | bmcFootPrints2opsgenie.http.proxy.enabled=false 8 | bmcFootPrints2opsgenie.http.proxy.port=11111 9 | bmcFootPrints2opsgenie.http.proxy.host=localhost 10 | bmcFootPrints2opsgenie.http.proxy.protocol=http 11 | #bmcFootPrints2opsgenie.http.proxy.username=admin 12 | #bmcFootPrints2opsgenie.http.proxy.password=changeme -------------------------------------------------------------------------------- /BMCFootprintsV11/native/ogAfter.sh: -------------------------------------------------------------------------------- 1 | 2 | chmod 755 /home/opsgenie/oec/opsgenie-bmcfootprintsv11/send2opsgenie 3 | -------------------------------------------------------------------------------- /BMCFootprintsV12/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "url": "", 8 | "username": "", 9 | "password": "", 10 | "workspaceName": "" 11 | }, 12 | "actionMappings": { 13 | 
"createIncident": { 14 | "filepath": "", 15 | "sourceType": "", 16 | "env": [], 17 | "stdout": "" 18 | }, 19 | "createProblem": { 20 | "filepath": "", 21 | "sourceType": "", 22 | "env": [], 23 | "stdout": "" 24 | }, 25 | "updateDescription": { 26 | "filepath": "", 27 | "sourceType": "", 28 | "env": [], 29 | "stdout": "" 30 | }, 31 | "updatePriority": { 32 | "filepath": "", 33 | "sourceType": "", 34 | "env": [], 35 | "stdout": "" 36 | } 37 | }, 38 | "pollerConf": { 39 | "pollingWaitIntervalInMillis": 100, 40 | "visibilityTimeoutInSec": 30, 41 | "maxNumberOfMessages": 10 42 | }, 43 | "poolConf": { 44 | "maxNumberOfWorker": 12, 45 | "minNumberOfWorker": 4, 46 | "monitoringPeriodInMillis": 15000, 47 | "keepAliveTimeInMillis": 6000, 48 | "queueSize": 0 49 | } 50 | } -------------------------------------------------------------------------------- /BMCFootprintsV12/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | #################################### BMCFOOTPRINTS CONFIGURATION ################################### 2 | bmcFootPrints2opsgenie.logger=warning 3 | logPath=/var/log/opsgenie/send2opsgenie.log 4 | # If your OS is Windows then you can set logPath=C:\opsgenie-integration\bmcFootPrints2opsgenie.log 5 | 6 | ####################################### PROXY CONFIGURATION FOR BMC FOOTPRINTS ############################################ 7 | bmcFootPrints2opsgenie.http.proxy.enabled=false 8 | bmcFootPrints2opsgenie.http.proxy.port=11111 9 | bmcFootPrints2opsgenie.http.proxy.host=localhost 10 | bmcFootPrints2opsgenie.http.proxy.protocol=http 11 | #bmcFootPrints2opsgenie.http.proxy.username=admin 12 | #bmcFootPrints2opsgenie.http.proxy.password=changeme 13 | -------------------------------------------------------------------------------- /BMCFootprintsV12/native/ogAfter.sh: -------------------------------------------------------------------------------- 1 | 2 | chmod 755 
/home/opsgenie/oec/opsgenie-bmcfootprintsv12/send2opsgenie 3 | -------------------------------------------------------------------------------- /BMCRemedy/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "username": "", 8 | "password": "", 9 | "midtierServerUrl": "", 10 | "serverName": "" 11 | }, 12 | "actionMappings": { 13 | "createIncident": { 14 | "filepath": "", 15 | "sourceType": "", 16 | "env": [], 17 | "stdout": "" 18 | }, 19 | "addWorkInfo": { 20 | "filepath": "", 21 | "sourceType": "", 22 | "env": [], 23 | "stdout": "" 24 | }, 25 | "closeIncident": { 26 | "filepath": "", 27 | "sourceType": "", 28 | "env": [], 29 | "stdout": "" 30 | } 31 | }, 32 | "pollerConf": { 33 | "pollingWaitIntervalInMillis": 100, 34 | "visibilityTimeoutInSec": 30, 35 | "maxNumberOfMessages": 10 36 | }, 37 | "poolConf": { 38 | "maxNumberOfWorker": 12, 39 | "minNumberOfWorker": 4, 40 | "monitoringPeriodInMillis": 15000, 41 | "keepAliveTimeInMillis": 6000, 42 | "queueSize": 0 43 | } 44 | } -------------------------------------------------------------------------------- /BMCRemedy/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | ####################################### BMC REMEDY CONFIGURATION ####################################### 2 | bmcRemedy2opsgenie.logger=warning 3 | 4 | ####################################### PROXY CONFIGURATION FOR BMC REMEDY ############################################ 5 | bmcRemedy2opsgenie.http.proxy.enabled=false 6 | bmcRemedy2opsgenie.http.proxy.port=11111 7 | bmcRemedy2opsgenie.http.proxy.host=localhost 8 | bmcRemedy2opsgenie.http.proxy.protocol=http 9 | #bmcRemedy2opsgenie.http.proxy.username=admin 10 | #bmcRemedy2opsgenie.http.proxy.password=changeme 11 | 
-------------------------------------------------------------------------------- /BMCRemedy/opsgenie-bmcremedy/OpsGenieFilter.def: -------------------------------------------------------------------------------- 1 | char-set: UTF-8 2 | # 3 | # File exported Fri Aug 18 12:32:18 UTC 2017 4 | # 5 | begin filter 6 | name : HPD:Help Desk OpsGenie Integration 7 | timestamp : 1503059514 8 | owner : appadmin 9 | last-changed : admin 10 | filter-op : 6 11 | enable : 1 12 | filter-order : 500 13 | wk-conn-type : 1 14 | schema-name : HPD:Help Desk 15 | export-version : 12 16 | action { 17 | command : C:\opsgenie\oec\bmcremedy\send2opsgenie.exe --incident-id "$1000000161$" --co 18 | command : mpany "$1000000001$" --customer "$303530000$" --contact-first-name "$1000005783$" --contact-last-name "$1000005782$" --contact-company "$1000000082$" --c 19 | command : ontact-sensitivity "$1000000027$" --contact-client-type "$1000000022$" --notes "$1000000151$" --template "$303558600$" --summary "$1000000000$" --service 20 | command : "$303544300$" --CI "$303544200$" --target-date "$1000005261$" --impact "$1000000163$" --urgency "$1000000162$" --priority "$1000000164$" --incident-type 21 | command : "$1000000099$" --reported-source "$1000000215$" --assigned-group "$1000000217$" --assignee "$1000000218$" --vendor-group "$1000003663$" --vendor-ticket- 22 | command : number "$1000000652$" --status "$7$" --status-reason "$1000000881$" --resolution "$1000000156$" 23 | } 24 | object-prop : 3\90016\4\1\1\90015\2\4\90002\4\26\Remedy Incident Management\ 25 | errhandler-opt : 0 26 | end 27 | -------------------------------------------------------------------------------- /Cherwell/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "username": "", 8 | "password": "", 9 | "clientId": "", 10 | "apiUrl": "" 11 | }, 
12 | "actionMappings": { 13 | "addJournal": { 14 | "filepath": "", 15 | "sourceType": "", 16 | "env": [], 17 | "stdout": "" 18 | }, 19 | "createIncident": { 20 | "filepath": "", 21 | "sourceType": "", 22 | "env": [], 23 | "stdout": "" 24 | }, 25 | "inProgressIncident": { 26 | "filepath": "", 27 | "sourceType": "", 28 | "env": [], 29 | "stdout": "" 30 | }, 31 | "resolveIncident": { 32 | "filepath": "", 33 | "sourceType": "", 34 | "env": [], 35 | "stdout": "" 36 | } 37 | }, 38 | "pollerConf": { 39 | "pollingWaitIntervalInMillis": 100, 40 | "visibilityTimeoutInSec": 30, 41 | "maxNumberOfMessages": 10 42 | }, 43 | "poolConf": { 44 | "maxNumberOfWorker": 12, 45 | "minNumberOfWorker": 4, 46 | "monitoringPeriodInMillis": 15000, 47 | "keepAliveTimeInMillis": 6000, 48 | "queueSize": 0 49 | } 50 | } -------------------------------------------------------------------------------- /DynatraceAppMon/OpsGenie_Plugin/META-INF/MANIFEST.MF: -------------------------------------------------------------------------------- 1 | Manifest-Version: 1.0 2 | Bundle-SymbolicName: com.opsgenie.plugin;singleton:=true 3 | Bundle-Name: OpsGenie Plugin 4 | Bundle-Version: 1.0.0 5 | Bundle-ClassPath: .,lib/json-simple-1.1.1.jar 6 | Require-Bundle: com.dynatrace.diagnostics.sdk 7 | Bundle-ManifestVersion: 2 8 | Bundle-Vendor: OpsGenie 9 | 10 | -------------------------------------------------------------------------------- /DynatraceAppMon/OpsGenie_Plugin/build.properties: -------------------------------------------------------------------------------- 1 | bin.includes=META-INF/,\ 2 | .,\ 3 | plugin.xml,\ 4 | lib/json-simple-1.1.1.jar 5 | source..=src/ -------------------------------------------------------------------------------- /DynatraceAppMon/OpsGenie_Plugin/lib/json-simple-1.1.1.jar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/opsgenie/oec-scripts/3fa918dd1c3cfecaac475c75fd62f5dde9539436/DynatraceAppMon/OpsGenie_Plugin/lib/json-simple-1.1.1.jar -------------------------------------------------------------------------------- /DynatraceAppMon/OpsGenie_Plugin/plugin.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 15 | 16 | 17 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /DynatraceAppMon/com.opsgenie.plugin_1.0.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opsgenie/oec-scripts/3fa918dd1c3cfecaac475c75fd62f5dde9539436/DynatraceAppMon/com.opsgenie.plugin_1.0.0.jar -------------------------------------------------------------------------------- /DynatraceAppMon/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "userName": "", 8 | "password": "", 9 | "profileName": "", 10 | "url": "" 11 | }, 12 | "actionMappings": { 13 | "confirmIncident": { 14 | "filepath": "", 15 | "sourceType": "", 16 | "env": [], 17 | "stdout": "" 18 | }, 19 | "inProgressIncident": { 20 | "filepath": "", 21 | "sourceType": "", 22 | "env": [], 23 | "stdout": "" 24 | } 25 | }, 26 | "pollerConf": { 27 | "pollingWaitIntervalInMillis": 100, 28 | "visibilityTimeoutInSec": 30, 29 | "maxNumberOfMessages": 10 30 | }, 31 | "poolConf": { 32 | "maxNumberOfWorker": 12, 33 | "minNumberOfWorker": 4, 34 | "monitoringPeriodInMillis": 15000, 35 | "keepAliveTimeInMillis": 6000, 36 | "queueSize": 0 37 | } 38 | } -------------------------------------------------------------------------------- /DynatraceAppMon/scripts/actionExecutor.py: 
-------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | from xml.sax.saxutils import escape 6 | 7 | import requests 8 | from requests.auth import HTTPBasicAuth 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('-payload', '--payload', help='Payload from queue', required=True) 12 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 13 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 14 | parser.add_argument('-filepath', '--filepath', help='Filepath', required=True) 15 | parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True) 16 | parser.add_argument('-userName', '--userName', help='Username', required=False) 17 | parser.add_argument('-password', '--password', help='Password', required=False) 18 | parser.add_argument('-url', '--url', help='Url', required=False) 19 | parser.add_argument('-profileName', '--profileName', help='Profile Name', required=False) 20 | parser.add_argument('-timeout', '--timeout', help='Timeout', required=False) 21 | 22 | args = vars(parser.parse_args()) 23 | 24 | logging.basicConfig(stream=sys.stdout, level=args['logLevel']) 25 | 26 | queue_message_string = args['payload'] 27 | queue_message = json.loads(queue_message_string) 28 | 29 | 30 | def create_xml(mapped_action, incident_id): 31 | state = '' 32 | 33 | if mapped_action == "confirmIncident": 34 | state = "Confirmed" 35 | elif mapped_action == "inProgressIncident": 36 | state = "InProgress" 37 | 38 | data = {'id': escape(incident_id), 'state': escape(state)} 39 | 40 | xml_format = """ 41 | 42 | %(state)s 43 | """ 44 | 45 | return xml_format % data 46 | 47 | 48 | def parse_field(key, mandatory): 49 | variable = queue_message[key] 50 | if not variable.strip(): 51 | variable = args[key] 52 | if mandatory and not variable: 53 | logging.error(LOG_PREFIX + " Skipping 
action, Mandatory conf item '" + key + 54 | "' is missing. Check your configuration file.") 55 | raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 56 | "' is missing. Check your configuration file.") 57 | return variable 58 | 59 | 60 | def main(): 61 | global LOG_PREFIX 62 | 63 | alert_id = queue_message["alert"]["alertId"] 64 | mapped_action = queue_message["mappedActionV2"]["name"] 65 | 66 | LOG_PREFIX = "[" + mapped_action + "]:" 67 | logging.info(LOG_PREFIX + " Will execute " + mapped_action + " for alertId " + alert_id) 68 | 69 | username = parse_field('userName', True) 70 | password = parse_field('password', True) 71 | 72 | url = parse_field('url', True) 73 | profile_name = parse_field('profileName', True) 74 | 75 | incident_rule = queue_message["incidentRule"] 76 | incident_id = queue_message["alias"] 77 | timeout = args['timeout'] 78 | if not timeout: 79 | timeout = 30000 80 | else: 81 | timeout = int(timeout) 82 | 83 | logging.debug("Url: " + url) 84 | logging.debug("Username: " + username) 85 | logging.debug("Profile Name: " + profile_name) 86 | logging.debug("Incident Rule: " + str(incident_rule)) 87 | logging.debug("Incident Id: " + str(incident_id)) 88 | 89 | content_params = create_xml(mapped_action, incident_id) 90 | 91 | result_url = url + "/rest/management/profiles/" + profile_name + "/incidentRules/" + incident_rule + "/incidents/" \ 92 | + incident_id 93 | 94 | logging.debug("URL: " + result_url) 95 | 96 | auth_token = HTTPBasicAuth(username, password) 97 | 98 | response = requests.put(result_url, content_params, headers={"Content-Type": "application/xml"}, auth=auth_token, 99 | timeout=timeout) 100 | 101 | if response: 102 | if response.status_code < 400: 103 | logging.info(LOG_PREFIX + " Successfully executed at DynatraceAppMon. 
") 104 | logging.debug( 105 | LOG_PREFIX + " DynatraceAppMon response: " + str(response.content) + " response code: " + str( 106 | response.status_code)) 107 | else: 108 | logging.error(LOG_PREFIX + " Could not execute at DynatraceAppMon; status code: " + str( 109 | response.status_code) + "response: " + str(response.content)) 110 | 111 | 112 | if __name__ == '__main__': 113 | main() 114 | -------------------------------------------------------------------------------- /Icinga/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "alert_histogram_image_url": "http://localhost/icinga/cgi-bin/histogram.cgi", 8 | "trends_image_url": "http://localhost/icinga/cgi-bin/trends.cgi", 9 | "command_url": "http://localhost/icinga/cgi-bin/cmd.cgi", 10 | "user": "icingaadmin", 11 | "password": "icingaadmin" 12 | }, 13 | "actionMappings": { 14 | "Create": { 15 | "filepath": "", 16 | "sourceType": "", 17 | "env": [], 18 | "stdout": "" 19 | }, 20 | "Acknowledge": { 21 | "filepath": "", 22 | "sourceType": "", 23 | "env": [], 24 | "stdout": "" 25 | }, 26 | "AddNote": { 27 | "filepath": "", 28 | "sourceType": "", 29 | "env": [ 30 | "e1=v1", 31 | "e2=v2" 32 | ], 33 | "stdout": "" 34 | }, 35 | "TakeOwnership": { 36 | "filepath": "", 37 | "sourceType": "", 38 | "env": [], 39 | "stdout": "" 40 | }, 41 | "AssignOwnership": { 42 | "filepath": "", 43 | "sourceType": "", 44 | "env": [], 45 | "stdout": "" 46 | }, 47 | "UnAcknowledge": { 48 | "filepath": "", 49 | "sourceType": "", 50 | "env": [], 51 | "stdout": "" 52 | } 53 | }, 54 | "pollerConf": { 55 | "pollingWaitIntervalInMillis": 100, 56 | "visibilityTimeoutInSec": 30, 57 | "maxNumberOfMessages": 10 58 | }, 59 | "poolConf": { 60 | "maxNumberOfWorker": 12, 61 | "minNumberOfWorker": 4, 62 | "monitoringPeriodInMillis": 15000, 63 | "keepAliveTimeInMillis": 6000, 
64 | "queueSize": 0 65 | } 66 | } -------------------------------------------------------------------------------- /Icinga/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | ################################### ICINGA2OPSGENIE CONFIGURATION ############################### 2 | #you can define log levels: warning, debug, info,error 3 | icinga2opsgenie.logger = warning 4 | icinga2opsgenie.timeout = 60 5 | logPath = /var/log/opsgenie/send2opsgenie.log 6 | #responders= 7 | #tags= 8 | 9 | 10 | ####################################### PROXY CONFIGURATION FOR ICINGA ############################################ 11 | icinga2opsgenie.http.proxy.enabled = false 12 | icinga2opsgenie.http.proxy.port = 11111 13 | icinga2opsgenie.http.proxy.host = localhost 14 | icinga2opsgenie.http.proxy.protocol = http 15 | #icinga2opsgenie.http.proxy.username=admin 16 | #icinga2opsgenie.http.proxy.password=changeme 17 | -------------------------------------------------------------------------------- /Icinga/native/ogAfter.sh: -------------------------------------------------------------------------------- 1 | 2 | chmod 755 /home/opsgenie/oec/opsgenie-icinga/send2opsgenie 3 | 4 | 5 | if id -u icinga >/dev/null 2>&1; then 6 | usermod -a -G opsgenie icinga 7 | chown -R icinga:opsgenie /var/log/opsgenie 8 | else 9 | echo "WARNING : icinga user does not exist. Please don't forget to add your icinga user to opsgenie group!" 10 | fi 11 | 12 | if [ -d "/usr/local/icinga/etc/objects" ]; then 13 | cp /etc/opsgenie/opsgenie.cfg /usr/local/icinga/etc/objects/ 14 | else 15 | echo "WARNING : Could not find your ICINGA_HOME directory. Please copy /etc/opsgenie/opsgenie.cfg file to your /etc/objects directory manually!" 
16 | fi -------------------------------------------------------------------------------- /Icinga/opsgenie-icinga/opsgenie.cfg: -------------------------------------------------------------------------------- 1 | define contact { 2 | contact_name opsgenie 3 | alias OpsGenie Contact 4 | service_notification_period 24x7 5 | host_notification_period 24x7 6 | service_notification_options c,r 7 | host_notification_options d,r 8 | service_notification_commands notify-service-by-opsgenie 9 | host_notification_commands notify-host-by-opsgenie 10 | } 11 | 12 | define command { 13 | command_name notify-service-by-opsgenie 14 | command_line /home/opsgenie/oec/opsgenie-icinga/send2opsgenie -entityType=service -t="$NOTIFICATIONTYPE$" -sc="$SERVICECHECKCOMMAND$" -ldt="$LONGDATETIME$" -hn="$HOSTNAME$" -hdn="$HOSTDISPLAYNAME$" -hal="$HOSTALIAS$" -haddr="$HOSTADDRESS$" -hs="$HOSTSTATE$" -hsi="$HOSTSTATEID$" -lhs="$LASTHOSTSTATE$" -lhsi="$LASTHOSTSTATEID$" -hst="$HOSTSTATETYPE$" -ha="$HOSTATTEMPT$" -mha="$MAXHOSTATTEMPTS$" -hei="$HOSTEVENTID$" -lhei="$LASTHOSTEVENTID$" -hpi="$HOSTPROBLEMID$" -lhpi="$LASTHOSTPROBLEMID$" -hl="$HOSTLATENCY$" -het="$HOSTEXECUTIONTIME$" -hd="$HOSTDURATION$" -hds="$HOSTDURATIONSEC$" -hdt="$HOSTDOWNTIME$" -hpc="$HOSTPERCENTCHANGE$" -hgn="$HOSTGROUPNAME$" -hgns="$HOSTGROUPNAMES$" -lhc="$LASTHOSTCHECK$" -lhsc="$LASTHOSTSTATECHANGE$" -lhu="$LASTHOSTUP$" -lhd="$LASTHOSTDOWN$" -lhur="$LASTHOSTUNREACHABLE$" -ho="$HOSTOUTPUT$" -lho="$LONGHOSTOUTPUT$" -hpd="$HOSTPERFDATA$" -s="$SERVICEDESC$" -sdn="$SERVICEDISPLAYNAME$" -ss="$SERVICESTATE$" -ssi="$SERVICESTATEID$" -lss="$LASTSERVICESTATE$" -lssi="$LASTSERVICESTATEID$" -sst="$SERVICESTATETYPE$" -sa="$SERVICEATTEMPT$" -msa="$MAXSERVICEATTEMPTS$" -siv="$SERVICEISVOLATILE$" -sei="$SERVICEEVENTID$" -lsei="$LASTSERVICEEVENTID$" -spi="$SERVICEPROBLEMID$" -lspi="$LASTSERVICEPROBLEMID$" -sl="$SERVICELATENCY$" -set="$SERVICEEXECUTIONTIME$" -sd="$SERVICEDURATION$" -sds="$SERVICEDURATIONSEC$" -sdt="$SERVICEDOWNTIME$" 
-spc="$SERVICEPERCENTCHANGE$" -sgn="$SERVICEGROUPNAME$" -sgns="$SERVICEGROUPNAMES$" -lsch="$LASTSERVICECHECK$" -lssc="$LASTSERVICESTATECHANGE$" -lsok="$LASTSERVICEOK$" -lsw="$LASTSERVICEWARNING$" -lsu="$LASTSERVICEUNKNOWN$" -lsc="$LASTSERVICECRITICAL$" -so="$SERVICEOUTPUT$" -lso="$LONGSERVICEOUTPUT$" -spd="$SERVICEPERFDATA$" 15 | } 16 | 17 | define command { 18 | command_name notify-host-by-opsgenie 19 | command_line /home/opsgenie/oec/opsgenie-icinga/send2opsgenie -entityType=host -t="$NOTIFICATIONTYPE$" -ldt="$LONGDATETIME$" -hn="$HOSTNAME$" -hdn="$HOSTDISPLAYNAME$" -hal="$HOSTALIAS$" -haddr="$HOSTADDRESS$" -hs="$HOSTSTATE$" -hsi="$HOSTSTATEID$" -lhs="$LASTHOSTSTATE$" -lhsi="$LASTHOSTSTATEID$" -hst="$HOSTSTATETYPE$" -ha="$HOSTATTEMPT$" -mha="$MAXHOSTATTEMPTS$" -hei="$HOSTEVENTID$" -lhei="$LASTHOSTEVENTID$" -hpi="$HOSTPROBLEMID$" -lhpi="$LASTHOSTPROBLEMID$" -hl="$HOSTLATENCY$" -het="$HOSTEXECUTIONTIME$" -hd="$HOSTDURATION$" -hds="$HOSTDURATIONSEC$" -hdt="$HOSTDOWNTIME$" -hpc="$HOSTPERCENTCHANGE$" -hgn="$HOSTGROUPNAME$" -hgns="$HOSTGROUPNAMES$" -lhc="$LASTHOSTCHECK$" -lhsc="$LASTHOSTSTATECHANGE$" -lhu="$LASTHOSTUP$" -lhd="$LASTHOSTDOWN$" -lhur="$LASTHOSTUNREACHABLE$" -ho="$HOSTOUTPUT$" -lho="$LONGHOSTOUTPUT$" -hpd="$HOSTPERFDATA$" 20 | } 21 | -------------------------------------------------------------------------------- /Icinga2/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "graphite_url": "http://localhost:5003", 8 | "api_url": "https://localhost:5665", 9 | "user": "icingaadmin", 10 | "password": "icingaadmin", 11 | "insecure": "false" 12 | }, 13 | "actionMappings": { 14 | "Create": { 15 | "filepath": "", 16 | "sourceType": "", 17 | "env": [], 18 | "stdout": "" 19 | }, 20 | "Acknowledge": { 21 | "filepath": "", 22 | "sourceType": "", 23 | "env": [], 24 | "stdout": "" 25 | 
}, 26 | "AddNote": { 27 | "filepath": "", 28 | "sourceType": "", 29 | "env": [], 30 | "stdout": "" 31 | }, 32 | "TakeOwnership": { 33 | "filepath": "", 34 | "sourceType": "", 35 | "env": [], 36 | "stdout": "" 37 | }, 38 | "AssignOwnership": { 39 | "filepath": "", 40 | "sourceType": "", 41 | "env": [], 42 | "stdout": "" 43 | }, 44 | "UnAcknowledge": { 45 | "filepath": "", 46 | "sourceType": "", 47 | "env": [], 48 | "stdout": "" 49 | } 50 | }, 51 | "pollerConf": { 52 | "pollingWaitIntervalInMillis": 100, 53 | "visibilityTimeoutInSec": 30, 54 | "maxNumberOfMessages": 10 55 | }, 56 | "poolConf": { 57 | "maxNumberOfWorker": 12, 58 | "minNumberOfWorker": 4, 59 | "monitoringPeriodInMillis": 15000, 60 | "keepAliveTimeInMillis": 6000, 61 | "queueSize": 0 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /Icinga2/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | ################################### ICINGA2OPSGENIE CONFIGURATION ############################### 2 | #you can define log levels: warning, debug, info,error 3 | icinga2opsgenie.logger=warning 4 | icinga2opsgenie.timeout=60 5 | logPath=/var/log/opsgenie/send2opsgenie.log 6 | #responders= 7 | #tags= 8 | 9 | ####################################### PROXY CONFIGURATION FOR ICINGA ############################################ 10 | icinga2opsgenie.http.proxy.enabled=false 11 | icinga2opsgenie.http.proxy.port=11111 12 | icinga2opsgenie.http.proxy.host=localhost 13 | icinga2opsgenie.http.proxy.protocol=http 14 | #icinga2opsgenie.http.proxy.username=admin 15 | #icinga2opsgenie.http.proxy.password=changeme 16 | 17 | -------------------------------------------------------------------------------- /Icinga2/native/ogAfter.sh: -------------------------------------------------------------------------------- 1 | 2 | chmod 755 /home/opsgenie/oec/opsgenie-icinga2/send2opsgenie 3 | 4 | if id -u icinga >/dev/null 2>&1; then 
echo "WARNING : If you're updating the integration from version 1.*.*, please update your /etc/opsgenie/conf/opsgenie-integration.conf file accordingly: the old configuration will not work with this version of the integration."
= "$host.group$" 27 | "-hgns" = "$command.vars.hgns$" 28 | "-lhc" = "$host.last_check$" 29 | "-lhsc" = "$host.last_state_change$" 30 | "-ho" = "$host.output$" 31 | "-hpd" = "$host.perfdata$" 32 | "-s" = "$service.name$" 33 | "-sdn" = "$service.display_name$" 34 | "-ss" = "$service.state$" 35 | "-ssi" = "$service.state_id$" 36 | "-lss" = "$service.last_state$" 37 | "-lssi" = "$service.last_state_id$" 38 | "-sst" = "$service.state_type$" 39 | "-sa" = "$service.check_attempt$" 40 | "-sc" = "$service.check_command$" 41 | "-msa" = "$service.max_check_attempts$" 42 | "-sl" = "$service.latency$" 43 | "-set" = "$service.execution_time$" 44 | "-sds" = "$service.duration_sec$" 45 | "-sdt" = "$service.downtime_depth$" 46 | "-sgns" = "$command.vars.sgns$" 47 | "-lsch" = "$service.last_check$" 48 | "-lssc" = "$service.last_state_change$" 49 | "-so" = "$service.output$" 50 | "-spd" = "$service.perfdata$" 51 | } 52 | } 53 | 54 | object NotificationCommand "opsgenie-host-notification" { 55 | import "plugin-notification-command" 56 | 57 | vars.hgns = {{ host.groups.join(",") }} 58 | command = [ "/home/opsgenie/oec/opsgenie-icinga2/send2opsgenie" ] 59 | arguments = { 60 | "-entityType" = "host" 61 | "-t" = "$notification.type$" 62 | "-ldt" = "$icinga.long_date_time$" 63 | "-hn" = "$host.name$" 64 | "-hdn" = "$host.display_name$" 65 | "-hal" = "$host.display_name$" 66 | "-haddr" = "$host.address$" 67 | "-hs" = "$host.state$" 68 | "-hsi" = "$host.state_id$" 69 | "-lhs" = "$host.last_state$" 70 | "-lhsi" = "$host.last_state_id$" 71 | "-hst" = "$host.state_type$" 72 | "-ha" = "$host.check_attempt$" 73 | "-mha" = "$host.max_check_attempts$" 74 | "-hl" = "$host.latency$" 75 | "-het" = "$host.execution_time$" 76 | "-hds" = "$host.duration_sec$" 77 | "-hdt" = "$host.downtime_depth$" 78 | "-hgn" = "$host.group$" 79 | "-hgns" = "$command.vars.hgns$" 80 | "-lhc" = "$host.last_check$" 81 | "-lhsc" = "$host.last_state_change$" 82 | "-ho" = "$host.output$" 83 | "-hpd" = "$host.perfdata$" 84 | } 
85 | } 86 | 87 | object User "opsgenie" { 88 | import "generic-user" 89 | display_name = "OpsGenie Contact" 90 | } 91 | 92 | apply Notification "notify-opsgenie-service" to Service { 93 | command = "opsgenie-service-notification" 94 | users = ["opsgenie"] 95 | assign where service 96 | } 97 | 98 | apply Notification "notify-opsgenie-host" to Host { 99 | command = "opsgenie-host-notification" 100 | users = ["opsgenie"] 101 | assign where host 102 | } 103 | -------------------------------------------------------------------------------- /Jira/JIRA_Lambda_Script/ogLambdaJira.js: -------------------------------------------------------------------------------- 1 | var http = require('http'); 2 | var https = require('https'); 3 | 4 | // Config start 5 | var ogApiKey = ''; 6 | 7 | var jiraUsername = ''; 8 | var jiraPassword = ''; 9 | 10 | // If you're using your own installation of JIRA, 11 | // values in the next section can be completely different. 12 | var jiraHost = '.atlassian.net'; 13 | var jiraBasePath = '/rest/api/latest'; 14 | var jiraProtocol = 'https'; 15 | var jiraPort = 443; 16 | 17 | var reqTimeout = 3000; 18 | 19 | // Set your alert tag to project key mappings here. 20 | // These values are used to determine a JIRA project 21 | // at which an issue will be created for the new alert. 22 | var alertTagToJiraProjectKey = []; 23 | // The following mappings (as well as the mandatory default key) are provided as examples. 24 | // var alertTagToJiraProjectKey = [ 25 | // {tag: 'paymentService', key: 'PAYM'}, 26 | // {tag: 'authenticationService', key: 'AUTH'} 27 | // ]; 28 | var jiraDefaultProjectKey = 'DEF'; 29 | // Config end 30 | 31 | // These values are not expected to be changed. 32 | 33 | 34 | //if you are using opsgenie from another domain e.g. eu, sandbox etc. 35 | //you should update the line below 36 | var ogHost = 'api.opsgenie.com'; 37 | 38 | var ogProtocol = 'https'; 39 | var ogPort = 443; 40 | 41 | // This value is not expected to be changed. 
42 | var JIRA_ISSUE_KEY_PREFIX = 'jiraIssueKey:'; 43 | 44 | var jiraReqOpts = { 45 | host: jiraHost, 46 | port: jiraPort, 47 | path: undefined, // To be set. 48 | method: 'POST', 49 | headers: { 50 | 'Content-Type': 'application/json' 51 | }, 52 | agent: false, 53 | auth: jiraUsername + ':' + jiraPassword 54 | }; 55 | 56 | var ogReqOpts = { 57 | host: ogHost, 58 | port: ogPort, 59 | path: '/v2/alerts', 60 | method: 'POST', 61 | headers: { 62 | 'Content-Type': 'application/json', 63 | 'Authorization': 'GenieKey ' + ogApiKey 64 | }, 65 | agent: false 66 | }; 67 | 68 | var ogHttp = ogProtocol === 'https' ? https : http; 69 | var jiraHttp = jiraProtocol === 'https' ? https : http; 70 | 71 | var genericSuccessFunc = function (event, context) { 72 | console.log('Execution completed successfully.'); 73 | context.succeed(); 74 | }; 75 | 76 | function createJiraIssue(event, context) { 77 | var jiraProjectKey = determineJiraProjectKey(event); 78 | jiraReqOpts.path = jiraBasePath + '/issue'; 79 | var jiraReqBody = { 80 | 'fields': { 81 | 'project': { 82 | 'key': jiraProjectKey 83 | }, 84 | 'summary': event.alert.message, 85 | 'description': 'Issue created for OpsGenie Alert ' + event.alert.alertId + ' from Integration ' + event.integrationId, 86 | 'issuetype': { 87 | 'name': 'Bug' // Make sure your JIRA project configuration(s) supports this Issue Type. 
88 | } 89 | } 90 | }; 91 | doApiCall(event, context, jiraHttp, jiraReqOpts, jiraReqBody, 'JIRA', 'creating issue', 201, addTagToOpsGenieAlert); 92 | } 93 | 94 | function addTagToOpsGenieAlert(event, context, jiraResBody) { 95 | var ogReqBody = { 96 | 'tags': [JIRA_ISSUE_KEY_PREFIX + jiraResBody.key] 97 | }; 98 | ogReqOpts.path = '/v2/alerts/' + event.alert.alertId + "/tags" 99 | doApiCall(event, context, ogHttp, ogReqOpts, ogReqBody, 'OpsGenie', 'adding issue key as tag to alert', 202, genericSuccessFunc); 100 | } 101 | 102 | function addCommentToJiraIssue(event, context) { 103 | var jiraReqBody = { 104 | 'body': event.alert.note 105 | }; 106 | doExistingJiraIssueApiCall(event, context, '/comment', jiraReqBody, 'adding comment to issue', 201); 107 | } 108 | 109 | function startJiraIssueProgress(event, context) { 110 | var jiraReqBody = { 111 | 'transition': { 112 | 'id': '4' 113 | } 114 | }; 115 | doExistingJiraIssueApiCall(event, context, '/transitions', jiraReqBody, 'starting issue progress', 204); 116 | } 117 | 118 | function closeJiraIssue(event, context) { 119 | var jiraReqBody = { 120 | 'transition': { 121 | 'id': '2' 122 | } 123 | }; 124 | doExistingJiraIssueApiCall(event, context, '/transitions', jiraReqBody, 'closing issue', 204); 125 | } 126 | 127 | function doExistingJiraIssueApiCall(event, context, jiraReqPathSuffix, jiraReqBody, happening, successCode) { 128 | var jiraIssueKey = extractJiraIssueKeyFromAlertTag(event); 129 | if (jiraIssueKey) { 130 | jiraReqOpts.path = jiraBasePath + '/issue/' + jiraIssueKey + jiraReqPathSuffix; 131 | doApiCall(event, context, jiraHttp, jiraReqOpts, jiraReqBody, 'JIRA', happening, successCode, genericSuccessFunc); 132 | } else { 133 | context.done(new Error('Cannot determine associated JIRA issue. 
// Shared HTTP helper: performs one request against JIRA or OpsGenie and
// dispatches to onSuccess / context.done based on the response status code.
//
// event/context  - the Lambda invocation pair, threaded through to callbacks
// httplib        - node 'http' or 'https' module, chosen per protocol
// reqOpts        - request options (host, path, auth, headers, ...)
// reqBody        - object serialized to JSON and sent as the request body
// service        - human-readable service name used in log/error messages
// happening      - description of the operation, used in failure messages
// successCode    - the single status code treated as success
// onSuccess      - callback(event, context, parsedResponseBody)
function doApiCall(event, context, httplib, reqOpts, reqBody, service, happening, successCode, onSuccess) {
    var req = httplib.request(reqOpts, function (res) {
        console.log(service + ' request in progress: ' + JSON.stringify(reqOpts));
        console.log(service + ' request body sent: ' + JSON.stringify(reqBody));
        console.log(service + ' response status: ' + res.statusCode);
        // Buffer the whole body before parsing: 'data' can fire multiple
        // times for large responses (JSON.parse on a partial chunk would
        // throw), and for bodiless responses (e.g. 204 from transitions)
        // 'data' never fires at all, which previously left the Lambda
        // hanging with neither success nor failure reported. Deciding on
        // 'end' handles both cases.
        var resBody = '';
        res.on('data', function (chunk) {
            resBody += chunk;
        });
        res.on('end', function () {
            console.log(service + ' response body: ' + resBody);
            if (res.statusCode === successCode) {
                onSuccess(event, context, resBody ? JSON.parse(resBody) : {});
            } else {
                context.done(new Error(service + ' ' + happening + ' failed.'));
            }
        });
    });
    req.write(JSON.stringify(reqBody));
    req.end();

    req.on('error', function (err) {
        context.done(new Error(service + ' request error: ' + err.message));
    });
    req.setTimeout(reqTimeout, function () {
        context.done(new Error(service + ' request timeout after ' + reqTimeout + ' milliseconds.'));
    });
}

// Maps the alert's tags to a JIRA project key via the configured
// alertTagToJiraProjectKey table; falls back to jiraDefaultProjectKey
// when no tag matches.
function determineJiraProjectKey(event) {
    var jiraProjectKey = '';
    var tags = event.alert.tags;
    // 'var i' keeps the loop index function-local (the original leaked an
    // implicit global, which also breaks under strict mode).
    for (var i = 0; i < alertTagToJiraProjectKey.length; i++) {
        if (tags.indexOf(alertTagToJiraProjectKey[i].tag) > -1) {
            jiraProjectKey = alertTagToJiraProjectKey[i].key;
            break;
        }
    }
    return jiraProjectKey || jiraDefaultProjectKey;
}

// Extracts the JIRA issue key previously stored on the alert as a
// 'jiraIssueKey:<KEY>' tag; returns '' when no such tag exists.
function extractJiraIssueKeyFromAlertTag(event) {
    var tags = event.alert.tags;
    var jiraIssueKey = '';
    for (var i = 0; i < tags.length; i++) {
        if (tags[i].substring(0, JIRA_ISSUE_KEY_PREFIX.length) === JIRA_ISSUE_KEY_PREFIX) {
            jiraIssueKey = tags[i].substring(JIRA_ISSUE_KEY_PREFIX.length);
            break;
        }
    }
    return jiraIssueKey;
}
event: ', event); 188 | if (event.action === 'Create') { 189 | createJiraIssue(event, context); 190 | } else if (event.action === 'AddNote') { 191 | addCommentToJiraIssue(event, context); 192 | } else if (event.action === 'Acknowledge') { 193 | startJiraIssueProgress(event, context); 194 | } else if (event.action === 'Close' || event.action === 'Delete') { 195 | closeJiraIssue(event, context); 196 | } else { 197 | context.done(new Error('Action type "' + event.action + '" not supported.')); 198 | } 199 | }; 200 | 201 | -------------------------------------------------------------------------------- /Jira/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "url": "", 8 | "username": "", 9 | "password": "", 10 | "issueTypeName": "", 11 | "projectKey": "" 12 | }, 13 | "actionMappings": { 14 | "createIssue": { 15 | "filepath": "", 16 | "sourceType": "", 17 | "env": [], 18 | "stdout": "" 19 | }, 20 | "resolveIssue": { 21 | "filepath": "", 22 | "sourceType": "", 23 | "env": [], 24 | "stdout": "" 25 | }, 26 | "inProgressIssue": { 27 | "filepath": "", 28 | "sourceType": "", 29 | "env": [], 30 | "stdout": "" 31 | }, 32 | "closeIssue": { 33 | "filepath": "", 34 | "sourceType": "", 35 | "env": [], 36 | "stdout": "" 37 | }, 38 | "issueDone": { 39 | "filepath": "", 40 | "sourceType": "", 41 | "env": [], 42 | "stdout": "" 43 | }, 44 | "addCommentToIssue": { 45 | "filepath": "", 46 | "sourceType": "", 47 | "env": [], 48 | "stdout": "" 49 | } 50 | }, 51 | "pollerConf": { 52 | "pollingWaitIntervalInMillis": 100, 53 | "visibilityTimeoutInSec": 30, 54 | "maxNumberOfMessages": 10 55 | }, 56 | "poolConf": { 57 | "maxNumberOfWorker": 12, 58 | "minNumberOfWorker": 4, 59 | "monitoringPeriodInMillis": 15000, 60 | "keepAliveTimeInMillis": 6000, 61 | "queueSize": 0 62 | } 63 | } 
-------------------------------------------------------------------------------- /Jira/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | import re 6 | 7 | import requests 8 | from requests.auth import HTTPBasicAuth 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True) 12 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 13 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 14 | parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True) 15 | parser.add_argument('-username', '--username', help='Username', required=False) 16 | parser.add_argument('-password', '--password', help='Password', required=False) 17 | parser.add_argument('-url', '--url', help='URL', required=False) 18 | parser.add_argument('-projectKey', '--projectKey', help='Project Key', required=False) 19 | parser.add_argument('-issueTypeName', '--issueTypeName', help='Issue Type', required=False) 20 | args = vars(parser.parse_args()) 21 | 22 | logging.basicConfig(stream=sys.stdout, level=args['logLevel']) 23 | 24 | 25 | def parse_field(key, mandatory): 26 | variable = queue_message.get(key) 27 | if not variable: 28 | variable = args.get(key) 29 | if mandatory and not variable: 30 | logging.error(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 31 | "' is missing. Check your configuration file.") 32 | raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 33 | "' is missing. 
def parse_timeout():
    """Return the HTTP timeout from the 'http.timeout' arg, defaulting to 30000.

    NOTE(review): the magnitude reads like milliseconds (default 30000), but
    the value is passed straight to requests' ``timeout=`` parameter, which
    is measured in seconds -- confirm the intended unit.
    """
    parsed_timeout = args.get('http.timeout')
    if not parsed_timeout:
        return 30000
    return int(parsed_timeout)


def get_transition_id(request_headers, jira_url, transition_name, token):
    """Look up the Jira transition id whose target status matches ``transition_name``.

    Issues a GET against ``jira_url`` (expected to be an
    ``.../issue/<key>/transitions`` endpoint) using basic-auth ``token`` and
    scans the returned ``transitions`` list for an entry whose ``to.name``
    equals ``transition_name``.

    Returns the matching id as a string, '' when nothing matched, or None
    when the response body is not valid JSON (the ValueError handler falls
    through without an explicit return).
    """
    transition_id = str()
    # Relies on the module-level ``timeout`` global initialized in main().
    response = requests.get(jira_url, None, headers=request_headers, auth=token, timeout=timeout)
    try:
        body = response.json()
        if body and response.status_code < 299:
            transition_list = body["transitions"]
            for transition in transition_list:
                to = transition['to']
                if transition_name == to['name']:
                    transition_id = transition['id']
            logging.info(LOG_PREFIX + " Successfully executed at Jira")
            logging.debug(LOG_PREFIX + " Jira response: " + str(response.status_code) + " " + str(response.content))
        else:
            logging.error(
                LOG_PREFIX + " Could not execute at Jira; response: " + str(response.content) + " status code: " + str(
                    response.status_code))
        # An empty id here means the requested transition name was not found.
        if not transition_id:
            logging.debug(LOG_PREFIX + " Transition id is empty")
        return transition_id
    except ValueError:
        logging.error("The response body is not a valid json object!")
parse_field('issueTypeName', False) 90 | 91 | issue_key = queue_message.get("key") 92 | 93 | logging.debug("Url: " + str(url)) 94 | logging.debug("Username: " + str(username)) 95 | logging.debug("Project Key: " + str(project_key)) 96 | logging.debug("Issue Type: " + str(issue_type_name)) 97 | logging.debug("Issue Key: " + str(issue_key)) 98 | 99 | content_params = dict() 100 | 101 | token = HTTPBasicAuth(username, password) 102 | headers = { 103 | "Content-Type": "application/json", 104 | "Accept": "application/json", 105 | } 106 | 107 | result_url = url + "/rest/api/2/issue" 108 | 109 | if mapped_action == "addCommentToIssue": 110 | content_params = { 111 | "body": queue_message.get('body') 112 | } 113 | result_url += "/" + issue_key + "/comment" 114 | elif mapped_action == "createIssue": 115 | toLabel = "ogAlias:" + queue_message.get("alias") 116 | content_params = { 117 | "fields": { 118 | "project": { 119 | "key": project_key 120 | }, 121 | "issuetype": { 122 | "name": issue_type_name 123 | }, 124 | "summary": queue_message.get("summary"), 125 | "description": queue_message.get("description"), 126 | "labels": [ re.sub('\s', '', toLabel) ] 127 | } 128 | } 129 | elif mapped_action == "resolveIssue": 130 | result_url += "/" + issue_key + "/transitions" 131 | content_params = { 132 | "transition": { 133 | "id": get_transition_id(headers, result_url, "Resolved", token) 134 | }, 135 | "fields": { 136 | "resolution": { 137 | "name": "Done" 138 | } 139 | } 140 | } 141 | elif mapped_action == "closeIssue": 142 | result_url += "/" + issue_key + "/transitions" 143 | content_params = { 144 | "transition": { 145 | "id": get_transition_id(headers, result_url, "Closed", token) 146 | }, 147 | "fields": { 148 | "resolution": { 149 | "name": "Done" 150 | } 151 | } 152 | } 153 | elif mapped_action == "issueDone": 154 | result_url += "/" + issue_key + "/transitions" 155 | content_params = { 156 | "transition": { 157 | "id": get_transition_id(headers, result_url, "Done", token) 158 
| } 159 | } 160 | elif mapped_action == "inProgressIssue": 161 | result_url += "/" + issue_key + "/transitions" 162 | content_params = { 163 | "transition": { 164 | "id": get_transition_id(headers, result_url, "In Progress", token) 165 | } 166 | } 167 | 168 | logging.debug(str(content_params)) 169 | response = requests.post(result_url, data=json.dumps(content_params), headers=headers, auth=token, 170 | timeout=timeout) 171 | if response.status_code < 299: 172 | logging.info("Successfully executed at Jira") 173 | if mapped_action == "createIssue": 174 | try: 175 | response_body = response.json() 176 | if response_body: 177 | issue_key_from_response = response_body['key'] 178 | if issue_key_from_response: 179 | alert_api_url = args['opsgenieUrl'] + "/v2/alerts/" + alert_id + "/details" 180 | content = { 181 | "details": 182 | { 183 | "issueKey": issue_key_from_response 184 | } 185 | } 186 | headers = { 187 | "Content-Type": "application/json", 188 | "Accept-Language": "application/json", 189 | "Authorization": "GenieKey " + args['apiKey'] 190 | } 191 | logging.debug(str(alert_api_url) + str(content) + str(headers)) 192 | alert_response = requests.post(alert_api_url, 193 | data=json.dumps(content), headers=headers, 194 | timeout=timeout) 195 | if alert_response.status_code < 299: 196 | logging.info(LOG_PREFIX + " Successfully sent to Opsgenie") 197 | logging.debug( 198 | LOG_PREFIX + " Jira response: " + str(alert_response.content) + " " + str( 199 | alert_response.status_code)) 200 | else: 201 | logging.warning( 202 | LOG_PREFIX + " Could not execute at Opsgenie; response: " + str( 203 | alert_response.content) + " status code: " + str(alert_response.status_code)) 204 | else: 205 | logging.warning( 206 | LOG_PREFIX + " Jira response is empty") 207 | except ValueError: 208 | logging.error(ValueError) 209 | else: 210 | logging.warning( 211 | LOG_PREFIX + " Could not execute at Jira; response: " + str(response.content) + " status code: " + str( 212 | 
response.status_code)) 213 | 214 | 215 | if __name__ == '__main__': 216 | main() 217 | -------------------------------------------------------------------------------- /JiraServiceDesk/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "url": "", 8 | "username": "", 9 | "password": "", 10 | "issueTypeName": "", 11 | "key": "" 12 | }, 13 | "actionMappings": { 14 | "createIssue": { 15 | "filepath": "", 16 | "sourceType": "", 17 | "env": [], 18 | "stdout": "" 19 | }, 20 | "resolveIssue": { 21 | "filepath": "", 22 | "sourceType": "", 23 | "env": [], 24 | "stdout": "" 25 | }, 26 | "addComment": { 27 | "filepath": "", 28 | "sourceType": "", 29 | "env": [], 30 | "stdout": "" 31 | } 32 | }, 33 | "pollerConf": { 34 | "pollingWaitIntervalInMillis": 100, 35 | "visibilityTimeoutInSec": 30, 36 | "maxNumberOfMessages": 10 37 | }, 38 | "poolConf": { 39 | "maxNumberOfWorker": 12, 40 | "minNumberOfWorker": 4, 41 | "monitoringPeriodInMillis": 15000, 42 | "keepAliveTimeInMillis": 6000, 43 | "queueSize": 0 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /JiraServiceDesk/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | import re 6 | 7 | import requests 8 | from requests.auth import HTTPBasicAuth 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True) 12 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 13 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 14 | parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True) 15 | 
def parse_field(key, mandatory):
    """Resolve a configuration value for *key*.

    The queue payload takes precedence; the command-line arguments are the
    fallback. When *mandatory* is true and neither source supplies a truthy
    value, the problem is logged and a ValueError is raised so the action is
    skipped.
    """
    value = queue_message.get(key) or args.get(key)
    if mandatory and not value:
        message = (LOG_PREFIX + " Skipping action, Mandatory conf item '" + key +
                   "' is missing. Check your configuration file.")
        logging.error(message)
        raise ValueError(message)
    return value


def parse_timeout():
    """Return the configured 'http.timeout' as an int, defaulting to 30000."""
    raw = args.get('http.timeout')
    return int(raw) if raw else 30000
def main():
    """Execute the mapped Opsgenie action against Jira Service Desk.

    Reads the queue payload from the CLI args, dispatches on the mapped
    action name (addComment / createIssue / resolveIssue), posts the
    resulting request to the Jira Service Desk REST API, and -- for
    createIssue -- writes the new issue key back to the Opsgenie alert's
    details.
    """
    # These globals are consumed by parse_field() and get_transition_id().
    global LOG_PREFIX
    global queue_message
    global timeout

    queue_message_string = args['queuePayload']
    queue_message = json.loads(queue_message_string)

    logging.debug(str(queue_message))

    alert_id = queue_message["alert"]["alertId"]
    mapped_action = queue_message["mappedActionV2"]["name"]

    LOG_PREFIX = "[" + mapped_action + "]"
    logging.info("Will execute " + mapped_action + " for alertId " + alert_id)

    timeout = parse_timeout()
    # url/username/password are mandatory; project key and issue type are
    # only needed for createIssue and so are optional here.
    url = parse_field('url', True)
    username = parse_field('username', True)
    password = parse_field('password', True)
    project_key = parse_field('key', False)
    issue_type_name = parse_field('issueTypeName', False)

    # Issue key previously stored on the alert details by a createIssue run.
    issue_key = queue_message.get("IssueKey")

    logging.debug("Url: " + str(url))
    logging.debug("Username: " + str(username))
    logging.debug("Project Key: " + str(project_key))
    logging.debug("Issue Type: " + str(issue_type_name))
    logging.debug("Issue Key: " + str(issue_key))

    content_params = dict()

    token = HTTPBasicAuth(username, password)
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json"
    }

    result_url = url + "/rest/api/2/issue"

    if mapped_action == "addComment":
        content_params = {
            "body": queue_message.get('body')
        }
        result_url += "/" + str(issue_key) + "/comment"
    elif mapped_action == "createIssue":
        # The alert alias is attached as a label so later runs can correlate
        # the issue with the alert. NOTE(review): if the payload lacks
        # 'alias', toLabel is None and re.sub below will raise -- confirm
        # the alias is always present for createIssue.
        toLabel = queue_message.get("alias")
        content_params = {
            "fields": {
                "project": {
                    "key": project_key
                },
                "issuetype": {
                    "name": issue_type_name
                },
                "summary": queue_message.get("summary"),
                "description": queue_message.get("description"),
                # Jira labels may not contain whitespace, so strip it all.
                "labels": [ re.sub('\s', '', toLabel) ]
            }
        }
    elif mapped_action == "resolveIssue":
        result_url += "/" + str(issue_key) + "/transitions"
        content_params = {
            "transition": {
                # Resolve the transition name to its numeric id via the API.
                "id": get_transition_id(headers, result_url, "Resolved", token)
            },
            "fields": {
                "resolution": {
                    "name": "Done"
                }
            }
        }

    logging.debug(str(content_params))
    response = requests.post(result_url, data=json.dumps(content_params), headers=headers, auth=token, timeout=timeout)
    if response.status_code < 299:
        logging.info("Successfully executed at Jira Service Desk")
        if mapped_action == "createIssue":
            if response.json():
                issue_key_from_response = response.json()['key']
                if issue_key_from_response:
                    # Store the new issue key on the alert's details so
                    # follow-up actions (addComment/resolveIssue) can find it.
                    alert_api_url = args.get('opsgenieUrl') + "/v2/alerts/" + alert_id + "/details"
                    content = {
                        "details":
                            {
                                "IssueKey": issue_key_from_response
                            }
                    }
                    # Intentionally rebinds 'headers' for the Opsgenie call.
                    headers = {
                        "Content-Type": "application/json",
                        "Accept-Language": "application/json",
                        "Authorization": "GenieKey " + args.get('apiKey')
                    }
                    alert_response = requests.post(alert_api_url,
                                                   data=json.dumps(content), headers=headers, timeout=timeout)
                    if alert_response.status_code < 299:
                        logging.info(LOG_PREFIX + " Successfully sent to Opsgenie")
                        logging.debug(
                            LOG_PREFIX + " Jira Service Desk response: " + str(alert_response.content) + " " + str(
                                alert_response.status_code))
                    else:
                        logging.warning(
                            LOG_PREFIX + " Could not execute at Opsgenie; response: " + str(
                                alert_response.content) + " status code: " + str(alert_response.status_code))
            else:
                logging.warning(
                    LOG_PREFIX + " Jira Service Desk response is empty")
    else:
        logging.warning(
            LOG_PREFIX + " Could not execute at Jira Service Desk; response: " + str(
                response.content) + " status code: " + str(response.status_code))
import argparse
import json
import logging
import sys

import requests

# OEC action executor for the LibreNMS integration.
# Resolves the LibreNMS alert that corresponds to the incoming Opsgenie alert
# (matched by rule id, device id and timestamp) and then acknowledges or
# unmutes it through the LibreNMS v0 REST API.
parser = argparse.ArgumentParser()
parser.add_argument('-payload', '--payload', help='Payload from queue', required=True)
parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True)
parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True)
parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True)
parser.add_argument('-url', '--url', help='LibreNms Server Url', required=False)
parser.add_argument('-apiToken', '--apiToken', help='Api Token', required=False)
parser.add_argument('-timeout', '--timeout', help='Timeout', required=False)

args = vars(parser.parse_args())

logging.basicConfig(stream=sys.stdout, level=args['logLevel'])

queue_message_string = args['payload']
queue_message = json.loads(queue_message_string)

alert_id = queue_message["alert"]["alertId"]
mapped_action = queue_message["mappedActionV2"]["name"]

LOG_PREFIX = "[" + mapped_action + "]:"
logging.info(LOG_PREFIX + " Will execute " + mapped_action + " for alertId " + alert_id)


def parse_field(key, mandatory):
    """Return conf item *key*, preferring the queue payload over CLI args.

    Raises ValueError when *mandatory* is true and the item is present in
    neither source, so the action is skipped instead of running half-configured.
    """
    variable = queue_message.get(key)
    if not variable:
        variable = args.get(key)
    if mandatory and not variable:
        logging.error(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key +
                      "' is missing. Check your configuration file.")
        raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key +
                         "' is missing. Check your configuration file.")
    return variable


url = parse_field('url', True)
# BUGFIX: the original trimmed a trailing "/" with url[0:len(url) - 2], which
# removes TWO characters (the slash AND the last real character of the URL),
# producing a broken base URL like "https://librenms.example.co".
# rstrip drops only the trailing slash(es).
url = url.rstrip("/")

api_token = parse_field('apiToken', True)
rule = int(queue_message["rule"])
device_id = int(queue_message["deviceId"])
timestamp = queue_message["timestamp"]
# NOTE(review): requests interprets `timeout` in SECONDS; the 30000 default
# looks like it was meant as milliseconds (cf. config's *InMillis keys) —
# confirm intent before changing the value, kept as-is here.
timeout = args.get('timeout')
if timeout is None:
    timeout = 30000
else:
    timeout = int(timeout)

logging.debug("Url: " + str(url))
logging.debug("ApiToken " + str(api_token))
logging.debug("Rule from OpsGenie Alert Details: " + str(rule))
logging.debug("Device ID from OpsGenie Alert Details: " + str(device_id))
logging.debug("Timestamp from OpsGenie Alert Details: " + str(timestamp))

list_rules_endpoint = url + "/api/v0/rules"

logging.debug("Sending GET request to " + str(list_rules_endpoint))

list_rules_response = requests.get(list_rules_endpoint, None, headers={"X-Auth-Token": api_token}, timeout=timeout)

logging.debug("Response from " + str(list_rules_endpoint) + ": " + str(list_rules_response.text) + " Status Code: "
              + str(list_rules_response.status_code))

if list_rules_response.status_code < 400:
    rules = list_rules_response.json()["rules"]
    rule_id = None

    # Keep only the rule whose id equals the one from the Opsgenie alert details.
    rule_list = [x["id"] for x in rules if x["id"] == rule]
    for x in rule_list:
        logging.debug(x)

    if len(rule_list) > 0:
        rule_id = rule_list[0]
        logging.debug("Rule Id from LibreNMS: " + str(rule_id))

        list_alerts_endpoint = url + "/api/v0/alerts"
        list_alerts_response = None

        # state=1 -> open alerts (candidates for acknowledge),
        # state=2 -> muted alerts (candidates for unmute).
        if mapped_action == "ackAlert":
            query_params = {"state": "1"}
            logging.debug("Sending GET request to " + str(list_alerts_endpoint) + " with parameters: "
                          + json.dumps(query_params))
            list_alerts_response = requests.get(list_alerts_endpoint, query_params,
                                                headers={"X-Auth-Token": api_token}, timeout=timeout)

        elif mapped_action == "unmuteAlert":
            query_params = {"state": "2"}
            logging.debug("Sending GET request to " + str(list_alerts_endpoint) + " with parameters: "
                          + json.dumps(query_params))
            list_alerts_response = requests.get(list_alerts_endpoint, query_params,
                                                headers={"X-Auth-Token": api_token}, timeout=timeout)

        logging.debug(
            "Response from " + str(list_alerts_endpoint) + ": " + str(list_alerts_response.content) + " Status Code: "
            + str(list_alerts_response.status_code))

        if list_alerts_response.status_code < 400:
            alerts = list_alerts_response.json()['alerts']
            alert_id = None

            if len(alerts) > 0:
                # Prefer an exact match on rule + device + timestamp ...
                alert_list = [x['id'] for x in alerts if (x['rule_id'] == rule and x['device_id'] == device_id and
                                                          x['timestamp'] == timestamp)]
                if len(alert_list) > 0:
                    alert_id = alert_list[0]
                    logging.debug("Alert ID: " + str(alert_id))
                    logging.debug(
                        "Found alert that matches the timestamp from Opsgenie alert, using that alert's alert id.")
                else:
                    # ... otherwise fall back to the first alert that matches
                    # rule + device only (timestamps can drift between systems).
                    alert_list = [x['id'] for x in alerts if (x['rule_id'] == rule and x['device_id'] == device_id)]
                    logging.debug("Timestamp did not match the timestamp retrieved from Opsgenie alert,"
                                  + " using that alert ID of the first alert matches the rule and the device id.")
                    alert_id = alert_list[0]
                    logging.debug("Alert ID: " + str(alert_id))
            else:
                logging.error(
                    LOG_PREFIX + " Could not obtain alerts list from the list alerts response from LibreNMS API or found no matching alerts.")

            logging.debug("Alert Id from LibreNMS: " + str(alert_id))
            if alert_id is not None:
                if mapped_action == "ackAlert":
                    url = url + "/api/v0/alerts/" + str(alert_id)
                elif mapped_action == "unmuteAlert":
                    url = url + "/api/v0/alerts/unmute/" + str(alert_id)

                logging.debug("Sending PUT request to " + str(url))

                response = requests.put(url, None, headers={"X-Auth-Token": api_token}, timeout=timeout)

                logging.debug("Response from " + url + ": " + str(response.content) + " Status Code: "
                              + str(response.status_code))

                if response.status_code < 400:
                    logging.info(LOG_PREFIX + ' Successfully executed at LibreNMS.')
                    logging.debug(LOG_PREFIX + " LibreNMS response:" + str(response.content))
                else:
                    logging.error(
                        LOG_PREFIX + " Could not execute at LibreNMS; response: " + str(
                            response.status_code) + ' ' + str(response.text))
            else:
                logging.error(LOG_PREFIX + " Alert Id from the LibreNMS API was null.")
        else:
            logging.error(
                LOG_PREFIX + " Could not get alert list from LibreNMS; response: " + str(
                    list_alerts_response.status_code) + ' ' + str(list_alerts_response.text))
    else:
        logging.error(LOG_PREFIX + " Rule Id from the LibreNMS API was null.")
else:
    logging.error(
        LOG_PREFIX + " Could not get rules list from LibreNMS; response: " + str(
            list_rules_response.status_code) + ' ' + str(list_rules_response.text))
/* Copyright (C) 2017 Celal Emre CICEK
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>. */

/**
 * OpsGenie API Transport
 *
 * Alert-transport snippet: posts the alert object as JSON to the configured
 * OpsGenie endpoint and reports success/failure back to the caller.
 * NOTE(review): $opts (transport config) and $obj (the alert payload) are
 * injected by the enclosing LibreNMS alert-transport framework — presumably
 * $opts comes from the transport settings UI; confirm against the LibreNMS
 * transport contract.
 *
 * @author Celal Emre CICEK
 * @copyright 2017 Celal Emre CICEK
 * @license GPL
 * @package LibreNMS
 * @subpackage Alerts
 */

// Target URL is the OpsGenie integration endpoint configured for this transport.
$url = $opts['url'];

$curl = curl_init();

// Honor any proxy configured in LibreNMS before issuing the request.
set_curl_proxy($curl);
curl_setopt($curl, CURLOPT_URL, $url );
curl_setopt($curl, CURLOPT_CUSTOMREQUEST, "POST");
// Return the response body from curl_exec() instead of echoing it.
curl_setopt($curl, CURLOPT_RETURNTRANSFER, 1);
curl_setopt($curl, CURLOPT_HTTPHEADER, array('Content-Type: application/json'));
// Ship the whole alert object as the JSON request body.
curl_setopt($curl, CURLOPT_POSTFIELDS, json_encode($obj));

$ret = curl_exec($curl);
$code = curl_getinfo($curl, CURLINFO_HTTP_CODE);

var_dump("Response from OpsGenie: " . $ret); //FIXME: proper debugging

// Anything other than a plain 200 is treated as a delivery failure.
if($code != 200) {
    var_dump("Error when sending post request to OpsGenie. Response code: " . $code); //FIXME: proper debugging
    return false;
}

return true;
"visibilityTimeoutInSec": 30, 74 | "maxNumberOfMessages": 10 75 | }, 76 | "poolConf": { 77 | "maxNumberOfWorker": 12, 78 | "minNumberOfWorker": 4, 79 | "monitoringPeriodInMillis": 15000, 80 | "keepAliveTimeInMillis": 6000, 81 | "queueSize": 0 82 | } 83 | } -------------------------------------------------------------------------------- /Nagios/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | ################################### NAGIOS2OPSGENIE CONFIGURATION ############################### 2 | #you can define log levels: warning, debug, info,error 3 | nagios2opsgenie.logger = warning 4 | nagios2opsgenie.timeout = 60 5 | logPath = /var/log/opsgenie/send2opsgenie.log 6 | #responders= 7 | #tags= 8 | 9 | 10 | ####################################### PROXY CONFIGURATION FOR NAGIOS ############################################ 11 | nagios2opsgenie.http.proxy.enabled = false 12 | nagios2opsgenie.http.proxy.port = 11111 13 | nagios2opsgenie.http.proxy.host = localhost 14 | nagios2opsgenie.http.proxy.protocol = http 15 | #nagios2opsgenie.http.proxy.username=admin 16 | #nagios2opsgenie.http.proxy.password=changeme 17 | -------------------------------------------------------------------------------- /Nagios/native/ogAfter.sh: -------------------------------------------------------------------------------- 1 | 2 | chmod 755 /home/opsgenie/oec/opsgenie-nagios/send2opsgenie 3 | 4 | if id -u nagios >/dev/null 2>&1; then 5 | usermod -a -G opsgenie nagios 6 | else 7 | echo "WARNING : nagios user does not exist. Please don't forget to add your nagios user to opsgenie group!" 8 | fi 9 | 10 | if [ -d "/usr/local/nagios/etc/objects" ]; then 11 | cp /etc/opsgenie/opsgenie.cfg /usr/local/nagios/etc/objects/ 12 | else 13 | echo "WARNING : Could not find your NAGIOS_HOME directory. Please copy /etc/opsgenie/opsgenie.cfg file to your /etc/objects directory manually!" 
14 | fi -------------------------------------------------------------------------------- /Nagios/opsgenie-nagios/opsgenie.cfg: -------------------------------------------------------------------------------- 1 | define contact { 2 | contact_name opsgenie 3 | alias OpsGenie Contact 4 | service_notification_period 24x7 5 | host_notification_period 24x7 6 | service_notification_options c,r 7 | host_notification_options d,r 8 | service_notification_commands notify-service-by-opsgenie 9 | host_notification_commands notify-host-by-opsgenie 10 | } 11 | 12 | define command { 13 | command_name notify-service-by-opsgenie 14 | command_line /home/opsgenie/oec/opsgenie-nagios/send2opsgenie -entityType=service -t="$NOTIFICATIONTYPE$" -ldt="$LONGDATETIME$" -hn="$HOSTNAME$" -hdn="$HOSTDISPLAYNAME$" -hal="$HOSTALIAS$" -haddr="$HOSTADDRESS$" -hs="$HOSTSTATE$" -hsi="$HOSTSTATEID$" -lhs="$LASTHOSTSTATE$" -lhsi="$LASTHOSTSTATEID$" -hst="$HOSTSTATETYPE$" -ha="$HOSTATTEMPT$" -mha="$MAXHOSTATTEMPTS$" -hei="$HOSTEVENTID$" -lhei="$LASTHOSTEVENTID$" -hpi="$HOSTPROBLEMID$" -lhpi="$LASTHOSTPROBLEMID$" -hl="$HOSTLATENCY$" -het="$HOSTEXECUTIONTIME$" -hd="$HOSTDURATION$" -hds="$HOSTDURATIONSEC$" -hdt="$HOSTDOWNTIME$" -hpc="$HOSTPERCENTCHANGE$" -hgn="$HOSTGROUPNAME$" -hgns="$HOSTGROUPNAMES$" -lhc="$LASTHOSTCHECK$" -lhsc="$LASTHOSTSTATECHANGE$" -lhu="$LASTHOSTUP$" -lhd="$LASTHOSTDOWN$" -lhur="$LASTHOSTUNREACHABLE$" -ho="$HOSTOUTPUT$" -lho="$LONGHOSTOUTPUT$" -hpd="$HOSTPERFDATA$" -s="$SERVICEDESC$" -sdn="$SERVICEDISPLAYNAME$" -ss="$SERVICESTATE$" -ssi="$SERVICESTATEID$" -lss="$LASTSERVICESTATE$" -lssi="$LASTSERVICESTATEID$" -sst="$SERVICESTATETYPE$" -sa="$SERVICEATTEMPT$" -msa="$MAXSERVICEATTEMPTS$" -siv="$SERVICEISVOLATILE$" -sei="$SERVICEEVENTID$" -lsei="$LASTSERVICEEVENTID$" -spi="$SERVICEPROBLEMID$" -lspi="$LASTSERVICEPROBLEMID$" -sl="$SERVICELATENCY$" -set="$SERVICEEXECUTIONTIME$" -sd="$SERVICEDURATION$" -sds="$SERVICEDURATIONSEC$" -sdt="$SERVICEDOWNTIME$" -spc="$SERVICEPERCENTCHANGE$" 
-sgn="$SERVICEGROUPNAME$" -sgns="$SERVICEGROUPNAMES$" -lsch="$LASTSERVICECHECK$" -lssc="$LASTSERVICESTATECHANGE$" -lsok="$LASTSERVICEOK$" -lsw="$LASTSERVICEWARNING$" -lsu="$LASTSERVICEUNKNOWN$" -lsc="$LASTSERVICECRITICAL$" -so="$SERVICEOUTPUT$" -lso="$LONGSERVICEOUTPUT$" -spd="$SERVICEPERFDATA$" -snu="$SERVICENOTESURL$" 15 | } 16 | 17 | define command { 18 | command_name notify-host-by-opsgenie 19 | command_line /home/opsgenie/oec/opsgenie-nagios/send2opsgenie -entityType=host -t="$NOTIFICATIONTYPE$" -ldt="$LONGDATETIME$" -hn="$HOSTNAME$" -hdn="$HOSTDISPLAYNAME$" -hal="$HOSTALIAS$" -haddr="$HOSTADDRESS$" -hs="$HOSTSTATE$" -hsi="$HOSTSTATEID$" -lhs="$LASTHOSTSTATE$" -lhsi="$LASTHOSTSTATEID$" -hst="$HOSTSTATETYPE$" -ha="$HOSTATTEMPT$" -mha="$MAXHOSTATTEMPTS$" -hei="$HOSTEVENTID$" -lhei="$LASTHOSTEVENTID$" -hpi="$HOSTPROBLEMID$" -lhpi="$LASTHOSTPROBLEMID$" -hl="$HOSTLATENCY$" -het="$HOSTEXECUTIONTIME$" -hd="$HOSTDURATION$" -hds="$HOSTDURATIONSEC$" -hdt="$HOSTDOWNTIME$" -hpc="$HOSTPERCENTCHANGE$" -hgn="$HOSTGROUPNAME$" -hgns="$HOSTGROUPNAMES$" -lhc="$LASTHOSTCHECK$" -lhsc="$LASTHOSTSTATECHANGE$" -lhu="$LASTHOSTUP$" -lhd="$LASTHOSTDOWN$" -lhur="$LASTHOSTUNREACHABLE$" -ho="$HOSTOUTPUT$" -lho="$LONGHOSTOUTPUT$" -hpd="$HOSTPERFDATA$" -hnu="$HOSTNOTESURL$" 20 | } 21 | -------------------------------------------------------------------------------- /NagiosXI/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "alert_histogram_image_url": "http://localhost/nagios/cgi-bin/histogram.cgi", 8 | "trends_image_url": "http://localhost/nagios/cgi-bin/trends.cgi", 9 | "command_url": "http://localhost/nagios/cgi-bin/cmd.cgi", 10 | "user": "nagiosadmin", 11 | "password": "admin" 12 | }, 13 | "actionMappings": { 14 | "Create": { 15 | "filepath": "", 16 | "sourceType": "", 17 | "env": [], 18 | "stdout": "" 
19 | }, 20 | "Acknowledge": { 21 | "filepath": "", 22 | "sourceType": "", 23 | "env": [], 24 | "stdout": "" 25 | }, 26 | "AddNote": { 27 | "filepath": "", 28 | "sourceType": "", 29 | "env": [], 30 | "stdout": "" 31 | }, 32 | "TakeOwnership": { 33 | "filepath": "", 34 | "sourceType": "", 35 | "env": [], 36 | "stdout": "" 37 | }, 38 | "AssignOwnership": { 39 | "filepath": "", 40 | "sourceType": "", 41 | "env": [], 42 | "stdout": "" 43 | }, 44 | "UnAcknowledge": { 45 | "filepath": "", 46 | "sourceType": "", 47 | "env": [], 48 | "stdout": "" 49 | } 50 | }, 51 | "pollerConf": { 52 | "pollingWaitIntervalInMillis": 100, 53 | "visibilityTimeoutInSec": 30, 54 | "maxNumberOfMessages": 10 55 | }, 56 | "poolConf": { 57 | "maxNumberOfWorker": 12, 58 | "minNumberOfWorker": 4, 59 | "monitoringPeriodInMillis": 15000, 60 | "keepAliveTimeInMillis": 6000, 61 | "queueSize": 0 62 | } 63 | } -------------------------------------------------------------------------------- /NagiosXI/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | ################################### NAGIOS2OPSGENIE CONFIGURATION ############################### 2 | #you can define log levels: warning, debug, info,error 3 | nagios2opsgenie.logger = warning 4 | nagios2opsgenie.timeout = 60 5 | logPath = /var/log/opsgenie/send2opsgenie.log 6 | #responders= 7 | #tags= 8 | 9 | 10 | ####################################### PROXY CONFIGURATION FOR NAGIOS ############################################ 11 | nagios2opsgenie.http.proxy.enabled = false 12 | nagios2opsgenie.http.proxy.port = 11111 13 | nagios2opsgenie.http.proxy.host = localhost 14 | nagios2opsgenie.http.proxy.protocol = http 15 | #nagios2opsgenie.http.proxy.username=admin 16 | #nagios2opsgenie.http.proxy.password=changeme 17 | -------------------------------------------------------------------------------- /NagiosXI/native/ogAfter.sh: 
-------------------------------------------------------------------------------- 1 | 2 | chmod 755 /home/opsgenie/oec/opsgenie-nagiosxi/send2opsgenie 3 | 4 | if id -u nagios >/dev/null 2>&1; then 5 | usermod -a -G opsgenie nagios 6 | else 7 | echo "WARNING : nagios user does not exist. Please don't forget to add your nagios user to opsgenie group!" 8 | fi -------------------------------------------------------------------------------- /OEC/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [ 6 | "" 7 | ], 8 | "globalFlags": { 9 | }, 10 | "actionMappings": { 11 | "Create": { 12 | "filepath": "", 13 | "sourceType": "", 14 | "env": [ 15 | "e1=v1", 16 | "e2=v2" 17 | ], 18 | "stdout": "" 19 | } 20 | }, 21 | "pollerConf": { 22 | "pollingWaitIntervalInMillis": 100, 23 | "visibilityTimeoutInSec": 30, 24 | "maxNumberOfMessages": 10 25 | }, 26 | "poolConf": { 27 | "maxNumberOfWorker": 12, 28 | "minNumberOfWorker": 4, 29 | "monitoringPeriodInMillis": 15000, 30 | "keepAliveTimeInMillis": 6000, 31 | "queueSize": 0 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /OEC/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | print("This is an example script. 
import argparse
import json

import requests

# Generic OEC HTTP executor: forwards an arbitrary HTTP request described by
# the queue payload (or by CLI flags) and prints the response as JSON.
parser = argparse.ArgumentParser()
parser.add_argument('-payload', '--queuePayload', required=True)
parser.add_argument('-logLevel', '--logLevel', required=True)
parser.add_argument('-apiKey', '--apiKey', required=True)
parser.add_argument('-opsgenieUrl', '--opsgenieUrl', required=True)
parser.add_argument('-method', '--method', required=False)
parser.add_argument('-url', '--url', required=False)
parser.add_argument('-headers', '--headers', type=json.loads, required=False)
parser.add_argument('-params', '--params', type=json.loads, required=False)
parser.add_argument('-body', '--body', required=False)
args = vars(parser.parse_args())

message = json.loads(args['queuePayload'].strip())


def parse_field(key, mandatory=True):
    """Resolve *key* from the CLI args first, then from the queue payload.

    Raises ValueError for a missing mandatory field so execution is skipped.
    """
    value = args.get(key) or message.get(key)
    if not value and mandatory:
        raise ValueError("Skipping execution [" + key + "] field does not exist in payload and configs.")
    return value


def main():
    """Issue the described HTTP request and print the response as a JSON document."""
    response = requests.request(
        method=parse_field("method"),
        url=parse_field("url"),
        headers=parse_field("headers", False),
        params=parse_field("params", False),
        data=parse_field("body", False),
    )

    print(json.dumps({
        "headers": dict(response.headers),
        "body": response.text,
        "statusCode": response.status_code,
    }))


if __name__ == '__main__':
    main()
-------------------------------------------------------------------------------- /OP5/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "url": "", 8 | "username": "", 9 | "password": "" 10 | }, 11 | "actionMappings": { 12 | "acknowledge": { 13 | "filepath": "", 14 | "sourceType": "", 15 | "env": [], 16 | "stdout": "" 17 | } 18 | }, 19 | "pollerConf": { 20 | "pollingWaitIntervalInMillis": 100, 21 | "visibilityTimeoutInSec": 30, 22 | "maxNumberOfMessages": 10 23 | }, 24 | "poolConf": { 25 | "maxNumberOfWorker": 12, 26 | "minNumberOfWorker": 4, 27 | "monitoringPeriodInMillis": 15000, 28 | "keepAliveTimeInMillis": 6000, 29 | "queueSize": 0 30 | } 31 | } -------------------------------------------------------------------------------- /OP5/conf/opsgenie-integration.conf.part: -------------------------------------------------------------------------------- 1 | ################################### NAGIOS2OPSGENIE CONFIGURATION ############################### 2 | #you can define log levels: warning, debug, info,error 3 | nagios2opsgenie.logger=warning 4 | nagios2opsgenie.timeout= 60 5 | logPath=/var/log/opsgenie/send2opsgenie.log 6 | #responders= 7 | #tags= 8 | 9 | ####################################### PROXY CONFIGURATION FOR NAGIOS ############################################ 10 | nagios2opsgenie.http.proxy.enabled=false 11 | nagios2opsgenie.http.proxy.port=11111 12 | nagios2opsgenie.http.proxy.host=localhost 13 | nagios2opsgenie.http.proxy.protocol=http 14 | #nagios2opsgenie.http.proxy.username=admin 15 | #nagios2opsgenie.http.proxy.password=changeme 16 | 17 | -------------------------------------------------------------------------------- /OP5/native/ogAfter.sh: -------------------------------------------------------------------------------- 1 | 2 | chmod 755 
/home/opsgenie/oec/opsgenie-op5/send2opsgenie 3 | 4 | 5 | if id -u nagios >/dev/null 2>&1; then 6 | usermod -a -G opsgenie nagios 7 | else 8 | echo "WARNING : nagios user does not exist. Please don't forget to add your nagios user to opsgenie group!" 9 | fi 10 | 11 | cp /etc/opsgenie/opsgenie.cfg /opt/monitor/etc/mconf 12 | cp /etc/opsgenie/opsgenie.cfg /opt/monitor/etc 13 | chown monitor:apache /etc/opsgenie/opsgenie.cfg 14 | chown monitor:apache /opt/monitor/etc/opsgenie.cfg 15 | chown monitor:apache /opt/monitor/etc/mconf/opsgenie.cfg 16 | chmod 664 /etc/opsgenie/opsgenie.cfg 17 | chmod 664 /opt/monitor/etc/opsgenie.cfg 18 | chmod 664 /opt/monitor/etc/mconf/opsgenie.cfg -------------------------------------------------------------------------------- /OP5/opsgenie-op5/opsgenie.cfg: -------------------------------------------------------------------------------- 1 | define contact { 2 | contact_name opsgenie 3 | alias OpsGenie Contact 4 | service_notification_period 24x7 5 | host_notification_period 24x7 6 | service_notification_options c,r 7 | host_notification_options d,r 8 | service_notification_commands notify-service-by-opsgenie 9 | host_notification_commands notify-host-by-opsgenie 10 | } 11 | 12 | define command { 13 | command_name notify-service-by-opsgenie 14 | command_line /home/opsgenie/oec/opsgenie-op5/send2opsgenie -entityType=service -t="$NOTIFICATIONTYPE$" -ldt="$LONGDATETIME$" -hn="$HOSTNAME$" -hdn="$HOSTDISPLAYNAME$" -hal="$HOSTALIAS$" -haddr="$HOSTADDRESS$" -hs="$HOSTSTATE$" -hsi="$HOSTSTATEID$" -lhs="$LASTHOSTSTATE$" -lhsi="$LASTHOSTSTATEID$" -hst="$HOSTSTATETYPE$" -ha="$HOSTATTEMPT$" -mha="$MAXHOSTATTEMPTS$" -hei="$HOSTEVENTID$" -lhei="$LASTHOSTEVENTID$" -hpi="$HOSTPROBLEMID$" -lhpi="$LASTHOSTPROBLEMID$" -hl="$HOSTLATENCY$" -het="$HOSTEXECUTIONTIME$" -hd="$HOSTDURATION$" -hds="$HOSTDURATIONSEC$" -hdt="$HOSTDOWNTIME$" -hpc="$HOSTPERCENTCHANGE$" -hgn="$HOSTGROUPNAME$" -hgns="$HOSTGROUPNAMES$" -lhc="$LASTHOSTCHECK$" -lhsc="$LASTHOSTSTATECHANGE$" 
-lhu="$LASTHOSTUP$" -lhd="$LASTHOSTDOWN$" -lhur="$LASTHOSTUNREACHABLE$" -ho="$HOSTOUTPUT$" -lho="$LONGHOSTOUTPUT$" -hpd="$HOSTPERFDATA$" -s="$SERVICEDESC$" -sdn="$SERVICEDISPLAYNAME$" -ss="$SERVICESTATE$" -ssi="$SERVICESTATEID$" -lss="$LASTSERVICESTATE$" -lssi="$LASTSERVICESTATEID$" -sst="$SERVICESTATETYPE$" -sa="$SERVICEATTEMPT$" -msa="$MAXSERVICEATTEMPTS$" -siv="$SERVICEISVOLATILE$" -sei="$SERVICEEVENTID$" -lsei="$LASTSERVICEEVENTID$" -spi="$SERVICEPROBLEMID$" -lspi="$LASTSERVICEPROBLEMID$" -sl="$SERVICELATENCY$" -set="$SERVICEEXECUTIONTIME$" -sd="$SERVICEDURATION$" -sds="$SERVICEDURATIONSEC$" -sdt="$SERVICEDOWNTIME$" -spc="$SERVICEPERCENTCHANGE$" -sgn="$SERVICEGROUPNAME$" -sgns="$SERVICEGROUPNAMES$" -lsch="$LASTSERVICECHECK$" -lssc="$LASTSERVICESTATECHANGE$" -lsok="$LASTSERVICEOK$" -lsw="$LASTSERVICEWARNING$" -lsu="$LASTSERVICEUNKNOWN$" -lsc="$LASTSERVICECRITICAL$" -so="$SERVICEOUTPUT$" -lso="$LONGSERVICEOUTPUT$" -spd="$SERVICEPERFDATA$" -snu="$SERVICENOTESURL$" 15 | } 16 | 17 | define command { 18 | command_name notify-host-by-opsgenie 19 | command_line /home/opsgenie/oec/opsgenie-op5/send2opsgenie -entityType=host -t="$NOTIFICATIONTYPE$" -ldt="$LONGDATETIME$" -hn="$HOSTNAME$" -hdn="$HOSTDISPLAYNAME$" -hal="$HOSTALIAS$" -haddr="$HOSTADDRESS$" -hs="$HOSTSTATE$" -hsi="$HOSTSTATEID$" -lhs="$LASTHOSTSTATE$" -lhsi="$LASTHOSTSTATEID$" -hst="$HOSTSTATETYPE$" -ha="$HOSTATTEMPT$" -mha="$MAXHOSTATTEMPTS$" -hei="$HOSTEVENTID$" -lhei="$LASTHOSTEVENTID$" -hpi="$HOSTPROBLEMID$" -lhpi="$LASTHOSTPROBLEMID$" -hl="$HOSTLATENCY$" -het="$HOSTEXECUTIONTIME$" -hd="$HOSTDURATION$" -hds="$HOSTDURATIONSEC$" -hdt="$HOSTDOWNTIME$" -hpc="$HOSTPERCENTCHANGE$" -hgn="$HOSTGROUPNAME$" -hgns="$HOSTGROUPNAMES$" -lhc="$LASTHOSTCHECK$" -lhsc="$LASTHOSTSTATECHANGE$" -lhu="$LASTHOSTUP$" -lhd="$LASTHOSTDOWN$" -lhur="$LASTHOSTUNREACHABLE$" -ho="$HOSTOUTPUT$" -lho="$LONGHOSTOUTPUT$" -hpd="$HOSTPERFDATA$" -hnu="$HOSTNOTESURL$" 20 | } 21 | 
import argparse
import json
import logging
import sys

import requests
from requests.auth import HTTPBasicAuth

# OEC action executor for OP5: acknowledges the host or service problem that
# produced the Opsgenie alert, via OP5's Nagios-style command API.
parser = argparse.ArgumentParser()
parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True)
parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True)
parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True)
parser.add_argument('-logLevel', '--logLevel', help='Log Level', required=True)
parser.add_argument('-username', '--username', help='Username', required=False)
parser.add_argument('-password', '--password', help='Password', required=False)
parser.add_argument('-url', '--url', help='Url', required=False)
args = vars(parser.parse_args())

logging.basicConfig(stream=sys.stdout, level=args['logLevel'])


def parse_field(key, mandatory):
    """Look up *key* in the queue payload first, then fall back to CLI args.

    Raises ValueError when *mandatory* is true and neither source has a value.
    """
    value = queue_message.get(key) or args.get(key)
    if mandatory and not value:
        error = (LOG_PREFIX + " Skipping action, Mandatory conf item '" + str(key) +
                 "' is missing. Check your configuration file.")
        logging.error(error)
        raise ValueError(error)
    return value


def parse_timeout():
    """Return the configured HTTP timeout, defaulting to 30000 when unset."""
    configured = args.get('http.timeout')
    return int(configured) if configured else 30000


def post_to_op5(post_params, entity_type):
    """POST an acknowledge command for a host or a service to the OP5 API."""
    # Map the entity type onto the corresponding OP5/Nagios external command.
    commands = {
        "service": "ACKNOWLEDGE_SVC_PROBLEM",
        "host": "ACKNOWLEDGE_HOST_PROBLEM",
    }
    url = parse_field("url", True) + "/api/command/" + commands.get(entity_type, "")

    username = parse_field("username", True)
    password = parse_field("password", True)
    logging.debug("Username: " + str(username))

    credentials = HTTPBasicAuth(username, password)

    logging.debug(LOG_PREFIX + " Posting to OP5. Url: " + str(url) + " params: " + str(post_params))
    response = requests.post(url, data=post_params, auth=credentials, timeout=parse_timeout())
    if response and response.status_code == 200:
        logging.info(LOG_PREFIX + " Successfully executed at OP5.")
        logging.debug(LOG_PREFIX + " OP5 response: " + str(response.content))
    else:
        logging.error(
            LOG_PREFIX + " Could not execute at OP5. StatusCode: " + str(response.status_code) + " Response: " + str(
                response.content))


def main():
    global LOG_PREFIX
    global queue_message

    queue_message = json.loads(args['queuePayload'])

    alert_id = queue_message["alert"]["alertId"]
    mapped_action = queue_message["mappedActionV2"]["name"]

    LOG_PREFIX = "[" + mapped_action + "]"
    logging.info("Will execute " + str(mapped_action) + " for alertId " + str(alert_id))

    # Common acknowledge parameters shared by host and service commands.
    post_params = {key: queue_message.get(key)
                   for key in ("host_name", "sticky", "notify", "persistent", "comment")}

    # A service description in the payload means this is a service problem.
    service = queue_message.get("service_desc")
    if service:
        post_params["service_description"] = service
        post_to_op5(post_params, "service")
    else:
        post_to_op5(post_params, "host")


if __name__ == '__main__':
    main()
import argparse
import json
import logging
import sys

import requests

# OEC action executor for PRTG: acknowledges the sensor alarm that produced
# the Opsgenie alert through PRTG's acknowledgealarm API endpoint.
parser = argparse.ArgumentParser()
parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True)
parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True)
parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True)
parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True)
parser.add_argument('-username', '--username', help='Username', required=False)
parser.add_argument('-passhash', '--passhash', help='Passhash', required=False)
parser.add_argument('-prtgUrl', '--prtgUrl', help='PRTG Url', required=False)
parser.add_argument('-sensorId', '--sensorId', help='Sensor Id', required=False)
parser.add_argument('-acknowledgeMessage', '--acknowledgeMessage', help='Acknowledge Message', required=False)
args = vars(parser.parse_args())

logging.basicConfig(stream=sys.stdout, level=args['logLevel'])


def parse_field(key, mandatory):
    """Look up *key* in the queue payload first, then fall back to CLI args.

    Raises ValueError when *mandatory* is true and neither source has a value.
    """
    value = queue_message.get(key) or args.get(key)
    if mandatory and not value:
        error = (LOG_PREFIX + " Skipping action, Mandatory conf item '" + key +
                 "' is missing. Check your configuration file.")
        logging.error(error)
        raise ValueError(error)
    return value


def parse_timeout():
    """Return the configured HTTP timeout, defaulting to 30000 when unset."""
    configured = args.get('http.timeout')
    return int(configured) if configured else 30000


def main():
    global LOG_PREFIX
    global queue_message
    global timeout

    queue_message = json.loads(args['queuePayload'].strip())

    alert_id = queue_message["alertId"]
    mapped_action = queue_message["mappedActionV2"]["name"]

    LOG_PREFIX = "[" + mapped_action + "]"

    logging.info("Will execute " + mapped_action + " for alertId " + alert_id)

    timeout = parse_timeout()
    base_url = parse_field('prtgUrl', True)
    username = parse_field('username', True)
    passhash = parse_field('passhash', True)
    ack_message = parse_field('acknowledgeMessage', True)
    sensor_id = parse_field('sensorId', True)

    # Avoid a double slash when the configured base URL already ends with one.
    separator = "" if base_url.endswith("/") else "/"
    result_url = base_url + separator + "api/acknowledgealarm.htm"

    params = {
        'id': sensor_id,
        'ackmsg': ack_message,
        'username': username,
        'passhash': passhash
    }
    logging.debug("Sending request to PRTG.")
    response = requests.post(result_url, params=params, timeout=timeout)
    if response.status_code < 300:
        logging.info("Successfully executed at PRTG")

    else:
        logging.warning(
            LOG_PREFIX + " Could not execute at PRTG; response: " + str(response.content) + " status code: " + str(
                response.status_code))


if __name__ == '__main__':
    main()
Support](https://img.shields.io/badge/-Contact%20Support-blue)](https://support.atlassian.com/contact/#/) 3 | [![Public Issue Tracker](https://img.shields.io/badge/-Public%20Issue%20Tracker-blue)](https://jira.atlassian.com/browse/OPSGENIE-803?jql=project%3DOPSGENIE%20AND%20component%20in%20(%22OEC%20-%20Configuration%22%2C%20%22OEC%20-%20Installation%22)%20and%20resolution%20is%20EMPTY) 4 | ## Opsgenie Edge Connector (OEC) custom scripts 5 | 6 | > For more information about OEC: https://github.com/opsgenie/oec 7 | > 8 | > Please note that, new deb package releases will have opsgenie prefix in package names. This might cause a problem when you try to upgrade your existing package. To solve that problem, you need to remove the old package and install the new one. 9 | 10 | | Latest releases | 11 | | :-------------: | 12 | | [BMCFootprintsV11](https://github.com/opsgenie/oec-scripts/releases/tag/BMCFootprintsV11-1.1.3_oec-1.1.3) | 13 | | [BMCFootprintsV12](https://github.com/opsgenie/oec-scripts/releases/tag/BMCFootprintsV12-1.1.3_oec-1.1.3) | 14 | | [BMCRemedy](https://github.com/opsgenie/oec-scripts/releases/tag/BMCRemedy-1.1.3_oec-1.1.3) | 15 | | [Cherwell](https://github.com/opsgenie/oec-scripts/releases/tag/Cherwell-1.1.4_oec-1.1.3) | 16 | | [DynatraceAppMon](https://github.com/opsgenie/oec-scripts/releases/tag/DynatraceAppMon-1.1.3_oec-1.1.3) | 17 | | [Icinga](https://github.com/opsgenie/oec-scripts/releases/tag/Icinga-1.1.5_oec-1.1.3) | 18 | | [Icinga2](https://github.com/opsgenie/oec-scripts/releases/tag/Icinga2-1.1.6_oec-1.1.3) | 19 | | [Jira](https://github.com/opsgenie/oec-scripts/releases/tag/Jira-1.1.5_oec-1.1.4) | 20 | | [JiraServiceDesk](https://github.com/opsgenie/oec-scripts/releases/tag/JiraServiceDesk-1.1.6_oec-1.1.4) | 21 | | [LibreNMS](https://github.com/opsgenie/oec-scripts/releases/tag/LibreNMS-1.1.3_oec-1.1.3) | 22 | | [Nagios](https://github.com/opsgenie/oec-scripts/releases/tag/Nagios-1.1.4_oec-1.1.3) | 23 | | 
[NagiosXI](https://github.com/opsgenie/oec-scripts/releases/tag/NagiosXI-1.1.5_oec-1.1.3) | 24 | | [OEC](https://github.com/opsgenie/oec-scripts/releases/tag/OEC-1.1.3_oec-1.1.3) | 25 | | [OP5](https://github.com/opsgenie/oec-scripts/releases/tag/OP5-1.1.3_oec-1.1.3) | 26 | | [PRTG](https://github.com/opsgenie/oec-scripts/releases/tag/PRTG-1.1.3_oec-1.1.3) | 27 | | [Solarwinds](https://github.com/opsgenie/oec-scripts/releases/tag/Solarwinds-1.1.3_oec-1.1.3) | 28 | | [SolarwindsMSPNCentral](https://github.com/opsgenie/oec-scripts/releases/tag/SolarwindsMSPNCentral-1.1.3_oec-1.1.3) | 29 | | [SolarwindsWebHelpdesk](https://github.com/opsgenie/oec-scripts/releases/tag/SolarwindsWebHelpdesk-1.1.3_oec-1.1.3) | 30 | | [Splunk](https://github.com/opsgenie/oec-scripts/releases/tag/Splunk-1.1.4_oec-1.1.3) | 31 | | [Trackit](https://github.com/opsgenie/oec-scripts/releases/tag/Trackit-1.1.3_oec-1.1.3) | 32 | | [Xmpp](https://github.com/opsgenie/oec-scripts/releases/tag/Xmpp-1.1.3_oec-1.1.3) | 33 | | [Zabbix](https://github.com/opsgenie/oec-scripts/releases/tag/Zabbix-1.1.7_oec-1.1.3) | 34 | | [Zendesk](https://github.com/opsgenie/oec-scripts/releases/tag/Zendesk-1.1.3_oec-1.1.3) | 35 | | [Zenoss](https://github.com/opsgenie/oec-scripts/releases/tag/Zenoss-1.1.3_oec-1.1.3) | 36 | 37 | ## Contact Support 38 | You can find open bugs and suggestions for OEC on our [public issue tracker](https://jira.atlassian.com/browse/OPSGENIE-803?jql=project%3DOPSGENIE%20AND%20component%20in%20(%22OEC%20-%20Configuration%22%2C%20%22OEC%20-%20Installation%22)%20and%20resolution%20is%20EMPTY). If you are experiencing an issue with OEC, or if you want to raise a new bug or suggestion you can reach out [Opsgenie support](https://support.atlassian.com/contact/#/). 
39 | 40 | -------------------------------------------------------------------------------- /Solarwinds/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "url": "https://:17774", 8 | "login": "", 9 | "password": "" 10 | }, 11 | "actionMappings": { 12 | "Acknowledge": { 13 | "filepath": "", 14 | "sourceType": "", 15 | "env": [], 16 | "stdout": "" 17 | }, 18 | "AddNote": { 19 | "filepath": "", 20 | "sourceType": "", 21 | "env": [], 22 | "stdout": "" 23 | }, 24 | "Close": { 25 | "filepath": "", 26 | "sourceType": "", 27 | "env": [], 28 | "stdout": "" 29 | } 30 | }, 31 | "pollerConf": { 32 | "pollingWaitIntervalInMillis": 100, 33 | "visibilityTimeoutInSec": 30, 34 | "maxNumberOfMessages": 10 35 | }, 36 | "poolConf": { 37 | "maxNumberOfWorker": 12, 38 | "minNumberOfWorker": 4, 39 | "monitoringPeriodInMillis": 15000, 40 | "keepAliveTimeInMillis": 6000, 41 | "queueSize": 0 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /Solarwinds/resetActionBody.txt: -------------------------------------------------------------------------------- 1 | ## Reset action should trigger the Close Action of OpsGenie. By default, Solarwinds Orion NPM integration alerts uses alias field as Alert Alias. 2 | That's why sending only ActionType (Close action's default condition) and alias (default alias of alerts) will be sufficient. 3 | 4 | ## OpsGenie expects the following fields like below, use them without changing names. 
5 | alias=${N=Alerting;M=AlertObjectID}-${N=Alerting;M=AlertID}&ActionType=Close 6 | 7 | ## Troubleshooting content, use µ#µ instead of = and §#§ instead of & 8 | aliasµ#µ${N=Alerting;M=AlertObjectID}-${N=Alerting;M=AlertID}§#§ActionTypeµ#µClose -------------------------------------------------------------------------------- /Solarwinds/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | import time 6 | 7 | import requests 8 | from requests.auth import HTTPBasicAuth 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('-payload', '--payload', help='Payload from queue', required=True) 12 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 13 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 14 | parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True) 15 | parser.add_argument('-url', '--url', help='Your solarwinds server IP or FQDN', required=False) 16 | parser.add_argument('-login', '--login', help='Name of Solarwinds user that can acknowledge alerts', required=False) 17 | parser.add_argument('-password', '--password', help='Password for Solarwinds user that can acknowledge alerts', 18 | required=False) 19 | parser.add_argument('-timeout', '--timeout', help='Timeout', required=False) 20 | 21 | args = vars(parser.parse_args()) 22 | 23 | logging.basicConfig(stream=sys.stdout, level=args['logLevel']) 24 | 25 | queue_message_string = args['payload'] 26 | queue_message = json.loads(queue_message_string) 27 | logging.debug(queue_message) 28 | 29 | 30 | def parse_field(key, mandatory): 31 | variable = queue_message.get(key) 32 | if not variable: 33 | variable = args.get(key) 34 | if mandatory and not variable: 35 | logging.error(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 36 | "' is missing. 
def acknowledge_solarwinds_alert(url, auth_token, object_id, comment):
    """Acknowledge an active SolarWinds alert via the SWIS JSON API.

    :param url: SolarWinds server base url (scheme + host + port).
    :param auth_token: requests auth object (HTTP basic credentials).
    :param object_id: AlertObjectID of the active alert to acknowledge.
    :param comment: acknowledgement note to attach in SolarWinds.
    """
    endpoint = url + "/SolarWinds/InformationService/v3/Json/Invoke/Orion.AlertActive/Acknowledge"

    # SWIS Invoke expects a positional-parameter array: [[objectIds], note].
    content = json.dumps([[object_id], comment])

    # Routine request details are not a warning; log them at debug level.
    logging.debug("Acknowledgement details: " + content)

    response = requests.post(endpoint, data=content, headers={"Content-Type": "application/json"}, auth=auth_token,
                             timeout=timeout)

    # Any 2xx counts as success (the old check `< 299` wrongly excluded 299).
    if response.status_code < 300:
        logging.info(LOG_PREFIX + " Successfully executed at Solarwinds.")
        logging.debug(LOG_PREFIX + " Solarwinds response: " + str(response.status_code) + " " + str(response.content))
    else:
        logging.warning(LOG_PREFIX + " Could not execute at Solarwinds; response: " + str(response.content)
                        + " status code: " + str(response.status_code))
def main():
    """Relay an Opsgenie alert action (Acknowledge/AddNote/Close) to SolarWinds.

    Reads the action and alert from the module-level queue payload, fetches the
    full alert from the Opsgenie API to recover the SolarWinds object ids stored
    in its details, then calls the matching SWIS helper. Actions whose source is
    SolarWinds itself are discarded to avoid an update loop.
    """
    global LOG_PREFIX
    global timeout

    action = queue_message["action"]
    alert = queue_message["alert"]
    source = queue_message["source"]

    logging.debug("Action: " + str(action))

    LOG_PREFIX = "[" + action + "]:"

    username = parse_field('login', True)
    password = parse_field('password', True)
    url = parse_field('url', True)
    timeout = args['timeout']

    # NOTE(review): this default looks like milliseconds, but requests treats
    # timeouts as seconds -- confirm the intended unit.
    if not timeout:
        timeout = 30000
    else:
        timeout = int(timeout)

    logging.debug("Username: " + username)
    # The password was previously logged in clear text here; removed so
    # credentials never leak into log files.

    auth_token = HTTPBasicAuth(username, password)

    get_alert_url = args['opsgenieUrl'] + "/v2/alerts/" + alert["alertId"]

    headers = {
        "Content-Type": "application/json",
        # Was "Accept-Language": a MIME type is an Accept value, not a language.
        "Accept": "application/json",
        "Authorization": "GenieKey " + args['apiKey']
    }

    response = requests.get(get_alert_url, None, headers=headers, timeout=timeout)
    content = response.json()

    if "data" in content:
        alert_from_opsgenie = content["data"]
        if source["type"].lower() != "solarwinds":
            # SolarWinds object ids were stashed in the alert details at creation time.
            definition_id = alert_from_opsgenie["details"]["AlertDefinitionID"]
            logging.debug("alertDefinitionID: " + str(definition_id))
            object_type = alert_from_opsgenie["details"]["ObjectType"]
            logging.debug("objectType: " + str(object_type))
            object_id = alert_from_opsgenie["details"]["ObjectID"]
            logging.debug("objectID: " + str(object_id))

            str_updated = time.strftime("%m/%d/%Y, %H:%M:%S")
            alert_username = str(alert.get("username"))
            alert_note = str(alert.get("note"))
            alert_message = str(alert.get("message"))
            if action == "Acknowledge":
                message = alert_username + " acknowledged alert: \"" + alert_note + "\" on alert: \"" + \
                          alert_message + "\""
                comment = str_updated + " Acknowledged in Opsgenie by " + alert_username
                acknowledge_solarwinds_alert(url, auth_token, object_id, comment)

            elif action == "AddNote":
                message = alert_username + " added note to alert: \"" + alert_note + "\" on alert: \"" + \
                          alert_message + "\""
                comment = str_updated + " Updated by " + alert_username + " from OpsGenie: " + alert_note
                add_note_solarwinds_alert(url, auth_token, object_id, comment)

            elif action == "Close":
                message = alert_username + " closed alert: \"" + alert_note + "\" on alert: \"" + alert_message + "\""
                comment = str_updated + " Updated by " + alert_username + " from OpsGenie: " + alert_note
                close_solarwinds_alert(url, auth_token, object_id, comment)
            else:
                message = alert_username + " executed [" + action + "] action on alert: \"" + alert_message + "\""

            logging.info(LOG_PREFIX + " " + message)

        else:
            logging.warning(LOG_PREFIX + " Action source is Solarwinds; discarding action in order to prevent looping.")

    else:
        logging.warning(
            LOG_PREFIX + " Alert with id " + alert["alertId"] + " does not exist in Opsgenie. It is probably deleted.")


if __name__ == '__main__':
    main()
30 | 31 | ActionTypeµ#µCreate§#§ 32 | aliasµ#µ${N=Alerting;M=AlertObjectID}-${N=Alerting;M=AlertID}§#§ 33 | ObjectIDµ#µ${N=Alerting;M=AlertObjectID}§#§ 34 | NodeNameµ#µ${NodeName}§#§ 35 | AlertIDµ#µ${N=Alerting;M=AlertID}§#§ 36 | AlertDefIDµ#µ${N=Alerting;M=AlertDefID}§#§ 37 | AlertNameµ#µ${N=Alerting;M=AlertName}§#§ 38 | AlertMessageµ#µ${N=Alerting;M=AlertMessage}§#§ 39 | AlertDescriptionµ#µ${N=Alerting;M=AlertDescription}§#§ 40 | AlertDetailsUrlµ#µ${N=Alerting;M=AlertDetailsUrl}§#§ 41 | DownTimeµ#µ${N=Alerting;M=DownTime}§#§ 42 | AcknowledgeUrlµ#µ${N=Alerting;M=AcknowledgeUrl}§#§ 43 | Acknowledgedµ#µ${N=Alerting;M=Acknowledged}§#§ 44 | AcknowledgedByµ#µ${N=Alerting;M=AcknowledgedBy}§#§ 45 | AcknowledgedTimeµ#µ${N=Alerting;M=AcknowledgedTime;F=DateTime}§#§ 46 | AlertTriggerCountµ#µ${N=Alerting;M=AlertTriggerCount}§#§ 47 | AlertTriggerTimeµ#µ${N=Alerting;M=AlertTriggerTime;F=DateTime}§#§ 48 | LastEditµ#µ${N=Alerting;M=LastEdit;F=DateTime}§#§ 49 | ObjectTypeµ#µ${N=Alerting;M=ObjectType}§#§ 50 | Severityµ#µ${N=Alerting;M=Severity}§#§ 51 | TimeOfDayµ#µ${N=Alerting;M=TimeOfDay}§#§ 52 | DateTimeµ#µ${N=Generic;M=DateTime;F=DateTime}§#§ 53 | respondersµ#µ§#§ 54 | tagsµ#µ 55 | -------------------------------------------------------------------------------- /SolarwindsMSPNCentral/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "url": "", 8 | "username": "", 9 | "password": "" 10 | }, 11 | "actionMappings": { 12 | "acknowledgeNotification": { 13 | "filepath": "", 14 | "sourceType": "", 15 | "env": [], 16 | "stdout": "" 17 | } 18 | }, 19 | "pollerConf": { 20 | "pollingWaitIntervalInMillis": 100, 21 | "visibilityTimeoutInSec": 30, 22 | "maxNumberOfMessages": 10 23 | }, 24 | "poolConf": { 25 | "maxNumberOfWorker": 12, 26 | "minNumberOfWorker": 4, 27 | "monitoringPeriodInMillis": 15000, 28 | 
"keepAliveTimeInMillis": 6000, 29 | "queueSize": 0 30 | } 31 | } -------------------------------------------------------------------------------- /SolarwindsMSPNCentral/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | 6 | import requests 7 | 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument('-payload', '--payload', help='Payload from queue', required=True) 10 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 11 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 12 | parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True) 13 | parser.add_argument('-url', '--url', help='Your Solarwinds MSP N-central server IP or FQDN', required=False) 14 | parser.add_argument('-username', '--username', help='Name of Solarwinds MSP N-central user that can acknowledge alerts', 15 | required=False) 16 | parser.add_argument('-password', '--password', 17 | help='Password for Solarwinds MSP N-central user that can acknowledge alerts', 18 | required=False) 19 | parser.add_argument('-timeout', '--timeout', help='Timeout', required=False) 20 | 21 | args = vars(parser.parse_args()) 22 | 23 | logging.basicConfig(stream=sys.stdout, level=args['logLevel']) 24 | 25 | queue_message_string = args['payload'] 26 | queue_message = json.loads(queue_message_string) 27 | 28 | alert_id = queue_message["alert"]["alertId"] 29 | mapped_action = queue_message["mappedActionV2"]["name"] 30 | 31 | LOG_PREFIX = "[" + mapped_action + "]:" 32 | logging.info(LOG_PREFIX + " Will execute " + mapped_action + " for alertId " + alert_id) 33 | 34 | 35 | def parse_field(key, mandatory): 36 | variable = queue_message.get(key) 37 | if not variable: 38 | variable = args.get(key) 39 | if mandatory and not variable: 40 | logging.error(LOG_PREFIX + " Skipping action, Mandatory 
conf item '" + key + 41 | "' is missing. Check your configuration file.") 42 | raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 43 | "' is missing. Check your configuration file.") 44 | return variable 45 | 46 | 47 | username = parse_field('username', True) 48 | password = parse_field('password', True) 49 | url = parse_field('url', True) 50 | timeout = args['timeout'] 51 | 52 | if not timeout: 53 | timeout = 30000 54 | else: 55 | timeout = int(timeout) 56 | 57 | active_notification_trigger_Id = queue_message["activeNotificationTriggerID"] 58 | 59 | logging.debug("Username: " + username) 60 | logging.debug("Password: " + password) 61 | logging.debug("Url: " + url) 62 | logging.debug("activeNotificationTriggerID: " + str(active_notification_trigger_Id)) 63 | 64 | if mapped_action == "acknowledgeNotification": 65 | soapEndpoint = url + "/dms2/services2/ServerEI2" 66 | logging.debug("SOAP Endpoint: " + soapEndpoint) 67 | 68 | headers = {'content-type': 'text/xml; charset=UTF-8'} 69 | 70 | body = ''' 71 | 74 | 75 | 76 | 77 | {0} 78 | {1} 79 | {2} 80 | true 81 | false 82 | 83 | 84 | 85 | '''.format(active_notification_trigger_Id, username, password) 86 | 87 | response = requests.post(soapEndpoint, data=body, headers=headers, timeout=timeout) 88 | 89 | logging.debug("Status code of the response: " + str(response.status_code)) 90 | logging.debug("Response content: " + str(response.content)) 91 | 92 | if 300 > response.status_code: 93 | logging.info("SOAP request sent successfully.") 94 | else: 95 | logging.error("SOAP request failed with status code: " + str(response.status_code)) 96 | -------------------------------------------------------------------------------- /SolarwindsWebHelpdesk/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "serverUrl": "", 8 | 
import argparse
import json
import logging
import sys

import requests

parser = argparse.ArgumentParser()
parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True)
parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True)
parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True)
parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True)
parser.add_argument('-serverUrl', '--serverUrl', help='Solarwinds Server URL', required=True)
parser.add_argument('-apiToken', '--apiToken', help='Api Token', required=True)
parser.add_argument('-httpTimeout', '--httpTimeout', help='Timeout for http requests', required=False)

args = vars(parser.parse_args())

queue_message_string = args['queuePayload']
queue_message = json.loads(queue_message_string)

# Web Help Desk built-in status-type ids.
CLOSE_STATUS_ID = 3
ACKNOWLEDGED_STATUS_ID = 6

logging.basicConfig(stream=sys.stdout, level=args['logLevel'])


def parse_field(key, mandatory):
    """Return the value for *key*, preferring the queue payload over CLI args.

    Fixes over the previous version: uses .get() so a missing payload key no
    longer raises KeyError, guards the .strip() call, and actually returns the
    resolved value (the old code had no return statement).
    """
    variable = queue_message.get(key)
    if not variable or not str(variable).strip():
        variable = args.get(key)
    if mandatory and not variable:
        raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key +
                         "' is missing. Check your configuration file.")
    return variable


def _log_response(response):
    """Log the Web Help Desk response; treat any status below 400 as success."""
    if response.status_code < 400:
        logging.info(LOG_PREFIX + ' Successfully executed at Solarwinds.')
        logging.debug(LOG_PREFIX + " Solarwinds response:" + str(response.content))
    else:
        # str() around status_code: the old code concatenated the raw int and
        # raised TypeError on the error path.
        logging.error(
            LOG_PREFIX + " Could not execute at Solarwinds; response: " + str(response.status_code) + ' ' + str(
                response.content))


def _put_status_update(status_id):
    """PUT the ticket identified by the alert alias into the given status type."""
    alert_alias = queue_message['alert']['alias']
    # Header key was misspelled "content_type" before; use the real header name.
    content_type_header = {"Content-Type": "application/json"}
    url = SERVER_URL + "/helpdesk/WebObjects/Helpdesk.woa/ra/Tickets/" + alert_alias + "?apiKey=" + API_TOKEN

    content_params = {"statustype": {"id": status_id}}

    response = requests.put(url, json=content_params, headers=content_type_header, timeout=HTTP_TIMEOUT)
    _log_response(response)


def send_close_request():
    """Close the Web Help Desk ticket for this alert."""
    _put_status_update(CLOSE_STATUS_ID)


def send_acknowledge_request():
    """Acknowledge the Web Help Desk ticket for this alert."""
    _put_status_update(ACKNOWLEDGED_STATUS_ID)


def send_add_note_request():
    """Append the Opsgenie alert note to the ticket as a tech note."""
    alert_alias = queue_message['alert']['alias']
    alert_note = queue_message['alert']['note']
    content_type_header = {"Content-Type": "application/json"}
    url = SERVER_URL + "/helpdesk/WebObjects/Helpdesk.woa/ra/TechNotes?apiKey=" + API_TOKEN

    job = {"type": "JobTicket",
           "id": alert_alias
           }
    content_params = {
        "noteText": alert_note,
        "jobticket": job,
        "workTime": "0",
        "isHidden": False,
        "isSolution": False,
        "emailClient": True,
        "emailTech": True,
        "emailTechGroupLevel": False,
        "emailGroupManager": False,
        "emailCc": False,
        "emailBcc": False,
        "ccAddressesForTech": "",
        "bccAddresses": ""
    }
    response = requests.post(url, json=content_params, headers=content_type_header, timeout=HTTP_TIMEOUT)
    _log_response(response)


def main():
    """Dispatch the queued Opsgenie action to the matching Web Help Desk call."""
    global LOG_PREFIX
    global SERVER_URL
    global API_TOKEN
    global HTTP_TIMEOUT

    action = queue_message['action']
    LOG_PREFIX = '[' + action + ']'
    SERVER_URL = args['serverUrl']
    API_TOKEN = args['apiToken']
    HTTP_TIMEOUT = args['httpTimeout']
    # NOTE(review): this default looks like milliseconds, but requests treats
    # timeouts as seconds -- confirm the intended unit.
    if not HTTP_TIMEOUT:
        HTTP_TIMEOUT = 30000
    else:
        HTTP_TIMEOUT = int(HTTP_TIMEOUT)

    if action == 'Close':
        send_close_request()
    elif action == "Acknowledge":
        send_acknowledge_request()
    elif action == "AddNote":
        send_add_note_request()


if __name__ == '__main__':
    main()
import argparse
import json
import logging
import sys

import requests

parser = argparse.ArgumentParser()
parser.add_argument("-payload", "--queuePayload",
                    help="Payload from queue", required=True)
parser.add_argument("-apiKey", "--apiKey",
                    help="The apiKey of the integration", required=True)
parser.add_argument("-opsgenieUrl", "--opsgenieUrl",
                    help="Opsgenie apiUrl", required=True)
parser.add_argument("-logLevel", "--logLevel",
                    help="Level of logging", required=True)
parser.add_argument("-url", "--url",
                    help="Splunk base url with port", required=False)
parser.add_argument("-token", "--token",
                    help="Splunk http event collector token", required=False)
parser.add_argument("-sslVerify", "--sslverify",
                    help="SSL verify your splunk server url", required=False)

args = vars(parser.parse_args())

logging.basicConfig(stream=sys.stdout, level=args["logLevel"])

# Timeout (seconds) for the HEC request; the script previously sent the request
# without any timeout and could hang forever.
HTTP_TIMEOUT_SECONDS = 30


def parse_field(key, mandatory):
    """Return the value for *key*, preferring the queue payload over CLI args.

    Logs and raises ValueError when a mandatory value is missing from both.
    (The old version referenced an undefined LOG_PREFIX here, so a missing
    mandatory field crashed with NameError instead of the intended message.)
    """
    variable = queue_message.get(key) or args.get(key)
    if mandatory and not variable:
        message = ("Skipping action, Mandatory conf item '" + key +
                   "' is missing. Check your configuration file.")
        logging.error(message)
        raise ValueError(message)
    return variable


def main():
    """Relay the queued Opsgenie payload to the Splunk HTTP Event Collector."""
    global queue_message

    queue_message = json.loads(args["queuePayload"].strip())

    action = queue_message["action"]
    alert_id = queue_message["alertId"]

    log_prefix = "[{}]".format(action)
    logging.info("Will execute {} for alertId {}".format(action, alert_id))

    splunk_url = parse_field('url', True)
    splunk_token = parse_field('token', True)
    ssl_verify = parse_field('sslverify', False)

    # Verification is disabled when the flag is empty/absent or literally "false".
    ssl_verify = bool(ssl_verify) and ssl_verify.lower() != "false"

    # Strip the connection parameters before forwarding the payload as the event.
    # pop() (not del) so a payload without these keys no longer raises KeyError.
    queue_message.pop("url", None)
    queue_message.pop("token", None)

    headers = {
        "Content-Type": "application/json",
        "Authorization": "Splunk {}".format(splunk_token)
    }

    target_url = "{}/services/collector".format(splunk_url)
    body = {
        "event": queue_message
    }

    response = requests.post(target_url, data=json.dumps(body), headers=headers,
                             verify=ssl_verify, timeout=HTTP_TIMEOUT_SECONDS)
    # Any 2xx counts as success (the old check `< 299` wrongly excluded 299).
    if response.status_code < 300:
        logging.info(log_prefix + " Successfully relayed payload to Splunk")
    else:
        logging.warning(log_prefix + " Could not relay to Splunk; response: {} status code: {}".format(
            str(response.content), response.status_code))


if __name__ == "__main__":
    main()
"password": "" 10 | }, 11 | "actionMappings": { 12 | "Create": { 13 | "filepath": "", 14 | "sourceType": "", 15 | "env": [], 16 | "stdout": "" 17 | }, 18 | "AddNote": { 19 | "filepath": "", 20 | "sourceType": "", 21 | "env": [], 22 | "stdout": "" 23 | }, 24 | "AddResponder": { 25 | "filepath": "", 26 | "sourceType": "", 27 | "env": [], 28 | "stdout": "" 29 | }, 30 | "AssignOwnership": { 31 | "filepath": "", 32 | "sourceType": "", 33 | "env": [], 34 | "stdout": "" 35 | }, 36 | "TakeOwnership": { 37 | "filepath": "", 38 | "sourceType": "", 39 | "env": [], 40 | "stdout": "" 41 | }, 42 | "Close": { 43 | "filepath": "", 44 | "sourceType": "", 45 | "env": [], 46 | "stdout": "" 47 | } 48 | }, 49 | "pollerConf": { 50 | "pollingWaitIntervalInMillis": 100, 51 | "visibilityTimeoutInSec": 30, 52 | "maxNumberOfMessages": 10 53 | }, 54 | "poolConf": { 55 | "maxNumberOfWorker": 12, 56 | "minNumberOfWorker": 4, 57 | "monitoringPeriodInMillis": 15000, 58 | "keepAliveTimeInMillis": 6000, 59 | "queueSize": 0 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /Trackit/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | 6 | import requests 7 | 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True) 10 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 11 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 12 | parser.add_argument('-logLevel', '--logLevel', help='Log level', required=True) 13 | parser.add_argument('-url', '--url', help='The url', required=False) 14 | parser.add_argument('-login', '--login', help='Login', required=False) 15 | parser.add_argument('-password', '--password', help='Password', required=False) 16 | args = 
vars(parser.parse_args()) 17 | 18 | logging.basicConfig(stream=sys.stdout, level=args['logLevel']) 19 | 20 | def parse_field(key, mandatory): 21 | variable = queue_message.get(key) 22 | if not variable: 23 | variable = args.get(key) 24 | if mandatory and not variable: 25 | logging.error(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 26 | "' is missing. Check your configuration file.") 27 | raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 28 | "' is missing. Check your configuration file.") 29 | return variable 30 | 31 | 32 | def parse_timeout(): 33 | parsed_timeout = args.get('http.timeout') 34 | if not parsed_timeout: 35 | return 30000 36 | return int(parsed_timeout) 37 | 38 | 39 | def send_request_to_trackit(final_url, content, headers): 40 | response = requests.post(final_url, json.dumps(content), headers=headers, timeout=timeout) 41 | if response.status_code < 299: 42 | logging.info(LOG_PREFIX + " Successfully executed at TrackIt.") 43 | else: 44 | logging.warning( 45 | LOG_PREFIX + " Could not execute at TrackIt; response: " + str(response.content) + " " + str( 46 | response.status_code)) 47 | 48 | 49 | def login_to_trackit(): 50 | final_url = url + "/TrackitWeb/api/login?username=" + parse_field("login", 51 | True) + "&pwd=" + parse_field( 52 | "password", True) 53 | logging.debug("Url: " + str(final_url)) 54 | response = requests.get(final_url) 55 | if response: 56 | response_map = response.json() 57 | if response_map: 58 | return response_map['data']['apiKey'] 59 | 60 | return None 61 | 62 | 63 | def add_note_to_workflow(message, workflow_id, track_key): 64 | final_url = url + "/TrackitWeb/api/workorder/AddNote/" + workflow_id 65 | headers = { 66 | "Content-Type": "text/json", 67 | "Accept": "text/json", 68 | "TrackitAPIKey": track_key 69 | } 70 | content = { 71 | "IsPrivate": "False", 72 | "FullText": message 73 | } 74 | logging.debug( 75 | "Before Post -> Url: " + final_url + ", Content: " + str(content) + ", 
Request Headers: " + str(headers)) 76 | send_request_to_trackit(final_url, content, headers) 77 | 78 | 79 | def close_workflow(workflow_id, track_key): 80 | final_url = url + "/TrackitWeb/api/workorder/Close/" + workflow_id 81 | headers = { 82 | "Content-Type": "text/json", 83 | "Accept": "text/json", 84 | "TrackitAPIKey": track_key 85 | } 86 | logging.debug("Before Post -> Url: " + final_url + ", " + "Request Headers: " + str(headers)) 87 | send_request_to_trackit(final_url, {}, headers) 88 | 89 | 90 | def main(): 91 | global queue_message 92 | global LOG_PREFIX 93 | global timeout 94 | global url 95 | 96 | queue_message_string = args['queuePayload'] 97 | queue_message = json.loads(queue_message_string) 98 | alert = queue_message["alert"] 99 | alert_id = alert["alertId"] 100 | action = queue_message["action"] 101 | 102 | timeout = parse_timeout() 103 | 104 | LOG_PREFIX = "[" + action + "]" 105 | logging.info("Will execute " + action + " for alertId " + alert_id) 106 | url = parse_field("url", True) 107 | track_key = login_to_trackit() 108 | workflow_id = str(alert["details"]["workflow_id"]) 109 | user_name = str(alert['username']) 110 | message = str(alert['message']) 111 | 112 | if workflow_id: 113 | message = user_name + " executed [" + action + "] action on alert: \"" + message + "\"" 114 | 115 | if action == "Acknowledge": 116 | message = user_name + " acknowledged alert: \"" + message + "\"" 117 | elif action == "AddNote": 118 | note = str(alert['note']) 119 | message = user_name + " noted: \"" + note + "\" on alert: \"" + message + "\"" 120 | elif action == "AddResponder": 121 | responder = str(alert['responder']) 122 | message = user_name + " added responder " + responder + " to alert: \"" + message + "\"" 123 | elif action == "AssignOwnership": 124 | owner = str(alert['owner']) 125 | message = user_name + " assigned ownership of the alert: \"" + message + "\" to " + \ 126 | owner 127 | elif action == "TakeOwnership": 128 | owner = str(alert['owner']) 129 | 
message = user_name + " took ownership of the alert: \"" + message + "\"" 130 | 131 | if action != "Close": 132 | add_note_to_workflow(message, workflow_id, track_key) 133 | else: 134 | close_workflow(workflow_id, track_key) 135 | else: 136 | logging.warning(LOG_PREFIX + " Cannot send action to Track-It because workflow_id is not found on alert") 137 | if __name__ == '__main__': 138 | main() 139 | -------------------------------------------------------------------------------- /Trackit/scripts/createTrackitWorkflow.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | 6 | import requests 7 | 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True) 10 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 11 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 12 | parser.add_argument('-logLevel', '--logLevel', help='Log level', required=True) 13 | parser.add_argument('-url', '--url', help='The url', required=False) 14 | parser.add_argument('-login', '--login', help='Login', required=False) 15 | parser.add_argument('-password', '--password', help='Password', required=False) 16 | args = vars(parser.parse_args()) 17 | 18 | logging.basicConfig(stream=sys.stdout, level=args['logLevel']) 19 | 20 | 21 | def parse_field(key, mandatory): 22 | variable = queue_message.get(key) 23 | if not variable: 24 | variable = args.get(key) 25 | if mandatory and not variable: 26 | logging.error(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 27 | "' is missing. Check your configuration file.") 28 | raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 29 | "' is missing. 
def parse_timeout():
    """Return the HTTP timeout for Track-It requests.

    NOTE(review): the default (30000) and any configured 'http.timeout' look
    like milliseconds, but the value is handed to requests' timeout= which is
    in seconds.  Kept as-is for backward compatibility - confirm the unit.
    """
    parsed_timeout = args.get('http.timeout')
    if not parsed_timeout:
        return 30000
    return int(parsed_timeout)


def login_to_trackit(url):
    """Authenticate against Track-It and return the session TrackitAPIKey, or None."""
    final_url = url + "/TrackitWeb/api/login?username=" + parse_field("login",
                                                                     True) + "&pwd=" + parse_field(
        "password", True)
    # Security: do not log final_url - it embeds the password as a query parameter.
    logging.debug("Logging in to Track-It at: " + str(url))
    # BUG FIX: timeout was previously passed positionally, i.e. as
    # requests.get's `params` argument, so the request had no timeout at all.
    response = requests.get(final_url, timeout=timeout)
    if response:
        response_map = response.json()
        if response_map:
            return response_map['data']['apiKey']

    return None


def main():
    """Create a Track-It work order for the alert and store its id back on the
    Opsgenie alert details as 'workflow_id'."""
    global LOG_PREFIX
    global queue_message
    global timeout

    queue_message_string = args['queuePayload']
    queue_message = json.loads(queue_message_string)
    alert = queue_message["alert"]
    alert_id = alert["alertId"]
    action = queue_message["action"]

    LOG_PREFIX = "[" + action + "]"
    logging.info("Will execute " + action + " for alertId " + alert_id)

    timeout = parse_timeout()
    url = parse_field("url", True)
    track_key = login_to_trackit(url)

    if action == "Create":
        headers = {
            "Content-Type": "text/json",
            "Accept": "text/json",
            "TrackitAPIKey": track_key
        }
        content_params = {
            "StatusName": "Open",
            "Summary": alert['message'],
            "RequestorName": parse_field("login", True)
        }
        create_url = str(url) + "/TrackitWeb/api/workorder/Create"
        logging.debug(
            "Before Post -> Url: " + create_url + ", " + "Request Headers: " + str(headers) + " Content: " + str(content_params))
        response = requests.post(create_url, json.dumps(content_params), headers=headers, timeout=timeout)
        if response.status_code < 299:
            logging.info(LOG_PREFIX + " Successfully executed at TrackIt.")
            try:
                response_map = response.json()
                if response_map:
                    flow_id = response_map['data']['data']['Id']
                    if flow_id:
                        # Write the Track-It work order id back onto the alert
                        # so later actions can find the linked work order.
                        alert_api_url = args['opsgenieUrl'] + "/v2/alerts/" + alert_id + "/details"
                        content = {
                            "details":
                                {
                                    "workflow_id": flow_id
                                }
                        }
                        headers = {
                            "Content-Type": "application/json",
                            "Accept-Language": "application/json",
                            "Authorization": "GenieKey " + args['apiKey']
                        }
                        alert_response = requests.post(alert_api_url,
                                                       data=json.dumps(content), headers=headers, timeout=timeout)
                        if alert_response.status_code < 299:
                            logging.info(LOG_PREFIX + " Successfully sent to Opsgenie")
                            logging.debug(
                                LOG_PREFIX + " TrackIt response: " + str(alert_response.content) + " " + str(alert_response.status_code))
                        else:
                            logging.warning(
                                LOG_PREFIX + " Could not execute at Opsgenie; response: " + str(alert_response.content) + " status code: " + str(alert_response.status_code))
                    else:
                        logging.warning(
                            LOG_PREFIX + " Flow Id does not exist.")
            except ValueError:
                logging.error(
                    LOG_PREFIX + " Response does not have flow Id variable, " + str(response.content) + " " + str(response.status_code))
        else:
            logging.warning(
                LOG_PREFIX + " Could not execute at TrackIt; response: " + str(response.content) + " " + str(response.status_code))


if __name__ == '__main__':
    main()
"filepath": "", 26 | "sourceType": "", 27 | "env": [], 28 | "stdout": "" 29 | }, 30 | "Close": { 31 | "filepath": "", 32 | "sourceType": "", 33 | "env": [], 34 | "stdout": "" 35 | } 36 | }, 37 | "pollerConf": { 38 | "pollingWaitIntervalInMillis": 100, 39 | "visibilityTimeoutInSec": 30, 40 | "maxNumberOfMessages": 10 41 | }, 42 | "poolConf": { 43 | "maxNumberOfWorker": 12, 44 | "minNumberOfWorker": 4, 45 | "monitoringPeriodInMillis": 15000, 46 | "keepAliveTimeInMillis": 6000, 47 | "queueSize": 0 48 | } 49 | } -------------------------------------------------------------------------------- /Xmpp/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | 6 | import slixmpp 7 | 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True) 10 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 11 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 12 | parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True) 13 | 14 | parser.add_argument("-jid", "--jid", dest="jid", help="JID to use") 15 | parser.add_argument("-password", "--password", dest="password", help="Password to use") 16 | parser.add_argument("-room", "--room", dest="room", help="MUC room to join") 17 | 18 | args = vars(parser.parse_args()) 19 | 20 | queue_message_string = args.get('queuePayload') 21 | queue_message = json.loads(queue_message_string) 22 | 23 | logging.basicConfig(stream=sys.stdout, level=args.get('logLevel')) 24 | 25 | alert = queue_message.get("alert") 26 | alert_id = alert.get("alertId") 27 | action = queue_message.get("action") 28 | 29 | LOG_PREFIX = "[" + action + "]" 30 | CONNECTION = "connection" 31 | TIMEOUT = 3 32 | 33 | 34 | def create_message(): 35 | message = "" 36 | alert_message = 
str(alert.get("message")) 37 | alert_username = str(alert.get("username")) 38 | alert_note = str(alert.get("note")) 39 | if alert_id: 40 | if action == "Create": 41 | message = "New alert: \"" + alert_message + "\"" 42 | elif action == "Acknowledge": 43 | message = alert_username + " acknowledged alert: \"" + alert_message + "\"" 44 | elif action == "AddNote": 45 | message = alert_username + " added note \"" + alert_note + "\" to the alert: \"" + alert_message + "\"" 46 | elif action == "Close": 47 | message = alert_username + " closed alert: \"" + alert_message + "\"" 48 | else: 49 | message = alert_username + " executed [" + action + "] action on alert: \"" + alert_message + "\"" 50 | logging.info(LOG_PREFIX + "Will execute " + action + " for alertId " + alert_id) 51 | return message 52 | 53 | 54 | class MemoryStore: 55 | memory_store = {} 56 | 57 | @staticmethod 58 | def store(key, value): 59 | MemoryStore.memory_store[key] = value 60 | 61 | @staticmethod 62 | def lookup(key): 63 | return MemoryStore.memory_store.get(key) 64 | 65 | @staticmethod 66 | def remove(key): 67 | MemoryStore.memory_store.pop(key, None) 68 | 69 | @staticmethod 70 | def reset(): 71 | MemoryStore.memory_store = {} 72 | 73 | 74 | class MUCBot(slixmpp.ClientXMPP): 75 | def __init__(self, jid, password, room, nick): 76 | slixmpp.ClientXMPP.__init__(self, jid, password) 77 | self.room = room 78 | self.nick = nick 79 | self.add_event_handler("session_start", self.start) 80 | 81 | def start(self, event): 82 | self.get_roster() 83 | 84 | self.send_presence() 85 | self.plugin['xep_0045'].join_muc(self.room, 86 | self.nick, 87 | wait=True) 88 | 89 | message = create_message() 90 | self.send_message(mto=self.room, mbody=message, mtype='groupchat') 91 | self.disconnect(wait=True) 92 | 93 | 94 | def parse_field(key, mandatory): 95 | variable = queue_message.get(key) 96 | if variable is None or not variable.strip(): 97 | variable = args.get(key) 98 | if mandatory and not variable: 99 | err_message = 
LOG_PREFIX + " Skipping action, Mandatory conf item " + key + \ 100 | " is missing. Check your configuration file." 101 | logging.error(err_message) 102 | raise ValueError(err_message) 103 | return variable 104 | 105 | 106 | def main(): 107 | global TIMEOUT 108 | logging.info("Will execute " + action + " for alertId " + alert_id) 109 | 110 | jid = parse_field('jid', True) 111 | password = parse_field('password', True) 112 | room = parse_field('room', True) 113 | 114 | xmpp = MUCBot(jid, password, room, 'Opsgenie') 115 | xmpp.register_plugin('xep_0045') 116 | xmpp.connect() 117 | xmpp.process(timeout=TIMEOUT) 118 | 119 | 120 | if __name__ == '__main__': 121 | main() -------------------------------------------------------------------------------- /Zabbix/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "command_url": "http://localhost/zabbix/api_jsonrpc.php", 8 | "user": "Admin", 9 | "password": "zabbix" 10 | }, 11 | "actionMappings": { 12 | "Acknowledge": { 13 | "filepath": "", 14 | "sourceType": "", 15 | "env": [], 16 | "stdout": "" 17 | } 18 | }, 19 | "pollerConf": { 20 | "pollingWaitIntervalInMillis": 100, 21 | "visibilityTimeoutInSec": 30, 22 | "maxNumberOfMessages": 10 23 | }, 24 | "poolConf": { 25 | "maxNumberOfWorker": 12, 26 | "minNumberOfWorker": 4, 27 | "monitoringPeriodInMillis": 15000, 28 | "keepAliveTimeInMillis": 6000, 29 | "queueSize": 0 30 | } 31 | } -------------------------------------------------------------------------------- /Zabbix/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | ######################################## ZABBIX CONFIGURATION ####################################### 2 | #zabbix2opsgenie.logger=warning 3 | logPath = /var/log/opsgenie/send2opsgenie.log 4 | 5 | #responders= 6 | 
#tags= 7 | 8 | ####################################### PROXY CONFIGURATION FOR ZABBIX ############################################ 9 | zabbix2opsgenie.http.proxy.enabled = false 10 | zabbix2opsgenie.http.proxy.port = 11111 11 | zabbix2opsgenie.http.proxy.host = localhost 12 | zabbix2opsgenie.http.proxy.protocol = http 13 | #zabbix2opsgenie.http.proxy.username=admin 14 | #zabbix2opsgenie.http.proxy.password=changeme -------------------------------------------------------------------------------- /Zabbix/native/ogAfter.sh: -------------------------------------------------------------------------------- 1 | chmod 755 /home/opsgenie/oec/opsgenie-zabbix/send2opsgenie 2 | 3 | if id -u zabbix >/dev/null 2>&1; then 4 | usermod -a -G opsgenie zabbix 5 | chown -R zabbix:opsgenie /var/log/opsgenie 6 | else 7 | echo "WARNING : zabbix user does not exist. Please don't forget to add your zabbix user to opsgenie group!" 8 | fi -------------------------------------------------------------------------------- /Zabbix/native/ogBefore.sh: -------------------------------------------------------------------------------- 1 | if [ -z $(getent group opsgenie) ] 2 | then 3 | groupadd opsgenie 4 | fi -------------------------------------------------------------------------------- /Zabbix/opsgenie-zabbix/actionCommand.txt: -------------------------------------------------------------------------------- 1 | /home/opsgenie/oec/opsgenie-zabbix/send2opsgenie -triggerName='{TRIGGER.NAME}' -triggerId='{TRIGGER.ID}' -triggerStatus='{TRIGGER.STATUS}' -triggerSeverity='{TRIGGER.SEVERITY}' -triggerDescription='{TRIGGER.DESCRIPTION}' -triggerUrl='{TRIGGER.URL}' -triggerValue='{TRIGGER.VALUE}' -triggerHostGroupName='{TRIGGER.HOSTGROUP.NAME}' -hostName='{HOST.NAME}' -ipAddress='{IPADDRESS}' -eventId='{EVENT.ID}' -date='{DATE}' -time='{TIME}' -itemKey='{ITEM.KEY}' -itemValue='{ITEM.VALUE}' -recoveryEventStatus='{EVENT.RECOVERY.STATUS}' 
import argparse
import json
import logging
import sys

import requests

parser = argparse.ArgumentParser()
parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True)
parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True)
parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True)
parser.add_argument('-logLevel', '--logLevel', help='Log level', required=True)
parser.add_argument('-command_url', '--command_url', help='The Command URL', required=False)
parser.add_argument('-user', '--user', help='User', required=False)
parser.add_argument('-password', '--password', help='Password', required=False)
args = vars(parser.parse_args())

logging.basicConfig(stream=sys.stdout, level=args['logLevel'])


def parse_field(key, mandatory):
    """Return conf item *key* from the queue payload, falling back to CLI args.

    Raises ValueError when *mandatory* is true and the item is absent in both.
    """
    variable = queue_message.get(key)
    if not variable:
        variable = args.get(key)
    if mandatory and not variable:
        logging.error(LOG_PREFIX + " Skipping action, Mandatory conf item '" + str(key) +
                      "' is missing. Check your configuration file.")
        raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + str(key) +
                         "' is missing. Check your configuration file.")
    return variable


def login_to_zabbix(user, password, url):
    """Log in via the Zabbix JSON-RPC API and return the auth token, or None on failure."""
    login_params = {
        "jsonrpc": "2.0",
        "method": "user.login",
        "params": {
            "user": user,
            "password": password
        },
        "id": 1
    }
    logging.debug(LOG_PREFIX + " Logging in to Zabbix. Url: " + str(url) + " user: " + str(user))
    content_headers = {
        "Content-Type": "application/json"
    }
    login_result = requests.post(url, data=json.dumps(login_params), headers=content_headers, timeout=timeout)
    logging.debug(LOG_PREFIX + " login response: " + str(login_result.status_code) + " " + str(login_result.json()))
    if login_result.json() and not (login_result.json()).get('error'):
        return (login_result.json()).get('result')
    else:
        logging.error(
            LOG_PREFIX + " Cannot login to Zabbix: Response " + str(login_result.status_code) + " " + str(
                login_result.content))


def parse_timeout():
    """Return the HTTP timeout.

    NOTE(review): default 30000 looks like milliseconds but is passed to
    requests' timeout= (seconds).  Kept as-is for backward compatibility -
    confirm the intended unit.
    """
    parsed_timeout = args.get('http.timeout')
    if not parsed_timeout:
        return 30000
    return int(parsed_timeout)


def main():
    """Acknowledge the originating Zabbix event when an Opsgenie alert is acknowledged."""
    global LOG_PREFIX
    global queue_message
    global timeout

    queue_message_string = args['queuePayload']
    queue_message = json.loads(queue_message_string)

    alert_id = queue_message["alert"]["alertId"]
    action = queue_message["action"]
    source = queue_message["source"]

    LOG_PREFIX = "[" + action + "]"

    timeout = parse_timeout()

    logging.info("Will execute " + action + " for alertId " + alert_id)

    username = parse_field('user', True)
    password = parse_field('password', True)
    url = parse_field('command_url', True)

    logging.debug("Username: " + str(username))
    logging.debug("Command Url: " + str(url))
    logging.debug("AlertId: " + str(alert_id))
    logging.debug("Source: " + str(source))
    logging.debug("Action: " + str(action))

    if alert_id:
        # Fetch the alert from Opsgenie first: we need its details (eventId)
        # and report (acknowledgedBy) to build the Zabbix acknowledge call.
        alert_api_url = args['opsgenieUrl'] + "/v2/alerts/" + alert_id
        headers = {
            "Content-Type": "application/json",
            "Accept-Language": "application/json",
            "Authorization": "GenieKey " + args['apiKey']
        }
        alert_response = requests.get(alert_api_url, headers=headers, timeout=timeout)
        if alert_response.status_code < 299 and (alert_response.json()).get('data'):
            if action == "Acknowledge":
                if source and str(source['name']).lower() == "zabbix":
                    # The ack originated from Zabbix itself - avoid an ack loop.
                    logging.warning("Opsgenie alert is already acknowledged by Zabbix. Discarding!!!")
                else:
                    post_params = {
                        "jsonrpc": "2.0",
                        "id": 1,
                        "method": "event.acknowledge",
                        "params": {
                            "eventids": parse_from_details("eventId", alert_response),
                            "message": "Acknowledged by " + alert_response.json()['data']['report'][
                                'acknowledgedBy'] + " via Opsgenie"
                        }
                    }
                    auth = login_to_zabbix(username, password, url)
                    if auth:
                        logging.debug("Posting to Zabbix. Url: " + str(url) + ", params: " + str(post_params))
                        post_params.update({"auth": auth})
                        headers = {
                            "Content-Type": "application/json",
                        }
                        response = requests.post(url, data=json.dumps(post_params), headers=headers, timeout=timeout)
                        # BUG FIX: success was previously judged from
                        # alert_response (the Opsgenie GET) instead of the
                        # Zabbix acknowledge response, so Zabbix errors were
                        # never detected.
                        if response.json() and not (response.json()).get('error'):
                            logging.info("Successfully executed at Zabbix.")
                            logging.debug("Zabbix response: " + str(response.json()))
                        else:
                            logging.warning(
                                "Could not execute at Zabbix. Zabbix Response: " + str(
                                    response.content) + " Status Code: " + str(response.status_code))
                    else:
                        logging.warning(LOG_PREFIX + " Cannot login to Zabbix!")
        else:
            logging.warning("Alert with id [" + str(alert_id) + "] does not exist in Opsgenie. It is probably deleted.")
    else:
        logging.warning("Alert id does not exist ")


def parse_from_details(key, alert_response):
    """Return details[key] from the Opsgenie alert response, or "" when absent."""
    if key in alert_response.json()['data']["details"].keys():
        return alert_response.json()['data']["details"][key]
    return ""


if __name__ == '__main__':
    main()
def parse_timeout():
    """Return the HTTP timeout.

    NOTE(review): default 30000 looks like milliseconds but is passed to
    requests' timeout= (seconds).  Kept as-is for backward compatibility -
    confirm the intended unit.
    """
    parsed_timeout = args.get('http.timeout')
    if not parsed_timeout:
        return 30000
    return int(parsed_timeout)


def login_to_zabbix(user, password, url):
    """Log in via the Zabbix 4.x JSON-RPC API and return the auth token, or None on failure."""
    login_params = {
        "jsonrpc": "2.0",
        "method": "user.login",
        "params": {
            "user": user,
            "password": password
        },
        "id": 1
    }
    logging.debug(LOG_PREFIX + " Logging in to Zabbix. Url: " + str(url) + " user: " + str(user))
    content_headers = {
        "Content-Type": "application/json"
    }
    login_result = requests.post(url, data=json.dumps(login_params), headers=content_headers, timeout=timeout)
    logging.debug(LOG_PREFIX + " login response: " + str(login_result.status_code) + " " + str(login_result.json()))
    if login_result.json() and not login_result.json().get('error'):
        return login_result.json()['result']
    else:
        logging.error(
            LOG_PREFIX + " Cannot login to Zabbix: Response " + str(login_result.status_code) + " " + str(
                login_result.content))


def main():
    """Acknowledge the originating Zabbix 4.x event when an Opsgenie alert is acknowledged."""
    global LOG_PREFIX
    global queue_message
    global timeout

    queue_message_string = args['queuePayload']
    queue_message = json.loads(queue_message_string)

    alert_id = queue_message["alert"]["alertId"]
    action = queue_message["action"]
    source = queue_message["source"]

    LOG_PREFIX = "[" + action + "]"

    timeout = parse_timeout()

    logging.info("Will execute " + str(action) + " for alertId " + str(alert_id))

    username = parse_field('user', True)
    password = parse_field('password', True)
    url = parse_field('command_url', True)

    logging.debug("Username: " + str(username))
    logging.debug("Command Url: " + str(url))
    logging.debug("AlertId: " + str(alert_id))
    logging.debug("Source: " + str(source))
    logging.debug("Action: " + str(action))

    if alert_id:
        # Fetch the alert from Opsgenie first: we need its details (eventId)
        # and report (acknowledgedBy) to build the Zabbix acknowledge call.
        alert_api_url = args['opsgenieUrl'] + "/v2/alerts/" + alert_id
        headers = {
            "Content-Type": "application/json",
            "Accept-Language": "application/json",
            "Authorization": "GenieKey " + args['apiKey']
        }
        alert_response = requests.get(alert_api_url, headers=headers, timeout=timeout)
        if alert_response.status_code < 299 and alert_response.json()['data']:
            if action == "Acknowledge":
                if source and str(source['name']).lower() == "zabbix":
                    # The ack originated from Zabbix itself - avoid an ack loop.
                    logging.warning("OpsGenie alert is already acknowledged by Zabbix. Discarding!!!")
                else:
                    post_params = {
                        "jsonrpc": "2.0",
                        "id": 1,
                        "method": "event.acknowledge",
                        "params": {
                            "eventids": parse_from_details("eventId", alert_response),
                            "message": "Acknowledged by " + alert_response.json()['data']['report'][
                                'acknowledgedBy'] + " via Opsgenie",
                            "action": 6
                        }
                    }
                    auth = login_to_zabbix(username, password, url)
                    if auth:
                        logging.debug("Posting to Zabbix. Url: " + str(url) + ", params: " + str(post_params))
                        post_params.update({"auth": auth})
                        headers = {
                            "Content-Type": "application/json",
                        }
                        response = requests.post(url, data=json.dumps(post_params), headers=headers, timeout=timeout)
                        # BUG FIX: success was previously judged from
                        # alert_response (the Opsgenie GET) instead of the
                        # Zabbix acknowledge response.
                        if response.json() and not response.json().get('error'):
                            logging.info("Successfully executed at Zabbix.")
                            logging.debug("Zabbix response: " + str(response.json()))
                        else:
                            # BUG FIX: bytes/int were concatenated to str here,
                            # raising TypeError on the failure path.
                            logging.warning(
                                "Could not execute at Zabbix. Zabbix Response: " + str(
                                    response.content) + " Status Code: " + str(response.status_code))
                    else:
                        logging.warning(LOG_PREFIX + " Cannot login to Zabbix!")
        else:
            logging.warning("Alert with id [" + str(alert_id) + "] does not exist in Opsgenie. It is probably deleted.")
    else:
        logging.warning("Alert id does not exist ")


def parse_from_details(key, alert_response):
    """Return details[key] from the Opsgenie alert response, or "" when absent."""
    if key in alert_response.json()['data']["details"].keys():
        return alert_response.json()['data']["details"][key]
    return ""


if __name__ == '__main__':
    main()
Check your configuration file.") 30 | return variable 31 | 32 | 33 | def parse_timeout(): 34 | parsed_timeout = args.get('http.timeout') 35 | if not parsed_timeout: 36 | return 30000 37 | return int(parsed_timeout) 38 | 39 | 40 | def login_to_zabbix(user, password, url): 41 | login_params = { 42 | "jsonrpc": "2.0", 43 | "method": "user.login", 44 | "params": { 45 | "username": user, 46 | "password": password 47 | }, 48 | "id": 1 49 | } 50 | logging.debug(LOG_PREFIX + " Logging in to Zabbix. Url: " + str(url) + " user: " + str(user)) 51 | content_headers = { 52 | "Content-Type": "application/json" 53 | } 54 | login_result = requests.post(url, data=json.dumps(login_params), headers=content_headers, timeout=timeout) 55 | logging.debug(LOG_PREFIX + " login response: " + str(login_result.status_code) + " " + str(login_result.json())) 56 | if login_result.json() and not login_result.json().get('error'): 57 | return login_result.json()['result'] 58 | else: 59 | logging.error( 60 | LOG_PREFIX + " Cannot login to Zabbix: Response " + str(login_result.status_code) + " " + str(login_result.content)) 61 | 62 | 63 | def main(): 64 | global LOG_PREFIX 65 | global queue_message 66 | global timeout 67 | 68 | queue_message_string = args['queuePayload'] 69 | queue_message = json.loads(queue_message_string) 70 | 71 | alert_id = queue_message["alert"]["alertId"] 72 | action = queue_message["action"] 73 | source = queue_message["source"] 74 | 75 | LOG_PREFIX = "[" + action + "]" 76 | 77 | timeout = parse_timeout() 78 | 79 | logging.info("Will execute " + str(action) + " for alertId " + str(alert_id)) 80 | 81 | username = parse_field('user', True) 82 | password = parse_field('password', True) 83 | url = parse_field('command_url', True) 84 | 85 | logging.debug("Username: " + str(username)) 86 | logging.debug("Command Url: " + str(url)) 87 | logging.debug("AlertId: " + str(alert_id)) 88 | logging.debug("Source: " + str(source)) 89 | logging.debug("Action: " + str(action)) 90 | 91 | if 
alert_id: 92 | alert_api_url = args['opsgenieUrl'] + "/v2/alerts/" + alert_id 93 | headers = { 94 | "Content-Type": "application/json", 95 | "Accept-Language": "application/json", 96 | "Authorization": "GenieKey " + args['apiKey'] 97 | } 98 | alert_response = requests.get(alert_api_url, headers=headers, timeout=timeout) 99 | if alert_response.status_code < 299 and alert_response.json()['data']: 100 | if action == "Acknowledge": 101 | if source and str(source['name']).lower() == "zabbix": 102 | logging.warning("OpsGenie alert is already acknowledged by Zabbix. Discarding!!!") 103 | else: 104 | post_params = { 105 | "jsonrpc": "2.0", 106 | "id": 1, 107 | "method": "event.acknowledge", 108 | "params": { 109 | "eventids": parse_from_details("eventId", alert_response), 110 | "message": "Acknowledged by " + alert_response.json()['data']['report'][ 111 | 'acknowledgedBy'] + " via Opsgenie", 112 | "action": 6 113 | } 114 | } 115 | auth = login_to_zabbix(username, password, url) 116 | if auth: 117 | logging.debug("Posting to Zabbix. Url: " + str(url) + ", params: " + str(post_params)) 118 | post_params.update({"auth": auth}) 119 | headers = { 120 | "Content-Type": "application/json", 121 | } 122 | response = requests.post(url, data=json.dumps(post_params), headers=headers, timeout=timeout) 123 | if alert_response.json() and not alert_response.json().get('error'): 124 | logging.info("Successfully executed at Zabbix.") 125 | logging.debug("Zabbix response: " + str(response.json())) 126 | else: 127 | logging.warning( 128 | "Could not execute at Zabbix. Zabbix Response: " + response.content + " Status Code: " + response.status_code) 129 | else: 130 | logging.warning(LOG_PREFIX + "Cannot login to Zabbix!") 131 | else: 132 | logging.warning("Alert with id [" + str(alert_id) + "] does not exist in Opsgenie. 
It is probably deleted.") 133 | else: 134 | logging.warning("Alert id does not exist ") 135 | 136 | def parse_from_details(key,alert_response): 137 | if key in alert_response.json()['data']["details"].keys(): 138 | return alert_response.json()['data']["details"][key] 139 | return "" 140 | 141 | if __name__ == '__main__': 142 | main() 143 | -------------------------------------------------------------------------------- /Zendesk/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "apiToken": "", 8 | "zendeskEmail": "", 9 | "subdomain": "" 10 | }, 11 | "actionMappings": { 12 | "createTicket": { 13 | "filepath": "", 14 | "sourceType": "", 15 | "env": [], 16 | "stdout": "" 17 | }, 18 | "addInternalComment": { 19 | "filepath": "", 20 | "sourceType": "", 21 | "env": [], 22 | "stdout": "" 23 | }, 24 | "addPublicComment": { 25 | "filepath": "", 26 | "sourceType": "", 27 | "env": [], 28 | "stdout": "" 29 | }, 30 | "setStatusToOpen": { 31 | "filepath": "", 32 | "sourceType": "", 33 | "env": [], 34 | "stdout": "" 35 | }, 36 | "setStatusToPending": { 37 | "filepath": "", 38 | "sourceType": "", 39 | "env": [], 40 | "stdout": "" 41 | }, 42 | "setStatusToSolved": { 43 | "filepath": "", 44 | "sourceType": "", 45 | "env": [], 46 | "stdout": "" 47 | }, 48 | "setStatusToClosed": { 49 | "filepath": "", 50 | "sourceType": "", 51 | "env": [], 52 | "stdout": "" 53 | } 54 | }, 55 | "pollerConf": { 56 | "pollingWaitIntervalInMillis": 100, 57 | "visibilityTimeoutInSec": 30, 58 | "maxNumberOfMessages": 10 59 | }, 60 | "poolConf": { 61 | "maxNumberOfWorker": 12, 62 | "minNumberOfWorker": 4, 63 | "monitoringPeriodInMillis": 15000, 64 | "keepAliveTimeInMillis": 6000, 65 | "queueSize": 0 66 | } 67 | } -------------------------------------------------------------------------------- 
/Zendesk/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import sys 5 | 6 | import requests 7 | from requests.auth import HTTPBasicAuth 8 | 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('-payload', '--payload', help='Payload from queue', required=True) 11 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 12 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 13 | parser.add_argument('-logLevel', '--logLevel', help='Log level', required=True) 14 | parser.add_argument('-zendeskEmail', '--zendeskEmail', help='Zendesk Email', required=False) 15 | parser.add_argument('-apiToken', '--apiToken', help='Api Token', required=False) 16 | parser.add_argument('-subdomain', '--subdomain', help='Subdomain', required=False) 17 | args = vars(parser.parse_args()) 18 | 19 | logging.basicConfig(stream=sys.stdout, level=args['logLevel']) 20 | 21 | queue_message_string = args['queuePayload'] 22 | queue_message = json.loads(queue_message_string) 23 | 24 | alert_id = queue_message["alert"]["alertId"] 25 | mapped_action = queue_message["mappedActionV2"]["name"] 26 | 27 | LOG_PREFIX = "[" + mapped_action + "]" 28 | 29 | 30 | def parse_field(key, mandatory): 31 | variable = queue_message.get(key) 32 | if not variable: 33 | variable = args.get(key) 34 | if mandatory and not variable: 35 | logging.error(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 36 | "' is missing. Check your configuration file.") 37 | raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 38 | "' is missing. 
Check your configuration file.") 39 | return variable 40 | 41 | 42 | def parse_timeout(): 43 | parsed_timeout = args.get('http.timeout') 44 | if not parsed_timeout: 45 | return 30000 46 | return int(parsed_timeout) 47 | 48 | 49 | def main(): 50 | logging.info("Will execute " + mapped_action + " for alertId " + alert_id) 51 | 52 | zendesk_email = parse_field('zendeskEmail', True) + '/token' 53 | api_token = parse_field('apiToken', True) 54 | 55 | timeout = parse_timeout() 56 | 57 | zendesk_url = queue_message.get("zendeskUrl") 58 | if not zendesk_url: 59 | zendesk_url = "https://" + parse_field('subdomain', True) + ".zendesk.com" 60 | 61 | ticket_id = queue_message.get("ticketId") 62 | result_uri = zendesk_url + "/api/v2/tickets" 63 | 64 | logging.debug("Zendesk Email: " + str(zendesk_email)) 65 | logging.debug("Zendesk Url: " + str(zendesk_url)) 66 | logging.debug("Ticket Id: " + str(ticket_id)) 67 | 68 | content_params = dict() 69 | 70 | if mapped_action == "addInternalComment": 71 | result_uri += "/" + ticket_id + ".json" 72 | content_params = { 73 | "ticket": { 74 | "comment": { 75 | "body": queue_message.get('body'), 76 | "public": False 77 | } 78 | } 79 | } 80 | elif mapped_action == "addPublicComment": 81 | result_uri += "/" + ticket_id + ".json" 82 | content_params = { 83 | "ticket": { 84 | "comment": { 85 | "body": queue_message.get('body'), 86 | "public": True 87 | } 88 | } 89 | } 90 | elif mapped_action == "createTicket": 91 | result_uri += ".json" 92 | content_params = { 93 | "ticket": { 94 | "comment": { 95 | "body": queue_message.get('body'), 96 | "public": False 97 | }, 98 | "external_id": queue_message.get('externalId'), 99 | "subject": queue_message.get('subject'), 100 | "tags": queue_message.get('tags') 101 | } 102 | } 103 | elif mapped_action == "setStatusToClosed": 104 | result_uri += "/" + ticket_id + ".json" 105 | content_params = { 106 | "ticket": { 107 | "comment": { 108 | "body": queue_message.get('body'), 109 | "public": False 110 | }, 111 
| "status": 'closed' 112 | } 113 | } 114 | elif mapped_action == "setStatusToOpen": 115 | result_uri += "/" + ticket_id + ".json" 116 | content_params = { 117 | "ticket": { 118 | "comment": { 119 | "body": queue_message.get('body'), 120 | "public": False 121 | }, 122 | "status": 'open' 123 | } 124 | } 125 | elif mapped_action == "setStatusToSolved": 126 | result_uri += "/" + ticket_id + ".json" 127 | content_params = { 128 | "ticket": { 129 | "comment": { 130 | "body": queue_message.get('body'), 131 | "public": False 132 | }, 133 | "status": 'solved' 134 | } 135 | } 136 | elif mapped_action == "setStatusToPending": 137 | result_uri += "/" + ticket_id + ".json" 138 | content_params = { 139 | "ticket": { 140 | "comment": { 141 | "body": queue_message.get('body'), 142 | "public": False 143 | }, 144 | "status": 'pending' 145 | } 146 | } 147 | 148 | logging.debug("Request Url: " + str(result_uri)) 149 | logging.debug("Request Body: " + str(content_params)) 150 | 151 | token = HTTPBasicAuth(zendesk_email, api_token) 152 | headers = { 153 | "Content-Type": "application/json", 154 | "Accept-Language": "application/json", 155 | } 156 | 157 | if mapped_action == "createTicket": 158 | response = requests.post(result_uri, data=json.dumps(content_params), headers=headers, auth=token, 159 | timeout=timeout) 160 | if response.status_code < 299: 161 | logging.info("Successfully executed at Zendesk") 162 | ticket_from_response = (response.json()).get('ticket') 163 | if ticket_from_response: 164 | ticket_id_from_response = str(ticket_from_response['id']) 165 | if ticket_id_from_response: 166 | alert_api_url = args.get('opsgenieUrl') + "/v2/alerts/" + alert_id + "/details" 167 | content = { 168 | "details": { 169 | "ticket_id": ticket_id_from_response 170 | } 171 | } 172 | alert_api_headers = { 173 | "Content-Type": "application/json", 174 | "Accept-Language": "application/json", 175 | "Authorization": "GenieKey " + args.get('apiKey') 176 | } 177 | logging.info("payload: " + 
json.dumps(content)) 178 | alert_response = requests.post(alert_api_url, 179 | data=json.dumps(content), 180 | headers=alert_api_headers, 181 | timeout=timeout) 182 | if alert_response.status_code < 299: 183 | logging.info("Successfully sent to Opsgenie") 184 | logging.debug( 185 | "Opsgenie response: " + str(alert_response.content) + " " + str(alert_response.status_code)) 186 | else: 187 | logging.warning( 188 | "Could not execute at Opsgenie; response: " + str( 189 | alert_response.content) + " status code: " + str(alert_response.status_code)) 190 | else: 191 | logging.warning( 192 | "Could not execute at Zendesk; response: " + str(response.content) + " status code: " + str( 193 | response.status_code)) 194 | else: 195 | response = requests.put(result_uri, data=json.dumps(content_params), headers=headers, auth=token) 196 | if response.status_code < 299: 197 | logging.info("Successfully executed at Zendesk") 198 | else: 199 | logging.warning( 200 | "Could not execute at Zendesk; response: " + str(response.content) + " status code: " + str( 201 | response.status_code)) 202 | 203 | 204 | if __name__ == '__main__': 205 | main() -------------------------------------------------------------------------------- /Zendesk/triggerInstructions.txt: -------------------------------------------------------------------------------- 1 | - From the "Triggers" page, click "add trigger". 2 | - Put "OpsGenie Create Alert" into "Trigger name", description is optional. 3 | - Under "Meet ALL of the following conditions:", add two conditions as specified below: 4 | - Status Is not Solved 5 | - Ticket Is Created 6 | - Under "Actions:", click "add action", select "Notify target", and pick the URL target you added earlier for the integration. 
7 | - Paste the following into the "Message" field: 8 | action: create || 9 | id: {{ticket.id}} || 10 | status: {{ticket.status}} || 11 | title: {{ticket.title}} || 12 | tags: {{ticket.tags}} || 13 | link: {{ticket.link}} || 14 | external_id: {{ticket.external_id}} || 15 | via: {{ticket.via}} || 16 | priority: {{ticket.priority}} || 17 | ticket_type: {{ticket.ticket_type}} || 18 | score: {{ticket.score}} || 19 | groupname: {{ticket.group.name}} || 20 | due_date: {{ticket.due_date}} || 21 | account: {{ticket.account}} || 22 | assigneename: {{ticket.assignee.name}} || 23 | requestername: {{ticket.requester.name}} || 24 | organizationname: {{ticket.organization.name}} || 25 | in_business_hours: {{ticket.in_business_hours}} || 26 | description: {{ticket.description}} 27 | 28 | - Click "Create". 29 | - After the trigger is saved, click "add trigger" to add the second trigger. 30 | - Put "OpsGenie Add Note" into "Trigger title". 31 | - Under "Meet ALL of the following conditions:", add a condition as specified below: 32 | - Ticket: Is Updated 33 | - Ticket:Status Is Not Solved 34 | - Under "Perform these actions:", select "Notify target" under "Notifications", and pick the URL target you added earlier for the integration. 35 | - Paste the following into the "Message" field: 36 | action: addnote || 37 | id: {{ticket.id}} || 38 | status: {{ticket.status}} || 39 | latest_comment : {{ticket.latest_comment_formatted}} || 40 | tags: {{ticket.tags}} || 41 | external_id: {{ticket.external_id}} 42 | 43 | - Click "Create". 44 | - Now click "add trigger" to add the last one. 45 | - Put "OpsGenie Close Alert" into "Trigger name". 46 | - Under "Meet ANY of the following conditions:", add two conditions as specified below: 47 | - Ticket:Status Is Solved 48 | - Ticket:Status Is Closed 49 | - Under "Actions:", select "Notify target" under "Notifications", and pick the URL target you added earlier for the integration. 
50 | - Paste the following into the "Message" field: 51 | action: close || 52 | id: {{ticket.id}} || 53 | status: {{ticket.status}} || 54 | latest_comment : {{ticket.latest_comment_formatted}} || 55 | tags: {{ticket.tags}} || 56 | external_id: {{ticket.external_id}} 57 | 58 | - Click "Create trigger". 59 | 60 | ----(OPTIONAL)---- 61 | In Zendesk: 62 | - If you want to acknowledge OpsGenie alerts from some action on Zendesk ticket, please follow the following instructions 63 | - Click on "add trigger" to add the trigger. 64 | - Put "OpsGenie Acknowledge" into "Trigger name". 65 | - Please specify the conditions to describe when you want to acknowledge the OpsGenie alert. 66 | - Under "Actions:", select "Notify target" under "Notifications", and pick the URL target you added earlier for the integration. 67 | - Paste the following into the "Message" field: 68 | action: acknowledge || 69 | id: {{ticket.id}} || 70 | status: {{ticket.status}} || 71 | tags: {{ticket.tags}} || 72 | latest_comment : {{ticket.latest_comment_formatted}} || 73 | external_id: {{ticket.external_id}} 74 | 75 | - Click "Create trigger". 76 | 77 | In OpsGenie: 78 | - Open the Zendesk integrations page. 79 | - Click on the "Advanced" tab. 80 | - Add an "Acknowledge Alert" action. 81 | - Under filter, select "Match all conditions below". 82 | - Add "Action Contains acknowledge" as a condition. 83 | - Save the integration. 
84 | 85 | 86 | 87 | -------------------------------------------------------------------------------- /Zenoss/conf/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiKey": "", 3 | "baseUrl": "https://api.opsgenie.com", 4 | "logLevel": "DEBUG", 5 | "globalArgs": [], 6 | "globalFlags": { 7 | "command_url": "http://localhost:8080/zport/dmd/evconsole_router", 8 | "username": "Admin", 9 | "password": "zenoss" 10 | }, 11 | "actionMappings": { 12 | "Acknowledge": { 13 | "filepath": "", 14 | "sourceType": "", 15 | "env": [], 16 | "stdout": "" 17 | }, 18 | "Close": { 19 | "filepath": "", 20 | "sourceType": "", 21 | "env": [], 22 | "stdout": "" 23 | } 24 | }, 25 | "pollerConf": { 26 | "pollingWaitIntervalInMillis": 100, 27 | "visibilityTimeoutInSec": 30, 28 | "maxNumberOfMessages": 10 29 | }, 30 | "poolConf": { 31 | "maxNumberOfWorker": 12, 32 | "minNumberOfWorker": 4, 33 | "monitoringPeriodInMillis": 15000, 34 | "keepAliveTimeInMillis": 6000, 35 | "queueSize": 0 36 | } 37 | } -------------------------------------------------------------------------------- /Zenoss/conf/opsgenie-integration.conf: -------------------------------------------------------------------------------- 1 | ################################### ZENOSS2OPSGENIE CONFIGURATION ############################### 2 | #you can define log levels: warning, debug, info,error 3 | zenoss2opsgenie.logger = warning 4 | zenoss2opsgenie.timeout = 60 5 | logPath = /var/log/opsgenie/send2opsgenie.log 6 | #responders= 7 | #tags= 8 | 9 | ####################################### PROXY CONFIGURATION FOR ZENOSS ############################################ 10 | zenoss2opsgenie.http.proxy.enabled = false 11 | zenoss2opsgenie.http.proxy.port = 11111 12 | zenoss2opsgenie.http.proxy.host = localhost 13 | zenoss2opsgenie.http.proxy.protocol = http 14 | #zenoss2opsgenie.http.proxy.username=admin 15 | #zenoss2opsgenie.http.proxy.password=changeme 
-------------------------------------------------------------------------------- /Zenoss/native/ogAfter.sh: -------------------------------------------------------------------------------- 1 | chmod 755 /home/opsgenie/oec/opsgenie-zenoss/send2opsgenie 2 | 3 | if id -u zenoss >/dev/null 2>&1; then 4 | usermod -a -G opsgenie zenoss 5 | chown -R zenoss:opsgenie /var/log/opsgenie 6 | else 7 | echo "WARNING : zenoss user does not exist. Please don't forget to add your zenoss user to opsgenie group!" 8 | fi -------------------------------------------------------------------------------- /Zenoss/scripts/actionExecutor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import base64 3 | import json 4 | import logging 5 | import sys 6 | 7 | import requests 8 | 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True) 11 | parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True) 12 | parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True) 13 | parser.add_argument('-logLevel', '--logLevel', help='Log Level', required=True) 14 | parser.add_argument('-command_url', '--command_url', help='The url', required=False) 15 | parser.add_argument('-username', '--username', help='Username', required=False) 16 | parser.add_argument('-password', '--password', help='Password', required=False) 17 | args = vars(parser.parse_args()) 18 | 19 | logging.basicConfig(stream=sys.stdout, level=args['logLevel']) 20 | 21 | 22 | def parse_field(key, mandatory): 23 | variable = queue_message.get(key) 24 | if not variable: 25 | variable = args.get(key) 26 | if mandatory and not variable: 27 | logging.error(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 28 | "' is missing. 
Check your configuration file.") 29 | raise ValueError(LOG_PREFIX + " Skipping action, Mandatory conf item '" + key + 30 | "' is missing. Check your configuration file.") 31 | return variable 32 | 33 | 34 | def parse_timeout(): 35 | parsed_timeout = args.get('http.timeout') 36 | if not parsed_timeout: 37 | return 30000 38 | return int(parsed_timeout) 39 | 40 | 41 | def main(): 42 | global LOG_PREFIX 43 | global queue_message 44 | 45 | queue_message_string = args['queuePayload'] 46 | queue_message = json.loads(queue_message_string) 47 | 48 | timeout = parse_timeout() 49 | 50 | alert_id = queue_message["alert"]["alertId"] 51 | action = queue_message["action"] 52 | source = queue_message["source"] 53 | 54 | LOG_PREFIX = "[" + action + "]" 55 | 56 | logging.info("Will execute " + str(action) + " for alertId " + str(alert_id)) 57 | 58 | username = parse_field('username', True) 59 | password = parse_field('password', True) 60 | url = parse_field('command_url', True) 61 | 62 | logging.debug("Username: " + str(username)) 63 | logging.debug("Command Url: " + str(url)) 64 | logging.debug("AlertId: " + str(alert_id)) 65 | logging.debug("Source: " + str(source)) 66 | logging.debug("Action: " + str(action)) 67 | 68 | token = base64.b64encode((username + ":" + password).encode('US-ASCII')) 69 | headers = { 70 | "Content-Type": "application/json", 71 | "Accept-Language": "application/json", 72 | "Authorization": "Basic " + bytes(token).decode("US-ASCII") 73 | } 74 | if alert_id: 75 | opsgenie_alert_api_url = args['opsgenieUrl'] + "/v2/alerts/" + alert_id 76 | opsgenie_alert_api_headers = { 77 | "Content-Type": "application/json", 78 | "Accept-Language": "application/json", 79 | "Authorization": "GenieKey " + args['apiKey'] 80 | } 81 | alert_response = requests.get(opsgenie_alert_api_url, headers=opsgenie_alert_api_headers, timeout=timeout) 82 | if alert_response.status_code < 299 and alert_response.json()['data']: 83 | post_params = { 84 | "action": "EventsRouter", 85 | "data": { 
86 | "evids": [alert_response.json()["data"]["alias"]] 87 | }, 88 | "type": "rpc", 89 | "tid": alert_id 90 | } 91 | discard_action = False 92 | if action == "Acknowledge": 93 | if source and str(source['name']).lower() == "zenoss": 94 | logging.warning("Opsgenie alert is already acknowledged by zenoss. Discarding!!!") 95 | discard_action = True 96 | else: 97 | post_params.update({"method": "acknowledge"}) 98 | elif action == "Close": 99 | if source and str(source['name']).lower() == "zenoss": 100 | logging.warning("Opsgenie alert is already closed by zenoss. Discarding!!!") 101 | discard_action = True 102 | else: 103 | post_params.update({"method": "close"}) 104 | 105 | if not discard_action: 106 | logging.debug("Posting to Zenoss. Command Url: " + str(url) + ", params: " + str(post_params)) 107 | response = requests.post(url, data=json.dumps(post_params), headers=headers, timeout=timeout) 108 | if response.status_code == 200: 109 | logging.info("Successfully executed at Zenoss.") 110 | logging.debug("Zenoss response: " + str(response.content)) 111 | else: 112 | logging.warning( 113 | "Could not execute at Zenoss. Zenoss Response: " + str(response.content) + " Status Code: " + str(response.status_code)) 114 | else: 115 | logging.warning("Alert with id [" + str(alert_id) + "] does not exist in OpsGenie. 
It is probably deleted.") 116 | else: 117 | logging.warning("Alert id does not exist ") 118 | 119 | 120 | if __name__ == '__main__': 121 | main() 122 | -------------------------------------------------------------------------------- /release/oec-builder/oec-deb/DEBIAN/control: -------------------------------------------------------------------------------- 1 | Name: Opsgenie Edge Connector (OEC) 2 | Description: Integration server for Connecting On-Premise Monitoring and ITSM Tools 3 | Website: https://www.opsgenie.com 4 | Maintainer: Opsgenie 5 | Package: %INTEGRATION% 6 | Section: %INTEGRATION% 7 | Version: %VERSION% 8 | Architecture: amd64 9 | -------------------------------------------------------------------------------- /release/oec-builder/oec-deb/DEBIAN/postinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | chown -R opsgenie:opsgenie /home/opsgenie 3 | chown -R opsgenie:opsgenie /var/log/opsgenie 4 | 5 | chmod +x /etc/systemd/system/oec.service 6 | chmod +x /usr/local/bin/OpsgenieEdgeConnector 7 | systemctl daemon-reload 8 | systemctl enable oec 9 | -------------------------------------------------------------------------------- /release/oec-builder/oec-deb/DEBIAN/preinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ ! -d "/var/log/opsgenie" ]; then 3 | mkdir /var/log/opsgenie 4 | fi 5 | 6 | if [ ! 
-d "/home/opsgenie" ]; then 7 | mkdir /home/opsgenie 8 | fi 9 | 10 | if [ -z $(getent passwd opsgenie) ]; then 11 | groupadd opsgenie -r 12 | useradd -g opsgenie opsgenie -r -d /home/opsgenie 13 | fi 14 | -------------------------------------------------------------------------------- /release/oec-builder/oec-deb/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION=${GO_VERSION:-1.12.1} 2 | 3 | FROM golang:${GO_VERSION}-stretch 4 | 5 | RUN apt-get update && \ 6 | apt-get -y install rpm zip jq 7 | 8 | #RUN useradd 1000 9 | #USER 1000 10 | #WORKDIR /home/1000 -------------------------------------------------------------------------------- /release/oec-builder/oec-deb/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p /oec-deb/usr/local/bin 4 | s3URL=https://og-release-cicd-public-oregon.s3-us-west-2.amazonaws.com/purpose%3Dpublic/project%3Doec-builder/env%3Dprod/branch%3D${BRANCH}/module%3Doec-builder/version%3D${OEC_VERSION}/oec-linux-${OEC_VERSION}.zip 5 | curl ${s3URL} --output ./oec.zip --silent --show-error --create-dirs 6 | unzip ./oec.zip -d /oec-deb/usr/local/bin 7 | 8 | cp -R $INPUT/. /oec-deb && \ 9 | mkdir -p /oec-deb/home/opsgenie/oec/ && \ 10 | cp -R $OEC_SCRIPTS_REPO/$INTEGRATION/. 
/oec-deb/home/opsgenie/oec && \ 11 | cp $OEC_SCRIPTS_REPO/release/oec-builder/oecScriptsVersion.json /oec-deb/home/opsgenie/oec && \ 12 | 13 | INTEGRATION_VERSION=$(jq -r --arg v "$INTEGRATION" '.[$v]' /oec-deb/home/opsgenie/oec/oecScriptsVersion.json) 14 | rm /oec-deb/home/opsgenie/oec/oecScriptsVersion.json && \ 15 | 16 | INTEGRATION_NAME=$(echo "$INTEGRATION" | awk '{print tolower($0)}') 17 | INTEGRATION_PATH=opsgenie-${INTEGRATION_NAME} 18 | 19 | ####incoming part####### 20 | INCOMING_PATH=/oec-deb/home/opsgenie/oec/${INTEGRATION_PATH} 21 | if [ -d "$INCOMING_PATH" ]; then 22 | go get -u github.com/alexcesaro/log && \ 23 | cd ${INCOMING_PATH} && \ 24 | GOOS=linux GOARCH=amd64 go build -o send2opsgenie send2opsgenie.go 25 | fi 26 | ######################## 27 | 28 | sed -i "s||/home/opsgenie/oec/scripts/actionExecutor.py|" /oec-deb/home/opsgenie/oec/conf/config.json 29 | sed -i "s||/home/opsgenie/oec/output/output.txt|" /oec-deb/home/opsgenie/oec/conf/config.json 30 | sed -i "s//local/g" /oec-deb/home/opsgenie/oec/conf/config.json 31 | 32 | sed -i "s/%VERSION%/${INTEGRATION_VERSION}/g" /oec-deb/DEBIAN/control && \ 33 | sed -i "s/%INTEGRATION%/${INTEGRATION_PATH}/g" /oec-deb/DEBIAN/control && \ 34 | 35 | cd ~ && \ 36 | mkdir -p $OUTPUT/oec-packages-$OEC_VERSION/${INTEGRATION_PATH} && \ 37 | mkdir /${INTEGRATION_PATH} && \ 38 | mv /oec-deb/* /${INTEGRATION_PATH} && \ 39 | 40 | mkdir /deb-package && \ 41 | dpkg-deb -b /${INTEGRATION_PATH} /deb-package && \ 42 | cp -R /deb-package/${INTEGRATION_PATH}* $OUTPUT/oec-packages-$OEC_VERSION/${INTEGRATION_PATH}/ 43 | -------------------------------------------------------------------------------- /release/oec-builder/oec-deb/etc/systemd/system/oec.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Opsgenie Edge Connector (OEC) 3 | 4 | [Service] 5 | Group=opsgenie 6 | User=opsgenie 7 | Type=simple 8 | ExecStart=/usr/local/bin/OpsgenieEdgeConnector 9 | 10 | 
Environment="OEC_CONF_SOURCE_TYPE=local" 11 | Environment="OEC_CONF_LOCAL_FILEPATH=/home/opsgenie/oec/conf/config.json" 12 | 13 | [Install] 14 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /release/oec-builder/oec-linux/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION=${GO_VERSION:-1.12.1} 2 | 3 | FROM golang:${GO_VERSION}-stretch 4 | 5 | RUN apt-get update && \ 6 | apt-get -y install rpm zip jq 7 | 8 | #RUN useradd 1000 9 | #USER 1000 10 | #WORKDIR /home/1000 -------------------------------------------------------------------------------- /release/oec-builder/oec-linux/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir /oec-linux 4 | s3URL=https://og-release-cicd-public-oregon.s3-us-west-2.amazonaws.com/purpose%3Dpublic/project%3Doec-builder/env%3Dprod/branch%3D${BRANCH}/module%3Doec-builder/version%3D${OEC_VERSION}/oec-linux-${OEC_VERSION}.zip 5 | curl ${s3URL} --output ./oec.zip --silent --show-error --create-dirs 6 | unzip ./oec.zip -d /oec-linux 7 | 8 | cp -R $OEC_SCRIPTS_REPO/$INTEGRATION/. 
/oec-linux && \ 9 | cp $OEC_SCRIPTS_REPO/release/oec-builder/oecScriptsVersion.json /oec-linux && \ 10 | 11 | INTEGRATION_VERSION=$(jq -r --arg v "$INTEGRATION" '.[$v]' /oec-linux/oecScriptsVersion.json) 12 | rm /oec-linux/oecScriptsVersion.json && \ 13 | 14 | INTEGRATION_NAME=$(echo "$INTEGRATION" | awk '{print tolower($0)}') 15 | INTEGRATION_PATH=opsgenie-${INTEGRATION_NAME} 16 | 17 | #########incoming part########## 18 | INCOMING_PATH=/oec-linux/${INTEGRATION_PATH} 19 | if [ -d "$INCOMING_PATH" ]; then 20 | go get -u github.com/alexcesaro/log && \ 21 | cd ${INCOMING_PATH} && \ 22 | GOOS=linux GOARCH=amd64 go build -o send2opsgenie send2opsgenie.go 23 | fi 24 | ################################ 25 | 26 | cd ~ && \ 27 | mkdir -p $OUTPUT/oec-packages-$OEC_VERSION/${INTEGRATION_PATH} && \ 28 | mkdir /${INTEGRATION_PATH} && \ 29 | mv /oec-linux/* /${INTEGRATION_PATH} && \ 30 | 31 | zip -r ${INTEGRATION_PATH}-${INTEGRATION_VERSION}-linux-amd64.zip /${INTEGRATION_PATH} && \ 32 | cp -R ${INTEGRATION_PATH}* $OUTPUT/oec-packages-$OEC_VERSION/${INTEGRATION_PATH}/ 33 | -------------------------------------------------------------------------------- /release/oec-builder/oec-rpm/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION=${GO_VERSION:-1.12.1} 2 | 3 | FROM golang:${GO_VERSION}-stretch 4 | 5 | RUN apt-get update && \ 6 | apt-get -y install rpm zip jq 7 | 8 | #RUN useradd 1000 9 | #USER 1000 10 | #WORKDIR /home/1000 -------------------------------------------------------------------------------- /release/oec-builder/oec-rpm/SPECS/oec-rhel6.spec: -------------------------------------------------------------------------------- 1 | Name: %INTEGRATION%-rhel6 2 | Version: %VERSION% 3 | Summary: OEC (%INTEGRATION%) for Connecting On-Premise Monitoring and ITSM Tools 4 | Release: 1 5 | License: Apache-2.0 6 | URL: https://www.opsgenie.com/ 7 | Group: System 8 | Packager: Opsgenie 9 | BuildRoot: ~/rpmbuild/ 10 | 11 | 
%description 12 | Opsgenie Edge Connector (OEC) is designed to resolve challenges faced in the integration of internal and external systems. 13 | 14 | %prep 15 | echo "BUILDROOT = $RPM_BUILD_ROOT" 16 | mkdir -p $RPM_BUILD_ROOT/usr/local/bin/ 17 | mkdir -p $RPM_BUILD_ROOT/home/opsgenie/oec/ 18 | cp $GITHUB_WORKSPACE/.release/oec-rpm/OpsgenieEdgeConnector $RPM_BUILD_ROOT/usr/local/bin/ 19 | cp -R $GITHUB_WORKSPACE/.release/oec-rpm/oec-scripts/. $RPM_BUILD_ROOT/home/opsgenie/oec/ 20 | 21 | mkdir -p $RPM_BUILD_ROOT/etc/init.d/ 22 | cp $GITHUB_WORKSPACE/.release/oec-rpm/rhel6-service/oec $RPM_BUILD_ROOT/etc/init.d/ 23 | 24 | %pre 25 | if [ ! -d "/var/log/opsgenie" ]; then 26 | mkdir /var/log/opsgenie 27 | fi 28 | 29 | if [ ! -d "/home/opsgenie" ]; then 30 | mkdir /home/opsgenie 31 | fi 32 | 33 | if [ -z $(getent passwd opsgenie) ]; then 34 | groupadd opsgenie -r 35 | useradd -g opsgenie opsgenie -r -d /home/opsgenie 36 | fi 37 | 38 | %post 39 | chown -R opsgenie:opsgenie /home/opsgenie 40 | chown -R opsgenie:opsgenie /var/log/opsgenie 41 | 42 | chmod +x /usr/local/bin/OpsgenieEdgeConnector 43 | 44 | chmod +x /etc/init.d/oec 45 | service oec start 46 | 47 | %postun 48 | service oec stop 49 | rm /etc/init.d/oec 50 | 51 | %files 52 | /usr/local/bin/OpsgenieEdgeConnector 53 | /etc/init.d/oec 54 | /home/opsgenie/oec/ 55 | 56 | %changelog 57 | -------------------------------------------------------------------------------- /release/oec-builder/oec-rpm/SPECS/oec.spec: -------------------------------------------------------------------------------- 1 | Name: %INTEGRATION% 2 | Version: %VERSION% 3 | Summary: OEC (%INTEGRATION%) for Connecting On-Premise Monitoring and ITSM Tools 4 | Release: 1 5 | License: Apache-2.0 6 | URL: https://www.opsgenie.com/ 7 | Group: System 8 | Packager: Opsgenie 9 | BuildRoot: . 10 | 11 | %description 12 | Opsgenie Edge Connector (OEC) is designed to resolve challenges faced in the integration of internal and external systems. 
13 | 14 | %prep 15 | echo "BUILDROOT = $RPM_BUILD_ROOT" 16 | mkdir -p $RPM_BUILD_ROOT/usr/local/bin/ 17 | mkdir -p $RPM_BUILD_ROOT/etc/systemd/system/ 18 | mkdir -p $RPM_BUILD_ROOT/home/opsgenie/oec/ 19 | cp $GITHUB_WORKSPACE/.release/oec-rpm/OpsgenieEdgeConnector $RPM_BUILD_ROOT/usr/local/bin/ 20 | cp $GITHUB_WORKSPACE/.release/oec-rpm/oec.service $RPM_BUILD_ROOT/etc/systemd/system/ 21 | cp -R $GITHUB_WORKSPACE/.release/oec-rpm/oec-scripts/. $RPM_BUILD_ROOT/home/opsgenie/oec/ 22 | 23 | %pre 24 | if [ ! -d "/var/log/opsgenie" ]; then 25 | mkdir /var/log/opsgenie 26 | fi 27 | 28 | if [ ! -d "/home/opsgenie" ]; then 29 | mkdir /home/opsgenie 30 | fi 31 | 32 | if [ -z $(getent passwd opsgenie) ]; then 33 | groupadd opsgenie -r 34 | useradd -g opsgenie opsgenie -r -d /home/opsgenie 35 | fi 36 | 37 | %post 38 | chown -R opsgenie:opsgenie /home/opsgenie 39 | chown -R opsgenie:opsgenie /var/log/opsgenie 40 | 41 | chmod +x /etc/systemd/system/oec.service 42 | chmod +x /usr/local/bin/OpsgenieEdgeConnector 43 | systemctl daemon-reload 44 | systemctl enable oec 45 | 46 | %postun 47 | systemctl daemon-reload 48 | 49 | %files 50 | /usr/local/bin/OpsgenieEdgeConnector 51 | /etc/systemd/system/oec.service 52 | /home/opsgenie/oec/ 53 | 54 | %changelog 55 | * Mon Jan 28 2019 Emel Komurcu 56 | - 1.0 r1 First release -------------------------------------------------------------------------------- /release/oec-builder/oec-rpm/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p /rpmbuild 4 | s3URL=https://og-release-cicd-public-oregon.s3-us-west-2.amazonaws.com/purpose%3Dpublic/project%3Doec-builder/env%3Dprod/branch%3D${BRANCH}/module%3Doec-builder/version%3D${OEC_VERSION}/oec-linux-${OEC_VERSION}.zip 5 | curl ${s3URL} --output ./oec.zip --silent --show-error --create-dirs 6 | unzip ./oec.zip -d /rpmbuild 7 | 8 | cp -R $INPUT/. /rpmbuild && \ 9 | cp -R $OEC_SCRIPTS_REPO/$INTEGRATION/. 
sed -i "s|<filePath>|$RPM_BUILD_ROOT/home/opsgenie/oec/scripts/actionExecutor.py|" /rpmbuild/oec-scripts/conf/config.json 29 | sed -i "s|<outputFilePath>|$RPM_BUILD_ROOT/home/opsgenie/oec/output/output.txt|" /rpmbuild/oec-scripts/conf/config.json 30 | sed -i "s/<sourceType>/local/g" /rpmbuild/oec-scripts/conf/config.json
/release/oec-builder/oec-rpm/oec.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Opsgenie Edge Connector (OEC) 3 | Wants=network-online.target 4 | After=network.target network-online.target 5 | 6 | [Service] 7 | Group=opsgenie 8 | User=opsgenie 9 | Type=simple 10 | ExecStart=/usr/local/bin/OpsgenieEdgeConnector 11 | 12 | Environment="OEC_CONF_SOURCE_TYPE=local" 13 | Environment="OEC_CONF_LOCAL_FILEPATH=/home/opsgenie/oec/conf/config.json" 14 | 15 | [Install] 16 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /release/oec-builder/oec-rpm/rhel6-service/oec: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # description: Opsgenie Edge Connector (OEC) 4 | # 5 | # Below is the source function library 6 | # 7 | . /etc/init.d/functions 8 | 9 | exec="/usr/local/bin/OpsgenieEdgeConnector" 10 | prog="OEC" 11 | config="/home/opsgenie/oec/conf/config.json" 12 | 13 | export OEC_CONF_SOURCE_TYPE="local" 14 | export OEC_CONF_LOCAL_FILEPATH="/home/opsgenie/oec/conf/config.json" 15 | 16 | start() { 17 | echo -n "Starting Opsgenie Edge Connector (OEC)" 18 | $exec 19 | } 20 | 21 | stop() { 22 | echo -n "Stopping Opsgenie Edge Connector (OEC)" 23 | kill -15 $(pgrep -f OpsgenieEdgeConnector) 24 | } 25 | 26 | 27 | case "$1" in 28 | start) 29 | start 30 | ;; 31 | stop) 32 | stop 33 | ;; 34 | restart) 35 | echo -n "Restarting Opsgenie Edge Connector (OEC)" 36 | stop 37 | start 38 | ;; 39 | *) 40 | echo "Usage: $0 {start|stop|restart}" 41 | exit 1 42 | ;; 43 | esac -------------------------------------------------------------------------------- /release/oec-builder/oec-win32/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION=${GO_VERSION:-1.12.1} 2 | 3 | FROM golang:${GO_VERSION}-stretch 4 | 5 | RUN apt-get update && \ 6 | apt-get -y install rpm 
zip jq 7 | 8 | #RUN useradd 1000 9 | #USER 1000 10 | #WORKDIR /home/1000 -------------------------------------------------------------------------------- /release/oec-builder/oec-win32/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir /oec-win32 4 | s3URL=https://og-release-cicd-public-oregon.s3-us-west-2.amazonaws.com/purpose%3Dpublic/project%3Doec-builder/env%3Dprod/branch%3D${BRANCH}/module%3Doec-builder/version%3D${OEC_VERSION}/oec-win32-${OEC_VERSION}.zip 5 | curl ${s3URL} --output ./oec.zip --silent --show-error --create-dirs 6 | unzip ./oec.zip -d /oec-win32 7 | 8 | cp -R $INPUT/oecService.json.example /oec-win32 && \ 9 | cp -R $OEC_SCRIPTS_REPO/$INTEGRATION/. /oec-win32 && \ 10 | cp $OEC_SCRIPTS_REPO/release/oec-builder/oecScriptsVersion.json /oec-win32 && \ 11 | 12 | INTEGRATION_VERSION=$(jq -r --arg v "$INTEGRATION" '.[$v]' /oec-win32/oecScriptsVersion.json) 13 | rm /oec-win32/oecScriptsVersion.json && \ 14 | 15 | INTEGRATION_NAME=$(echo "$INTEGRATION" | awk '{print tolower($0)}') 16 | INTEGRATION_PATH=opsgenie-${INTEGRATION_NAME} 17 | 18 | #########incoming part########## 19 | INCOMING_PATH=/oec-win32/${INTEGRATION_PATH} 20 | if [ -d "$INCOMING_PATH" ]; then 21 | go get -u github.com/alexcesaro/log && \ 22 | cd ${INCOMING_PATH} && \ 23 | GOOS=windows GOARCH=386 go build -o send2opsgenie32.exe send2opsgenie.go 24 | fi 25 | ################################ 26 | 27 | cd ~ && \ 28 | mkdir -p $OUTPUT/oec-packages-$OEC_VERSION/${INTEGRATION_PATH} && \ 29 | mkdir /${INTEGRATION_PATH} && \ 30 | mv /oec-win32/* /${INTEGRATION_PATH} && \ 31 | 32 | zip -r ${INTEGRATION_PATH}-$INTEGRATION_VERSION-win-386.zip /${INTEGRATION_PATH} && \ 33 | cp -R ${INTEGRATION_PATH}* $OUTPUT/oec-packages-$OEC_VERSION/${INTEGRATION_PATH}/ -------------------------------------------------------------------------------- /release/oec-builder/oec-win32/oecService.json.example: 
-------------------------------------------------------------------------------- 1 | { 2 | "Name": "OEC Service", 3 | "DisplayName": "OEC Service", 4 | "Description": "Run the OEC Service", 5 | "OECPath" : "C:\\opsgenie\\opsgenie-oec\\OpsgenieEdgeConnector32.exe", 6 | "Args" : ["-oec-metrics", "7070"], 7 | "Env": [ 8 | "OEC_CONF_LOCAL_FILEPATH=C:\\opsgenie\\opsgenie-oec\\conf\\config.json", 9 | "OEC_CONF_SOURCE_TYPE=local" 10 | ], 11 | "Stderr": "C:\\opsgenie\\opsgenie-oec\\output\\oecErr.log", 12 | "Stdout": "C:\\opsgenie\\opsgenie-oec\\output\\oecOut.log" 13 | } -------------------------------------------------------------------------------- /release/oec-builder/oec-win64/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION=${GO_VERSION:-1.12.1} 2 | 3 | FROM golang:${GO_VERSION}-stretch 4 | 5 | RUN apt-get update && \ 6 | apt-get -y install rpm zip jq 7 | 8 | #RUN useradd 1000 9 | #USER 1000 10 | #WORKDIR /home/1000 -------------------------------------------------------------------------------- /release/oec-builder/oec-win64/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir /oec-win64 4 | s3URL=https://og-release-cicd-public-oregon.s3-us-west-2.amazonaws.com/purpose%3Dpublic/project%3Doec-builder/env%3Dprod/branch%3D${BRANCH}/module%3Doec-builder/version%3D${OEC_VERSION}/oec-win64-${OEC_VERSION}.zip 5 | curl ${s3URL} --output ./oec.zip --silent --show-error --create-dirs 6 | unzip ./oec.zip -d /oec-win64 7 | 8 | cp -R $INPUT/oecService.json.example /oec-win64 && \ 9 | cp -R $OEC_SCRIPTS_REPO/$INTEGRATION/. 
/oec-win64 && \ 10 | cp $OEC_SCRIPTS_REPO/release/oec-builder/oecScriptsVersion.json /oec-win64 && \ 11 | 12 | INTEGRATION_VERSION=$(jq -r --arg v "$INTEGRATION" '.[$v]' /oec-win64/oecScriptsVersion.json) 13 | rm /oec-win64/oecScriptsVersion.json && \ 14 | 15 | INTEGRATION_NAME=$(echo "$INTEGRATION" | awk '{print tolower($0)}') 16 | INTEGRATION_PATH=opsgenie-${INTEGRATION_NAME} 17 | 18 | #########incoming part########## 19 | INCOMING_PATH=/oec-win64/${INTEGRATION_PATH} 20 | if [ -d "$INCOMING_PATH" ]; then 21 | go get -u github.com/alexcesaro/log && \ 22 | cd ${INCOMING_PATH} && \ 23 | GOOS=windows GOARCH=amd64 go build -o send2opsgenie64.exe send2opsgenie.go 24 | fi 25 | ################################ 26 | 27 | cd ~ && \ 28 | mkdir -p $OUTPUT/oec-packages-$OEC_VERSION/${INTEGRATION_PATH} && \ 29 | mkdir /${INTEGRATION_PATH} && \ 30 | mv /oec-win64/* /${INTEGRATION_PATH} && \ 31 | 32 | zip -r ${INTEGRATION_PATH}-$INTEGRATION_VERSION-win-amd64.zip /${INTEGRATION_PATH} && \ 33 | cp -R ${INTEGRATION_PATH}* $OUTPUT/oec-packages-$OEC_VERSION/${INTEGRATION_PATH}/ -------------------------------------------------------------------------------- /release/oec-builder/oec-win64/oecService.json.example: -------------------------------------------------------------------------------- 1 | { 2 | "Name": "OEC Service", 3 | "DisplayName": "OEC Service", 4 | "Description": "Run the OEC Service", 5 | "OECPath" : "C:\\opsgenie\\opsgenie-oec\\OpsgenieEdgeConnector64.exe", 6 | "Args" : ["-oec-metrics", "7070"], 7 | "Env": [ 8 | "OEC_CONF_LOCAL_FILEPATH=C:\\opsgenie\\opsgenie-oec\\conf\\config.json", 9 | "OEC_CONF_SOURCE_TYPE=local" 10 | ], 11 | "Stderr": "C:\\opsgenie\\opsgenie-oec\\output\\oecErr.log", 12 | "Stdout": "C:\\opsgenie\\opsgenie-oec\\output\\oecOut.log" 13 | } -------------------------------------------------------------------------------- /release/oec-builder/oecScriptsVersion.json: -------------------------------------------------------------------------------- 1 | { 2 
| "oecVersion": "1.1.3", 3 | "BMCFootprintsV11": "1.1.3", 4 | "BMCFootprintsV12": "1.1.3", 5 | "BMCRemedy": "1.1.3", 6 | "Cherwell": "1.1.4", 7 | "DynatraceAppMon": "1.1.3", 8 | "Icinga": "1.1.5", 9 | "Icinga2": "1.1.8", 10 | "Jira": "1.1.5", 11 | "JiraServiceDesk": "1.1.6", 12 | "LibreNMS": "1.1.3", 13 | "Marid": "1.1.3", 14 | "Nagios": "1.1.6", 15 | "NagiosXI": "1.1.5", 16 | "OEC": "1.1.3", 17 | "OP5": "1.1.3", 18 | "PRTG": "1.1.3", 19 | "Solarwinds": "1.1.3", 20 | "SolarwindsMSPNCentral": "1.1.3", 21 | "SolarwindsWebHelpdesk": "1.1.3", 22 | "Splunk": "1.1.4", 23 | "Trackit": "1.1.3", 24 | "Xmpp": "1.1.3", 25 | "Zabbix": "1.1.10", 26 | "Zendesk": "1.1.3", 27 | "Zenoss": "1.1.3" 28 | } 29 | -------------------------------------------------------------------------------- /release/oec-builder/readmeIterationCount: -------------------------------------------------------------------------------- 1 | 6 2 | --------------------------------------------------------------------------------