├── .gitignore ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── azure-delete-empty-loadbalancers ├── README.md ├── azure-delete-empty-loadbalancers.png ├── azure-delete-empty-loadbalancers.yaml └── filter-loadbalancers.py ├── azure-delete-unused-nics ├── README.md ├── azure-delete-unused-nics.png ├── azure-delete-unused-nics.yaml └── filter-nics.py ├── azure-disk-reaper ├── README.md ├── azure-disk-reaper.png ├── azure-disk-reaper.yaml └── filter-disks.py ├── azure-vm-reaper ├── README.md ├── azure-vm-reaper.png ├── azure-vm-reaper.yaml └── filter-vms.py ├── datadog-k8s-rollback ├── README.md ├── datadog-k8s-rollback.png └── datadog-k8s-rollback.yaml ├── datadog-to-jira ├── README.md ├── datadog-to-jira.png └── datadog-to-jira.yaml ├── datadog-to-slack ├── README.md ├── datadog-to-slack.png └── datadog-to-slack.yaml ├── dynamodb-capacity-monitor ├── README.md └── dynamodb-capacity-monitor.yaml ├── dynatrace-respond-to-problem ├── README.md ├── dynatrace-respond-to-problem.png └── dynatrace-respond-to-problem.yaml ├── ebs-reaper ├── README.md ├── ebs-reaper.png ├── ebs-reaper.yaml └── filter-volumes.py ├── ec2-provision-and-configure-with-bolt ├── README.md ├── ec2-provision-and-configure-with-bolt.png ├── ec2-provision-and-configure-with-bolt.yaml └── infra │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── ec2-reaper ├── .flake8 ├── README.md ├── ec2-reaper.png ├── ec2-reaper.yaml └── filter-instances.py ├── ec2-remove-unused-key-pairs ├── README.md ├── ec2-remove-unused-key-pairs.png ├── ec2-remove-unused-key-pairs.yaml └── filter-key-pairs.py ├── ec2-scan-unused-key-pairs ├── README.md ├── ec2-scan-unused-key-pairs.yaml └── metadata.yaml ├── ec2-stop-untagged-instances ├── README.md ├── ec2-stop-untagged-instances.png ├── ec2-stop-untagged-instances.yaml └── filter-instances.py ├── elbv2-delete-empty-loadbalancers ├── README.md ├── elbv2-delete-empty-loadbalancers.png ├── elbv2-delete-empty-loadbalancers.yaml └── filter-loadbalancers.py ├── empty ├── README.md └── empty.yaml ├── firehydrant-rollback ├── README.md └── firehydrant-rollback.yaml ├── gcp-disk-reaper ├── README.md ├── filter-disks.py ├── gcp-disk-reaper.png └── gcp-disk-reaper.yaml ├── gcp-instance-reaper ├── README.md ├── filter-instances.py ├── gcp-instance-reaper.png └── gcp-instance-reaper.yaml ├── http-health-check ├── README.md ├── checkHealth.py └── http-health-check.yaml ├── images ├── code-tab.png ├── datadog-api-key.png ├── datadog-application-key.png ├── datadog-k8s-rollback-modal.png ├── datadog-monitor.png ├── datadog-test-alert.png ├── datadog-trigger.png ├── datadog-webhook.png ├── dry-run-modal.png ├── guide-connections.png ├── missing-connection.png ├── run-workflow-action.png ├── runbutton.svg └── settings-sidenav.png ├── kubectl-apply-on-dockerhub-push ├── README.md ├── kubectl-apply-on-dockerhub-push.png └── kubectl-apply-on-dockerhub-push.yaml ├── pagerduty-production-incident-policy ├── README.md ├── pagerduty-production-incident-policy.png └── pagerduty-production-incident-policy.yaml ├── pagerduty-to-jira ├── README.md ├── pagerduty-to-jira.png └── pagerduty-to-jira.yaml ├── pagerduty-to-slack ├── README.md ├── pagerduty-to-slack.png └── pagerduty-to-slack.yaml ├── pagerduty-to-twilio ├── README.md ├── pagerduty-to-twilio.png └── pagerduty-to-twilio.yaml ├── pulumi-preview ├── README.md ├── pulumi-preview.png └── pulumi-preview.yaml ├── puppet-run-emit-data ├── README.md ├── puppet-run-emit-data.png └── puppet-run-emit-data.yaml ├── puppet-selective-enforcement ├── README.md ├── 
puppet-selective-enforcement.png └── puppet-selective-enforcement.yaml ├── puppet-shutdown-ec2 ├── README.md ├── puppet-shutdown-ec2.png └── puppet-shutdown-ec2.yaml ├── s3-remediate-unencrypted-buckets ├── README.md ├── filter-buckets.py ├── s3-remediate-unencrypted-buckets.png └── s3-remediate-unencrypted-buckets.yaml ├── s3-restrict-authenticated_user-read-buckets ├── README.md ├── filter-buckets.py ├── s3-restrict-authenticated_user-read-buckets.png └── s3-restrict-authenticated_user-read-buckets.yaml ├── s3-restrict-authenticated_user-read_acp-buckets ├── README.md ├── filter-buckets.py ├── s3-restrict-authenticated_user-read_acp-buckets.png └── s3-restrict-authenticated_user-read_acp-buckets.yaml ├── s3-restrict-authenticated_user-write-buckets ├── README.md ├── filter-buckets.py ├── s3-restrict-authenticated_user-write-buckets.png └── s3-restrict-authenticated_user-write-buckets.yaml ├── s3-restrict-authenticated_user-write_acp-buckets ├── README.md ├── filter-buckets.py ├── s3-restrict-authenticated_user-write_acp-buckets.png └── s3-restrict-authenticated_user-write_acp-buckets.yaml ├── s3-restrict-public-read-buckets ├── README.md ├── filter-buckets.py ├── s3-restrict-public-read-buckets.png └── s3-restrict-public-read-buckets.yaml ├── s3-restrict-public-read_acp-buckets ├── README.md ├── filter-buckets.py ├── s3-restrict-public-read_acp-buckets.png └── s3-restrict-public-read_acp-buckets.yaml ├── s3-restrict-public-write-buckets ├── README.md ├── filter-buckets.py ├── s3-restrict-public-write-buckets.png └── s3-restrict-public-write-buckets.yaml ├── s3-restrict-public-write_acp-buckets ├── README.md ├── filter-buckets.py ├── s3-restrict-public-write_acp-buckets.png └── s3-restrict-public-write_acp-buckets.yaml ├── splunkoncall-incident-response ├── README.md └── splunkoncall-incident-response.yaml ├── sts-describe-ec2-objects ├── README.md ├── sts-describe-ec2-objects.png └── sts-describe-ec2-objects.yaml ├── sts-stop-untagged-instances ├── README.md ├── filter-instances.py ├── sts-stop-untagged-instances.png └── sts-stop-untagged-instances.yaml ├── terraform-continuous-deployment ├── README.md ├── terraform-continuous-deployment.png └── terraform-continuous-deployment.yaml └── update-workflow-on-merge ├── README.md ├── update-workflow-on-merge.png └── update-workflow-on-merge.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | # 2 | # Development artifacts 3 | # 4 | *.sw[onp] 5 | *.code-workspace 6 | *.flake8 7 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @puppetlabs/relay-community 2 | -------------------------------------------------------------------------------- /azure-delete-empty-loadbalancers/azure-delete-empty-loadbalancers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/azure-delete-empty-loadbalancers/azure-delete-empty-loadbalancers.png -------------------------------------------------------------------------------- /azure-delete-empty-loadbalancers/azure-delete-empty-loadbalancers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Delete empty Azure Load Balancers 3 | description: Save money by finding empty Azure load balancers and terminating them. 
4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/azure-delete-empty-loadbalancers 5 | tags: 6 | - cost optimization 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 21 | default: 'true' 22 | steps: 23 | - name: list-azure-load-balancers 24 | image: relaysh/azure-network-step-load-balancer-list 25 | spec: 26 | azure: &azure 27 | connection: !Connection { type: azure, name: my-azure-account } 28 | - name: filter-loadbalancers 29 | image: relaysh/core:latest-python 30 | spec: 31 | loadBalancers: !Output {from: list-azure-load-balancers, name: loadBalancers} 32 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/azure-delete-empty-loadbalancers/filter-loadbalancers.py 33 | - name: approval 34 | description: Wait for approval 35 | type: approval 36 | dependsOn: filter-loadbalancers 37 | when: 38 | - !Fn.equals [!Parameter dryRun, 'false'] 39 | - name: delete-azure-lbs 40 | image: relaysh/azure-network-step-load-balancer-delete 41 | dependsOn: approval 42 | when: 43 | - !Fn.equals [!Parameter dryRun, 'false'] 44 | spec: 45 | azure: *azure 46 | resourceIDs: !Output {from: filter-loadbalancers, name: resourceIDs} 47 | confetti: true 48 | -------------------------------------------------------------------------------- /azure-delete-empty-loadbalancers/filter-loadbalancers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-loadbalancers.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of Azure Load Balancers and filters the ones that are empty e.g. 6 | # no backend configurations. 
7 | # Inputs: 8 | # - loadBalancers - list of Azure Load Balancers 9 | # Outputs: 10 | # - resourceIDs - list of Azure load balancer resource IDs to be terminated in the subsequent step 11 | 12 | from relay_sdk import Interface, Dynamic as D 13 | 14 | relay = Interface() 15 | 16 | to_terminate = [] 17 | to_keep = [] 18 | 19 | lbs = relay.get(D.loadBalancers) 20 | for lb in lbs: 21 | if len(lb['backend_address_pools']) == 0: 22 | to_terminate.append(lb['id']) 23 | else: 24 | to_keep.append(lb['id']) 25 | continue 26 | 27 | print('\nFound {} Load Balancers that are NOT empty:'.format(len(to_keep))) 28 | print(*[i for i in to_keep], sep = "\n") 29 | 30 | print('\nFound {} Load Balancers that are empty:'.format(len(to_terminate))) 31 | print(*[i for i in to_terminate], sep = "\n") 32 | 33 | 34 | print('\nSetting output `resourceIDs` to list of {0} load balancer resource ids to terminate.'.format(len(to_terminate))) 35 | relay.outputs.set('resourceIDs', to_terminate) 36 | -------------------------------------------------------------------------------- /azure-delete-unused-nics/azure-delete-unused-nics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/azure-delete-unused-nics/azure-delete-unused-nics.png -------------------------------------------------------------------------------- /azure-delete-unused-nics/azure-delete-unused-nics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Delete unused Azure network interfaces 3 | description: Enforce compliance by finding unused Azure network interfaces and terminating them. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/azure-delete-unused-nics 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if you don't want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected.
21 | default: 'true' 22 | steps: 23 | - name: list-azure-nics 24 | image: relaysh/azure-network-step-nic-list 25 | spec: 26 | azure: &azure 27 | connection: !Connection { type: azure, name: my-azure-account } 28 | - name: filter-nics 29 | image: relaysh/core:latest-python 30 | spec: 31 | networkInterfaces: !Output {from: list-azure-nics, name: networkInterfaces} 32 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/azure-delete-unused-nics/filter-nics.py 33 | - name: approval 34 | description: Wait for approval 35 | type: approval 36 | dependsOn: filter-nics 37 | when: 38 | - !Fn.equals [!Parameter dryRun, 'false'] 39 | - name: delete-azure-nics 40 | image: relaysh/azure-network-step-nic-delete 41 | dependsOn: approval 42 | when: 43 | - !Fn.equals [!Parameter dryRun, 'false'] 44 | spec: 45 | azure: *azure 46 | resourceIDs: !Output {from: filter-nics, name: resourceIDs} 47 | confetti: true 48 | -------------------------------------------------------------------------------- /azure-delete-unused-nics/filter-nics.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-nics.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of Azure Network Interfaces and filters the ones that are unused e.g. 6 | # no VM configuration. 7 | # Inputs: 8 | # - networkInterfaces - list of Azure NICs 9 | # Outputs: 10 | # - resourceIDs - list of Azure network interface resource IDs to be terminated in the subsequent step 11 | 12 | from relay_sdk import Interface, Dynamic as D 13 | 14 | relay = Interface() 15 | 16 | to_terminate = [] 17 | to_keep = [] 18 | 19 | nics = relay.get(D.networkInterfaces) 20 | for nic in nics: 21 | if 'virtual_machine' in nic.keys(): 22 | to_keep.append(nic['id']) 23 | else: 24 | to_terminate.append(nic['id']) 25 | continue 26 | 27 | print('\nFound {} Network Interfaces that are used:'.format(len(to_keep))) 28 | print(*[i for i in to_keep], sep = "\n") 29 | 30 | print('\nFound {} Network Interfaces that are NOT used:'.format(len(to_terminate))) 31 | print(*[i for i in to_terminate], sep = "\n") 32 | 33 | 34 | print('\nSetting output `resourceIDs` to list of {0} network interface resource ids to terminate.'.format(len(to_terminate))) 35 | relay.outputs.set('resourceIDs', to_terminate) 36 | -------------------------------------------------------------------------------- /azure-disk-reaper/azure-disk-reaper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/azure-disk-reaper/azure-disk-reaper.png -------------------------------------------------------------------------------- /azure-disk-reaper/azure-disk-reaper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Delete unattached Azure Disks 3 | description: Save money by finding unattached Azure Disks in a given subscription and terminating them. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/azure-disk-reaper 5 | tags: 6 | - cost optimization 7 | 8 | # Uncomment out this trigger to run this workflow hourly.
9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 21 | default: 'true' 22 | steps: 23 | - name: list-azure-disks 24 | image: relaysh/azure-disks-step-disk-list 25 | spec: 26 | azure: &azure 27 | connection: !Connection { type: azure, name: my-azure-account } 28 | - name: filter-disks 29 | image: relaysh/core:latest-python 30 | spec: 31 | disks: !Output {from: list-azure-disks, name: disks} 32 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/azure-disk-reaper/filter-disks.py 33 | - name: approval 34 | description: Wait for approval to terminate disks 35 | type: approval 36 | dependsOn: filter-disks 37 | when: 38 | - !Fn.equals [!Parameter dryRun, 'false'] 39 | - name: delete-disks 40 | image: relaysh/azure-disks-step-disk-delete 41 | dependsOn: approval 42 | when: 43 | - !Fn.equals [!Parameter dryRun, 'false'] 44 | spec: 45 | azure: *azure 46 | resourceIDs: !Output {from: filter-disks, name: resourceIDs} 47 | confetti: true 48 | -------------------------------------------------------------------------------- /azure-disk-reaper/filter-disks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-disks.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of Azure Disks from the Relay Interface (in the form of parameters) 6 | # and filters the Disks that are unattached. It then sets the output 7 | # variable `resource_ids` to the list of Azure Disks volumes that are unattached. 8 | # Inputs: 9 | # - disks - list of Azure Disks 10 | # Outputs: 11 | # - resourceIDs - list of Azure Disk resource IDs to be terminated in the subsequent step 12 | 13 | import logging 14 | 15 | from relay_sdk import Interface, Dynamic as D 16 | 17 | relay = Interface() 18 | 19 | to_terminate = [] 20 | 21 | # Filtering volumes with no attachments 22 | disks = filter(lambda i: i['disk_state'] == 'Unattached', relay.get(D.disks)) 23 | for disk in disks: 24 | try: 25 | to_terminate.append(disk['id']) 26 | print('Adding Azure Disk {0} with no attachments to termination list'.format(disk['name'])) 27 | except Exception as e: 28 | print('Azure Disk {0} not considered for termination because of a processing error: {1}'.format(disk['name'], e)) 29 | 30 | # Adding list of Azure Disk resource ids to output `resource_ids` 31 | if len(to_terminate) == 0: 32 | print('No volumes to terminate! 
Exiting.') 33 | exit() 34 | else: 35 | print('Setting output `resourceIDs` to list of {0} disks'.format(len(to_terminate))) 36 | relay.outputs.set('resourceIDs', to_terminate) 37 | 38 | -------------------------------------------------------------------------------- /azure-vm-reaper/azure-vm-reaper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/azure-vm-reaper/azure-vm-reaper.png -------------------------------------------------------------------------------- /azure-vm-reaper/azure-vm-reaper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Delete untagged Azure Virtual Machines 3 | description: Save money by finding untagged Azure Virtual Machines in a given subscription and deleting them. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/azure-vm-reaper 5 | tags: 6 | - cost optimization 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 21 | default: 'true' 22 | steps: 23 | - name: list-azure-vms 24 | image: relaysh/azure-virtual-machines-step-vm-list 25 | spec: 26 | azure: &azure 27 | connection: !Connection { type: azure, name: my-azure-account } 28 | - name: filter-vms 29 | image: relaysh/core:latest-python 30 | spec: 31 | virtualMachines: !Output {from: list-azure-vms, name: virtualMachines} 32 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/azure-vm-reaper/filter-vms.py 33 | - name: approval 34 | description: Wait for approval to terminate Azure virtual machines 35 | type: approval 36 | dependsOn: filter-vms 37 | when: 38 | - !Fn.equals [!Parameter dryRun, 'false'] 39 | - name: delete-azure-vms 40 | image: relaysh/azure-virtual-machines-step-vm-delete 41 | dependsOn: approval 42 | when: 43 | - !Fn.equals [!Parameter dryRun, 'false'] 44 | spec: 45 | azure: *azure 46 | resourceIDs: !Output {from: filter-vms, name: resourceIDs} 47 | confetti: true 48 | -------------------------------------------------------------------------------- /azure-vm-reaper/filter-vms.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-vms.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of Azure Virtual Machines from the Relay Interface (in the form of parameters) 6 | # and filters the VMs that have no tags. It then sets the output variable `resourceIDs` 7 | # to the list of Azure Virtual Machines resource IDs that are untagged. 
8 | # Inputs: 9 | # - virtualMachines - list of Azure Virtual Machines 10 | # Outputs: 11 | # - resourceIDs - list of Azure Virtual Machine resource IDs to be terminated in the subsequent step 12 | 13 | from relay_sdk import Interface, Dynamic as D 14 | 15 | relay = Interface() 16 | 17 | to_terminate = [] 18 | to_keep = [] 19 | 20 | # Filtering Azure virtual machines with no tags 21 | print('Looking for all Virtual Machines with no tags') 22 | vms = relay.get(D.virtualMachines) # Queries for `virtual_machines` parameter from Relay 23 | for vm in vms: 24 | if 'tags' in vm.keys(): 25 | to_keep.append(vm['id']) 26 | continue 27 | else: 28 | try: 29 | to_terminate.append(vm['id']) 30 | except Exception as e: 31 | print('\nAzure Virtual Machine {0} not considered for termination because of a processing error: {1}'.format(vm['name'], e)) 32 | 33 | print('\nFound {} Virtual machines (with tags) not considered for termination:'.format(len(to_keep))) 34 | print(*[vm_id for vm_id in to_keep], sep = "\n") 35 | 36 | if len(to_terminate) == 0: 37 | print('\nNo Virtual Machines to terminate! Exiting.') 38 | exit() 39 | else: 40 | print('\nAdding {} Virtual machines (without tags) to terminate:'.format(len(to_terminate))) 41 | print(*[vm_id for vm_id in to_terminate], sep = "\n") 42 | print('\nSetting output `resourceIDs` to list of {0} virtual machine resource ids to terminate:'.format(len(to_terminate))) 43 | relay.outputs.set('resourceIDs', to_terminate) 44 | print(*[vm_id for vm_id in to_terminate], sep = "\n") 45 | -------------------------------------------------------------------------------- /datadog-k8s-rollback/datadog-k8s-rollback.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/datadog-k8s-rollback/datadog-k8s-rollback.png -------------------------------------------------------------------------------- /datadog-k8s-rollback/datadog-k8s-rollback.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Rollback Kubernetes deployment and update Datadog incident 3 | description: This workflow rolls back a kubernetes deployment to the previous version and updates a Datadog Incident Management incident upon completion. 
4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/datadog-k8s-rollback 5 | tags: 6 | - auto remediation 7 | 8 | parameters: 9 | deployment: 10 | description: Name of the kubernetes deployment to roll back 11 | default: nginx-deployment 12 | public_id: 13 | description: Numeric part of the "friendly" id of the Datadog Incident to update; for IR-3 use "3" 14 | namespace: 15 | description: Kubernetes namespace for the deployment 16 | default: default 17 | 18 | 19 | steps: 20 | - name: kubectl-deployment-rollback 21 | image: relaysh/kubernetes-step-kubectl 22 | spec: 23 | namespace: !Parameter namespace 24 | cluster: 25 | name: my-kubernetes-cluster 26 | connection: !Connection { type: kubernetes, name: my-kubernetes-cluster } 27 | command: rollout 28 | args: !Fn.concat ["undo ", "deployment.v1.apps/", !Parameter deployment] 29 | - name: convert-incident-id 30 | image: relaysh/datadog-step-incident-extract-id 31 | spec: 32 | connection: !Connection {type: datadog, name: my-datadog-keys } 33 | public_id: !Parameter public_id 34 | - name: update-timeline 35 | image: relaysh/datadog-step-incident-timeline-update 36 | dependsOn: kubectl-deployment-rollback 37 | spec: 38 | connection: !Connection {type: datadog, name: my-datadog-keys } 39 | incident_id: !Output {from: convert-incident-id, name: incident_id} 40 | timeline_cell_content: !Fn.concat ["Rolled back: ", !Parameter deployment, " - [view logs in Relay](https://app.relay.sh/workflows/datadog-k8s-rollback)"] 41 | -------------------------------------------------------------------------------- /datadog-to-jira/README.md: -------------------------------------------------------------------------------- 1 | When a Datadog alert fires, you might be doing something more important. But, you don't want to forget about it. With this workflow, you can automatically create an issue in Jira issue when a Datadog event is received. 2 | 3 | # Prerequisites 4 | 5 | Before you run this workflow, you will need the following: 6 | - A [Datadog](https://www.datadog.com/) account. 7 | - An instance of [Jira](https://www.atlassian.com/software/jira) available to the internet. Jira Server and Jira Cloud are both supported. 8 | 9 | # Configure the workflow 10 | 11 | Follow these steps to configure the workflow. Doing this will enable Relay to listen for alerts from Datadog and create tickets in Jira. 12 | ## Set up the Datadog trigger 13 | 14 | Follow these instructions to set up your Datadog trigger. 15 | 16 | From the main workflow page, find the `datadog` trigger and click the **Copy webhook URL** button. 17 | 18 | ![Set up Datadog trigger](/images/datadog-trigger.png) 19 | 20 | In Datadog, add a new webhook: 21 | 22 | - Click on the **Integrations** menu option. 23 | - Install or configure the **Webhooks Integration**. 24 | - Under **Webhooks**, click **New +**. 25 | - Provide a meaningful name (e.g. `relay-webhook`) 26 | - Paste the URL to the webhook URL found in Relay. 27 | - Additional properties can be added to the payload for workflow customization. 28 | 29 | ![Set up Datadog webhook](/images/datadog-webhook.png) 30 | 31 | - Click **Save**. 32 | 33 | Configure the Datadog monitor: 34 | - Within the Monitor, click **Settings(⚙)** -> **Edit** 35 | - Under **Say what's happening**, add the above webhook with `@webhook-{name of webhook}` (e.g. 
`@webhook-relay`) 36 | 37 | ![Set up Datadog monitor](/images/datadog-monitor.png) 38 | 39 | - Click **Save** 40 | ## Configure the Jira integration 41 | 42 | - Click **Fill in missing connections** or click **Settings** in the side nav. 43 | 44 | ![Fill in missing connections](/images/missing-connection.png) 45 | 46 | ![Click settings from side nav](/images/settings-sidenav.png) 47 | 48 | - Fill out the form to create a Connection to your Jira instance. 49 | - **jiraURL** - The URL to the root of your Jira instance. For Jira Cloud, this is 50 | `https://your-domain.atlassian.net`. 51 | - **jiraUsername** - The username to use when authenticating to Jira. 52 | - **jiraToken** - The [API token](https://confluence.atlassian.com/x/Vo71Nw) (for Jira Cloud) or password to use when authenticating to Jira. 53 | 54 | - Click **Save** 55 | 56 | ## Set the default Jira project 57 | 58 | Configure the default Jira project where you will create the tickets. 59 | - Navigate to the **Code** tab. 60 | 61 | ![Code tab](/images/code-tab.png) 62 | 63 | - Find the parameter for `jiraProjectKey` and specify the `default:` project (e.g. `OPS`) where tickets will be created. 64 | 65 | ```yaml 66 | jiraProjectKey: 67 | description: the JIRA project key to use when creating tickets 68 | default: OPS 69 | ``` 70 | # Run the workflow manually from Datadog 71 | 72 | To test the Datadog alert: 73 | - Navigate to the Monitor that you configured earlier. 74 | - Click **Settings(⚙)** -> **Edit** 75 | - Scroll to the bottom of the page and click **Test Notifications** 76 | - Select **Alert** (default) 77 | - Click **Run Test** 78 | 79 | ![Test alert in Datadog](/images/datadog-test-alert.png) -------------------------------------------------------------------------------- /datadog-to-jira/datadog-to-jira.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/datadog-to-jira/datadog-to-jira.png -------------------------------------------------------------------------------- /datadog-to-jira/datadog-to-jira.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Respond to a Datadog alert via Jira 3 | description: Automatically respond to a Datadog alert by creating an issue in Jira. 
4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/datadog-to-jira 5 | tags: 6 | - incident response 7 | 8 | parameters: 9 | eventBody: 10 | description: Datadog event body 11 | eventTitle: 12 | description: Datadog event title 13 | eventType: 14 | description: Datadog event type 15 | jiraProjectKey: 16 | description: the JIRA project key to use when creating tickets 17 | default: OPS 18 | 19 | triggers: 20 | - name: datadog 21 | source: 22 | type: webhook 23 | image: relaysh/datadog-trigger-event-fired 24 | binding: 25 | parameters: 26 | eventBody: !Data body 27 | eventTitle: !Data title 28 | eventType: !Data event_type 29 | 30 | steps: 31 | - name: jira-issue-create 32 | image: relaysh/jira-step-issue-create 33 | when: !Fn.equals [!Parameter eventType, query_alert_monitor] 34 | spec: 35 | connection: !Connection { type: jira, name: my-jira-connection} 36 | issue: 37 | fields: 38 | description: !Fn.convertMarkdown [jira, !Parameter eventBody] 39 | project: 40 | key: !Parameter jiraProjectKey 41 | summary: !Parameter eventTitle 42 | type: 43 | name: Task 44 | -------------------------------------------------------------------------------- /datadog-to-slack/README.md: -------------------------------------------------------------------------------- 1 | When a Datadog alert fires, you might be doing something more important. But, you don't want to forget about it. With this workflow, you can automatically send a message to a Slack channel when a Datadog event is received. 2 | 3 | # Prerequisites 4 | 5 | Before you run this workflow, you will need the following: 6 | - A [Datadog](https://www.datadog.com/) account. 7 | - A [Slack](https://slack.com/) workspace and an appropriate channel to send notifications to. 8 | 9 | # Configure the workflow 10 | 11 | Follow these steps to configure the workflow. Doing this will enable Relay to listen for alerts from Datadog and create messages in Slack. 12 | 13 | ## Set up the Datadog trigger 14 | 15 | Follow these instructions to set up your Datadog trigger. 16 | 17 | From the main workflow page, find the `datadog` trigger and click the **Copy webhook URL** button. 18 | 19 | ![Set up Datadog trigger](/images/datadog-trigger.png) 20 | 21 | In Datadog, add a new webhook: 22 | 23 | - Click on the **Integrations** menu option. 24 | - Install or configure the **Webhooks Integration**. 25 | - Under **Webhooks**, click **New +**. 26 | - Provide a meaningful name (e.g. `relay-webhook`) 27 | - Paste the URL to the webhook URL found in Relay. 28 | - Additional properties can be added to the payload for workflow customization. 29 | 30 | ![Set up Datadog webhook](/images/datadog-webhook.png) 31 | 32 | - Click **Save**. 33 | 34 | Configure the Datadog monitor: 35 | - Within the Monitor, click **Settings(⚙)** -> **Edit** 36 | - Under **Say what's happening**, add the above webhook with `@webhook-{name of webhook}` (e.g. `@webhook-relay`) 37 | 38 | ![Set up Datadog monitor](/images/datadog-monitor.png) 39 | 40 | 41 | 42 | 43 | - Click **Save** 44 | 45 | - Define the following parameters: 46 | - `slackChannel`: the slack channel to send notifications to. 47 | - Configure a Slack connection in Relay. 48 | - The default name is `slack-connection`. 49 | 50 | ## Set up the trigger 51 | 52 | In the workflow overview page in Relay, find the webhook URL by navigating to 53 | the **Setup** sidebar. 54 | 55 | In Datadog, add a new webhook: 56 | 57 | 1. Click on the **Integrations** menu option. 58 | 2. Install or configure the **Webhooks Integration**. 59 | 2. 
Under **Webhooks**, click **New +**. 60 | - Provide a meaningful name. 61 | - Update the URL to the webhook URL found in Relay. 62 | - Additional properties can be added to the payload for workflow customization. 63 | 4. Click **Save**. 64 | 65 | Configure the Datadog monitor: 66 | 1. Within the Monitor, click **Settings(⚙)** -> **Edit** 67 | 2. Under **Say what's happening**, add the above webhook with `@webhook-{name of webhook}` (e.g. `@webhook-relay`) 68 | 3. Click **Save** 69 | 70 | # Run the workflow manually from Datadog 71 | 72 | To test the Datadog alert: 73 | - Navigate to the Monitor that you configured earlier. 74 | - Click **Settings(⚙)** -> **Edit** 75 | - Scroll to the bottom of the page and click **Test Notifications** 76 | - Select **Alert** (default) 77 | - Click **Run Test** 78 | 79 | ![Test alert in Datadog](/images/datadog-test-alert.png) 80 | -------------------------------------------------------------------------------- /datadog-to-slack/datadog-to-slack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/datadog-to-slack/datadog-to-slack.png -------------------------------------------------------------------------------- /datadog-to-slack/datadog-to-slack.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Respond to a Datadog alert with Slack 3 | description: Automatically respond to a Datadog alert by sending a message to a Slack channel. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/datadog-to-slack 5 | tags: 6 | - incident response 7 | 8 | parameters: 9 | slackChannel: 10 | description: the Slack channel to send notifications to 11 | default: '#ops' 12 | eventTitle: 13 | description: Datadog event title 14 | eventType: 15 | description: Datadog event type 16 | 17 | triggers: 18 | - name: datadog 19 | source: 20 | type: webhook 21 | image: relaysh/datadog-trigger-event-fired 22 | binding: 23 | parameters: 24 | eventDate: !Data date 25 | eventTitle: !Data title 26 | eventType: !Data event_type 27 | 28 | steps: 29 | - name: slack-notify 30 | image: relaysh/slack-step-message-send 31 | when: !Fn.equals [!Parameter eventType, query_alert_monitor] 32 | spec: 33 | connection: !Connection [slack, slack-connection] 34 | channel: !Parameter slackChannel 35 | username: Datadog via Relay 36 | message: !Fn.concat 37 | - '⚠️ *' 38 | - !Parameter eventTitle 39 | - '* ⚠️' 40 | -------------------------------------------------------------------------------- /dynamodb-capacity-monitor/README.md: -------------------------------------------------------------------------------- 1 | This workflow sends a notification to Slack when DynamoDB provisioned capacity exceeds defined limits. 2 | 3 | ## Prerequisites 4 | 5 | Before you run this workflow, you will need the following: 6 | - An [AWS](https://aws.amazon.com/) account and credentials with read access to DynamoDB. 7 | - A [Slack](https://slack.com/) workspace and an appropriate channel to send notifications to. 8 | 9 | ## Configure the workflow 10 | 11 | - Define the following parameters: 12 | - `slackChannel`: the slack channel to send notifications to. 13 | - Setup unconfigured connections from the **Settings** sidebar: 14 | - Configure the Slack connection `slack-connection`. 15 | - Configure the AWS connection `aws-connection`. 
16 | - Update the `capacity` specification if desired: 17 | - multiple, independent capacity thresholds can be defined 18 | - `regions` can be updated to `include` or `exclude` any desired regions 19 | - `aggregate` limits represent a total of the defined regions 20 | - `regional` limits apply per region 21 | 22 | ## Specification examples 23 | 24 | ```yaml 25 | spec: 26 | aws: !Connection { type: aws, name: aws-connection } 27 | ignore: 28 | - us-gov-east-1 29 | - us-gov-west-1 30 | - us-iso-east-1 31 | - us-isob-east-1 32 | capacity: 33 | - regions: 34 | include: 35 | - us-east-1 36 | - us-east-2 37 | limits: 38 | aggregate: 39 | read: 500 40 | write: 500 41 | regional: 42 | read: 300 43 | write: 300 44 | - regions: 45 | include: 46 | - us-west-1 47 | - us-west-2 48 | limits: 49 | aggregate: 50 | read: 750 51 | write: 500 52 | regional: 53 | read: 500 54 | write: 300 55 | - regions: 56 | all: true 57 | exclude: 58 | - us-east-1 59 | - us-east-2 60 | - us-west-1 61 | - us-west-2 62 | limits: 63 | aggregate: 64 | read: 0 65 | write: 0 66 | regional: 67 | read: 0 68 | write: 0 69 | ``` 70 | -------------------------------------------------------------------------------- /dynamodb-capacity-monitor/dynamodb-capacity-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Monitor AWS DynamoDB provisioned capacity 3 | description: This workflow sends a notification to Slack when DynamoDB provisioned capacity exceeds defined limits. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/dynamodb-capacity-monitor 5 | tags: 6 | - incident response 7 | 8 | parameters: 9 | slackChannel: 10 | description: the Slack channel to send notifications to 11 | default: "#ops" 12 | 13 | triggers: 14 | - name: schedule 15 | source: 16 | type: schedule 17 | schedule: "0 */1 * * *" 18 | 19 | steps: 20 | - name: monitor 21 | image: gcr.io/nebula-contrib/relay-aws-dynamodb-monitor 22 | command: java 23 | args: 24 | - -jar 25 | - /relay/aws-dynamodb-monitor.jar 26 | spec: 27 | aws: !Connection { type: aws, name: aws-connection } 28 | ignore: 29 | - us-gov-east-1 30 | - us-gov-west-1 31 | - us-iso-east-1 32 | - us-isob-east-1 33 | capacity: 34 | - regions: 35 | all: true 36 | limits: 37 | aggregate: 38 | read: 500 39 | write: 500 40 | regional: 41 | read: 100 42 | write: 100 43 | - name: slack-notify 44 | image: relaysh/slack-step-message-send 45 | when: !Fn.notEquals [!Output [monitor, capacity], ""] 46 | spec: 47 | connection: !Connection [slack, slack-connection] 48 | channel: !Parameter slackChannel 49 | username: Relay 50 | message: !Fn.concat 51 | - "⚠️ " 52 | - "*Alert*" 53 | - " ⚠️" 54 | - !Output [monitor, capacity] 55 | -------------------------------------------------------------------------------- /dynatrace-respond-to-problem/README.md: -------------------------------------------------------------------------------- 1 | # dynatrace-respond-to-problem workflow 2 | 3 | This workflow can be triggered by a Dynatrace Problem. It also allows you to decide whether to make a call back to Dynatrace to add a comment. 4 | For this two-way integration to work you need to create two Relay secrets: dtapitoken and dtapiendpoint. 
5 | 6 | To add it to your relay account, either paste the contents of dynatrace-respond-to-problem.yaml in the web code editor or clone this repo and use the CLI: 7 | 8 | ```shell 9 | relay workflow add dynatrace-respond-to-problem -f ./dynatrace-respond-to-problem.yaml 10 | ``` 11 | 12 | Example for secrets: 13 | 14 | ```shell 15 | dtapitoken: ABCDEFGHADSFASDFA 16 | dtapiendpoint: https://yourdynatrace.live.dynatrace.com 17 | ``` 18 | 19 | Once you have saved this workflow in Relay you can setup the Dynatrace Problem Notification Webhook by using the Webhook Entrypoint from Relay and the following custom payload in Dynatrace: 20 | 21 | ```json 22 | { 23 | "State":"{State}", 24 | "ProblemID":"{ProblemID}", 25 | "ProblemTitle":"{ProblemTitle}", 26 | "ProblemURL":"{ProblemURL}", 27 | "PID" : "{PID}", 28 | "ProblemSeverity" : "{ProblemSeverity}", 29 | "ProblemImpact" : "{ProblemImpact}", 30 | "ProblemDetailsText" : "{ProblemDetailsText}", 31 | "Tags" : "{Tags}", 32 | "ProblemDetailsJSON" : "{ProblemDetailsJSON}" 33 | } 34 | ``` 35 | -------------------------------------------------------------------------------- /dynatrace-respond-to-problem/dynatrace-respond-to-problem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/dynatrace-respond-to-problem/dynatrace-respond-to-problem.png -------------------------------------------------------------------------------- /dynatrace-respond-to-problem/dynatrace-respond-to-problem.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Connect Dynatrace and Relay to respond to problem events 3 | description: This workflow configures a webhook trigger to receive problem notifications from Dynatrace, conditionally posts a comment back to the Dynatrace problem, and logs the problem details in Relay. 4 | homepage: https://github.com/relay-integrations/relay-dynatrace/tree/master/workflows/dynatrace-respond-to-problem 5 | tags: 6 | - auto remediation 7 | 8 | parameters: 9 | state: 10 | default: "" 11 | problemid: 12 | default: "" 13 | pid: 14 | default: "" 15 | problemtitle: 16 | default: "" 17 | problemurl: 18 | default: "" 19 | problemseverity: 20 | default: "" 21 | problemimpact: 22 | default: "" 23 | problemdetailstext: 24 | default: "" 25 | tags: 26 | default: "" 27 | 28 | triggers: 29 | - name: dynatrace-problem-event 30 | source: 31 | type: webhook 32 | image: relaysh/dynatrace-trigger-event-fired:latest 33 | binding: 34 | parameters: 35 | state: !Data State 36 | problemid: !Data ProblemId 37 | pid: !Data PID 38 | problemtitle: !Data ProblemTitle 39 | problemurl: !Data ProblemUrl 40 | problemseverity: !Data ProblemSeverity 41 | problemimpact: !Data ProblemImpact 42 | problemdetailstext: !Data ProblemDetailsText 43 | tags: !Data Tags 44 | steps: 45 | - name: approval-post-problem-comment 46 | description: Do you want to post a comment on the Dynatrace problem? 
47 | type: approval 48 | - name: post-problem-comment 49 | image: relaysh/core 50 | input: 51 | - echo "$(ni get -p {.dtapiendpoint})/api/v1/problem/details/$(ni get -p {.pid})/comments" 52 | - echo "Authorization Api-Token $(ni get -p {.dtapitoken})" 53 | - | 54 | set -x 55 | curl -X POST "$(ni get -p {.dtapiendpoint})/api/v1/problem/details/$(ni get -p {.pid})/comments" \ 56 | -H "accept: application/json; charset=utf-8" \ 57 | -H "Authorization: Api-Token $(ni get -p {.dtapitoken})" \ 58 | -H "Content-Type: application/json; charset=utf-8" \ 59 | -d "{\"comment\":\"This is a comment from Relay. We are working on this!!\",\"user\":\"Relay Workflow\",\"context\":\"Relay\"}" 60 | dependsOn: 61 | - approval-post-problem-comment 62 | spec: 63 | problemdetailstext: !Parameter problemdetailstext 64 | problemtitle: !Parameter problemtitle 65 | state: !Parameter state 66 | problemid: !Parameter problemid 67 | pid: !Parameter pid 68 | dtapitoken: !Secret dtapitoken 69 | dtapiendpoint: !Secret dtapiendpoint 70 | - name: log-problem-details 71 | image: relaysh/core 72 | spec: 73 | problemdetailstext: !Parameter problemdetailstext 74 | problemtitle: !Parameter problemtitle 75 | state: !Parameter state 76 | problemid: !Parameter problemid 77 | pid: !Parameter pid 78 | input: 79 | - echo "Here the details of the Dynatrace Problem" 80 | - echo "Title $(ni get -p {.problemtitle})" 81 | - echo "$(ni get -p {.problemdetailstext})" 82 | - echo "State $(ni get -p {.state})" 83 | - echo "ProblemId $(ni get -p {.problemid})" 84 | - echo "PID $(ni get -p {.pid})" 85 | -------------------------------------------------------------------------------- /ebs-reaper/README.md: -------------------------------------------------------------------------------- 1 | ## Prerequisites 2 | 3 | Before you run this workflow, you will need the following: 4 | - An AWS account. 5 | - An AWS IAM user with permissions to list and delete EBS volumes (if not 6 | run in dry run mode). 7 | - One or more running EBS volumes that are unattached. 8 | 9 | ## Run the workflow 10 | 11 | Follow these steps to run the workflow: 12 | 1. Add your AWS credentials as a Connection: 13 | - Click **Setup** 14 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 15 | - **KEY**: `ACCESS KEY ID` 16 | - **VALUE**: Enter your AWS access key id associated with the account 17 | - **KEY**: `SECRET ACCESS KEY` 18 | - **VALUE**: Enter your AWS secret access key associated with the account 19 | - Click **Save** 20 | 21 | 2. Click **Run workflow** and wait for the workflow run page to appear. 22 | 3. Supply following parameters to the modal: 23 | - **KEY**: `region` 24 | - **VALUE**: The AWS region to run in 25 | - **KEY**: `dryRun` 26 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 27 | 28 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 29 | `false`, volumes that are unattached will immediately be terminated. 30 | 31 | ## Run the workflow on a schedule 32 | 33 | Follow these steps to run this workflow on a schedule: 34 | - Un-comment out the Trigger block in the workflow file: 35 | 36 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 
37 | 38 | ```yaml 39 | # triggers: 40 | # - name: schedule 41 | # source: 42 | # type: schedule 43 | # schedule: '0 * * * *' 44 | # binding: 45 | # parameters: 46 | # region: us-east-1 47 | # dryRun: true 48 | # lifetimeTag: lifetime 49 | # terminationDateTag: termination_date 50 | ``` 51 | 52 | - Configure the `schedule` trigger: 53 | - Supply the run interval in [cron format](https://crontab.guru/). 54 | - Configure the following parameter bindings: 55 | - Specify the `region` to run in. 56 | - Specify the `lifetimeTag` to use. 57 | - Specify the `terminationDateTag` to use. 58 | - Specify whether `dryRun` should be set to `true` or `false`. 59 | - Click **Save changes** -------------------------------------------------------------------------------- /ebs-reaper/ebs-reaper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/ebs-reaper/ebs-reaper.png -------------------------------------------------------------------------------- /ebs-reaper/ebs-reaper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Delete unattached EBS volumes 3 | description: This workflow looks at all of the EBS volumes in a given account and region and selects a subset of those to delete. Requires an AWS account with permissions to delete EBS volumes. The termination criterion is whether any attachments exist on the EBS volume. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/ebs-reaper 5 | tags: 6 | - cost optimization 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # awsRegion: us-east-1 17 | # dryRun: true 18 | 19 | parameters: 20 | awsRegion: 21 | description: The AWS region to run in 22 | default: us-east-1 23 | dryRun: 24 | description: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 
25 | default: 'true' 26 | steps: 27 | - name: describe-volumes 28 | image: relaysh/aws-ebs-step-volumes-describe 29 | spec: 30 | aws: &aws 31 | connection: !Connection { type: aws, name: my-aws-account } 32 | region: !Parameter awsRegion 33 | - name: filter-volumes 34 | image: relaysh/core:latest-python 35 | spec: 36 | volumes: !Output {from: describe-volumes, name: volumes} 37 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/ebs-reaper/filter-volumes.py 38 | - name: approval 39 | description: Wait for approval to terminate volumes 40 | type: approval 41 | when: 42 | - !Fn.equals [!Parameter dryRun, 'false'] 43 | dependsOn: filter-volumes 44 | - name: terminate-volumes 45 | image: relaysh/aws-ebs-step-volumes-delete 46 | dependsOn: approval 47 | when: 48 | - !Fn.equals [!Parameter dryRun, 'false'] 49 | spec: 50 | aws: *aws 51 | volumeIDs: !Output {from: filter-volumes, name: volumeIDs} 52 | confetti: true 53 | -------------------------------------------------------------------------------- /ebs-reaper/filter-volumes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-volumes.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of Volumes from the Relay Interface (in the form of parameters) 6 | # and filters the volumes that are unattached. It then sets the output 7 | # variable `volumeIDs` to the list of EBS volumes that are unattached. 8 | # Inputs: 9 | # - volumes - list of EBS volumes 10 | # Outputs: 11 | # - volumeIDs - list of EBS volume ids to be terminated in the subsequent step 12 | 13 | from relay_sdk import Interface, Dynamic as D 14 | 15 | relay = Interface() 16 | 17 | to_terminate = [] 18 | 19 | # Filtering volumes with no attachments 20 | volumes = filter(lambda i: len(i['Attachments']) == 0, relay.get(D.volumes)) 21 | for volume in volumes: 22 | try: 23 | to_terminate.append(volume['VolumeId']) 24 | print('Terminating EBS volume {0} with no attachments'.format(volume['VolumeId'])) 25 | except Exception as e: 26 | print('EBS volume {0} not considered for termination because of a processing error: {1}'.format(volume['VolumeId'], e)) 27 | 28 | if len(to_terminate) == 0: 29 | print('No volumes to terminate! Exiting.') 30 | exit() 31 | else: 32 | relay.outputs.set('volumeIDs', to_terminate) 33 | print('Setting output `volumeIDs` to {0}'.format(to_terminate)) 34 | 35 | -------------------------------------------------------------------------------- /ec2-provision-and-configure-with-bolt/README.md: -------------------------------------------------------------------------------- 1 | This workflow uses Terraform to provision an EC2 instance, then it runs a Bolt 2 | plan to configure it. 3 | 4 | ## Prerequisites 5 | 6 | Before you run the workflow, make sure you have access to the following: 7 | - An AWS account with privileges to create an EC2 instance and a security group, and the ability to read and write to an S3 bucket. 8 | - An AWS VPC where you want to deploy your setup. 9 | - An SSH key to connect to the EC2 instance (create or upload it to AWS). 10 | - A repository with a Boltdir containing the plan you would like to run on the EC2 instance. 11 | 12 | ## Run the workflow 13 | 14 | 1. Add the workflow in Relay 15 | 2.
Setup unconfigured connections: 16 | - Click **Setup** 17 | - On the right sidebar, you will have a list of unconfigured Connections 18 | - Get your AWS account credentials and configure the `terraform-provider` 19 | aws connection. 20 | - Configure the `bolt-ec2-private-key` ssh connection. 21 | - Configure the `bolt-repo-private-key` ssh connection. 22 | 3. Click **Run** and when prompted, fill in any relevant parameters. 23 | -------------------------------------------------------------------------------- /ec2-provision-and-configure-with-bolt/ec2-provision-and-configure-with-bolt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/ec2-provision-and-configure-with-bolt/ec2-provision-and-configure-with-bolt.png -------------------------------------------------------------------------------- /ec2-provision-and-configure-with-bolt/ec2-provision-and-configure-with-bolt.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Provision an EC2 instance and configure it with Bolt 3 | description: This workflow uses Terraform to provision an EC2 instance, then it runs a Bolt plan to configure it. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/ec2-provision-and-configure-with-bolt 5 | tags: 6 | - continuous delivery 7 | 8 | parameters: 9 | terraformRepository: 10 | description: The git repository where bolt files are located 11 | default: https://github.com/puppetlabs/relay-workflows 12 | awsRegion: 13 | description: The AWS region to deploy the EC2 instance 14 | default: us-west-1 15 | vpcID: 16 | description: The AWS vpc id to deploy EC2 instance 17 | sshKeyName: 18 | description: The AWS ssh key for the EC2 instance 19 | default: relay-bolt 20 | terraformStateBucket: 21 | description: The name of the bucket you'd like to store terraform state in 22 | terraformStateFilename: 23 | description: The name of the state file to use 24 | default: bolt-workflow.tfstate 25 | terraformWorkspace: 26 | description: The name of the Terraform workspace to use 27 | default: default 28 | boltRepository: 29 | description: The git repository URL where bolt files are located 30 | boltPlanName: 31 | description: The name of the bolt plan to run 32 | boltProjectDir: 33 | description: The path to the bolt directory in boltRepository 34 | default: Boltdir 35 | 36 | steps: 37 | - name: provision-ec2-with-terraform 38 | image: relaysh/terraform-step-apply 39 | spec: 40 | backendConfig: 41 | bucket: !Parameter terraformStateBucket 42 | region: !Parameter awsRegion 43 | key: !Parameter terraformStateFilename 44 | vars: 45 | vpc_id: !Parameter vpcID 46 | aws_region: !Parameter awsRegion 47 | key_name: !Parameter sshKeyName 48 | workspace: !Parameter terraformWorkspace 49 | directory: ec2-provision-and-configure-with-bolt/infra/ 50 | aws: !Connection { type: aws, name: terraform-provider } 51 | # Note that the terraform files are pulled from a public repository, 52 | # so we don't need an ssh key here like we do below. 
53 | git: 54 | name: relay-workflows 55 | repository: !Parameter terraformRepository 56 | 57 | - name: configure-instance-with-bolt 58 | dependsOn: 59 | - provision-ec2-with-terraform 60 | image: relaysh/bolt-step-run 61 | spec: 62 | type: plan 63 | name: !Parameter boltPlanName 64 | projectDir: !Parameter boltProjectDir 65 | installModules: true 66 | targets: !Output [provision-ec2-with-terraform, EC2_IP] 67 | credentials: 68 | id_rsa: !Connection { type: ssh, name: bolt-ec2-private-key } 69 | transport: 70 | type: ssh 71 | user: ubuntu 72 | privateKey: id_rsa 73 | verifyHost: false 74 | git: 75 | # Since the bolt plan might be pulled from a private repository, we've included 76 | # a parameter here for cloning in that scenario. If you are cloning from a public 77 | # repository and no public key is required, you can remove the connection line below. 78 | connection: !Connection { type: ssh, name: bolt-repo-private-key } 79 | name: bolt-repository 80 | repository: !Parameter boltRepository 81 | -------------------------------------------------------------------------------- /ec2-provision-and-configure-with-bolt/infra/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" {} 3 | } 4 | 5 | provider "aws" { 6 | region = local.workspace["aws_region"] 7 | shared_credentials_file = "/workspace/credentials" 8 | } 9 | 10 | resource "aws_security_group" "web_sg" { 11 | name = "Web SG" 12 | description = "Managed by Terraform" 13 | vpc_id = local.workspace["vpc_id"] 14 | 15 | ingress { 16 | from_port = 22 17 | to_port = 22 18 | protocol = "TCP" 19 | cidr_blocks = ["0.0.0.0/0"] 20 | } 21 | 22 | ingress { 23 | from_port = 80 24 | to_port = 80 25 | protocol = "TCP" 26 | cidr_blocks = ["0.0.0.0/0"] 27 | } 28 | 29 | egress { 30 | from_port = 0 31 | to_port = 0 32 | protocol = "-1" 33 | cidr_blocks = ["0.0.0.0/0"] 34 | } 35 | } 36 | 37 | resource "aws_instance" "web" { 38 | ami = var.ec2_ami 39 | instance_type = var.ec2_machine_type 40 | vpc_security_group_ids = [aws_security_group.web_sg.id] 41 | key_name = local.workspace["key_name"] 42 | 43 | root_block_device { 44 | volume_type = "gp2" 45 | volume_size = var.ec2_disk_size 46 | } 47 | 48 | depends_on = [aws_security_group.web_sg] 49 | } 50 | -------------------------------------------------------------------------------- /ec2-provision-and-configure-with-bolt/infra/outputs.tf: -------------------------------------------------------------------------------- 1 | output "EC2_IP" { 2 | value = aws_instance.web.public_ip 3 | } -------------------------------------------------------------------------------- /ec2-provision-and-configure-with-bolt/infra/variables.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Workaround for https://github.com/hashicorp/terraform/issues/15966 3 | workspace-settings = { 4 | file = "workspace.${terraform.workspace}.tfvars.json" 5 | 6 | defaults = { 7 | vpc_id = "" 8 | aws_region = "" 9 | key_name = "" 10 | } 11 | } 12 | 13 | workspace = "${merge(local.workspace-settings.defaults, jsondecode(fileexists(local.workspace-settings.file) ? 
file(local.workspace-settings.file) : "{}"))}" 14 | } 15 | 16 | variable "ec2_machine_type" { 17 | description = "EC2 machine type" 18 | default = "t2.micro" 19 | } 20 | 21 | variable "ec2_ami" { 22 | description = "EC2 machine type" 23 | default = "ami-02df9ea15c1778c9c" 24 | } 25 | 26 | variable "ec2_disk_size" { 27 | description = "EC2 disk size in GB" 28 | default = "20" 29 | } -------------------------------------------------------------------------------- /ec2-reaper/.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 160 3 | -------------------------------------------------------------------------------- /ec2-reaper/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the EC2 instances in a given account and region and selects a subset of those to terminate. The termination criteria are: 2 | 3 | * Not tagged with a termination date or lifetime after 4 minutes 4 | * The `termination_date` or `lifetime` tags are present but cannot be parsed 5 | * The `termination_date` or `lifetime` tags indicate that the instance has 6 | expired 7 | 8 | An instance may be configured to never terminate if its `lifetime` tag has the 9 | special value `indefinite`. 10 | 11 | ## Prerequisites 12 | 13 | Before you run this workflow, you will need the following: 14 | - An AWS account. 15 | - An AWS IAM user with permissions to list and terminate EC2 instances (if not 16 | run in dry run mode). 17 | - One or more running EC2 instances that are configured to use the 18 | `termination_date` or `lifetime` tags. 19 | 20 | ## Run the workflow 21 | 22 | Follow these steps to run the workflow: 23 | 24 | 1. Add your AWS credentials as a Connection: 25 | - Click **Setup** 26 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 27 | - **KEY**: `ACCESS KEY ID` 28 | - **VALUE**: Enter your AWS access key id associated with the account 29 | - **KEY**: `SECRET ACCESS KEY` 30 | - **VALUE**: Enter your AWS secret access key associated with the account 31 | - Click **Save** 32 | 33 | 2. Click **Run workflow** and wait for the workflow run page to appear. 34 | 3. Supply following parameters to the modal: 35 | - **KEY**: `region` 36 | - **VALUE**: The AWS region to run in 37 | - **KEY**: `terminationDateTag` 38 | - **VALUE**: The name of the tag to use for determining the termination date 39 | - **KEY**: `lifetimeTag` 40 | - **VALUE**: The name of the tag to use for determining the lifetime 41 | - **KEY**: `dryRun` 42 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 43 | 44 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 45 | `false`, instances not in compliance with this workflow policy will 46 | immediately be terminated. 47 | 48 | ## Run the workflow on a schedule 49 | 50 | Follow these steps to run this workflow on a schedule: 51 | - Un-comment out the Trigger block in the workflow file: 52 | 53 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 
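For reference, this is roughly what the trigger section looks like once uncommented. Note that `ec2-reaper.yaml` names the region parameter `awsRegion` (the snippet below in this README uses `region`), so bind `awsRegion` if you edit the workflow file directly; the values shown are simply this workflow's defaults:

```yaml
triggers:
  - name: schedule
    source:
      type: schedule
      schedule: '0 * * * *'   # hourly; adjust using standard cron syntax
    binding:
      parameters:
        awsRegion: us-east-1
        dryRun: true
        lifetimeTag: lifetime
        terminationDateTag: termination_date
```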
54 | 55 | ```yaml 56 | # triggers: 57 | # - name: schedule 58 | # source: 59 | # type: schedule 60 | # schedule: '0 * * * *' 61 | # binding: 62 | # parameters: 63 | # region: us-east-1 64 | # dryRun: true 65 | # lifetimeTag: lifetime 66 | # terminationDateTag: termination_date 67 | ``` 68 | 69 | - Configure the `schedule` trigger: 70 | - Supply the run interval in [cron format](https://crontab.guru/). 71 | - Configure the following parameter bindings: 72 | - Specify the `region` to run in. 73 | - Specify the `lifetimeTag` to use. 74 | - Specify the `terminationDateTag` to use. 75 | - Specify whether `dryRun` should be set to `true` or `false`. 76 | - Click **Save changes** -------------------------------------------------------------------------------- /ec2-reaper/ec2-reaper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/ec2-reaper/ec2-reaper.png -------------------------------------------------------------------------------- /ec2-reaper/ec2-reaper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Terminate EC2 instances without valid lifetime tag 3 | description: This workflow looks at all of the EC2 instances in a given account and region and selects a subset of those to terminate. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/ec2-reaper 5 | tags: 6 | - cost optimization 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # awsRegion: us-east-1 17 | # dryRun: true 18 | # lifetimeTag: lifetime 19 | # terminationDateTag: termination_date 20 | 21 | parameters: 22 | awsRegion: 23 | description: The AWS region to run in 24 | default: us-east-1 25 | terminationDateTag: 26 | description: The name of the tag to use for determining the termination date 27 | default: termination_date 28 | lifetimeTag: 29 | description: The name of the tag to use for determining the lifetime 30 | default: lifetime 31 | dryRun: 32 | description: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 
33 | default: 'true' 34 | 35 | steps: 36 | - name: describe-instances 37 | image: relaysh/aws-ec2-step-instances-describe 38 | spec: 39 | aws: &aws 40 | connection: !Connection { type: aws, name: my-aws-account } 41 | region: !Parameter awsRegion 42 | - name: filter-instances 43 | image: relaysh/core:latest-python 44 | spec: 45 | terminationDateTag: !Parameter terminationDateTag 46 | lifetimeTag: !Parameter lifetimeTag 47 | instances: !Output {from: describe-instances, name: instances} 48 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/ec2-reaper/filter-instances.py 49 | - name: termination-approval 50 | description: Wait for approval to terminate instances 51 | type: approval 52 | dependsOn: filter-instances 53 | when: 54 | - !Fn.equals [!Parameter dryRun, 'false'] 55 | - name: terminate-instances 56 | dependsOn: termination-approval 57 | image: relaysh/aws-ec2-step-instances-terminate 58 | when: 59 | - !Fn.equals [!Parameter dryRun, 'false'] 60 | spec: 61 | aws: *aws 62 | instanceIDs: !Output {from: filter-instances, name: instanceIDs} 63 | -------------------------------------------------------------------------------- /ec2-remove-unused-key-pairs/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the unused EC2 key pairs in an account and deletes them. 2 | 3 | ## Prerequisites 4 | 5 | Before you run this workflow, you will need the following: 6 | - An AWS account. 7 | - An AWS IAM user with permissions to list and terminate EC2 instances (if not 8 | run in dry run mode). 9 | - One or more running EC2 key pairs that are unused. 10 | 11 | ## Run the workflow 12 | 13 | Follow these steps to run the workflow: 14 | 1. Add your AWS credentials as a Connection: 15 | - Click **Setup** 16 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 17 | - **KEY**: `ACCESS KEY ID` 18 | - **VALUE**: Enter your AWS access key id associated with the account 19 | - **KEY**: `SECRET ACCESS KEY` 20 | - **VALUE**: Enter your AWS secret access key associated with the account 21 | - Click **Save** 22 | 23 | 2. Click **Run workflow** and wait for the workflow run page to appear. 24 | 3. Supply following parameters to the modal: 25 | - **KEY**: `region` 26 | - **VALUE**: The AWS region to run in 27 | - **KEY**: `dryRun` 28 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 29 | 30 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 31 | `false`, key pairs that are not used will be deleted. 32 | 33 | ## Run the workflow on a schedule 34 | 35 | Follow these steps to run this workflow on a schedule: 36 | - Un-comment out the Trigger block in the workflow file: 37 | 38 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 39 | 40 | ```yaml 41 | # triggers: 42 | # - name: schedule 43 | # source: 44 | # type: schedule 45 | # schedule: '0 * * * *' 46 | # binding: 47 | # parameters: 48 | # region: us-east-1 49 | # dryRun: true 50 | ``` 51 | 52 | - Configure the `schedule` trigger: 53 | - Supply the run interval in [cron format](https://crontab.guru/). 54 | - Configure the following parameter bindings: 55 | - Specify the `region` to run in. 56 | - Specify whether `dryRun` should be set to `true` or `false`. 
57 | - Click **Save changes** -------------------------------------------------------------------------------- /ec2-remove-unused-key-pairs/ec2-remove-unused-key-pairs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/ec2-remove-unused-key-pairs/ec2-remove-unused-key-pairs.png -------------------------------------------------------------------------------- /ec2-remove-unused-key-pairs/ec2-remove-unused-key-pairs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Remove unused EC2 key pairs 3 | description: This workflow finds all EC2 key pairs that are not used by an EC2 instance and removes them from the account. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/ec2-remove-unused-key-pairs 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # awsRegion: us-east-1 17 | # dryRun: true 18 | 19 | parameters: 20 | awsRegion: 21 | description: The AWS region to run in 22 | default: us-east-1 23 | dryRun: 24 | description: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 25 | default: 'true' 26 | 27 | steps: 28 | - name: describe-instances 29 | image: relaysh/aws-ec2-step-instances-describe 30 | spec: 31 | aws: &aws 32 | connection: !Connection { type: aws, name: my-aws-account } 33 | region: !Parameter awsRegion 34 | 35 | - name: describe-key-pairs 36 | image: relaysh/aws-ec2-step-key-pairs-describe 37 | spec: 38 | aws: *aws 39 | 40 | - name: filter-key-pairs 41 | image: relaysh/core:latest-python 42 | spec: 43 | instances: !Output {from: describe-instances, name: instances} 44 | keyPairs: !Output { from: describe-key-pairs, name: keyPairs} 45 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/ec2-remove-unused-key-pairs/filter-key-pairs.py 46 | 47 | - name: approval 48 | description: Wait for approval to delete key pairs 49 | type: approval 50 | dependsOn: filter-key-pairs 51 | when: 52 | - !Fn.equals [!Parameter dryRun, 'false'] 53 | 54 | - name: delete-key-pairs 55 | dependsOn: approval 56 | image: relaysh/aws-ec2-step-key-pairs-delete 57 | when: 58 | - !Fn.equals [!Parameter dryRun, 'false'] 59 | spec: 60 | aws: *aws 61 | keyPairNames: !Output {from: filter-key-pairs, name: keyPairNames} 62 | -------------------------------------------------------------------------------- /ec2-remove-unused-key-pairs/filter-key-pairs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-key-pairs.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of key pairs from the Relay Interface (in the form of parameters) 6 | # and filters the key pairs that are unused. It sets the output 7 | # variable `keypairs` to the list of key pairs that are unused, 8 | # and the output variable `formatted` to a human-readable representation. 
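#               As an illustrative example: if the instances in the account reference
#               keypair1 and keypair2, and the account also contains keypair3, the
#               script sets the output `keyPairNames` to ["keypair3"].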
9 | # Inputs: 10 | # - keyPairs - List of keyPairs to evaluate 11 | # - instances - List of instances to compare against 12 | # Outputs: 13 | # - keyPairNames - list of key pair names 14 | # - formatted - nicely formatted output for use in notifications or messages 15 | 16 | from relay_sdk import Interface, Dynamic as D 17 | 18 | relay = Interface() 19 | 20 | to_delete = [] 21 | to_keep = [] 22 | 23 | keyPairs = relay.get(D.keyPairs) 24 | instances = relay.get(D.instances) 25 | formatted = "Results of keypair filter:\n" 26 | 27 | err = 0 28 | 29 | if not keyPairs: 30 | formatted += 'No keypairs found.\n' 31 | err = 1 32 | if not instances: 33 | formatted += 'No instances found.\n' 34 | err = 1 35 | 36 | if err == 0: 37 | all_keyPairs = list(map(lambda i: i['KeyName'], relay.get(D.keyPairs))) 38 | used_keyPairs = list(map(lambda i: i['KeyName'], relay.get(D.instances))) 39 | 40 | for key in all_keyPairs: 41 | if key in used_keyPairs: 42 | to_keep.append(key) 43 | else: 44 | to_delete.append(key) 45 | 46 | formatted += '\nFound {} used key pairs:\n'.format(len(to_keep)) 47 | for key in to_keep: 48 | formatted += key + "\n" 49 | 50 | formatted += '\nFound {} unused key pairs:\n'.format(len(to_delete)) 51 | for key in to_delete: 52 | formatted += key + "\n" 53 | 54 | print("Setting output `keyPairNames` with list of {} unused keypairs".format(len(to_delete))) 55 | relay.outputs.set('keyPairNames', to_delete) 56 | 57 | print(formatted) 58 | relay.outputs.set('formatted',formatted) 59 | 60 | exit(0) -------------------------------------------------------------------------------- /ec2-scan-unused-key-pairs/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the unused EC2 key pairs in an account and sends a Slack notification about them. 2 | 3 | ## Prerequisites 4 | 5 | Before you run this workflow, you will need the following: 6 | - An AWS account. 7 | - An AWS IAM user with permissions to list EC2 instances 8 | - One or more running EC2 key pairs that are unused. 9 | - A Slack API token with "bot user" permissions: `chat:write`, `chat:write.public`, `chat:write.customize`. The [Slack step README](https://github.com/relay-integrations/relay-slack/blob/master/steps/message-send/README.md) has instructions on how to create a token. 10 | 11 | ## Run the workflow 12 | 13 | Follow these steps to run the workflow: 14 | 1. Add the workflow to your account from the [workflow's Library page](https://relay.sh/workflows/ec2-scan-unused-key-pairs/). 15 | 16 | 1. Add your AWS and Slack credentials as Connections. (If you already have usable Connections set up in Relay, adjust the workflow code to reference the existing names rather than add new ones.) 17 | - Navigate to the **Settings** tab of the workflow's page in your account 18 | - ✎ Edit the connection named `my-aws-account`, then click **Save** 19 | - **KEY**: `ACCESS KEY ID` 20 | - **VALUE**: Enter the AWS access key id associated with the account 21 | - **KEY**: `SECRET ACCESS KEY` 22 | - **VALUE**: Enter the AWS secret access key associated with the account 23 | - ✎ Edit the connection named `my-slack-token`, then click **Save** 24 | - **KEY**: `TOKEN` 25 | - **VALUE**: Paste the bot user token. 26 | 27 | 2. Click **Run workflow** and wait for the workflow run page to appear. 28 | 29 | 3. 
Supply following parameters, then click **Run**: 30 | - **KEY**: `region` 31 | - **VALUE**: The AWS region to run in 32 | - **KEY**: `slackChannel` 33 | - **VALUE**: The name of the channel to post to 34 | 35 | ## Run the workflow automatically 36 | 37 | * **Schedule**: By default the workflow will run once a week, at 7:00PST on Mondays. To adjust this, edit the value of the `schedule` field in the `triggers` section. It uses standard cron syntax; for help building a custom schedule, check out https://crontab.guru/ 38 | 39 | * **Default Parameters**: You may want to adjust the default Slack channel and AWS region used by the workflow. Edit the workflow's `parameters` section to set appropriate values for these keys. -------------------------------------------------------------------------------- /ec2-scan-unused-key-pairs/ec2-scan-unused-key-pairs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Notify about unused keypairs 3 | description: This workflow finds all EC2 key pairs that are not used by an EC2 instance and notifies a slack channel about them. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/ec2-scan-unused-key-pairs 5 | tags: 6 | - compliance 7 | 8 | triggers: 9 | # Run at 7AM every Monday 10 | - name: schedule 11 | source: 12 | type: schedule 13 | schedule: '0 7 * * 1' 14 | 15 | parameters: 16 | awsRegion: 17 | description: The AWS region to run in 18 | default: us-east-1 19 | slackChannel: 20 | description: The Slack channel to notify 21 | default: "#automation-notices" 22 | 23 | steps: 24 | - name: describe-instances 25 | image: relaysh/aws-ec2-step-instances-describe 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | region: !Parameter awsRegion 30 | 31 | - name: describe-key-pairs 32 | image: relaysh/aws-ec2-step-key-pairs-describe 33 | spec: 34 | aws: *aws 35 | 36 | - name: filter-key-pairs 37 | image: relaysh/core:latest-python 38 | spec: 39 | instances: !Output {from: describe-instances, name: instances} 40 | keyPairs: !Output { from: describe-key-pairs, name: keyPairs} 41 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/ec2-remove-unused-key-pairs/filter-key-pairs.py 42 | 43 | - name: notify-team 44 | image: relaysh/slack-step-message-send 45 | spec: 46 | connection: !Connection {type: slack, name: my-slack-token} 47 | channel: !Parameter slackChannel 48 | username: relay 49 | message: !Output {from: filter-key-pairs, name: formatted} 50 | -------------------------------------------------------------------------------- /ec2-scan-unused-key-pairs/metadata.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | aws/test: 3 | accessKeyID: AKIASAMPLEKEY 4 | secretAccessKey: 6bkpuV9fF3LX1Yo79OpfTwsw8wt5wsVLGTPJjDTu 5 | 6 | runs: 7 | '1': 8 | steps: 9 | describe-instances: 10 | spec: 11 | aws: !Connection [aws, test] 12 | region: us-east-1 13 | outputs: 14 | instances: 15 | - InstanceID: instance1 16 | KeyName: keypair1 17 | - InstanceID: instance2 18 | KeyName: keypair2 19 | describe-key-pairs: 20 | spec: 21 | aws: !Connection [aws, test] 22 | region: us-east-1 23 | outputs: 24 | keyPairs: 25 | - KeyPairID: id1 26 | KeyFingerprint: "de:ad:be:ef" 27 | KeyName: keypair1 28 | - KeyPairID: id2 29 | KeyFingerprint: "ba:d1:de:ea" 30 | KeyName: keypair2 31 | - KeyPairID: id3 32 | KeyFingerprint: "00:ff:ee:aa" 33 | KeyName: keypair3 34 | filter-key-pairs: 35 | 
spec: 36 | instances: !Output {from: describe-instances, name: instances} 37 | keyPairs: !Output {from: describe-key-pairs, name: keyPairs} 38 | outputs: 39 | keyPairNames: keypair3 40 | formatted: "Sample formatted output - found keypair3 unused" 41 | -------------------------------------------------------------------------------- /ec2-stop-untagged-instances/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the EC2 instances in a given account and region and stops the ones that are untagged. 2 | 3 | ## Prerequisites 4 | 5 | Before you run this workflow, you will need the following: 6 | - An AWS account. 7 | - An AWS IAM user with permissions to list and terminate EC2 instances (if not 8 | run in dry run mode). 9 | - One or more running EC2 instances that are untagged. 10 | 11 | ## Run the workflow 12 | 13 | Follow these steps to run the workflow: 14 | 1. Add your AWS credentials as a Connection: 15 | - Click **Setup** 16 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 17 | - **KEY**: `ACCESS KEY ID` 18 | - **VALUE**: Enter your AWS access key id associated with the account 19 | - **KEY**: `SECRET ACCESS KEY` 20 | - **VALUE**: Enter your AWS secret access key associated with the account 21 | - Click **Save** 22 | 23 | 2. Click **Run workflow** and wait for the workflow run page to appear. 24 | 3. Supply following parameters to the modal: 25 | - **KEY**: `region` 26 | - **VALUE**: The AWS region to run in 27 | - **KEY**: `dryRun` 28 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 29 | 30 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 31 | `false`, instances not in compliance with this workflow policy will 32 | immediately be stopped. 33 | 34 | ## Run the workflow on a schedule 35 | 36 | Follow these steps to run this workflow on a schedule: 37 | - Un-comment out the Trigger block in the workflow file: 38 | 39 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 40 | 41 | ```yaml 42 | # triggers: 43 | # - name: schedule 44 | # source: 45 | # type: schedule 46 | # schedule: '0 * * * *' 47 | # binding: 48 | # parameters: 49 | # region: us-east-1 50 | # dryRun: true 51 | ``` 52 | 53 | - Configure the `schedule` trigger: 54 | - Supply the run interval in [cron format](https://crontab.guru/). 55 | - Configure the following parameter bindings: 56 | - Specify the `region` to run in. 57 | - Specify whether `dryRun` should be set to `true` or `false`. 58 | - Click **Save changes** -------------------------------------------------------------------------------- /ec2-stop-untagged-instances/ec2-stop-untagged-instances.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/ec2-stop-untagged-instances/ec2-stop-untagged-instances.png -------------------------------------------------------------------------------- /ec2-stop-untagged-instances/ec2-stop-untagged-instances.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Stop untagged EC2 instances 3 | description: This workflow looks at all of the EC2 instances in a given account and region and stops the ones that are untagged. 
Requires an AWS account with permissions to delete stop EC2 instances. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/ec2-stop-untagged-instances 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # awsRegion: us-east-1 17 | # dryRun: true 18 | 19 | parameters: 20 | awsRegion: 21 | description: The AWS region to run in 22 | default: us-east-1 23 | dryRun: 24 | description: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 25 | default: 'true' 26 | 27 | steps: 28 | - name: describe-instances 29 | image: relaysh/aws-ec2-step-instances-describe 30 | spec: 31 | aws: &aws 32 | connection: !Connection { type: aws, name: my-aws-account } 33 | region: !Parameter awsRegion 34 | - name: filter-instances 35 | image: relaysh/core:latest-python 36 | spec: 37 | instances: !Output {from: describe-instances, name: instances} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/ec2-stop-untagged-instances/filter-instances.py 39 | - name: approval 40 | description: Wait for approval to stop instances 41 | type: approval 42 | dependsOn: filter-instances 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: stop-instances 46 | dependsOn: approval 47 | image: relaysh/aws-ec2-step-instances-stop 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | instanceIDs: !Output {from: filter-instances, name: instanceIDs} 53 | -------------------------------------------------------------------------------- /ec2-stop-untagged-instances/filter-instances.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-instances.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of instances from the Relay Interface (in the form of parameters) 6 | # and filters the instances that are untagged. It then sets the output 7 | # variable `instanceIDs` to the list of instances that are untagged. 
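#              Only instances in the `running` state are evaluated; an instance with
#              no tags at all is added to the stop list, while any tagged instance is
#              kept.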
8 | # Inputs: 9 | # - instances - List of instances to evaluate 10 | # Outputs: 11 | # - instanceIDs - list of instance IDs to stop in the next step 12 | 13 | from relay_sdk import Interface, Dynamic as D 14 | 15 | relay = Interface() 16 | 17 | to_stop = [] 18 | to_keep = [] 19 | 20 | instances = filter(lambda i: i['State']['Name'] == 'running', relay.get(D.instances)) 21 | for instance in instances: 22 | try: 23 | if instance['Tags'] is None: 24 | to_stop.append(instance['InstanceId']) 25 | else: 26 | to_keep.append(instance['InstanceId']) 27 | except Exception as e: 28 | print('\nEC2 instance {0} not considered for termination because of a processing error: {1}'.format(instance['InstanceId'], e)) 29 | 30 | print('\nFound {} instances (with tags) to keep:'.format(len(to_keep))) 31 | print(*[instance_id for instance_id in to_keep], sep = "\n") 32 | 33 | print('\nFound {} instances without tags to stop:'.format(len(to_stop))) 34 | print(*[instance_id for instance_id in to_stop], sep = "\n") 35 | 36 | relay.outputs.set('instanceIDs', to_stop) 37 | -------------------------------------------------------------------------------- /elbv2-delete-empty-loadbalancers/README.md: -------------------------------------------------------------------------------- 1 | This workflow finds empty ELBv2 load balancers by locating all load balancers that have 2 | empty target groups (i.e. no registered targets). 3 | 4 | ## Prerequisites 5 | 6 | Before you run this workflow, you will need the following: 7 | - An AWS account. 8 | - An AWS IAM user with permissions to list and delete ELBv2 load balancers (if not 9 | run in dry run mode). 10 | - One or more ELBv2 load balancers that are empty (no targets). 11 | 12 | ## Run the workflow 13 | 14 | Follow these steps to run the workflow: 15 | 1. Add your AWS credentials as a Connection: 16 | - Click **Setup** 17 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 18 | - **KEY**: `ACCESS KEY ID` 19 | - **VALUE**: Enter your AWS access key id associated with the account 20 | - **KEY**: `SECRET ACCESS KEY` 21 | - **VALUE**: Enter your AWS secret access key associated with the account 22 | - Click **Save** 23 | 24 | 2. Click **Run workflow** and wait for the workflow run page to appear. 25 | 3. Supply following parameters to the modal: 26 | - **KEY**: `region` 27 | - **VALUE**: The AWS region to run in 28 | - **KEY**: `dryRun` 29 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 30 | 31 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 32 | `false`, load balancers that are empty will immediately be deleted. 33 | 34 | ## Run the workflow on a schedule 35 | 36 | Follow these steps to run this workflow on a schedule: 37 | - Un-comment out the Trigger block in the workflow file: 38 | 39 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 40 | 41 | ```yaml 42 | # triggers: 43 | # - name: schedule 44 | # source: 45 | # type: schedule 46 | # schedule: '0 * * * *' 47 | # binding: 48 | # parameters: 49 | # region: us-east-1 50 | # dryRun: true 51 | ``` 52 | 53 | - Configure the `schedule` trigger: 54 | - Supply the run interval in [cron format](https://crontab.guru/). 55 | - Configure the following parameter bindings: 56 | - Specify the `region` to run in. 57 | - Specify whether `dryRun` should be set to `true` or `false`. 
58 | - Click **Save changes** -------------------------------------------------------------------------------- /elbv2-delete-empty-loadbalancers/elbv2-delete-empty-loadbalancers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/elbv2-delete-empty-loadbalancers/elbv2-delete-empty-loadbalancers.png -------------------------------------------------------------------------------- /elbv2-delete-empty-loadbalancers/elbv2-delete-empty-loadbalancers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Delete empty ELBv2 load balancers 3 | description: This workflow finds empty ELBv2 load balancers by locating all load balancers that have empty target groups (i.e. no registered targets). It then terminates these load balancers. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/elbv2-delete-empty-loadbalancers 5 | tags: 6 | - cost optimization 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # awsRegion: us-east-1 17 | # dryRun: true 18 | 19 | parameters: 20 | awsRegion: 21 | description: The AWS region to run in 22 | default: us-east-1 23 | dryRun: 24 | description: True if this workflow should only print the resources it would delete 25 | default: 'true' 26 | 27 | steps: 28 | - name: describe-load-balancers 29 | image: relaysh/aws-elbv2-step-load-balancers-describe 30 | spec: 31 | aws: &aws 32 | connection: !Connection { type: aws, name: my-aws-account } 33 | region: !Parameter awsRegion 34 | 35 | - name: describe-target-groups 36 | image: relaysh/aws-elbv2-step-target-groups-describe 37 | spec: 38 | aws: *aws 39 | 40 | - name: describe-targets 41 | image: relaysh/aws-elbv2-step-targets-describe 42 | spec: 43 | aws: *aws 44 | targetgroups: !Output {from: describe-target-groups, name: targetgroups} 45 | 46 | - name: filter-empty-load-balancers 47 | image: relaysh/core:latest-python 48 | spec: 49 | loadbalancers: !Output {from: describe-load-balancers, name: loadbalancers} 50 | targetgroups: !Output {from: describe-target-groups, name: targetgroups} 51 | targets: !Output {from: describe-targets, name: targets} 52 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/elbv2-delete-empty-loadbalancers/filter-loadbalancers.py 53 | 54 | - name: approval 55 | description: Wait for manual approval to delete load balancers 56 | type: approval 57 | dependsOn: filter-empty-load-balancers 58 | when: 59 | - !Fn.equals [!Parameter dryRun, 'false'] 60 | 61 | - name: delete-load-balancers 62 | image: relaysh/aws-elbv2-step-load-balancers-delete 63 | dependsOn: approval 64 | when: 65 | - !Fn.equals [!Parameter dryRun, 'false'] 66 | spec: 67 | aws: *aws 68 | loadbalancerARNs: !Output {from: filter-empty-load-balancers, name: loadbalancerARNs} 69 | -------------------------------------------------------------------------------- /elbv2-delete-empty-loadbalancers/filter-loadbalancers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-loadbalancers.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of Load Balancers and Target Groups from the Relay Interface (in 6 | # the form 
of parameters) and filters the load balancers that are empty. 7 | # Inputs: 8 | # - loadbalancers - list of ELB v2 load balancers 9 | # - targetgroups - list of target groups 10 | # Outputs: 11 | # - loadbalancerARNs - list of empty ELBv2 load balancer ARNs to be deleted 12 | 13 | from relay_sdk import Interface, Dynamic as D 14 | 15 | relay = Interface() 16 | 17 | loadbalancer_arns = list(map(lambda i: i['LoadBalancerArn'], relay.get(D.loadbalancers))) 18 | targets = relay.get(D.targets) 19 | 20 | to_terminate = [] 21 | to_keep = [] 22 | 23 | # Only 1 Load Balancer can be associated per Target Group - https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html 24 | for arn in loadbalancer_arns: 25 | terminate = True 26 | for group in relay.get(D.targetgroups): 27 | if arn in group['LoadBalancerArns'] and len(targets[group['TargetGroupArn']]) != 0: 28 | terminate = False 29 | to_keep.append(arn) 30 | if terminate: 31 | to_terminate.append(arn) 32 | 33 | print("\nLoad Balancers that are NOT empty:\n") 34 | print(*[a for a in to_keep], sep="\n") 35 | 36 | print("\nLoad Balancers that are empty:\n") 37 | print(*[a for a in to_terminate], sep="\n") 38 | 39 | if len(to_terminate) == 0: 40 | exit() 41 | else: 42 | print('\nSetting output `loadbalancerARNs` to list of {} load balancers to terminate'.format(len(to_terminate))) 43 | relay.outputs.set('loadbalancerARNs', to_terminate) -------------------------------------------------------------------------------- /empty/README.md: -------------------------------------------------------------------------------- 1 | This empty workflow is used for the initial revision when a user creates a "New workflow" in the UI app. 2 | -------------------------------------------------------------------------------- /empty/empty.yaml: -------------------------------------------------------------------------------- 1 | ## Full guide available at https://relay.sh/docs/getting-started 2 | 3 | apiVersion: v1 4 | -------------------------------------------------------------------------------- /firehydrant-rollback/README.md: -------------------------------------------------------------------------------- 1 | # Roll back a Kubernetes deployment from a FireHydrant incident runbook 2 | 3 | This workflow connects Relay to [FireHydrant.io](https://firehydrant.io), a service for managing incidents that affect the availability of your service. It demonstrates rolling back a bad Kubernetes deployment as a remediation action to fix a degraded service. 4 | 5 | This documentation is a shorter version of the [Deployment Rollbacks via FireHydrant Runbook](http://relay.sh/blog/firehydrant-rollback-runbook/) blog post; see that for detailed instructions. 6 | 7 | # Connecting the Services 8 | 9 | In FireHydrant, we'll crate a Runbook that will trigger the workflow by sending a webhook to Relay. Create a new Runbook and add a **Send a Webhook** step. For the **Endpoint URL**, paste the webhook address from the Relay's **Settings** sidebar. The **HMAC Secret** field is an arbitrary string (not currently used). 
For the **JSON Payload** field, paste the following template: 10 | 11 | ```json 12 | { 13 | "incident_id": "{{ incident.id }}", 14 | "name": "{{ incident.name }}", 15 | "summary": "{{ incident.summary }}", 16 | "service": "{{ incident.services[0].name | downcase }}", 17 | "environment": "{{ incident.environments[0].name | downcase }}", 18 | "channel_id": "{{ incident.channel_id }}", 19 | "channel_name": "{{ incident.channel_name }}" 20 | } 21 | ``` 22 | 23 | Next, create a FireHydrant API key for Relay to post information back into the incident timeline. Under **Integrations** - **Bot users** in FireHydrant, create a new **Bot user** with a memorable name and description. Save the resulting API token into a Relay secret on the Relay workflow's **Settings** sidebar named `apiKey` (case-sensitive). 24 | 25 | ## GCP Authentication Setup 26 | 27 | This workflow uses a GCP Connection type on Relay's end, which requires a [service account](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#service-account-tokens) 28 | configured on your cluster. Follow the GCP guide to API Server Authentication's ["Service in other environments"](https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication#service_in_other_environments) section to set one up. This workflow will require the service account have the role `roles/container.developer` attached to it; if you re-use the connection for other workflows it may require additional permissions. Once you've gotten the service account JSON file downloaded, add a GCP Connection in Relay, name it `relay-service-account` and paste the contents of the JSON file into the dialog. Under the hood, Relay stores this securely in our Vault service and makes the contents available to workflow containers through the [!Connection custom type](https://relay.sh/docs/using-workflows/managing-connections/) in the workflow. 29 | 30 | ## Configuring Services and Environments 31 | 32 | The Environments section lets you enumerate the instances of your service, to better characterize the impact of an incident, help assign owners for remediation actions, and message outage information to the appropriate audiences. Check out this FireHydrant [helpdesk article on inventory management](https://help.firehydrant.io/en/articles/4192249-inventory-management-functionalities-services-and-environments) for more details on infrastructure organization. For our purposes, the goal of defining environments is to map them onto Kubernetes namespaces where our application is running. (For production workloads, it's more likely that your environments map to distinct clusters; that's totally possible to handle in Relay but is beyond the scope of this introduction!) 33 | -------------------------------------------------------------------------------- /firehydrant-rollback/firehydrant-rollback.yaml: -------------------------------------------------------------------------------- 1 | # This workflow rolls back a kubernetes deployment 2 | # in a given environment 3 | 4 | apiVersion: v1 5 | summary: Roll back a Kubernetes deployment from a FireHydrant incident runbook 6 | description: When a production FireHydrant incident occurs, roll back a change. 
7 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/firehydrant-rollback 8 | parameters: 9 | incidentID: 10 | description: Internal FireHydrant UUID of the incident 11 | incidentName: 12 | description: Human-friendly name of the incident 13 | incidentSummary: 14 | description: Summary description of the incident 15 | incidentService: 16 | description: Service affected by the incident 17 | incidentEnvironment: 18 | description: Environment affected by the incident (maps to kubernetes namespace) 19 | slackChannelID: 20 | description: Internal ID of the Slack channel created to manage the incident 21 | slackChannelName: 22 | description: Human friendly name of the Slack channel for incident communications 23 | triggers: 24 | - name: firehydrant-incident 25 | source: 26 | type: webhook 27 | image: relaysh/stdlib-trigger-json 28 | binding: 29 | parameters: 30 | incidentID: !Data incident_id 31 | incidentName: !Data name 32 | incidentSummary: !Data summary 33 | incidentService: !Data service 34 | incidentEnvironment: !Data environment 35 | slackChannelID: !Data channel_id 36 | slackChannelName: !Data channel_name 37 | steps: 38 | - name: kubectl-deployment-rollback 39 | image: relaysh/gcp-step-rollout-undo 40 | spec: 41 | namespace: !Parameter incidentEnvironment 42 | cluster: "mycluster" 43 | zone: "us-central1-c" 44 | google: !Connection { type: gcp, name: relay-service-account } 45 | deployment: !Parameter incidentService 46 | - name: post-update 47 | image: relaysh/firehydrant-step-timeline-update 48 | spec: 49 | apiKey: !Secret apiKey 50 | incidentID: !Parameter incidentID 51 | message: !Fn.concat ["Update from Relay rollback: ", !Output {from: kubectl-deployment-rollback, name: result} ] 52 | -------------------------------------------------------------------------------- /gcp-disk-reaper/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the GCP disks in a given account and zone and 2 | selects a subset of those to terminate that don't have any users (i.e. unattached). 3 | 4 | ## Prerequisites 5 | 6 | Before you run this workflow, you will need the following: 7 | - A GCP account. 8 | - An GCP service account with permissions to list and terminate GCP disks (if not 9 | run in dry run mode). 10 | - One or more running GCP disks that are not attached to any instances. 11 | 12 | ## Run the workflow 13 | 14 | Follow these steps to run the workflow: 15 | 1. Add your GCP service account credentials as a Connection: 16 | 2. Click **Run workflow** and wait for the workflow run page to appear. 17 | 3. Supply following parameters to the modal: 18 | - **KEY**: `zone` 19 | - **VALUE**: The GCP zone to run in. 20 | - **KEY**: `dryRun` 21 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 22 | 23 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 24 | `false`, unattached disks will immediately be terminated. 25 | 26 | ## Run the workflow on a schedule 27 | 28 | Follow these steps to run this workflow on a schedule: 29 | - Un-comment out the Trigger block in the workflow file: 30 | 31 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 
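As a sketch, if you wanted the reaper to run once a day at hour 8 instead of hourly, the uncommented trigger might look like this (the cron expression and parameter values are only illustrations; adjust them to suit your environment):

```yaml
triggers:
  - name: schedule
    source:
      type: schedule
      schedule: '0 8 * * *'   # once a day, at minute 0 of hour 8
    binding:
      parameters:
        zone: us-central1-a
        dryRun: true
```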
32 | 33 | ```yaml 34 | # triggers: 35 | # - name: schedule 36 | # source: 37 | # type: schedule 38 | # schedule: '0 * * * *' 39 | # binding: 40 | # parameters: 41 | # zone: us-central1-a 42 | # dryRun: true 43 | ``` 44 | 45 | - Configure the `schedule` trigger: 46 | - Supply the run interval in [cron format](https://crontab.guru/). 47 | - Configure the following parameter bindings: 48 | - Specify the `zone` to run in. 49 | - Specify whether `dryRun` should be set to `true` or `false`. 50 | - Click **Save changes** -------------------------------------------------------------------------------- /gcp-disk-reaper/filter-disks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | relay = Interface() 5 | 6 | if __name__ == '__main__': 7 | to_terminate = [] 8 | print('Disk to be terminated:') 9 | disks = relay.get(D.disks) 10 | for disk in disks: 11 | if "users" not in disk.keys(): 12 | print(disk.get('name')) 13 | to_terminate.append(disk) 14 | 15 | print('Found {} disks that are unattached'.format(len(to_terminate))) 16 | print('Setting output `disks` to list of {} disks to terminate'.format(len(to_terminate))) 17 | 18 | relay.outputs.set('disks', to_terminate) 19 | -------------------------------------------------------------------------------- /gcp-disk-reaper/gcp-disk-reaper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/gcp-disk-reaper/gcp-disk-reaper.png -------------------------------------------------------------------------------- /gcp-disk-reaper/gcp-disk-reaper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Delete GCP disks that are unattached 3 | description: This workflow looks at all of the GCP disks in a given account and zone and terminates the ones that don't have any users. 4 | homepage: https://github.com/puppetlabs/relay-workflows/blob/master/gcp-disk-reaper 5 | tags: 6 | - cost optimization 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 
9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # zone: us-central1-a 17 | # dryRun: true 18 | 19 | parameters: 20 | zone: 21 | description: The GCP zone to run in 22 | default: us-central1-a 23 | dryRun: 24 | description: True if this workflow should only print the resources it would delete 25 | default: 'true' 26 | 27 | steps: 28 | - name: list-disks 29 | image: relaysh/gcp-step-disk-list 30 | spec: 31 | google: &google 32 | service_account_info: !Connection { type: gcp, name: my-gcp-account } 33 | zone: !Parameter zone 34 | - name: filter-disks 35 | image: relaysh/core:latest-python 36 | spec: 37 | disks: !Output {from: list-disks, name: disks} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/gcp-disk-reaper/filter-disks.py 39 | - name: delete-approval 40 | description: Wait for approval to delete disks 41 | type: approval 42 | dependsOn: filter-disks 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: delete-disks 46 | dependsOn: delete-approval 47 | image: relaysh/gcp-step-disk-delete 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | google: *google 52 | disks: !Output {from: filter-disks, name: disks} 53 | -------------------------------------------------------------------------------- /gcp-instance-reaper/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the GCP instances in a given account and zone and 2 | selects a subset of those to terminate. The termination criteria are: 3 | 4 | * Not labelled with a termination date or lifetime after 4 minutes 5 | * The `termination_date` or `lifetime` labels are present but cannot be parsed. 6 | * The `termination_date` or `lifetime` labels indicate that the instance has 7 | expired. 8 | 9 | An instance may be configured to never terminate if its `lifetime` label has 10 | the special value `indefinite`. Other valid values for the `lifetime` label are 11 | of the form `` where `` is any integer and `` is a 12 | time unit of `w` (weeks), `h` (hours), `d` (days) or `m` (months). So, as an 13 | example, if the `lifetime` label for an instance has a value of `43w` then it 14 | should be terminated after it's 43 weeks old. 15 | 16 | ## Prerequisites 17 | 18 | Before you run this workflow, you will need the following: 19 | - A GCP account. 20 | - An GCP service account with permissions to list and terminate GCP instances (if not 21 | run in dry run mode). 22 | - One or more running GCP instances that are configured to use the 23 | `termination_date` or `lifetime` labels. 24 | 25 | ## Run the workflow 26 | 27 | Follow these steps to run the workflow: 28 | 1. Add your GCP service account credentials as a Connection: 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply following parameters to the modal: 31 | - **KEY**: `zone` 32 | - **VALUE**: The GCP zone to run in. 33 | - **KEY**: `terminationDateLabel` 34 | - **VALUE**: The label to use for determining the termination date. 35 | - **KEY**: `lifetimeLabel` 36 | - **VALUE**: The label to use for determining the lifetime. 37 | - **KEY**: `dryRun` 38 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 39 | 40 | 4. 
**Warning:** If you run the workflow with the `dryRun` parameter set to 41 | `false`, instances not in compliance with this workflow policy will 42 | immediately be terminated. 43 | 44 | ## Run the workflow on a schedule 45 | 46 | Follow these steps to run this workflow on a schedule: 47 | - Un-comment out the Trigger block in the workflow file: 48 | 49 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 50 | 51 | ```yaml 52 | # triggers: 53 | # - name: schedule 54 | # source: 55 | # type: schedule 56 | # schedule: '0 * * * *' 57 | # binding: 58 | # parameters: 59 | # zone: us-central1-a 60 | # terminationDateLabel: termination_date 61 | # lifetimeLabel: lifetime 62 | # dryRun: true 63 | ``` 64 | 65 | - Configure the `schedule` trigger: 66 | - Supply the run interval in [cron format](https://crontab.guru/). 67 | - Configure the following parameter bindings: 68 | - Specify the `zone` to run in. 69 | - Specify the `terminationLabel` to use. 70 | - Specify the `lifetimeLabel` to use. 71 | - Specify whether `dryRun` should be set to `true` or `false`. 72 | - Click **Save changes** 73 | -------------------------------------------------------------------------------- /gcp-instance-reaper/gcp-instance-reaper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/gcp-instance-reaper/gcp-instance-reaper.png -------------------------------------------------------------------------------- /gcp-instance-reaper/gcp-instance-reaper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Delete GCP instances without valid `lifetime` tag 3 | description: This workflow looks at all of the GCP instances in a given account and zone and selects a subset of those to terminate. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/gcp-instance-reaper 5 | tags: 6 | - cost optimization 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 
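# ('0 * * * *' fires at minute 0 of every hour; see https://crontab.guru/ for help
# composing a different schedule.)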
9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # zone: us-central1-a 17 | # terminationDateLabel: termination_date 18 | # lifetimeLabel: lifetime 19 | # dryRun: true 20 | 21 | parameters: 22 | zone: 23 | description: The GCP zone to run in 24 | default: us-central1-a 25 | terminationDateLabel: 26 | description: The name of the label to use for determining the termination date 27 | default: termination_date 28 | lifetimeLabel: 29 | description: The name of the label to use for determining the lifetime 30 | default: lifetime 31 | dryRun: 32 | description: True if this workflow should only print the resources it would delete 33 | default: 'true' 34 | 35 | steps: 36 | - name: list-instances 37 | image: relaysh/gcp-step-instance-list 38 | spec: 39 | google: &google 40 | service_account_info: !Connection { type: gcp, name: my-gcp-account } 41 | zone: !Parameter zone 42 | - name: filter-instances 43 | image: relaysh/core:latest-python 44 | spec: 45 | terminationDateLabel: !Parameter terminationDateLabel 46 | lifetimeLabel: !Parameter lifetimeLabel 47 | instances: !Output {from: list-instances, name: instances} 48 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/gcp-instance-reaper/filter-instances.py 49 | - name: delete-approval 50 | description: Wait for approval to delete instances 51 | type: approval 52 | dependsOn: filter-instances 53 | when: 54 | - !Fn.equals [!Parameter dryRun, 'false'] 55 | - name: delete-instances 56 | dependsOn: delete-approval 57 | image: relaysh/gcp-step-instance-delete 58 | when: 59 | - !Fn.equals [!Parameter dryRun, 'false'] 60 | spec: 61 | google: *google 62 | instances: !Output {from: filter-instances, name: instances} 63 | -------------------------------------------------------------------------------- /http-health-check/README.md: -------------------------------------------------------------------------------- 1 | This workflow checks the status of an http endpoint. If the 2 | endpoint does not return a 200, then it restarts an EC2 3 | instance. 4 | 5 | ## Prerequisites 6 | 7 | Before you run this workflow, you will need the following: 8 | - An AWS account. 9 | - An AWS IAM user with permissions to restart EC2 instances (if not 10 | run in dry run mode). 11 | - One or more running EC2 instances running a HTTP service. 12 | 13 | ## Run the workflow manually 14 | 15 | Follow these steps to run the workflow: 16 | 1. Add your AWS credentials as a Connection: 17 | - Click **Setup** 18 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 19 | - **KEY**: `ACCESS KEY ID` 20 | - **VALUE**: Enter your AWS access key id associated with the account 21 | - **KEY**: `SECRET ACCESS KEY` 22 | - **VALUE**: Enter your AWS secret access key associated with the account 23 | - Click **Save** 24 | 25 | 2. Click **Run workflow** and wait for the workflow run page to appear. 26 | 3. Supply following parameters to the modal: 27 | - **KEY**: `instanceID` 28 | - **VALUE**: The EC2 instance to restart in response to health check 29 | - **KEY**: `url` 30 | - **VALUE**: The URL to make a health check against. 31 | 32 | ## Run the workflow on a schedule 33 | 34 | Follow these steps to run this workflow on a schedule: 35 | - Un-comment out the Trigger block in the workflow file: 36 | 37 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 
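Once uncommented, both parameters need real values: `instanceID` has no default in `http-health-check.yaml`, so the schedule trigger must bind an actual instance ID. A sketch (the instance ID below is only a placeholder):

```yaml
triggers:
  - name: schedule
    source:
      type: schedule
      schedule: '0 * * * *'
    binding:
      parameters:
        instanceID: i-0123456789abcdef0   # placeholder; replace with your instance ID
        url: https://relay.sh
```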
38 | 39 | ```yaml 40 | # triggers: 41 | # - name: schedule 42 | # source: 43 | # type: schedule 44 | # schedule: '0 * * * *' 45 | # binding: 46 | # parameters: 47 | # instanceID: i-1498314 48 | # url: 'https://relay.sh' 49 | ``` 50 | 51 | - Configure the `schedule` trigger: 52 | - Supply the run interval in [cron format](https://crontab.guru/). 53 | - Configure the following parameter bindings: 54 | - Specify the `instanceID` to restart. 55 | - Specify the `url` of the http service to check. 56 | - Click **Save changes** -------------------------------------------------------------------------------- /http-health-check/checkHealth.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: checkHealth.py 4 | # Description: This example script makes a request to a URL and sets its status 5 | # as the output. The status can be used in subsequent steps to 6 | # perform different bits of logic in a workflow. 7 | 8 | from urllib.request import urlopen 9 | 10 | from relay_sdk import Interface, Dynamic as D 11 | 12 | relay = Interface() 13 | 14 | def get_http_status(url): 15 | try: 16 | with urlopen(url) as response: 17 | return str(response.status) 18 | except: 19 | # empty string indicates something bad happened. 20 | return "" 21 | 22 | if __name__ == '__main__': 23 | url = None 24 | try: 25 | url = relay.get(D.url) 26 | except: 27 | print('No URL was configured. Exiting.') 28 | exit(1) 29 | status = get_http_status(url) 30 | print("Status for service {0} is {1}".format(url, status)) 31 | relay.outputs.set('status', status) 32 | -------------------------------------------------------------------------------- /http-health-check/http-health-check.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Check the status of an http endpoint 3 | description: This workflow performs a general http health status check and then restarts an EC2 instance in response if the http status is not 200. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/http-health-check 5 | tags: 6 | - auto remediation 7 | 8 | parameters: 9 | instanceID: 10 | description: The EC2 instance ID to reboot when the health check fails. 11 | url: 12 | description: The URL to make a health check against.
13 | default: https://relay.sh 14 | 15 | # Uncomment this trigger to run on a schedule 16 | # triggers: 17 | # - name: schedule 18 | # source: 19 | # type: schedule 20 | # schedule: '0 * * * *' 21 | # binding: 22 | # parameters: 23 | # instanceID: '' # insert your EC2 instance id here 24 | # url: '' # insert the URL to make a health check against 25 | 26 | steps: 27 | - name: get-http-status 28 | image: relaysh/core:latest-python 29 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/http-health-check/checkHealth.py 30 | spec: 31 | url: !Parameter url 32 | 33 | - name: ec2-reboot-instances 34 | image: relaysh/aws-ec2-step-instances-reboot 35 | when: 36 | - !Fn.notEquals [!Output {from: get-http-status, name: status}, '200'] 37 | spec: 38 | aws: 39 | connection: !Connection { type: aws, name: my-aws-account } 40 | region: us-west-2 41 | instanceIDs: 42 | - !Parameter instanceID 43 | -------------------------------------------------------------------------------- /images/code-tab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/code-tab.png -------------------------------------------------------------------------------- /images/datadog-api-key.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/datadog-api-key.png -------------------------------------------------------------------------------- /images/datadog-application-key.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/datadog-application-key.png -------------------------------------------------------------------------------- /images/datadog-k8s-rollback-modal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/datadog-k8s-rollback-modal.png -------------------------------------------------------------------------------- /images/datadog-monitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/datadog-monitor.png -------------------------------------------------------------------------------- /images/datadog-test-alert.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/datadog-test-alert.png -------------------------------------------------------------------------------- /images/datadog-trigger.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/datadog-trigger.png -------------------------------------------------------------------------------- /images/datadog-webhook.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/datadog-webhook.png 
-------------------------------------------------------------------------------- /images/dry-run-modal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/dry-run-modal.png -------------------------------------------------------------------------------- /images/guide-connections.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/guide-connections.png -------------------------------------------------------------------------------- /images/missing-connection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/missing-connection.png -------------------------------------------------------------------------------- /images/run-workflow-action.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/run-workflow-action.png -------------------------------------------------------------------------------- /images/runbutton.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /images/settings-sidenav.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/images/settings-sidenav.png -------------------------------------------------------------------------------- /kubectl-apply-on-dockerhub-push/README.md: -------------------------------------------------------------------------------- 1 | This workflow sets a new image tag for a kubernetes deployment when an image is 2 | pushed to Docker Hub. 3 | 4 | ## Prerequisites 5 | 6 | Before you run this workflow, you will need the following: 7 | - An account on [Docker Hub](https://hub.docker.com/) 8 | - An operating Kubernetes cluster with a deployment 9 | 10 | ## Run the workflow 11 | 12 | 1. Add the workflow in Relay 13 | 2. Setup the Docker Hub webhook: 14 | - Click **Setup** 15 | - On the right sidebar, copy the webhook for the dockerhub-image-pushed trigger 16 | - Navigate to your repository on Docker Hub 17 | - Click **Webhooks** at the top 18 | - Add a name for your webhook and paste the url in the box **Webhook URL** 19 | - Click **Create** 20 | 3. Setup your Kubernetes connection 21 | - In the Relay page for the workflow, expand the **Setup** sidebar 22 | - In the sidebar, click the plus "+" under Connections for the Kubernetes connection and give it a name 23 | - Fill in the **Cluster server** field with the URL to your Kubernetes master (`kubectl cluster-info` will show this) 24 | - Paste the PEM-encoded CA certificate for your cluster in the **Certificate authority** field. This command will display the CA cert: `kubectl config view --raw --flatten -o json | jq -r '.clusters[] | select(.name == "'$(kubectl config current-context)'") | .cluster."certificate-authority-data"' | base64 --decode` 25 | - Paste an access token into the **Token** field. 
This can be retrieved with the following command: `kubectl config view --raw --minify --flatten -o jsonpath='{.users[].user.auth-provider.config.access-token}'` 26 | 4. Creating a new tag for your Docker image and pushing it to Docker Hub should 27 | result in an update to your deployment container. You can validate with: 28 | `kubectl -n <namespace> get deployment <deployment-name> -o=jsonpath='{.metadata.name}{": "}{range .spec.template.spec.containers[*]}{.image}{end}'`, substituting your namespace and deployment name. 29 |
-------------------------------------------------------------------------------- /kubectl-apply-on-dockerhub-push/kubectl-apply-on-dockerhub-push.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/kubectl-apply-on-dockerhub-push/kubectl-apply-on-dockerhub-push.png
-------------------------------------------------------------------------------- /kubectl-apply-on-dockerhub-push/kubectl-apply-on-dockerhub-push.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Update Kubernetes deployment image tag on Docker Hub push 3 | description: This workflow responds when a new Docker Hub image is available by updating a Kubernetes deployment image. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/kubectl-apply-on-dockerhub-push 5 | tags: 6 | - continuous delivery 7 | 8 | triggers: 9 | - name: dockerhub-image-pushed 10 | source: 11 | type: webhook 12 | image: relaysh/dockerhub-trigger-image-pushed 13 | binding: 14 | parameters: 15 | tag: !Data tag 16 | imageName: !Data name 17 | 18 | parameters: 19 | tag: 20 | description: image tag to change 21 | imageName: 22 | description: the image's name 23 | deploymentName: 24 | description: the deployment's name 25 | default: busybox 26 | containerName: 27 | description: the name of the container for the deployment 28 | default: busybox 29 | namespace: 30 | description: the namespace where your deployment lives 31 | default: default 32 | 33 | steps: 34 | # uncomment the following if you would like to approve the set 35 | # step manually instead of letting it run automatically. 36 | # 37 | # - name: approval 38 | # description: Wait for approval to run kubectl command 39 | # type: approval 40 | - name: kubectl-set-new-image 41 | description: > 42 | Sets a new image tag version for the deployment. 43 | This uses the set command in kubectl to change the image 44 | the deployment uses. 45 | image: relaysh/kubernetes-step-kubectl:latest 46 | # if you are using the above approval, uncomment the following line 47 | # dependsOn: approval 48 | spec: 49 | namespace: !Parameter namespace 50 | connection: !Connection { type: "kubernetes", name: "my-cluster-connection" } 51 | command: set 52 | args: 53 | - image 54 | - !Fn.concat ["deployment/", !Parameter deploymentName] 55 | - !Fn.concat [!Parameter containerName, "=", !Parameter imageName, ":", !Parameter tag] 56 |
-------------------------------------------------------------------------------- /pagerduty-production-incident-policy/README.md: -------------------------------------------------------------------------------- 1 | This workflow codifies an incident response process for PagerDuty incidents.
The 2 | process in this workflow creates a Jira ticket, assigns it to the current 3 | on-call person, sets up an incident response Slack room with the Jira ticket 4 | as the room title, and invites the on-call person to the Slack room to start 5 | documenting the remediation steps taken. 6 | 7 | ## Prerequisites 8 | 9 | Before you run this workflow, you will need the following connections configured in Relay: 10 | - A [PagerDuty](https://www.pagerduty.com/) account. 11 | - A [Jira](https://www.atlassian.com/software/jira) account. 12 | - A [Slack](https://slack.com/) workspace bot with the following permissions: 13 | - `channels:manage` to create the channel and set the topic 14 | - `users:read` to list users 15 | - `users:read.email` to read users' email addresses 16 | - `chat:write` to send messages 17 | - `chat:write.public` to send messages to channels without joining 18 | - `chat:write.customize` to send messages as a customized username and avatar 19 | 20 | ## Configure the workflow 21 | 22 | You may need to update some of the default parameters or connection information 23 | in this workflow to run in your environment. The default configuration assumes: 24 | - Your PagerDuty connection is called `my-pagerduty-account` 25 | - Your Jira connection is called `my-jira-account` 26 | - Your Slack connection is called `my-slack-account` 27 | - Your Jira project key is `RLY` 28 | - Your incident Slack room is called `#team-relay-production-incident-` 29 | 30 | ## Set up the trigger 31 | 32 | When you create this workflow for the first time, we'll automatically provision 33 | a webhook for you. You need to provide this webhook to PagerDuty to complete the 34 | integration. 35 | 36 | In the workflow overview page in Relay, find the webhook URL by navigating to 37 | the **Setup** sidebar. Copy the URL to your clipboard. 38 | 39 | In PagerDuty, determine which services you want to run the workflow when an 40 | incident is triggered. For each of those services: 41 | 42 | 1. Click on the **Integrations** tab. 43 | 2. At the bottom of the page, click **Add or manage extensions**. 44 | 3. Create a **New Extension**: 45 | - Extension Type: `Generic V2 Webhook` 46 | - Name: `Relay` 47 | - URL: Paste the webhook URL from your clipboard. 48 | 4. Click **Save**. 49 | 50 | Whenever an incident is triggered for the first time, this workflow will run. 51 | You can reuse the same webhook URL for many services. 52 |
-------------------------------------------------------------------------------- /pagerduty-production-incident-policy/pagerduty-production-incident-policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/pagerduty-production-incident-policy/pagerduty-production-incident-policy.png
-------------------------------------------------------------------------------- /pagerduty-to-jira/README.md: -------------------------------------------------------------------------------- 1 | This workflow responds to a PagerDuty incident by creating an issue in Jira. 2 | 3 | ## Prerequisites 4 | 5 | Before you run this workflow, you will need the following: 6 | - A [PagerDuty](https://www.pagerduty.com/) account. 7 | - An instance of [Jira](https://www.atlassian.com/software/jira) available to 8 | the internet. Jira Cloud instances are compatible with this workflow.
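Before moving on to the configuration section below, you can optionally sanity-check the Jira credentials you plan to store as the `jiraURL`, `jiraUsername`, and `jiraToken` secrets. A rough, standard-library-only sketch, assuming Jira Cloud's `/rest/api/2/myself` endpoint; the URL, username, and token values are placeholders for your own:

```python
#!/usr/bin/env python
# Sanity-check the Jira credentials you plan to store as Relay secrets.
# JIRA_URL, JIRA_USER, and JIRA_TOKEN are placeholders for your own values.
import base64
import json
from urllib.request import Request, urlopen

JIRA_URL = "https://your-domain.atlassian.net"  # the value for the jiraURL secret
JIRA_USER = "you@example.com"                   # the value for the jiraUsername secret
JIRA_TOKEN = "your-api-token"                   # the value for the jiraToken secret

auth = base64.b64encode("{0}:{1}".format(JIRA_USER, JIRA_TOKEN).encode()).decode()
req = Request(JIRA_URL + "/rest/api/2/myself",
              headers={"Authorization": "Basic " + auth, "Accept": "application/json"})
with urlopen(req) as resp:
    print("Authenticated to Jira as:", json.load(resp).get("displayName"))
```

If this prints your display name, the same three values should work when saved as workflow secrets.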
9 | 10 | ## Configure the workflow 11 | 12 | - Update the default parameter for `jiraProjectKey` for the Jira project where you 13 | want tickets to be created. 14 | - Currently, tickets will be created in the 'OPS' project by default. 15 | 16 | - Define the following secrets to connect to your Jira instance: 17 | - `jiraURL`: The URL to the root of your Jira instance. For Jira Cloud, this is 18 | `https://your-domain.atlassian.net`. 19 | - `jiraUsername`: The username to use when authenticating to Jira. 20 | - `jiraToken`: The [API token](https://confluence.atlassian.com/x/Vo71Nw) (for 21 | Jira Cloud) or password to use when authenticating to Jira. 22 | 23 | ## Test the workflow 24 | 25 | You can test the workflow with dummy data by clicking the **Run** button. Ensure 26 | an appropriate issue is created in your Jira instance and the message you expect 27 | shows up in your Slack workspace. We recommend always testing workflows manually 28 | before configuring automated triggers. 29 | 30 | ## Set up the trigger 31 | 32 | When you create this workflow for the first time, we'll automatically provision 33 | a webhook for you. You need to provide this webhook to PagerDuty to complete the 34 | integration. 35 | 36 | In the workflow overview page in Relay, find the webhook URL by navigating to 37 | the **Setup** sidebar. Copy the URL to your clipboard. 38 | 39 | In PagerDuty, determine which services you want to run the workflow when an 40 | incident is triggered. For each of those services: 41 | 42 | 1. Click on the **Integrations** tab. 43 | 2. At the bottom of the page, click **Add or manage extensions**. 44 | 3. Create a **New Extension**: 45 | - Extension Type: `Generic V2 Webhook` 46 | - Name: `Relay` 47 | - URL: Paste the webhook URL from your clipboard. 48 | 4. Click **Save**. 49 | 50 | Whenever an incident is triggered for the first time, this workflow will run. 51 | You can reuse the same webhook URL for many services. 52 | -------------------------------------------------------------------------------- /pagerduty-to-jira/pagerduty-to-jira.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/pagerduty-to-jira/pagerduty-to-jira.png -------------------------------------------------------------------------------- /pagerduty-to-jira/pagerduty-to-jira.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Respond to a PagerDuty incident via Jira 3 | description: This workflow responds to a PagerDuty incident by creating an issue in Jira. 
4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/pagerduty-to-jira 5 | tags: 6 | - incident response 7 | 8 | parameters: 9 | jiraProjectKey: 10 | description: the JIRA project key to use when creating tickets 11 | default: OPS 12 | incidentTitle: 13 | description: a brief summary of the incident 14 | incidentURL: 15 | description: a link to more information about the incident 16 | serviceName: 17 | description: the service that triggered the incident 18 | 19 | triggers: 20 | - name: pagerduty 21 | source: 22 | type: webhook 23 | image: relaysh/pagerduty-trigger-incident-triggered 24 | binding: 25 | parameters: 26 | incidentTitle: !Data title 27 | incidentURL: !Data appURL 28 | serviceName: !Data serviceName 29 | 30 | steps: 31 | - name: create-jira-issue 32 | image: relaysh/jira-step-issue-create 33 | spec: 34 | connection: 35 | url: !Secret jiraURL 36 | username: !Secret jiraUsername 37 | password: !Secret jiraToken 38 | issue: 39 | fields: 40 | project: 41 | key: !Parameter jiraProjectKey 42 | type: 43 | name: Task 44 | summary: !Parameter incidentTitle 45 | description: !Fn.concat 46 | - 'A new PagerDuty incident occurred affecting the service ' 47 | - !Parameter serviceName 48 | - '. For more information, see [' 49 | - !Parameter incidentURL 50 | - '].' 51 | -------------------------------------------------------------------------------- /pagerduty-to-slack/README.md: -------------------------------------------------------------------------------- 1 | This workflow responds to a PagerDuty incident by sending a message to a Slack 2 | channel. The urgency of the incident determines which Slack channel the message 3 | goes to. 4 | 5 | ## Prerequisites 6 | 7 | Before you run this workflow, you will need the following: 8 | - A [Slack](https://slack.com/) workspace and a connection configured in Relay 9 | to that workspace. 10 | - A [PagerDuty](https://www.pagerduty.com/) account. 11 | 12 | ## Configure the workflow 13 | 14 | You may need to update some of the default parameters or connection information 15 | in this workflow to run in your environment. The default configuration assumes: 16 | - Your Slack connection is called `my-slack-account` 17 | - You want high-urgency incidents to go to the `#on-call` Slack channel 18 | - You want low-urgency incidents to go to the `#it` Slack channel 19 | 20 | ## Test the workflow 21 | 22 | You can test the workflow with dummy data by clicking the **Run** button. Ensure 23 | the message you expect shows up in your Slack workspace. We recommend always 24 | testing workflows manually before configuring automated triggers. 25 | 26 | ## Set up the trigger 27 | 28 | When you create this workflow for the first time, we'll automatically provision 29 | a webhook for you. You need to provide this webhook to PagerDuty to complete the 30 | integration. 31 | 32 | In the workflow overview page in Relay, find the webhook URL by navigating to 33 | the **Setup** sidebar. Copy the URL to your clipboard. 34 | 35 | In PagerDuty, determine which services you want to run the workflow when an 36 | incident is triggered. For each of those services: 37 | 38 | 1. Click on the **Integrations** tab. 39 | 2. At the bottom of the page, click **Add or manage extensions**. 40 | 3. Create a **New Extension**: 41 | - Extension Type: `Generic V2 Webhook` 42 | - Name: `Relay` 43 | - URL: Paste the webhook URL from your clipboard. 44 | 4. Click **Save**. 45 | 46 | Whenever an incident is triggered for the first time, this workflow will run. 
47 | You can reuse the same webhook URL for many services. 48 | -------------------------------------------------------------------------------- /pagerduty-to-slack/pagerduty-to-slack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/pagerduty-to-slack/pagerduty-to-slack.png -------------------------------------------------------------------------------- /pagerduty-to-slack/pagerduty-to-slack.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Respond to a PagerDuty incident with Slack 3 | description: > 4 | This workflow responds to a PagerDuty incident by sending a message to a Slack channel. The urgency of the incident determines which Slack channel the message goes to. 5 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/pagerduty-to-slack 6 | tags: 7 | - incident response 8 | 9 | parameters: 10 | lowUrgencySlackChannel: 11 | description: the Slack channel to send low-urgency notifications to 12 | default: '#it' 13 | highUrgencySlackChannel: 14 | description: the Slack channel to send high-urgency notifications to 15 | default: '#on-call' 16 | incidentTitle: 17 | description: a brief summary of the incident 18 | incidentUrgency: 19 | description: the urgency of the incident 20 | incidentURL: 21 | description: a link to more information about the incident 22 | serviceName: 23 | description: the service that triggered the incident 24 | 25 | locals: 26 | message: &message !Fn.concat 27 | - '⚠️ *<' 28 | - !Parameter incidentURL 29 | - '|' 30 | - !Parameter incidentTitle 31 | - '>* ⚠️' 32 | - "\nAffected service: " 33 | - !Parameter serviceName 34 | 35 | triggers: 36 | - name: pagerduty 37 | source: 38 | type: webhook 39 | image: relaysh/pagerduty-trigger-incident-triggered 40 | binding: 41 | parameters: 42 | incidentTitle: !Data title 43 | incidentUrgency: !Data urgency 44 | incidentURL: !Data appURL 45 | serviceName: !Data serviceName 46 | 47 | steps: 48 | - name: high-urgency-message 49 | image: relaysh/slack-step-message-send 50 | when: !Fn.notEquals [!Parameter incidentUrgency, low] 51 | spec: 52 | connection: !Connection {type: slack, name: my-slack-account} 53 | channel: !Parameter highUrgencySlackChannel 54 | username: PagerDuty via Relay 55 | message: *message 56 | - name: low-urgency-message 57 | image: relaysh/slack-step-message-send 58 | when: !Fn.equals [!Parameter incidentUrgency, low] 59 | spec: 60 | connection: !Connection {type: slack, name: my-slack-account} 61 | channel: !Parameter lowUrgencySlackChannel 62 | username: PagerDuty via Relay 63 | message: *message 64 | -------------------------------------------------------------------------------- /pagerduty-to-twilio/README.md: -------------------------------------------------------------------------------- 1 | This workflow responds to a PagerDuty incident by sending an SMS message to a 2 | phone number you define. 3 | 4 | ## Prerequisites 5 | 6 | Before you run this workflow, you will need the following: 7 | - A [Twilio](https://twilio.com/) account with a phone number provisioned for 8 | sending an SMS. 9 | - A [PagerDuty](https://www.pagerduty.com/) account. 10 | 11 | ## Configure the workflow 12 | 13 | You will need to update some of the default parameters and secrets in this 14 | workflow to run in your environment. 
15 | - Set the `phoneNumber` parameter to the number that you want the message 16 | delivered to by default. 17 | - Set the `twilioAccountSID` secret to the SID of your Twilio account. 18 | - Set the `twilioAuthToken` secret to the auth token for your Twilio account. 19 | - Set the `twilioPhoneNumber` secret to the phone number provisioned in Twilio 20 | for sending SMS messages. 21 | 22 | ## Test the workflow 23 | 24 | You can test the workflow with dummy data by clicking the **Run** button. Ensure 25 | the message you expect shows up in your Slack workspace. We recommend always 26 | testing workflows manually before configuring automated triggers. 27 | 28 | ## Set up the trigger 29 | 30 | When you create this workflow for the first time, we'll automatically provision 31 | a webhook for you. You need to provide this webhook to PagerDuty to complete the 32 | integration. 33 | 34 | In the workflow overview page in Relay, find the webhook URL by navigating to 35 | the **Setup** sidebar. Copy the URL to your clipboard. 36 | 37 | In PagerDuty, determine which services you want to run the workflow when an 38 | incident is triggered. For each of those services: 39 | 40 | 1. Click on the **Integrations** tab. 41 | 2. At the bottom of the page, click **Add or manage extensions**. 42 | 3. Create a **New Extension**: 43 | - Extension Type: `Generic V2 Webhook` 44 | - Name: `Relay` 45 | - URL: Paste the webhook URL from your clipboard. 46 | 4. Click **Save**. 47 | 48 | Whenever an incident is triggered for the first time, this workflow will run. 49 | You can reuse the same webhook URL for many services. 50 | -------------------------------------------------------------------------------- /pagerduty-to-twilio/pagerduty-to-twilio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/pagerduty-to-twilio/pagerduty-to-twilio.png -------------------------------------------------------------------------------- /pagerduty-to-twilio/pagerduty-to-twilio.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Send a Twilio SMS when PagerDuty incident triggers 3 | description: This workflow uses Twilio to send an SMS notification when a new PagerDuty incident is triggered. 
4 | homepage: https://github.com/puppetlabs/relay-workflows/blob/master/pagerduty-to-twilio 5 | tags: 6 | - incident response 7 | 8 | parameters: 9 | incidentTitle: 10 | description: a brief summary of the incident 11 | incidentUrgency: 12 | description: the urgency of the incident 13 | incidentURL: 14 | description: a link to more information about the incident 15 | serviceName: 16 | description: the service that triggered the incident 17 | 18 | locals: 19 | message: &message | 20 | ⚠️ ${parameters.incidentTitle} 21 | Affected service: ${parameters.serviceName} 22 | Details: ${parameters.incidentURL} 23 | 24 | triggers: 25 | - name: pagerduty 26 | source: 27 | type: webhook 28 | image: relaysh/pagerduty-trigger-incident-triggered 29 | binding: 30 | parameters: 31 | incidentTitle: ${event.title} 32 | incidentUrgency: ${event.urgency} 33 | incidentURL: ${event.appURL} 34 | serviceName: ${event.serviceName} 35 | 36 | steps: 37 | - name: notify-via-twilio 38 | image: relaysh/twilio-step-send-sms 39 | spec: 40 | twilio: &twilio 41 | accountSID: ${secrets.twilioAccountSID} 42 | authToken: ${secrets.twilioAuthToken} 43 | from: ${secrets.twilioPhoneNumberFrom} 44 | to: ${secrets.twilioPhoneNumberTo} 45 | body: *message 46 | -------------------------------------------------------------------------------- /pulumi-preview/README.md: -------------------------------------------------------------------------------- 1 | 2 | This workflow runs the `pulumi` command against a connected 3 | GitHub repository via Relay. It's useful for implementing 4 | CI/CD workflows which connect to services outside of GitHub. 5 | 6 | In this example, the outcome of the Pulumi execution is passed 7 | into Slack as a notification. 8 | 9 | To use this workflow: 10 | - add it to your relay account 11 | - configure a Slack connection called 'my-workspace' 12 | (or change this code to match your real connection's name) 13 | - add workflow secrets for your pulumi and github accounts 14 | - copy the webhook url from the relay UI 15 | - in your pulumi app repo on github, go to settings->webhooks 16 | - paste the webhook url and set it to execute on PRs 17 | 18 | -------------------------------------------------------------------------------- /pulumi-preview/pulumi-preview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/pulumi-preview/pulumi-preview.png -------------------------------------------------------------------------------- /pulumi-preview/pulumi-preview.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Connect a Pulumi application in GitHub to Relay for CI/CD 3 | description: > 4 | This workflow receives webhook events from GitHub and runs Pulumi to preview or create resources in your application. 
5 | homepage: https://github.com/relay-integrations/relay-pulumi/tree/master/workflows/pulumi-preview/ 6 | tags: 7 | - continuous delivery 8 | 9 | parameters: 10 | event_payload: 11 | description: Payload of webhook event, filled in by trigger 12 | pulumi_commandline: 13 | description: What to do - only 'preview' (default) or 'up' 14 | default: preview 15 | 16 | triggers: 17 | - name: github-events 18 | source: 19 | type: webhook 20 | image: relaysh/github-trigger-event-sink 21 | binding: 22 | parameters: 23 | event_payload: ${event.event_payload} 24 | 25 | steps: 26 | - name: pulumi-run 27 | image: relaysh/pulumi-step-run 28 | spec: 29 | pulumi_access_token: ${secrets.pulumi_access_token} 30 | github_token: ${secrets.github_access_token} 31 | pulumi_backend_url: ${secrets.pulumi_backend_url} 32 | event_payload: ${parameters.event_payload} 33 | pulumi_commandline: ${parameters.pulumi_commandline} 34 | - name: slack-output 35 | image: relaysh/slack-step-message-send 36 | spec: 37 | connection: ${connections.slack.my-workspace} 38 | channel: prog-relay-testing 39 | message: ${outputs.pulumi-run.output} 40 | username: relayerbot 41 | -------------------------------------------------------------------------------- /puppet-run-emit-data/README.md: -------------------------------------------------------------------------------- 1 | # Emit Puppet run data 2 | 3 | This workflow demonstrates what data is emitted by a Puppet run to Relay. 4 | 5 | ## Prerequisites 6 | Before you run this workflow, you will need the following: 7 | - Puppetserver with the [Relay module](https://forge.puppet.com/puppetlabs/relay) installed. Check out the module for installation instructions. 8 | 9 | ## Set up the trigger 10 | Follow the instructions in the [Relay module](https://forge.puppet.com/puppetlabs/relay) to set up the trigger. -------------------------------------------------------------------------------- /puppet-run-emit-data/puppet-run-emit-data.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/puppet-run-emit-data/puppet-run-emit-data.png -------------------------------------------------------------------------------- /puppet-run-emit-data/puppet-run-emit-data.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Emit Puppet run data 3 | description: This workflow demonstrates what data is emitted by a Puppet run to Relay. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/puppet-run-emit-data 5 | parameters: 6 | # From the trigger 7 | host: 8 | description: "Hostname that submitted the report" 9 | logs: 10 | description: "Array of the log lines that were notice severity or greater." 
11 | summary: 12 | description: "Long-form summary of the puppet run" 13 | status: 14 | description: "Run status" 15 | facts: 16 | description: "Full hash of puppet facts" 17 | triggers: 18 | - name: puppet-report 19 | source: 20 | type: push 21 | binding: 22 | parameters: 23 | host: !Data report.host 24 | logs: !Data report.logs 25 | status: !Data report.status 26 | summary: !Data report.summary 27 | facts: !Data facts 28 | steps: 29 | - name: host 30 | image: relaysh/core 31 | spec: 32 | host: !Parameter host 33 | input: 34 | - ni get | jq .host 35 | - name: logs 36 | image: relaysh/core 37 | spec: 38 | logs: !Parameter logs 39 | input: 40 | - ni get | jq .logs 41 | - name: status 42 | image: relaysh/core 43 | spec: 44 | status: !Parameter status 45 | input: 46 | - ni get | jq .status 47 | - name: summary 48 | image: relaysh/core 49 | spec: 50 | summary: !Parameter summary 51 | input: 52 | - ni get | jq .summary 53 | - name: facts 54 | image: relaysh/core 55 | spec: 56 | facts: !Parameter facts 57 | input: 58 | - ni get | jq .facts 59 | -------------------------------------------------------------------------------- /puppet-selective-enforcement/README.md: -------------------------------------------------------------------------------- 1 | # Selectively enforce Puppet Runs 2 | 3 | This workflow listens for a noop Puppet run and selectively enforces runs when corrective changes are found. 4 | 5 | ## Prerequisites 6 | Before you run this workflow, you will need the following: 7 | - Puppetserver with the [Relay module](https://forge.puppet.com/puppetlabs/relay) installed. Check out the module for installation instructions. 8 | 9 | ## Set up the Puppet trigger 10 | Follow the instructions in the [Relay module](https://forge.puppet.com/puppetlabs/relay) to set up the trigger. 
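Both of these Puppet report workflows read their bound parameters inside `relaysh/core` steps with `ni get | jq ...`. For comparison, the same lookup written against the Python relay_sdk (the pattern used by the other scripts in this repo) would look roughly like the sketch below; it is an illustration only, and `reportedHost` is a made-up output name.

```python
#!/usr/bin/env python
# Illustration only: reading a bound spec value with the Python relay_sdk,
# equivalent in spirit to the `ni get | jq .host` shell steps above.
# `reportedHost` is a hypothetical output name.
from relay_sdk import Interface, Dynamic as D

relay = Interface()

host = relay.get(D.host)  # bound via `host: !Parameter host` in the step spec
print("Report submitted by: {0}".format(host))
relay.outputs.set("reportedHost", host)
```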
-------------------------------------------------------------------------------- /puppet-selective-enforcement/puppet-selective-enforcement.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/puppet-selective-enforcement/puppet-selective-enforcement.png -------------------------------------------------------------------------------- /puppet-selective-enforcement/puppet-selective-enforcement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Selectively enforce Puppet run 3 | description: This workflow selectively enforces a Puppet run when changes are detected 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/puppet-selective-enforcement 5 | tags: 6 | - auto remediation 7 | 8 | parameters: 9 | host: 10 | description: "Hostname that submitted the report" 11 | noop: 12 | description: Whether this was a no-op run 13 | default: true 14 | resourceStatuses: 15 | description: The changed resources 16 | triggers: 17 | - name: puppet-report 18 | source: 19 | type: push 20 | binding: 21 | parameters: 22 | host: !Data host 23 | noop: !Data noop 24 | resourceStatuses: !Data resource_statuses 25 | steps: 26 | - name: view-puppet-resources 27 | image: relaysh/core 28 | spec: 29 | resourceStatuses: !Parameter resourceStatuses 30 | input: 31 | - ni get | jq .resourceStatuses 32 | - name: detect-corrective-changes 33 | image: relaysh/core 34 | dependsOn: view-puppet-resources 35 | spec: 36 | resourceStatuses: !Parameter resourceStatuses 37 | input: 38 | - DETECTED_CHANGES=$(ni get | jq -r '.resourceStatuses | to_entries[] | select(.value.corrective_change) | .key') 39 | - 'echo "Detected changes, if any: ${DETECTED_CHANGES}"' 40 | - 'if [ x = x${DETECTED_CHANGES} ] ; then ni output set -k detectedChanges -v none; else ni output set -k detectedChanges -v "${DETECTED_CHANGES}" ; fi' 41 | - name: approval 42 | description: Wait for approval to run Puppet for real 43 | type: approval 44 | dependsOn: detect-corrective-changes 45 | when: 46 | - !Parameter noop 47 | - !Fn.notEquals [ !Output { from: detect-corrective-changes, name: detectedChanges }, none ] 48 | - name: start-puppet-run 49 | image: relaysh/puppet-step-run-start 50 | dependsOn: approval 51 | spec: 52 | connection: !Connection { type: puppet, name: my-puppet-connection} 53 | environment: production 54 | scope: 55 | nodes: 56 | - !Parameter host 57 | - name: wait-for-puppet-run 58 | image: relaysh/puppet-step-run-wait 59 | dependsOn: start-puppet-run 60 | spec: 61 | connection: !Connection { type: puppet, name: my-puppet-connection} 62 | id: 63 | !Output [ start-puppet-run, id ] 64 | -------------------------------------------------------------------------------- /puppet-shutdown-ec2/README.md: -------------------------------------------------------------------------------- 1 | # Stop EC2 instance when sudoers file is changed with Puppet 2 | 3 | This workflow is an example of responding to changes in a Puppet run. In this example, when a sudoers file change is detected, the workflow will stop the EC2 instance that is running the Puppet agent. 4 | 5 | ## Prerequisites 6 | 7 | Before you run this workflow, you will need the following: 8 | - An AWS account. 9 | - An AWS IAM user with permissions to list and stop EC2 instances (if not 10 | run in dry run mode). 
11 | - Puppetserver with the [Relay module](https://forge.puppet.com/puppetlabs/relay) installed. Check out the module for installation instructions. 12 | 13 | ## Configure the workflow 14 | 15 | You may need to update some of the secrets or connection information 16 | in this workflow to run in your environment. 17 | - Add your AWS credentials for `my-aws-account` 18 | - Add your preferred region under `awsRegion` secret 19 | 20 | ## Set up the trigger 21 | 22 | Follow the instructions in the [Relay module](https://forge.puppet.com/puppetlabs/relay) to set up the trigger. -------------------------------------------------------------------------------- /puppet-shutdown-ec2/puppet-shutdown-ec2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/puppet-shutdown-ec2/puppet-shutdown-ec2.png -------------------------------------------------------------------------------- /puppet-shutdown-ec2/puppet-shutdown-ec2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: When sudoers file changes on my Puppet run, shut down the EC2 instance. 3 | description: > 4 | This workflow listens for a sudoers file change on a Puppet run and shuts down the EC2 instance in response 5 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/puppet-shutdown-ec2 6 | tags: 7 | - auto remediation 8 | - Incident response 9 | - compliance 10 | 11 | parameters: 12 | # From the trigger 13 | host: 14 | description: The node that ran the Puppet agent 15 | resourceStatuses: 16 | description: The changed resources 17 | watchType: 18 | description: The resource type to watch for 19 | default: 'Sudo::Conf' 20 | triggers: 21 | - name: puppet-report 22 | source: 23 | type: push 24 | binding: 25 | parameters: 26 | host: !Data host 27 | resourceStatuses: !Data resource_statuses 28 | steps: 29 | - name: view-puppet-resources 30 | image: relaysh/core 31 | spec: 32 | resourceStatuses: !Parameter resourceStatuses 33 | input: 34 | - ni get | jq .resourceStatuses 35 | - name: detect-changes 36 | image: relaysh/core 37 | dependsOn: view-puppet-resources 38 | spec: 39 | resourceStatuses: !Parameter resourceStatuses 40 | watchType: !Parameter watchType 41 | input: 42 | - DETECTED_CHANGES=$(ni get | jq --arg type "$( ni get -p {.watchType} )" -r '.resourceStatuses | values | map(select(.corrective_change == true)) | map(.containment_path | (to_entries[] | select(.value | startswith("\($type)[")) | .key) as $key | .[:$key+1] | join("/")) | unique | join(",")') 43 | - 'echo "Detected changes, if any: ${DETECTED_CHANGES}"' 44 | - 'if [ x = x${DETECTED_CHANGES} ] ; then ni output set -k detectedChanges -v none; else ni output set -k detectedChanges -v "${DETECTED_CHANGES}" ; fi' 45 | - name: lookup-ec2-instance 46 | image: relaysh/aws-ec2-step-instances-describe 47 | when: 48 | - !Fn.notEquals [ !Output { from: detect-changes, name: detectedChanges }, none ] 49 | spec: 50 | aws: &aws 51 | connection: !Connection { type: aws, name: my-aws-account } 52 | region: !Secret awsRegion 53 | filters: 54 | private-dns-name: !Parameter host 55 | - name: output-instance-id 56 | image: relaysh/core:latest-python 57 | spec: 58 | instances: !Output { from: lookup-ec2-instance, name: instances } 59 | when: 60 | - !Fn.notEquals [ !Output { from: lookup-ec2-instance, name: instances }, ""] 61 | input: 62 | - echo -e "from relay_sdk import Interface, 
Dynamic as D\nrelay=Interface()\nrelay.outputs.set('instanceID', relay.get(D.instances)[0]['InstanceId'])" | python 63 | - name: approval 64 | description: Wait for instance shutdown approval 65 | type: approval 66 | dependsOn: 67 | - output-instance-id 68 | - detect-changes 69 | - name: ec2-stop-instances 70 | image: relaysh/aws-ec2-step-instances-stop 71 | spec: 72 | aws: *aws 73 | instanceIDs: 74 | - !Output { from: output-instance-id, name: instanceID } 75 | dependsOn: approval 76 |
-------------------------------------------------------------------------------- /s3-remediate-unencrypted-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow finds all unencrypted S3 buckets in a given account and encrypts them 2 | with default encryption. 3 | 4 | For more information on default encryption, see [Amazon S3 Default Encryption for S3 Buckets 5 | ](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) 6 | 7 | ## Prerequisites 8 | 9 | Before you run this workflow, you will need the following: 10 | - An AWS account. 11 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 12 | run in dry run mode). 13 | - One or more S3 buckets that are unencrypted. 14 | 15 | ## Run the workflow 16 | 17 | Follow these steps to run the workflow: 18 | 1. Add your AWS credentials as a Connection: 19 | - Click **Setup** 20 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 21 | - **KEY**: `ACCESS KEY ID` 22 | - **VALUE**: Enter your AWS access key id associated with the account 23 | - **KEY**: `SECRET ACCESS KEY` 24 | - **VALUE**: Enter your AWS secret access key associated with the account 25 | - Click **Save** 26 | 27 | 2. Click **Run workflow** and wait for the workflow run page to appear. 28 | 3. Supply the following parameters in the modal: 29 | - **KEY**: `dryRun` 30 | - **VALUE**: True if you don't want the workflow to actually modify the buckets. Use this to test the workflow and ensure it is behaving as expected. 31 | 32 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 33 | `false`, buckets not in compliance with this workflow policy will 34 | immediately have default encryption enabled. 35 | 36 | ## Run the workflow on a schedule 37 | 38 | Follow these steps to run this workflow on a schedule: 39 | - Uncomment the Trigger block in the workflow file: 40 | 41 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 42 | 43 | ```yaml 44 | # triggers: 45 | # - name: schedule 46 | # source: 47 | # type: schedule 48 | # schedule: '0 * * * *' 49 | # binding: 50 | # parameters: 51 | # dryRun: true 52 | ``` 53 | 54 | - Configure the `schedule` trigger: 55 | - Supply the run interval in [cron format](https://crontab.guru/). 56 | - Configure the following parameter bindings: 57 | - Specify whether `dryRun` should be set to `true` or `false`.
58 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-remediate-unencrypted-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | encryptionConfigurations = relay.get(D.encryptionConfigurations) 11 | 12 | for bucket in encryptionConfigurations.keys(): 13 | # If the encryption configuration of a bucket is none, bucket is unencrypted. Adding these to list of buckets to encrypt. 14 | if encryptionConfigurations[bucket] == None: 15 | to_modify.append(bucket) 16 | else: 17 | to_do_nothing.append(bucket) 18 | 19 | print("\nFound {} bucket(s) that are encrypted:".format(len(to_do_nothing))) 20 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 21 | 22 | print("\nFound {} bucket(s) that are NOT encrypted:".format(len(to_modify))) 23 | print(*[bucket for bucket in to_modify], sep = "\n") 24 | 25 | print('\nSetting output variable `buckets` with list of {} bucket(s) that are NOT encrypted.'.format(len(to_modify))) 26 | relay.outputs.set('buckets', to_modify) 27 | -------------------------------------------------------------------------------- /s3-remediate-unencrypted-buckets/s3-remediate-unencrypted-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-remediate-unencrypted-buckets/s3-remediate-unencrypted-buckets.png -------------------------------------------------------------------------------- /s3-remediate-unencrypted-buckets/s3-remediate-unencrypted-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Remediate unencrypted S3 buckets 3 | description: This workflow finds unencrypted S3 buckets and enables default encryption on those buckets. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-remediate-unencrypted-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 
9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-encryption-settings 30 | image: relaysh/aws-s3-step-bucket-get-encryption 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | encryptionConfigurations: !Output {from: get-encryption-settings, name: encryptionConfigurations} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-remediate-unencrypted-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: encrypt-buckets 46 | image: relaysh/aws-s3-step-bucket-put-encryption 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | confetti: true 54 |
-------------------------------------------------------------------------------- /s3-restrict-authenticated_user-read-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the S3 buckets in a given account and restricts those that provide 'READ' access to all Authenticated Users. 2 | 3 | It evaluates all buckets for a grant that includes: 4 | - Group containing "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" 5 | - Permission containing "READ" 6 | 7 | These buckets will be restricted to be 'private'. 8 | 9 | ## Prerequisites 10 | 11 | Before you run this workflow, you will need the following: 12 | - An AWS account. 13 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 14 | run in dry run mode). 15 | - One or more S3 buckets that provide 'READ' access to all Authenticated Users. 16 | 17 | ## Run the workflow 18 | 19 | Follow these steps to run the workflow: 20 | 1. Add your AWS credentials as a Connection: 21 | - Click **Setup** 22 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 23 | - **KEY**: `ACCESS KEY ID` 24 | - **VALUE**: Enter your AWS access key id associated with the account 25 | - **KEY**: `SECRET ACCESS KEY` 26 | - **VALUE**: Enter your AWS secret access key associated with the account 27 | - Click **Save** 28 | 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply the following parameters in the modal: 31 | - **KEY**: `dryRun` 32 | - **VALUE**: True if you don't want the workflow to actually modify the buckets. Use this to test the workflow and ensure it is behaving as expected. 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, buckets not in compliance with this workflow policy will 36 | immediately be modified to be 'private'.
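If you want to check the grant-matching rule by hand before pointing the workflow at a real account, the same test that `filter-buckets.py` applies can be run against a hand-written ACL. A rough local sketch follows; the bucket names and grants are invented sample data, while the real workflow receives its ACLs from the `get-bucket-acls` step.

```python
#!/usr/bin/env python
# Stand-alone illustration of the grant check applied by filter-buckets.py.
# The bucket names and grants below are invented sample data.
SAMPLE_ACLS = {
    "example-open-bucket": [
        {"Grantee": {"Type": "Group",
                     "URI": "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"},
         "Permission": "READ"},
    ],
    "example-private-bucket": [
        {"Grantee": {"Type": "CanonicalUser", "ID": "abc123"},
         "Permission": "FULL_CONTROL"},
    ],
}


def grants_authenticated_read(grants):
    return any(
        grant["Grantee"].get("Type") == "Group"
        and grant["Grantee"].get("URI") == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
        and "READ" in str(grant["Permission"])
        for grant in grants
    )


for bucket, grants in SAMPLE_ACLS.items():
    verdict = "would be remediated" if grants_authenticated_read(grants) else "would be left alone"
    print("{0}: {1}".format(bucket, verdict))
```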
37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Un-comment out the Trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # dryRun: true 54 | ``` 55 | 56 | - Configure the `schedule` trigger: 57 | - Supply the run interval in [cron format](https://crontab.guru/). 58 | - Configure the following parameter bindings: 59 | - Specify whether `dryRun` should be set to `true` or `false`. 60 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-read-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | bucketACLs = relay.get(D.bucketACLs) 11 | 12 | for bucketName in bucketACLs.keys(): 13 | public_bucket = False 14 | 15 | # If the URI of the grant is "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" and the permission contains "READ", adding to list to remediate. 16 | for grant in bucketACLs[bucketName]: 17 | if grant['Grantee']['Type'] == "Group" and grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" and "READ" in str(grant['Permission']): 18 | public_bucket = True 19 | else: 20 | continue 21 | 22 | if public_bucket: 23 | to_modify.append(bucketName) 24 | else: 25 | to_do_nothing.append(bucketName) 26 | 27 | print("\nFound {} bucket(s) that DON'T have public READ permissions:".format(len(to_do_nothing))) 28 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 29 | 30 | print("\nFound {} bucket(s) that have public READ permissions:".format(len(to_modify))) 31 | print(*[bucket for bucket in to_modify], sep = "\n") 32 | 33 | print('\nSetting output variable `buckets` with list of {} bucket(s) with public READ permissions.'.format(len(to_modify))) 34 | relay.outputs.set('buckets', to_modify) 35 | -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-read-buckets/s3-restrict-authenticated_user-read-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-restrict-authenticated_user-read-buckets/s3-restrict-authenticated_user-read-buckets.png -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-read-buckets/s3-restrict-authenticated_user-read-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Restricts S3 buckets with READ permissions to all Authenticated Users 3 | description: This workflow looks at all of the S3 buckets in a given account and restricts those that provide 'READ' access to all Authenticated Users. Requires an AWS account with permissions to modify S3 buckets. 
4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-restrict-authenticated_user-read-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-bucket-acls 30 | image: relaysh/aws-s3-step-bucket-get-acls 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | bucketACLs: !Output {from: get-bucket-acls, name: bucketACLs} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-restrict-authenticated_user-read-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: modify-acls 46 | image: relaysh/aws-s3-step-bucket-put-acls 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | acl: private 54 | confetti: true 55 |
-------------------------------------------------------------------------------- /s3-restrict-authenticated_user-read_acp-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the S3 buckets in a given account and restricts those that provide 'READ_ACP' access to all Authenticated Users. 2 | 3 | It evaluates all buckets for a grant that includes: 4 | - Group containing "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" 5 | - Permission containing "READ_ACP" 6 | 7 | These buckets will be restricted to be 'private'. 8 | 9 | ## Prerequisites 10 | 11 | Before you run this workflow, you will need the following: 12 | - An AWS account. 13 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 14 | run in dry run mode). 15 | - One or more S3 buckets that provide 'READ_ACP' access to all Authenticated Users. 16 | 17 | ## Run the workflow 18 | 19 | Follow these steps to run the workflow: 20 | 1. Add your AWS credentials as a Connection: 21 | - Click **Setup** 22 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 23 | - **KEY**: `ACCESS KEY ID` 24 | - **VALUE**: Enter your AWS access key id associated with the account 25 | - **KEY**: `SECRET ACCESS KEY` 26 | - **VALUE**: Enter your AWS secret access key associated with the account 27 | - Click **Save** 28 | 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply the following parameters in the modal: 31 | - **KEY**: `dryRun` 32 | - **VALUE**: True if you don't want the workflow to actually modify the buckets. Use this to test the workflow and ensure it is behaving as expected. 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, buckets not in compliance with this workflow policy will 36 | immediately be modified to be 'private'.
37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Un-comment out the Trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # dryRun: true 54 | ``` 55 | 56 | - Configure the `schedule` trigger: 57 | - Supply the run interval in [cron format](https://crontab.guru/). 58 | - Configure the following parameter bindings: 59 | - Specify whether `dryRun` should be set to `true` or `false`. 60 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-read_acp-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | bucketACLs = relay.get(D.bucketACLs) 11 | 12 | for bucketName in bucketACLs.keys(): 13 | public_bucket = False 14 | 15 | # If the URI of the grant is "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" and the permission contains "READ_ACP", adding to list to remediate. 16 | for grant in bucketACLs[bucketName]: 17 | if grant['Grantee']['Type'] == "Group" and grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" and "READ_ACP" in str(grant['Permission']): 18 | public_bucket = True 19 | else: 20 | continue 21 | 22 | if public_bucket: 23 | to_modify.append(bucketName) 24 | else: 25 | to_do_nothing.append(bucketName) 26 | 27 | print("\nFound {} bucket(s) that DON'T have public READ permissions:".format(len(to_do_nothing))) 28 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 29 | 30 | print("\nFound {} bucket(s) that have public READ permissions:".format(len(to_modify))) 31 | print(*[bucket for bucket in to_modify], sep = "\n") 32 | 33 | print('\nSetting output variable `buckets` with list of {} bucket(s) with public READ permissions.'.format(len(to_modify))) 34 | relay.outputs.set('buckets', to_modify) 35 | -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-read_acp-buckets/s3-restrict-authenticated_user-read_acp-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-restrict-authenticated_user-read_acp-buckets/s3-restrict-authenticated_user-read_acp-buckets.png -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-read_acp-buckets/s3-restrict-authenticated_user-read_acp-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Restrict S3 buckets with READ_ACP permissions to all Authenticated Users 3 | description: This workflow looks at all of the S3 buckets in a given account and restricts those that provide 'READ_ACP' access to all Authenticated Users. Requires an AWS account with permissions to modify S3 buckets. 
4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-restrict-authenticated_user-read_acp-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-bucket-acls 30 | image: relaysh/aws-s3-step-bucket-get-acls 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | bucketACLs: !Output {from: get-bucket-acls, name: bucketACLs} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-restrict-authenticated_user-read_acp-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: modify-acls 46 | image: relaysh/aws-s3-step-bucket-put-acls 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | acl: private 54 | confetti: true 55 |
-------------------------------------------------------------------------------- /s3-restrict-authenticated_user-write-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the S3 buckets in a given account and restricts those that provide 'WRITE' access to all Authenticated Users. 2 | 3 | It evaluates all buckets for a grant that includes: 4 | - Group containing "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" 5 | - Permission containing "WRITE" 6 | 7 | These buckets will be restricted to be 'private'. 8 | 9 | ## Prerequisites 10 | 11 | Before you run this workflow, you will need the following: 12 | - An AWS account. 13 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 14 | run in dry run mode). 15 | - One or more S3 buckets that provide 'WRITE' access to all Authenticated Users. 16 | 17 | ## Run the workflow 18 | 19 | Follow these steps to run the workflow: 20 | 1. Add your AWS credentials as a Connection: 21 | - Click **Setup** 22 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 23 | - **KEY**: `ACCESS KEY ID` 24 | - **VALUE**: Enter your AWS access key id associated with the account 25 | - **KEY**: `SECRET ACCESS KEY` 26 | - **VALUE**: Enter your AWS secret access key associated with the account 27 | - Click **Save** 28 | 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply the following parameters in the modal: 31 | - **KEY**: `dryRun` 32 | - **VALUE**: True if you don't want the workflow to actually modify the buckets. Use this to test the workflow and ensure it is behaving as expected. 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, buckets not in compliance with this workflow policy will 36 | immediately be modified to be 'private'.
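To cross-check what the workflow reports against your account, you can also fetch a single bucket's ACL yourself. A rough sketch using boto3 follows; boto3 is not part of the workflow itself (its steps run the `relaysh/aws-s3-*` containers), it assumes AWS credentials are available locally, and `example-bucket` is a placeholder.

```python
#!/usr/bin/env python
# Cross-check a single bucket's ACL outside of Relay. boto3 is not used by the
# workflow (its steps run relaysh/aws-s3-* containers); this is only a local
# comparison aid. "example-bucket" is a placeholder.
import boto3

AUTH_USERS = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"

s3 = boto3.client("s3")
acl = s3.get_bucket_acl(Bucket="example-bucket")

flagged = any(
    grant.get("Grantee", {}).get("Type") == "Group"
    and grant.get("Grantee", {}).get("URI") == AUTH_USERS
    and "WRITE" in str(grant.get("Permission"))
    for grant in acl["Grants"]
)
print("example-bucket", "grants" if flagged else "does not grant",
      "WRITE to all authenticated users")
```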
37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Un-comment out the Trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # dryRun: true 54 | ``` 55 | 56 | - Configure the `schedule` trigger: 57 | - Supply the run interval in [cron format](https://crontab.guru/). 58 | - Configure the following parameter bindings: 59 | - Specify whether `dryRun` should be set to `true` or `false`. 60 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-write-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | bucketACLs = relay.get(D.bucketACLs) 11 | 12 | for bucketName in bucketACLs.keys(): 13 | public_bucket = False 14 | 15 | # If the URI of the grant is "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" and the permission contains "WRITE", adding to list to remediate. 16 | for grant in bucketACLs[bucketName]: 17 | if grant['Grantee']['Type'] == "Group" and grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" and "WRITE" in str(grant['Permission']): 18 | public_bucket = True 19 | else: 20 | continue 21 | 22 | if public_bucket: 23 | to_modify.append(bucketName) 24 | else: 25 | to_do_nothing.append(bucketName) 26 | 27 | print("\nFound {} bucket(s) that DON'T have public WRITE permissions:".format(len(to_do_nothing))) 28 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 29 | 30 | print("\nFound {} bucket(s) that have public WRITE permissions:".format(len(to_modify))) 31 | print(*[bucket for bucket in to_modify], sep = "\n") 32 | 33 | print('\nSetting output variable `buckets` with list of {} bucket(s) with public WRITE permissions.'.format(len(to_modify))) 34 | relay.outputs.set('buckets', to_modify) 35 | -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-write-buckets/s3-restrict-authenticated_user-write-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-restrict-authenticated_user-write-buckets/s3-restrict-authenticated_user-write-buckets.png -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-write-buckets/s3-restrict-authenticated_user-write-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Restricts S3 buckets with WRITE permissions to all Authenticated Users 3 | description: This workflow looks at all of the S3 buckets in a given account and restricts those that provide 'WRITE' access to all Authenticated Users. Requires an AWS account with permissions to modify S3 buckets. 
4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-restrict-authenticated_user-write-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-bucket-acls 30 | image: relaysh/aws-s3-step-bucket-get-acls 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | bucketACLs: !Output {from: get-bucket-acls, name: bucketACLs} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-restrict-authenticated_user-write-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: modify-acls 46 | image: relaysh/aws-s3-step-bucket-put-acls 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | acl: private 54 | confetti: true 55 | -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-write_acp-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the S3 buckets in a given account and restricts those that provide 'WRITE_ACP' access to all authenticated users. 2 | 3 | It evaluates all buckets for a grant that includes: 4 | - Group containing "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" 5 | - Permission containing "WRITE_ACP" 6 | 7 | These buckets will be restricted to be 'private'. 8 | 9 | ## Prerequisites 10 | 11 | Before you run this workflow, you will need the following: 12 | - An AWS account. 13 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 14 | run in dry run mode). 15 | - One or more S3 buckets that grant 'WRITE_ACP' access to all authenticated users. 16 | 17 | ## Run the workflow 18 | 19 | Follow these steps to run the workflow: 20 | 1. Add your AWS credentials as a Connection: 21 | - Click **Setup** 22 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 23 | - **KEY**: `ACCESS KEY ID` 24 | - **VALUE**: Enter your AWS access key id associated with the account 25 | - **KEY**: `SECRET ACCESS KEY` 26 | - **VALUE**: Enter your AWS secret access key associated with the account 27 | - Click **Save** 28 | 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply the following parameters to the modal: 31 | - **KEY**: `dryRun` 32 | - **VALUE**: True if you don't want to actually modify the buckets. Use this to test the workflow and ensure it is behaving as expected. 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, buckets not in compliance with this workflow policy will 36 | immediately be modified to be 'private'.
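For reference when reading or adapting `filter-buckets.py`, the `bucketACLs` value it receives from the `get-bucket-acls` step is a mapping from bucket name to a list of ACL grants. The exact step output isn't reproduced in this repository; the shape below is inferred from how the script indexes it, and the bucket names and IDs are made up.

```python
# Illustrative shape of the `bucketACLs` value read via relay.get(D.bucketACLs),
# inferred from how filter-buckets.py indexes it. Names and IDs are made up.
bucketACLs = {
    "example-audit-bucket": [
        {
            "Grantee": {
                "Type": "Group",
                "URI": "http://acs.amazonaws.com/groups/global/AuthenticatedUsers",
            },
            "Permission": "WRITE_ACP",
        },
    ],
    "example-private-bucket": [
        {"Grantee": {"Type": "CanonicalUser", "ID": "abc123"}, "Permission": "FULL_CONTROL"},
    ],
}

# The per-grant test the filter applies before adding a bucket to its
# remediation list:
grant = bucketACLs["example-audit-bucket"][0]
flagged = (
    grant["Grantee"]["Type"] == "Group"
    and grant["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
    and "WRITE_ACP" in str(grant["Permission"])
)
print(flagged)  # True
```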
37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Uncomment the trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # dryRun: true 54 | ``` 55 | 56 | - Configure the `schedule` trigger: 57 | - Supply the run interval in [cron format](https://crontab.guru/). 58 | - Configure the following parameter bindings: 59 | - Specify whether `dryRun` should be set to `true` or `false`. 60 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-write_acp-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | bucketACLs = relay.get(D.bucketACLs) 11 | 12 | for bucketName in bucketACLs.keys(): 13 | public_bucket = False 14 | 15 | # If the URI of the grant is "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" and the permission contains "WRITE_ACP", add the bucket to the list to remediate. 16 | for grant in bucketACLs[bucketName]: 17 | if grant['Grantee']['Type'] == "Group" and grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" and "WRITE_ACP" in str(grant['Permission']): 18 | public_bucket = True 19 | else: 20 | continue 21 | 22 | if public_bucket: 23 | to_modify.append(bucketName) 24 | else: 25 | to_do_nothing.append(bucketName) 26 | 27 | print("\nFound {} bucket(s) that DON'T grant WRITE_ACP to all authenticated users:".format(len(to_do_nothing))) 28 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 29 | 30 | print("\nFound {} bucket(s) that grant WRITE_ACP to all authenticated users:".format(len(to_modify))) 31 | print(*[bucket for bucket in to_modify], sep = "\n") 32 | 33 | print('\nSetting output variable `buckets` with list of {} bucket(s) that grant WRITE_ACP to all authenticated users.'.format(len(to_modify))) 34 | relay.outputs.set('buckets', to_modify) 35 | -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-write_acp-buckets/s3-restrict-authenticated_user-write_acp-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-restrict-authenticated_user-write_acp-buckets/s3-restrict-authenticated_user-write_acp-buckets.png -------------------------------------------------------------------------------- /s3-restrict-authenticated_user-write_acp-buckets/s3-restrict-authenticated_user-write_acp-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Restrict S3 buckets with WRITE_ACP permissions to all Authenticated Users 3 | description: This workflow looks at all of the S3 buckets in a given account and restricts those that provide 'WRITE_ACP' access to all Authenticated Users. Requires an AWS account with permissions to modify S3 buckets.
4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-restrict-authenticated_user-write_acp-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-bucket-acls 30 | image: relaysh/aws-s3-step-bucket-get-acls 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | bucketACLs: !Output {from: get-bucket-acls, name: bucketACLs} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-restrict-authenticated_user-write_acp-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: modify-acls 46 | image: relaysh/aws-s3-step-bucket-put-acls 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | acl: private 54 | confetti: true 55 | -------------------------------------------------------------------------------- /s3-restrict-public-read-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the public S3 buckets in a given account and restricts those that provide 'READ' access. 2 | 3 | It evaluates all buckets for a grant that includes: 4 | - Group containing "http://acs.amazonaws.com/groups/global/AllUsers" 5 | - Permission containing "READ" 6 | 7 | These buckets will be restricted to be 'private'. 8 | 9 | ## Prerequisites 10 | 11 | Before you run this workflow, you will need the following: 12 | - An AWS account. 13 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 14 | run in dry run mode). 15 | - One or more S3 buckets that are public and provide 'READ' access. 16 | 17 | ## Run the workflow 18 | 19 | Follow these steps to run the workflow: 20 | 1. Add your AWS credentials as a Connection: 21 | - Click **Setup** 22 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 23 | - **KEY**: `ACCESS KEY ID` 24 | - **VALUE**: Enter your AWS access key id associated with the account 25 | - **KEY**: `SECRET ACCESS KEY` 26 | - **VALUE**: Enter your AWS secret access key associated with the account 27 | - Click **Save** 28 | 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply following parameters to the modal: 31 | - **KEY**: `dryRun` 32 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, buckets not in compliance with this workflow policy will 36 | immediately be modified to be 'private'. 
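For a sense of what the final step does to each flagged bucket: the `modify-acls` step applies the canned `private` ACL. The `relaysh/aws-s3-step-bucket-put-acls` image's internals aren't shown in this repository, so treat the following as the equivalent S3 API call rather than the step's actual implementation; the bucket name is a placeholder.

```python
# Equivalent of the remediation the `modify-acls` step performs on each bucket
# returned by filter-buckets: replace the bucket ACL with the canned 'private'
# ACL, which drops the public READ grant to the AllUsers group.
import boto3


def make_private(bucket_name):
    s3 = boto3.client("s3")
    s3.put_bucket_acl(Bucket=bucket_name, ACL="private")


# Placeholder name -- substitute a bucket you own if you want to test by hand.
make_private("my-public-read-bucket")
```

Note that applying a canned ACL replaces the entire ACL, so any other non-owner grants on the bucket are removed as well.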
37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Un-comment out the Trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # dryRun: true 54 | ``` 55 | 56 | - Configure the `schedule` trigger: 57 | - Supply the run interval in [cron format](https://crontab.guru/). 58 | - Configure the following parameter bindings: 59 | - Specify whether `dryRun` should be set to `true` or `false`. 60 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-restrict-public-read-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | bucketACLs = relay.get(D.bucketACLs) 11 | 12 | for bucketName in bucketACLs.keys(): 13 | public_bucket = False 14 | 15 | # If the URI of the grant is "http://acs/amazonaws.com/groups/global/AllUsers" and the permission contains "READ", adding to list to remediate. 16 | for grant in bucketACLs[bucketName]: 17 | if grant['Grantee']['Type'] == "Group" and grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AllUsers" and "READ" in str(grant['Permission']): 18 | public_bucket = True 19 | else: 20 | continue 21 | 22 | if public_bucket: 23 | to_modify.append(bucketName) 24 | else: 25 | to_do_nothing.append(bucketName) 26 | 27 | print("\nFound {} bucket(s) that DON'T have public READ permissions:".format(len(to_do_nothing))) 28 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 29 | 30 | print("\nFound {} bucket(s) that have public READ permissions:".format(len(to_modify))) 31 | print(*[bucket for bucket in to_modify], sep = "\n") 32 | 33 | print('\nSetting output variable `buckets` with list of {} bucket(s) with public READ permissions.'.format(len(to_modify))) 34 | relay.outputs.set('buckets', to_modify) 35 | -------------------------------------------------------------------------------- /s3-restrict-public-read-buckets/s3-restrict-public-read-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-restrict-public-read-buckets/s3-restrict-public-read-buckets.png -------------------------------------------------------------------------------- /s3-restrict-public-read-buckets/s3-restrict-public-read-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Restrict public S3 buckets with READ permissions 3 | description: This workflow looks at all of the public S3 buckets in a given account and restricts those that provide 'READ' access. Requires an AWS account with permissions to modify S3 buckets. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-restrict-public-read-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 
9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-bucket-acls 30 | image: relaysh/aws-s3-step-bucket-get-acls 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | bucketACLs: !Output {from: get-bucket-acls, name: bucketACLs} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-restrict-public-read-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: modify-acls 46 | image: relaysh/aws-s3-step-bucket-put-acls 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | acl: private 54 | confetti: true 55 | -------------------------------------------------------------------------------- /s3-restrict-public-read_acp-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the public S3 buckets in a given account and restricts those that provide 'READ_ACP' access. 2 | 3 | It evaluates all buckets for a grant that includes: 4 | - Group containing "http://acs.amazonaws.com/groups/global/AllUsers" 5 | - Permission containing "READ_ACP" 6 | 7 | These buckets will be restricted to be 'private'. 8 | 9 | ## Prerequisites 10 | 11 | Before you run this workflow, you will need the following: 12 | - An AWS account. 13 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 14 | run in dry run mode). 15 | - One or more S3 buckets that are public and provide 'READ' access. 16 | 17 | ## Run the workflow 18 | 19 | Follow these steps to run the workflow: 20 | 1. Add your AWS credentials as a Connection: 21 | - Click **Setup** 22 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 23 | - **KEY**: `ACCESS KEY ID` 24 | - **VALUE**: Enter your AWS access key id associated with the account 25 | - **KEY**: `SECRET ACCESS KEY` 26 | - **VALUE**: Enter your AWS secret access key associated with the account 27 | - Click **Save** 28 | 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply following parameters to the modal: 31 | - **KEY**: `dryRun` 32 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, buckets not in compliance with this workflow policy will 36 | immediately be modified to be 'private'. 
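If you'd like to preview what the workflow will report before running it, the chain of steps above (`list-buckets` → `get-bucket-acls` → `filter-buckets`) can be approximated locally. The sketch below calls boto3 directly and is only an approximation of the prebuilt step images, which may differ in detail.

```python
# Local preview (not part of the workflow): approximate the
# list-buckets -> get-bucket-acls -> filter-buckets chain with boto3 and
# report buckets that grant READ_ACP to the AllUsers group.
import boto3

ALL_USERS_URI = "http://acs.amazonaws.com/groups/global/AllUsers"

s3 = boto3.client("s3")
for bucket in s3.list_buckets()["Buckets"]:
    name = bucket["Name"]
    grants = s3.get_bucket_acl(Bucket=name)["Grants"]
    if any(
        grant["Grantee"].get("Type") == "Group"
        and grant["Grantee"].get("URI") == ALL_USERS_URI
        and "READ_ACP" in grant["Permission"]
        for grant in grants
    ):
        print("{}: READ_ACP is granted to AllUsers".format(name))
```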
37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Un-comment out the Trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # dryRun: true 54 | ``` 55 | 56 | - Configure the `schedule` trigger: 57 | - Supply the run interval in [cron format](https://crontab.guru/). 58 | - Configure the following parameter bindings: 59 | - Specify whether `dryRun` should be set to `true` or `false`. 60 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-restrict-public-read_acp-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | bucketACLs = relay.get(D.bucketACLs) 11 | 12 | for bucketName in bucketACLs.keys(): 13 | public_bucket = False 14 | 15 | # If the URI of the grant is "http://acs/amazonaws.com/groups/global/AllUsers" and the permission contains "READ_ACP", adding to list to remediate. 16 | for grant in bucketACLs[bucketName]: 17 | if grant['Grantee']['Type'] == "Group" and grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AllUsers" and "READ_ACP" in str(grant['Permission']): 18 | public_bucket = True 19 | else: 20 | continue 21 | 22 | if public_bucket: 23 | to_modify.append(bucketName) 24 | else: 25 | to_do_nothing.append(bucketName) 26 | 27 | print("\nFound {} bucket(s) that DON'T have public READ_ACP permissions:".format(len(to_do_nothing))) 28 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 29 | 30 | print("\nFound {} bucket(s) that have public READ_ACP permissions:".format(len(to_modify))) 31 | print(*[bucket for bucket in to_modify], sep = "\n") 32 | 33 | print('\nSetting output variable `buckets` with list of {} bucket(s) with public READ_ACP permissions.'.format(len(to_modify))) 34 | relay.outputs.set('buckets', to_modify) 35 | -------------------------------------------------------------------------------- /s3-restrict-public-read_acp-buckets/s3-restrict-public-read_acp-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-restrict-public-read_acp-buckets/s3-restrict-public-read_acp-buckets.png -------------------------------------------------------------------------------- /s3-restrict-public-read_acp-buckets/s3-restrict-public-read_acp-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Restrict public S3 buckets with READ_ACP permissions 3 | description: This workflow looks at all of the public S3 buckets in a given account and restricts those that provide 'READ_ACP' access. Requires an AWS account with permissions to modify S3 buckets. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-restrict-public-read_acp-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 
9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-bucket-acls 30 | image: relaysh/aws-s3-step-bucket-get-acls 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | bucketACLs: !Output {from: get-bucket-acls, name: bucketACLs} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-restrict-public-read_acp-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: modify-acls 46 | image: relaysh/aws-s3-step-bucket-put-acls 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | acl: private 54 | confetti: true 55 | -------------------------------------------------------------------------------- /s3-restrict-public-write-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the public S3 buckets in a given account and restricts those that provide 'WRITE' access. 2 | 3 | It evaluates all buckets for a grant that includes: 4 | - Group containing "http://acs.amazonaws.com/groups/global/AllUsers" 5 | - Permission containing "WRITE" 6 | 7 | These buckets will be restricted to be 'private'. 8 | 9 | ## Prerequisites 10 | 11 | Before you run this workflow, you will need the following: 12 | - An AWS account. 13 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 14 | run in dry run mode). 15 | - One or more S3 buckets that are public and provide 'WRITE' access. 16 | 17 | ## Run the workflow 18 | 19 | Follow these steps to run the workflow: 20 | 1. Add your AWS credentials as a Connection: 21 | - Click **Setup** 22 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 23 | - **KEY**: `ACCESS KEY ID` 24 | - **VALUE**: Enter your AWS access key id associated with the account 25 | - **KEY**: `SECRET ACCESS KEY` 26 | - **VALUE**: Enter your AWS secret access key associated with the account 27 | - Click **Save** 28 | 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply the following parameters to the modal: 31 | - **KEY**: `dryRun` 32 | - **VALUE**: True if you don't want to actually modify the buckets. Use this to test the workflow and ensure it is behaving as expected. 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, buckets not in compliance with this workflow policy will 36 | immediately be modified to be 'private'. 37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Uncomment the trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment.
44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # dryRun: true 54 | ``` 55 | 56 | - Configure the `schedule` trigger: 57 | - Supply the run interval in [cron format](https://crontab.guru/). 58 | - Configure the following parameter bindings: 59 | - Specify whether `dryRun` should be set to `true` or `false`. 60 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-restrict-public-write-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | bucketACLs = relay.get(D.bucketACLs) 11 | 12 | for bucketName in bucketACLs.keys(): 13 | public_bucket = False 14 | 15 | # If the URI of the grant is "http://acs/amazonaws.com/groups/global/AllUsers" and the permission contains "WRITE", adding to list to remediate. 16 | for grant in bucketACLs[bucketName]: 17 | if grant['Grantee']['Type'] == "Group" and grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AllUsers" and "WRITE" in str(grant['Permission']): 18 | public_bucket = True 19 | else: 20 | continue 21 | 22 | if public_bucket: 23 | to_modify.append(bucketName) 24 | else: 25 | to_do_nothing.append(bucketName) 26 | 27 | print("\nFound {} bucket(s) that DON'T have public WRITE permissions:".format(len(to_do_nothing))) 28 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 29 | 30 | print("\nFound {} bucket(s) that have public WRITE permissions:".format(len(to_modify))) 31 | print(*[bucket for bucket in to_modify], sep = "\n") 32 | 33 | print('\nSetting output variable `buckets` with list of {} bucket(s) with public WRITE permissions.'.format(len(to_modify))) 34 | relay.outputs.set('buckets', to_modify) 35 | -------------------------------------------------------------------------------- /s3-restrict-public-write-buckets/s3-restrict-public-write-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-restrict-public-write-buckets/s3-restrict-public-write-buckets.png -------------------------------------------------------------------------------- /s3-restrict-public-write-buckets/s3-restrict-public-write-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Restrict public S3 buckets with WRITE permissions 3 | description: This workflow looks at all of the public S3 buckets in a given account and restricts those that provide 'WRITE' access. Requires an AWS account with permissions to modify S3 buckets. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-restrict-public-write-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 
9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-bucket-acls 30 | image: relaysh/aws-s3-step-bucket-get-acls 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | bucketACLs: !Output {from: get-bucket-acls, name: bucketACLs} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-restrict-public-write-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: modify-acls 46 | image: relaysh/aws-s3-step-bucket-put-acls 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | acl: private 54 | confetti: true 55 | -------------------------------------------------------------------------------- /s3-restrict-public-write_acp-buckets/README.md: -------------------------------------------------------------------------------- 1 | This workflow looks at all of the public S3 buckets in a given account and restricts those that provide 'WRITE_ACP' access. 2 | 3 | It evaluates all buckets for a grant that includes: 4 | - Group containing "http://acs.amazonaws.com/groups/global/AllUsers" 5 | - Permission containing "WRITE_ACP" 6 | 7 | These buckets will be restricted to be 'private'. 8 | 9 | ## Prerequisites 10 | 11 | Before you run this workflow, you will need the following: 12 | - An AWS account. 13 | - An AWS IAM user with permissions to list and modify S3 buckets (if not 14 | run in dry run mode). 15 | - One or more S3 buckets that are public and provide 'WRITE' access. 16 | 17 | ## Run the workflow 18 | 19 | Follow these steps to run the workflow: 20 | 1. Add your AWS credentials as a Connection: 21 | - Click **Setup** 22 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 23 | - **KEY**: `ACCESS KEY ID` 24 | - **VALUE**: Enter your AWS access key id associated with the account 25 | - **KEY**: `SECRET ACCESS KEY` 26 | - **VALUE**: Enter your AWS secret access key associated with the account 27 | - Click **Save** 28 | 29 | 2. Click **Run workflow** and wait for the workflow run page to appear. 30 | 3. Supply following parameters to the modal: 31 | - **KEY**: `dryRun` 32 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, buckets not in compliance with this workflow policy will 36 | immediately be modified to be 'private'. 
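The next section shows how to run this workflow on a schedule from a cron expression. If you want to sanity-check an interval before saving it, the third-party `croniter` package (not used by the workflow itself) can preview the next firing times; the start date below is just an example.

```python
# Preview when a cron expression would fire before pasting it into the
# schedule trigger. Uses the third-party `croniter` package, which is not part
# of the workflow -- this is purely a local sanity check.
from datetime import datetime
from croniter import croniter

schedule = "0 * * * *"  # top of every hour, as in the commented-out trigger
it = croniter(schedule, datetime(2024, 1, 1, 0, 30))
for _ in range(3):
    print(it.get_next(datetime))
# 2024-01-01 01:00:00
# 2024-01-01 02:00:00
# 2024-01-01 03:00:00
```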
37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Un-comment out the Trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # dryRun: true 54 | ``` 55 | 56 | - Configure the `schedule` trigger: 57 | - Supply the run interval in [cron format](https://crontab.guru/). 58 | - Configure the following parameter bindings: 59 | - Specify whether `dryRun` should be set to `true` or `false`. 60 | - Click **Save changes** -------------------------------------------------------------------------------- /s3-restrict-public-write_acp-buckets/filter-buckets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from relay_sdk import Interface, Dynamic as D 3 | 4 | 5 | relay = Interface() 6 | 7 | to_modify = [] 8 | to_do_nothing = [] 9 | 10 | bucketACLs = relay.get(D.bucketACLs) 11 | 12 | for bucketName in bucketACLs.keys(): 13 | public_bucket = False 14 | 15 | # If the URI of the grant is "http://acs/amazonaws.com/groups/global/AllUsers" and the permission contains "WRITE_ACP", adding to list to remediate. 16 | for grant in bucketACLs[bucketName]: 17 | if grant['Grantee']['Type'] == "Group" and grant['Grantee']['URI'] == "http://acs.amazonaws.com/groups/global/AllUsers" and "WRITE_ACP" in str(grant['Permission']): 18 | public_bucket = True 19 | else: 20 | continue 21 | 22 | if public_bucket: 23 | to_modify.append(bucketName) 24 | else: 25 | to_do_nothing.append(bucketName) 26 | 27 | print("\nFound {} bucket(s) that DON'T have public WRITE_ACP permissions:".format(len(to_do_nothing))) 28 | print(*[bucket for bucket in to_do_nothing], sep = "\n") 29 | 30 | print("\nFound {} bucket(s) that have public WRITE_ACP permissions:".format(len(to_modify))) 31 | print(*[bucket for bucket in to_modify], sep = "\n") 32 | 33 | print('\nSetting output variable `buckets` with list of {} bucket(s) with public WRITE_ACP permissions.'.format(len(to_modify))) 34 | relay.outputs.set('buckets', to_modify) 35 | -------------------------------------------------------------------------------- /s3-restrict-public-write_acp-buckets/s3-restrict-public-write_acp-buckets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/s3-restrict-public-write_acp-buckets/s3-restrict-public-write_acp-buckets.png -------------------------------------------------------------------------------- /s3-restrict-public-write_acp-buckets/s3-restrict-public-write_acp-buckets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Restrict public S3 buckets with WRITE_ACP permissions 3 | description: This workflow looks at all of the public S3 buckets in a given account and restricts those that provide 'WRITE_ACP' access. Requires an AWS account with permissions to modify S3 buckets. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/s3-restrict-public-write_acp-buckets 5 | tags: 6 | - compliance 7 | 8 | # Uncomment out this trigger to run this workflow hourly. 
9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # dryRun: true 17 | 18 | parameters: 19 | dryRun: 20 | description: True if this workflow should not actually modify buckets 21 | default: 'true' 22 | 23 | steps: 24 | - name: list-buckets 25 | image: relaysh/aws-s3-step-bucket-list 26 | spec: 27 | aws: &aws 28 | connection: !Connection { type: aws, name: my-aws-account } 29 | - name: get-bucket-acls 30 | image: relaysh/aws-s3-step-bucket-get-acls 31 | spec: 32 | aws: *aws 33 | buckets: !Output {from: list-buckets, name: buckets} 34 | - name: filter-buckets 35 | image: relaysh/core:latest-python 36 | spec: 37 | bucketACLs: !Output {from: get-bucket-acls, name: bucketACLs} 38 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/s3-restrict-public-write_acp-buckets/filter-buckets.py 39 | - name: approval 40 | description: Wait for approval to modify S3 buckets 41 | type: approval 42 | dependsOn: filter-buckets 43 | when: 44 | - !Fn.equals [!Parameter dryRun, 'false'] 45 | - name: modify-acls 46 | image: relaysh/aws-s3-step-bucket-put-acls 47 | dependsOn: approval 48 | when: 49 | - !Fn.equals [!Parameter dryRun, 'false'] 50 | spec: 51 | aws: *aws 52 | buckets: !Output {from: filter-buckets, name: buckets} 53 | acl: private 54 | confetti: true 55 | -------------------------------------------------------------------------------- /splunkoncall-incident-response/README.md: -------------------------------------------------------------------------------- 1 | This workflow codifies an incident response process for Splunk On-Call incidents. The process in this workflow is to create a Jira ticket, set up an incident response Slack channel, and post the information about the created tickets back into the incident timeline. 2 | 3 | ## Prerequisites 4 | 5 | Before you run this workflow, you will need the following connections configured in Relay: 6 | 7 | - A [Jira](https://www.atlassian.com/software/jira) account. 8 | - A [Slack](https://slack.com/) workspace bot with the following permissions: 9 | - `channels:manage` to create the channel and set the topic 10 | - `chat:write` to send messages 11 | - `chat:write.public` to send messages to channels without joining 12 | - `chat:write.customize` to send messages as a customized username and avatar 13 | 14 | You'll also need to enable the REST integration point on Splunk On-Call and add the generated endpoint URL as a workflow Secret named `endpointURL`. Note that the incoming webhook from Splunk On-Call to Relay uses the [escalation webhook integration](https://help.victorops.com/knowledge-base/escalation-webhooks/), not the Enterprise-level [custom webhooks](https://help.victorops.com/knowledge-base/custom-outbound-webhooks). 15 | 16 | ## Configure the workflow 17 | 18 | You may need to update some of the default parameters or connection information 19 | in this workflow to run in your environment. The default configuration assumes: 20 | 21 | - Your Jira connection is called `my-jira-account` 22 | - Your Slack connection is called `my-slack-account` 23 | - Your Jira project key is `RLY` 24 | - Your incident Slack channels will be named `#team-relay-production-incident-` 25 | 26 | ## Set up the trigger 27 | 28 | When you create this workflow for the first time, we'll automatically provision 29 | a webhook for you. You need to provide this webhook to Splunk On-Call to complete the 30 | integration.
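As an aside before the trigger setup continues below: the "post back into the incident timeline" part of this workflow boils down to an HTTP POST against the REST integration endpoint you stored as the `endpointURL` secret. The sketch uses the standard REST integration payload fields; the actual workflow step may build its request differently, and the values shown are placeholders.

```python
# Illustrative sketch of annotating a Splunk On-Call (VictorOps) incident
# timeline via the REST integration endpoint stored in the `endpointURL`
# secret. Field names follow the standard REST integration payload; this is
# not the workflow step's actual implementation.
import requests


def post_timeline_note(endpoint_url, entity_id, message):
    payload = {
        "message_type": "INFO",   # INFO annotates the timeline without paging anyone
        "entity_id": entity_id,   # ties the note to the incident that fired the trigger
        "state_message": message,
    }
    response = requests.post(endpoint_url, json=payload, timeout=10)
    response.raise_for_status()


# Placeholder values:
# post_timeline_note(endpoint_url="<your REST integration endpoint URL>",
#                    entity_id="incident-1234",
#                    message="Created Jira RLY-42 and #team-relay-production-incident-42")
```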
31 | 32 | In the workflow overview page in Relay, find the webhook URL by navigating to 33 | the **Setup** sidebar. Copy the URL to your clipboard. 34 | 35 | In Splunk On-Call, go to **Integrations** and enable the **Webhooks** integration. Add a new webhook, give it a memorable name and paste the Relay URL into the dialog box. 36 | 37 | You'll then need to associate the webhook name with one or more Escalation Policies, so the workflow will be triggered upon incident creation. Updates from Relay will be associated with the timeline of the Splunk On-Call incident which triggered them. 38 | -------------------------------------------------------------------------------- /sts-describe-ec2-objects/README.md: -------------------------------------------------------------------------------- 1 | This workflow demonstrates the use of the [AWS STS assume role step](https://github.com/relay-integrations/relay-aws-sts/tree/master/steps/aws-sts-step-assume-role). It 2 | assumes an IAM role, then outputs the instances, images, key pairs, and volumes 3 | in that account. 4 | 5 | ## Prerequisites 6 | 7 | Before you run this workflow, you will need the following: 8 | - An AWS account. 9 | - An AWS IAM user with permissions to assume the privileged IAM role. 10 | - An AWS IAM role with the user as a trusted entity and permissions to list EC2 instances. 11 | - One or more running EC2 instances. 12 | 13 | ## Run the workflow 14 | 15 | Follow these steps to run the workflow: 16 | 1. Add your AWS IAM user credentials as a Connection: 17 | - Click **Setup** 18 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 19 | - **KEY**: `ACCESS KEY ID` 20 | - **VALUE**: Enter your AWS access key id associated with the account 21 | - **KEY**: `SECRET ACCESS KEY` 22 | - **VALUE**: Enter your AWS secret access key associated with the account 23 | - Click **Save** 24 | 25 | 2. Click **Run workflow** and wait for the workflow run page to appear. 26 | 3. Supply following parameters to the modal: 27 | - **KEY**: `region` 28 | - **VALUE**: The AWS region to run in 29 | - **KEY**: `roleARN` 30 | - **VALUE**: The ARN of the IAM role to assume (e.g. arn:aws:iam::180094860577:role/EC2) 31 | 32 | ## Run the workflow on a schedule 33 | 34 | Follow these steps to run this workflow on a schedule: 35 | - Un-comment out the Trigger block in the workflow file: 36 | 37 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 38 | 39 | ```yaml 40 | # triggers: 41 | # - name: schedule 42 | # source: 43 | # type: schedule 44 | # schedule: '0 * * * *' 45 | # binding: 46 | # parameters: 47 | # region: us-east-1 48 | # roleARN: "" 49 | ``` 50 | 51 | - Configure the `schedule` trigger: 52 | - Supply the run interval in [cron format](https://crontab.guru/). 53 | - Configure the following parameter bindings: 54 | - Specify the `region` to run in. 
55 | - Specify the `roleARN` to assume 56 | - Click **Save changes** -------------------------------------------------------------------------------- /sts-describe-ec2-objects/sts-describe-ec2-objects.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/sts-describe-ec2-objects/sts-describe-ec2-objects.png -------------------------------------------------------------------------------- /sts-describe-ec2-objects/sts-describe-ec2-objects.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Assuming a Privileged IAM role 3 | description: This workflow uses AWS Security Token Service (STS) to assume a privileged IAM role which it uses to list instances, images, key pairs, and volumes. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/sts-describe-ec2-objects 5 | 6 | # Uncomment out this trigger to run this workflow hourly. 7 | # triggers: 8 | # - name: schedule 9 | # source: 10 | # type: schedule 11 | # schedule: '0 * * * *' 12 | # binding: 13 | # parameters: 14 | # region: us-east-1 15 | # roleARN: "" # Insert your role ARN here 16 | 17 | parameters: 18 | region: 19 | description: The AWS region to run in 20 | default: us-east-1 21 | roleARN: 22 | description: ARN of the IAM role to assume 23 | default: "" 24 | 25 | steps: 26 | - name: assume-role 27 | image: relaysh/sts-assume-role 28 | spec: 29 | aws: 30 | connection: !Connection { type: aws, name: my-aws-account } 31 | region: !Parameter region 32 | roleARN: arn:aws:iam::180094860577:role/EC2 33 | roleSessionName: my-session 34 | - name: describe-instances 35 | image: relaysh/aws-ec2-step-instances-describe 36 | spec: 37 | aws: &assumerole 38 | connection: !Output {from: assume-role, name: connection} 39 | region: !Parameter region 40 | - name: describe-images 41 | image: relaysh/aws-ec2-step-images-describe 42 | spec: 43 | aws: *assumerole 44 | - name: describe-key-pairs 45 | image: relaysh/aws-ec2-step-key-pairs-describe 46 | spec: 47 | aws: *assumerole 48 | - name: describe-volumes 49 | image: relaysh/aws-ebs-step-volumes-describe 50 | spec: 51 | aws: *assumerole 52 | -------------------------------------------------------------------------------- /sts-stop-untagged-instances/README.md: -------------------------------------------------------------------------------- 1 | This workflow assumes an IAM role, looks at all of the EC2 instances in a given account and region, 2 | and stops the ones that are untagged. 3 | 4 | ## Prerequisites 5 | 6 | Before you run this workflow, you will need the following: 7 | - An AWS account. 8 | - An AWS IAM user with permissions to assume the privileged IAM role. 9 | - An AWS IAM role with the user as a trusted entity and permissions to list and modify EC2 instances. 10 | - One or more running EC2 instances that are untagged. 11 | 12 | ## Run the workflow 13 | 14 | Follow these steps to run the workflow: 15 | 1. Add your AWS IAM user credentials as a Connection: 16 | - Click **Setup** 17 | - Find the Connection named `my-aws-account` and click Edit(✎). Use the following values: 18 | - **KEY**: `ACCESS KEY ID` 19 | - **VALUE**: Enter your AWS access key id associated with the account 20 | - **KEY**: `SECRET ACCESS KEY` 21 | - **VALUE**: Enter your AWS secret access key associated with the account 22 | - Click **Save** 23 | 24 | 2. 
Click **Run workflow** and wait for the workflow run page to appear. 25 | 3. Supply following parameters to the modal: 26 | - **KEY**: `region` 27 | - **VALUE**: The AWS region to run in 28 | - **KEY**: `dryRun` 29 | - **VALUE**: True if you dont want to actually delete the resources. Use this to test the workflow and ensure it is behaving as expected. 30 | - **KEY**: `roleARN` 31 | - **VALUE**: The ARN of the IAM role to assume (e.g. arn:aws:iam::180094860577:role/EC2) 32 | 33 | 34 | 4. **Warning:** If you run the workflow with the `dryRun` parameter set to 35 | `false`, instances not in compliance with this workflow policy will 36 | immediately be stopped. 37 | 38 | ## Run the workflow on a schedule 39 | 40 | Follow these steps to run this workflow on a schedule: 41 | - Un-comment out the Trigger block in the workflow file: 42 | 43 | > TIP: If you're using the Relay code editor, highlight the `triggers` section and type `⌘ + /` (Mac) or `Ctrl + /` (Windows) to uncomment. 44 | 45 | ```yaml 46 | # triggers: 47 | # - name: schedule 48 | # source: 49 | # type: schedule 50 | # schedule: '0 * * * *' 51 | # binding: 52 | # parameters: 53 | # region: us-east-1 54 | # dryRun: true 55 | # roleARN: "" 56 | ``` 57 | 58 | - Configure the `schedule` trigger: 59 | - Supply the run interval in [cron format](https://crontab.guru/). 60 | - Configure the following parameter bindings: 61 | - Specify the `region` to run in. 62 | - Specify whether `dryRun` should be set to `true` or `false`. 63 | - Specify the `roleARN` to assume 64 | - Click **Save changes** -------------------------------------------------------------------------------- /sts-stop-untagged-instances/filter-instances.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # File: filter-instances.py 4 | # Description: This is an example script that you can author or modify that retrieves 5 | # a list of instances from the Relay Interface (in the form of parameters) 6 | # and filters the instances that are untagged. It then sets the output 7 | # variable `instanceIDs` to the list of instances that are untagged. 
8 | # Inputs: 9 | # - instances - List of instances to evaluate 10 | # Outputs: 11 | # - instanceIDs - list of instance IDs to stop in the next step 12 | 13 | from relay_sdk import Interface, Dynamic as D 14 | 15 | relay = Interface() 16 | 17 | to_stop = [] 18 | to_keep = [] 19 | 20 | instances = list(filter(lambda i: i['State']['Name'] == 'running', relay.get(D.instances))) 21 | if len(instances) == 0: 22 | print("No instances found!") 23 | exit(1) 24 | 25 | for instance in instances: 26 | try: 27 | if instance.get('Tags') is None: 28 | to_stop.append(instance['InstanceId']) 29 | else: 30 | to_keep.append(instance['InstanceId']) 31 | except Exception as e: 32 | print('\nEC2 instance {0} not considered for stopping because of a processing error: {1}'.format(instance['InstanceId'], e)) 33 | 34 | print('\nFound {} instances (with tags) to keep:'.format(len(to_keep))) 35 | print(*[instance_id for instance_id in to_keep], sep = "\n") 36 | 37 | print('\nFound {} instances without tags to stop:'.format(len(to_stop))) 38 | print(*[instance_id for instance_id in to_stop], sep = "\n") 39 | 40 | relay.outputs.set('instanceIDs', to_stop) 41 | -------------------------------------------------------------------------------- /sts-stop-untagged-instances/sts-stop-untagged-instances.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/sts-stop-untagged-instances/sts-stop-untagged-instances.png -------------------------------------------------------------------------------- /sts-stop-untagged-instances/sts-stop-untagged-instances.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Stop untagged EC2 instances 3 | description: This workflow uses AWS Security Token Service (STS) to assume a privileged IAM role which it uses to then stop untagged EC2 instances. 4 | homepage: https://github.com/puppetlabs/relay-workflows/blob/master/sts-stop-untagged-instances 5 | tags: 6 | - compliance 7 | 8 | # Uncomment this trigger to run this workflow hourly. 9 | # triggers: 10 | # - name: schedule 11 | # source: 12 | # type: schedule 13 | # schedule: '0 * * * *' 14 | # binding: 15 | # parameters: 16 | # region: us-east-1 17 | # dryRun: true 18 | # roleARN: "" # Insert your role ARN here 19 | 20 | parameters: 21 | region: 22 | description: The AWS region to run in 23 | default: us-east-1 24 | dryRun: 25 | description: True if you don't want to actually stop the instances. Use this to test the workflow and ensure it is behaving as expected.
26 | default: 'true' 27 | roleARN: 28 | description: ARN of the IAM role to assume 29 | default: "" 30 | 31 | steps: 32 | - name: assume-role 33 | image: relaysh/sts-assume-role 34 | spec: 35 | aws: 36 | connection: !Connection { type: aws, name: my-aws-account } 37 | region: !Parameter region 38 | roleARN: !Parameter roleARN 39 | roleSessionName: my-session 40 | - name: describe-instances 41 | image: relaysh/aws-ec2-step-instances-describe 42 | spec: 43 | aws: &assumerole 44 | connection: !Output {from: assume-role, name: connection } 45 | region: !Parameter region 46 | - name: filter-instances 47 | image: relaysh/core:latest-python 48 | spec: 49 | instances: !Output {from: describe-instances, name: instances} 50 | inputFile: https://raw.githubusercontent.com/puppetlabs/relay-workflows/master/sts-stop-untagged-instances/filter-instances.py 51 | - name: approval 52 | description: Wait for approval to stop instances 53 | type: approval 54 | dependsOn: filter-instances 55 | when: 56 | - !Fn.equals [!Parameter dryRun, 'false'] 57 | - name: stop-instances 58 | dependsOn: approval 59 | image: relaysh/aws-ec2-step-instances-stop 60 | when: 61 | - !Fn.equals [!Parameter dryRun, 'false'] 62 | spec: 63 | aws: *assumerole 64 | instanceIDs: !Output {from: filter-instances, name: instanceIDs} 65 | -------------------------------------------------------------------------------- /terraform-continuous-deployment/README.md: -------------------------------------------------------------------------------- 1 | This workflow runs a Terraform plan when a GitHub PR is merged into a configured 2 | branch. 3 | 4 | ## Prerequisites 5 | 6 | Before you run this workflow, you will need the following: 7 | - An AWS or GCP account to store the Terraform state file. 8 | - A Git repository that has a Terraform plan in it. 9 | - SSH key for above Git repository 10 | - A Git repository on GitHub to configure the webhook trigger in. 11 | You can use the Terraform plan repo to have it run when you merge a PR into 12 | master for example. 13 | 14 | ## Run the workflow 15 | 16 | 1. Add the workflow and set the terraform vars secret if you need any: 17 | - Click **Setup** 18 | - On the right sidebar, you will have a list of unconfigured secrets 19 | - Click on the (✎) next to **terraformVarsJSON** 20 | - Add terraform vars as a JSON object. If you don't need any, just use an 21 | empty object instead: `{}`. 22 | 2. Under the same setup bar on the right side, copy the webhook URL for 23 | **github-pr-merge** under the **Webhook trigger** section. 24 | 3. To add it to your GitHub repository. Navigate to your repository on https://github.com. 25 | - Navigate to the repository settings page by clicking **Settings** on the repository bar. 26 | - Click **Webhooks**. 27 | - Click **Add webhook** 28 | - Paste the webhook URL in the **Payload URL** box 29 | - Change **Content type** to `application/json` 30 | - Click **Let me select individual events.** 31 | - Check the **Pull requests** box 32 | - Uncheck the **Pushes** box 33 | - Click **Add webhook** at the bottom of the page 34 | 35 | Relay will now receive webhook events for Pull Request merges and use them to 36 | run your workflow. 37 | 38 | **Note:** The default state storage provider is AWS S3. If you would rather use a GCP 39 | storage bucket, then change how the credentials spec in the workflow: 40 | 41 | ``` 42 | ... 43 | spec: 44 | credentials: 45 | credentials.json: !Connection { type: gcp, name: my-gcp-credentials } 46 | ... 
47 | ``` 48 | -------------------------------------------------------------------------------- /terraform-continuous-deployment/terraform-continuous-deployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/terraform-continuous-deployment/terraform-continuous-deployment.png -------------------------------------------------------------------------------- /terraform-continuous-deployment/terraform-continuous-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | summary: Run Terraform when Pull Request merged in GitHub 3 | description: This workflow runs a Terraform command when a GitHub PR is merged into a configured branch. 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/terraform-continuous-deployment 5 | tags: 6 | - continuous delivery 7 | 8 | triggers: 9 | - name: github-pr-merge 10 | source: 11 | type: webhook 12 | image: relaysh/github-trigger-pull-request-merged:latest 13 | binding: 14 | parameters: 15 | repository: !Data repositorySSHURL 16 | branch: !Data branch 17 | 18 | parameters: 19 | workspace: 20 | description: The Terraform workspace to use 21 | default: default 22 | moduleDir: 23 | description: The directory of the Terraform module 24 | default: . 25 | repository: 26 | description: The repository that holds the Terraform module 27 | branch: 28 | description: The repository branch to use when cloning the repository 29 | default: master 30 | 31 | steps: 32 | # uncomment the following if you would like to approve the set 33 | # step manually instead of letting it runs automatically. 34 | # 35 | # - name: approval 36 | # description: Wait for approval to run terraform command 37 | # type: approval 38 | - name: terraform-run 39 | image: relaysh/terraform-step-apply 40 | # dependsOn: [approval] 41 | spec: 42 | vars: !Fn.jsonUnmarshal [!Secret terraformVarsJSON] 43 | workspace: !Parameter workspace 44 | directory: !Parameter moduleDir 45 | aws: !Connection { type: aws, name: terraform-state-provider } 46 | git: !Fn.merge 47 | connection: !Connection { type: ssh, name: terraform-repository-key } 48 | name: terraform-plan 49 | repository: !Parameter repository 50 | branch: !Parameter branch 51 | -------------------------------------------------------------------------------- /update-workflow-on-merge/README.md: -------------------------------------------------------------------------------- 1 | This is a meta-workflow that can be run to update the current 2 | version of a workflow stored in Relay from a GitHub repo. It's 3 | meant to be run automatically via GitHub webhook upon commit, 4 | so that merging a workflow PR to the `main` branch of a git repo 5 | will sync it to Relay. 6 | 7 | ## Prerequisites 8 | 9 | Before you run this workflow, you will need a public Github repository 10 | where you intend to store the canonical version of your workflows. 11 | 12 | Add this workflow to your Relay account, either from the workflow repository 13 | (double-meta!) or directly. It will prompt you to add login credentials as 14 | secrets; these can either be for your main account or a secondary account 15 | that has Administrator access to your workflows. 16 | 17 | ## Set up the trigger 18 | 19 | When you create this workflow for the first time, we'll automatically provision 20 | a webhook for you. 
--------------------------------------------------------------------------------
/update-workflow-on-merge/update-workflow-on-merge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/puppetlabs/relay-workflows/5d01fbac1fe4df0fc9a794403680d8561d55f8ea/update-workflow-on-merge/update-workflow-on-merge.png
--------------------------------------------------------------------------------
/update-workflow-on-merge/update-workflow-on-merge.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | summary: Update workflow on merge
 3 | description: This workflow can be run from a GitHub commit trigger configured on a workflow repo to update workflows in the service whenever a PR is merged.
 4 | homepage: https://github.com/puppetlabs/relay-workflows/tree/master/update-workflow-on-merge
 5 | tags:
 6 | - continuous delivery
 7 | 
 8 | parameters:
 9 |   pr_target_branch:
10 |     description: "branch target for the merged PR (default: main)"
11 |     default: main
12 |   pr_url:
13 |     description: "the URL where relay should retrieve the updated workflow"
14 |   repo_url:
15 |     description: "http clone url for the repository"
16 |   repo_path:
17 |     description: "the username/reponame path to the repo"
18 | 
19 | triggers:
20 | - name: github-commit-hook
21 |   source:
22 |     type: webhook
23 |     image: relaysh/github-trigger-pull-request-merged
24 |   binding:
25 |     parameters:
26 |       pr_target_branch: !Data branch
27 |       pr_url: !Data url
28 |       repo_url: !Data repositoryURL
29 |       repo_path: !Data repository
30 | 
31 | steps:
32 | - name: update-workflow
33 |   image: relaysh/core
34 |   when:
35 |   - !Fn.equals [!Parameter pr_target_branch, "main"]
36 |   spec:
37 |     pr_url: !Parameter pr_url
38 |     repo_url: !Parameter repo_url
39 |     repo_path: !Parameter repo_path
40 |     relay:
41 |       username: !Secret relayuser
42 |       password: !Secret relaypass
43 |   input:
44 |   - |
45 |     clonedir=$(basename `ni get -p {.repo_path}`)
46 |     pr_url=$(ni get -p {.pr_url})
47 |     git clone $(ni get -p {.repo_url}) $clonedir
48 |     cd $clonedir
49 |     sha=$(curl -s $pr_url | jq -r '.head.sha')
50 |     filename=$(git diff-tree -r --name-only --no-commit-id $sha | grep yaml | head -1)
51 |     if [[ -n $filename ]]; then
52 |       workflow=$(basename $filename .yaml)
53 |       echo $(ni get -p {.relay.password}) | relay auth login $(ni get -p {.relay.username}) -p
54 |       relay workflow replace $workflow -f $filename
55 |     fi
56 | 
--------------------------------------------------------------------------------