├── .github └── workflows │ ├── scaleAWS.yml │ └── scaleAzure.yml ├── .gitignore ├── LICENSE ├── README.md ├── aws ├── f5module │ ├── README.md │ ├── autotools.tf │ ├── f5_onboard.tmpl │ ├── main.tf │ ├── onboard_do_1nic.tpl │ ├── onboard_do_2nic.tpl │ ├── onboard_do_3nic.tpl │ ├── outputs.tf │ └── variables.tf ├── terraform │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tfvars │ ├── variables.tf │ └── workload.tf ├── workflow_state │ ├── main.tf │ └── variables.tf └── workflow_terraform │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── workload.tf ├── azure ├── f5module │ ├── autotools.tf │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── terraform │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tfvars │ ├── variables.tf │ └── workload.tf ├── workflow_state │ ├── main.tf │ └── variables.tf └── workflow_terraform │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── configs ├── alertforwarder.js └── app_sec_policy.xml ├── images ├── alert_1.png ├── alert_2.png ├── alert_3.png ├── alert_final.png ├── alert_maxconns.png ├── alert_minconns.png ├── alert_mincpu.png ├── alerts.png ├── arch.png ├── create_watch.png ├── elk_discover.png ├── elk_explore.png ├── elk_login.png ├── index_1.png ├── index_2.png ├── index_3.png ├── monitor_1.png ├── monitor_2.png ├── output.png ├── splunk.png ├── splunk1.png └── splunk3.png ├── scripts ├── consul.sh ├── deploy.sh └── kill.sh ├── templates ├── alertfwd.tpl ├── as3.json ├── backend.tpl ├── backendapp.tpl ├── backendapp_aws.tpl ├── consul.tpl ├── consul_server.tpl ├── do.json ├── onboard.tpl ├── onboard_do_1nic.tpl ├── tfstate.tpl ├── ts_1.json ├── ts_2.json └── ts_3.json └── ts_consumers ├── azurelaw └── law.tf ├── elastic ├── README.md ├── elk.sh ├── elk.tf └── logstash.conf └── splunk ├── README.md ├── splunk.sh └── splunk.tf /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .nyc_output 3 | .vscode/ 4 | bundle.js* 5 | api-bundle.js* 6 | coverage 7 | dist 8 | docs/_build/ 9 | docs/_static/apidocs.html 10 | node_modules 11 | rpmbuild 12 | test-output.txt 13 | terraform.tfvars 14 | iappslx/presentation/openapi.json 15 | venv/ 16 | docs/userguide/proposed-doc-rev.rst 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ADC Telemetry-based Autoscaling
2 | ===============================
3 | This solution, (see below) illustrates how F5's Automation Toolchain can integrate with third-party analytics provider(s) to provide cloud-agnostic, centralized application delivery monitoring and autoscaling.
4 | 
5 | Flowers
6 | 
7 | The solution utilizes various third-party technologies/services along with F5’s automation toolchain, including:
8 | 
9 | - **Hashicorp Terraform** and **Consul** for infrastructure provisioning, service discovery and event logging
10 | - **F5 BIG-IP(s)** providing L4/L7 ADC Services
11 | - **F5 Declarative Onboarding**, (DO) and **Application Services 3 Extension**, (AS3) to deploy and configure BIG-IP application services
12 | - **F5 Telemetry Streaming**, (TS) to stream telemetry data to a third-party analytics provider
13 | - **GitHub Actions** for workflow automation
14 | - **Azure or AWS** public clouds for application hosting
15 | - **Third-party Analytics Provider**, (integrated with BIG-IP(s) via TS) for monitoring and alerting, (environment includes an ELK stack trial for testing/demo purposes)
16 | 
17 | 
18 | ### GitHub Secrets
19 | Create the following [GitHub secrets](https://docs.github.com/en/actions/reference/encrypted-secrets). The secrets will be utilized by the actions workflow to securely update the deployment. Depending upon which cloud you deploy into, you will need to provide either an [AWS access key and corresponding secret](https://aws.amazon.com/premiumsupport/knowledge-center/create-access-key/) or [Azure service principal credentials](https://github.com/marketplace/actions/azure-login) as well as a [GitHub access token](https://docs.github.com/en/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token) for your repository.
20 | 
21 | 
22 | #### Required for all deployments
23 | - GH_TOKEN - *ex: ghp_mkqCzxBci0Sl3.......rY
24 | 
25 | #### Required for AWS deployments
26 | - AWS_ACCESS_KEY_ID - *ex: AKIATXXXXXXXXXXXXXXX
27 | - AWS_SECRET_ACCESS_KEY - *ex: kkLeijGuHYXXXXXXXXXXXXXXXXXXXXXXXXX
28 | 
29 | #### Required for Azure deployments
30 | - AZURE_CLIENT_ID - *ex: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
31 | - AZURE_CLIENT_SECRET - *ex: XXXXXXXXXXXXXXXXXXXXXXXXXXX
32 | - AZURE_SUBSCRIPTION_ID - *ex: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
33 | - AZURE_TENANT_ID - *ex: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
34 | - AZURE_CREDS - Combination of the above in JSON format - *ex: {"clientId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", "clientSecret": "XXXXXXXXXXXXXXXXXXXXXXXX", "subscriptionId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", "tenantId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"}*
35 | 
36 | ### Terraform Variables
37 | 
38 | The following variables, (*located in ./terraform/terraform.tfvars*) should be modified as necessary.
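Each variable is described in the list that follows. As a rough illustration only, a hypothetical *terraform.tfvars* for an Azure deployment streaming to ELK might look like the sketch below; every value shown is a placeholder, not taken from this repository.

```
# Hypothetical terraform.tfvars sketch - all values are placeholders
location       = "eastus"                  # Azure deployments only
github_owner   = "your-github-account"
repo_path      = "/repos/your-github-account/adc-telemetry-based-autoscaling/dispatches"
github_token   = "ghp_xxxxxxxxxxxxxxxxxxxx"
bigip_count    = 1
workload_count = 2
bigip_min      = 1
bigip_max      = 5
workload_min   = 2
workload_max   = 5
scale_interval = 300
ts_consumer    = 2                         # 1 = Splunk, 2 = ELK, 3 = Azure Log Analytics
logStashIP     = "203.0.113.10"            # only required when ts_consumer = 2
```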
39 | 
40 | - location *(**Azure Deployments only**)* = The Azure region where the application infrastructure will be deployed - *default: "eastus"*
41 | - region *(**AWS Deployments only**)* = The AWS region where the application infrastructure will be deployed - *ex: "us-west-1"*
42 | - github_owner = GitHub account hosting the repository - *ex: "f5devcentral"*
43 | - repo_path = - *ex: "/repos/f5devcentral/adc-telemetry-based-autoscaling/dispatches"*
44 | - github_token = - *ex: "ghp_mkqCzxBci0Sl3.......rY"*
45 | - bigip_count = - *default: 1*
46 | - workload_count = - *default: 2*
47 | - bigip_min = - *default: 1*
48 | - bigip_max = - *default: 5*
49 | - workload_min = - *default: 2*
50 | - workload_max = - *default: 5*
51 | - scale_interval = Interval, (in seconds) between scaling events. Alerts fired within the interval will fail. - *default: 300*
52 | - ts_consumer = The analytics consumer to connect to (*1 = Splunk, 2 = ELK, 3 = Azure Log Analytics - default: 1*)
53 | - splunkIP = Optional - IP address of Splunk Enterprise. TS declaration assumes the HEC is listening on the default port of **8088** and using HTTPS
54 | - splunkHEC = Optional - Splunk HEC token
55 | - logStashIP = Optional - IP address of the Logstash service. TS declaration assumes Logstash is listening on the default port of **8080** and using HTTP
56 | - law_id = Optional - Azure Log Analytics workspace ID
57 | - law_primarykey = Optional - Azure Log Analytics workspace primary key
58 | - ec2_key_name *(**AWS Deployments only**)* = Name of an existing AWS [key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) - *ex: "glckey"*
59 | 
60 | In addition to the above variables, the solution derives and sets two key local values, (hostname & app_id). The app_id is randomly generated and unique to the deployment. The hostname is assigned to all BIG-IP instances in the cluster with a format of - "*bigip...com*" - example: *bigip.azure.cd5e.com*
61 | 
62 | ### Deploying the Solution
63 | 1. Duplicate the repo - Since the solution relies on GitHub Actions for orchestration, it will be necessary to first [duplicate the repo](https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/duplicating-a-repository) into a GitHub account under your control. Clone the newly created repo locally to perform the initial app infrastructure deployment.
64 | 
65 | 1. Complete the *'terraform.tfvars'* file entries using the above noted examples.
66 | 
67 | 1. Authenticate to Azure using the [Azure CLI](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/azure_cli)
68 | 
69 | 1. Navigate to the scripts directory, (*adc-telemetry-based-autoscaling/azure/scripts/*) and execute the deployment script - *deploy.sh*. Specify the desired destination cloud using the *-c* flag. For example, to deploy to Azure execute **sh deploy.sh -c azure** or for AWS execute **sh deploy.sh -c aws**.
70 | 
71 | The deployment script executes the Terraform project as illustrated above and migrates the local Terraform state to remote state located on the newly created Hashicorp Consul server. The GitHub runners will reference the migrated state and repo-hosted Terraform to perform infrastructure updates, (*scaling operations*).
72 | 
73 | With the Terraform deployment completed, you should be presented with outputs similar to those below. The outputs provide the endpoints used to interact with the environment.
Before continuing on to the next steps, (configuring alerts) take a few minutes to familiarize yourself with the environment.
74 | 
75 | Flowers
76 | 
77 | 
78 | ### The AlertForwarder service
79 | The AlertForwarder (AF) is a simple NodeJS service that is deployed on the Consul virtual machine instance as part of the application infrastructure. The service's sole purpose is to receive alerts (webhooks) from the analytics vendor, (currently Splunk, ELK, and/or Azure Log Analytics), normalize the webhook payload, and securely proxy the call to trigger the GitHub Actions workflow.
80 | 
81 | The AF service exposes a single endpoint, (*https://:8000*) to receive incoming webhook calls. Refer to the deployment output for the AF endpoint address. You will configure your analytics provider(s) to send webhooks, (*triggered via alerts*) to this address.
82 | 
83 | 
84 | ### Configuring Alerts
85 | 
86 | The AF service currently supports alerts received from the following TS consumers: **Splunk**, **Elastic Watcher/Kibana**, **Azure Log Analytics**, and **default**. The AlertForwarder service will accept any provider's alert with the below default webhook format. At a minimum, the message payload must include the BIG-IP hostname field, (*hostname*).
87 | 
88 | Refer to the relevant TS consumer folder located in the *'ts_consumers'* directory. There you will find guidance for configuring sample alerts for the above noted vendors.
89 | 
90 | ***Default webhook POST body***
91 | - Scale BIG-IP cluster
92 | ```
93 | {"source": "default", "scaleAction":"scaleOutBigip", "message": "{payload}"}
94 | ```
95 | ```
96 | {"source": "default", "scaleAction":"scaleInBigip", "message": "{payload}"}
97 | ```
98 | 
99 | - Scale workload cluster
100 | ```
101 | {"source": "default", "scaleAction":"scaleOutWorkload", "message": "{payload}"}
102 | ```
103 | ```
104 | {"source": "default", "scaleAction":"scaleInWorkload", "message": "{payload}"}
105 | ```
--------------------------------------------------------------------------------
/aws/f5module/README.md:
--------------------------------------------------------------------------------
1 | ## Deploys BIG-IP in AWS Cloud
2 | 
3 | This Terraform module deploys an N-NIC F5 BIG-IP in the AWS cloud, and with the module count feature we can also deploy multiple BIG-IP instances.
4 | 
5 | ## Prerequisites
6 | 
7 | This module is supported from Terraform 0.13 onwards.
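As a quick sketch (not part of the module itself), a root configuration consuming this module could pin the Terraform and provider versions along these lines; the exact versions to pin are an assumption and should be matched to your environment and to the tested versions listed next.

```
terraform {
  required_version = ">= 0.13"

  required_providers {
    # Illustrative constraints only - mirror the tested versions listed below as needed.
    aws      = { source = "hashicorp/aws", version = "~> 3.8" }
    random   = { source = "hashicorp/random", version = "~> 2.3" }
    template = { source = "hashicorp/template", version = "~> 2.1" }
    null     = { source = "hashicorp/null", version = "~> 2.1" }
  }
}
```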
8 | 
9 | It is tested against the following Terraform/provider versions
10 | 
11 | Terraform v0.14.0
12 | 
13 | + provider registry.terraform.io/hashicorp/aws v3.8.0
14 | + provider registry.terraform.io/hashicorp/random v2.3.0
15 | + provider registry.terraform.io/hashicorp/template v2.1.2
16 | + provider registry.terraform.io/hashicorp/null v2.1.2
17 | 
18 | ## Releases and Versioning
19 | 
20 | This module is supported with the following BIG-IP and Terraform versions
21 | 
22 | | BIGIP version | Terraform 0.14 |
23 | |---------------|----------------|
24 | | BIG-IP 16.x | X |
25 | | BIG-IP 15.x | X |
26 | | BIG-IP 14.x | X |
27 | 
28 | ## Password Management
29 | 
30 | |:point_up: |By default, the BIG-IP module uses a randomly generated password (dynamic password generation)|
31 | |----|---|
32 | 
33 | |:point_up: |Users can explicitly provide a password as input to the module using the optional variable "f5_password"|
34 | |----|---|
35 | 
36 | |:point_up: | To use an AWS Secrets Manager password, set the variable "aws_secretmanager_auth" to true, supply the secret name in the variable "aws_secretmanager_secret_id", and provide an IAM profile in "aws_iam_instance_profile"|
37 | |-----|----|
38 | 
39 | |:warning: |End users are responsible for the IAM profile setup; please find useful links for [IAM Setup](https://aws.amazon.com/premiumsupport/knowledge-center/restrict-ec2-iam/)|
40 | |:-----------|:----|
41 | 
42 | ## Example Usage
43 | 
44 | We have provided some common deployment [examples](https://github.com/f5devcentral/terraform-aws-bigip-module/tree/master/examples)
45 | 
46 | 
47 | 
48 | #### Note
49 | There should be a one-to-one mapping between subnet_ids and securitygroup_ids (for example, if we have 2 or more external subnet_ids, we have to give the same number of external securitygroup_ids to the module)
50 | 
51 | Users can have dynamic or static private IP allocation. If the primary/secondary private IP value is null, allocation is dynamic; otherwise it is static.
52 | 
53 | ```
54 | With static private ip allocation we can assign primary and secondary private ips for external interfaces, whereas primary private ip for management
55 | and internal interfaces.
56 | ```
57 | 
58 | With static private IP allocation we can't use module count, as the same private IPs would be allocated to multiple
59 | bigip instances based on module count.
60 | 
61 | With dynamic private IP allocation, we have to pass a null value to the primary/secondary private IP declaration, and module count is supported.
62 | 
63 | #### Note
64 | ```
65 | Sometimes it is observed that the given static primary and secondary private ips may get exchanged. This is a limitation present in AWS.
66 | ```
67 | The example snippets below show how this module is called (dynamic private IP allocation)
68 | ```
69 | 
70 | 
71 | #
72 | #Example 1-NIC Deployment Module usage
73 | #
74 | module bigip {
75 |   count                  = var.instance_count
76 |   source                 = "../../"
77 |   prefix                 = "bigip-aws-1nic"
78 |   ec2_key_name           = aws_key_pair.generated_key.key_name
79 |   mgmt_subnet_ids        = [{ "subnet_id" = "subnet_id_mgmt", "public_ip" = true, "private_ip_primary" = ""}]
80 |   mgmt_securitygroup_ids = ["securitygroup_id_mgmt"]
81 | }
82 | 
83 | #
84 | #Example 2-NIC Deployment Module usage
85 | #
86 | module bigip {
87 |   count                      = var.instance_count
88 |   source                     = "../../"
89 |   prefix                     = "bigip-aws-2nic"
90 |   ec2_key_name               = aws_key_pair.generated_key.key_name
91 |   mgmt_subnet_ids            = [{ "subnet_id" = "subnet_id_mgmt", "public_ip" = true, "private_ip_primary" = ""}]
92 |   mgmt_securitygroup_ids     = ["securitygroup_id_mgmt"]
93 |   external_subnet_ids        = [{ "subnet_id" = "subnet_id_external", "public_ip" = true, "private_ip_primary" = "", "private_ip_secondary" = ""}]
94 |   external_securitygroup_ids = ["securitygroup_id_external"]
95 | }
96 | 
97 | #
98 | #Example 3-NIC Deployment Module usage
99 | #
100 | module bigip {
101 |   count                      = var.instance_count
102 |   source                     = "../../"
103 |   prefix                     = "bigip-aws-3nic"
104 |   ec2_key_name               = aws_key_pair.generated_key.key_name
105 |   mgmt_subnet_ids            = [{ "subnet_id" = "subnet_id_mgmt", "public_ip" = true, "private_ip_primary" = ""}]
106 |   mgmt_securitygroup_ids     = ["securitygroup_id_mgmt"]
107 |   external_subnet_ids        = [{ "subnet_id" = "subnet_id_external", "public_ip" = true, "private_ip_primary" = "", "private_ip_secondary" = ""}]
108 |   external_securitygroup_ids = ["securitygroup_id_external"]
109 |   internal_subnet_ids        = [{"subnet_id" = "subnet_id_internal", "public_ip" = false, "private_ip_primary" = ""}]
110 |   internal_securitygroup_ids = ["securitygroup_id_internal"]
111 | }
112 | 
113 | #
114 | #Example 4-NIC Deployment Module usage (with 2 external public interfaces, one management and one internal interface. There should be a one-to-one mapping between subnet_ids and securitygroup_ids)
115 | #
116 | 
117 | module bigip {
118 |   count                      = var.instance_count
119 |   source                     = "../../"
120 |   prefix                     = "bigip-aws-4nic"
121 |   ec2_key_name               = aws_key_pair.generated_key.key_name
122 |   mgmt_subnet_ids            = [{ "subnet_id" = "subnet_id_mgmt", "public_ip" = true }]
123 |   mgmt_securitygroup_ids     = ["securitygroup_id_mgmt"]
124 |   external_subnet_ids        = [{ "subnet_id" = "subnet_id_external", "public_ip" = true },{"subnet_id" = "subnet_id_external2", "public_ip" = true }]
125 |   external_securitygroup_ids = ["securitygroup_id_external","securitygroup_id_external"]
126 |   internal_subnet_ids        = [{"subnet_id" = "subnet_id_internal", "public_ip" = false }]
127 |   internal_securitygroup_ids = ["securitygroup_id_internal"]
128 | }
129 | 
130 | Similarly, we can have N-NIC deployments based on user-provided subnet_ids and securitygroup_ids.
131 | With module count, users can deploy multiple BIG-IP instances in the AWS cloud (with the default value of count being one)
132 | 
133 | 
134 | ```
135 | #### Below is an example snippet for static private IP allocation
136 | 
137 | ```
138 | Example 3-NIC Deployment with static private ip allocation
139 | 
140 | module bigip {
141 |   source                      = "../../"
142 |   count                       = var.instance_count
143 |   prefix                      = format("%s-3nic", var.prefix)
144 |   ec2_key_name                = aws_key_pair.generated_key.key_name
145 |   aws_secretmanager_secret_id = aws_secretsmanager_secret.bigip.id
146 |   mgmt_subnet_ids             = [{ "subnet_id" = aws_subnet.mgmt.id, "public_ip" = true, "private_ip_primary" = "10.0.1.4"}]
147 |   mgmt_securitygroup_ids      = [module.mgmt-network-security-group.this_security_group_id]
148 |   external_securitygroup_ids  = [module.external-network-security-group-public.this_security_group_id]
149 |   internal_securitygroup_ids  = [module.internal-network-security-group-public.this_security_group_id]
150 |   external_subnet_ids         = [{ "subnet_id" = aws_subnet.external-public.id, "public_ip" = true, "private_ip_primary" = "10.0.2.4", "private_ip_secondary" = "10.0.2.5"}]
151 |   internal_subnet_ids         = [{ "subnet_id" = aws_subnet.internal.id, "public_ip" = false, "private_ip_primary" = "10.0.3.4"}]
152 | }
153 | ```
154 | 
155 | ### BIG-IP Automation Toolchain InSpec Profile for testing readiness of Automation Tool Chain components
156 | 
157 | After the module deployment, we can use the InSpec tool to verify BIG-IP connectivity along with the ATC components.
158 | 
159 | This InSpec profile evaluates the following:
160 | 
161 | * Basic connectivity to a BIG-IP management endpoint ('bigip-connectivity')
162 | * Availability of the Declarative Onboarding (DO) service ('bigip-declarative-onboarding')
163 | * Version reported by the Declarative Onboarding (DO) service ('bigip-declarative-onboarding-version')
164 | * Availability of the Application Services (AS3) service ('bigip-application-services')
165 | * Version reported by the Application Services (AS3) service ('bigip-application-services-version')
166 | * Availability of the Telemetry Streaming (TS) service ('bigip-telemetry-streaming')
167 | * Version reported by the Telemetry Streaming (TS) service ('bigip-telemetry-streaming-version')
168 | * Availability of the Cloud Failover Extension (CFE) service ('bigip-cloud-failover-extension')
169 | * Version reported by the Cloud Failover Extension (CFE) service ('bigip-cloud-failover-extension-version')
170 | 
171 | #### Run InSpec tests
172 | 
173 | We can either run the inspec exec command directly or execute runtests.sh in any one of the example NIC folders, which will run the below inspec command
174 | 
175 | inspec exec inspec/bigip-ready --input bigip_address=$BIGIP_MGMT_IP bigip_port=$BIGIP_MGMT_PORT user=$BIGIP_USER password=$BIGIP_PASSWORD do_version=$DO_VERSION as3_version=$AS3_VERSION ts_version=$TS_VERSION fast_version=$FAST_VERSION cfe_version=$CFE_VERSION
176 | 
177 | 
178 | #### Required Input Variables
179 | 
180 | These variables must be set in the module block when using this module.
181 | 
182 | | Name | Description | Type |
183 | |------|-------------|------|
184 | | prefix | This value is inserted at the beginning of each AWS object.
Note: requires alpha-numeric characters without special characters | `string` |
185 | | ec2\_key\_name | AWS EC2 Key name for SSH access | `string` |
186 | | mgmt\_subnet\_ids | Map with Subnet-id and public_ip as keys for the management subnet | `List of Maps` |
187 | | mgmt\_securitygroup\_ids | securitygroup\_ids for the management interface | `List` |
188 | | instance\_count | Number of Bigip instances to spin up | `number` |
189 | 
190 | #### Optional Input Variables
191 | 
192 | These variables have default values and don't have to be set to use this module. You may set these variables to override their default values.
193 | 
194 | | Name | Description | Type | Default |
195 | |------|-------------|------|---------|
196 | | f5\_username | The admin username of the F5 BIG-IP that will be deployed | `string` | bigipuser |
197 | | f5\_password | Password of the F5 BIG-IP that will be deployed | `string` | "" |
198 | | ec2\_instance\_type | AWS EC2 instance type | `string` | m5.large |
199 | | f5\_ami\_search\_name | BIG-IP AMI name to search for | `string` | F5 BIGIP-* PAYG-Best 200Mbps* |
200 | | mgmt\_eip | Enable an Elastic IP address on the management interface | `bool` | TRUE |
201 | | aws\_secretmanager\_auth | Whether to use AWS Secrets Manager to pass authentication | `bool` | FALSE |
202 | | aws\_secretmanager\_secret\_id | AWS Secrets Manager Secret ID that stores the BIG-IP password | `string` | |
203 | | aws\_iam\_instance\_profile | AWS IAM instance profile to associate with the BIG-IP, with the required permissions | `string` | |
204 | | DO_URL | URL to download the BIG-IP Declarative Onboarding module | `string` | latest |
205 | | AS3_URL | URL to download the BIG-IP Application Service Extension 3 (AS3) module | `string` | latest |
206 | | TS_URL | URL to download the BIG-IP Telemetry Streaming module | `string` | latest |
207 | | FAST_URL | URL to download the BIG-IP FAST module | `string` | latest |
208 | | CFE_URL | URL to download the BIG-IP Cloud Failover Extension module | `string` | latest |
209 | | INIT_URL | URL to download the BIG-IP runtime init module | `string` | latest |
210 | | libs\_dir | Directory on the BIG-IP to download the A&O Toolchain into | `string` | /config/cloud/aws/node_modules |
211 | | onboard\_log | Directory on the BIG-IP to store the cloud-init logs | `string` | /var/log/startup-script.log |
212 | | external\_subnet\_ids | The subnet id of the virtual network where the virtual machines will reside | `List of Maps` | [{ "subnet_id" = null, "public_ip" = null }] |
213 | | internal\_subnet\_ids | The subnet id of the virtual network where the virtual machines will reside | `List of Maps` | [{ "subnet_id" = null, "public_ip" = null }] |
214 | | external\_securitygroup\_ids | The Network Security Group ids for the external network | `List` | [] |
215 | | internal\_securitygroup\_ids | The Network Security Group ids for the internal network | `List` | [] |
216 | 
217 | ~> **NOTE:** For each external interface, one primary and one secondary private IP will be assigned.
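As a hedged illustration of the optional inputs above, the sketch below overrides a few defaults in a 1-NIC module block; the subnet, security-group, and key-pair values are placeholders, not real resources.

```
module bigip {
  source                 = "../../"
  count                  = var.instance_count
  prefix                 = "bigip-aws-1nic"
  ec2_key_name           = "my-ec2-keypair"                     # placeholder key pair name
  ec2_instance_type      = "m5.xlarge"                          # overrides the m5.large default
  f5_ami_search_name     = "F5 BIGIP-16.1* PAYG-Best 200Mbps*"  # pin the AMI search string
  mgmt_subnet_ids        = [{ "subnet_id" = "subnet-0123456789abcdef0", "public_ip" = true, "private_ip_primary" = "" }]
  mgmt_securitygroup_ids = ["sg-0123456789abcdef0"]
}
```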
218 | 
219 | #### Output Variables
220 | | Name | Description |
221 | |------|-------------|
222 | | mgmtPublicIP | List of public IP addresses allocated for the BIG-IP management interfaces |
223 | | mgmtPublicDNS | Public DNS record(s) used to connect to the BIG-IP management interface(s) |
224 | | mgmtPort | HTTPS port used for the BIG-IP management interface |
225 | | f5\_username | BIG-IP username |
226 | | bigip\_password | BIG-IP password (if dynamic password generation is chosen it will be a randomly generated password, or if aws_secretmanager_auth is chosen it will be the aws_secretsmanager_secret_version secret string) |
227 | | private\_addresses | List of BIG-IP private addresses |
228 | | public\_addresses | List of BIG-IP public addresses |
229 | 
230 | ~> **NOTE:** A local JSON file containing the DO declaration will be generated
231 | 
232 | ## Support Information
233 | 
234 | This repository is community-supported. Follow the instructions below on how to raise issues.
235 | 
236 | ### Filing Issues and Getting Help
237 | 
238 | If you come across a bug or other issue, use [GitHub Issues](https://github.com/f5devcentral/terraform-aws-bigip-module/issues) to submit an issue for our team. You can also see the current known issues on that page, which are tagged with a purple Known Issue label.
239 | 
240 | ## Copyright
241 | 
242 | Copyright 2014-2019 F5 Networks Inc.
243 | 
244 | ### F5 Networks Contributor License Agreement
245 | 
246 | Before you start contributing to any project sponsored by F5 Networks, Inc. (F5) on GitHub, you will need to sign a Contributor License Agreement (CLA).
247 | 
248 | If you are signing as an individual, we recommend that you talk to your employer (if applicable) before signing the CLA since some employment agreements may have restrictions on your contributions to other projects. Otherwise by submitting a CLA you represent that you are legally entitled to grant the licenses recited therein.
249 | 
250 | If your employer has rights to intellectual property that you create, such as your contributions, you represent that you have received permission to make contributions on behalf of that employer, that your employer has waived such rights for your contributions, or that your employer has executed a separate CLA with F5.
251 | 
252 | If you are signing on behalf of a company, you represent that you are legally entitled to grant the license recited therein. You represent further that each employee of the entity that submits contributions is authorized to submit such contributions on behalf of the entity pursuant to the CLA.
253 | -------------------------------------------------------------------------------- /aws/f5module/autotools.tf: -------------------------------------------------------------------------------- 1 | # Setup Onboarding scripts 2 | 3 | locals { 4 | params_map_1 = { 5 | 1 = var.splunkIP 6 | 2 = var.logStashIP 7 | 3 = var.law_id 8 | } 9 | params_map_2 = { 10 | 1 = var.splunkHEC 11 | 2 = "" 12 | 3 = var.law_primarykey 13 | } 14 | } 15 | 16 | data "template_file" "do_json" { 17 | template = file("../../templates/do.json") 18 | 19 | vars = { 20 | hostname = var.hostname 21 | local_selfip = "-external-self-address-" 22 | gateway = var.ext_gw 23 | dns_server = var.dns_server 24 | ntp_server = var.ntp_server 25 | timezone = var.timezone 26 | } 27 | } 28 | 29 | data "template_file" "as3_json" { 30 | template = file("../../templates/as3.json") 31 | vars = { 32 | web_pool = "myapp-${var.app}" 33 | app_name = var.app_name 34 | consul_ip = var.consul_ip 35 | } 36 | } 37 | 38 | data "template_file" "ts_json" { 39 | template = file("../../templates/ts_${var.ts_consumer}.json") 40 | vars = { 41 | param_1 = local.params_map_1[var.ts_consumer] 42 | param_2 = local.params_map_2[var.ts_consumer] 43 | region = var.azure_region 44 | } 45 | } 46 | 47 | -------------------------------------------------------------------------------- /aws/f5module/f5_onboard.tmpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Send output to log file and serial console 4 | mkdir -p /var/log/cloud /config/cloud /var/config/rest/downloads 5 | LOG_FILE=/var/log/cloud/startup-script.log 6 | [[ ! -f $LOG_FILE ]] && touch $LOG_FILE || { echo "Run Only Once. Exiting"; exit; } 7 | npipe=/tmp/$$.tmp 8 | trap "rm -f $npipe" EXIT 9 | mknod $npipe p 10 | tee <$npipe -a $LOG_FILE /dev/ttyS0 & 11 | exec 1>&- 12 | exec 1>$npipe 13 | exec 2>&1 14 | 15 | # # create user 16 | tmsh create auth user ${bigip_username} password ${bigip_password} shell tmsh partition-access replace-all-with { all-partitions { role admin } } 17 | 18 | # save config 19 | tmsh save sys config 20 | 21 | mkdir -p /config/cloud 22 | 23 | cat << 'EOF' > /config/cloud/runtime-init-conf.yaml 24 | --- 25 | runtime_parameters: 26 | - name: USER_NAME 27 | type: static 28 | value: ${bigip_username} 29 | EOF 30 | 31 | if ${aws_secretmanager_auth} 32 | then 33 | cat << 'EOF' >> /config/cloud/runtime-init-conf.yaml 34 | - name: ADMIN_PASS 35 | type: secret 36 | secretProvider: 37 | environment: aws 38 | type: SecretsManager 39 | version: AWSCURRENT 40 | secretId: ${bigip_password} 41 | EOF 42 | else 43 | cat << 'EOF' >> /config/cloud/runtime-init-conf.yaml 44 | - name: ADMIN_PASS 45 | type: static 46 | value: ${bigip_password} 47 | EOF 48 | fi 49 | 50 | # Add remaining runtime_parameters: 51 | cat << 'EOF' >> /config/cloud/runtime-init-conf.yaml 52 | pre_onboard_enabled: 53 | - name: provision_rest 54 | type: inline 55 | commands: 56 | - /usr/bin/setdb provision.extramb 500 57 | - /usr/bin/setdb restjavad.useextramb true 58 | extension_packages: 59 | install_operations: 60 | - extensionType: do 61 | extensionVersion: ${DO_VER} 62 | extensionUrl: ${DO_URL} 63 | - extensionType: as3 64 | extensionVersion: ${AS3_VER} 65 | extensionUrl: ${AS3_URL} 66 | - extensionType: ts 67 | extensionVersion: ${TS_VER} 68 | extensionUrl: ${TS_URL} 69 | - extensionType: cf 70 | extensionVersion: ${CFE_VER} 71 | extensionUrl: ${CFE_URL} 72 | extension_services: 73 | service_operations: 74 | - extensionType: do 75 | type: inline 76 | value: 77 | 
schemaVersion: 1.0.0 78 | class: Device 79 | async: true 80 | label: Onboard BIG-IP 81 | Common: 82 | class: Tenant 83 | hostname: ${hostname} 84 | dbVars: 85 | class: DbVariables 86 | ui.advisory.enabled: true 87 | ui.advisory.color: green 88 | ui.advisory.text: /Common/hostname 89 | config.allow.rfc3927: enable 90 | myDns: 91 | class: DNS 92 | nameServers: 93 | - 8.8.8.8 94 | - 4.2.2.4 95 | search: 96 | - f5.com 97 | myNtp: 98 | class: NTP 99 | servers: 100 | - 0.pool.ntp.org 101 | - 1.pool.ntp.org 102 | - 2.pool.ntp.org 103 | timezone: ${timezone} 104 | admin: 105 | class: User 106 | userType: regular 107 | password: ${bigip_password} 108 | shell: bash 109 | ${bigip_username}: 110 | class: User 111 | userType: regular 112 | password: ${bigip_password} 113 | shell: bash 114 | partitionAccess: 115 | all-partitions: 116 | role: admin 117 | myProvisioning: 118 | class: Provision 119 | ltm: nominal 120 | avr: nominal 121 | asm: nominal 122 | - extensionType: ts 123 | type: inline 124 | value: 125 | class: Telemetry 126 | My_System: 127 | class: Telemetry_System 128 | systemPoller: 129 | interval: 60 130 | My_Listener: 131 | class: Telemetry_Listener 132 | port: 6514 133 | My_Consumer: 134 | class: Telemetry_Consumer 135 | type: Splunk 136 | host: ${param_1} 137 | protocol: https 138 | port: 8088 139 | allowSelfSignedCert: true 140 | passphrase: 141 | cipherText: ${param_2} 142 | - extensionType: as3 143 | type: inline 144 | value: 145 | $schema: https://raw.githubusercontent.com/F5Networks/f5-appsvcs-extension/master/schema/latest/as3-schema.json 146 | class: AS3 147 | action: deploy 148 | persist: true 149 | declaration: 150 | class: ADC 151 | schemaVersion: 3.13.0 152 | id: 123abc 153 | label: Sample 1 154 | remark: An HTTPS sample application 155 | controls: 156 | trace: true 157 | DemoTenant: 158 | class: Tenant 159 | Shared: 160 | class: Application 161 | template: shared 162 | ${app_name}: 163 | class: Application 164 | template: https 165 | serviceMain: 166 | class: Service_HTTPS 167 | virtualPort: 443 168 | virtualAddresses: 169 | - 0.0.0.0 170 | pool: ${web_pool} 171 | policyWAF: 172 | use: My_ASM_Policy 173 | serverTLS: webtls 174 | profileTrafficLog: 175 | use: telemetry_traffic_log_profile 176 | profileAnalyticsTcp: 177 | use: telemetry_tcp_analytics_profile 178 | securityLogProfiles: 179 | - use: telemetry_asm_security_log_profile 180 | profileBotDefense: 181 | bigip: /Common/bot-defense 182 | profileDOS: 183 | bigip: /Common/dos 184 | My_ASM_Policy: 185 | class: WAF_Policy 186 | url: https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/main/configs/app_sec_policy.xml 187 | ignoreChanges: true 188 | ${web_pool}: 189 | class: Pool 190 | monitors: 191 | - tcp 192 | members: 193 | - servicePort: 80 194 | addressDiscovery: consul 195 | updateInterval: 10 196 | uri: http://${consul_ip}:8500/v1/catalog/service/nginx 197 | telemetry_local_rule: 198 | remark: Only required when TS is a local listener 199 | class: iRule 200 | iRule: |- 201 | when CLIENT_ACCEPTED { 202 | node 127.0.0.1 6514 203 | } 204 | telemetry_local: 205 | remark: Only required when TS is a local listener 206 | class: Service_TCP 207 | virtualAddresses: 208 | - 255.255.255.254 209 | virtualPort: 6514 210 | iRules: 211 | - telemetry_local_rule 212 | telemetry: 213 | class: Pool 214 | members: 215 | - enable: true 216 | serverAddresses: 217 | - 255.255.255.254 218 | servicePort: 6514 219 | monitors: 220 | - bigip: /Common/tcp 221 | telemetry_hsl: 222 | class: Log_Destination 223 | type: 
remote-high-speed-log 224 | protocol: tcp 225 | pool: 226 | use: telemetry 227 | telemetry_formatted: 228 | class: Log_Destination 229 | type: splunk 230 | forwardTo: 231 | use: telemetry_hsl 232 | telemetry_publisher: 233 | class: Log_Publisher 234 | destinations: 235 | - use: telemetry_formatted 236 | telemetry_asm_security_log_profile: 237 | class: Security_Log_Profile 238 | application: 239 | localStorage: false 240 | remoteStorage: splunk 241 | servers: 242 | - address: 255.255.255.254 243 | port: '6514' 244 | storageFilter: 245 | requestType: all 246 | telemetry_traffic_log_profile: 247 | class: Traffic_Log_Profile 248 | requestSettings: 249 | requestEnabled: true 250 | requestProtocol: mds-tcp 251 | requestPool: 252 | use: telemetry 253 | requestTemplate: event_source='request_logging',hostname='$BIGIP_HOSTNAME',client_ip='$CLIENT_IP',server_ip='$SERVER_IP',http_method='$HTTP_METHOD',http_uri='$HTTP_URI',virtual_name='$VIRTUAL_NAME',event_timestamp='$DATE_HTTP' 254 | telemetry_http_analytics_profile: 255 | class: Analytics_Profile 256 | collectGeo: true 257 | collectMaxTpsAndThroughput: true 258 | collectOsAndBrowser: true 259 | collectIp: true 260 | collectMethod: true 261 | collectPageLoadTime: true 262 | collectResponseCode: true 263 | collectSubnet: true 264 | collectUrl: true 265 | collectUserAgent: true 266 | collectUserSession: true 267 | publishIruleStatistics: true 268 | telemetry_tcp_analytics_profile: 269 | class: Analytics_TCP_Profile 270 | collectCity: true 271 | collectContinent: true 272 | collectCountry: true 273 | collectNexthop: true 274 | collectPostCode: true 275 | collectRegion: true 276 | collectRemoteHostIp: true 277 | collectRemoteHostSubnet: true 278 | collectedByServerSide: true 279 | webtls: 280 | class: TLS_Server 281 | certificates: 282 | - certificate: webcert 283 | webcert: 284 | class: Certificate 285 | remark: in practice we recommend using a passphrase 286 | certificate: |- 287 | -----BEGIN CERTIFICATE----- 288 | MIIFWjCCBEKgAwIBAgITfQAAAB0gY6x6LLG8KwAAAAAAHTANBgkqhkiG9w0BAQUF 289 | ADBOMRMwEQYKCZImiZPyLGQBGRYDY29tMRowGAYKCZImiZPyLGQBGRYKYXNlcnJh 290 | Y29ycDEbMBkGA1UEAxMSYXNlcnJhY29ycC1EQy1DQS0xMB4XDTIwMDIxNTIyMTIw 291 | MloXDTIyMDIxNDIyMTIwMlowHzEdMBsGA1UEAxMUbXlhcHAuYXNlcnJhY29ycC5j 292 | b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTGBNbVYLJiIDfL0FQ 293 | Mi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sXfCty 294 | bszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A8fRf 295 | QNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz4qS2 296 | IJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgFsJOl 297 | DeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNYdHLg 298 | C3dPAgMBAAGjggJeMIICWjA7BgkrBgEEAYI3FQcELjAsBiQrBgEEAYI3FQiBg9Bp 299 | g5vnB4b5lxighjeD8YE0eYL+ujyChWkCAWQCAQMwEwYDVR0lBAwwCgYIKwYBBQUH 300 | AwEwDgYDVR0PAQH/BAQDAgWgMBsGCSsGAQQBgjcVCgQOMAwwCgYIKwYBBQUHAwEw 301 | HQYDVR0OBBYEFMXq6/mUs8bg5TUoL3uXPUyyAFyXMB8GA1UdIwQYMBaAFBEzMhC4 302 | l6myjmO0WBY2s0tLj1fQMIHOBgNVHR8EgcYwgcMwgcCggb2ggbqGgbdsZGFwOi8v 303 | L0NOPWFzZXJyYWNvcnAtREMtQ0EtMSxDTj1kYyxDTj1DRFAsQ049UHVibGljJTIw 304 | S2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1h 305 | c2VycmFjb3JwLERDPWNvbT9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/ 306 | b2JqZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0aW9uUG9pbnQwgccGCCsGAQUFBwEBBIG6 307 | MIG3MIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049YXNlcnJhY29ycC1EQy1DQS0x 308 | LENOPUFJQSxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxD 309 | Tj1Db25maWd1cmF0aW9uLERDPWFzZXJyYWNvcnAsREM9Y29tP2NBQ2VydGlmaWNh 310 | 
dGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0aG9yaXR5MA0GCSqG 311 | SIb3DQEBBQUAA4IBAQC8xoaBDhn0BGqQ73ROjlvI+5yJs3UUws2D7KCtpzNt2Ksm 312 | gm52umZoIzofPRXg/RVkt+Ig7Y9+ixaEyAxMFtpDyap1bTNjWsw99LoUZvMo7B9q 313 | rgOS55h5OeLwc1PM3n43I9H2C3uQ1hYflD3ovzvzrywejCHlHlReovZkYCcrDCa+ 314 | ytw7Hob0P1vkXsPtpmk61A7PCLw6DghhczT1f4nAK147FuRl55jz38JFOtqKVlfU 315 | NH4EaSxciHO2evWDHUddzeAwxHLg77UKPH+MSPXd7jGZx3xqQEtpjMqq5WM09YsL 316 | 1mwOJpk1Xarkb0WB0J10YXqKs6tSxyrfX/FL5MZA 317 | -----END CERTIFICATE----- 318 | privateKey: |- 319 | -----BEGIN PRIVATE KEY----- 320 | MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDTGBNbVYLJiIDf 321 | L0FQMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sX 322 | fCtybszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A 323 | 8fRfQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz 324 | 4qS2IJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgF 325 | sJOlDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNY 326 | dHLgC3dPAgMBAAECggEADudNPo6L/FSR3LAGaXYRoaoT7dBuwMxQUR+/opUbYIFx 327 | 0gOPbftL5vYrfNjQVkT13a4WDH6OzQilqLPQkXS3K+bl8v+lVNEMlYgtDOOgEh/8 328 | 13pThxDTUtFRgkK9HlUfSq1Yz06A0hfvxRmQCkWXBCVaoL4KWep7o9DMUqWR+4ad 329 | XlvzvG2W3fvNE3+ewwf0tR/OYTQOZvkRfm0Ws1s0W85wr6Ec87psbLPPO2yecFcq 330 | 3fJjcZmbaWWG5Thh9479W3rhC3I6rJN+YLgyXoumml5wmmjf8CxocUL3uPt+32u5 331 | E4OZTLdAIF0+KxH3hYbw3D6DB/LnAZVB+jxmOC4j2QKBgQDm5JVzld5KYUlIt566 332 | syQ95JMyw0Oqp1U7WMwI8+RMYnO4NPo6Dzej9LMsVAQFmg5DncElSf3PLC9PfsVe 333 | CK6FiXPScy/9cAqchJVI3f7CgJiYvrFwoiieVJFYSgh52aWxL/KHnEe4UWk50qmS 334 | /hCyPdSCHJVw1oh5dIO/QGG+YwKBgQDqDFi8mNrUI/QD+m/2HNT+6PaXUWEfyY94 335 | /swvn9O+qQYWbU8MCxeucthTJ5p5lYY5FdUeKPGZn0jahgoEV63XnuemNe43tOJA 336 | Dpo1UyWmQoodAOOm9QiEEjOAxx+hEcSfJrEUgGSYVR+GHbap+xuB0BrtCN9qWsdb 337 | U2d25b4xJQKBgQCV4SAardhkVA6sQ3WoIA2Ql8Xtl89fAcxT/+pCjX9PDkGr+8fK 338 | 1IH7ziZYyhjqQfysa8UrHYLCbx4u7k3UIrKXQIiMvfUTAR4CSBZX/LMZMzzbOj4Y 339 | rUrMrHzE4Rnrbxsdj9BRs2LjBQXXYSZuornX2kcORtvDKZ/hp362MWbBnQKBgQCo 340 | SZZojXQTQ4LKdYGZsmOIotPkO9SdOZ3a/0KsH7zuA7Tn3VMQMs2lVtiq+ff94oCv 341 | fT5PQFtv/XMyBV0ggDb0qkKgZXjTP1HLg3RoUU/p+0A52JDYVKn55Oh5eTQJ6a+6 342 | S+TZ+/PZAKP5GFZmZLMDpTInK9ERNRLRXOgxOsKFrQKBgQDH6PfQTuvubwL+CYbb 343 | CI1AtWOGEGcuLIbtlbh5e4/1FxtdG2pgV2wBJxIwNhn8U7yMHj9B/MB39OAt6Vlc 344 | ZU0Dah41RMi4dPGAi/iuTQklfRLjROSVmhb/lS9xDRxzHcm0u0YBuU0Q+MC3aw7O 345 | jXWs11QDs5AR93mLB0AZdRjGLA== 346 | -----END PRIVATE KEY----- 347 | post_onboard_enabled: [] 348 | EOF 349 | 350 | # # Download 351 | #PACKAGE_URL='https://cdn.f5.com/product/cloudsolutions/f5-bigip-runtime-init/v1.1.0/dist/f5-bigip-runtime-init-1.1.0-1.gz.run' 352 | #PACKAGE_URL='https://cdn.f5.com/product/cloudsolutions/f5-bigip-runtime-init/v1.2.0/dist/f5-bigip-runtime-init-1.2.0-1.gz.run' 353 | for i in {1..30}; do 354 | curl -fv --retry 1 --connect-timeout 5 -L ${INIT_URL} -o "/var/config/rest/downloads/f5-bigip-runtime-init.gz.run" && break || sleep 10 355 | done 356 | # Install 357 | bash /var/config/rest/downloads/f5-bigip-runtime-init.gz.run -- '--cloud aws' 358 | # Run 359 | # export F5_BIGIP_RUNTIME_INIT_LOG_LEVEL=silly 360 | f5-bigip-runtime-init --config-file /config/cloud/runtime-init-conf.yaml 361 | tmsh modify analytics global-settings { external-logging-publisher /DemoTenant/${app_name}/telemetry_publisher offbox-protocol hsl use-offbox enabled } 362 | -------------------------------------------------------------------------------- /aws/f5module/onboard_do_1nic.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": "1.0.0", 3 | 
"class": "Device", 4 | "async": true, 5 | "label": "Onboard BIG-IP", 6 | "Common": { 7 | "class": "Tenant", 8 | "mySystem": { 9 | "class": "System", 10 | "hostname": "${hostname}" 11 | }, 12 | "myDns": { 13 | "class": "DNS", 14 | "nameServers": [ 15 | ${name_servers} 16 | ], 17 | "search": [ 18 | "f5.com" 19 | ] 20 | }, 21 | "myNtp": { 22 | "class": "NTP", 23 | "servers": [ 24 | ${ntp_servers} 25 | ], 26 | "timezone": "UTC" 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /aws/f5module/onboard_do_2nic.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": "1.0.0", 3 | "class": "Device", 4 | "async": true, 5 | "label": "Onboard BIG-IP", 6 | "Common": { 7 | "class": "Tenant", 8 | "mySystem": { 9 | "class": "System", 10 | "hostname": "${hostname}" 11 | }, 12 | "myDns": { 13 | "class": "DNS", 14 | "nameServers": [ 15 | ${name_servers} 16 | ], 17 | "search": [ 18 | "f5.com" 19 | ] 20 | }, 21 | "myNtp": { 22 | "class": "NTP", 23 | "servers": [ 24 | ${ntp_servers} 25 | ], 26 | "timezone": "UTC" 27 | }, 28 | "${vlan-name}": { 29 | "class": "VLAN", 30 | "tag": 4093, 31 | "mtu": 1500, 32 | "interfaces": [ 33 | { 34 | "name": "1.1", 35 | "tagged": false 36 | } 37 | ], 38 | "cmpHash": "dst-ip" 39 | }, 40 | "${vlan-name}-self": { 41 | "class": "SelfIp", 42 | "address": "${self-ip}/24", 43 | "vlan": "${vlan-name}", 44 | "allowService": "default", 45 | "trafficGroup": "traffic-group-local-only" 46 | }, 47 | "default": { 48 | "class": "Route", 49 | "gw": "${gateway}", 50 | "network": "default", 51 | "mtu": 1500 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /aws/f5module/onboard_do_3nic.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": "1.0.0", 3 | "class": "Device", 4 | "async": true, 5 | "label": "Onboard BIG-IP", 6 | "Common": { 7 | "class": "Tenant", 8 | "mySystem": { 9 | "class": "System", 10 | "hostname": "${hostname}" 11 | }, 12 | "myDns": { 13 | "class": "DNS", 14 | "nameServers": [ 15 | ${name_servers} 16 | ], 17 | "search": [ 18 | "f5.com" 19 | ] 20 | }, 21 | "myNtp": { 22 | "class": "NTP", 23 | "servers": [ 24 | ${ntp_servers} 25 | ], 26 | "timezone": "UTC" 27 | }, 28 | "${vlan-name1}": { 29 | "class": "VLAN", 30 | "tag": 4093, 31 | "mtu": 1500, 32 | "interfaces": [ 33 | { 34 | "name": "1.1", 35 | "tagged": false 36 | } 37 | ], 38 | "cmpHash": "dst-ip" 39 | }, 40 | "${vlan-name1}-self": { 41 | "class": "SelfIp", 42 | "address": "${self-ip1}/24", 43 | "vlan": "${vlan-name1}", 44 | "allowService": "none", 45 | "trafficGroup": "traffic-group-local-only" 46 | }, 47 | "default": { 48 | "class": "Route", 49 | "gw": "${gateway}", 50 | "network": "default", 51 | "mtu": 1500 52 | }, 53 | "${vlan-name2}": { 54 | "class": "VLAN", 55 | "tag": 4094, 56 | "mtu": 1500, 57 | "interfaces": [ 58 | { 59 | "name": "1.2", 60 | "tagged": false 61 | } 62 | ], 63 | "cmpHash": "dst-ip" 64 | }, 65 | "${vlan-name2}-self": { 66 | "class": "SelfIp", 67 | "address": "${self-ip2}/24", 68 | "vlan": "${vlan-name2}", 69 | "allowService": "default", 70 | "trafficGroup": "traffic-group-local-only" 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /aws/f5module/outputs.tf: -------------------------------------------------------------------------------- 1 | # BIG-IP Management Public IP Addresses 2 | output mgmtPublicIP { 3 | description = 
"List of BIG-IP public IP addresses for the management interfaces" 4 | value = aws_eip.mgmt[*].public_ip 5 | } 6 | 7 | # BIG-IP Management Public DNS 8 | output mgmtPublicDNS { 9 | description = "List of BIG-IP public DNS records for the management interfaces" 10 | value = aws_eip.mgmt[*].public_dns 11 | } 12 | 13 | # BIG-IP Management Port 14 | output mgmtPort { 15 | description = "HTTPS Port used for the BIG-IP management interface" 16 | value = local.total_nics > 1 ? "443" : "8443" 17 | } 18 | 19 | output f5_username { 20 | value = var.f5_username 21 | } 22 | 23 | output bigip_password { 24 | description = <<-EOT 25 | "Password for bigip user ( if dynamic_password is choosen it will be random generated password or if azure_keyvault is choosen it will be key vault secret name ) 26 | EOT 27 | value = (var.f5_password == "") ? (var.aws_secretmanager_auth ? var.aws_secretmanager_secret_id : random_string.dynamic_password.result) : var.f5_password 28 | } 29 | 30 | output private_addresses { 31 | description = "List of BIG-IP private addresses" 32 | value = concat(aws_network_interface.mgmt.*.private_ips, aws_network_interface.mgmt1.*.private_ips, aws_network_interface.public.*.private_ips, aws_network_interface.external_private.*.private_ips, aws_network_interface.private.*.private_ips, aws_network_interface.public1.*.private_ips, aws_network_interface.external_private1.*.private_ips, aws_network_interface.private1.*.private_ips) 33 | } 34 | 35 | output private_addresses_new { 36 | description = "List of BIG-IP private addresses" 37 | value = { 38 | mgmt = { 39 | private_ip = length(compact(local.mgmt_public_private_ip_primary)) > 0 ? aws_network_interface.mgmt.*.private_ip :aws_network_interface.mgmt1.*.private_ip 40 | private_ips = length(compact(local.mgmt_public_private_ip_primary)) > 0 ? aws_network_interface.mgmt.*.private_ips :aws_network_interface.mgmt1.*.private_ips 41 | } 42 | public = { 43 | private_ip = length(compact(local.external_public_private_ip_primary)) > 0 ? aws_network_interface.public.*.private_ip : aws_network_interface.public1.*.private_ip 44 | private_ips = length(compact(local.external_public_private_ip_primary)) > 0 ? aws_network_interface.public.*.private_ips : aws_network_interface.public1.*.private_ips 45 | } 46 | external_private = { 47 | private_ip = length(compact(local.external_private_ip_primary)) > 0 ? aws_network_interface.external_private.*.private_ip : aws_network_interface.external_private1.*.private_ip 48 | private_ips = length(compact(local.external_private_ip_primary)) > 0 ? aws_network_interface.external_private.*.private_ips : aws_network_interface.external_private1.*.private_ips 49 | } 50 | private = { 51 | private_ip = length(compact(local.internal_private_ip_primary)) > 0 ? aws_network_interface.private.*.private_ip : aws_network_interface.private1.*.private_ip 52 | private_ips = length(compact(local.internal_private_ip_primary)) > 0 ? aws_network_interface.private.*.private_ips : aws_network_interface.private1.*.private_ips 53 | } 54 | } 55 | } 56 | 57 | output onboard_do { 58 | value = local.total_nics > 1 ? (local.total_nics == 2 ? 
data.template_file.clustermemberDO2[0].rendered : data.template_file.clustermemberDO3[0].rendered) : data.template_file.clustermemberDO1[0].rendered 59 | depends_on = [data.template_file.clustermemberDO1[0], data.template_file.clustermemberDO2[0], data.template_file.clustermemberDO3[0]] 60 | } 61 | -------------------------------------------------------------------------------- /aws/f5module/variables.tf: -------------------------------------------------------------------------------- 1 | variable app_name { 2 | type = string 3 | default = "" 4 | } 5 | 6 | variable app_id { 7 | type = string 8 | default = "" 9 | } 10 | 11 | variable hostname { 12 | type = string 13 | default = "" 14 | } 15 | 16 | variable tg_arn { 17 | type = string 18 | default = "" 19 | } 20 | variable "ext_gw" { default = "10.0.0.1"} 21 | 22 | variable azure_region { 23 | type = string 24 | default = "" 25 | } 26 | 27 | variable "dns_server" { default = "8.8.8.8" } 28 | variable "ntp_server" { default = "0.us.pool.ntp.org" } 29 | variable "timezone" { default = "UTC" } 30 | 31 | variable "app" {default = "app1" } 32 | 33 | variable splunkIP { 34 | type = string 35 | } 36 | 37 | variable splunkHEC { 38 | type = string 39 | } 40 | 41 | variable logStashIP { 42 | type = string 43 | } 44 | 45 | variable law_id { 46 | type = string 47 | } 48 | 49 | variable law_primarykey { 50 | type = string 51 | } 52 | 53 | variable ts_consumer { 54 | type = number 55 | } 56 | 57 | variable consul_ip { 58 | description = "consul server IP address" 59 | type = string 60 | } 61 | 62 | variable prefix { 63 | description = "Prefix for resources created by this module" 64 | type = string 65 | } 66 | 67 | variable f5_username { 68 | description = "The admin username of the F5 Bigip that will be deployed" 69 | default = "bigipuser" 70 | } 71 | 72 | variable f5_password { 73 | description = "Password of the F5 Bigip that will be deployed" 74 | default = "F5testnet!" 
75 | } 76 | 77 | variable f5_ami_search_name { 78 | description = "BIG-IP AMI name to search for" 79 | type = string 80 | //default = "F5 BIGIP-*" 81 | default = "F5 BIGIP-16.1* PAYG-Best 200Mbps*" 82 | #default = "F5 Networks BIGIP-14.0.1-0.0.14 PAYG - Best 1Gbps - 20190817094829-929ca0d8-c2d7-4068-8f9a-eb75a677afed-ami-047292a9177e2e328.4" 83 | } 84 | 85 | variable f5_instance_count { 86 | description = "Number of BIG-IPs to deploy" 87 | type = number 88 | default = 1 89 | } 90 | 91 | variable ec2_instance_type { 92 | description = "AWS EC2 instance type" 93 | type = string 94 | default = "m5.xlarge" 95 | #default = "c4.xlarge" 96 | } 97 | 98 | variable ec2_key_name { 99 | description = "AWS EC2 Key name for SSH access" 100 | type = string 101 | default = "~/.ssh/id_rsa.pub" 102 | //default = "tf-demo-key" 103 | } 104 | 105 | variable mgmt_eip { 106 | description = "Enable an Elastic IP address on the management interface" 107 | type = bool 108 | default = true 109 | } 110 | 111 | variable aws_secretmanager_auth { 112 | description = "Whether to use secret manager to pass authentication" 113 | type = bool 114 | default = false 115 | } 116 | 117 | variable aws_secretmanager_secret_id { 118 | description = "AWS Secret Manager Secret ID that stores the BIG-IP password" 119 | type = string 120 | default = null 121 | } 122 | 123 | variable aws_iam_instance_profile { 124 | description = "aws_iam_instance_profile" 125 | type = string 126 | default = null 127 | } 128 | 129 | variable mgmt_subnet_ids { 130 | description = "The subnet id of the virtual network where the virtual machines will reside." 131 | type = list(object({ 132 | subnet_id = string 133 | public_ip = bool 134 | private_ip_primary = string 135 | })) 136 | default = [{ "subnet_id" = null, "public_ip" = null, "private_ip_primary" = null }] 137 | } 138 | 139 | variable external_subnet_ids { 140 | description = "The subnet id of the virtual network where the virtual machines will reside." 141 | type = list(object({ 142 | subnet_id = string 143 | public_ip = bool 144 | private_ip_primary = string 145 | private_ip_secondary = string 146 | })) 147 | default = [{ "subnet_id" = null, "public_ip" = null, "private_ip_primary" = null, "private_ip_secondary" = null }] 148 | } 149 | 150 | variable internal_subnet_ids { 151 | description = "The subnet id of the virtual network where the virtual machines will reside." 
152 | type = list(object({ 153 | subnet_id = string 154 | public_ip = bool 155 | private_ip_primary = string 156 | })) 157 | default = [{ "subnet_id" = null, "public_ip" = null, "private_ip_primary" = null }] 158 | } 159 | 160 | variable internal_source_dest_check { 161 | description = "Disable source/dest check on Internal interface to allow inline routing for backends" 162 | default = true 163 | } 164 | 165 | variable external_source_dest_check { 166 | description = "Disable source/dest check on External interface to allow inline routing for backends" 167 | default = true 168 | } 169 | 170 | variable mgmt_securitygroup_ids { 171 | description = "The Network Security Group ids for management network " 172 | type = list(string) 173 | } 174 | 175 | variable external_securitygroup_ids { 176 | description = "The Network Security Group ids for external network " 177 | type = list(string) 178 | default = [] 179 | } 180 | 181 | variable internal_securitygroup_ids { 182 | description = "The Network Security Group ids for internal network " 183 | type = list(string) 184 | default = [] 185 | } 186 | 187 | ## Please check and update the latest DO URL from https://github.com/F5Networks/f5-declarative-onboarding/releases 188 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 189 | variable DO_URL { 190 | description = "URL to download the BIG-IP Declarative Onboarding module" 191 | type = string 192 | default = "https://github.com/F5Networks/f5-declarative-onboarding/releases/download/v1.18.0/f5-declarative-onboarding-1.18.0-4.noarch.rpm" 193 | } 194 | ## Please check and update the latest AS3 URL from https://github.com/F5Networks/f5-appsvcs-extension/releases/latest 195 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 196 | variable AS3_URL { 197 | description = "URL to download the BIG-IP Application Service Extension 3 (AS3) module" 198 | type = string 199 | default = "https://github.com/F5Networks/f5-appsvcs-extension/releases/download/v3.25.0/f5-appsvcs-3.25.0-3.noarch.rpm" 200 | } 201 | 202 | ## Please check and update the latest TS URL from https://github.com/F5Networks/f5-telemetry-streaming/releases/latest 203 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 204 | variable TS_URL { 205 | description = "URL to download the BIG-IP Telemetry Streaming module" 206 | type = string 207 | default = "https://github.com/F5Networks/f5-telemetry-streaming/releases/download/v1.17.0/f5-telemetry-1.17.0-4.noarch.rpm" 208 | } 209 | 210 | ## Please check and update the latest Failover Extension URL from https://github.com/f5devcentral/f5-cloud-failover-extension/releases/latest 211 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 212 | variable CFE_URL { 213 | description = "URL to download the BIG-IP Cloud Failover Extension module" 214 | type = string 215 | default = "https://github.com/F5Networks/f5-cloud-failover-extension/releases/download/v1.7.1/f5-cloud-failover-1.7.1-1.noarch.rpm" 216 | } 217 | 218 | ## Please check and update the latest FAST URL from https://github.com/F5Networks/f5-appsvcs-templates/releases/latest 219 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 220 | variable FAST_URL { 221 | description = "URL to download the BIG-IP FAST module" 222 | type = string 223 | default = 
"https://github.com/F5Networks/f5-appsvcs-templates/releases/download/v1.6.1/f5-appsvcs-templates-1.6.1-1.noarch.rpm" 224 | } 225 | ## Please check and update the latest runtime init URL from https://github.com/F5Networks/f5-bigip-runtime-init/releases/latest 226 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 227 | variable INIT_URL { 228 | description = "URL to download the BIG-IP runtime init" 229 | type = string 230 | default = "https://cdn.f5.com/product/cloudsolutions/f5-bigip-runtime-init/v1.2.0/dist/f5-bigip-runtime-init-1.2.0-1.gz.run" 231 | } 232 | variable libs_dir { 233 | description = "Directory on the BIG-IP to download the A&O Toolchain into" 234 | type = string 235 | default = "/config/cloud/aws/node_modules" 236 | } 237 | 238 | variable onboard_log { 239 | description = "Directory on the BIG-IP to store the cloud-init logs" 240 | type = string 241 | default = "/var/log/startup-script.log" 242 | } 243 | 244 | variable custom_user_data { 245 | description = "Provide a custom bash script or cloud-init script the BIG-IP will run on creation" 246 | type = string 247 | default = null 248 | } 249 | -------------------------------------------------------------------------------- /aws/terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | github = { 4 | source = "integrations/github" 5 | version = "4.9.4" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = var.region 12 | } 13 | 14 | locals { 15 | allowed_mgmt_cidr = "0.0.0.0/0" 16 | allowed_app_cidr = "0.0.0.0/0" 17 | hostname = format("bigip.aws.%s.com", local.app_id) 18 | event_timestamp = formatdate("YYYY-MM-DD hh:mm:ss",timestamp()) 19 | app_id = random_id.id.hex 20 | } 21 | 22 | # Configure the GitHub Provider 23 | provider "github" { 24 | token = var.github_token 25 | owner = var.github_owner 26 | } 27 | 28 | resource "github_repository_file" "adpm" { 29 | repository = "adc-telemetry-based-autoscaling" 30 | branch = "main" 31 | file = "aws/consul_server.cfg" 32 | content = format("http://%s:8500", aws_instance.consulvm.public_ip) 33 | commit_message = format("file contents update by application ID: %s", local.app_id) 34 | overwrite_on_create = true 35 | } 36 | 37 | # 38 | # Create a random id 39 | # 40 | resource "random_id" "id" { 41 | byte_length = 2 42 | } 43 | 44 | # 45 | # Create random password for BIG-IP 46 | # 47 | resource random_string password { 48 | length = 16 49 | min_upper = 1 50 | min_lower = 1 51 | min_numeric = 1 52 | special = false 53 | } 54 | 55 | resource "aws_iam_role" "main" { 56 | name = format("%s-iam-role", local.app_id) 57 | assume_role_policy = < 1 ? 
"443" : "8443" 15 | } 16 | 17 | output f5_username { 18 | value = var.f5_username 19 | } 20 | 21 | output bigip_password { 22 | value = local.upass 23 | } 24 | 25 | output onboard_do { 26 | value = data.template_file.clustermemberDO1[0].rendered 27 | depends_on = [data.template_file.clustermemberDO1[0]] 28 | 29 | } 30 | 31 | output mgmt_nic { 32 | value = azurerm_network_interface.mgmt_nic.*.id 33 | } 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /azure/f5module/variables.tf: -------------------------------------------------------------------------------- 1 | variable splunkIP { 2 | type = string 3 | } 4 | 5 | variable splunkHEC { 6 | type = string 7 | } 8 | 9 | variable logStashIP { 10 | type = string 11 | } 12 | 13 | variable law_id { 14 | type = string 15 | } 16 | 17 | variable law_primarykey { 18 | type = string 19 | } 20 | 21 | variable ts_consumer { 22 | type = number 23 | } 24 | 25 | variable prefix { 26 | description = "Prefix for resources created by this module" 27 | type = string 28 | } 29 | 30 | variable consul_ip { 31 | description = "consul server IP address" 32 | type = string 33 | } 34 | 35 | variable backend_pool_id { 36 | type = string 37 | } 38 | 39 | 40 | variable f5_username { 41 | description = "The admin username of the F5 Bigip that will be deployed" 42 | default = "bigipuser" 43 | } 44 | 45 | variable resource_group_name { 46 | description = "The name of the resource group in which the resources will be created" 47 | type = string 48 | } 49 | 50 | variable mgmt_subnet_ids { 51 | description = "List of maps of subnetids of the virtual network where the virtual machines will reside." 52 | type = list(object({ 53 | subnet_id = string 54 | public_ip = bool 55 | private_ip_primary = string 56 | })) 57 | default = [{ "subnet_id" = null, "public_ip" = null, "private_ip_primary" = null }] 58 | } 59 | 60 | variable external_subnet_ids { 61 | description = "List of maps of subnetids of the virtual network where the virtual machines will reside." 62 | type = list(object({ 63 | subnet_id = string 64 | public_ip = bool 65 | private_ip_primary = string 66 | private_ip_secondary = string 67 | })) 68 | default = [{ "subnet_id" = null, "public_ip" = null, "private_ip_primary" = null, "private_ip_secondary" = null }] 69 | } 70 | 71 | variable internal_subnet_ids { 72 | description = "List of maps of subnetids of the virtual network where the virtual machines will reside." 73 | type = list(object({ 74 | subnet_id = string 75 | public_ip = bool 76 | private_ip_primary = string 77 | })) 78 | default = [{ "subnet_id" = null, "public_ip" = null, "private_ip_primary" = null }] 79 | } 80 | 81 | 82 | variable mgmt_securitygroup_ids { 83 | description = "List of network Security Groupids for management network " 84 | type = list(string) 85 | } 86 | 87 | variable external_securitygroup_ids { 88 | description = "List of network Security Groupids for external network " 89 | type = list(string) 90 | default = [] 91 | } 92 | 93 | variable internal_securitygroup_ids { 94 | description = "List of network Security Groupids for internal network " 95 | type = list(string) 96 | default = [] 97 | } 98 | 99 | variable f5_instance_type { 100 | description = "Specifies the size of the virtual machine." 
101 | type = string 102 | default = "Standard_DS3_v2" 103 | } 104 | 105 | variable f5_image_name { 106 | type = string 107 | default = "f5-bigip-virtual-edition-200m-best-hourly" 108 | } 109 | variable f5_version { 110 | type = string 111 | default = "latest" 112 | } 113 | 114 | variable f5_product_name { 115 | type = string 116 | default = "f5-big-ip-best" 117 | } 118 | 119 | variable storage_account_type { 120 | description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS." 121 | default = "Standard_LRS" 122 | } 123 | 124 | variable enable_accelerated_networking { 125 | type = bool 126 | description = "(Optional) Enable accelerated networking on Network interface" 127 | default = false 128 | } 129 | 130 | variable enable_ssh_key { 131 | type = bool 132 | description = "(Optional) Enable ssh key authentication in Linux virtual Machine" 133 | default = false 134 | } 135 | 136 | variable script_name { 137 | type = string 138 | default = "f5_onboard" 139 | } 140 | 141 | variable "dns_server" { default = "8.8.8.8" } 142 | variable "ntp_server" { default = "0.us.pool.ntp.org" } 143 | variable "timezone" { default = "UTC" } 144 | variable "ext_gw" { default = "10.2.1.1"} 145 | 146 | variable "app" {default = "app1" } 147 | variable "backend01ext" { default = "10.2.1.101" } 148 | 149 | variable "tls_cert" {default = ""} 150 | variable "tls_key" {default = ""} 151 | variable "cipherText" {default = ""} 152 | variable "protectedVal" {default = ""} 153 | 154 | ## Please check and update the latest DO URL from https://github.com/F5Networks/f5-declarative-onboarding/releases 155 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 156 | variable doPackageUrl { 157 | description = "URL to download the BIG-IP Declarative Onboarding module" 158 | type = string 159 | //default = "" 160 | default = "https://github.com/F5Networks/f5-declarative-onboarding/releases/download/v1.18.0/f5-declarative-onboarding-1.18.0-4.noarch.rpm" 161 | } 162 | ## Please check and update the latest AS3 URL from https://github.com/F5Networks/f5-appsvcs-extension/releases/latest 163 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 164 | variable as3PackageUrl { 165 | description = "URL to download the BIG-IP Application Service Extension 3 (AS3) module" 166 | type = string 167 | //default = "" 168 | default = "https://github.com/F5Networks/f5-appsvcs-extension/releases/download/v3.25.0/f5-appsvcs-3.25.0-3.noarch.rpm" 169 | } 170 | 171 | ## Please check and update the latest TS URL from https://github.com/F5Networks/f5-telemetry-streaming/releases/latest 172 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 173 | variable tsPackageUrl { 174 | description = "URL to download the BIG-IP Telemetry Streaming module" 175 | type = string 176 | //default = "" 177 | default = "https://github.com/F5Networks/f5-telemetry-streaming/releases/download/v1.17.0/f5-telemetry-1.17.0-4.noarch.rpm" 178 | } 179 | 180 | ## Please check and update the latest FAST URL from https://github.com/F5Networks/f5-appsvcs-templates/releases/latest 181 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 182 | variable fastPackageUrl { 183 | description = "URL to download the BIG-IP FAST module" 184 | type = string 185 | //default = "" 186 | default = 
"https://github.com/F5Networks/f5-appsvcs-templates/releases/download/v1.4.0/f5-appsvcs-templates-1.4.0-1.noarch.rpm" 187 | } 188 | 189 | ## Please check and update the latest Failover Extension URL from https://github.com/F5Networks/f5-cloud-failover-extension/releases/latest 190 | # always point to a specific version in order to avoid inadvertent configuration inconsistency 191 | variable cfePackageUrl { 192 | description = "URL to download the BIG-IP Cloud Failover Extension module" 193 | type = string 194 | //default = "" 195 | default = "https://github.com/F5Networks/f5-cloud-failover-extension/releases/download/v1.6.1/f5-cloud-failover-1.6.1-1.noarch.rpm" 196 | } 197 | 198 | variable libs_dir { 199 | description = "Directory on the BIG-IP to download the A&O Toolchain into" 200 | default = "/config/cloud/azure/node_modules" 201 | type = string 202 | } 203 | variable onboard_log { 204 | description = "Directory on the BIG-IP to store the cloud-init logs" 205 | default = "/var/log/startup-script.log" 206 | type = string 207 | } 208 | 209 | variable availabilityZones { 210 | description = "If you want the VM placed in an Azure Availability Zone, and the Azure region you are deploying to supports it, specify the numbers of the existing Availability Zone you want to use." 211 | type = list 212 | default = [1] 213 | } 214 | 215 | variable azure_secret_rg { 216 | description = "The name of the resource group in which the Azure Key Vault exists" 217 | type = string 218 | default = "" 219 | } 220 | 221 | variable az_key_vault_authentication { 222 | description = "Whether to use key vault to pass authentication" 223 | type = bool 224 | default = false 225 | } 226 | 227 | variable azure_keyvault_name { 228 | description = "The name of the Azure Key Vault to use" 229 | type = string 230 | default = "" 231 | } 232 | 233 | variable app_name { 234 | type = string 235 | default = "" 236 | } 237 | 238 | variable app_id { 239 | type = string 240 | default = "" 241 | } 242 | 243 | variable azure_keyvault_secret_name { 244 | description = "The name of the Azure Key Vault secret containing the password" 245 | type = string 246 | default = "" 247 | } 248 | 249 | -------------------------------------------------------------------------------- /azure/terraform/outputs.tf: -------------------------------------------------------------------------------- 1 | output app_id { 2 | value = local.app_id 3 | } 4 | 5 | output "b_bigip_management_address" { 6 | value = "https://${module.bigip.0.mgmtPublicIP}:8443" 7 | } 8 | 9 | output "c_application_address" { 10 | description = "Public endpoint for load balancing external app" 11 | value = "https://${azurerm_public_ip.nlb_public_ip.ip_address}" 12 | } 13 | 14 | output "d_consul_public_address" { 15 | value = "https://${azurerm_public_ip.consul_public_ip.ip_address}:8443" 16 | } 17 | 18 | output "e_AlertForwarder_public_address" { 19 | value = "https://${azurerm_public_ip.consul_public_ip.ip_address}:8000" 20 | } -------------------------------------------------------------------------------- /azure/terraform/terraform.tfvars: -------------------------------------------------------------------------------- 1 | location = "eastus" 2 | AllowedIPs = ["0.0.0.0/0"] 3 | github_owner = "" 4 | repo_path = "/repos/f5devcentral/adc-telemetry-based-autoscaling/dispatches" 5 | github_token = "" 6 | bigip_count = 1 7 | workload_count = 2 8 | bigip_min = 1 9 | bigip_max = 5 10 | workload_min = 1 11 | workload_max = 5 12 | scale_interval = 300 13 | ts_consumer = 1 14 | splunkIP = "" 
15 | splunkHEC = "" 16 | logStashIP = "" 17 | law_id = "" 18 | law_primarykey = "" 19 | -------------------------------------------------------------------------------- /azure/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable splunkIP { 2 | type = string 3 | } 4 | 5 | variable splunkHEC { 6 | type = string 7 | } 8 | 9 | variable logStashIP { 10 | type = string 11 | } 12 | 13 | variable law_id { 14 | type = string 15 | } 16 | 17 | variable law_primarykey { 18 | type = string 19 | } 20 | 21 | variable ts_consumer { 22 | description = "The analytics consumer to connect to: 1 = Splunk, 2 = ELK, 3 = Azure Log Analytics" 23 | type = number 24 | } 25 | 26 | variable bigip_count { 27 | description = "Number of BIG-IP instances to create (from Terraform 0.13, the module supports the count feature to spin up multiple instances)" 28 | type = number 29 | } 30 | 31 | variable workload_count { 32 | description = "Number of backend application instances to create (from Terraform 0.13, the module supports the count feature to spin up multiple instances)" 33 | type = number 34 | } 35 | 36 | variable bigip_min { 37 | type = number 38 | default = 1 39 | } 40 | 41 | variable bigip_max { 42 | type = number 43 | default = 4 44 | } 45 | 46 | variable workload_min { 47 | type = number 48 | default = 1 49 | } 50 | 51 | variable workload_max { 52 | type = number 53 | default = 4 54 | } 55 | variable scale_interval { 56 | type = number 57 | default = 300 58 | } 59 | 60 | variable app_name { 61 | type = string 62 | default = "sample_app" 63 | } 64 | 65 | variable consul_ip { 66 | type = string 67 | description = "private address assigned to consul server" 68 | default = "10.2.1.100" 69 | } 70 | 71 | variable github_token { 72 | type = string 73 | description = "repo token required to update secrets" 74 | } 75 | 76 | variable github_owner { 77 | type = string 78 | description = "repo owner required to update secrets" 79 | default = "" 80 | } 81 | 82 | variable repo_path { 83 | type = string 84 | description = "repo path for github actions" 85 | default = "" 86 | } 87 | 88 | variable prefix { 89 | description = "Prefix for resources created by this module" 90 | type = string 91 | default = "application" 92 | } 93 | 94 | variable location {default = "eastus"} 95 | 96 | variable cidr { 97 | description = "Azure VPC CIDR" 98 | type = string 99 | default = "10.2.0.0/16" 100 | } 101 | 102 | variable upassword {default = "F5demonet!"} 103 | 104 | variable availabilityZones { 105 | description = "If you want the VM placed in an Azure Availability Zone, and the Azure region you are deploying to supports it, specify the numbers of the existing Availability Zone you want to use." 106 | type = list 107 | default = [2] 108 | } 109 | 110 | variable AllowedIPs { 111 | } 112 | 113 | # TAGS 114 | variable "purpose" { default = "public" } 115 | variable "environment" { default = "f5env" } #ex. 
dev/staging/prod 116 | variable "owner" { default = "f5owner" } 117 | variable "group" { default = "f5group" } 118 | variable "costcenter" { default = "f5costcenter" } 119 | variable "application" { default = "f5app" } 120 | -------------------------------------------------------------------------------- /azure/terraform/workload.tf: -------------------------------------------------------------------------------- 1 | # 2 | # Create backend application workloads 3 | # 4 | resource "azurerm_network_interface" "appnic" { 5 | count = var.workload_count 6 | name = "app_nic_${count.index}" 7 | location = azurerm_resource_group.rg.location 8 | resource_group_name = azurerm_resource_group.rg.name 9 | 10 | ip_configuration { 11 | name = "testConfiguration" 12 | subnet_id = data.azurerm_subnet.mgmt.id 13 | private_ip_address_allocation = "dynamic" 14 | } 15 | } 16 | 17 | resource "azurerm_managed_disk" "appdisk" { 18 | count = var.workload_count 19 | name = "datadisk_existing_${count.index}" 20 | location = azurerm_resource_group.rg.location 21 | resource_group_name = azurerm_resource_group.rg.name 22 | storage_account_type = "Standard_LRS" 23 | create_option = "Empty" 24 | disk_size_gb = "1023" 25 | } 26 | 27 | resource "azurerm_availability_set" "avset" { 28 | name = "avset" 29 | location = azurerm_resource_group.rg.location 30 | resource_group_name = azurerm_resource_group.rg.name 31 | platform_fault_domain_count = 2 32 | platform_update_domain_count = 2 33 | managed = true 34 | } 35 | 36 | data "template_file" "backendapp" { 37 | template = file("../../templates/backendapp.tpl") 38 | vars = { 39 | app_id = local.app_id 40 | consul_ip = var.consul_ip 41 | } 42 | } 43 | 44 | resource "azurerm_virtual_machine" "app" { 45 | count = var.workload_count 46 | name = "app_vm_${count.index}" 47 | location = azurerm_resource_group.rg.location 48 | availability_set_id = azurerm_availability_set.avset.id 49 | resource_group_name = azurerm_resource_group.rg.name 50 | network_interface_ids = [element(azurerm_network_interface.appnic.*.id, count.index)] 51 | vm_size = "Standard_DS1_v2" 52 | 53 | 54 | # Uncomment this line to delete the OS disk automatically when deleting the VM 55 | delete_os_disk_on_termination = true 56 | 57 | # Uncomment this line to delete the data disks automatically when deleting the VM 58 | delete_data_disks_on_termination = true 59 | 60 | storage_image_reference { 61 | publisher = "Canonical" 62 | offer = "UbuntuServer" 63 | sku = "18.04-LTS" 64 | version = "latest" 65 | } 66 | 67 | storage_os_disk { 68 | name = "myosdisk${count.index}" 69 | caching = "ReadWrite" 70 | create_option = "FromImage" 71 | managed_disk_type = "Standard_LRS" 72 | } 73 | 74 | # Optional data disks 75 | storage_data_disk { 76 | name = "datadisk_new_${count.index}" 77 | managed_disk_type = "Standard_LRS" 78 | create_option = "Empty" 79 | lun = 0 80 | disk_size_gb = "1023" 81 | } 82 | 83 | storage_data_disk { 84 | name = element(azurerm_managed_disk.appdisk.*.name, count.index) 85 | managed_disk_id = element(azurerm_managed_disk.appdisk.*.id, count.index) 86 | create_option = "Attach" 87 | lun = 1 88 | disk_size_gb = element(azurerm_managed_disk.appdisk.*.disk_size_gb, count.index) 89 | } 90 | 91 | os_profile { 92 | computer_name = format("workload-%s", count.index) 93 | admin_username = "appuser" 94 | admin_password = var.upassword 95 | custom_data = data.template_file.backendapp.rendered 96 | } 97 | 98 | os_profile_linux_config { 99 | disable_password_authentication = false 100 | } 101 | 102 | tags = { 103 | 
Name = "${var.environment}-backendapp_${count.index}" 104 | environment = var.environment 105 | owner = var.owner 106 | group = var.group 107 | costcenter = var.costcenter 108 | application = var.application 109 | tag_name = "Env" 110 | value = "consul" 111 | propagate_at_launch = true 112 | key = "Env" 113 | value = "consul" 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /azure/workflow_state/main.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "backend_file" { 2 | template = file("../../templates/backend.tpl") 3 | vars = { 4 | app_id = var.app_id 5 | bigip_count = var.bigip_count 6 | workload_count = var.workload_count 7 | consul_ip = var.consul_ip 8 | } 9 | } 10 | 11 | resource "local_file" "backend" { 12 | content = data.template_file.backend_file.rendered 13 | filename = "../workflow_terraform/backend.tf" 14 | } -------------------------------------------------------------------------------- /azure/workflow_state/variables.tf: -------------------------------------------------------------------------------- 1 | variable bigip_count { type = number } 2 | variable workload_count { type = number } 3 | variable app_id { type = string } 4 | variable consul_ip { type = string } -------------------------------------------------------------------------------- /azure/workflow_terraform/outputs.tf: -------------------------------------------------------------------------------- 1 | output app_id { 2 | value = local.app_id 3 | } 4 | 5 | output "b_bigip_management_address" { 6 | value = "https://${module.bigip.0.mgmtPublicIP}:8443" 7 | } 8 | 9 | output "c_application_address" { 10 | description = "Public endpoint for load balancing external app" 11 | value = "https://${azurerm_public_ip.nlb_public_ip.ip_address}" 12 | } 13 | 14 | output "d_consul_public_address" { 15 | value = "https://${azurerm_public_ip.consul_public_ip.ip_address}:8443" 16 | } 17 | 18 | output "e_AlertForwarder_public_address" { 19 | value = "https://${azurerm_public_ip.consul_public_ip.ip_address}:8000" 20 | } -------------------------------------------------------------------------------- /azure/workflow_terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable splunkIP { 2 | type = string 3 | } 4 | 5 | variable splunkHEC { 6 | type = string 7 | } 8 | 9 | variable logStashIP { 10 | type = string 11 | } 12 | 13 | variable law_id { 14 | type = string 15 | } 16 | 17 | variable law_primarykey { 18 | type = string 19 | } 20 | 21 | variable ts_consumer { 22 | type = number 23 | } 24 | 25 | variable app_name { 26 | type = string 27 | default = "sample_app" 28 | } 29 | 30 | variable bigip_min { 31 | type = number 32 | default = 1 33 | } 34 | 35 | variable bigip_max { 36 | type = number 37 | default = 4 38 | } 39 | 40 | variable workload_min { 41 | type = number 42 | default = 1 43 | } 44 | 45 | variable workload_max { 46 | type = number 47 | default = 4 48 | } 49 | variable scale_interval { 50 | type = number 51 | default = 300 52 | } 53 | 54 | variable repo_path { 55 | type = string 56 | description = "repo path for github actions" 57 | default = "" 58 | } 59 | 60 | variable consul_ip { 61 | type = string 62 | description = "private address assigned to consul server" 63 | default = "10.2.1.100" 64 | } 65 | 66 | variable github_token { 67 | type = string 68 | description = "repo token required to update secrets" 69 | } 70 | 71 | variable github_owner { 72 | 
type = string 73 | description = "repo owner required to update secrets" 74 | } 75 | 76 | variable prefix { 77 | description = "Prefix for resources created by this module" 78 | type = string 79 | default = "application" 80 | } 81 | 82 | variable location { 83 | } 84 | 85 | variable cidr { 86 | description = "Azure VPC CIDR" 87 | type = string 88 | default = "10.2.0.0/16" 89 | } 90 | 91 | variable upassword {default = "F5demonet!"} 92 | 93 | variable availabilityZones { 94 | description = "If you want the VM placed in an Azure Availability Zone, and the Azure region you are deploying to supports it, specify the numbers of the existing Availability Zone you want to use." 95 | type = list 96 | default = [2] 97 | } 98 | 99 | variable AllowedIPs { 100 | default = ["0.0.0.0/0"] 101 | } 102 | 103 | # TAGS 104 | variable "purpose" { default = "public" } 105 | variable "environment" { default = "f5env" } #ex. dev/staging/prod 106 | variable "owner" { default = "f5owner" } 107 | variable "group" { default = "f5group" } 108 | variable "costcenter" { default = "f5costcenter" } 109 | variable "application" { default = "f5app" } 110 | -------------------------------------------------------------------------------- /configs/alertforwarder.js: -------------------------------------------------------------------------------- 1 | const express = require( 'express' ); 2 | const app = express(); 3 | const fs = require('fs'); 4 | const bodyParser = require('body-parser'); 5 | const https = require('https'); 6 | const http = require('http'); 7 | const args = process.argv.slice(2) //Required to authenticate with Github action repo 8 | const repoPath = '/repos/f5devcentral/adc-telemetry-based-autoscaling/dispatches' //Modify to match designated github action repo 9 | 10 | /* 11 | Create Listening server - receive alerts from analytics provider 12 | */ 13 | const options1 = { 14 | key: "-----BEGIN PRIVATE 
KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDTGBNbVYLJiIDf\nL0FQMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sX\nfCtybszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A\n8fRfQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz\n4qS2IJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgF\nsJOlDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNY\ndHLgC3dPAgMBAAECggEADudNPo6L/FSR3LAGaXYRoaoT7dBuwMxQUR+/opUbYIFx\n0gOPbftL5vYrfNjQVkT13a4WDH6OzQilqLPQkXS3K+bl8v+lVNEMlYgtDOOgEh/8\n13pThxDTUtFRgkK9HlUfSq1Yz06A0hfvxRmQCkWXBCVaoL4KWep7o9DMUqWR+4ad\nXlvzvG2W3fvNE3+ewwf0tR/OYTQOZvkRfm0Ws1s0W85wr6Ec87psbLPPO2yecFcq\n3fJjcZmbaWWG5Thh9479W3rhC3I6rJN+YLgyXoumml5wmmjf8CxocUL3uPt+32u5\nE4OZTLdAIF0+KxH3hYbw3D6DB/LnAZVB+jxmOC4j2QKBgQDm5JVzld5KYUlIt566\nsyQ95JMyw0Oqp1U7WMwI8+RMYnO4NPo6Dzej9LMsVAQFmg5DncElSf3PLC9PfsVe\nCK6FiXPScy/9cAqchJVI3f7CgJiYvrFwoiieVJFYSgh52aWxL/KHnEe4UWk50qmS\n/hCyPdSCHJVw1oh5dIO/QGG+YwKBgQDqDFi8mNrUI/QD+m/2HNT+6PaXUWEfyY94\n/swvn9O+qQYWbU8MCxeucthTJ5p5lYY5FdUeKPGZn0jahgoEV63XnuemNe43tOJA\nDpo1UyWmQoodAOOm9QiEEjOAxx+hEcSfJrEUgGSYVR+GHbap+xuB0BrtCN9qWsdb\nU2d25b4xJQKBgQCV4SAardhkVA6sQ3WoIA2Ql8Xtl89fAcxT/+pCjX9PDkGr+8fK\n1IH7ziZYyhjqQfysa8UrHYLCbx4u7k3UIrKXQIiMvfUTAR4CSBZX/LMZMzzbOj4Y\nrUrMrHzE4Rnrbxsdj9BRs2LjBQXXYSZuornX2kcORtvDKZ/hp362MWbBnQKBgQCo\nSZZojXQTQ4LKdYGZsmOIotPkO9SdOZ3a/0KsH7zuA7Tn3VMQMs2lVtiq+ff94oCv\nfT5PQFtv/XMyBV0ggDb0qkKgZXjTP1HLg3RoUU/p+0A52JDYVKn55Oh5eTQJ6a+6\nS+TZ+/PZAKP5GFZmZLMDpTInK9ERNRLRXOgxOsKFrQKBgQDH6PfQTuvubwL+CYbb\nCI1AtWOGEGcuLIbtlbh5e4/1FxtdG2pgV2wBJxIwNhn8U7yMHj9B/MB39OAt6Vlc\nZU0Dah41RMi4dPGAi/iuTQklfRLjROSVmhb/lS9xDRxzHcm0u0YBuU0Q+MC3aw7O\njXWs11QDs5AR93mLB0AZdRjGLA==\n-----END PRIVATE KEY-----", 15 | cert: "-----BEGIN CERTIFICATE-----\nMIIFWjCCBEKgAwIBAgITfQAAAB0gY6x6LLG8KwAAAAAAHTANBgkqhkiG9w0BAQUF\nADBOMRMwEQYKCZImiZPyLGQBGRYDY29tMRowGAYKCZImiZPyLGQBGRYKYXNlcnJh\nY29ycDEbMBkGA1UEAxMSYXNlcnJhY29ycC1EQy1DQS0xMB4XDTIwMDIxNTIyMTIw\nMloXDTIyMDIxNDIyMTIwMlowHzEdMBsGA1UEAxMUbXlhcHAuYXNlcnJhY29ycC5j\nb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTGBNbVYLJiIDfL0FQ\nMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sXfCty\nbszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A8fRf\nQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz4qS2\nIJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgFsJOl\nDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNYdHLg\nC3dPAgMBAAGjggJeMIICWjA7BgkrBgEEAYI3FQcELjAsBiQrBgEEAYI3FQiBg9Bp\ng5vnB4b5lxighjeD8YE0eYL+ujyChWkCAWQCAQMwEwYDVR0lBAwwCgYIKwYBBQUH\nAwEwDgYDVR0PAQH/BAQDAgWgMBsGCSsGAQQBgjcVCgQOMAwwCgYIKwYBBQUHAwEw\nHQYDVR0OBBYEFMXq6/mUs8bg5TUoL3uXPUyyAFyXMB8GA1UdIwQYMBaAFBEzMhC4\nl6myjmO0WBY2s0tLj1fQMIHOBgNVHR8EgcYwgcMwgcCggb2ggbqGgbdsZGFwOi8v\nL0NOPWFzZXJyYWNvcnAtREMtQ0EtMSxDTj1kYyxDTj1DRFAsQ049UHVibGljJTIw\nS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1h\nc2VycmFjb3JwLERDPWNvbT9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/\nb2JqZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0aW9uUG9pbnQwgccGCCsGAQUFBwEBBIG6\nMIG3MIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049YXNlcnJhY29ycC1EQy1DQS0x\nLENOPUFJQSxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxD\nTj1Db25maWd1cmF0aW9uLERDPWFzZXJyYWNvcnAsREM9Y29tP2NBQ2VydGlmaWNh\ndGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0aG9yaXR5MA0GCSqG\nSIb3DQEBBQUAA4IBAQC8xoaBDhn0BGqQ73ROjlvI+5yJs3UUws2D7KCtpzNt2Ksm\ngm52umZoIzofPRXg/RVkt+Ig7Y9+ixaEyAxMFtpDyap1bTNjWsw99LoUZvMo7B9q\nrgOS55h5OeLwc1PM3n43I9H2C3uQ1hYflD3ovzvzrywejCHlHlReovZkYCcrDCa+\nytw7Hob0P1vkXsPtpmk61A7PCLw6DghhczT1f4nAK147FuRl55jz38JFOtqKVlfU\nNH4EaSxciHO2ev
WDHUddzeAwxHLg77UKPH+MSPXd7jGZx3xqQEtpjMqq5WM09YsL\n1mwOJpk1Xarkb0WB0J10YXqKs6tSxyrfX/FL5MZA\n-----END CERTIFICATE-----" 16 | }; 17 | 18 | https.createServer(options1, function (request, response) { 19 | if (request.method == 'POST') { 20 | 21 | const { headers, method, url } = request; 22 | let body = []; 23 | request.on('error', (err) => { 24 | console.error(err); 25 | 26 | }).on('data', (chunk) => { 27 | body.push(chunk); 28 | }).on('end', () => { 29 | body = Buffer.concat(body).toString(); 30 | bodyJson = JSON.parse(body); 31 | source = bodyJson.source; 32 | scaleAction = bodyJson.scaleAction; 33 | console.log(bodyJson); 34 | 35 | 36 | if (scaleAction == null){ 37 | console.log("error with scaleAction"); 38 | response.end(); 39 | }; 40 | 41 | if (source == "azureLogs"){ 42 | analytic = "azure" 43 | vals = bodyJson.SearchResults.tables[0].rows[0].toString(); 44 | var hostIndex = vals.search("bigip.azure") 45 | hostName = vals.substring(hostIndex, hostIndex + 20) 46 | 47 | } else if (source == 'elk') { 48 | analytic = "elk" 49 | message = bodyJson.message 50 | var hostIndex = message.search("bigip.azure") 51 | hostName = message.substring(hostIndex, hostIndex + 20) 52 | poolName = "" 53 | 54 | } else if (source == 'splunk') { 55 | analytic = "splunk" 56 | message = bodyJson.message 57 | var hostIndex = message.search("bigip.azure") 58 | hostName = message.substring(hostIndex, hostIndex + 20) 59 | 60 | } else { 61 | console.log("Invalid analytics source specified") 62 | response.end(); 63 | } 64 | 65 | //Convert hostName to an array and derive identifiers 66 | var n = hostName.split("."); 67 | app_id = n[2]; 68 | 69 | //Create scaling eventtype 70 | var app_name = "app1"; 71 | switch (scaleAction) { 72 | case "scaleOutBigip": 73 | what2Scale = 'bigip'; 74 | scaling_direction = 'up' 75 | app_name = app_name 76 | break; 77 | case "scaleInBigip": 78 | what2Scale = 'bigip'; 79 | scaling_direction = 'down' 80 | app_name = app_name 81 | break; 82 | case "scaleOutWorkload": 83 | what2Scale = 'app'; 84 | scaling_direction = 'up' 85 | app_name = app_name 86 | break; 87 | case "scaleInWorkload": 88 | what2Scale = 'app'; 89 | scaling_direction = 'down' 90 | app_name = app_name 91 | break; 92 | } 93 | 94 | console.log("The application ID is " + app_id + ". Webhook request to scale the " + what2Scale + " " + scaling_direction + ". 
If relevant, the app name is '" + app_name + "'.") 95 | 96 | //Construct Github Action webhook payload 97 | const data2 = JSON.stringify({ 98 | event_type: "scale-azure", //+ analytic, 99 | client_payload: { 100 | scaling_type: what2Scale, 101 | app_name: app_name, 102 | scaling_direction: scaling_direction, 103 | webhook_source: source, 104 | app_id: app_id 105 | } 106 | }) 107 | 108 | const options = { 109 | hostname: 'api.github.com', 110 | port: 443, 111 | path: repoPath, 112 | method: 'POST', 113 | headers: { 114 | 'Content-Type': 'application/json', 115 | 'Content-Length': data2.length, 116 | 'Authorization': 'token ' + args[0], 117 | 'user-agent': 'node.js' 118 | } 119 | } 120 | 121 | /* 122 | Create https POST to github 123 | */ 124 | const req2 = https.request(options, res2 => { 125 | console.log(`Post to Github returned status code of: ${res2.statusCode}`) 126 | console.log("Processing operation complete.\n") 127 | 128 | res2.on('data', d => { 129 | process.stdout.write(d) 130 | }) 131 | }) 132 | 133 | req2.on('error', error => { 134 | console.error(error) 135 | }) 136 | 137 | // submit payload via webhook to Github Action 138 | req2.write(data2) 139 | req2.end() 140 | 141 | response.on('error', (err) => { 142 | console.error(err); 143 | }); 144 | 145 | response.writeHead(200, {'Content-Type': 'application/json'}) 146 | const responseBody = { headers, method, url, body }; 147 | response.write(JSON.stringify(responseBody)); 148 | 149 | response.end(); 150 | }); 151 | } 152 | else { 153 | console.log("Invalid HTTP method"); 154 | response.end(); 155 | } 156 | 157 | // Start listener 158 | console.log("Starting alert processor...\n"); 159 | }).listen(8000); -------------------------------------------------------------------------------- /images/alert_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/alert_1.png -------------------------------------------------------------------------------- /images/alert_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/alert_2.png -------------------------------------------------------------------------------- /images/alert_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/alert_3.png -------------------------------------------------------------------------------- /images/alert_final.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/alert_final.png -------------------------------------------------------------------------------- /images/alert_maxconns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/alert_maxconns.png -------------------------------------------------------------------------------- /images/alert_minconns.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/alert_minconns.png -------------------------------------------------------------------------------- /images/alert_mincpu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/alert_mincpu.png -------------------------------------------------------------------------------- /images/alerts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/alerts.png -------------------------------------------------------------------------------- /images/arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/arch.png -------------------------------------------------------------------------------- /images/create_watch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/create_watch.png -------------------------------------------------------------------------------- /images/elk_discover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/elk_discover.png -------------------------------------------------------------------------------- /images/elk_explore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/elk_explore.png -------------------------------------------------------------------------------- /images/elk_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/elk_login.png -------------------------------------------------------------------------------- /images/index_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/index_1.png -------------------------------------------------------------------------------- /images/index_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/index_2.png -------------------------------------------------------------------------------- /images/index_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/index_3.png -------------------------------------------------------------------------------- /images/monitor_1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/monitor_1.png -------------------------------------------------------------------------------- /images/monitor_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/monitor_2.png -------------------------------------------------------------------------------- /images/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/output.png -------------------------------------------------------------------------------- /images/splunk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/splunk.png -------------------------------------------------------------------------------- /images/splunk1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/splunk1.png -------------------------------------------------------------------------------- /images/splunk3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/8ff7c57acd6c39edbea3f48e27013b535dbc2f47/images/splunk3.png -------------------------------------------------------------------------------- /scripts/consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo rm -rf consul 4 | 5 | #Utils 6 | sudo apt-get install unzip -y 7 | 8 | #Download Consul 9 | CONSUL_VERSION="1.9.3" 10 | curl --silent --remote-name https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip 11 | 12 | #Install Consul 13 | sudo unzip -o consul_${CONSUL_VERSION}_linux_amd64.zip 14 | sudo chown root:root consul 15 | sudo mv consul /usr/local/bin/ 16 | 17 | #Create Consul User 18 | sudo useradd --system --home /etc/consul.d --shell /bin/false consul 19 | sudo mkdir --parents /opt/consul 20 | sudo chown --recursive consul:consul /opt/consul 21 | 22 | #Create config dir 23 | sudo mkdir --parents /etc/consul.d 24 | sudo touch /etc/consul.d/consul.hcl 25 | sudo chown --recursive consul:consul /etc/consul.d 26 | sudo chmod 640 /etc/consul.d/consul.hcl 27 | 28 | cat << EOF > /etc/consul.d/consul.hcl 29 | datacenter = "UDF" 30 | data_dir = "/opt/consul" 31 | 32 | EOF 33 | 34 | cat << EOF > /etc/consul.d/server.hcl 35 | server = false 36 | bootstrap_expect = 1 37 | client_addr = "0.0.0.0" 38 | EOF 39 | -------------------------------------------------------------------------------- /scripts/deploy.sh: -------------------------------------------------------------------------------- 1 | while getopts c:b:w:t: flag 2 | do 3 | case "${flag}" in 4 | c) cloud=${OPTARG};; 5 | b) bigip_count=${OPTARG};; 6 | w) workload_count=${OPTARG};; 7 | t) github_token=${OPTARG};; 8 | esac 9 | done 10 | 11 | cd ../$cloud/terraform/ 12 | 13 | terraform init && 
terraform plan && terraform apply --auto-approve 14 | sleep 10s 15 | terraform init -force-copy && terraform plan && terraform apply --auto-approve -------------------------------------------------------------------------------- /scripts/kill.sh: -------------------------------------------------------------------------------- 1 | while getopts c:b:w:t: flag 2 | do 3 | case "${flag}" in 4 | c) cloud=${OPTARG};; 5 | esac 6 | done 7 | 8 | cd ../$cloud/terraform/ 9 | 10 | touch tfstate.tf && rm tfstate.tf && terraform init -force-copy && terraform destroy --auto-approve 11 | rm -rf .terraform 12 | rm -rf .terraform.lock.hcl -------------------------------------------------------------------------------- /templates/alertfwd.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo apt update && curl -sL https://deb.nodesource.com/setup_16.x | sudo -E bash - && sudo apt-get -y install nodejs 3 | sudo cat << EOF > /home/ubuntu/alertforwarder.js 4 | 5 | const express = require( 'express' ); 6 | const app = express(); 7 | const fs = require('fs'); 8 | const bodyParser = require('body-parser'); 9 | const https = require('https'); 10 | const http = require('http'); 11 | const token = "${github_token}" //Required to authenticate with Github action repo 12 | const repoPath = '${repo_path}' //Modify to match designated github action repo 13 | 14 | /* 15 | Create Listening server - receive alerts from analytics provider 16 | */ 17 | const options1 = { 18 | cert: "-----BEGIN CERTIFICATE-----\nMIIFWjCCBEKgAwIBAgITfQAAAB0gY6x6LLG8KwAAAAAAHTANBgkqhkiG9w0BAQUF\nADBOMRMwEQYKCZImiZPyLGQBGRYDY29tMRowGAYKCZImiZPyLGQBGRYKYXNlcnJh\nY29ycDEbMBkGA1UEAxMSYXNlcnJhY29ycC1EQy1DQS0xMB4XDTIwMDIxNTIyMTIw\nMloXDTIyMDIxNDIyMTIwMlowHzEdMBsGA1UEAxMUbXlhcHAuYXNlcnJhY29ycC5j\nb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTGBNbVYLJiIDfL0FQ\nMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sXfCty\nbszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A8fRf\nQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz4qS2\nIJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgFsJOl\nDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNYdHLg\nC3dPAgMBAAGjggJeMIICWjA7BgkrBgEEAYI3FQcELjAsBiQrBgEEAYI3FQiBg9Bp\ng5vnB4b5lxighjeD8YE0eYL+ujyChWkCAWQCAQMwEwYDVR0lBAwwCgYIKwYBBQUH\nAwEwDgYDVR0PAQH/BAQDAgWgMBsGCSsGAQQBgjcVCgQOMAwwCgYIKwYBBQUHAwEw\nHQYDVR0OBBYEFMXq6/mUs8bg5TUoL3uXPUyyAFyXMB8GA1UdIwQYMBaAFBEzMhC4\nl6myjmO0WBY2s0tLj1fQMIHOBgNVHR8EgcYwgcMwgcCggb2ggbqGgbdsZGFwOi8v\nL0NOPWFzZXJyYWNvcnAtREMtQ0EtMSxDTj1kYyxDTj1DRFAsQ049UHVibGljJTIw\nS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1h\nc2VycmFjb3JwLERDPWNvbT9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/\nb2JqZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0aW9uUG9pbnQwgccGCCsGAQUFBwEBBIG6\nMIG3MIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049YXNlcnJhY29ycC1EQy1DQS0x\nLENOPUFJQSxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxD\nTj1Db25maWd1cmF0aW9uLERDPWFzZXJyYWNvcnAsREM9Y29tP2NBQ2VydGlmaWNh\ndGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0aG9yaXR5MA0GCSqG\nSIb3DQEBBQUAA4IBAQC8xoaBDhn0BGqQ73ROjlvI+5yJs3UUws2D7KCtpzNt2Ksm\ngm52umZoIzofPRXg/RVkt+Ig7Y9+ixaEyAxMFtpDyap1bTNjWsw99LoUZvMo7B9q\nrgOS55h5OeLwc1PM3n43I9H2C3uQ1hYflD3ovzvzrywejCHlHlReovZkYCcrDCa+\nytw7Hob0P1vkXsPtpmk61A7PCLw6DghhczT1f4nAK147FuRl55jz38JFOtqKVlfU\nNH4EaSxciHO2evWDHUddzeAwxHLg77UKPH+MSPXd7jGZx3xqQEtpjMqq5WM09YsL\n1mwOJpk1Xarkb0WB0J10YXqKs6tSxyrfX/FL5MZA\n-----END CERTIFICATE-----", 19 | key: "-----BEGIN PRIVATE 
KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDTGBNbVYLJiIDf\nL0FQMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sX\nfCtybszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A\n8fRfQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz\n4qS2IJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgF\nsJOlDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNY\ndHLgC3dPAgMBAAECggEADudNPo6L/FSR3LAGaXYRoaoT7dBuwMxQUR+/opUbYIFx\n0gOPbftL5vYrfNjQVkT13a4WDH6OzQilqLPQkXS3K+bl8v+lVNEMlYgtDOOgEh/8\n13pThxDTUtFRgkK9HlUfSq1Yz06A0hfvxRmQCkWXBCVaoL4KWep7o9DMUqWR+4ad\nXlvzvG2W3fvNE3+ewwf0tR/OYTQOZvkRfm0Ws1s0W85wr6Ec87psbLPPO2yecFcq\n3fJjcZmbaWWG5Thh9479W3rhC3I6rJN+YLgyXoumml5wmmjf8CxocUL3uPt+32u5\nE4OZTLdAIF0+KxH3hYbw3D6DB/LnAZVB+jxmOC4j2QKBgQDm5JVzld5KYUlIt566\nsyQ95JMyw0Oqp1U7WMwI8+RMYnO4NPo6Dzej9LMsVAQFmg5DncElSf3PLC9PfsVe\nCK6FiXPScy/9cAqchJVI3f7CgJiYvrFwoiieVJFYSgh52aWxL/KHnEe4UWk50qmS\n/hCyPdSCHJVw1oh5dIO/QGG+YwKBgQDqDFi8mNrUI/QD+m/2HNT+6PaXUWEfyY94\n/swvn9O+qQYWbU8MCxeucthTJ5p5lYY5FdUeKPGZn0jahgoEV63XnuemNe43tOJA\nDpo1UyWmQoodAOOm9QiEEjOAxx+hEcSfJrEUgGSYVR+GHbap+xuB0BrtCN9qWsdb\nU2d25b4xJQKBgQCV4SAardhkVA6sQ3WoIA2Ql8Xtl89fAcxT/+pCjX9PDkGr+8fK\n1IH7ziZYyhjqQfysa8UrHYLCbx4u7k3UIrKXQIiMvfUTAR4CSBZX/LMZMzzbOj4Y\nrUrMrHzE4Rnrbxsdj9BRs2LjBQXXYSZuornX2kcORtvDKZ/hp362MWbBnQKBgQCo\nSZZojXQTQ4LKdYGZsmOIotPkO9SdOZ3a/0KsH7zuA7Tn3VMQMs2lVtiq+ff94oCv\nfT5PQFtv/XMyBV0ggDb0qkKgZXjTP1HLg3RoUU/p+0A52JDYVKn55Oh5eTQJ6a+6\nS+TZ+/PZAKP5GFZmZLMDpTInK9ERNRLRXOgxOsKFrQKBgQDH6PfQTuvubwL+CYbb\nCI1AtWOGEGcuLIbtlbh5e4/1FxtdG2pgV2wBJxIwNhn8U7yMHj9B/MB39OAt6Vlc\nZU0Dah41RMi4dPGAi/iuTQklfRLjROSVmhb/lS9xDRxzHcm0u0YBuU0Q+MC3aw7O\njXWs11QDs5AR93mLB0AZdRjGLA==\n-----END PRIVATE KEY-----" 20 | 21 | }; 22 | 23 | https.createServer(options1, function (request, response) { 24 | if (request.method == 'POST') { 25 | 26 | const { headers, method, url } = request; 27 | let body = []; 28 | request.on('error', (err) => { 29 | console.error(err); 30 | 31 | }).on('data', (chunk) => { 32 | body.push(chunk); 33 | }).on('end', () => { 34 | body = Buffer.concat(body).toString(); 35 | bodyJson = JSON.parse(body); 36 | source = bodyJson.source; 37 | scaleAction = bodyJson.scaleAction; 38 | console.log(bodyJson); 39 | 40 | if (scaleAction == null){ 41 | console.log("error with scaleaction"); 42 | response.end(); 43 | }; 44 | 45 | if (source == "azurelaw"){ 46 | vals = bodyJson.SearchResults.tables[0].rows[0].toString(); 47 | var hostIndex = vals.search("bigip.azure") 48 | var hostLength = 20 49 | 50 | if ( hostIndex == -1) { 51 | hostIndex = vals.search("bigip.aws") 52 | hostLength = 18 53 | } 54 | hostName = vals.substring(hostIndex, hostIndex + hostLength) 55 | 56 | } else if (source == 'elk' || source == 'splunk' || source == 'default') { 57 | message = bodyJson.message 58 | var hostIndex = message.search("bigip.azure") 59 | var hostLength = 20 60 | 61 | if ( hostIndex == -1) { 62 | hostIndex = message.search("bigip.aws") 63 | hostlength = 18 64 | } 65 | hostName = message.substring(hostIndex, hostIndex + hostLength) 66 | 67 | } else { 68 | console.log("Invalid nalytics source specified") 69 | response.end(); 70 | } 71 | 72 | //Convert hostName to arrays and derive identifiers 73 | var n = hostName.split("."); 74 | cloud = n[1]; 75 | app_id = n[2]; 76 | 77 | //Create scaling eventtype 78 | var app_name = "app1"; 79 | switch (scaleAction) { 80 | case "scaleOutBigip": 81 | what2Scale = 'bigip'; 82 | scaling_direction = 'up' 83 | app_name = app_name 84 | break; 85 | case "scaleInBigip": 86 | what2Scale = 
'bigip'; 87 | scaling_direction = 'down' 88 | app_name = app_name 89 | break; 90 | case "scaleOutWorkload": 91 | what2Scale = 'app'; 92 | scaling_direction = 'up' 93 | app_name = app_name 94 | break; 95 | case "scaleInWorkload": 96 | what2Scale = 'app'; 97 | scaling_direction = 'down' 98 | app_name = app_name 99 | break; 100 | } 101 | 102 | console.log("The application ID is " + app_id + ". Webhook request to scale the " + what2Scale + " " + scaling_direction + ". If relevant, the app name is '" + app_name + "'.") 103 | 104 | //Construct Github Action webhook payload 105 | const data2 = JSON.stringify({ 106 | event_type: "scale-" + cloud, 107 | client_payload: { 108 | scaling_type: what2Scale, 109 | app_name: app_name, 110 | scaling_direction: scaling_direction, 111 | webhook_source: source, 112 | app_id: app_id 113 | } 114 | }) 115 | 116 | const options = { 117 | hostname: 'api.github.com', 118 | port: 443, 119 | path: repoPath, 120 | method: 'POST', 121 | headers: { 122 | 'Content-Type': 'application/json', 123 | 'Content-Length': data2.length, 124 | 'Authorization': 'token ' + token, 125 | 'user-agent': 'node.js' 126 | } 127 | } 128 | 129 | /* 130 | Create https POST to github 131 | */ 132 | const req2 = https.request(options, res2 => { 133 | console.log("Processing operation complete.\n") 134 | 135 | res2.on('data', d => { 136 | process.stdout.write(d) 137 | }) 138 | }) 139 | 140 | req2.on('error', error => { 141 | console.error(error) 142 | }) 143 | 144 | // submit payload via webhook to Github Action 145 | req2.write(data2) 146 | req2.end() 147 | 148 | response.on('error', (err) => { 149 | console.error(err); 150 | }); 151 | 152 | response.writeHead(200, {'Content-Type': 'application/json'}) 153 | const responseBody = { headers, method, url, body }; 154 | response.write(JSON.stringify(responseBody)); 155 | 156 | response.end(); 157 | }); 158 | } 159 | else { 160 | console.log("Invalid HTTP method"); 161 | response.end(); 162 | } 163 | 164 | // Start listener 165 | console.log("Starting alert processor...\n"); 166 | }).listen(8000); 167 | EOF 168 | cd /home/ubuntu && npm install request && npm install express && npm install body-parser && npm install http && npm install fs && npm install https && sudo chmod +x /home/ubuntu/alertforwarder.js 169 | 170 | sudo cat << EOF > /etc/systemd/system/alertforwarder.service 171 | [Unit] 172 | Description=alertforwarder 173 | [Service] 174 | ExecStart=/usr/bin/node /home/ubuntu/alertforwarder.js 175 | Restart=always 176 | User=nobody 177 | # Note Debian/Ubuntu uses 'nogroup', RHEL/Fedora uses 'nobody' 178 | Group=nogroup 179 | Environment=PATH=/usr/bin:/usr/local/bin:/home/ubuntu 180 | Environment=NODE_ENV=production 181 | WorkingDirectory=/home/ubuntu 182 | 183 | [Install] 184 | WantedBy=multi-user.target 185 | EOF 186 | 187 | sudo systemctl start alertforwarder.service && sudo systemctl stop alertforwarder.service && sudo systemctl restart alertforwarder.service 188 | 189 | 190 | 191 | -------------------------------------------------------------------------------- /templates/as3.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/F5Networks/f5-appsvcs-extension/master/schema/latest/as3-schema.json", 3 | "class": "AS3", 4 | "action": "deploy", 5 | "persist": true, 6 | "declaration": { 7 | "class": "ADC", 8 | "schemaVersion": "3.13.0", 9 | "id": "123abc", 10 | "label": "Sample 1", 11 | "remark": "An HTTPS sample application", 12 | "controls": { 13 | 
"trace": true 14 | }, 15 | "DemoTenant": { 16 | "class": "Tenant", 17 | "Shared": { 18 | "class": "Application", 19 | "template": "shared" 20 | }, 21 | "${app_name}": { 22 | "class": "Application", 23 | "template": "https", 24 | "serviceMain": { 25 | "class": "Service_HTTPS", 26 | "virtualPort": 443, 27 | "virtualAddresses": [ 28 | "0.0.0.0" 29 | ], 30 | "pool": "${web_pool}", 31 | "policyWAF": { 32 | "use": "My_ASM_Policy" 33 | }, 34 | "serverTLS": "webtls", 35 | "profileTrafficLog": { 36 | "use": "telemetry_traffic_log_profile" 37 | }, 38 | "profileAnalyticsTcp": { 39 | "use": "telemetry_tcp_analytics_profile" 40 | }, 41 | "securityLogProfiles": [ 42 | { 43 | "use": "telemetry_asm_security_log_profile" 44 | } 45 | ], 46 | "profileBotDefense": { 47 | "bigip": "/Common/bot-defense" 48 | }, 49 | "profileDOS": { 50 | "bigip": "/Common/dos" 51 | } 52 | }, 53 | "My_ASM_Policy": { 54 | "class": "WAF_Policy", 55 | "url": "https://raw.githubusercontent.com/f5devcentral/adc-telemetry-based-autoscaling/main/configs/app_sec_policy.xml", 56 | "ignoreChanges": true 57 | }, 58 | "${web_pool}": { 59 | "class": "Pool", 60 | "monitors": [ 61 | "tcp" 62 | ], 63 | "members": [{ 64 | "servicePort": 80, 65 | "addressDiscovery": "consul", 66 | "updateInterval": 10, 67 | "uri": "http://${consul_ip}:8500/v1/catalog/service/nginx" 68 | }] 69 | }, 70 | "telemetry_local_rule": { 71 | "remark": "Only required when TS is a local listener", 72 | "class": "iRule", 73 | "iRule": "when CLIENT_ACCEPTED {\n node 127.0.0.1 6514\n}" 74 | }, 75 | "telemetry_local": { 76 | "remark": "Only required when TS is a local listener", 77 | "class": "Service_TCP", 78 | "virtualAddresses": [ 79 | "255.255.255.254" 80 | ], 81 | "virtualPort": 6514, 82 | "iRules": [ 83 | "telemetry_local_rule" 84 | ] 85 | }, 86 | "telemetry": { 87 | "class": "Pool", 88 | "members": [{ 89 | "enable": true, 90 | "serverAddresses": [ 91 | "255.255.255.254" 92 | ], 93 | "servicePort": 6514 94 | }], 95 | "monitors": [{ 96 | "bigip": "/Common/tcp" 97 | }] 98 | }, 99 | "telemetry_hsl": { 100 | "class": "Log_Destination", 101 | "type": "remote-high-speed-log", 102 | "protocol": "tcp", 103 | "pool": { 104 | "use": "telemetry" 105 | } 106 | }, 107 | "telemetry_formatted": { 108 | "class": "Log_Destination", 109 | "type": "splunk", 110 | "forwardTo": { 111 | "use": "telemetry_hsl" 112 | } 113 | }, 114 | "telemetry_publisher": { 115 | "class": "Log_Publisher", 116 | "destinations": [{ 117 | "use": "telemetry_formatted" 118 | }] 119 | }, 120 | "telemetry_asm_security_log_profile": { 121 | "class": "Security_Log_Profile", 122 | "application": { 123 | "localStorage": false, 124 | "remoteStorage": "splunk", 125 | "servers": [{ 126 | "address": "255.255.255.254", 127 | "port": "6514" 128 | }], 129 | "storageFilter": { 130 | "requestType": "all" 131 | } 132 | } 133 | }, 134 | "telemetry_traffic_log_profile": { 135 | "class": "Traffic_Log_Profile", 136 | "requestSettings": { 137 | "requestEnabled": true, 138 | "requestProtocol": "mds-tcp", 139 | "requestPool": { 140 | "use": "telemetry" 141 | }, 142 | "requestTemplate": "event_source=\"request_logging\",hostname=\"$BIGIP_HOSTNAME\",client_ip=\"$CLIENT_IP\",server_ip=\"$SERVER_IP\",http_method=\"$HTTP_METHOD\",http_uri=\"$HTTP_URI\",virtual_name=\"$VIRTUAL_NAME\",event_timestamp=\"$DATE_HTTP\"" 143 | } 144 | }, 145 | "telemetry_http_analytics_profile": { 146 | "class": "Analytics_Profile", 147 | "collectGeo": true, 148 | "collectMaxTpsAndThroughput": true, 149 | "collectOsAndBrowser": true, 150 | "collectIp": true, 151 | 
"collectMethod": true, 152 | "collectPageLoadTime": true, 153 | "collectResponseCode": true, 154 | "collectSubnet" : true, 155 | "collectUrl": true, 156 | "collectUserAgent": true, 157 | "collectUserSession": true, 158 | "publishIruleStatistics": true 159 | }, 160 | "telemetry_tcp_analytics_profile": { 161 | "class": "Analytics_TCP_Profile", 162 | "collectCity": true, 163 | "collectContinent": true, 164 | "collectCountry": true, 165 | "collectNexthop": true, 166 | "collectPostCode": true, 167 | "collectRegion": true, 168 | "collectRemoteHostIp": true, 169 | "collectRemoteHostSubnet" : true, 170 | "collectedByServerSide": true 171 | }, 172 | "webtls": { 173 | "class": "TLS_Server", 174 | "certificates": [{ 175 | "certificate": "webcert" 176 | }] 177 | }, 178 | "webcert": { 179 | "class": "Certificate", 180 | "remark": "in practice we recommend using a passphrase", 181 | "certificate": "-----BEGIN CERTIFICATE-----\nMIIFWjCCBEKgAwIBAgITfQAAAB0gY6x6LLG8KwAAAAAAHTANBgkqhkiG9w0BAQUF\nADBOMRMwEQYKCZImiZPyLGQBGRYDY29tMRowGAYKCZImiZPyLGQBGRYKYXNlcnJh\nY29ycDEbMBkGA1UEAxMSYXNlcnJhY29ycC1EQy1DQS0xMB4XDTIwMDIxNTIyMTIw\nMloXDTIyMDIxNDIyMTIwMlowHzEdMBsGA1UEAxMUbXlhcHAuYXNlcnJhY29ycC5j\nb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTGBNbVYLJiIDfL0FQ\nMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sXfCty\nbszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A8fRf\nQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz4qS2\nIJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgFsJOl\nDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNYdHLg\nC3dPAgMBAAGjggJeMIICWjA7BgkrBgEEAYI3FQcELjAsBiQrBgEEAYI3FQiBg9Bp\ng5vnB4b5lxighjeD8YE0eYL+ujyChWkCAWQCAQMwEwYDVR0lBAwwCgYIKwYBBQUH\nAwEwDgYDVR0PAQH/BAQDAgWgMBsGCSsGAQQBgjcVCgQOMAwwCgYIKwYBBQUHAwEw\nHQYDVR0OBBYEFMXq6/mUs8bg5TUoL3uXPUyyAFyXMB8GA1UdIwQYMBaAFBEzMhC4\nl6myjmO0WBY2s0tLj1fQMIHOBgNVHR8EgcYwgcMwgcCggb2ggbqGgbdsZGFwOi8v\nL0NOPWFzZXJyYWNvcnAtREMtQ0EtMSxDTj1kYyxDTj1DRFAsQ049UHVibGljJTIw\nS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1h\nc2VycmFjb3JwLERDPWNvbT9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/\nb2JqZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0aW9uUG9pbnQwgccGCCsGAQUFBwEBBIG6\nMIG3MIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049YXNlcnJhY29ycC1EQy1DQS0x\nLENOPUFJQSxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxD\nTj1Db25maWd1cmF0aW9uLERDPWFzZXJyYWNvcnAsREM9Y29tP2NBQ2VydGlmaWNh\ndGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0aG9yaXR5MA0GCSqG\nSIb3DQEBBQUAA4IBAQC8xoaBDhn0BGqQ73ROjlvI+5yJs3UUws2D7KCtpzNt2Ksm\ngm52umZoIzofPRXg/RVkt+Ig7Y9+ixaEyAxMFtpDyap1bTNjWsw99LoUZvMo7B9q\nrgOS55h5OeLwc1PM3n43I9H2C3uQ1hYflD3ovzvzrywejCHlHlReovZkYCcrDCa+\nytw7Hob0P1vkXsPtpmk61A7PCLw6DghhczT1f4nAK147FuRl55jz38JFOtqKVlfU\nNH4EaSxciHO2evWDHUddzeAwxHLg77UKPH+MSPXd7jGZx3xqQEtpjMqq5WM09YsL\n1mwOJpk1Xarkb0WB0J10YXqKs6tSxyrfX/FL5MZA\n-----END CERTIFICATE-----", 182 | "privateKey": "-----BEGIN PRIVATE 
KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDTGBNbVYLJiIDf\nL0FQMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sX\nfCtybszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A\n8fRfQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz\n4qS2IJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgF\nsJOlDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNY\ndHLgC3dPAgMBAAECggEADudNPo6L/FSR3LAGaXYRoaoT7dBuwMxQUR+/opUbYIFx\n0gOPbftL5vYrfNjQVkT13a4WDH6OzQilqLPQkXS3K+bl8v+lVNEMlYgtDOOgEh/8\n13pThxDTUtFRgkK9HlUfSq1Yz06A0hfvxRmQCkWXBCVaoL4KWep7o9DMUqWR+4ad\nXlvzvG2W3fvNE3+ewwf0tR/OYTQOZvkRfm0Ws1s0W85wr6Ec87psbLPPO2yecFcq\n3fJjcZmbaWWG5Thh9479W3rhC3I6rJN+YLgyXoumml5wmmjf8CxocUL3uPt+32u5\nE4OZTLdAIF0+KxH3hYbw3D6DB/LnAZVB+jxmOC4j2QKBgQDm5JVzld5KYUlIt566\nsyQ95JMyw0Oqp1U7WMwI8+RMYnO4NPo6Dzej9LMsVAQFmg5DncElSf3PLC9PfsVe\nCK6FiXPScy/9cAqchJVI3f7CgJiYvrFwoiieVJFYSgh52aWxL/KHnEe4UWk50qmS\n/hCyPdSCHJVw1oh5dIO/QGG+YwKBgQDqDFi8mNrUI/QD+m/2HNT+6PaXUWEfyY94\n/swvn9O+qQYWbU8MCxeucthTJ5p5lYY5FdUeKPGZn0jahgoEV63XnuemNe43tOJA\nDpo1UyWmQoodAOOm9QiEEjOAxx+hEcSfJrEUgGSYVR+GHbap+xuB0BrtCN9qWsdb\nU2d25b4xJQKBgQCV4SAardhkVA6sQ3WoIA2Ql8Xtl89fAcxT/+pCjX9PDkGr+8fK\n1IH7ziZYyhjqQfysa8UrHYLCbx4u7k3UIrKXQIiMvfUTAR4CSBZX/LMZMzzbOj4Y\nrUrMrHzE4Rnrbxsdj9BRs2LjBQXXYSZuornX2kcORtvDKZ/hp362MWbBnQKBgQCo\nSZZojXQTQ4LKdYGZsmOIotPkO9SdOZ3a/0KsH7zuA7Tn3VMQMs2lVtiq+ff94oCv\nfT5PQFtv/XMyBV0ggDb0qkKgZXjTP1HLg3RoUU/p+0A52JDYVKn55Oh5eTQJ6a+6\nS+TZ+/PZAKP5GFZmZLMDpTInK9ERNRLRXOgxOsKFrQKBgQDH6PfQTuvubwL+CYbb\nCI1AtWOGEGcuLIbtlbh5e4/1FxtdG2pgV2wBJxIwNhn8U7yMHj9B/MB39OAt6Vlc\nZU0Dah41RMi4dPGAi/iuTQklfRLjROSVmhb/lS9xDRxzHcm0u0YBuU0Q+MC3aw7O\njXWs11QDs5AR93mLB0AZdRjGLA==\n-----END PRIVATE KEY-----" 183 | } 184 | } 185 | } 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /templates/backend.tpl: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "consul" { 3 | address = "${consul_ip}" 4 | scheme = "http" 5 | path = "adpm/applications/${app_id}/terraform/tfstate" 6 | gzip = true 7 | } 8 | } 9 | 10 | data "terraform_remote_state" "state" { 11 | backend = "consul" 12 | config = { 13 | address = "${consul_ip}" 14 | path = "adpm/applications/${app_id}/terraform/tfstate" 15 | } 16 | } 17 | 18 | locals{ 19 | app_id = "${app_id}" 20 | bigip_count = ${bigip_count} 21 | workload_count = ${workload_count} 22 | } 23 | -------------------------------------------------------------------------------- /templates/backendapp.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Utils 4 | sudo apt update 5 | sudo apt-get install -y unzip jq 6 | 7 | #Get IP 8 | local_ipv4=`echo $(curl -s -f -H Metadata:true "http://169.254.169.254/metadata/instance/network/interface?api-version=2019-06-01" | jq -r '.[1].ipv4[]' | grep private | awk '{print $2}' | awk -F \" '{print $2}') | awk '{print $1}'` 9 | 10 | #Download Consul 11 | CONSUL_VERSION="1.9.0" 12 | curl --silent --remote-name https://releases.hashicorp.com/consul/1.9.0/consul_1.9.0_linux_amd64.zip 13 | 14 | #Install Consul 15 | unzip consul_1.9.0_linux_amd64.zip 16 | sudo chown root:root consul 17 | sudo mv consul /usr/local/bin/ 18 | consul -autocomplete-install 19 | complete -C /usr/local/bin/consul consul 20 | 21 | #Create Consul User 22 | sudo useradd --system --home /etc/consul.d --shell /bin/false consul 23 | sudo mkdir --parents /opt/consul 24 | sudo chown --recursive consul:consul /opt/consul 25 | 
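# The systemd unit and config files written below run Consul as a client
# agent: client.hcl joins this node to the Consul server at ${consul_ip},
# and nginx.json registers an "nginx" service with a TCP health check on
# port 80. The BIG-IP AS3 pool (templates/as3.json) discovers its members
# through that catalog entry via addressDiscovery "consul".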
26 | #Create Systemd Config 27 | sudo cat << EOF > /etc/systemd/system/consul.service 28 | [Unit] 29 | Description="HashiCorp Consul - A service mesh solution" 30 | Documentation=https://www.consul.io/ 31 | Requires=network-online.target 32 | After=network-online.target 33 | ConditionFileNotEmpty=/etc/consul.d/consul.hcl 34 | [Service] 35 | User=consul 36 | Group=consul 37 | ExecStart=/usr/local/bin/consul agent -config-dir=/etc/consul.d/ 38 | ExecReload=/usr/local/bin/consul reload 39 | KillMode=process 40 | Restart=always 41 | LimitNOFILE=65536 42 | [Install] 43 | WantedBy=multi-user.target 44 | EOF 45 | 46 | #Create config dir 47 | sudo mkdir --parents /etc/consul.d 48 | sudo touch /etc/consul.d/consul.hcl 49 | sudo chown --recursive consul:consul /etc/consul.d 50 | sudo chmod 640 /etc/consul.d/consul.hcl 51 | 52 | cat << EOF > /etc/consul.d/consul.hcl 53 | data_dir = "/opt/consul" 54 | ui = true 55 | EOF 56 | 57 | cat << EOF > /etc/consul.d/client.hcl 58 | advertise_addr = "$local_ipv4" 59 | retry_join = ["${consul_ip}"] 60 | EOF 61 | 62 | cat << EOF > /etc/consul.d/nginx.json 63 | { 64 | "service": { 65 | "name": "nginx", 66 | "port": 80, 67 | "checks": [ 68 | { 69 | "id": "nginx", 70 | "name": "nginx TCP Check", 71 | "tcp": "localhost:80", 72 | "interval": "10s", 73 | "timeout": "1s" 74 | } 75 | ] 76 | } 77 | } 78 | EOF 79 | 80 | #Enable the service 81 | sudo systemctl enable consul 82 | sudo service consul start 83 | sudo service consul status 84 | 85 | #Install Dockers 86 | #sudo snap install docker 87 | #sudo curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 88 | #sudo chmod +x /usr/local/bin/docker-compose 89 | 90 | #Run nginx 91 | #sleep 10 92 | #cat << EOF > docker-compose.yml 93 | #version: "3.7" 94 | #services: 95 | # web: 96 | # image: nginxdemos/hello 97 | # ports: 98 | # - "80:80" 99 | # restart: always 100 | # command: [nginx-debug, '-g', 'daemon off;'] 101 | # network_mode: "host" 102 | #EOF 103 | #sudo docker-compose up -d 104 | 105 | sudo apt update && sudo apt install -y nginx && sudo apt install -y php-fpm && sudo apt install -y php-curl && sudo /etc/init.d/php7.2-fpm restart && sudo rm /etc/nginx/sites-available/default 106 | cat << EOF > /etc/nginx/sites-available/default 107 | server { 108 | listen 80 default_server; 109 | listen [::]:80 default_server; 110 | 111 | root /var/www/html; 112 | 113 | index index.php index.html index.htm index.nginx-debian.html; 114 | 115 | server_name _; 116 | 117 | location / { 118 | try_files \$uri \$uri/ =404; 119 | } 120 | 121 | location ~ \.php$ { 122 | fastcgi_pass unix:/run/php/php7.2-fpm.sock; 123 | include snippets/fastcgi-php.conf; 124 | } 125 | } 126 | EOF 127 | 128 | cat << EOF > /var/www/html/index.php 129 | "; 134 | echo "

Application ${app_id} Scaling Operations Status

"; 135 | echo "
"; 136 | echo(date("m-d-Y H:i:s",\$t)); 137 | echo "

"; 138 | \$url1 = array("Scaling operation active? " => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/is_running?raw"); 139 | \$url2 = array("Current backend workload count: " => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/workload/current_count?raw"); 140 | \$url3 = array("Last workload scaling event timestamp: " => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/workload/last_modified_timestamp?raw"); 141 | \$url4 = array("Current BIG-IP cluster count: " => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/bigip/current_count?raw"); 142 | \$url5 = array("Last BIG-IP scaling event timestamp" => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/bigip/last_modified_timestamp?raw"); 143 | 144 | \$urls = array_merge(\$url1, \$url2, \$url3, \$url4, \$url5); 145 | \$array_length = count(\$urls); 146 | \$ch = curl_init(); 147 | echo "
"; 148 | foreach (\$urls as \$x => \$x_value) 149 | { 150 | echo "
" . \$x; 151 | \$headers = []; 152 | \$headers[] = 'X-Consul-Token: 6ae6afa6-a8f3-06ba-b960-515c7963d23a'; 153 | curl_setopt(\$ch, CURLOPT_HTTPHEADER, \$headers); 154 | curl_setopt(\$ch, CURLOPT_URL, \$x_value); 155 | curl_setopt(\$ch, CURLOPT_HEADER, false); 156 | curl_setopt(\$ch, CURLOPT_RETURNTRANSFER, false); 157 | curl_setopt(\$ch, CURLOPT_VERBOSE, true); 158 | 159 | echo " "; 160 | 161 | curl_exec(\$ch); 162 | } 163 | echo "
"; 164 | curl_close(\$ch); 165 | ?> 166 | 167 | 168 | 169 | 170 | 171 | 173 | 174 | EOF 175 | sudo /etc/init.d/php7.2-fpm restart && sudo systemctl restart nginx 176 | 177 | 178 | 179 | -------------------------------------------------------------------------------- /templates/backendapp_aws.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Utils 4 | sudo apt update 5 | sudo apt-get install -y unzip jq 6 | 7 | #Get IP 8 | local_ipv4="$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)" 9 | 10 | #Download Consul 11 | CONSUL_VERSION="1.9.0" 12 | curl --silent --remote-name https://releases.hashicorp.com/consul/1.9.0/consul_1.9.0_linux_amd64.zip 13 | 14 | #Install Consul 15 | unzip consul_1.9.0_linux_amd64.zip 16 | sudo chown root:root consul 17 | sudo mv consul /usr/local/bin/ 18 | consul -autocomplete-install 19 | complete -C /usr/local/bin/consul consul 20 | 21 | #Create Consul User 22 | sudo useradd --system --home /etc/consul.d --shell /bin/false consul 23 | sudo mkdir --parents /opt/consul 24 | sudo chown --recursive consul:consul /opt/consul 25 | 26 | #Create Systemd Config 27 | sudo cat << EOF > /etc/systemd/system/consul.service 28 | [Unit] 29 | Description="HashiCorp Consul - A service mesh solution" 30 | Documentation=https://www.consul.io/ 31 | Requires=network-online.target 32 | After=network-online.target 33 | ConditionFileNotEmpty=/etc/consul.d/consul.hcl 34 | [Service] 35 | User=consul 36 | Group=consul 37 | ExecStart=/usr/local/bin/consul agent -config-dir=/etc/consul.d/ 38 | ExecReload=/usr/local/bin/consul reload 39 | KillMode=process 40 | Restart=always 41 | LimitNOFILE=65536 42 | [Install] 43 | WantedBy=multi-user.target 44 | EOF 45 | 46 | #Create config dir 47 | sudo mkdir --parents /etc/consul.d 48 | sudo touch /etc/consul.d/consul.hcl 49 | sudo chown --recursive consul:consul /etc/consul.d 50 | sudo chmod 640 /etc/consul.d/consul.hcl 51 | 52 | cat << EOF > /etc/consul.d/consul.hcl 53 | data_dir = "/opt/consul" 54 | ui = true 55 | EOF 56 | 57 | cat << EOF > /etc/consul.d/client.hcl 58 | advertise_addr = "$local_ipv4" 59 | retry_join = ["${consul_ip}"] 60 | EOF 61 | 62 | cat << EOF > /etc/consul.d/nginx.json 63 | { 64 | "service": { 65 | "name": "nginx", 66 | "port": 80, 67 | "checks": [ 68 | { 69 | "id": "nginx", 70 | "name": "nginx TCP Check", 71 | "tcp": "localhost:80", 72 | "interval": "10s", 73 | "timeout": "1s" 74 | } 75 | ] 76 | } 77 | } 78 | EOF 79 | 80 | #Enable the service 81 | sudo systemctl enable consul 82 | sudo service consul start 83 | sudo service consul status 84 | 85 | #Install Dockers 86 | #sudo snap install docker 87 | #sudo curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 88 | #sudo chmod +x /usr/local/bin/docker-compose 89 | 90 | #Run nginx 91 | #sleep 10 92 | #cat << EOF > docker-compose.yml 93 | #version: "3.7" 94 | #services: 95 | # web: 96 | # image: nginxdemos/hello 97 | # ports: 98 | # - "80:80" 99 | # restart: always 100 | # command: [nginx-debug, '-g', 'daemon off;'] 101 | # network_mode: "host" 102 | #EOF 103 | #sudo docker-compose up -d 104 | 105 | sudo apt update && sudo apt install -y nginx && sudo apt install -y php-fpm && sudo apt install -y php-curl && sudo /etc/init.d/php7.2-fpm restart && sudo rm /etc/nginx/sites-available/default 106 | cat << EOF > /etc/nginx/sites-available/default 107 | server { 108 | listen 80 default_server; 109 | listen [::]:80 
default_server; 110 | 111 | root /var/www/html; 112 | 113 | index index.php index.html index.htm index.nginx-debian.html; 114 | 115 | server_name _; 116 | 117 | location / { 118 | try_files \$uri \$uri/ =404; 119 | } 120 | 121 | location ~ \.php$ { 122 | fastcgi_pass unix:/run/php/php7.2-fpm.sock; 123 | include snippets/fastcgi-php.conf; 124 | } 125 | } 126 | EOF 127 | 128 | cat << EOF > /var/www/html/index.php 129 | "; 134 | echo "

Application ${app_id} Scaling Operations Status

"; 135 | echo "
"; 136 | echo(date("m-d-Y H:i:s",\$t)); 137 | echo "

"; 138 | \$url1 = array("Scaling operation active? " => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/is_running?raw"); 139 | \$url2 = array("Current backend workload count: " => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/workload/current_count?raw"); 140 | \$url3 = array("Last workload scaling event timestamp: " => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/workload/last_modified_timestamp?raw"); 141 | \$url4 = array("Current BIG-IP cluster count: " => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/bigip/current_count?raw"); 142 | \$url5 = array("Last BIG-IP scaling event timestamp" => "http://${consul_ip}:8500/v1/kv/adpm/applications/${app_id}/scaling/bigip/last_modified_timestamp?raw"); 143 | 144 | \$urls = array_merge(\$url1, \$url2, \$url3, \$url4, \$url5); 145 | \$array_length = count(\$urls); 146 | \$ch = curl_init(); 147 | echo "
"; 148 | foreach (\$urls as \$x => \$x_value) 149 | { 150 | echo "
" . \$x; 151 | \$headers = []; 152 | \$headers[] = 'X-Consul-Token: 6ae6afa6-a8f3-06ba-b960-515c7963d23a'; 153 | curl_setopt(\$ch, CURLOPT_HTTPHEADER, \$headers); 154 | curl_setopt(\$ch, CURLOPT_URL, \$x_value); 155 | curl_setopt(\$ch, CURLOPT_HEADER, false); 156 | curl_setopt(\$ch, CURLOPT_RETURNTRANSFER, false); 157 | curl_setopt(\$ch, CURLOPT_VERBOSE, true); 158 | 159 | echo " "; 160 | 161 | curl_exec(\$ch); 162 | } 163 | echo "
"; 164 | curl_close(\$ch); 165 | ?> 166 | 167 | 168 | 169 | 170 | 171 | 173 | 174 | EOF 175 | sudo /etc/init.d/php7.2-fpm restart && sudo systemctl restart nginx 176 | 177 | 178 | 179 | -------------------------------------------------------------------------------- /templates/consul.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #Utils 4 | sudo apt-get update && sudo apt-get install unzip 5 | 6 | #Download Consul 7 | curl --silent --remote-name https://releases.hashicorp.com/consul/${consul_ver}/consul_${consul_ver}_linux_amd64.zip 8 | 9 | #Install Consul 10 | unzip consul_${consul_ver}_linux_amd64.zip 11 | sudo chown root:root consul 12 | sudo mv consul /usr/local/bin/ 13 | consul -autocomplete-install 14 | complete -C /usr/local/bin/consul consul 15 | 16 | #Create Consul User 17 | sudo useradd --system --home /etc/consul.d --shell /bin/false consul 18 | sudo mkdir --parents /opt/consul 19 | sudo chown --recursive consul:consul /opt/consul 20 | 21 | #Create Consul CA and server certificates 22 | consul tls ca create && consul tls cert create -server && sudo mv dc1-server-consul-0.pem dc1-server-consul-0-key.pem consul-agent-ca.pem /home/ubuntu/ 23 | 24 | #Create Systemd Config 25 | sudo cat << EOF > /etc/systemd/system/consul.service 26 | [Unit] 27 | Description="HashiCorp Consul - A service mesh solution" 28 | Documentation=https://www.consul.io/ 29 | Requires=network-online.target 30 | After=network-online.target 31 | ConditionFileNotEmpty=/etc/consul.d/server.json 32 | 33 | [Service] 34 | User=consul 35 | Group=consul 36 | ExecStart=/usr/local/bin/consul agent -config-file=/etc/consul.d/server.json 37 | ExecReload=/usr/local/bin/consul reload 38 | KillMode=process 39 | Restart=always 40 | LimitNOFILE=65536 41 | 42 | [Install] 43 | WantedBy=multi-user.target 44 | EOF 45 | 46 | #Create config dir 47 | sudo mkdir --parents /etc/consul.d 48 | sudo touch /etc/consul.d/server.json 49 | sudo chown --recursive consul:consul /etc/consul.d 50 | sudo chmod 640 /etc/consul.d/server.json 51 | 52 | cat << EOF > /etc/consul.d/server.json 53 | { 54 | "bootstrap": true, 55 | "ui": true, 56 | "enable_script_checks": false, 57 | "disable_remote_exec": true, 58 | "data_dir": "/opt/consul", 59 | "datacenter": "dc1", 60 | "log_level": "DEBUG", 61 | "server": true, 62 | "addresses": { 63 | "http": "0.0.0.0", 64 | "https": "0.0.0.0" 65 | }, 66 | "verify_incoming": false, 67 | "verify_outgoing": true, 68 | "verify_server_hostname": false, 69 | "ca_file": "/home/ubuntu/consul-agent-ca.pem", 70 | "cert_file": "/home/ubuntu/dc1-server-consul-0.pem", 71 | "key_file": "/home/ubuntu/dc1-server-consul-0-key.pem", 72 | "ports": { 73 | "https": 8443 74 | } 75 | } 76 | EOF 77 | 78 | #Enable the service 79 | sudo systemctl enable consul 80 | sudo service consul start 81 | sudo service consul status 82 | 83 | sudo apt update && curl -sL https://deb.nodesource.com/setup_16.x | sudo -E bash - && sudo apt-get -y install nodejs 84 | sudo cat << EOF > /home/ubuntu/alertforwarder.js 85 | 86 | const express = require( 'express' ); 87 | const app = express(); 88 | const fs = require('fs'); 89 | const bodyParser = require('body-parser'); 90 | const https = require('https'); 91 | const http = require('http'); 92 | const token = "${github_token}" //Required to authenticate with Github action repo 93 | const repoPath = '${repo_path}' //Modify to match designated github action repo 94 | 95 | /* 96 | Create Listening server - receive alerts from analytics provider 97 | */ 
98 | const options1 = { 99 | cert: "-----BEGIN CERTIFICATE-----\nMIIFWjCCBEKgAwIBAgITfQAAAB0gY6x6LLG8KwAAAAAAHTANBgkqhkiG9w0BAQUF\nADBOMRMwEQYKCZImiZPyLGQBGRYDY29tMRowGAYKCZImiZPyLGQBGRYKYXNlcnJh\nY29ycDEbMBkGA1UEAxMSYXNlcnJhY29ycC1EQy1DQS0xMB4XDTIwMDIxNTIyMTIw\nMloXDTIyMDIxNDIyMTIwMlowHzEdMBsGA1UEAxMUbXlhcHAuYXNlcnJhY29ycC5j\nb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTGBNbVYLJiIDfL0FQ\nMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sXfCty\nbszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A8fRf\nQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz4qS2\nIJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgFsJOl\nDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNYdHLg\nC3dPAgMBAAGjggJeMIICWjA7BgkrBgEEAYI3FQcELjAsBiQrBgEEAYI3FQiBg9Bp\ng5vnB4b5lxighjeD8YE0eYL+ujyChWkCAWQCAQMwEwYDVR0lBAwwCgYIKwYBBQUH\nAwEwDgYDVR0PAQH/BAQDAgWgMBsGCSsGAQQBgjcVCgQOMAwwCgYIKwYBBQUHAwEw\nHQYDVR0OBBYEFMXq6/mUs8bg5TUoL3uXPUyyAFyXMB8GA1UdIwQYMBaAFBEzMhC4\nl6myjmO0WBY2s0tLj1fQMIHOBgNVHR8EgcYwgcMwgcCggb2ggbqGgbdsZGFwOi8v\nL0NOPWFzZXJyYWNvcnAtREMtQ0EtMSxDTj1kYyxDTj1DRFAsQ049UHVibGljJTIw\nS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1h\nc2VycmFjb3JwLERDPWNvbT9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/\nb2JqZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0aW9uUG9pbnQwgccGCCsGAQUFBwEBBIG6\nMIG3MIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049YXNlcnJhY29ycC1EQy1DQS0x\nLENOPUFJQSxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxD\nTj1Db25maWd1cmF0aW9uLERDPWFzZXJyYWNvcnAsREM9Y29tP2NBQ2VydGlmaWNh\ndGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0aG9yaXR5MA0GCSqG\nSIb3DQEBBQUAA4IBAQC8xoaBDhn0BGqQ73ROjlvI+5yJs3UUws2D7KCtpzNt2Ksm\ngm52umZoIzofPRXg/RVkt+Ig7Y9+ixaEyAxMFtpDyap1bTNjWsw99LoUZvMo7B9q\nrgOS55h5OeLwc1PM3n43I9H2C3uQ1hYflD3ovzvzrywejCHlHlReovZkYCcrDCa+\nytw7Hob0P1vkXsPtpmk61A7PCLw6DghhczT1f4nAK147FuRl55jz38JFOtqKVlfU\nNH4EaSxciHO2evWDHUddzeAwxHLg77UKPH+MSPXd7jGZx3xqQEtpjMqq5WM09YsL\n1mwOJpk1Xarkb0WB0J10YXqKs6tSxyrfX/FL5MZA\n-----END CERTIFICATE-----", 100 | key: "-----BEGIN PRIVATE 
KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDTGBNbVYLJiIDf\nL0FQMi/mFzcLeQhP11c4YdxjMBPJKSBWKnXuxywcOJHJ6A2rpGKhAApSsVc6j6sX\nfCtybszbNgvx7KdV8c2S02ILNqlJwTOXRkQhN0srlATYdF/i6T1Y1fpkjBiZMC7A\n8fRfQNwT8DgberiuN4YvfsNGbQej+a1dOVQAGaBn15xwXe8Xfw9iLRangb8n4yAz\n4qS2IJig2UYzSc3P6k1ulZ6I1Yo/xOj7zar3R/09DZ6ikGBvy3TrSfYpRX8pXFgF\nsJOlDeYwzAKlKY4MnapgwszIFMmxveK/d3K+l3Kn0791VdBklrrlycV7itGXrqNY\ndHLgC3dPAgMBAAECggEADudNPo6L/FSR3LAGaXYRoaoT7dBuwMxQUR+/opUbYIFx\n0gOPbftL5vYrfNjQVkT13a4WDH6OzQilqLPQkXS3K+bl8v+lVNEMlYgtDOOgEh/8\n13pThxDTUtFRgkK9HlUfSq1Yz06A0hfvxRmQCkWXBCVaoL4KWep7o9DMUqWR+4ad\nXlvzvG2W3fvNE3+ewwf0tR/OYTQOZvkRfm0Ws1s0W85wr6Ec87psbLPPO2yecFcq\n3fJjcZmbaWWG5Thh9479W3rhC3I6rJN+YLgyXoumml5wmmjf8CxocUL3uPt+32u5\nE4OZTLdAIF0+KxH3hYbw3D6DB/LnAZVB+jxmOC4j2QKBgQDm5JVzld5KYUlIt566\nsyQ95JMyw0Oqp1U7WMwI8+RMYnO4NPo6Dzej9LMsVAQFmg5DncElSf3PLC9PfsVe\nCK6FiXPScy/9cAqchJVI3f7CgJiYvrFwoiieVJFYSgh52aWxL/KHnEe4UWk50qmS\n/hCyPdSCHJVw1oh5dIO/QGG+YwKBgQDqDFi8mNrUI/QD+m/2HNT+6PaXUWEfyY94\n/swvn9O+qQYWbU8MCxeucthTJ5p5lYY5FdUeKPGZn0jahgoEV63XnuemNe43tOJA\nDpo1UyWmQoodAOOm9QiEEjOAxx+hEcSfJrEUgGSYVR+GHbap+xuB0BrtCN9qWsdb\nU2d25b4xJQKBgQCV4SAardhkVA6sQ3WoIA2Ql8Xtl89fAcxT/+pCjX9PDkGr+8fK\n1IH7ziZYyhjqQfysa8UrHYLCbx4u7k3UIrKXQIiMvfUTAR4CSBZX/LMZMzzbOj4Y\nrUrMrHzE4Rnrbxsdj9BRs2LjBQXXYSZuornX2kcORtvDKZ/hp362MWbBnQKBgQCo\nSZZojXQTQ4LKdYGZsmOIotPkO9SdOZ3a/0KsH7zuA7Tn3VMQMs2lVtiq+ff94oCv\nfT5PQFtv/XMyBV0ggDb0qkKgZXjTP1HLg3RoUU/p+0A52JDYVKn55Oh5eTQJ6a+6\nS+TZ+/PZAKP5GFZmZLMDpTInK9ERNRLRXOgxOsKFrQKBgQDH6PfQTuvubwL+CYbb\nCI1AtWOGEGcuLIbtlbh5e4/1FxtdG2pgV2wBJxIwNhn8U7yMHj9B/MB39OAt6Vlc\nZU0Dah41RMi4dPGAi/iuTQklfRLjROSVmhb/lS9xDRxzHcm0u0YBuU0Q+MC3aw7O\njXWs11QDs5AR93mLB0AZdRjGLA==\n-----END PRIVATE KEY-----" 101 | 102 | }; 103 | 104 | https.createServer(options1, function (request, response) { 105 | if (request.method == 'POST') { 106 | 107 | const { headers, method, url } = request; 108 | let body = []; 109 | request.on('error', (err) => { 110 | console.error(err); 111 | 112 | }).on('data', (chunk) => { 113 | body.push(chunk); 114 | }).on('end', () => { 115 | body = Buffer.concat(body).toString(); 116 | bodyJson = JSON.parse(body); 117 | source = bodyJson.source; 118 | scaleAction = bodyJson.scaleAction; 119 | console.log(bodyJson); 120 | 121 | if (scaleAction == null){ 122 | console.log("error with scaleaction"); 123 | response.end(); 124 | }; 125 | 126 | if (source == "azurelaw"){ 127 | vals = bodyJson.SearchResults.tables[0].rows[0].toString(); 128 | var hostIndex = vals.search("bigip.azure") 129 | var hostLength = 20 130 | 131 | if ( hostIndex == -1) { 132 | hostIndex = vals.search("bigip.aws") 133 | hostLength = 18 134 | } 135 | hostName = vals.substring(hostIndex, hostIndex + hostLength) 136 | 137 | } else if (source == 'elk' || source == 'splunk' || source == 'default') { 138 | message = bodyJson.message 139 | var hostIndex = message.search("bigip.azure") 140 | var hostLength = 20 141 | 142 | if ( hostIndex == -1) { 143 | hostIndex = message.search("bigip.aws") 144 | hostlength = 18 145 | } 146 | hostName = message.substring(hostIndex, hostIndex + hostLength) 147 | 148 | } else { 149 | console.log("Invalid nalytics source specified") 150 | response.end(); 151 | } 152 | 153 | //Convert hostName to arrays and derive identifiers 154 | var n = hostName.split("."); 155 | cloud = n[1]; 156 | app_id = n[2]; 157 | 158 | //Create scaling eventtype 159 | var app_name = "app1"; 160 | switch (scaleAction) { 161 | case "scaleOutBigip": 162 | what2Scale = 'bigip'; 163 | scaling_direction = 'up' 164 | app_name = 
app_name 165 | break; 166 | case "scaleInBigip": 167 | what2Scale = 'bigip'; 168 | scaling_direction = 'down' 169 | app_name = app_name 170 | break; 171 | case "scaleOutWorkload": 172 | what2Scale = 'app'; 173 | scaling_direction = 'up' 174 | app_name = app_name 175 | break; 176 | case "scaleInWorkload": 177 | what2Scale = 'app'; 178 | scaling_direction = 'down' 179 | app_name = app_name 180 | break; 181 | } 182 | 183 | console.log("The application ID is " + app_id + ". Webhook request to scale the " + what2Scale + " " + scaling_direction + ". If relevant, the app name is '" + app_name + "'.") 184 | 185 | //Construct Github Action webhook payload 186 | const data2 = JSON.stringify({ 187 | event_type: "scale-" + cloud, 188 | client_payload: { 189 | scaling_type: what2Scale, 190 | app_name: app_name, 191 | scaling_direction: scaling_direction, 192 | webhook_source: source, 193 | app_id: app_id 194 | } 195 | }) 196 | 197 | const options = { 198 | hostname: 'api.github.com', 199 | port: 443, 200 | path: repoPath, 201 | method: 'POST', 202 | headers: { 203 | 'Content-Type': 'application/json', 204 | 'Content-Length': data2.length, 205 | 'Authorization': 'token ' + token, 206 | 'user-agent': 'node.js' 207 | } 208 | } 209 | 210 | /* 211 | Create https POST to github 212 | */ 213 | const req2 = https.request(options, res2 => { 214 | console.log("Processing operation complete.\n") 215 | 216 | res2.on('data', d => { 217 | process.stdout.write(d) 218 | }) 219 | }) 220 | 221 | req2.on('error', error => { 222 | console.error(error) 223 | }) 224 | 225 | // submit payload via webhook to Github Action 226 | req2.write(data2) 227 | req2.end() 228 | 229 | response.on('error', (err) => { 230 | console.error(err); 231 | }); 232 | 233 | response.writeHead(200, {'Content-Type': 'application/json'}) 234 | const responseBody = { headers, method, url, body }; 235 | response.write(JSON.stringify(responseBody)); 236 | 237 | response.end(); 238 | }); 239 | } 240 | else { 241 | console.log("Invalid HTTP method"); 242 | response.end(); 243 | } 244 | 245 | // Start listener 246 | console.log("Starting alert processor...\n"); 247 | }).listen(8000); 248 | EOF 249 | cd /home/ubuntu && npm install request && npm install express && npm install body-parser && npm install http && npm install fs && npm install https && sudo chmod +x /home/ubuntu/alertforwarder.js 250 | 251 | sudo cat << EOF > /etc/systemd/system/alertforwarder.service 252 | [Unit] 253 | Description=alertforwarder 254 | [Service] 255 | ExecStart=/usr/bin/node /home/ubuntu/alertforwarder.js 256 | Restart=always 257 | User=nobody 258 | # Note Debian/Ubuntu uses 'nogroup', RHEL/Fedora uses 'nobody' 259 | Group=nogroup 260 | Environment=PATH=/usr/bin:/usr/local/bin:/home/ubuntu 261 | Environment=NODE_ENV=production 262 | WorkingDirectory=/home/ubuntu 263 | 264 | [Install] 265 | WantedBy=multi-user.target 266 | EOF 267 | 268 | sudo systemctl start alertforwarder.service && sudo systemctl stop alertforwarder.service && sudo systemctl restart alertforwarder.service 269 | 270 | -------------------------------------------------------------------------------- /templates/consul_server.tpl: -------------------------------------------------------------------------------- 1 | "http://${consul_ip}:8500" 2 | -------------------------------------------------------------------------------- /templates/do.json: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": "1.0.0", 3 | "class": "Device", 4 | "async": true, 5 | "label": 
"Onboard BIG-IP", 6 | "Common": { 7 | "class": "Tenant", 8 | "hostname": "${hostname}", 9 | "dbVars": { 10 | "class": "DbVariables", 11 | "ui.advisory.enabled": true, 12 | "ui.advisory.color": "green", 13 | "ui.advisory.text": "/Common/hostname", 14 | "config.allow.rfc3927": "enable" 15 | }, 16 | "myDns": { 17 | "class": "DNS", 18 | "nameServers": [ 19 | "${dns_server}", 20 | "2001:4860:4860::8844" 21 | ], 22 | "search": [ 23 | "f5.com" 24 | ] 25 | }, 26 | "myNtp": { 27 | "class": "NTP", 28 | "servers": [ 29 | "${ntp_server}", 30 | "1.pool.ntp.org", 31 | "2.pool.ntp.org" 32 | ], 33 | "timezone": "${timezone}" 34 | }, 35 | "myProvisioning": { 36 | "class": "Provision", 37 | "ltm": "nominal", 38 | "avr": "nominal", 39 | "asm": "nominal" 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /templates/onboard.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # BIG-IPS ONBOARD SCRIPT 4 | 5 | LOG_FILE=${onboard_log} 6 | 7 | if [ ! -e $LOG_FILE ] 8 | then 9 | touch $LOG_FILE 10 | exec &>>$LOG_FILE 11 | else 12 | #if file exists, exit as only want to run once 13 | exit 14 | fi 15 | 16 | exec 1>$LOG_FILE 2>&1 17 | 18 | # CHECK TO SEE NETWORK IS READY 19 | CNT=0 20 | while true 21 | do 22 | STATUS=$(curl -s -k -I example.com | grep HTTP) 23 | if [[ $STATUS == *"200"* ]]; then 24 | echo "Got 200! VE is Ready!" 25 | break 26 | elif [ $CNT -le 6 ]; then 27 | echo "Status code: $STATUS Not done yet..." 28 | CNT=$[$CNT+1] 29 | else 30 | echo "GIVE UP..." 31 | break 32 | fi 33 | sleep 10 34 | done 35 | 36 | sleep 60 37 | 38 | ############################################### 39 | #### Download F5 Automation Toolchain RPMs ### 40 | ############################################### 41 | 42 | # Variables 43 | admin_username='${admin_username}' 44 | admin_password='${admin_password}' 45 | 46 | # Adding bigip user and password 47 | 48 | user_status=`tmsh list auth user $admin_username` 49 | if [[ $user_status != "" ]]; then 50 | response_status=`tmsh modify auth user $admin_username password $admin_password` 51 | echo "Response Code for setting user and password:$response_status" 52 | fi 53 | if [[ $user_status == "" ]]; then 54 | response_status=`tmsh create auth user $admin_username password $admin_password partition-access add { all-partitions { role admin } }` 55 | echo "Response Code for setting user and password:$response_status" 56 | fi 57 | 58 | CREDS="bigipuser:"$admin_password 59 | DO_URL='${DO_URL}' 60 | DO_FN=$(basename "$DO_URL") 61 | AS3_URL='${AS3_URL}' 62 | AS3_FN=$(basename "$AS3_URL") 63 | TS_URL='${TS_URL}' 64 | TS_FN=$(basename "$TS_URL") 65 | 66 | mkdir -p ${libs_dir} 67 | 68 | 69 | echo -e "\n"$(date) "Download Telemetry (TS) Pkg" 70 | curl -L -k -o ${libs_dir}/$TS_FN $TS_URL 71 | 72 | echo -e "\n"$(date) "Download Declarative Onboarding (DO) Pkg" 73 | curl -L -k -o ${libs_dir}/$DO_FN $DO_URL 74 | 75 | echo -e "\n"$(date) "Download Application Services 3 (AS3) Pkg" 76 | curl -L -k -o ${libs_dir}/$AS3_FN $AS3_URL 77 | 78 | sleep 300 79 | 80 | # Copy the RPM Pkg to the file location 81 | cp ${libs_dir}/*.rpm /var/config/rest/downloads/ 82 | 83 | # Install Telemetry Streaming Pkg 84 | DATA="{\"operation\":\"INSTALL\",\"packageFilePath\":\"/var/config/rest/downloads/$TS_FN\"}" 85 | echo -e "\n"$(date) "Install TS Pkg" 86 | curl -u $CREDS -X POST http://localhost:8100/mgmt/shared/iapp/package-management-tasks -d $DATA 87 | 88 | sleep 10 89 | 90 | # Install Declarative Onboarding Pkg 91 
| DATA="{\"operation\":\"INSTALL\",\"packageFilePath\":\"/var/config/rest/downloads/$DO_FN\"}" 92 | echo -e "\n"$(date) "Install DO Pkg" 93 | curl -u $CREDS -X POST http://localhost:8100/mgmt/shared/iapp/package-management-tasks -d $DATA 94 | 95 | sleep 10 96 | 97 | # Install AS3 Pkg 98 | DATA="{\"operation\":\"INSTALL\",\"packageFilePath\":\"/var/config/rest/downloads/$AS3_FN\"}" 99 | echo -e "\n"$(date) "Install AS3 Pkg" 100 | curl -u $CREDS -X POST http://localhost:8100/mgmt/shared/iapp/package-management-tasks -d $DATA 101 | 102 | sleep 10 103 | 104 | # Check DO Ready 105 | CNT=0 106 | echo -e "\n"$(date) "Check DO Ready" 107 | while true 108 | do 109 | STATUS=$(curl -u $CREDS -X GET -s -k -I https://localhost:8443/mgmt/shared/declarative-onboarding/info | grep HTTP) 110 | if [[ $STATUS == *"200"* ]]; then 111 | echo -e "\n"$(date) "Got 200! DO is Ready!" 112 | break 113 | elif [ $CNT -le 6 ]; then 114 | echo -e "\n"$(date) "Status code: $STATUS DO Not done yet..." 115 | CNT=$[$CNT+1] 116 | else 117 | echo -e "\n"$(date) "(DO) GIVE UP..." 118 | break 119 | fi 120 | sleep 10 121 | done 122 | 123 | # Check AS3 Ready 124 | CNT=0 125 | echo -e "\n"$(date) "Check AS3 Ready" 126 | while true 127 | do 128 | STATUS=$(curl -u $CREDS -X GET -s -k -I https://localhost:8443/mgmt/shared/appsvcs/info | grep HTTP) 129 | if [[ $STATUS == *"200"* ]]; then 130 | echo -e "\n"$(date) "Got 200! AS3 is Ready!" 131 | break 132 | elif [ $CNT -le 6 ]; then 133 | echo -e "\n"$(date) "Status code: $STATUS AS3 Not done yet..." 134 | CNT=$[$CNT+1] 135 | else 136 | echo -e "\n"$(date) "(AS3) GIVE UP..." 137 | break 138 | fi 139 | sleep 10 140 | done 141 | 142 | # Check TS Ready 143 | CNT=0 144 | echo -e "\n"$(date) "Check TS Ready" 145 | while true 146 | do 147 | STATUS=$(curl -u $CREDS -X GET -s -k -I https://localhost:8443/mgmt/shared/telemetry/info | grep HTTP) 148 | if [[ $STATUS == *"200"* ]]; then 149 | echo -e "\n"$(date) "Got 200! TS is Ready!" 150 | break 151 | elif [ $CNT -le 6 ]; then 152 | echo -e "\n"$(date) "Status code: $STATUS TS Not done yet..." 153 | CNT=$[$CNT+1] 154 | else 155 | echo -e "\n"$(date) "(TS) GIVE UP..." 156 | break 157 | fi 158 | sleep 10 159 | done 160 | 161 | # Delete RPM packages 162 | echo -e "\n"$(date) "Removing temporary RPM install packages" 163 | rm -rf /var/config/rest/downloads/*.rpm 164 | 165 | sleep 5 166 | 167 | ###################################### 168 | #### POST DO and AS3 Declarations #### 169 | ###################################### 170 | 171 | # Variables DO urls 172 | doUrl="/mgmt/shared/declarative-onboarding" 173 | doCheckUrl="/mgmt/shared/declarative-onboarding/info" 174 | doTaskUrl="/mgmt/shared/declarative-onboarding/task" 175 | # Variables AS3 urls 176 | as3Url="/mgmt/shared/appsvcs/declare" 177 | as3CheckUrl="/mgmt/shared/appsvcs/info" 178 | as3TaskUrl="/mgmt/shared/appsvcs/task" 179 | # Variables TS urls 180 | tsUrl="/mgmt/shared/telemetry/declare" 181 | tsCheckUrl="/mgmt/shared/telemetry/info" 182 | tsTaskUrl="/mgmt/shared/telemetry/task" 183 | 184 | # Declaration content 185 | cat > /config/do.json < /config/as3.json <<'EOF' 189 | ${AS3_Document} 190 | EOF 191 | cat > /config/ts.json < 14 | 15 | 2. From the center panel, select '*Create index pattern*', (see below). 16 | 17 | Flowers 18 | 19 | 3. On the *Create Index Pattern* screen enter ``f5-*`` for the index pattern name. As the example below illustrates, you should see 20 | several indexes listed below. 
As telemetry data is streamed from the BIG-IP(s) to the ELK stack (via Logstash - the '*L*' in ELK) 21 | it is assigned an index with a pattern of **f5-%{+YYYY.MM.dd.hh.mm}**. Click '*Next Step*' to continue. 22 | 23 | Flowers 24 | 25 | 4. Select **@timestamp** from the drop-down list for the '*Time Field*'. Select '*Create index pattern*' to complete the process. 26 | 27 | Flowers 28 | 29 | 30 | **Create Watcher Alerts** 31 | -------------------------------------- 32 | 33 | You will be using Elastic Watcher to monitor telemetry data and provide alert notifications. While still in the *Stack Management* 34 | submenu, navigate to and select '*Watcher*', (see above). From the center panel select '*Create*' and then '*Create threshold alert*'. 35 | 36 | Flowers 37 | 38 | For this solution, create a total of four (4) alerts. These alerts will monitor and respond to increases/decreases in BIG-IP CPU 39 | utilization and current application connections. In the event a member BIG-IP's CPU utilization exceeds or falls below the specified thresholds during the specified interval, an alert will fire, triggering a webhook call to the *alertForwarder* service. 40 | The alertForwarder will subsequently post a BIG-IP scaling request to the central processor, (utilizing the repo's **GitHub Actions**). 41 | 42 | Likewise, if current connections fall outside of the specified thresholds a similar alert will be fired. However, rather than 43 | scaling BIG-IP instances, this will trigger a scaling (up/down) of the backend application workloads, (*solution example: NGINX*). Use the screenshot example below to create the first alert, (*MaxCpuAlert*). 44 | 45 | #. Provide a name, select the previously created index pattern of ``f5-*``, timestamp and timing parameters as shown below. Under the 46 | conditions section, select **Max()**, **myMaxCpu**, **top 1**, **hostname.keyword**, **5000** and **5 minutes** as shown below. 47 | 48 | **Note:** You should see a green line on the displayed chart that represents the selected field's (*myMaxCpu*) value trend. 49 | This will aid you in setting threshold values appropriately to ensure scaling events are triggered. With that said, the lab 50 | environment has been configured with hard limits of (3) BIG-IP instances and (3) workload instances to ensure availability of 51 | resources for all students. Additionally, the ADPM processor is designed to throttle requests and prevent superfluous "over-scaling". Requests that are triggered but not fulfilled (along with successful requests) are logged on your environment's Consul server. 52 | 53 | Flowers 54 | 55 | #. In the *Actions* section select '*Add action*'. From the menu pop-up select '*Webhook*', (see below). 56 | 57 | Flowers 58 | 59 | #. Use the example below to complete the webhook section. When you are done, select '*Create alert*'. Specify ``alertforwarder.f5demo.net`` for the Host. For the webhook body 60 | enter ``{"source": "elk", "scaleAction":"scaleOutBigip", "message": "{{ctx.payload}}"}``. The *alertForwarder* service expects this JSON-formatted 61 | payload and parses it according to source. The *alertForwarder* then calls the central processor (via webhook) to trigger scaling. 62 | 63 | Flowers 64 | 65 | #. Use the table and example images below to create three additional alerts. Entries not noted in the table below are identical 66 | across alerts. 67 | 68 | ..
list-table:: 69 | :widths: 10 10 20 40 20 60 80 70 | :header-rows: 1 71 | :stub-columns: 1 72 | 73 | * - **Name** 74 | - **WHEN** 75 | - **OF** 76 | - **GROUPED OVER** 77 | - **IS** 78 | - **LAST** 79 | - **Webhook body** 80 | * - MinCpuAlert 81 | - max() 82 | - myCurCons 83 | - top 1 of hostname.keyword 84 | - BELOW 1000 85 | - 5 minutes 86 | - ``{"source": "elk", "scaleAction":"scaleInBigip", "message": "{{ctx.payload}}"}`` 87 | * - MinConnsAlert 88 | - max() 89 | - myCurCons 90 | - top 1 of hostname.keyword 91 | - BELOW 50 92 | - 5 minutes 93 | - ``{"source": "elk", "scaleAction":"scaleInWorkload", "message": "{{ctx.payload}}"}`` 94 | * - MaxConnsAlert 95 | - max() 96 | - myCurCons 97 | - top 1 of hostname.keyword 98 | - ABOVE 500 99 | - 5 minutes 100 | - ``{"source": "elk", "scaleAction":"scaleOutWorkload", "message": "{{ctx.payload}}"}`` 101 | 102 | Flowers 103 | 104 | Below is an example of a completed Watcher screen. TS logs are streamed in 60-second intervals. Depending upon how you set 105 | your thresholds, you may already have alerts firing. The Watcher screen provides one way to monitor alert events. 106 | 107 | Flowers 108 | 109 | -------------------------------------------------------------------------------- /ts_consumers/elastic/elk.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_public_ip" "elk_public_ip" { 2 | name = "pip-mgmt-elk" 3 | location = var.location 4 | resource_group_name = azurerm_resource_group.rg.name 5 | allocation_method = "Static" # Static is required due to the use of the Standard sku 6 | tags = { 7 | Name = "pip-mgmt-elk" 8 | source = "terraform" 9 | } 10 | } 11 | 12 | data "azurerm_public_ip" "elk_public_ip" { 13 | name = azurerm_public_ip.elk_public_ip.name 14 | resource_group_name = azurerm_resource_group.rg.name 15 | } 16 | 17 | resource "azurerm_network_interface" "elkvm-ext-nic" { 18 | name = "${local.app_id}-elkvm-ext-nic" 19 | location = var.location 20 | resource_group_name = azurerm_resource_group.rg.name 21 | ip_configuration { 22 | name = "primary" 23 | subnet_id = data.azurerm_subnet.mgmt.id 24 | private_ip_address_allocation = "Static" 25 | private_ip_address = "10.2.1.125" 26 | primary = true 27 | public_ip_address_id = azurerm_public_ip.elk_public_ip.id 28 | } 29 | 30 | tags = { 31 | Name = "${local.app_id}-elkvm-ext-int" 32 | application = "elkserver" 33 | tag_name = "Env" 34 | value = "elk" 35 | } 36 | } 37 | 38 | resource "azurerm_virtual_machine" "elkvm" { 39 | name = "elkvm" 40 | location = var.location 41 | resource_group_name = azurerm_resource_group.rg.name 42 | network_interface_ids = [azurerm_network_interface.elkvm-ext-nic.id] 43 | vm_size = "Standard_DS3_v2" 44 | 45 | # Uncomment this line to delete the OS disk automatically when deleting the VM 46 | delete_os_disk_on_termination = true 47 | 48 | # Uncomment this line to delete the data disks automatically when deleting the VM 49 | delete_data_disks_on_termination = true 50 | 51 | storage_os_disk { 52 | name = "elkvmOsDisk" 53 | caching = "ReadWrite" 54 | create_option = "FromImage" 55 | managed_disk_type = "Premium_LRS" 56 | } 57 | 58 | storage_image_reference { 59 | publisher = "Canonical" 60 | offer = "UbuntuServer" 61 | sku = "16.04.0-LTS" 62 | version = "latest" 63 | } 64 | 65 | os_profile { 66 | computer_name = "elkvm" 67 | admin_username = "elkuser" 68 | admin_password = var.upassword 69 | custom_data = file("../scripts/elk.sh") 70 | 71 | } 72 | 73 | os_profile_linux_config { 74 | 
disable_password_authentication = false 75 | } 76 | 77 | tags = { 78 | Name = "${local.app_id}-elkvm" 79 | tag_name = "Env" 80 | value = "elk" 81 | propagate_at_launch = true 82 | } 83 | 84 | connection { 85 | type = "ssh" 86 | user = "elkuser" 87 | password = var.upassword 88 | host = data.azurerm_public_ip.elk_public_ip.ip_address 89 | } 90 | } 91 | 92 | output "elk_public_address" { 93 | value = "http://${azurerm_public_ip.elk_public_ip.ip_address}:8000" 94 | } 95 | -------------------------------------------------------------------------------- /ts_consumers/elastic/logstash.conf: -------------------------------------------------------------------------------- 1 | input { 2 | http { 3 | port => 8080 4 | } 5 | } 6 | 7 | filter { 8 | json { 9 | source => "message" 10 | } 11 | 12 | mutate { 13 | add_field => { "myMaxCpu" =>" %{MaxCpu}"} 14 | add_field => { "myCurCons" =>" %{server_concurrent_conns}"} 15 | } 16 | 17 | mutate { 18 | convert => { "myMaxCpu" => "integer" } 19 | convert => { "myCurCons" => "integer" } 20 | } 21 | } 22 | 23 | output { 24 | 25 | elasticsearch { 26 | hosts => ["https://127.0.0.1:9200"] 27 | user => "elastic" 28 | password => "F5demonet!" 29 | codec => json 30 | index => "f5-%{+YYYY.MM.dd.hh.mm}" 31 | ssl => true 32 | ssl_certificate_verification => false 33 | cacert => "/etc/logstash/elasticsearch-ca.pem" 34 | } 35 | } -------------------------------------------------------------------------------- /ts_consumers/splunk/README.md: -------------------------------------------------------------------------------- 1 | Configuring Alerts with Splunk Enterprise 2 | ==================================================== 3 | 4 | **Installing the Splunk Add-on for F5 BIG-IP and Splunk CIM** 5 | ----------------------------------------------------------- 6 | 7 | Installing the Splunk Add-on for F5 BIG-IP is very simple. I will go over the steps below. In order to make use of the add-on I’ll need to install Splunk’s Common Information Model, (CIM) first and here is how to do that. 8 | 9 | 1. From the Splunk Enterprise search page, select ‘Apps’ → ‘Find More Apps’. 10 | 11 | 1. Browse for “CIM” and select the Splunk Common Information Model add-on. 12 | 13 | 1. Accept the license agreement, provide Splunk account login credentials and select ‘Login and Install’. 14 | 15 | 1. Repeat steps 2-3 to install the Splunk Add-on for F5 BIG-IP. 16 | 17 | 18 | **Setup Splunk HTTP Event Collector** 19 | ------------------------------------- 20 | 21 | To receive incoming telemetry data into my Splunk Enterprise environment over HTTP/HTTPs, I will need to create an HTTP Event Collector. 22 | 23 | 1. From the Splunk UI select ‘Settings’ → ‘Data Inputs’. Select ‘HTTP Event Collector’ from the input list. 24 | 25 | 1. Prior to creating a new event collector token, I must first enable token access for my Splunk environment. On the ‘HTTP Event Collector’ page, select ‘Global Settings’. I set ‘All Tokens’ to enabled, default index, incoming port and ensure SSL is enabled. Click ‘Save’ to exit. 26 | 27 | 1. Select ‘New Token’ and provide a name for the new collector and select ‘Next’. 28 | 29 | 1. On the ‘Input Settings’ tab select the necessary allowed index(es) and select ‘Review’ then ‘Submit’. 30 | 31 | 1. Once the token is created, copy and save the token information so that it can be used when configuring F5 Telemetry streaming. 
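Before pointing Telemetry Streaming at the new collector, it is worth sanity-checking the token with a test event. A minimal sketch (the hostname and token value are placeholders for your environment; 8088 is Splunk's default HEC port, so substitute whatever you chose in Global Settings):

```
curl -k "https://<splunk-host>:8088/services/collector/event" \
  -H "Authorization: Splunk <your-HEC-token>" \
  -d '{"sourcetype": "f5:telemetry:json", "event": {"test": "hello from the event collector"}}'
```

A `{"text":"Success","code":0}` response confirms the collector is reachable and the token is valid.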
32 | 33 | **Create Splunk Alerts** 34 | -------------------------------------- 35 | 36 | ### Splunk alert query examples 37 | 38 | **BIG-IP Scaling** 39 | ``` 40 | sourcetype="f5:telemetry:json" telemetryEventCategory=AVR MaxCpu>8000 | table hostname |eval source="splunk", scaleAction="scaleOutBigip" 41 | ``` 42 | ``` 43 | sourcetype="f5:telemetry:json" telemetryEventCategory=AVR MaxCpu<3000 | table hostname |eval source="splunk", scaleAction="scaleInBigip" 44 | ``` 45 | 46 | **Workload Scaling** 47 | ``` 48 | sourcetype="f5:telemetry:json" telemetryEventCategory=AVR MaxConcurrentConnections>3000 | table hostname |eval source="splunk", scaleAction="scaleOutWorkload" 49 | ``` 50 | ``` 51 | sourcetype="f5:telemetry:json" telemetryEventCategory=AVR MaxConcurrentConnections<500 | table hostname |eval source="splunk", scaleAction="scaleInWorkload" 52 | ``` 53 | 54 | ### Create Splunk Alerts 55 | 56 | Flowers 57 | 58 | Flowers 59 | 60 | Flowers -------------------------------------------------------------------------------- /ts_consumers/splunk/splunk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo 3 | echo '##############################################' 4 | echo '# #' 5 | echo '# Welcome to the Splunk 7.0.2 auto-installer #' 6 | echo '# for CentOS 7 x64. #' 7 | echo '# Last updated 03/12/2018. #' 8 | echo '# Enter the "splunk" linux user account #' 9 | echo '# password and press enter to let the magic #' 10 | echo '# happen. Note: You will change the Splunk #' 11 | echo '# Web admin password upon first login. #' 12 | echo '# #' 13 | echo '##############################################' 14 | echo 15 | echo 16 | echo "never" > /sys/kernel/mm/transparent_hugepage/enabled 17 | echo "never" > /sys/kernel/mm/transparent_hugepage/defrag 18 | echo "[Unit]" > /etc/systemd/system/disable-thp.service 19 | echo "Description=Disable Transparent Huge Pages" >> /etc/systemd/system/disable-thp.service 20 | echo "" >> /etc/systemd/system/disable-thp.service 21 | echo "[Service]" >> /etc/systemd/system/disable-thp.service 22 | echo "Type=simple" >> /etc/systemd/system/disable-thp.service 23 | echo 'ExecStart=/bin/sh -c "echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag"' >> /etc/systemd/system/disable-thp.service 24 | echo "Type=simple" >> /etc/systemd/system/disable-thp.service 25 | echo "" >> /etc/systemd/system/disable-thp.service 26 | echo "[Install]" >> /etc/systemd/system/disable-thp.service 27 | echo "WantedBy=multi-user.target" >> /etc/systemd/system/disable-thp.service 28 | systemctl daemon-reload 29 | systemctl start disable-thp 30 | systemctl enable disable-thp 31 | echo 32 | echo "Transparent Huge Pages (THP) Disabled." 33 | echo 34 | ulimit -n 64000 35 | ulimit -u 20480 36 | echo "DefaultLimitFSIZE=-1" >> /etc/systemd/system.conf 37 | echo "DefaultLimitNOFILE=64000" >> /etc/systemd/system.conf 38 | echo "DefaultLimitNPROC=20480" >> /etc/systemd/system.conf 39 | echo 40 | echo "ulimit Increased." 41 | echo 42 | yum install wget -y 43 | cd /tmp 44 | wget -O splunk-7.0.2-03bbabbd5c0f-Linux-x86_64.tgz 'https://www.splunk.com/bin/splunk/DownloadActivityServlet?architecture=x86_64&platform=linux&version=7.0.2&product=splunk&filename=splunk-7.0.2-03bbabbd5c0f-Linux-x86_64.tgz&wget=true' 45 | echo 46 | echo "Splunk Downloaded."
47 | echo 48 | tar -xzvf /tmp/splunk-7.0.2-03bbabbd5c0f-Linux-x86_64.tgz -C /opt 49 | rm -f /tmp/splunk-7.0.2-03bbabbd5c0f-Linux-x86_64.tgz 50 | useradd splunk 51 | echo splunk:F5demonet! > /tmp/pwdfile 52 | cat /tmp/pwdfile | chpasswd 53 | rm -f /tmp/pwdfile 54 | echo 55 | echo "Splunk installed and splunk linux user created." 56 | echo 57 | echo "[settings]" > /opt/splunk/etc/system/local/web.conf 58 | echo "enableSplunkWebSSL = true" >> /opt/splunk/etc/system/local/web.conf 59 | echo 60 | echo "HTTPS enabled for Splunk Web using self-signed certificate." 61 | echo 62 | chown -R splunk:splunk /opt/splunk 63 | afz=`firewall-cmd --get-active-zone | head -1` 64 | firewall-cmd --zone=$afz --add-port=8000/tcp --permanent 65 | firewall-cmd --zone=$afz --add-port=8065/tcp --permanent 66 | firewall-cmd --zone=$afz --add-port=8089/tcp --permanent 67 | firewall-cmd --zone=$afz --add-port=8191/tcp --permanent 68 | firewall-cmd --zone=$afz --add-port=9997/tcp --permanent 69 | firewall-cmd --zone=$afz --add-port=8080/tcp --permanent 70 | firewall-cmd --zone=$afz --add-port=10514/udp --permanent 71 | firewall-cmd --reload 72 | echo 73 | echo "Firewall ports used by Splunk opened." 74 | echo "[splunktcp]" > /opt/splunk/etc/system/local/inputs.conf 75 | echo "[splunktcp://9997]" >> /opt/splunk/etc/system/local/inputs.conf 76 | echo "index = main" >> /opt/splunk/etc/system/local/inputs.conf 77 | echo "disabled = 0" >> /opt/splunk/etc/system/local/inputs.conf 78 | echo "" >> /opt/splunk/etc/system/local/inputs.conf 79 | echo "[udp://10514]" >> /opt/splunk/etc/system/local/inputs.conf 80 | echo "index = main" >> /opt/splunk/etc/system/local/inputs.conf 81 | echo "disabled = 0" >> /opt/splunk/etc/system/local/inputs.conf 82 | chown splunk:splunk /opt/splunk/etc/system/local/inputs.conf 83 | echo 84 | echo "Enabled Splunk TCP input over 9997 and UDP traffic input over 10514." 85 | echo 86 | runuser -l splunk -c '/opt/splunk/bin/splunk start --accept-license' 87 | /opt/splunk/bin/splunk enable boot-start -user splunk 88 | runuser -l splunk -c '/opt/splunk/bin/splunk stop' 89 | chown root:splunk /opt/splunk/etc/splunk-launch.conf 90 | chmod 644 /opt/splunk/etc/splunk-launch.conf 91 | echo 92 | echo "Splunk test start and stop complete. Enabled Splunk to start at boot. Also, adjusted splunk-launch.conf to mitigate privilege escalation attack." 93 | echo 94 | runuser -l splunk -c '/opt/splunk/bin/splunk start' 95 | if [[ -f /opt/splunk/bin/splunk ]] 96 | then 97 | echo Splunk Enterprise 98 | cat /opt/splunk/etc/splunk.version | head -1 99 | echo "has been installed, configured, and started!" 100 | echo "Visit the Splunk server using https://hostNameORip:8000 as mentioned above." 101 | echo 102 | echo 103 | echo " HAPPY SPLUNKING!!!" 104 | echo 105 | echo 106 | echo 107 | else 108 | echo Splunk Enterprise has FAILED install! 
109 | fi 110 | #End of File 111 | -------------------------------------------------------------------------------- /ts_consumers/splunk/splunk.tf: -------------------------------------------------------------------------------- 1 | # Deploy Splunk Enterprise instance 2 | 3 | resource "azurerm_public_ip" "splunk_public_ip" { 4 | name = "pip-mgmt-splunk" 5 | location = var.location 6 | resource_group_name = azurerm_resource_group.rg.name 7 | allocation_method = "Static" # Static is required due to the use of the Standard sku 8 | tags = { 9 | Name = "pip-mgmt-splunk" 10 | source = "terraform" 11 | } 12 | } 13 | 14 | resource "azurerm_network_interface" "splunkvm-ext-nic" { 15 | name = "${local.app_id}-splunkvm-ext-nic" 16 | location = var.location 17 | resource_group_name = azurerm_resource_group.rg.name 18 | ip_configuration { 19 | name = "primary" 20 | subnet_id = data.azurerm_subnet.mgmt.id 21 | private_ip_address_allocation = "Static" 22 | private_ip_address = "10.2.1.135" 23 | primary = true 24 | public_ip_address_id = azurerm_public_ip.splunk_public_ip.id 25 | } 26 | 27 | tags = { 28 | Name = "${local.app_id}-splunkvm-ext-int" 29 | application = "splunkserver" 30 | tag_name = "Env" 31 | value = "splunk" 32 | } 33 | } 34 | 35 | resource "azurerm_virtual_machine" "splunkvm" { 36 | name = "splunkvm" 37 | location = var.location 38 | resource_group_name = azurerm_resource_group.rg.name 39 | network_interface_ids = [azurerm_network_interface.splunkvm-ext-nic.id] 40 | vm_size = "Standard_DS3_v2" 41 | 42 | # Uncomment this line to delete the OS disk automatically when deleting the VM 43 | delete_os_disk_on_termination = true 44 | 45 | # Uncomment this line to delete the data disks automatically when deleting the VM 46 | delete_data_disks_on_termination = true 47 | 48 | storage_os_disk { 49 | name = "splunkvmOsDisk" 50 | caching = "ReadWrite" 51 | create_option = "FromImage" 52 | managed_disk_type = "Premium_LRS" 53 | } 54 | 55 | storage_image_reference { 56 | publisher = "Canonical" 57 | offer = "UbuntuServer" 58 | sku = "16.04.0-LTS" 59 | version = "latest" 60 | } 61 | 62 | os_profile { 63 | computer_name = "splunkvm" 64 | admin_username = "splunk" 65 | admin_password = var.upassword 66 | custom_data = file("splunk.sh") 67 | 68 | } 69 | 70 | os_profile_linux_config { 71 | disable_password_authentication = false 72 | } 73 | 74 | tags = { 75 | Name = "${local.app_id}-splunkvm" 76 | tag_name = "Env" 77 | value = "splunk" 78 | propagate_at_launch = true 79 | } 80 | } 81 | 82 | output "splunk_public_address" { 83 | value = "https://${azurerm_public_ip.splunk_public_ip.ip_address}:8000" 84 | } 85 | --------------------------------------------------------------------------------
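Once `terraform apply` completes, the Splunk UI address is available from the output above. A usage sketch (the address shown is illustrative):

```
terraform output splunk_public_address
# => https://203.0.113.10:8000   (the Web admin password is changed at first login; see the splunk.sh banner)
```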