├── .gitignore
├── Examples
├── NOTP_internal_access_only
│ ├── main.tf
│ ├── provider.tf
│ ├── readme.md
│ ├── variables.tf
│ └── variables.tfvars
└── NOTP_protected_with_eOTP
│ ├── main.tf
│ ├── provider.tf
│ ├── readme.md
│ ├── variables.tf
│ └── variables.tfvars
├── assets
├── common_docs
│ ├── ansible
│ │ └── ansible_password_based_ssh.md
│ └── terraform
│ │ ├── folder_structure.md
│ │ └── terraform_usage.md
├── day0-n.png
├── gateway
│ └── ldap_radius_architecture_diag.png
├── netscalerautomationtoolkit.png
├── terraformlab1part1.gif
└── terraformlab1part2.gif
├── events
├── 20230206
│ ├── step1_configure_ha
│ │ ├── example.tfvars
│ │ ├── main.tf
│ │ ├── variables.tf
│ │ └── versions.tf
│ └── step2_further_configs
│ │ ├── example.tfvars
│ │ ├── main.tf
│ │ ├── variables.tf
│ │ └── versions.tf
├── 20230816
│ ├── basic-content-switching.yaml
│ ├── environment-variables.sh
│ ├── inventory.ini
│ └── sessionid_based_authentication_via_login_logout.yaml
├── 20230920
│ ├── configure-waf.yaml
│ ├── inventory.ini
│ ├── netscaler-waf-final.gif
│ └── netscaler1.env
└── 20231115
│ ├── README.md
│ ├── github-workflows
│ ├── terraform-apply.yml
│ └── terraform-plan.yml
│ └── terraform-scripts
│ ├── main.tf
│ ├── provider.tf
│ └── variable.tf
├── golden_templates
├── README.md
├── netscaler_gateway
│ ├── README.md
│ ├── ldap_radius
│ │ ├── README.md
│ │ ├── step1_configure_ha
│ │ │ ├── example.tfvars
│ │ │ ├── main.tf
│ │ │ ├── variables.tf
│ │ │ └── versions.tf
│ │ └── step2_gateway_ldap_radius
│ │ │ ├── example.tfvars
│ │ │ ├── main.tf
│ │ │ ├── variables.tf
│ │ │ └── versions.tf
│ ├── oauth
│ │ ├── README.md
│ │ ├── step1_configure_ha
│ │ │ ├── example.tfvars
│ │ │ ├── main.tf
│ │ │ ├── variables.tf
│ │ │ └── versions.tf
│ │ └── step2_gateway_oauth
│ │ │ ├── example.tfvars
│ │ │ ├── main.tf
│ │ │ ├── variables.tf
│ │ │ └── versions.tf
│ └── saml
│ │ ├── README.md
│ │ ├── step1_configure_ha
│ │ ├── example.tfvars
│ │ ├── main.tf
│ │ ├── variables.tf
│ │ └── versions.tf
│ │ └── step2_gateway_saml
│ │ ├── example.tfvars
│ │ ├── main.tf
│ │ ├── variables.tf
│ │ └── versions.tf
└── upgrade-netscaler
│ ├── README.md
│ ├── high-availability
│ ├── issu-mode
│ │ ├── README.md
│ │ ├── inventory.txt
│ │ └── issu_upgrade.yaml
│ └── normal-mode
│ │ ├── README.md
│ │ ├── ansible.cfg
│ │ ├── ha_upgrade.yaml
│ │ ├── inventory.ini
│ │ └── variables.yaml
│ └── standalone
│ ├── README.md
│ ├── ansible.cfg
│ ├── inventory.ini
│ ├── standalone_upgrade.yaml
│ └── variables.yaml
├── labs
├── README.md
├── basic-content-switching-configuration-using-terraform
│ ├── 01-netscaler-adc-basic-content-switching-prerequisites
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 02-reset-default-password
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 03-netscaler-adc-basic-content-switching-csvserver
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 04-netscaler-adc-basic-content-switching-cspolicy2
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 05-netscaler-adc-basic-content-switching-cspolicy3
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 06-netscaler-adc-basic-content-switching-destroy
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── config.yml
│ ├── track.yml
│ └── track_scripts
│ │ └── setup-cloud-client
├── deliver-apps-with-citrix-adc-and-ansible
│ ├── 01-environment-setup
│ │ └── assignment.md
│ ├── 02-install-prerequisites
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ └── solve-cloud-client
│ ├── 03-define-target-adc-configurations-in-ansible-playbook
│ │ ├── assignment.md
│ │ └── setup-cloud-client
│ ├── 04-configure-adc-with-ansible
│ │ └── assignment.md
│ ├── 05-destroy-the-configuration
│ │ └── assignment.md
│ ├── assets
│ │ ├── show-lb-vserver.jpg
│ │ └── ssh-fingerprint.jpg
│ ├── config.yml
│ ├── track.yml
│ └── track_scripts
│ │ ├── cleanup-cloud-client
│ │ └── setup-cloud-client
├── deliver-apps-with-netscaler-adc-terraform-provider
│ ├── 01-setup-vpx
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 02-reset-default-password
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 03-apply-load-balancing-configuration
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 04-destroy-load-balancing-configuration
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── config.yml
│ ├── track.yml
│ └── track_scripts
│ │ └── setup-cloud-client
├── netscaler-adc-basic-application-protection-configuration-waf-using-terraform
│ ├── 01-netscaler-adc-basic-content-switching-prerequisites
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 02-reset-default-password
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 03-netscaler-adc-basic-content-switching-csvserver
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── 04-netscaler-adc-basic-waf-policy1
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ └── solve-cloud-client
│ ├── 05-netscaler-adc-basic-waf-policy2
│ │ └── assignment.md
│ ├── 06-netscaler-adc-basic-content-switching-destroy
│ │ ├── assignment.md
│ │ ├── check-cloud-client
│ │ ├── cleanup-cloud-client
│ │ ├── setup-cloud-client
│ │ └── solve-cloud-client
│ ├── config.yml
│ ├── track.yml
│ └── track_scripts
│ │ └── setup-cloud-client
└── netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform
│ ├── 01-netscaler-adc-basic-rewrite-responder-policies-prerequisites
│ ├── assignment.md
│ ├── check-cloud-client
│ ├── cleanup-cloud-client
│ ├── setup-cloud-client
│ └── solve-cloud-client
│ ├── 02-reset-default-password
│ ├── assignment.md
│ ├── check-cloud-client
│ ├── cleanup-cloud-client
│ ├── setup-cloud-client
│ └── solve-cloud-client
│ ├── 03-netscaler-adc-basic-virtual-servers-config
│ ├── assignment.md
│ ├── check-cloud-client
│ ├── cleanup-cloud-client
│ ├── setup-cloud-client
│ └── solve-cloud-client
│ ├── 04-netscaler-adc-basic-rewrite-policies
│ ├── assignment.md
│ ├── check-cloud-client
│ ├── cleanup-cloud-client
│ └── solve-cloud-client
│ ├── 05-netscaler-adc-basic-responder-policies
│ ├── assignment.md
│ ├── check-cloud-client
│ ├── cleanup-cloud-client
│ ├── setup-cloud-client
│ └── solve-cloud-client
│ ├── 06-netscaler-adc-basic-content-switching-destroy
│ ├── assignment.md
│ ├── check-cloud-client
│ ├── cleanup-cloud-client
│ ├── setup-cloud-client
│ └── solve-cloud-client
│ ├── config.yml
│ ├── track.yml
│ └── track_scripts
│ └── setup-cloud-client
└── readme.md
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Crash log files
9 | crash.log
10 | crash.*.log
11 |
12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
13 | # passwords, private keys, and other secrets. These should not be part of version
14 | # control as they are data points which are potentially sensitive and subject
15 | # to change depending on the environment.
16 | terraform.tfvars
17 | terraform.tfvars.json
18 |
19 | *.auto.tfvars
20 | *.auto.tfvars.json
21 |
22 | # Ignore override files as they are usually used to override resources locally and so
23 | # are not checked in
24 | override.tf
25 | override.tf.json
26 | *_override.tf
27 | *_override.tf.json
28 |
29 | # Include override files you do wish to add to version control using negated pattern
30 | # !example_override.tf
31 |
32 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
33 | # example: *tfplan*
34 |
35 | # Ignore CLI configuration files
36 | .terraformrc
37 | terraform.rc
38 |
39 | .vscode/*
40 | !.vscode/settings.json
41 | !.vscode/tasks.json
42 | !.vscode/launch.json
43 | !.vscode/extensions.json
44 | !.vscode/*.code-snippets
45 |
46 | # Local History for Visual Studio Code
47 | .history/
48 |
49 | # Built Visual Studio Code Extensions
50 | *.vsix
51 |
52 | *.retry
53 |
--------------------------------------------------------------------------------
/Examples/NOTP_internal_access_only/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | citrixadc = {
4 | source = "citrix/citrixadc"
5 | version = "1.39.0"
6 | }
7 | }
8 | }
9 | provider "citrixadc" {
10 | endpoint = "http://192.168.123.150:80"
11 | username = "nsroot"
12 | password = "training"
13 | }
--------------------------------------------------------------------------------
/Examples/NOTP_internal_access_only/readme.md:
--------------------------------------------------------------------------------
1 | About this script
2 |
3 | This script contains everything you need to configure the Native OTP feature of NetScaler.
4 | Native OTP is NetScaler's built-in OTP solution, which includes a self-service portal. The number of enrolled tokens is limited to 2 (the default is 4).
5 |
6 |
7 | Before running this script
8 |
9 | Make sure you have an LDAP service account that is able to write to the LDAP attribute "userParameters".
10 |
11 |
12 | Manual steps after running the script
13 |
14 | Because I don't want to break any running configuration on the ADC, there are some last manual steps to do (a Terraform sketch of these bindings follows):
15 | - Bind the newly created authentication profile to the gateway vServer
16 | - Bind the newly created traffic policy to the gateway vServer
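17 |
18 | If you prefer to automate these last bindings too, the sketch below shows how they might look in Terraform. It is a minimal, hypothetical sketch: the resource and argument names (an authnprofile argument on citrixadc_vpnvserver, and citrixadc_vpnvserver_vpntrafficpolicy_binding) as well as all object names are assumptions based on how the citrixadc provider usually mirrors NITRO objects - verify them against the provider documentation before use.
19 |
20 | ```
21 | # Hypothetical sketch - adjust resource, argument and object names to your environment.
22 |
23 | # Attach the authentication profile created by this script to the existing gateway vServer.
24 | resource "citrixadc_vpnvserver" "gateway" {
25 |   name         = "gw_vserver"          # existing gateway vServer (name is an assumption)
26 |   servicetype  = "SSL"
27 |   authnprofile = "NOTP_auth_profile"   # profile created by main.tf (name is an assumption)
28 | }
29 |
30 | # Bind the traffic policy created by this script to the same gateway vServer.
31 | resource "citrixadc_vpnvserver_vpntrafficpolicy_binding" "notp_traffic" {
32 |   name     = citrixadc_vpnvserver.gateway.name
33 |   policy   = "NOTP_traffic_policy"     # policy created by main.tf (name is an assumption)
34 |   priority = 100
35 | }
36 | ```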
--------------------------------------------------------------------------------
/Examples/NOTP_internal_access_only/variables.tf:
--------------------------------------------------------------------------------
1 | variable "suffix" {
2 | type = string
3 | }
4 |
5 | # LDAP Server Parameter
6 | variable "ldap_server_ip" {
7 | type = string
8 | }
9 |
10 | variable "ldap_server_port" {
11 | type = string
12 | }
13 |
14 | variable "ldap_protocol" {
15 | type = string
16 | }
17 |
18 | variable "ldap_base" {
19 | type = string
20 | }
21 |
22 | variable "ldap_bind_name" {
23 | type = string
24 | }
25 |
26 | variable "ldap_bind_pw" {
27 | type = string
28 | }
29 |
30 | variable "ldap_login_name" {
31 | type = string
32 | }
33 |
34 | variable "ldap_group_attribute" {
35 | type = string
36 | }
37 |
38 | variable "ldap_sub_attribute" {
39 | type = string
40 | }
41 |
42 | variable "ldap_otp_parameter" {
43 | type = string
44 | }
45 |
46 | # Rollout only from internal subnet
47 | variable "client_subnet" {
48 | type = string
49 | }
50 |
51 | # Pre-Installed Certificate to bind
52 | variable "cert" {
53 | type = string
54 | }
--------------------------------------------------------------------------------
/Examples/NOTP_internal_access_only/variables.tfvars:
--------------------------------------------------------------------------------
1 | suffix = "Lurchi"
2 |
3 | # LDAP Parameter
4 | ldap_server_ip = "192.168.123.100"
5 | ldap_server_port = "636"
6 | ldap_protocol = "SSL" # Possible values = PLAINTEXT, TLS, SSL
7 | ldap_bind_name = "administrator@training.local"
8 | ldap_bind_pw = "Citrix.123"
9 | ldap_base = "DC=training, DC=local"
10 | ldap_login_name = "sAMAccountName"
11 | ldap_group_attribute = "memberOf"
12 | ldap_sub_attribute = "cn"
13 | ldap_sso_attribute = "cn"
14 | ldap_otp_parameter = "userParameters"
15 |
16 | # Rollout only from internal subnet
17 | client_subnet = "192.168.123.0/24"
18 |
19 | # Pre-Installed Certificate to Bind
20 | cert = "wildcard_training"
--------------------------------------------------------------------------------
/Examples/NOTP_protected_with_eOTP/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | citrixadc = {
4 | source = "citrix/citrixadc"
5 | version = "1.39.0"
6 | }
7 | }
8 | }
9 | provider "citrixadc" {
10 | endpoint = "http://192.168.123.150:80"
11 | username = "nsroot"
12 | password = "training"
13 | }
--------------------------------------------------------------------------------
/Examples/NOTP_protected_with_eOTP/readme.md:
--------------------------------------------------------------------------------
1 | About this script
2 |
3 | There are situations where it is necessary to make the manageotp site publicly available. Protecting it with LDAP login alone does not provide adequate security.
4 | So this script creates a Native OTP solution and protects the manageotp site with a second factor: email OTP.
5 | This one-time password is sent to the user's company mail address.
6 |
7 | Before running this script
8 | Make sure you have an LDAP service account that is able to write to the LDAP attribute "userParameters".
9 |
10 | Manual steps after running the script
11 | Because I don't want to break any running configuration on the ADC, there are some last manual steps to do:
12 |
13 | - Bind the newly created authentication profile to the gateway vServer
14 | - Bind the newly created traffic policy to the gateway vServer
--------------------------------------------------------------------------------
/Examples/NOTP_protected_with_eOTP/variables.tf:
--------------------------------------------------------------------------------
1 | variable "suffix" {
2 | type = string
3 | }
4 |
5 | # LDAP Server Parameter
6 | variable "ldap_server_ip" {
7 | type = string
8 | }
9 |
10 | variable "ldap_server_port" {
11 | type = string
12 | }
13 |
14 | variable "ldap_protocol" {
15 | type = string
16 | }
17 |
18 | variable "ldap_base" {
19 | type = string
20 | }
21 |
22 | variable "ldap_bind_name" {
23 | type = string
24 | }
25 |
26 | variable "ldap_bind_pw" {
27 | type = string
28 | }
29 |
30 | variable "ldap_login_name" {
31 | type = string
32 | }
33 |
34 | variable "ldap_group_attribute" {
35 | type = string
36 | }
37 |
38 | variable "ldap_sub_attribute" {
39 | type = string
40 | }
41 |
42 | variable "ldap_otp_parameter" {
43 | type = string
44 | }
45 |
46 | # Email Authentication Parameters
47 |
48 | variable "email_sender" {
49 | type = string
50 | }
51 |
52 | variable "email_password" {
53 | type = string
54 | }
55 |
56 | variable "email_server_protocol" {
57 | type = string
58 | }
59 |
60 | variable "email_server_ip_or_name" {
61 | type = string
62 | }
63 |
64 | variable "email_server_port" {
65 | type = string
66 | }
67 |
68 |
69 | # Pre-Installed Certificate to bind
70 | variable "cert" {
71 | type = string
72 | }
--------------------------------------------------------------------------------
/Examples/NOTP_protected_with_eOTP/variables.tfvars:
--------------------------------------------------------------------------------
1 | suffix = "Lurchi"
2 |
3 | # LDAP Parameter
4 | ldap_server_ip = "192.168.123.100"
5 | ldap_server_port = "636"
6 | ldap_protocol = "SSL" # Possible values = PLAINTEXT, TLS, SSL
7 | ldap_bind_name = "administrator@training.local"
8 | ldap_bind_pw = "Citrix.123"
9 | ldap_base = "DC=training, DC=local"
10 | ldap_login_name = "sAMAccountName"
11 | ldap_group_attribute = "memberOf"
12 | ldap_sub_attribute = "cn"
13 | ldap_sso_attribute = "cn"
14 | ldap_otp_parameter = "userParameters"
15 |
16 | # email Parameter
17 | email_sender = "mailsvc@training.local"
18 | email_password = "Citrix.1223"
19 | email_server_port = "25" # Possible values = 25, 587, 465
20 | email_server_ip_or_name = "mail.training.local"
21 | email_server_protocol = "smtp" # Possible values = smtp, smtps
22 |
23 | # Pre-Installed Certificate to Bind
24 | cert = "wildcard_training"
--------------------------------------------------------------------------------
/assets/common_docs/ansible/ansible_password_based_ssh.md:
--------------------------------------------------------------------------------
1 | # For Password-based SSH authentication:
2 |
3 | 1. Install `sshpass`
4 | 2. Run
5 | ```
6 | ansible-playbook -i <inventory_file> <playbook.yml> --ask-pass
7 | ```
--------------------------------------------------------------------------------
/assets/common_docs/terraform/folder_structure.md:
--------------------------------------------------------------------------------
1 | # Folder Structure
2 |
3 | - `main.tf` describes the actual config objects to be created. The attributes of these resources are either hard-coded or looked up from the input variables set in `example.tfvars`.
4 | - `variables.tf` declares the input variables of the Terraform config. These can have defaults.
5 | - `versions.tf` specifies the version requirements for Terraform and the providers.
6 | - `example.tfvars` sets values for the variables declared in `variables.tf`. Variables not set in this file fall back to their defaults (see the sketch below).
7 | - `outputs.tf` contains the outputs from the resources created in `main.tf`.
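8 |
9 | As a minimal illustration of how `variables.tf` and `example.tfvars` interact (the variable shown is only an example, not one used by every module in this repository):
10 |
11 | ```
12 | # variables.tf - declare an input variable, optionally with a default
13 | variable "lbvserver_port" {
14 |   type        = number
15 |   description = "Port on which the LB vserver listens"
16 |   default     = 80
17 | }
18 | ```
19 |
20 | ```
21 | # example.tfvars - set or override the value; omit this line to keep the default of 80
22 | lbvserver_port = 8080
23 | ```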
--------------------------------------------------------------------------------
/assets/common_docs/terraform/terraform_usage.md:
--------------------------------------------------------------------------------
1 | # Usage
2 |
3 | ## Step-1 Install the Required Plugins
4 |
5 | * Terraform needs the provider plugins installed locally, so run `terraform init` - it automatically installs the required plugins from the Terraform Registry (a minimal `versions.tf` is sketched at the end of this page).
6 |
7 | ## Step-2 Applying the Configuration
8 |
9 | * Modify the `example.tfvars`, `versions.tf` and `main.tf` (if necessary) to suit your configuration.
10 | * Use `terraform plan -var-file example.tfvars` to review the plan
11 | * Use `terraform apply -var-file example.tfvars` to apply the configuration.
12 |
13 | ## Step-3 Updating your configuration
14 |
15 | * Modify the set of resources (if necessary)
16 | * Use `terraform plan -var-file example.tfvars` and `terraform apply -var-file example.tfvars` to review and apply the changes, respectively.
17 |
18 | ## Step-4 Destroying your Configuration
19 |
20 | * To destroy the configuration use `terraform destroy -var-file example.tfvars`.
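21 |
22 | For reference, the plugin that Step-1 installs is declared in each module's `versions.tf`; a minimal sketch (the provider version pinned here is illustrative - keep the one from the module you are working with):
23 |
24 | ```
25 | terraform {
26 |   required_providers {
27 |     citrixadc = {
28 |       source  = "citrix/citrixadc"
29 |       version = "1.39.0" # illustrative pin
30 |     }
31 |   }
32 | }
33 | ```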
--------------------------------------------------------------------------------
/assets/day0-n.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/assets/day0-n.png
--------------------------------------------------------------------------------
/assets/gateway/ldap_radius_architecture_diag.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/assets/gateway/ldap_radius_architecture_diag.png
--------------------------------------------------------------------------------
/assets/netscalerautomationtoolkit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/assets/netscalerautomationtoolkit.png
--------------------------------------------------------------------------------
/assets/terraformlab1part1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/assets/terraformlab1part1.gif
--------------------------------------------------------------------------------
/assets/terraformlab1part2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/assets/terraformlab1part2.gif
--------------------------------------------------------------------------------
/events/20230206/step1_configure_ha/example.tfvars:
--------------------------------------------------------------------------------
1 | netscaler1_nsip = "10.10.10.141"
2 | netscaler2_nsip = "10.10.10.142"
3 |
4 | rpc_node_password = "verysecretrpcnodepassword"
--------------------------------------------------------------------------------
/events/20230206/step1_configure_ha/main.tf:
--------------------------------------------------------------------------------
1 | # add ha node
2 | resource "citrixadc_hanode" "netscaler1" {
3 | hanode_id = 1
4 | ipaddress = var.netscaler2_nsip
5 | }
6 |
7 | # add ha node
8 | resource "citrixadc_hanode" "netscaler2" {
9 | provider = citrixadc.netscaler2
10 | hanode_id = 1
11 | ipaddress = var.netscaler1_nsip
12 |
13 | depends_on = [citrixadc_hanode.netscaler1]
14 | }
15 |
16 | resource "citrixadc_systemparameter" "netscaler1_ns_prompt" {
17 | promptstring = "%u@%s"
18 | }
19 | resource "citrixadc_systemparameter" "netscaler2_ns_prompt" {
20 | provider = citrixadc.netscaler2
21 | promptstring = "%u@%s"
22 | }
23 |
24 | # It is best practice to change the RPC node password
25 | resource "citrixadc_nsrpcnode" "netscaler1to1_rpc_node" {
26 | ipaddress = var.netscaler1_nsip
27 | password = var.rpc_node_password
28 | secure = "ON"
29 |
30 | depends_on = [citrixadc_hanode.netscaler1]
31 | }
32 |
33 | # It is best practice to change the RPC node password
34 | resource "citrixadc_nsrpcnode" "netscaler1to2_rpc_node" {
35 | ipaddress = var.netscaler2_nsip
36 | password = var.rpc_node_password
37 | secure = "ON"
38 |
39 | depends_on = [citrixadc_hanode.netscaler1]
40 | }
41 |
42 | # It is best practice to change the RPC node password
43 | resource "citrixadc_nsrpcnode" "netscaler2to1_rpc_node" {
44 | provider = citrixadc.netscaler2
45 | ipaddress = var.netscaler1_nsip
46 | password = var.rpc_node_password
47 | secure = "ON"
48 |
49 | depends_on = [citrixadc_hanode.netscaler2]
50 | }
51 |
52 | # It is best practice to change the RPC node password
53 | resource "citrixadc_nsrpcnode" "netscaler2to2_rpc_node" {
54 | provider = citrixadc.netscaler2
55 | ipaddress = var.netscaler2_nsip
56 | password = var.rpc_node_password
57 | secure = "ON"
58 |
59 | depends_on = [citrixadc_hanode.netscaler2]
60 | }
61 |
62 |
--------------------------------------------------------------------------------
/events/20230206/step1_configure_ha/variables.tf:
--------------------------------------------------------------------------------
1 | variable "netscaler1_nsip" {
2 | type = string
3 | description = "NetScaler1 IP Address"
4 | }
5 |
6 | variable "netscaler2_nsip" {
7 | type = string
8 | description = "NetScaler2 IP Address"
9 | }
10 |
11 | variable "rpc_node_password" {
12 | type = string
13 | sensitive = true
14 | description = "The new ADC RPC node password that will replace the default one on both ADC instances. [Learn More about RPCNode](https://docs.citrix.com/en-us/citrix-adc/current-release/getting-started-with-citrix-adc/change-rpc-node-password.html)"
15 | }
--------------------------------------------------------------------------------
/events/20230206/step1_configure_ha/versions.tf:
--------------------------------------------------------------------------------
1 | # provider
2 | terraform {
3 | required_providers {
4 | citrixadc = {
5 | source = "citrix/citrixadc"
6 | version = "1.29.0"
7 | }
8 | }
9 | }
10 |
11 | provider "citrixadc" {
12 | endpoint = format("http://%s", var.netscaler1_nsip)
13 | # username = "" # NS_LOGIN env variable
14 | # password = "" # NS_PASSWORD env variable
15 | }
16 |
17 | provider "citrixadc" {
18 | alias = "netscaler2"
19 | endpoint = format("http://%s", var.netscaler2_nsip)
20 | # username = "" # NS_LOGIN env variable
21 | # password = "" # NS_PASSWORD env variable
22 | }
--------------------------------------------------------------------------------
/events/20230206/step2_further_configs/example.tfvars:
--------------------------------------------------------------------------------
1 | # primary_netscaler_ip = "10.10.10.141" # Let us give this IP over CLI
2 |
3 | snip = "10.10.10.172"
4 | snip_netmask = "255.255.255.192"
5 |
6 | web_server1_name = "web-server-red"
7 | web_server1_port = 80
8 | web_server1_ip = "10.10.10.181"
9 | web_server1_serivetype = "HTTP"
10 |
11 | web_server2_name = "web-server-green"
12 | web_server2_port = 80
13 | web_server2_ip = "10.10.10.166"
14 | web_server2_serivetype = "HTTP"
15 |
16 | lbvserver_name = "demo-lb"
17 | lbvserver_ip = "10.10.10.150"
18 | lbvserver_port = 80
19 | lbvserver_servicetype = "HTTP"
--------------------------------------------------------------------------------
/events/20230206/step2_further_configs/main.tf:
--------------------------------------------------------------------------------
1 | # add ns ip snip
2 | resource "citrixadc_nsip" "snip" {
3 | ipaddress = var.snip
4 | type = "SNIP"
5 | netmask = var.snip_netmask
6 | }
7 |
8 |
9 | # add a new SNIP to new primary and check if it syncs to secondary
10 | # resource "citrixadc_nsip" "snip_test" {
11 | # ipaddress = "2.2.2.2"
12 | # type = "SNIP"
13 | # netmask = var.snip_netmask
14 | # }
15 |
16 |
17 | # add service1 - RED Web Server
18 | resource "citrixadc_service" "web_server1" {
19 | name = var.web_server1_name
20 | port = var.web_server1_port
21 | ip = var.web_server1_ip
22 | servicetype = var.web_server1_serivetype
23 |
24 | lbvserver = citrixadc_lbvserver.demo_lbvserver.name # bind web_server1 to lbvserver
25 | }
26 |
27 | # add service2 - GREEN Web Server
28 | resource "citrixadc_service" "web_server2" {
29 | name = var.web_server2_name
30 | port = var.web_server2_port
31 | ip = var.web_server2_ip
32 | servicetype = var.web_server2_serivetype
33 |
34 | lbvserver = citrixadc_lbvserver.demo_lbvserver.name # bind web_server2 to lbvserver
35 | }
36 |
37 | # ns enable lb
38 | resource "citrixadc_nsfeature" "enable_lb" {
39 | lb = true
40 | }
41 |
42 | # add lbvserver
43 | resource "citrixadc_lbvserver" "demo_lbvserver" {
44 | name = var.lbvserver_name
45 | ipv46 = var.lbvserver_ip
46 | lbmethod = "LEASTCONNECTION"
47 | port = var.lbvserver_port
48 | servicetype = var.lbvserver_servicetype
49 | }
50 |
--------------------------------------------------------------------------------
/events/20230206/step2_further_configs/variables.tf:
--------------------------------------------------------------------------------
1 | variable "primary_netscaler_ip" {
2 | type = string
3 | description = "Primary NetScaler NSIP"
4 | }
5 | variable "snip" {
6 | type = string
7 | description = "NetScaler SNIP"
8 | }
9 | variable "snip_netmask" {
10 | type = string
11 | description = "SNIP NetMask"
12 | }
13 | variable "web_server1_name" {
14 | type = string
15 | description = "Web Server1 Name"
16 | }
17 | variable "web_server1_port" {
18 | type = string
19 | description = "Web Server1 Port"
20 | }
21 | variable "web_server1_ip" {
22 | type = string
23 | description = "Web Server1 IP"
24 | }
25 | variable "web_server1_serivetype" {
26 | type = string
27 | description = "Web Server1 ServiceType"
28 | }
29 | variable "web_server2_name" {
30 | type = string
31 | description = "Web Server2 Name"
32 |
33 | }
34 | variable "web_server2_port" {
35 | type = string
36 | description = "Web Server2 PORT"
37 |
38 | }
39 | variable "web_server2_ip" {
40 | type = string
41 | description = "Web Server2 IP"
42 |
43 | }
44 | variable "web_server2_serivetype" {
45 | type = string
46 | description = "Web Server2 SERVICETYPE"
47 |
48 | }
49 | variable "lbvserver_name" {
50 | type = string
51 | description = "LBVserver Name"
52 |
53 | }
54 | variable "lbvserver_ip" {
55 | type = string
56 | description = "LBVserver IP"
57 |
58 | }
59 | variable "lbvserver_port" {
60 | type = number
61 | description = "On which port number is the LBVserver serving traffic?"
62 |
63 | }
64 | variable "lbvserver_servicetype" {
65 | type = string
66 | description = "Which SERVICETYPE is the LBVserver using to serve traffic?"
67 | }
68 |
--------------------------------------------------------------------------------
/events/20230206/step2_further_configs/versions.tf:
--------------------------------------------------------------------------------
1 | # provider
2 | terraform {
3 | required_providers {
4 | citrixadc = {
5 | source = "citrix/citrixadc"
6 | version = "1.29.0"
7 | }
8 | }
9 | }
10 |
11 | provider "citrixadc" {
12 | endpoint = format("http://%s", var.primary_netscaler_ip)
13 | # username = "" # NS_LOGIN env variable
14 | # password = "" # NS_PASSWORD env variable
15 | }
16 |
17 |
18 |
--------------------------------------------------------------------------------
/events/20230816/basic-content-switching.yaml:
--------------------------------------------------------------------------------
1 | - name: Demo | NetScaler | Basic Content Switching Configuration
2 | hosts: demo_netscalers
3 | gather_facts: false
4 | tasks:
5 | # add service svc-red-server 10.10.10.15 HTTP 80
6 | - name: Demo | service-red-server
7 | delegate_to: localhost
8 | netscaler.adc.service:
9 | state: present
10 |
11 | name: svc-red-server
12 | ipaddress: 10.10.10.15
13 | servicetype: HTTP
14 | port: 80
15 |
16 | # add service svc-green-server 10.10.10.166 HTTP 80
17 | - name: Demo | service-green-server
18 | delegate_to: localhost
19 | netscaler.adc.service:
20 | state: present
21 |
22 | name: svc-green-server
23 | ipaddress: 10.10.10.166
24 | servicetype: HTTP
25 | port: 80
26 |
27 | # add lb vserver lb-red-server HTTP
28 | - name: Demo | lbvserver-red-server
29 | delegate_to: localhost
30 | netscaler.adc.lbvserver:
31 | state: present
32 |
33 | name: lb-red-server
34 | servicetype: HTTP
35 |
36 | # bind lb vserver lb-red-server svc-red-server
37 | lbvserver_service_binding:
38 | mode: desired # desired | bind | unbind
39 | binding_members:
40 | - name: lb-red-server
41 | servicename: svc-red-server
42 |
43 | # add lb vserver lb-green-server HTTP
44 | - name: Demo | lbvserver-green-server
45 | delegate_to: localhost
46 | netscaler.adc.lbvserver:
47 | state: present
48 |
49 | name: lb-green-server
50 | servicetype: HTTP
51 |
52 | # bind lb vserver lb-green-server svc-green-server
53 | lbvserver_service_binding:
54 | mode: desired # desired | bind | unbind
55 | binding_members:
56 | - name: lb-green-server
57 | servicename: svc-green-server
58 |
59 | # # bind lb vserver lb-red-server svc-red-server
60 | # - name: Demo | red-server-service-lb-binding
61 | # delegate_to: localhost
62 | # netscaler.adc.lbvserver_service_binding:
63 | # state: present
64 |
65 | # name: lb-red-server
66 | # servicename: svc-red-server
67 |
68 | # # bind lb vserver lb-green-server svc-green-server
69 | # - name: Demo | green-server-service-lb-binding
70 | # delegate_to: localhost
71 | # netscaler.adc.lbvserver_service_binding:
72 | # state: present
73 |
74 | # name: lb-green-server
75 | # servicename: svc-green-server
76 |
77 | # add cs action csaction-red-server -targetLBVserver lb-red-server
78 | - name: Demo | csaction-red-server
79 | delegate_to: localhost
80 | netscaler.adc.csaction:
81 | state: present
82 |
83 | name: csaction-red-server
84 | targetlbvserver: lb-red-server
85 |
86 | # add cs action csaction-green-server -targetLBVserver lb-green-server
87 | - name: Demo | csaction-green-server
88 | delegate_to: localhost
89 | netscaler.adc.csaction:
90 | state: present
91 |
92 | name: csaction-green-server
93 | targetlbvserver: lb-green-server
94 |
95 | # add cs policy cspolicy-red-server -rule "HTTP.REQ.URL.SET_TEXT_MODE(IGNORECASE).STARTSWITH(\"/red\")" -action csaction-red-server
96 | - name: Demo | cspolicy-red-server
97 | delegate_to: localhost
98 | netscaler.adc.cspolicy:
99 | state: present
100 |
101 | policyname: cspolicy-red-server
102 | rule: "HTTP.REQ.URL.SET_TEXT_MODE(IGNORECASE).STARTSWITH(\"/red\")"
103 | action: csaction-red-server
104 |
105 | # add cs policy cspolicy-green-server -rule "HTTP.REQ.URL.SET_TEXT_MODE(IGNORECASE).STARTSWITH(\"/green\")" -action csaction-green-server
106 | - name: Demo | cspolicy-green-server
107 | delegate_to: localhost
108 | netscaler.adc.cspolicy:
109 | state: present
110 |
111 | policyname: cspolicy-green-server
112 | rule: "HTTP.REQ.URL.SET_TEXT_MODE(IGNORECASE).STARTSWITH(\"/green\")"
113 | action: csaction-green-server
114 |
115 | # add cs vserver demo-csvserver HTTP 10.10.10.150 80
116 | - name: Demo | demo-csvserver
117 | delegate_to: localhost
118 | netscaler.adc.csvserver:
119 | state: present
120 |
121 | name: demo-csvserver
122 | servicetype: HTTP
123 | ipv46: 10.10.10.150
124 | port: 80
125 |
126 | # bind cs vserver demo-csvserver -policyName cspolicy-red-server -priority 100
127 | # bind cs vserver demo-csvserver -policyName cspolicy-green-server -priority 110
128 | csvserver_cspolicy_binding:
129 | mode: desired # desired | bind | unbind
130 | binding_members:
131 | - name: demo-csvserver
132 | policyname: cspolicy-red-server
133 | priority: 100
134 | - name: demo-csvserver
135 | policyname: cspolicy-green-server
136 | priority: 110
137 |
138 | # # bind cs vserver demo-csvserver -policyName cspolicy-red-server -priority 100
139 | # - name: Demo | red-csvserver-cspolicy-binding
140 | # delegate_to: localhost
141 | # netscaler.adc.csvserver_cspolicy_binding:
142 | # state: present
143 |
144 | # name: demo-csvserver
145 | # policyname: cspolicy-red-server
146 | # priority: 100
147 |
148 | # # bind cs vserver demo-csvserver -policyName cspolicy-green-server -priority 110
149 | # - name: Demo | green-csvserver-cspolicy-binding
150 | # delegate_to: localhost
151 | # netscaler.adc.csvserver_cspolicy_binding:
152 | # state: present
153 |
154 | # name: demo-csvserver
155 | # policyname: cspolicy-green-server
156 | # priority: 110
157 |
--------------------------------------------------------------------------------
/events/20230816/environment-variables.sh:
--------------------------------------------------------------------------------
1 | export NETSCALER_NSIP=10.10.10.141
2 | export NETSCALER_NITRO_USER=nsroot
3 | export NETSCALER_NITRO_PASS=verysecretpassword
4 | export NETSCALER_NITRO_PROTOCOL=https
5 | export NETSCALER_VALIDATE_CERTS=false
6 | export NETSCALER_SAVE_CONFIG=false
7 |
--------------------------------------------------------------------------------
/events/20230816/inventory.ini:
--------------------------------------------------------------------------------
1 | [demo_netscalers]
2 | demo_netscaler1
--------------------------------------------------------------------------------
/events/20230816/sessionid_based_authentication_via_login_logout.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Sample login and logout playbook
3 | hosts: demo_netscalers
4 |
5 | gather_facts: false
6 |
7 | tasks:
8 | - name: V2 | Sample Task | login
9 | delegate_to: localhost
10 | register: login_result
11 | netscaler.adc.login:
12 | # nsip: 10.0.0.1 # This can also be given via NETSCALER_NSIP environment variable
13 | # nitro_protocol: https # This can also be given via NETSCALER_NITRO_PROTOCOL environment variable
14 | # validate_certs: false # This can also be given via NETSCALER_VALIDATE_CERTS environment variable
15 | # save_config: false # This can also be given via NETSCALER_SAVE_CONFIG environment variable
16 |
17 | username: nsroot
18 | password: verysecretpassword
19 |
20 | - name: Print login sessionid
21 | ansible.builtin.debug:
22 | var: login_result.sessionid
23 |
24 | - name: V2 | Sample Task | nsip
25 | delegate_to: localhost
26 | netscaler.adc.nsip:
27 | nitro_auth_token: "{{ login_result.sessionid }}" # This can also be given via NETSCALER_NITRO_AUTH_TOKEN environment variable
28 | # nsip: 10.0.0.1 # This can also be given via NETSCALER_NSIP environment variable
29 | # nitro_protocol: https # This can also be given via NETSCALER_NITRO_PROTOCOL environment variable
30 | # validate_certs: false # This can also be given via NETSCALER_VALIDATE_CERTS environment variable
31 | # save_config: false # This can also be given via NETSCALER_SAVE_CONFIG environment variable
32 |
33 | state: present
34 |
35 | ipaddress: 4.4.4.4
36 | netmask: 255.255.255.192
37 | type: VIP
38 |
39 | - name: V2 | Sample Task | logout
40 | delegate_to: localhost
41 | netscaler.adc.logout:
42 | nitro_auth_token: "{{ login_result.sessionid }}" # This can also be given via NETSCALER_NITRO_AUTH_TOKEN environment variable
43 | # nsip: 10.0.0.1 # This can also be given via NETSCALER_NSIP environment variable
44 | # nitro_protocol: https # This can also be given via NETSCALER_NITRO_PROTOCOL environment variable
45 | # validate_certs: false # This can also be given via NETSCALER_VALIDATE_CERTS environment variable
46 | # save_config: false # This can also be given via NETSCALER_SAVE_CONFIG environment variable
47 |
--------------------------------------------------------------------------------
/events/20230920/inventory.ini:
--------------------------------------------------------------------------------
1 | [demo_netscalers]
2 | netscaler1 nsip=10.10.10.141
3 |
4 | [demo_netscalers:vars]
5 | nitro_user=nsroot
6 | default_password=default_password
7 | new_password=verystrongpassword
8 | validate_certs=no
9 | nitro_protocol=http
10 |
11 | snip=10.10.10.185
12 | snip_netmask=255.255.255.192
13 | vip_ip=10.10.10.186
14 | server1_ip=10.10.10.181
15 | server2_ip=10.10.10.166
16 |
--------------------------------------------------------------------------------
/events/20230920/netscaler-waf-final.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/events/20230920/netscaler-waf-final.gif
--------------------------------------------------------------------------------
/events/20230920/netscaler1.env:
--------------------------------------------------------------------------------
1 | export NETSCALER_NSIP=10.10.10.141
2 | export NETSCALER_NITRO_USER=nsroot
3 | export NETSCALER_NITRO_PASS=verystrongpassword
4 | export NETSCALER_NITRO_PROTOCOL=https
5 | export NETSCALER_VALIDATE_CERTS=false
6 | export NETSCALER_SAVE_CONFIG=false
7 |
--------------------------------------------------------------------------------
/events/20231115/README.md:
--------------------------------------------------------------------------------
1 | # Automation Pipelines: Leveraging Terraform Cloud to design a NetScaler Automation strategy
2 |
3 |
4 | 
5 |
6 |
7 | ## Recorded video
8 |
9 | https://community.netscaler.com/s/webinar/a078b000013hFsSAAU/leveraging-terraform-cloud-to-design-a-netscaler-automation-strategy
10 |
11 |
--------------------------------------------------------------------------------
/events/20231115/github-workflows/terraform-apply.yml:
--------------------------------------------------------------------------------
1 | name: "Terraform Apply"
2 |
3 | on:
4 | # apply this workflow only when pull_request is successfully merged to main branch
5 | pull_request:
6 | types:
7 | - closed
8 |
9 |
10 | env:
11 | TF_CLOUD_ORGANIZATION: "netscaler_automation"
12 | TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
13 | TF_WORKSPACE: "netscaler-automation-pipeline"
14 | CONFIG_DIRECTORY: "./"
15 |
16 | jobs:
17 | terraform:
18 | if: github.event.pull_request.merged == true
19 | name: "Terraform Apply"
20 | runs-on: ubuntu-latest
21 | permissions:
22 | contents: read
23 | pull-requests: write
24 | steps:
25 | - name: Checkout
26 | uses: actions/checkout@v3
27 |
28 | - name: Upload Configuration
29 | uses: hashicorp/tfc-workflows-github/actions/upload-configuration@v1.0.0
30 | id: apply-upload
31 | with:
32 | workspace: ${{ env.TF_WORKSPACE }}
33 | directory: ${{ env.CONFIG_DIRECTORY }}
34 |
35 | - name: Create Apply Run
36 | uses: hashicorp/tfc-workflows-github/actions/create-run@v1.0.0
37 | id: apply-run
38 | with:
39 | workspace: ${{ env.TF_WORKSPACE }}
40 | configuration_version: ${{ steps.apply-upload.outputs.configuration_version_id }}
41 |
42 | - name: Apply
43 | uses: hashicorp/tfc-workflows-github/actions/apply-run@v1.0.0
44 | if: fromJSON(steps.apply-run.outputs.payload).data.attributes.actions.IsConfirmable
45 | id: apply
46 | with:
47 | run: ${{ steps.apply-run.outputs.run_id }}
48 | comment: "Apply Run from GitHub Actions CI ${{ github.sha }}"
49 |
50 | - name: Update PR
51 | uses: actions/github-script@v6
52 | id: apply-comment
53 | with:
54 | github-token: ${{ secrets.GITHUB_TOKEN }}
55 | script: |
56 | // 1. Retrieve existing bot comments for the PR
57 | const { data: comments } = await github.rest.issues.listComments({
58 | owner: context.repo.owner,
59 | repo: context.repo.repo,
60 | issue_number: context.issue.number,
61 | });
62 | const output = `#### Terraform Cloud Apply Output
63 | [Terraform Cloud Apply details](${{ steps.apply-run.outputs.run_link }})
64 | `;
65 | github.rest.issues.createComment({
66 | issue_number: context.issue.number,
67 | owner: context.repo.owner,
68 | repo: context.repo.repo,
69 | body: output
70 | });
71 |
--------------------------------------------------------------------------------
/events/20231115/github-workflows/terraform-plan.yml:
--------------------------------------------------------------------------------
1 | name: "Terraform Plan"
2 |
3 | on:
4 | pull_request:
5 |
6 | env:
7 | TF_CLOUD_ORGANIZATION: "netscaler_automation"
8 | TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
9 | TF_WORKSPACE: "netscaler-automation-pipeline"
10 | CONFIG_DIRECTORY: "./"
11 |
12 | jobs:
13 | terraform:
14 | name: "Terraform Plan"
15 | runs-on: ubuntu-latest
16 | permissions:
17 | contents: read
18 | pull-requests: write
19 | steps:
20 | - name: Checkout
21 | uses: actions/checkout@v3
22 |
23 | - name: Upload Configuration
24 | uses: hashicorp/tfc-workflows-github/actions/upload-configuration@v1.0.0
25 | id: plan-upload
26 | with:
27 | workspace: ${{ env.TF_WORKSPACE }}
28 | directory: ${{ env.CONFIG_DIRECTORY }}
29 | speculative: true
30 |
31 | - name: Create Plan Run
32 | uses: hashicorp/tfc-workflows-github/actions/create-run@v1.0.0
33 | id: plan-run
34 | with:
35 | workspace: ${{ env.TF_WORKSPACE }}
36 | configuration_version: ${{ steps.plan-upload.outputs.configuration_version_id }}
37 | plan_only: true
38 |
39 | - name: Get Plan Output
40 | uses: hashicorp/tfc-workflows-github/actions/plan-output@v1.0.0
41 | id: plan-output
42 | with:
43 | plan: ${{ fromJSON(steps.plan-run.outputs.payload).data.relationships.plan.data.id }}
44 |
45 | - name: Update PR
46 | uses: actions/github-script@v6
47 | id: plan-comment
48 | with:
49 | github-token: ${{ secrets.GITHUB_TOKEN }}
50 | script: |
51 | // 1. Retrieve existing bot comments for the PR
52 | const { data: comments } = await github.rest.issues.listComments({
53 | owner: context.repo.owner,
54 | repo: context.repo.repo,
55 | issue_number: context.issue.number,
56 | });
57 | const botComment = comments.find(comment => {
58 | return comment.user.type === 'Bot' && comment.body.includes('Terraform Cloud Plan Output')
59 | });
60 | const output = `#### Terraform Cloud Plan Output
61 | \`\`\`
62 | Plan: ${{ steps.plan-output.outputs.add }} to add, ${{ steps.plan-output.outputs.change }} to change, ${{ steps.plan-output.outputs.destroy }} to destroy.
63 | \`\`\`
64 | [Terraform Cloud Plan](${{ steps.plan-run.outputs.run_link }})
65 | `;
66 | // 3. Delete previous comment so PR timeline makes sense
67 | if (botComment) {
68 | github.rest.issues.deleteComment({
69 | owner: context.repo.owner,
70 | repo: context.repo.repo,
71 | comment_id: botComment.id,
72 | });
73 | }
74 | github.rest.issues.createComment({
75 | issue_number: context.issue.number,
76 | owner: context.repo.owner,
77 | repo: context.repo.repo,
78 | body: output
79 | });
80 |
--------------------------------------------------------------------------------
/events/20231115/terraform-scripts/main.tf:
--------------------------------------------------------------------------------
1 | resource "citrixadc_nsfeature" "tf_nsfeature" {
2 | lb = true
3 | }
4 | resource "citrixadc_nsip" "snip" {
5 | ipaddress = "10.11.2.4"
6 | type = "SNIP"
7 | netmask = "255.255.255.0"
8 | }
9 |
10 | resource "citrixadc_lbvserver" "tf_lbvserver" {
11 | name = "tf_lbvserver"
12 | servicetype = "HTTP"
13 | ipv46 = "10.11.1.4"
14 | lbmethod = "ROUNDROBIN"
15 | port = 80
16 | }
17 | resource "citrixadc_service" "web-server-red" {
18 | name = "web-server-red"
19 | port = 80
20 | ip = "10.11.2.4"
21 | servicetype = "HTTP"
22 | }
23 | resource "citrixadc_lbvserver_service_binding" "lb_binding1" {
24 | name = citrixadc_lbvserver.tf_lbvserver.name
25 | servicename = citrixadc_service.web-server-red.name
26 | }
27 |
28 | resource "citrixadc_service" "web-server-green" {
29 | name = "web-server-green"
30 | port = 80
31 | ip = "10.11.2.5"
32 | servicetype = "HTTP"
33 | }
34 | resource "citrixadc_lbvserver_service_binding" "lb_binding2" {
35 | name = citrixadc_lbvserver.tf_lbvserver.name
36 | servicename = citrixadc_service.web-server-green.name
37 | }
38 |
--------------------------------------------------------------------------------
/events/20231115/terraform-scripts/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | citrixadc = {
4 | source = "citrix/citrixadc"
5 | }
6 | }
7 | }
8 |
9 | provider "citrixadc" {
10 | # The endpoint and the username are already configured in Terraform Cloud as env variables
11 | # endpoint = "http://..."
12 | # username = "secret"
13 | password = var.NS_PASSWORD
14 | }
--------------------------------------------------------------------------------
/events/20231115/terraform-scripts/variable.tf:
--------------------------------------------------------------------------------
1 | variable "NS_PASSWORD" {
2 | description = "Password of the NetScaler"
3 | }
--------------------------------------------------------------------------------
/golden_templates/README.md:
--------------------------------------------------------------------------------
1 | # Golden Templates
2 |
3 | TBD
4 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/README.md:
--------------------------------------------------------------------------------
1 | # Gateway related Golden Templates
2 |
3 | This directory contains various golden templates related to the [NetScaler Gateway](https://docs.netscaler.com/en-us/citrix-gateway/current-release.html) feature.
4 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/ldap_radius/README.md:
--------------------------------------------------------------------------------
1 | # Simplified Gateway with LDAP and RADIUS
2 |
3 | This example shows how to configure a simplified gateway with LDAP and RADIUS authentication.
4 |
5 | ## NetScaler Community Live Demo
6 |
7 |
8 |
9 | Video Link:
10 |
11 | ## Folder Structure
12 | There are two separate Terraform modules:
13 | 1. `step1_configure_ha` configures the two NetScalers in HA mode
14 | 2. `step2_gateway_ldap_radius` configures the gateway LDAP/RADIUS use case on the primary NetScaler after HA has been set up.
15 |
16 | Each of these Terraform modules follows the common file structure.
17 |
18 | Refer [HERE](../../../assets/common_docs/terraform/folder_structure.md).
19 |
20 | ## Pre-requisites
21 |
22 | 1. Two NetScaler ADCs already provisioned in the same subnet
23 | 2. All the certificates required for the gateway configuration must be present in the `step2_gateway_ldap_radius` folder (see the certificate sketch at the end of this README)
24 |
25 | ## Usage
26 |
27 | ### Step1: Configure HA
28 |
29 | 1. `cd step1_configure_ha`
30 | 2. Refer [HERE](../../../assets/common_docs/terraform/terraform_usage.md) for steps
31 |
32 | ### Step2: Further Gateway Configuration
33 |
34 | 1. `cd step2_gateway_ldap_radius`
35 | 2. Refer [HERE](../../../assets/common_docs/terraform/terraform_usage.md) for steps
36 |
37 | ## Network Architecture
38 | ![LDAP/RADIUS architecture diagram](../../../assets/gateway/ldap_radius_architecture_diag.png)
39 |
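40 | ## Certificates
41 |
42 | As noted in the pre-requisites, the certificate and key files referenced in `example.tfvars` (for example `gateway_certfile_name` and `gateway_keyfile_name`) have to be available to the `step2_gateway_ldap_radius` module. The sketch below shows how such files are typically uploaded and installed with the citrixadc provider. It is a hypothetical sketch: the `citrixadc_systemfile` and `citrixadc_sslcertkey` argument names and the certkey name are assumptions - verify them against the provider documentation and the module's `main.tf` before use.
43 |
44 | ```
45 | # Hypothetical sketch - upload a local certificate/key pair and install it as a certkey.
46 | resource "citrixadc_systemfile" "gateway_cert" {
47 |   filename     = var.gateway_certfile_name
48 |   filelocation = "/nsconfig/ssl"
49 |   filecontent  = file("${path.module}/${var.gateway_certfile_name}")
50 | }
51 |
52 | resource "citrixadc_systemfile" "gateway_key" {
53 |   filename     = var.gateway_keyfile_name
54 |   filelocation = "/nsconfig/ssl"
55 |   filecontent  = file("${path.module}/${var.gateway_keyfile_name}")
56 | }
57 |
58 | resource "citrixadc_sslcertkey" "gateway_certkey" {
59 |   certkey = "gateway_certkey"   # name is an assumption
60 |   cert    = "/nsconfig/ssl/${var.gateway_certfile_name}"
61 |   key     = "/nsconfig/ssl/${var.gateway_keyfile_name}"
62 |
63 |   depends_on = [citrixadc_systemfile.gateway_cert, citrixadc_systemfile.gateway_key]
64 | }
65 | ```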
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/ldap_radius/step1_configure_ha/example.tfvars:
--------------------------------------------------------------------------------
1 | netscaler1_nsip = "10.10.10.1"
2 | netscaler2_nsip = "10.10.10.2"
3 |
4 | rpc_node_password = "verysecretrpcnodepassword"
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/ldap_radius/step1_configure_ha/main.tf:
--------------------------------------------------------------------------------
1 | # add ha node 1
2 | resource "citrixadc_hanode" "netscaler1" {
3 | hanode_id = 1
4 | ipaddress = var.netscaler2_nsip
5 | }
6 |
7 | # add ha node 1
8 | resource "citrixadc_hanode" "netscaler2" {
9 | provider = citrixadc.netscaler2
10 | hanode_id = 1
11 | ipaddress = var.netscaler1_nsip
12 |
13 | depends_on = [citrixadc_hanode.netscaler1]
14 | }
15 |
16 | resource "citrixadc_systemparameter" "netscaler1_ns_prompt" {
17 | promptstring = "%u@%s"
18 | }
19 | resource "citrixadc_systemparameter" "netscaler2_ns_prompt" {
20 | provider = citrixadc.netscaler2
21 | promptstring = "%u@%s"
22 | }
23 |
24 | # It is best practice to change the RPC node password
25 | # set rpcnode -password secretpassword -secure ON
26 | resource "citrixadc_nsrpcnode" "netscaler1to1_rpc_node" {
27 | ipaddress = var.netscaler1_nsip
28 | password = var.rpc_node_password
29 | secure = "ON"
30 |
31 | depends_on = [citrixadc_hanode.netscaler1]
32 | }
33 |
34 | # It is best practice to change the RPC node password
35 | resource "citrixadc_nsrpcnode" "netscaler1to2_rpc_node" {
36 | ipaddress = var.netscaler2_nsip
37 | password = var.rpc_node_password
38 | secure = "ON"
39 |
40 | depends_on = [citrixadc_hanode.netscaler1]
41 | }
42 |
43 | # It is best practice to change the RPC node password
44 | resource "citrixadc_nsrpcnode" "netscaler2to1_rpc_node" {
45 | provider = citrixadc.netscaler2
46 | ipaddress = var.netscaler1_nsip
47 | password = var.rpc_node_password
48 | secure = "ON"
49 |
50 | depends_on = [citrixadc_hanode.netscaler2]
51 | }
52 |
53 | # It is best practice to change the RPC node password
54 | resource "citrixadc_nsrpcnode" "netscaler2to2_rpc_node" {
55 | provider = citrixadc.netscaler2
56 | ipaddress = var.netscaler2_nsip
57 | password = var.rpc_node_password
58 | secure = "ON"
59 |
60 | depends_on = [citrixadc_hanode.netscaler2]
61 | }
62 |
63 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/ldap_radius/step1_configure_ha/variables.tf:
--------------------------------------------------------------------------------
1 | variable "netscaler1_nsip" {
2 | type = string
3 | description = "NetScaler1 IP Address"
4 | }
5 | variable "netscaler2_nsip" {
6 | type = string
7 | description = "NetScaler2 IP Address"
8 | }
9 | variable "rpc_node_password" {
10 | type = string
11 | sensitive = true
12 | description = "The new ADC RPC node password that will replace the default one on both ADC instances. [Learn More about RPCNode](https://docs.citrix.com/en-us/citrix-adc/current-release/getting-started-with-citrix-adc/change-rpc-node-password.html)"
13 | }
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/ldap_radius/step1_configure_ha/versions.tf:
--------------------------------------------------------------------------------
1 | # provider
2 | terraform {
3 | required_providers {
4 | citrixadc = {
5 | source = "citrix/citrixadc"
6 | }
7 | }
8 | }
9 |
10 | provider "citrixadc" {
11 | endpoint = format("http://%s", var.netscaler1_nsip)
12 | username = "" # NS_LOGIN env variable
13 | password = "" # NS_PASSWORD env variable
14 | }
15 |
16 | provider "citrixadc" {
17 | alias = "netscaler2"
18 | endpoint = format("http://%s", var.netscaler2_nsip)
19 | username = "" # NS_LOGIN env variable
20 | password = "" # NS_PASSWORD env variable
21 | }
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/ldap_radius/step2_gateway_ldap_radius/example.tfvars:
--------------------------------------------------------------------------------
1 | # SSL Certificates
2 | servercertfile_name = "ns-server1.cert"
3 | servekeyfile_name = "ns-server1.key"
4 |
5 | intermediate_certificate1_name = "remote_mycoolcompany_com.ca-bundle"
6 | intermediate_certificate2_name = "remote_mycoolcompany_com.ca-bundle_ic1"
7 |
8 | gateway_certfile_name = "remote_mycoolcompany_com.crt"
9 | gateway_keyfile_name = "remote.mycoolcompany.com_key3.txt"
10 |
11 | # Basic Networking
12 | snip_ip_address = "10.0.10.12"
13 | snip_netmask = "255.255.255.0"
14 |
15 |
16 | # Centralized authentication for management access
17 | systemgroup1_name = "NetScaler-ReadOnly"
18 | systemgroup2_name = "NetScaler-Operator"
19 | systemgroup3_name = "NetScaler-Network"
20 | systemgroup4_name = "NetScaler-Sysadmin"
21 | systemgroup5_name = "NetScaler-SuperUser"
22 |
23 | management_authenticationldapaction_servername = "10.0.10.13"
24 | management_authenticationldapaction_ldapbase = "CN=Users,DC=mycoolcompany,DC=local"
25 | management_authenticationldapaction_ldapbinddn = "NetScaler-Service-Account@mycoolcompany.local"
26 | management_authenticationldapaction_ldapbinddnpassword = "secretpassword"
27 | management_authenticationldapaction_searchfilter = "memberOf:1.2.840.113556.1.4.1941:=CN=NetScaler-Admins,CN=Users,DC=mycoolcompany,DC=local"
28 |
29 | # SNMP
30 | snmpcommunity_name = "public"
31 | snmpmanager1_ipaddress = "10.50.50.10"
32 | snmpmanager2_ipaddress = "10.50.50.20"
33 |
34 | snmpuser_name = "snmp_monitoring_user"
35 | snmpuser_authpasswd = "secretpassword"
36 | snmpuser_privpasswd = "secretpassword"
37 |
38 | snmptrap1_trapdestination = "10.50.50.10"
39 | snmptrap2_trapdestination = "10.50.50.20"
40 |
41 | # Syslog
42 | auditsyslogaction_name = "syslog.mycoolcompany.internal"
43 | auditsyslogaction_serverip = "10.0.10.1"
44 |
45 | # DNS
46 | dns_server1_ipaddress = "8.8.8.8"
47 | dns_server2_ipaddress = "1.1.1.1"
48 | dns_tcp_lbmonitor_query = "remote.mycoolcompany.com"
49 | dns_udp_lbmonitor_query = "remote.mycoolcompany.com"
50 |
51 | server3_ipaddress = "10.0.10.25"
52 |
53 |
54 | #Load balancing configuration
55 | storefront_lbvserver_ipv46 = "10.0.10.14"
56 | ldap_lbvserver_ipv46 = "10.0.10.13"
57 | ldap_tls_offload_lbvserver_ipv46 = "10.0.10.13"
58 | radius_lbvserver_ipv46 = "10.0.10.13"
59 |
60 | # Authentication Configuration
61 | ldap_server1_ipaddress = "192.168.3.10"
62 | ldap_server2_ipaddress = "192.168.3.11"
63 | radius_server_ipaddress = "192.168.3.20"
64 |
65 |
66 | # Store Front configuration
67 | storefront_lbmonitor_storename = "Store"
68 | storefront_server1_ipaddress = "192.168.3.30"
69 | storefront_server2_ipaddress = "192.168.3.31"
70 |
71 | ldap_lbmonitor_password = "secretpassword"
72 | ldap_lbmonitor_basedn = "CN=Users,DC=mycoolcompany,DC=local"
73 | ldap_lbmonitor_binddn = "NetScaler-Service-Account@mycoolcompany.local"
74 | ldap_lbmonitor_filter = "memberOf:1.2.840.113556.1.4.1941:=CN=NetScaler-Admins,CN=Users,DC=mycoolcompany,DC=local"
75 |
76 | radius_lbmonitor_username = "RADIUS-Service-Account"
77 | radius_lbmonitor_password = "secretpassword"
78 | radius_lbmonitor_radkey = "secretpassword"
79 |
80 | gateway_authenticationldapaction_servername = "10.0.10.13"
81 | gateway_authenticationldapaction_ldapbase = "CN=Users,DC=mycoolcompany,DC=local"
82 | gateway_authenticationldapaction_ldapbinddn = "NetScaler-Service-Account@mycoolcompany.local"
83 | gateway_authenticationldapaction_ldapbinddnpassword = "secretpassword" ##
84 |
85 | authenticationradiusaction_serverip = "10.0.10.13"
86 | authenticationradiusaction_radkey = "secretpassword"
87 |
88 | gateway_vpnvserver_ipv46 = "10.0.10.15"
89 |
90 | vpnsessionaction1_wihome = "https://10.0.10.14/Citrix/StoreWeb"
91 | vpnsessionaction1_storefronturl = "https://10.0.10.14/"
92 |
93 | vpnsessionaction2_wihome = "https://10.0.10.14/Citrix/StoreWeb"
94 |
95 |
96 | vpnvserver_staserver = ["http://192.168.3.30", "http://192.168.3.31"]
97 |
98 | gateway_dtls_vpnvserver_ipv46 = "10.0.10.15"
99 |
100 |
101 | ## These defaults don't need to be modified
102 |
103 | # promptstring = "%u@%h-%s"
104 |
105 | # management_authenticationldapaction_name = "Management_LDAP_Server"
106 |
107 | # dns_tcp_lbvserver_name = "DNS_TCP_LB"
108 | # dns_tcp_lbvserver_servicetype = "DNS_TCP"
109 | # dns_tcp_lbvserver_ipv46 = "0.0.0.0"
110 |
111 | # dns_udp_lbvserver_name = "DNS_UDP_LB"
112 | # dns_udp_lbvserver_servicetype = "DNS"
113 | # dns_udp_lbvserver_ipv46 = "0.0.0.0"
114 |
115 | # storefront_lbvserver_name = "StoreFront_LB"
116 | # storefront_lbvserver_servicetype = "SSL"
117 |
118 | # ldap_lbvserver_name = "LDAP_LB"
119 | # ldap_lbvserver_servicetype = "TCP"
120 |
121 | # ldap_tls_offload_lbvserver_name = "LDAP_TLS_Offload_LB"
122 | # ldap_tls_offload_lbvserver_servicetype = "TCP"
123 |
124 | # radius_lbvserver_name = "RADIUS_LB"
125 | # radius_lbvserver_servicetype = "RADIUS"
126 |
127 | # dns_tcp_lbmonitor_name = "DNS_TCP_monitor"
128 | # dns_tcp_lbmonitor_type = "DNS-TCP"
129 |
130 | # dns_udp_lbmonitor_name = "DNS_UDP_monitor"
131 | # dns_udp_lbmonitor_type = "DNS"
132 |
133 | # storefront_lbmonitor_name = "StoreFront_monitor"
134 | # storefront_lbmonitor_type = "STOREFRONT"
135 | # storefront_lbmonitor_dispatcherip = "127.0.0.1"
136 |
137 | # ldap_lbmonitor_name = "LDAP_MON"
138 | # ldap_lbmonitor_type = "LDAP"
139 | # ldap_lbmonitor_dispatcherip = "127.0.0.1"
140 |
141 | # radius_lbmonitor_name = "RADIUS_MON"
142 | # radius_lbmonitor_type = "RADIUS"
143 |
144 | # gateway_authenticationldapaction_name = "Gateway_LDAP_Server"
145 |
146 | # authenticationradiusaction_name = "Gateway_RADIUS_Server"
147 |
148 | # gateway_vpnvserver_name = "Gateway_vServer"
149 | # gateway_vpnvserver_servicetype = "SSL"
150 |
151 | # vpnsessionaction1_name = "Native_Profile"
152 |
153 | # vpnsessionaction2_name = "Web_Profile"
154 |
155 | # gateway_dtls_vpnvserver_name = "Gateway_DTLS_vServer"
156 | # gateway_dtls_vpnvserver_servicetype = "DTLS"
157 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/ldap_radius/step2_gateway_ldap_radius/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | citrixadc = {
4 | source = "citrix/citrixadc"
5 | }
6 | }
7 | }
8 | provider "citrixadc" {
9 | endpoint = "https://${var.primary_netscaler_nsip}"
10 | username = "" # NS_LOGIN env variable
11 | password = "" # NS_PASSWORD env variable
12 | insecure_skip_verify = true
13 | }
14 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/oauth/README.md:
--------------------------------------------------------------------------------
1 | # Simplified Gateway with OAuth
2 |
3 | This example shows how to configure a simplified gateway with OAuth authentication.
4 |
5 |
6 | ## Folder Structure
7 | There are two separate Terraform modules:
8 | 1. `step1_configure_ha` configures the NetScalers in HA mode
9 | 2. `step2_gateway_oauth` configures the gateway OAuth use case on the primary NetScaler once HA is in place.
10 |
11 | Each of these Terraform modules follows the file structure described below.
12 |
13 | Refer [HERE](../../../assets/common_docs/terraform/folder_structure.md).
14 |
15 | ## Pre-requisites
16 |
17 | 1. Two NetScaler ADCs should already be provisioned in the same subnet
18 | 2. All the certificates needed for the gateway configuration should be present in the `step2_gateway_oauth` folder
19 |
20 | ## Usage
21 |
22 | ### Step1: Configure HA
23 |
24 | 1. `cd step1_configure_ha`
25 | 2. Refer [HERE](../../../assets/common_docs/terraform/terraform_usage.md) for steps
26 |
27 | ### Step2: Further Gateway Configuration
28 |
29 | 1. `cd step2_gateway_oauth`
30 | 2. Refer [HERE](../../../assets/common_docs/terraform/terraform_usage.md) for steps
31 |
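For reference, a run of either step looks roughly like the sketch below. It assumes the NITRO credentials are supplied through the `NS_LOGIN` and `NS_PASSWORD` environment variables (as hinted in `versions.tf`) and that you have filled in your own values in a copy of `example.tfvars`.

```bash
# Rough workflow for step1_configure_ha or step2_gateway_oauth (values are placeholders)
export NS_LOGIN='nsroot'              # NetScaler management user
export NS_PASSWORD='<your-password>'  # NetScaler management password

terraform init                             # installs the citrix/citrixadc provider
terraform plan -var-file example.tfvars    # review the planned configuration
terraform apply -var-file example.tfvars   # push the configuration to the NetScaler
```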
32 | ## Network Architecture
33 | To be updated
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/oauth/step1_configure_ha/example.tfvars:
--------------------------------------------------------------------------------
1 | netscaler1_nsip = "10.10.10.1"
2 | netscaler2_nsip = "10.10.10.2"
3 |
4 | rpc_node_password = "verysecretrpcnodepassword"
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/oauth/step1_configure_ha/main.tf:
--------------------------------------------------------------------------------
1 | # add ha node 1
2 | resource "citrixadc_hanode" "netscaler1" {
3 | hanode_id = 1
4 | ipaddress = var.netscaler2_nsip
5 | }
6 |
7 | # add ha node 1
8 | resource "citrixadc_hanode" "netscaler2" {
9 | provider = citrixadc.netscaler2
10 | hanode_id = 1
11 | ipaddress = var.netscaler1_nsip
12 |
13 | depends_on = [citrixadc_hanode.netscaler1]
14 | }
15 |
16 | resource "citrixadc_systemparameter" "netscaler1_ns_prompt" {
17 | promptstring = "%u@%s"
18 | }
19 | resource "citrixadc_systemparameter" "netscaler2_ns_prompt" {
20 | provider = citrixadc.netscaler2
21 | promptstring = "%u@%s"
22 | }
23 |
24 | # It is best practice to change the RPC node password
25 | # set rpcnode -password secretpassword -secure ON
26 | resource "citrixadc_nsrpcnode" "netscaler1to1_rpc_node" {
27 | ipaddress = var.netscaler1_nsip
28 | password = var.rpc_node_password
29 | secure = "ON"
30 |
31 | depends_on = [citrixadc_hanode.netscaler1]
32 | }
33 |
34 | # It is best practice to change the RPC node password
35 | resource "citrixadc_nsrpcnode" "netscaler1to2_rpc_node" {
36 | ipaddress = var.netscaler2_nsip
37 | password = var.rpc_node_password
38 | secure = "ON"
39 |
40 | depends_on = [citrixadc_hanode.netscaler1]
41 | }
42 |
43 | # It is best practice to change the RPC node password
44 | resource "citrixadc_nsrpcnode" "netscaler2to1_rpc_node" {
45 | provider = citrixadc.netscaler2
46 | ipaddress = var.netscaler1_nsip
47 | password = var.rpc_node_password
48 | secure = "ON"
49 |
50 | depends_on = [citrixadc_hanode.netscaler2]
51 | }
52 |
53 | # It is best practice to change the RPC node password
54 | resource "citrixadc_nsrpcnode" "netscaler2to2_rpc_node" {
55 | provider = citrixadc.netscaler2
56 | ipaddress = var.netscaler2_nsip
57 | password = var.rpc_node_password
58 | secure = "ON"
59 |
60 | depends_on = [citrixadc_hanode.netscaler2]
61 | }
62 |
63 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/oauth/step1_configure_ha/variables.tf:
--------------------------------------------------------------------------------
1 | variable "netscaler1_nsip" {
2 | type = string
3 | description = "NetScaler1 IP Address"
4 | }
5 | variable "netscaler2_nsip" {
6 | type = string
7 | description = "NetScaler2 IP Address"
8 | }
9 | variable "rpc_node_password" {
10 | type = string
11 | sensitive = true
12 | description = "The new ADC RPC node password that will replace the default one on both ADC instances. [Learn More about RPCNode](https://docs.citrix.com/en-us/citrix-adc/current-release/getting-started-with-citrix-adc/change-rpc-node-password.html)"
13 | }
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/oauth/step1_configure_ha/versions.tf:
--------------------------------------------------------------------------------
1 | # provider
2 | terraform {
3 | required_providers {
4 | citrixadc = {
5 | source = "citrix/citrixadc"
6 | }
7 | }
8 | }
9 |
10 | provider "citrixadc" {
11 | endpoint = format("http://%s", var.netscaler1_nsip)
12 | username = "" # NS_LOGIN env variable
13 | password = "" # NS_PASSWORD env variable
14 | }
15 |
16 | provider "citrixadc" {
17 | alias = "netscaler2"
18 | endpoint = format("http://%s", var.netscaler2_nsip)
19 | username = "" # NS_LOGIN env variable
20 | password = "" # NS_PASSWORD env variable
21 | }
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/oauth/step2_gateway_oauth/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | citrixadc = {
4 | source = "citrix/citrixadc"
5 | }
6 | }
7 | }
8 | provider "citrixadc" {
9 | endpoint = "https://${var.primary_netscaler_nsip}"
10 | username = "" # NS_LOGIN env variable
11 | password = "" # NS_PASSWORD env variable
12 | insecure_skip_verify = true
13 | }
14 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/saml/README.md:
--------------------------------------------------------------------------------
1 | # Simplified Gateway with SAML
2 |
3 | This example shows how to configure a simplified gateway with SAML authentication.
4 |
5 |
6 | ## Folder Structure
7 | There are two separate Terraform modules:
8 | 1. `step1_configure_ha` configures the NetScalers in HA mode
9 | 2. `step2_gateway_saml` configures the gateway SAML use case on the primary NetScaler once HA is in place.
10 |
11 | Each of these Terraform modules follows the file structure described below.
12 |
13 | Refer [HERE](../../../assets/common_docs/terraform/folder_structure.md).
14 |
15 | ## Pre-requisites
16 |
17 | 1. Two NetScaler ADCs should already be provisioned in the same subnet
18 | 2. All the certificates needed for the gateway configuration should be present in the `step2_gateway_saml` folder
19 |
20 | ## Usage
21 |
22 | ### Step1: Configure HA
23 |
24 | 1. `cd step1_configure_ha`
25 | 2. Refer [HERE](../../../assets/common_docs/terraform/terraform_usage.md) for steps
26 |
27 | ### Step2: Further Gateway Configuration
28 |
29 | 1. `cd step2_gateway_saml`
30 | 2. Refer [HERE](../../../assets/common_docs/terraform/terraform_usage.md) for steps
31 |
32 | ## Network Architecture
33 | To be updated
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/saml/step1_configure_ha/example.tfvars:
--------------------------------------------------------------------------------
1 | netscaler1_nsip = "10.10.10.1"
2 | netscaler2_nsip = "10.10.10.2"
3 |
4 | rpc_node_password = "verysecretrpcnodepassword"
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/saml/step1_configure_ha/main.tf:
--------------------------------------------------------------------------------
1 | # add ha node
2 | resource "citrixadc_hanode" "netscaler1" {
3 | hanode_id = 1
4 | ipaddress = var.netscaler2_nsip
5 | }
6 |
7 | # add ha node
8 | resource "citrixadc_hanode" "netscaler2" {
9 | provider = citrixadc.netscaler2
10 | hanode_id = 1
11 | ipaddress = var.netscaler1_nsip
12 |
13 | depends_on = [citrixadc_hanode.netscaler1]
14 | }
15 |
16 | resource "citrixadc_systemparameter" "netscaler1_ns_prompt" {
17 | promptstring = "%u@%s"
18 | }
19 | resource "citrixadc_systemparameter" "netscaler2_ns_prompt" {
20 | provider = citrixadc.netscaler2
21 | promptstring = "%u@%s"
22 | }
23 |
24 | # It is best practice to change the RPC node password
25 | resource "citrixadc_nsrpcnode" "netscaler1to1_rpc_node" {
26 | ipaddress = var.netscaler1_nsip
27 | password = var.rpc_node_password
28 | secure = "ON"
29 |
30 | depends_on = [citrixadc_hanode.netscaler1]
31 | }
32 |
33 | # It is best practice to change the RPC node password
34 | resource "citrixadc_nsrpcnode" "netscaler1to2_rpc_node" {
35 | ipaddress = var.netscaler2_nsip
36 | password = var.rpc_node_password
37 | secure = "ON"
38 |
39 | depends_on = [citrixadc_hanode.netscaler1]
40 | }
41 |
42 | # It is best practice to change the RPC node password
43 | resource "citrixadc_nsrpcnode" "netscaler2to1_rpc_node" {
44 | provider = citrixadc.netscaler2
45 | ipaddress = var.netscaler1_nsip
46 | password = var.rpc_node_password
47 | secure = "ON"
48 |
49 | depends_on = [citrixadc_hanode.netscaler2]
50 | }
51 |
52 | # It is best practice to change the RPC node password
53 | resource "citrixadc_nsrpcnode" "netscaler2to2_rpc_node" {
54 | provider = citrixadc.netscaler2
55 | ipaddress = var.netscaler2_nsip
56 | password = var.rpc_node_password
57 | secure = "ON"
58 |
59 | depends_on = [citrixadc_hanode.netscaler2]
60 | }
61 |
62 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/saml/step1_configure_ha/variables.tf:
--------------------------------------------------------------------------------
1 | variable "netscaler1_nsip" {
2 | type = string
3 | description = "NetScaler1 IP Address"
4 | }
5 | variable "netscaler2_nsip" {
6 | type = string
7 | description = "NetScaler2 IP Address"
8 | }
9 | variable "rpc_node_password" {
10 | type = string
11 | sensitive = true
12 | description = "The new ADC RPC node password that will replace the default one on both ADC instances. [Learn More about RPCNode](https://docs.citrix.com/en-us/citrix-adc/current-release/getting-started-with-citrix-adc/change-rpc-node-password.html)"
13 | }
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/saml/step1_configure_ha/versions.tf:
--------------------------------------------------------------------------------
1 | # provider
2 | terraform {
3 | required_providers {
4 | citrixadc = {
5 | source = "citrix/citrixadc"
6 | }
7 | }
8 | }
9 |
10 | provider "citrixadc" {
11 | endpoint = format("http://%s", var.netscaler1_nsip)
12 | username = ""
13 | password = ""
14 | }
15 |
16 | provider "citrixadc" {
17 | alias = "netscaler2"
18 | endpoint = format("http://%s", var.netscaler2_nsip)
19 | username = ""
20 | password = ""
21 | }
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/saml/step2_gateway_saml/example.tfvars:
--------------------------------------------------------------------------------
1 | # SSL Certificates
2 | servercertfile_name = "ns-server1.cert"
3 | servekeyfile_name = "ns-server1.key"
4 |
5 | intermediate_certificate1_name = "remote_mycoolcompany_com.ca-bundle"
6 | intermediate_certificate2_name = "remote_mycoolcompany_com.ca-bundle_ic1"
7 |
8 | gateway_certfile_name = "remote_mycoolcompany_com.crt"
9 | gateway_keyfile_name = "remote.mycoolcompany.com_key3.txt"
10 |
11 | okta_certfile_name = "okta.cert"
12 |
13 | # Basic Networking
14 | snip_ip_address = "10.0.10.12"
15 | snip_netmask = "255.255.255.0"
16 |
17 |
18 | # Centralized authentication for management access
19 | systemgroup1_name = "NetScaler-ReadOnly"
20 | systemgroup2_name = "NetScaler-Operator"
21 | systemgroup3_name = "NetScaler-Network"
22 | systemgroup4_name = "NetScaler-Sysadmin"
23 | systemgroup5_name = "NetScaler-SuperUser"
24 |
25 | management_authenticationldapaction_servername = "10.0.10.13"
26 | management_authenticationldapaction_ldapbase = "CN=Users,DC=mycoolcompany,DC=local"
27 | management_authenticationldapaction_ldapbinddn = "NetScaler-Service-Account@mycoolcompany.local"
28 | management_authenticationldapaction_ldapbinddnpassword = "secretpassword"
29 | management_authenticationldapaction_searchfilter = "memberOf:1.2.840.113556.1.4.1941:=CN=NetScaler-Admins,CN=Users,DC=mycoolcompany,DC=local"
30 |
31 | # SNMP
32 | snmpcommunity_name = "public"
33 | snmpmanager1_ipaddress = "10.50.50.10"
34 | snmpmanager2_ipaddress = "10.50.50.20"
35 |
36 | snmpuser_name = "snmp_monitoring_user"
37 | snmpuser_authpasswd = "secretpassword"
38 | snmpuser_privpasswd = "secretpassword"
39 |
40 | snmptrap1_trapdestination = "10.50.50.10"
41 | snmptrap2_trapdestination = "10.50.50.20"
42 |
43 | # Syslog
44 | auditsyslogaction_name = "syslog.mycoolcompany.internal"
45 | auditsyslogaction_serverip = "10.0.10.1"
46 |
47 | # DNS
48 | dns_server1_ipaddress = "8.8.8.8"
49 | dns_server2_ipaddress = "1.1.1.1"
50 | dns_tcp_lbmonitor_query = "remote.mycoolcompany.com"
51 | dns_udp_lbmonitor_query = "remote.mycoolcompany.com"
52 |
53 | server3_ipaddress = "10.0.10.25"
54 |
55 |
56 | # Load balancing configuration
57 | storefront_lbvserver_ipv46 = "10.0.10.14"
58 | ldap_lbvserver_ipv46 = "10.0.10.13"
59 |
60 | # Authentication Configuration
61 | ldap_server1_ipaddress = "192.168.3.10"
62 | ldap_server2_ipaddress = "192.168.3.11"
63 |
64 | # Store Front configuration
65 | storefront_lbmonitor_storename = "Store"
66 | storefront_server1_ipaddress = "192.168.3.30"
67 | storefront_server2_ipaddress = "192.168.3.31"
68 |
69 | ldap_lbmonitor_password = "secretpassword"
70 | ldap_lbmonitor_basedn = "CN=Users,DC=mycoolcompany,DC=local"
71 | ldap_lbmonitor_binddn = "NetScaler-Service-Account@mycoolcompany.local"
72 | ldap_lbmonitor_filter = "memberOf:1.2.840.113556.1.4.1941:=CN=NetScaler-Admins,CN=Users,DC=mycoolcompany,DC=local"
73 |
74 | gateway_authenticationldapaction_servername = "10.0.10.13"
75 | gateway_authenticationldapaction_ldapbase = "CN=Users,DC=mycoolcompany,DC=local"
76 | gateway_authenticationldapaction_ldapbinddn = "NetScaler-Service-Account@mycoolcompany.local"
77 | gateway_authenticationldapaction_ldapbinddnpassword = "secretpassword"
78 |
79 |
80 |
81 | authenticationsamlaction_samlredirecturl = "https://trial-8385130.okta.com/app/citrixnetscalergateway_saml/exk4aksdlp46lHubx697/sso/saml"
82 | authenticationsamlaction_samluserfield = "NameID"
83 | authenticationsamlaction_samlissuername = "http://www.okta.com/exk4aksdlp46lHubx697"
84 | authenticationsamlaction_logouturl = "https://trial-8385130.okta.com"
85 |
86 | gateway_vpnvserver_ipv46 = "10.0.10.15"
87 |
88 | vpnsessionaction1_wihome = "https://10.0.10.14/Citrix/StoreWeb"
89 | vpnsessionaction1_storefronturl = "https://10.0.10.14/"
90 |
91 | vpnsessionaction2_wihome = "https://10.0.10.14/Citrix/StoreWeb"
92 |
93 |
94 | vpnvserver_staserver = ["http://192.168.3.30", "http://192.168.3.31"]
95 |
96 | gateway_dtls_vpnvserver_ipv46 = "10.0.10.15"
97 |
98 |
99 | ## These defaults don't need to be modified
100 |
101 | # promptstring = "%u@%h-%s"
102 |
103 | # management_authenticationldapaction_name = "Management_LDAP_Server"
104 |
105 | # dns_tcp_lbvserver_name = "DNS_TCP_LB"
106 | # dns_tcp_lbvserver_servicetype = "DNS_TCP"
107 | # dns_tcp_lbvserver_ipv46 = "0.0.0.0"
108 |
109 | # dns_udp_lbvserver_name = "DNS_UDP_LB"
110 | # dns_udp_lbvserver_servicetype = "DNS"
111 | # dns_udp_lbvserver_ipv46 = "0.0.0.0"
112 |
113 | # storefront_lbvserver_name = "StoreFront_LB"
114 | # storefront_lbvserver_servicetype = "SSL"
115 |
116 | # ldap_lbvserver_name = "LDAP_LB"
117 | # ldap_lbvserver_servicetype = "TCP"
118 |
119 |
120 | # dns_tcp_lbmonitor_name = "DNS_TCP_monitor"
121 | # dns_tcp_lbmonitor_type = "DNS-TCP"
122 |
123 | # dns_udp_lbmonitor_name = "DNS_UDP_monitor"
124 | # dns_udp_lbmonitor_type = "DNS"
125 |
126 |
127 | # storefront_lbmonitor_name = "StoreFront_monitor"
128 | # storefront_lbmonitor_type = "STOREFRONT"
129 | # storefront_lbmonitor_dispatcherip = "127.0.0.1"
130 |
131 |
132 | # ldap_lbmonitor_name = "LDAP_MON"
133 | # ldap_lbmonitor_type = "LDAP"
134 | # ldap_lbmonitor_dispatcherip = "127.0.0.1"
135 |
136 | # gateway_authenticationldapaction_name = "Gateway_LDAP_Server"
137 |
138 |
139 | # gateway_vpnvserver_name = "Gateway_vServer"
140 | # gateway_vpnvserver_servicetype = "SSL"
141 |
142 | # vpnsessionaction1_name = "Native_Profile"
143 |
144 | # vpnsessionaction2_name = "Web_Profile"
145 |
146 | # gateway_dtls_vpnvserver_name = "Gateway_DTLS_vServer"
147 | # gateway_dtls_vpnvserver_servicetype = "DTLS"
148 |
--------------------------------------------------------------------------------
/golden_templates/netscaler_gateway/saml/step2_gateway_saml/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | citrixadc = {
4 | source = "citrix/citrixadc"
5 | }
6 | }
7 | }
8 | provider "citrixadc" {
9 | endpoint = "http://${var.primary_netscaler_nsip}"
10 | username = ""
11 | password = ""
12 | }
13 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/README.md:
--------------------------------------------------------------------------------
1 | # Upgrade NetScalers
2 |
3 | ⚠️ These Ansible playbooks are in the `beta` phase. It is recommended not to use them in production environments yet.
4 |
5 | ## Contact
6 |
7 | Please [raise issues](https://github.com/netscaler/automation-toolkit/issues) for any questions.
8 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/high-availability/issu-mode/README.md:
--------------------------------------------------------------------------------
1 | # In-Service Software Upgrade (ISSU) support for high availability using an Ansible playbook
2 |
3 | ## Prerequisites
4 |
5 | 1. Ansible version should be 4.9.0.
6 | ```bash
7 | pip install ansible==4.9.0
8 | ```
9 | 2. Two NetScalers should be in High-Availability mode.
10 | 3. The NetScaler build image should be present on both NetScalers. If you don't have the build image available locally, refer [HERE](https://www.citrix.com/downloads/citrix-adc/) to download it. After downloading, upload the build to both NetScalers and update the playbook YAML file with the path and file name of the build image (see the sketch after this list).
11 | 4. Password-less SSH authentication between the controller node (the system on which you are running the ansible playbook) and the NetScalers. For more info on how to do that, refer [HERE](https://github.com/citrix/citrix-adc-ansible-modules#usage)
12 | 5. Installing ADC modules and plugins
13 | ```bash
14 | ansible-galaxy collection install git+https://github.com/citrix/citrix-adc-ansible-modules.git#/ansible-collections/adc
15 | ```
16 |
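The variables to edit are defined at the top of each play in `issu_upgrade.yaml`; with the build uploaded to `/var/nsinstall/new_build/` they would look like this (the file name is an example):

```yaml
# In issu_upgrade.yaml (both the secondary and primary plays)
build_location: "/var/nsinstall/new_build/"     # directory on the NetScaler containing the build (keep the trailing "/")
build_file_name: "build-13.1-34.11_nc_64.tgz"   # file name of the uploaded build image
```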
17 | ## Usage with demo video
18 |
19 |
20 |
21 | 1. Edit the inventory file with the NSIPs of the NetScalers.
22 | 2. Update the YAML file with the `build_location` and `build_file_name`, referring to the path and file name of the build image.
23 | 3. Run
24 | ```bash
25 | ansible-playbook issu_upgrade.yaml -i inventory.txt
26 | ```
27 |
28 | ## Further Reference
29 |
30 | * Upgrade a high availability pair [documentation](https://docs.netscaler.com/en-us/citrix-adc/current-release/upgrade-downgrade-citrix-adc-appliance/issu-high-availability.html)
31 | * Video Reference on how to run this ansible-playbook [HERE](https://youtu.be/lYuo9s76-PM)
32 |
33 |
34 | ## For Password-based SSH authentication:
35 |
36 | Refer [HERE](../../../../assets/common_docs/ansible/ansible_password_based_ssh.md)
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/high-availability/issu-mode/inventory.txt:
--------------------------------------------------------------------------------
1 | [primary]
2 | 10.10.10.10 nsip=10.10.10.10 nitro_user=nsroot nitro_pass=verysecretpassword validate_certs=no
3 |
4 | [secondary]
5 | 10.10.10.11 nsip=10.10.10.11 nitro_user=nsroot nitro_pass=verysecretpassword validate_certs=no
6 |
7 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/high-availability/issu-mode/issu_upgrade.yaml:
--------------------------------------------------------------------------------
1 | - hosts: secondary
2 | remote_user: nsroot
3 | connection: citrix.adc.ssh_citrix_adc
4 | gather_facts: false
5 | collections:
6 | - citrix.adc
7 | vars:
8 | ansible_python_interpreter: /var/python/bin/python
9 |
10 | build_location: "/var/nsinstall/new_build/"
11 | build_file_name: "build-13.1-34.11_nc_64.tgz"
12 | migration_poll_timeout: 120
13 |
14 | nscli_command: "force ha failover -force"
15 | migration_poll_delay: "{{ (migration_poll_timeout/5)|int }}"
16 |
17 | tasks:
18 | - name: Untar the build file in Secondary node
19 | shell: "tar xvf {{ build_location }}{{ build_file_name }} -C {{ build_location }}"
20 | register: tar_output
21 |
22 | # - name: Untar result
23 | # debug:
24 | # msg: "{{ tar_output }}"
25 |
26 | - name: Upgrade build in Secondary node
27 | shell: "./installns -Y"
28 | args:
29 | chdir: "{{ build_location }}"
30 | register: nscli_output
31 | async: 120
32 | poll: 0
33 |
34 | - name: Wait for the Secondary to Come-up
35 | wait_for_connection:
36 | connect_timeout: 10
37 | sleep: 10
38 | delay: 120
39 | timeout: 900
40 |
41 | - name: nsmigration resource
42 | delegate_to: localhost
43 | register: result
44 | vars:
45 | ansible_python_interpreter: /usr/bin/python
46 | citrix_adc_nitro_request:
47 | nsip: "{{ nsip }}"
48 | nitro_user: "{{ nitro_user }}"
49 | nitro_pass: "{{ nitro_pass }}"
50 | validate_certs: no
51 |
52 | operation: action
53 | action: start
54 |
55 | resource: nsmigration
56 | attributes: {}
57 |
58 | - name: Poll till the migration completes
59 | delegate_to: localhost
60 | vars:
61 | ansible_python_interpreter: /usr/bin/python
62 | citrix_adc_nitro_info:
63 | nsip: "{{ nsip }}"
64 | nitro_user: "{{ nitro_user }}"
65 | nitro_pass: "{{ nitro_pass }}"
66 | validate_certs: no
67 |
68 | endpoint: nsmigration
69 | nitro_info_key: nsmigration
70 |
71 | until: result['nitro_info'][0]['migrationstatus'] == "Migration is completed"
72 | retries: 5
73 | delay: "{{migration_poll_delay}}"
74 |
75 | register: result
76 |
77 | - hosts: primary
78 | remote_user: nsroot
79 | connection: citrix.adc.ssh_citrix_adc
80 | gather_facts: False
81 | vars:
82 | ansible_python_interpreter: /var/python/bin/python
83 |
84 | build_location: "/var/nsinstall/new_build/"
85 | build_file_name: "build-13.1-34.11_nc_64.tgz"
86 | nscli_command: "force ha failover -f"
87 |
88 | tasks:
89 | - name: Untar the build file in Old Primary node
90 | shell: "tar xvf {{ build_location }}{{ build_file_name }} -C {{ build_location }}"
91 | register: tar_output
92 |
93 | # - name: Untar result
94 | # debug:
95 | # msg: "{{ tar_output }}"
96 |
97 | - name: Upgrade build in Old Primary
98 | shell: "./installns -Y"
99 | args:
100 | chdir: "{{ build_location }}"
101 | register: nscli_output
102 | async: 120
103 | poll: 0
104 |
105 | - name: Wait for the Old Primary to Come-up
106 | wait_for_connection:
107 | connect_timeout: 10
108 | sleep: 10
109 | delay: 120
110 | timeout: 900
111 |
112 | - name: Force HA failover
113 | shell: "nscli -s -U :nsroot:{{nitro_pass}} {{ nscli_command }}"
114 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/high-availability/normal-mode/README.md:
--------------------------------------------------------------------------------
1 | # Normal NetScaler upgrade support for a high availability pair using an Ansible playbook
2 |
3 | ## Prerequisites
4 |
5 | 1. Ansible version should be 4.9.0.
6 | ```bash
7 | pip install ansible==4.9.0
8 | ```
9 | 2. Two NetScalers should be in High-Availability mode.
10 | 3. Regarding the NetScaler build image, there are two options:
11 |
12 |     a. The NetScaler build image is already present on both the primary and secondary NetScalers.
13 |       * Update the [variables.yaml](./variables.yaml) file so that the `netscaler_build_location` and `netscaler_build_file_name` attributes reflect the path and file name of the build image already present on the NetScaler.
14 |       * Set the `want_to_copy_build` attribute to `no` in the same [variables.yaml](./variables.yaml) file.
15 |
16 |     b. The NetScaler build image is not present on the primary and secondary NetScalers.
17 |       * Refer [HERE](https://www.citrix.com/downloads/citrix-adc/) to download the build image.
18 |       * After downloading the build, set `want_to_copy_build` to `yes` and `local_build_file_full_path_with_name` to the full path of the downloaded build on the local system in the same [variables.yaml](./variables.yaml) file (a sketch follows this list).
19 |
20 |
21 | 4. Passwordless SSH authentication between the NetScalers and the control node (the system on which the ansible-playbook is running). For additional information, see [HERE](https://github.com/citrix/citrix-adc-ansible-modules/tree/887afdef75865a0ebd4bccb9c759a4b06689107a#usage).
22 | 5. Installing ADC modules and plugins
23 | ```bash
24 | ansible-galaxy collection install git+https://github.com/citrix/citrix-adc-ansible-modules.git#/ansible-collections/adc
25 | ```
26 | 6. By default, the Python interpreter on the NetScaler is located at `/var/python/bin/python`; if it is located elsewhere, set the `netscaler_python_path` variable in the [variables.yaml](./variables.yaml) file to the correct path.
27 |
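For option (b) above, the relevant entries in [variables.yaml](./variables.yaml) end up looking roughly like this (the build file name and version are examples):

```yaml
# variables.yaml -- copy the build from the local machine to both NetScalers (option b)
netscaler_build_location: "/var/nsinstall/new_build/"    # destination folder on the NetScalers (keep the trailing "/")
netscaler_build_file_name: "build-13.1-49.15_nc_64.tgz"
netscaler_target_version: "13.1-49.15"                   # "release-build" format
want_to_copy_build: "yes"
local_build_file_full_path_with_name: "./build-13.1-49.15_nc_64.tgz"
```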
28 | ## Usage with demo video
29 |
30 |
31 |
32 |
33 | 1. Edit the [inventory.ini](./inventory.ini) file with the NetScaler credentials.
34 | 2. Update the [variables.yaml](./variables.yaml) file with the necessary inputs.
35 | 3. Run the below command
36 | ```bash
37 | ansible-playbook ha_upgrade.yaml -i inventory.ini
38 | ```
39 |
40 |
41 | ## For Password-based SSH authentication:
42 |
43 | Refer [HERE](../../../../assets/common_docs/ansible/ansible_password_based_ssh.md)
44 |
45 | ## Troubleshooting
46 | * `No space left on device` error
47 |   * To run tasks, Ansible copies temporary files to the NetScaler, using `/root/` as the default temporary directory. If `/root/` on the NetScaler does not have enough free space, Ansible fails with this error. The [ansible.cfg](./ansible.cfg) file lets you change the temporary directory, as shown below.
48 |   * Temporary directory to use on targets when executing tasks:
49 | Add the lines below to the [ansible.cfg](./ansible.cfg) file under the `[defaults]` block.
50 | ```
51 | [defaults]
52 | remote_tmp = /var/
53 | ```
54 | * Refer [HERE](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/sh_shell.html#parameter-remote_tmp) for additional information on `remote_tmp`.
55 |
56 |
57 |
58 | ## Further Reference
59 |
60 | * Upgrade a high availability pair [NetScaler documentation](https://docs.netscaler.com/en-us/citrix-adc/current-release/upgrade-downgrade-citrix-adc-appliance/upgrade-downgrade-ha-pair.html)
61 | * Video Reference on how to run this ansible-playbook [HERE](https://youtu.be/mqbfWsaX5Xc)
62 |
63 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/high-availability/normal-mode/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | host_key_checking = False
3 | remote_tmp = /var/
4 | callbacks_enabled = profile_tasks
5 |
6 | [ssh_connection]
7 | scp_if_ssh = True
8 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/high-availability/normal-mode/inventory.ini:
--------------------------------------------------------------------------------
1 | [primary_netscaler]
2 | 10.10.10.10 nsip=10.10.10.10 nitro_user=nsroot nitro_pass=verysecretpassword validate_certs=no
3 |
4 | [secondary_netscaler]
5 | 10.10.10.11 nsip=10.10.10.11 nitro_user=nsroot nitro_pass=verysecretpassword validate_certs=no
6 |
7 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/high-availability/normal-mode/variables.yaml:
--------------------------------------------------------------------------------
1 | # NetScaler Build Information
2 | netscaler_build_location: "/var/nsinstall/new_build/" # Location where the build file is already present on the target NetScaler, or where it should be copied to from local machine (Please include the trailing "/")
3 | netscaler_build_file_name: "build-13.1-49.15_nc_64.tgz"
4 | netscaler_target_version: "13.1-49.15" # Please input this in the format: "release version-build version" Eg: "13.1-49.15"
5 |
6 | # Python path on the target NetScaler
7 | netscaler_python_path: /var/python/bin/python
8 |
9 | # Python path on the local machine
10 | local_python_path: /usr/bin/python
11 |
12 | # Copy the build file from local to Remote NetScaler
13 | want_to_copy_build: "no"
14 | local_build_file_full_path_with_name: "./build-13.1-49.15_nc_64.tgz"
15 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/standalone/README.md:
--------------------------------------------------------------------------------
1 | # Upgrade a NetScaler standalone appliance using an Ansible playbook
2 |
3 | ## Prerequisites
4 |
5 | 1. There should be enough storage in the NetScaler's `/var/` mount to copy and extract the build.
6 |
7 | 2. If more than one NetScaler is being upgraded in one go and passwordless authentication is not set up, those NetScalers should have the same password. Ansible prompts for this password when the `--ask-pass` option is passed to the `ansible-playbook` command.
8 |
9 | 3. Optional, but recommended: use a separate Python virtual environment for Ansible. This avoids conflicts with the Python packages already installed on the system.
10 |
11 | ```bash
12 | python3 -m venv ansible-4.9.0
13 | source ansible-4.9.0/bin/activate
14 | ```
15 |
16 | 4. The Ansible version should be 4.9.0 and the NetScaler ADC modules should be installed. If not, follow the steps below to install Ansible and the NetScaler ADC modules.
17 |
18 | ```bash
19 | pip3 install ansible==4.9.0
20 | ```
21 |
22 | 5. The NetScaler build image should be present either on the NetScaler or locally.
23 |
24 | > If you don't have the build image available locally, refer [HERE](https://www.citrix.com/downloads/citrix-adc/) to download the image.
25 |
26 | 6. Either of the following should be in place
27 |
28 |    a. Password-less SSH authentication between the controller node (the system on which you are running the ansible playbook) and the NetScalers.
29 |
30 |    b. `sshpass` should be installed on the controller node (the system on which you are running the ansible playbook).
31 |
32 | > For Linux -- `sudo apt install sshpass`
33 | > For MacOS -- `brew install hudochenkov/sshpass/sshpass`
34 |
35 | 7. Installing ADC modules and plugins
36 |
37 | ```bash
38 | ansible-galaxy collection install "git+https://github.com/citrix/citrix-adc-ansible-modules.git#/ansible-collections/adc,citrix.adc"
39 | ```
40 |
41 | ## Usage
42 |
43 | 1. Edit the `inventory.ini` file with the NSIPs of the NetScalers.
44 | 2. Update the `variables.yaml` file with the `netscaler_build_location` and `netscaler_build_file_name`, referring to the path and file name of the build image.
45 | 3. Run
46 |
47 | a. If there is passwordless authentication between the vm/laptop (where ansible is running) and the NetScaler
48 |
49 | ```bash
50 | ansible-playbook standalone_upgrade.yaml -i inventory.ini
51 | ```
52 |
53 | b. If there is no passwordless authentication between the vm/laptop (where ansible is running) and the NetScaler
54 |
55 | ```bash
56 | ansible-playbook standalone_upgrade.yaml -i inventory.ini --ask-pass
57 | ```
58 |
59 | ## Further Reference
60 |
61 | - Upgrade a NetScaler standalone appliance [documentation](https://docs.netscaler.com/en-us/citrix-adc/current-release/upgrade-downgrade-citrix-adc-appliance/upgrade-standalone-appliance.html)
62 |
63 | ## For Password-based SSH authentication
64 |
65 | Refer [HERE](../../../../assets/common_docs/ansible/ansible_password_based_ssh.md)
66 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/standalone/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | host_key_checking = False
3 | remote_tmp = /var/
4 |
5 | [ssh_connection]
6 | scp_if_ssh = True
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/standalone/inventory.ini:
--------------------------------------------------------------------------------
1 | [netscalers]
2 | 10.10.10.10 nsip=10.10.10.10 nitro_user=nsroot nitro_pass=verysecretpassword validate_certs=no
3 | 10.10.10.11 nsip=10.10.10.11 nitro_user=nsroot nitro_pass=verysecretpassword validate_certs=no
4 | 10.10.10.12 nsip=10.10.10.12 nitro_user=nsroot nitro_pass=verysecretpassword validate_certs=no
5 | 10.10.10.13 nsip=10.10.10.13 nitro_user=nsroot nitro_pass=verysecretpassword validate_certs=no
6 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/standalone/standalone_upgrade.yaml:
--------------------------------------------------------------------------------
1 | - name: Upgrade Standalone NetScaler
2 | hosts: netscalers
3 | remote_user: "{{ nitro_user }}"
4 | connection: citrix.adc.ssh_citrix_adc
5 | gather_facts: false
6 | vars_files: variables.yaml
7 |
8 | vars:
9 | ansible_python_interpreter: '{{ netscaler_python_path | default("/var/python/bin/python") }}'
10 |
11 | tasks:
12 | - name: Finding the current version of the NetScaler
13 | shell: "nscli -s -U :{{nitro_user}}:{{nitro_pass}} show version | grep -i 'NetScaler NS'"
14 |
15 | register: version
16 |
17 | - name: NetScaler version
18 | ansible.builtin.debug:
19 | msg: "{{ version[\"stdout_lines\"][0] | regex_replace(': Build ','-') | regex_replace('.nc.*$', '') | regex_replace('^.*NS', '') }}"
20 | register: present_version
21 |
22 | - name: NetScaler is already at the desired version
23 | ansible.builtin.debug:
24 | msg: "NetScaler is already at the desired version {{netscaler_target_version}}. Ending the play for {{nsip}}"
25 | when: present_version['msg'] == netscaler_target_version
26 |
27 | - name: End play for host {{nsip}}
28 | ansible.builtin.meta: end_host
29 | when: present_version['msg'] == netscaler_target_version
30 |
31 | - name: Copy build file from local machine to the Remote NetScaler
32 | ansible.builtin.copy:
33 | src: "{{local_build_file_full_path_with_name}}"
34 | dest: "{{netscaler_build_location}}"
35 | when: ((want_to_copy_build == "yes") or (want_to_copy_build == "YES"))
36 |
37 | - name: Untar the build file
38 | shell: "tar -xvzf {{ netscaler_build_location }}{{ netscaler_build_file_name }} -C {{ netscaler_build_location }}"
39 | register: tar_output
40 |
41 | - name: Upgrade build
42 | shell: "./installns -Y"
43 | args:
44 | chdir: "{{ netscaler_build_location }}"
45 | async: 120
46 | poll: 0
47 |
--------------------------------------------------------------------------------
/golden_templates/upgrade-netscaler/standalone/variables.yaml:
--------------------------------------------------------------------------------
1 | # NetScaler side folder to which the build will be copied, OR, if the build is already on the NetScaler, the path where it is located.
2 | netscaler_build_location: /var/nsinstall/new_build/
3 |
4 | # NetScaler side build file name
5 | netscaler_build_file_name: build-14.1-12.34_nc_64.tgz
6 |
7 | netscaler_target_version: "14.1-12.34" # Please input this in the format: "release version-build version" Eg: 13.1-34.11
8 |
9 | # Full path of the NetScaler build file present in the controller-node (where ansible-playbook is running)
10 | local_build_file_full_path_with_name: ./build-14.1-12.34_nc_64.tgz
11 |
12 | # Python path on the target NetScaler. Usually this is the default.
13 | netscaler_python_path: /var/python/bin/python
14 |
15 | # Do you want to copy the build file from the local machine to the remote NetScaler? Quotes are mandatory.
16 | want_to_copy_build: "yes"
17 |
--------------------------------------------------------------------------------
/labs/README.md:
--------------------------------------------------------------------------------
1 | # NetScaler Community
2 |
3 | ## Labs -- Instruqt Labs
4 |
5 | ### Dev Portal
6 |
7 |
8 |
9 | ### How to use Instruqt CLI
10 |
11 | >
12 |
13 | - `instruqt auth login` -- Authenticate the instruqt CLI
14 | - `instruqt track pull <track-slug>`
15 | > Usually `<track-slug>` will be the last part of the instruqt lab track URL
16 | - `instruqt help` -- instruqt cli help
17 | - `instruqt help <sub-command>` -- help on a sub-command
18 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: netscaler-adc-basic-content-switching-prerequisites
3 | id: z6pntui8nus4
4 | type: challenge
5 | title: Prerequisites
6 | teaser: Provision the base Infrastructure and Setup Terraform CLI
7 | notes:
8 | - type: text
9 | contents: Setup VPX
10 | tabs:
11 | - title: Bastion Host CLI
12 | type: terminal
13 | hostname: cloud-client
14 | - title: NetScaler ADC data
15 | type: service
16 | hostname: cloud-client
17 | path: /adc.html
18 | port: 80
19 | difficulty: basic
20 | timelimit: 900
21 | ---
22 |
23 | Introduction
24 | ============
25 |
26 | Welcome to the lab.
27 |
28 | In this lab, we have provisioned the following for you:
29 | 1. NetScaler ADC VPX
30 | 2. Two back-end servers
31 |
32 | You can visit the `NetScaler ADC data` tab to find more details about the IPs that have been assigned to these.
33 |
34 |
35 | As part of the lab, we will achieve the following:
36 | 1. Install the Terraform CLI binary and the NetScaler ADC provider [(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest) on a bastion host with access to the target ADC instance.
37 | 2. Deploy a Content Switching virtual server to route traffic to your applications.
38 | 3. Configure Content Switching Policies and route traffic based on URL path or header values.
39 |
40 | For this challenge we will need to download and set up the Terraform CLI binary.
41 | Terraform provides instructions on how to download and install the
42 | Terraform binary on this [page](https://www.terraform.io/downloads).
43 | All available Terraform binaries are listed [here](https://releases.hashicorp.com/terraform/). Let’s get started with Terraform installation.
44 |
45 | Install Terraform
46 | =================
47 |
48 | First we will download Terraform:
49 |
50 | 1. Download the .zip file with the binary for the linux_amd64 platform
51 |
52 | ```bash
53 | curl https://releases.hashicorp.com/terraform/1.1.4/terraform_1.1.4_linux_amd64.zip --output terraform_1.1.4_linux_amd64.zip
54 | ```
55 |
56 | 2. Extract the executable in the current directory
57 |
58 | ```bash
59 | unzip terraform_1.1.4_linux_amd64.zip
60 | ```
61 | 3. Move the extracted binary to a location defined in the PATH variable
62 |
63 | ```bash
64 | mv ./terraform /usr/local/bin
65 | ```
66 | 4. Verify that the Terraform binary is executable from the command line
67 |
68 | ```bash
69 | terraform version
70 | ```
71 | The above command should show you the version information of the Terraform binary.
72 | > Ignore any out of date warning message.
73 |
74 | Conclusion
75 | ==========
76 |
77 | Having installed the Terraform binary, our next task will be
78 | to start using Terraform for configuration management.
79 |
80 | Please proceed to the next challenge.
81 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | if ! which terraform ; then
10 | fail-message "Terraform binary not found in PATH"
11 | exit 1
12 | fi
13 | exit 0
14 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 |
10 | mkdir -p /var/tmp/first_time_password_reset
11 |
12 | cd /var/tmp/first_time_password_reset
13 |
14 | ADC_INSTANCE_ID=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(id)"`
15 | ADC_INSTANCE_NSIP=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
16 |
17 | # provider.tf
18 | cat <<EOF > provider.tf
19 |
20 | terraform {
21 | required_providers {
22 | citrixadc = {
23 | source = "citrix/citrixadc"
24 | }
25 | }
26 | }
27 |
28 | provider "citrixadc" {
29 | endpoint = "http://${ADC_INSTANCE_NSIP}"
30 | username = "nsroot"
31 | password = "notnsroot"
32 | }
33 |
34 | EOF
35 |
36 | # resources.tf
37 | cat <<EOF > resources.tf
38 |
39 | resource "citrixadc_password_resetter" "tf_resetter" {
40 | username = "nsroot"
41 | password = "${ADC_INSTANCE_ID}"
42 | new_password = "notnsroot"
43 | }
44 |
45 | EOF
46 |
47 | sleep 10
48 |
49 | terraform init
50 |
51 | sleep 5
52 |
53 | terraform apply -auto-approve
54 |
55 | exit 0
56 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "Installing terraform binary"
10 | mkdir -p /var/tmp/terraform_install
11 | cd /var/tmp/terraform_install
12 | curl https://releases.hashicorp.com/terraform/1.1.4/terraform_1.1.4_linux_amd64.zip --output terraform_1.1.4_linux_amd64.zip
13 | unzip terraform_1.1.4_linux_amd64.zip
14 | mv ./terraform /usr/local/bin
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/02-reset-default-password/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: reset-default-password
3 | id: sfyafcnbtsri
4 | type: challenge
5 | title: Reset the NetScaler ADC default password
6 | teaser: Reset the NetScaler ADC default password
7 | notes:
8 | - type: text
9 | contents: Reset the NetScaler ADC default password
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/reset-default-password
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 900
25 | ---
26 |
27 | Introduction
28 | ============
29 |
30 | ## Reset default password
31 |
32 | The NetScaler ADC instance provisioned in Google Cloud
33 | has a default initial password. Before any configuration can be applied, we need to reset this default password. This can be done interactively through the Web GUI or
34 | through the NetScaler CLI over SSH.
35 |
36 | In this challenge, we are going to reset the password using the Terraform provider.
37 |
38 | Terraform configuration
39 | =======================
40 |
41 | The configuration has already been written to the directory
42 | `/root/reset-default-password`. You can browse the files in the Code Editor tab.
43 |
44 | `provider.tf` - This file contains the provider connection information.
45 | You will notice that we define the endpoint to be
46 | an `https` URL. This ensures that the data exchanged with the target ADC
47 | is encrypted.
48 | Because the target ADC's default TLS certificate is self-signed,
49 | we also need to set the option `insecure_skip_verify = true`. This prevents the HTTP requests from failing due to certificate
50 | verification errors.
51 | For production instances it is strongly recommended to replace
52 | the default TLS certificate with a properly signed one.
53 |
54 | `resources.tf` - This file contains the resource which performs the actual
55 | password reset. On Google Cloud the default password is the instance id.
56 | The new password is defined with the `new_password` attribute.
57 | You can change it to something other than the provided value. If you do, make sure to take note of it, because you will need to update the resource files for the subsequent challenges. A sketch of the two files follows.
58 |
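Roughly, the two files look like the sketch below. The endpoint and the current default password are filled in automatically by the lab setup; the values shown here are placeholders.

```hcl
# provider.tf -- connection details for the target ADC
terraform {
  required_providers {
    citrixadc = {
      source = "citrix/citrixadc"
    }
  }
}

provider "citrixadc" {
  endpoint             = "https://<ADC-NSIP>" # NSIP of the target ADC (placeholder)
  insecure_skip_verify = true                 # the default TLS certificate is self-signed
}

# resources.tf -- resets the default nsroot password
resource "citrixadc_password_resetter" "tf_resetter" {
  username     = "nsroot"
  password     = "<instance-id>" # the Google Cloud instance id (the default password)
  new_password = "verysecret"    # change this if you like, and note it down
}
```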
59 | Apply configuration
60 | ===================
61 | Go to Bastion Host CLI and perform following operations :
62 | 1. Change current directory to the one containing the terraform configuration files
63 |
64 | ```bash
65 | cd /root/reset-default-password
66 | ```
67 |
68 | 2. Initialize the terraform configuration.
69 | ```bash
70 | terraform init
71 | ```
72 | This command will download and install the NetScaler ADC provider [(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest),
73 | which is needed to run the configuration.
74 |
75 | 3. Apply the configuration.
76 | ```bash
77 | terraform apply
78 | ```
79 | This command will present you with the configuration changes
80 | and will prompt you to verify you want to apply them.
81 |
82 | Answer `yes` and press enter.
83 |
84 | If all goes well you should see a message saying 1 resource was
85 | created without any errors.
86 |
87 | Conclusion
88 | ==========
89 |
90 | We have now configured the target ADC with a new password.
91 |
92 | If you changed the new password to something other than the one
93 | supplied, please take note of it, since you will need it
94 | for the NetScaler ADC provider [(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest) configuration in the subsequent challenges.
95 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/02-reset-default-password/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/reset-default-password
14 |
15 | if ! terraform show | grep citrixadc_password_resetter ; then
16 | fail-message "Terraform state does not contain a password resetter resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/02-reset-default-password/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/02-reset-default-password/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | set -e
12 |
13 | mkdir -p /root/reset-default-password
14 | cd /root/reset-default-password
15 | zone="europe-west1-b"
16 |
17 | adc_instance_nsip=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
18 |
19 | # Create provider.tf
20 | cat <<EOF > provider.tf
21 |
22 | terraform {
23 | required_providers {
24 | citrixadc = {
25 | source = "citrix/citrixadc"
26 | version = "1.9.0"
27 | }
28 | }
29 | }
30 |
31 | provider "citrixadc" {
32 | endpoint = "https://${adc_instance_nsip}"
33 | insecure_skip_verify = true
34 | }
35 |
36 | EOF
37 |
38 | instance_id=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(id)"`
39 |
40 | # Create resources.tf with the password reset resource
41 | cat <<EOF > resources.tf
42 |
43 | resource "citrixadc_password_resetter" "tf_resetter" {
44 | username = "nsroot"
45 | password = "${instance_id}"
46 | new_password = "verysecret"
47 | }
48 |
49 | EOF
50 |
51 | exit 0
52 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/02-reset-default-password/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 | set -e
11 | cd /root/reset-default-password
12 | terraform init
13 | terraform apply -auto-approve
14 | exit 0
15 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/03-netscaler-adc-basic-content-switching-csvserver/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-cs-configuration
14 |
15 | if ! terraform show | grep citrixadc_csvserver ; then
16 | fail-message "Terraform state does not contain the cs vserver resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/03-netscaler-adc-basic-content-switching-csvserver/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleanup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/03-netscaler-adc-basic-content-switching-csvserver/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solve the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 |
11 | set -e
12 | cd /root/apply-cs-configuration
13 | terraform init
14 | terraform apply -var-file example.tfvars -auto-approve
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/04-netscaler-adc-basic-content-switching-cspolicy2/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: netscaler-adc-basic-content-switching-cspolicy2
3 | id: 0a90rpuxuxpn
4 | type: challenge
5 | title: Content Switching with Basic Policies part 2
6 | teaser: Configure ADC to route traffic based on HTTP Header value.
7 | tabs:
8 | - title: Code Editor
9 | type: code
10 | hostname: cloud-client
11 | path: /root/apply-cs-configuration
12 | - title: Bastion Host CLI
13 | type: terminal
14 | hostname: cloud-client
15 | - title: NetScaler ADC data
16 | type: service
17 | hostname: cloud-client
18 | path: /adc.html
19 | port: 80
20 | difficulty: basic
21 | timelimit: 3600
22 | ---
23 | Introduction
24 | ============
25 |
26 | ## Configure a Content Switching policy.
27 |
28 | In this challenge we will see how to create Content Switching policies that route traffic to your applications based on an HTTP header value.
29 |
30 | - If the HTTP request contains the header `Color: red`, we expect all traffic to go to the application with the red background
31 | - If the HTTP request contains the header `Color: green`, we expect all traffic to go to the application with the green background
32 |
33 | Learn more about ADC Content Switching [here](https://docs.netscaler.com/en-us/citrix-adc/current-release/content-switching.html).
34 |
35 |
36 | Terraform configuration
37 | =======================
38 |
39 | Click on the `Code Editor` tab to view the configuration files.
40 |
41 | In this challenge we need to change the `rule` of the two content-switching policies.
42 | To do this, open the `example.tfvars` file and change the values of `cspolicy1_rule` and `cspolicy2_rule` to the values below.
43 | Set `cspolicy1_rule`'s value to
44 | ```bash
45 | "HTTP.REQ.HEADER(\"Color\").CONTAINS(\"red\")"
46 | ```
47 | and `cspolicy2_rule`'s value to
48 | ```bash
49 | "HTTP.REQ.HEADER(\"Color\").CONTAINS(\"green\")"
50 | ```
51 |
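
After the edit, the two relevant lines in `example.tfvars` should read as follows (a sketch showing only these two variables; the rest of the file stays unchanged):

```bash
cspolicy1_rule = "HTTP.REQ.HEADER(\"Color\").CONTAINS(\"red\")"
cspolicy2_rule = "HTTP.REQ.HEADER(\"Color\").CONTAINS(\"green\")"
```
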
52 | **_NOTE:_** After editing the Terraform configuration files, please save them by clicking the **disk** icon in the top-right corner, as shown in the screenshot below.
53 |
54 |
55 | 
56 |
57 |
58 | Apply Configuration
59 | ===================
60 |
61 | Go to the Bastion Host CLI and perform the following operations:
62 |
63 | In order to apply the configuration we first need to change to
64 | the correct directory.
65 | ```bash
66 | cd /root/apply-cs-configuration
67 | ```
68 |
69 | We need to apply the updated configuration.
70 | ```bash
71 | terraform apply -var-file example.tfvars
72 | ```
73 | Answer `yes` and hit `enter` to proceed. If all is well you will see a message confirming the successful
74 | creation of the resources.
75 |
76 | Verifying the configuration
77 | ===========================
78 |
79 | ## Verifying using cURL
80 |
81 | Let's verify the configuration from the Bastion Host CLI using cURL.
82 | Go to the Bastion Host CLI and run the following commands.
83 |
84 | **_NOTE:_** Replace `{VIP}` with the IP address shown in the `NetScaler ADC data` tab when executing the commands below.
85 |
86 | 1. To check the red web server with the header `Color: red`
87 |
88 | ```bash
89 | curl -X GET http://{VIP} -H 'Content-Type: application/json' -H 'Color: red'
90 | ```
91 | 
92 |
93 | 2. To verify the green web server with the header `Color: green`
94 |
95 | ```bash
96 | curl -X GET http://{VIP} -H 'Content-Type: application/json' -H 'Color: green'
97 | ```
98 | 
99 |
100 |
101 | ## Inspect Configuration through ADC Web GUI
102 | You can also inspect the same information through the
103 | NetScaler ADC Web GUI.
104 | Open a browser window with the NSIP. After login, head to `Traffic Management` >> `Content Switching` >> `Policies`.
105 | You should be able to see the two content-switching policies; click on a policy to
106 | view further details.
107 |
108 | 
109 |
110 | Conclusion
111 | ==========
112 |
113 | In this challenge we demonstrated how to apply a Content Switching policy to route traffic based on an HTTP header value. In the next challenge we are going to create an additional policy to route traffic based on the presence of an HTTP header.
114 |
115 | Proceed to the next challenge.
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/04-netscaler-adc-basic-content-switching-cspolicy2/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-cs-configuration
14 |
15 | if ! terraform show | grep citrixadc_csvserver ; then
16 | fail-message "Terraform state does not contain the cs vserver resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/04-netscaler-adc-basic-content-switching-cspolicy2/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/04-netscaler-adc-basic-content-switching-cspolicy2/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | echo "This is the solve script"
4 |
5 | set -e
6 | cd /root/apply-cs-configuration
7 | terraform init
8 | terraform apply -var-file example.tfvars -auto-approve
9 |
10 | exit 0
11 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/05-netscaler-adc-basic-content-switching-cspolicy3/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-cs-configuration
14 |
15 | if ! terraform show | grep citrixadc_csvserver ; then
16 | fail-message "Terraform state does not contain the cs vserver resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/05-netscaler-adc-basic-content-switching-cspolicy3/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/05-netscaler-adc-basic-content-switching-cspolicy3/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 |
11 | set -e
12 | cd /root/apply-cs-configuration
13 | terraform init
14 | terraform apply -var-file example.tfvars -auto-approve
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: netscaler-adc-basic-content-switching-destroy
3 | id: bzd6tribreob
4 | type: challenge
5 | title: Destroy Configuration
6 | teaser: Destroy Terraform Managed Configuration
7 | notes:
8 | - type: text
9 | contents: Destroy Content Switching configuration
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/apply-cs-configuration
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 3600
25 | ---
26 | Introduction
27 | ============
28 |
29 | As we have already applied all the configurations for our use cases in the previous
30 | steps, we will now destroy that configuration.
31 |
32 | This will remove all the configuration on the NetScaler ADC instance that is managed by Terraform.
33 |
34 | Destroy configuration
35 | =====================
36 | First change to the configuration directory
37 | ```bash
38 | cd /root/apply-cs-configuration
39 | ```
40 | Then run the following command
41 | ```bash
42 | terraform destroy -var-file example.tfvars
43 | ```
44 | You will be prompted with the destroy plan
45 | which will detail which resources will be destroyed.
46 |
47 | Answer `yes` and hit `enter` to proceed. After the operation completes successfully,
48 | all Terraform-managed configuration on the target NetScaler ADC is deleted.
49 |
50 | The back-end services will no longer be reachable through the VIP
51 | address.
52 |
53 |
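If you want to confirm this, the cURL check from the earlier challenges (replace `{VIP}` with the IP address shown in the `NetScaler ADC data` tab) should no longer return a page:

```bash
curl -X GET http://{VIP} -H 'Color: red'
```
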
54 | Conclusion
55 | ==========
56 | This concludes our track.
57 |
58 | In this track we focused on some example configurations that cover specific use cases. You can find more example configurations and documentation on the [Terraform Registry](https://registry.terraform.io/providers/citrix/citrixadc/latest/docs).
59 |
60 | You can experiment with them and combine them to achieve more complex configurations for advanced use cases such as Application Protection, High Availability, and Global Server Load Balancing.
61 |
62 | General documentation for the NetScaler ADC can be found
63 | at the [NetScaler ADC documentation site](https://docs.netscaler.com/en-us/citrix-adc/current-release.html).
64 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-cs-configuration
14 |
15 | if terraform show | grep citrixadc_csvserver ; then
16 | fail-message "Terraform state still contains the cs vserver resource"
17 | exit 1
18 | fi
19 |
20 |
21 | exit 0
22 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 | set -e
11 | cd /root/apply-cs-configuration
12 | terraform destroy -var-file example.tfvars -auto-approve
13 |
14 | exit 0
15 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/config.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | containers:
3 | - name: cloud-client
4 | image: gcr.io/instruqt/cloud-client
5 | shell: /bin/bash
6 | ports:
7 | - 80
8 | gcp_projects:
9 | - name: gcpproject
10 | services:
11 | - compute.googleapis.com
12 | roles: []
13 |
--------------------------------------------------------------------------------
/labs/basic-content-switching-configuration-using-terraform/track.yml:
--------------------------------------------------------------------------------
1 | slug: basic-content-switching-configuration-using-terraform
2 | id: jqwhcm5h47h0
3 | title: NetScaler ADC Basic Content Switching Configuration using Terraform
4 | teaser: Learn how to deploy & configure a Content Switching server for routing traffic
5 | to your applications.
6 | description: |
7 | ## Learn how to deploy & configure a Content Switching virtual server for routing traffic to your applications.
8 | ##
9 |
10 | In this track we will leverage infrastructure-as-code templates to demonstrate:
11 | - How to deploy a content switching server to route traffic to your applications.
12 | - How to route traffic based on URL path or header values.
13 |
14 | The lab will provision a NetScaler ADC and two web applications for you, and then guide you through using Terraform.
15 | icon: https://storage.googleapis.com/instruqt-frontend/img/tracks/default.png
16 | tags:
17 | - web-apps
18 | - automation
19 | - citrix
20 | - adc
21 | - terraform
22 | - load-balancer
23 | - content-switching
24 | owner: netscaler
25 | developers:
26 | - kkaltsas@tibco.com
27 | - rohit.myali@gmail.com
28 | - sumanth.lingappa@citrix.com
29 | - chris.chau@cloud.com
30 | timelimit: 3600
31 | lab_config:
32 | overlay: false
33 | width: 33
34 | position: right
35 | checksum: "15492214606032258156"
36 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/01-environment-setup/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: environment-setup
3 | id: nyfc2rdsotbx
4 | type: challenge
5 | title: Provision Citrix ADC VPX
6 | teaser: Environment Setup - Deploy Citrix ADC VPX and a pair of web-apps for you.
7 | notes:
8 | - type: text
9 | contents: Environment Setup - Deploy Citrix ADC VPX and a pair of web-apps for you.
10 | tabs:
11 | - title: IP Details
12 | type: service
13 | hostname: cloud-client
14 | path: /adc.html
15 | port: 80
16 | difficulty: basic
17 | timelimit: 300
18 | ---
19 | Welcome to "**Deliver Apps with Citrix ADC and Ansible**" lab.
20 |
21 | In this lab, we have provisioned -
22 |
23 | 1. Citrix ADC VPX
24 | 1. `Citrix ADC NSIP (Management login) IP` for management access.
25 | 2. `Citrix ADC VIP (load balancer) Client IP` is the frontend IP for your apps in the backend-servers.
26 | 2. Two backend servers for you.
27 |
28 | You can check out the **details in the `IP Details` tab.**
29 |
30 | **As part of the lab, we will achieve the following** :
31 |
32 | 1. Change Citrix ADC VPX login password.
33 | 2. Configure two services for the above backend servers.
34 | 3. Configure a load balancer to deliver the backend servers over the internet.
35 |
36 | Let us get started. Please click `Next` to proceed.
37 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/02-install-prerequisites/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: install-prerequisites
3 | id: vw3sdovybyhb
4 | type: challenge
5 | title: Installing Pre-requisites
6 | teaser: Installing Pre-requisites
7 | notes:
8 | - type: text
9 | contents: Install Prerequisites
10 | tabs:
11 | - title: Terminal
12 | type: terminal
13 | hostname: cloud-client
14 | - title: IP Details
15 | type: service
16 | hostname: cloud-client
17 | path: /adc.html
18 | port: 80
19 | difficulty: basic
20 | timelimit: 900
21 | ---
22 | # Installing Pre-requisites
23 |
24 | In this challenge, let us install the prerequisites required to run `ansible-playbooks` to configure the `citrix-adc-vpx` device.
25 |
26 | All the pre-requisites can be found in our GitHub link below:
27 | * https://github.com/citrix/citrix-adc-ansible-modules#installation
28 |
29 | The pre-requisites are:
30 | 1. Ansible software tool
31 | 2. Citrix ADC Python SDK
32 | 3. Citrix ADC Ansible modules
33 |
34 | ---
35 |
36 | 1. Install `ansible`
37 | ======================
38 | There are many ways to install `ansible`. You can find all the supported installation methods at the link below.
39 | * https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html
40 |
41 | Here, let us install `ansible` via `pip`.
42 |
43 | > TIP: Click on the code below to copy it to the clipboard.
44 |
45 | If not already active, click the `Terminal` tab.
46 |
47 | Run the below commands to install `ansible`:
48 |
49 | ```
50 | python -m pip install --upgrade pip
51 | python -m pip install ansible==5.5.0
52 | ```
53 |
54 | > The above command will take 3-4 minutes.
55 | > Ignore any WARNINGs you may get.
56 |
57 | After installing, you can verify if the installation is successful by the below command. The output will give you the version of the ansible tool installed.
58 |
59 | ```
60 | ansible --version
61 | ```
62 |
63 | ---
64 |
65 | 2. Install `Citrix ADC Python SDK`
66 | ====================================
67 | In this step, we will install the `Citrix ADC Python SDK`, which `ansible` needs in order to configure `citrix adc` devices.
68 |
69 | First, clone our `citrix-adc-ansible-modules` GitHub repo:
70 |
71 | ```
72 | git clone https://github.com/citrix/citrix-adc-ansible-modules.git /tmp/citrix-adc-ansible-modules
73 | ```
74 |
75 |
76 | Then, run the below command to install the `Citrix ADC Python SDK`.
77 |
78 | ```
79 | pip install /tmp/citrix-adc-ansible-modules/deps/nitro-python-1.0_kamet.tar.gz
80 | ```
81 |
82 | > Ignore any WARNINGs you may get.
83 |
84 | ---
85 |
86 |
87 | 3. Install `Citrix ADC Ansible Modules`
88 | ==========================================
89 | Next, we will install `citrix-adc` ansible modules from `ansible-galaxy` hub by running the below command.
90 |
91 | ```
92 | ansible-galaxy collection install git+https://github.com/citrix/citrix-adc-ansible-modules.git#/ansible-collections/adc
93 | ```
94 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/02-install-prerequisites/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # check
3 | echo "Checking the solution of the challenge"
4 |
5 | ansible --version
6 |
7 | if [ $? -eq 0 ]; then
8 | exit 0
9 | else
10 | echo "FAIL: Ansible NOT installed! Please all the pre-requisities"
11 | echo 1
12 | fi
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/02-install-prerequisites/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python -m pip install --upgrade pip
3 | python -m pip install ansible
4 |
5 | sleep 1
6 |
7 | git clone https://github.com/citrix/citrix-adc-ansible-modules.git /tmp/citrix-adc-ansible-modules
8 |
9 | sleep 1
10 |
11 | pip install /tmp/citrix-adc-ansible-modules/deps/nitro-python-1.0_kamet.tar.gz
12 |
13 | sleep 1
14 |
15 | ansible-galaxy collection install git+https://github.com/citrix/citrix-adc-ansible-modules.git#/ansible-collections/adc
16 |
17 | sleep 1
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/03-define-target-adc-configurations-in-ansible-playbook/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: define-target-adc-configurations-in-ansible-playbook
3 | id: knkwtxcwhukf
4 | type: challenge
5 | title: Define target Citrix ADC configurations in Ansible Playbook
6 | teaser: Define target Citrix ADC configurations in Ansible Playbook
7 | notes:
8 | - type: text
9 | contents: Define target Citrix ADC configurations in Ansible Playbook
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/citrixadc-ansible-track/
15 | - title: Terminal
16 | type: terminal
17 | hostname: cloud-client
18 | - title: IP Details
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 600
25 | ---
26 | # Define target Citrix ADC configurations in Ansible Playbook
27 |
28 | An Ansible® playbook is a blueprint of automation tasks: complex IT actions executed with limited or no human involvement. Ansible playbooks are executed on a set, group, or classification of hosts, which together make up an Ansible inventory.
29 |
30 | Generally, the syntax to run an ansible-playbook is:
31 |
32 | ```
33 | ansible-playbook -i inventory.txt playbook.yaml
34 | ```
35 |
36 | where -
37 |
38 | * `inventory.txt`: is a file where you can specify the IP/login/password of the Citrix ADC. You can also see the IP details of the backend servers.
39 | * `playbook.yaml`: is the actual ansible-playbook config file that contains your target Citrix ADC configurations.
40 |
41 | Click on the `Code Editor` tab to view the config files.
42 |
43 | 1. `inventory.txt` - contains the IP/password and other information for the provisioned Citrix ADC (see the example below).
44 | 2. `citrixadc-first-time-password-reset.yaml` - This ansible-playbook is defined to reset the first-time-login password.
45 | 3. `citrixadc-setuplb.yaml` - This ansible-playbook creates multiple entities inside Citrix ADC as follows -
46 | 1. `citrix_adc_service` - 2 services inside Citrix ADC that are mapped to your backend servers.
47 | 2. `citrix_adc_lb_vserver` - A load balancing server in Citrix ADC bound to the above 2 services. Any traffic hitting the VIP will be load balanced by this server and routed to either of the services/backend-servers.
48 |
49 | [Click here](https://docs.citrix.com/en-us/citrix-adc/current-release/getting-started-with-citrix-adc/communicate-with-clients-servers.html) to learn more about Citrix ADC services, servicegroup and loadbalancing.
50 |
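For orientation, the generated `inventory.txt` has roughly this shape; the IP addresses and the instance id are filled in for you by the lab setup and are shown here as placeholders:

```
[citrix_adc]
myadc snip=<SNIP> vip_ip=<VIP> nsip=<NSIP> nitro_user=nsroot default_password=<instance-id> new_password=verystrongpassword validate_certs=no server1_ip=<SERVER1-IP> server2_ip=<SERVER2-IP>
```
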
51 | > Optionally, if you wish to change the Citrix ADC password, you can replace the text `verystrongpassword` in the `inventory.txt` file with one of your choice, and save the file by clicking on the save button in the tab. If you opt to change the password, please remember it for the rest of the track.
52 |
53 | Once the target Citrix ADC configurations are defined in playbooks, we are all set to push these configs to the Citrix ADC. Let's do that in the next section.
54 |
55 | Proceed to the next step to run these playbooks.
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/03-define-target-adc-configurations-in-ansible-playbook/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mkdir -p /root/citrixadc-ansible-track/
4 |
5 | cd /root/citrixadc-ansible-track/
6 |
7 | ADC_INSTANCE_NSIP=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
8 | ADC_INSTANCE_VIP=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(networkInterfaces[1].networkIP)"`
9 | ADC_INSTANCE_SNIP=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(networkInterfaces[2].networkIP)"`
10 | ADC_INSTANCE_ID=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(id)"`
11 | BACKEND_SERVER1_IP=`gcloud compute instances describe --zone europe-west1-b backend-server-1 --format="value(networkInterfaces[0].networkIP)"`
12 | BACKEND_SERVER2_IP=`gcloud compute instances describe --zone europe-west1-b backend-server-2 --format="value(networkInterfaces[0].networkIP)"`
13 |
14 | cat <<EOF > inventory.txt
15 | [citrix_adc]
16 | myadc snip=${ADC_INSTANCE_SNIP} vip_ip=${ADC_INSTANCE_VIP} nsip=${ADC_INSTANCE_NSIP} nitro_user=nsroot default_password=${ADC_INSTANCE_ID} new_password=verystrongpassword validate_certs=no server1_ip=${BACKEND_SERVER1_IP} server2_ip=${BACKEND_SERVER2_IP}
17 |
18 | EOF
19 |
20 | cat <<EOF > citrixadc-first-time-password-reset.yaml
21 | ---
22 |
23 | - hosts: citrix_adc
24 |
25 | gather_facts: False
26 | collections:
27 | - citrix.adc
28 |
29 | tasks:
30 |
31 | - name: Password reset
32 | delegate_to: localhost
33 | citrix_adc_password_reset:
34 | nsip: "{{ nsip }}"
35 | username: "{{ nitro_user }}"
36 | nitro_protocol: https
37 | validate_certs: no
38 | password: "{{ default_password }}"
39 | new_password: "{{ new_password }}"
40 | EOF
41 |
42 | cat <<EOF > citrixadc-setuplb.yaml
43 | ---
44 |
45 | - hosts: citrix_adc
46 |
47 | vars:
48 | max_clients: 5
49 |
50 | remote_user: root
51 | gather_facts: False
52 | collections:
53 | - citrix.adc
54 |
55 | tasks:
56 | - name: Add SNIP
57 | delegate_to: localhost
58 | citrix_adc_nsip:
59 | nsip: "{{ nsip }}"
60 | nitro_user: "{{ nitro_user }}"
61 | nitro_pass: "{{ new_password }}"
62 | validate_certs: no
63 |
64 | state: present
65 |
66 | ipaddress: "{{ snip }}"
67 | netmask: 255.255.255.0
68 | type: SNIP
69 | snmp: disabled
70 |
71 | - name: service-http-1
72 | delegate_to: localhost
73 | citrix_adc_service:
74 | nsip: "{{ nsip }}"
75 | nitro_user: "{{ nitro_user }}"
76 | nitro_pass: "{{ new_password }}"
77 | validate_certs: "{{ validate_certs }}"
78 |
79 | state: present
80 |
81 | name: service-http-1
82 | servicetype: HTTP
83 | ipaddress: "{{ server1_ip }}"
84 | port: 80
85 |
86 | - name: service-http-2
87 | delegate_to: localhost
88 | citrix_adc_service:
89 | nsip: "{{ nsip }}"
90 | nitro_user: "{{ nitro_user }}"
91 | nitro_pass: "{{ new_password }}"
92 | validate_certs: "{{ validate_certs }}"
93 |
94 | state: present
95 |
96 | name: service-http-2
97 | servicetype: HTTP
98 | ipaddress: "{{ server2_ip }}"
99 | port: 80
100 |
101 | - name: lb vserver 1
102 | delegate_to: localhost
103 | citrix_adc_lb_vserver:
104 | nsip: "{{ nsip }}"
105 | nitro_user: "{{ nitro_user }}"
106 | nitro_pass: "{{ new_password }}"
107 | validate_certs: "{{ validate_certs }}"
108 |
109 | state: present
110 |
111 | name: lb-vserver-1
112 | servicetype: HTTP
113 | timeout: 12
114 | ipv46: "{{ vip_ip }}"
115 | port: 80
116 | lbmethod: ROUNDROBIN
117 | servicebindings:
118 | - servicename: service-http-1
119 | weight: 50
120 | - servicename: service-http-2
121 | weight: 50
122 | disabled: no
123 |
124 | EOF
125 |
126 | ls
127 |
128 |
129 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/04-configure-adc-with-ansible/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: configure-adc-with-ansible
3 | id: ql0jhyeivpwj
4 | type: challenge
5 | title: Configure Citrix ADC with Ansible
6 | teaser: Configure Citrix ADC with Ansible
7 | notes:
8 | - type: text
9 | contents: Configure Citrix ADC with Ansible
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/citrixadc-ansible-track/
15 | - title: Terminal
16 | type: terminal
17 | hostname: cloud-client
18 | - title: IP Details
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | - title: Citrix ADC CLI Console
24 | type: terminal
25 | hostname: cloud-client
26 | difficulty: basic
27 | timelimit: 1200
28 | ---
29 | # Configure Citrix ADC with Ansible
30 |
31 | ## Reset password
32 |
33 | First, let us set the new password to the Citrix ADC VPX deployed by running the playbook as follows.
34 |
35 | Go to `Terminal` tab.
36 |
37 | If not already done, please change your directory to the lab directory
38 |
39 | ```
40 | cd /root/citrixadc-ansible-track/
41 | ```
42 |
43 | Run the playbook
44 | ```
45 | ansible-playbook -i inventory.txt citrixadc-first-time-password-reset.yaml
46 | ```
47 |
48 |
49 | ## Configure load balancer and services
50 |
51 | ```
52 | ansible-playbook -i inventory.txt citrixadc-setuplb.yaml
53 | ```
54 |
55 | > Upon successful run, you will get `ok=4 changed=4` in the terminal. This shows 4 resources/entities were created successfully by Ansible on your Citrix ADC.
56 |
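The tail of a successful run looks roughly like this (the exact columns vary slightly between Ansible versions):

```
PLAY RECAP *********************************************************************
myadc                      : ok=4    changed=4    unreachable=0    failed=0
```
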
57 | Validate the configuration and the user traffic
58 | ==================================================
59 |
60 | Now we have configured a load balancer which serves traffic to two backend servers.
61 | We can check the status of the load balancer by going to `IP Details` tab
62 |
63 | 1. Go to `IP Details` tab
64 | 2. Open a new tab with the `VIP IP`.
65 | 3. You will see the load balancer is serving traffic to one of the backend servers.
66 | 4. Since the load balancer is configured to serve traffic in `ROUNDROBIN` mode, when you refresh the browser page, the load balancer will serve traffic to the next backend server.
67 |
68 | Validate the configuration via Citrix ADC CLI
69 | ==========================================
70 |
71 | Now, let us login to Citrix ADC via SSH and validate the configuration provisioned by Ansible.
72 |
73 | 1. Go to `Citrix ADC CLI Console` tab
74 | 2. Log in via SSH to the Citrix ADC by running the below command. Replace `<NSIP>` with the NSIP of your Citrix ADC. You can get the NSIP from the `IP Details` tab.
75 |
76 | ```
77 | ssh nsroot@<NSIP>
78 | ```
79 |
80 | 3. Accept the fingerprint prompt by typing `yes`
81 | 
82 | 4. Enter the password. If you had not changed the password in the previous step, the default password is `verystrongpassword`
83 |
84 | Now you are in the Citrix ADC CLI console.
85 |
86 | To list/show the load balancer configuration, run the following command.
87 | ```
88 | show lb vserver lb-vserver-1
89 | ```
90 |
91 | You can see the following information about the load balancer.
92 | 
93 |
94 | You can also check the status of the backend servers by running the following commands.
95 | ```
96 | show service service-http-1
97 | ```
98 |
99 | ```
100 | show service service-http-2
101 | ```
102 |
103 | You can exit the CLI by typing the below command.
104 | ```
105 | exit
106 | ```
107 |
108 | That's how simple it is to configure Citrix ADC with Ansible. For any new Citrix ADC configurations, define the ansible playbook with the target configs and execute playbooks as above.
109 |
110 | Now let us destroy the load balancer.
111 |
112 | Please proceed to next step.
113 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/05-destroy-the-configuration/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: destroy-the-configuration
3 | id: hkbbl92q7bvd
4 | type: challenge
5 | title: Destroy/Revert the configuration
6 | teaser: Destroy/Revert the configuration
7 | notes:
8 | - type: text
9 | contents: Destroy/Revert the configuration
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/citrixadc-ansible-track/
15 | - title: Terminal
16 | type: terminal
17 | hostname: cloud-client
18 | - title: IP Details
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | - title: Citrix ADC CLI Console
24 | type: terminal
25 | hostname: cloud-client
26 | difficulty: basic
27 | timelimit: 900
28 | ---
29 | # Destroy/revert the configuration
30 |
31 | We can also destroy/delete the Citrix ADC configuration we applied in the previous step.
32 | To do so, follow this procedure:
33 |
34 | 1. Go to the `Code Editor` tab
35 | 2. Click on **citrixadc-setuplb.yaml** file
36 | 3. Change `state: present` to `state: absent` in all the ansible-tasks (see the excerpt below).
37 | > **Note:** There should be a __space__ between `state:` and `present` or `absent`
38 | 4. Save the file by clicking on the save-icon on filename's tab.
39 | 5. Go back to `Terminal` tab
40 | 6. Run the citrixadc-setuplb.yaml playbook again.
41 |
42 | ```
43 | cd /root/citrixadc-ansible-track/
44 | ```
45 | ```
46 | ansible-playbook -i inventory.txt citrixadc-setuplb.yaml
47 | ```
48 |
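For reference, after the edit each task in `citrixadc-setuplb.yaml` should carry `state: absent`; one task is excerpted below, with everything else left untouched:

```
- name: service-http-1
  delegate_to: localhost
  citrix_adc_service:
    nsip: "{{ nsip }}"
    nitro_user: "{{ nitro_user }}"
    nitro_pass: "{{ new_password }}"
    validate_certs: "{{ validate_certs }}"

    state: absent

    name: service-http-1
    servicetype: HTTP
    ipaddress: "{{ server1_ip }}"
    port: 80
```
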
49 | Now the backend servers no longer serve the user traffic hitting the VIP.
50 |
51 | This completes the lab exercise. Check out our [GitHub repository](https://github.com/citrix/citrix-adc-ansible-modules/) to set up Ansible for Citrix ADC in your environment.
52 |
53 | Also check out our [sample playbooks](https://github.com/citrix/citrix-adc-ansible-modules/tree/master/sample_playbooks) for Web Firewall, Multi Cluster, and other advanced use cases.
54 |
55 | Thank you!
56 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/assets/show-lb-vserver.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/labs/deliver-apps-with-citrix-adc-and-ansible/assets/show-lb-vserver.jpg
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/assets/ssh-fingerprint.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/labs/deliver-apps-with-citrix-adc-and-ansible/assets/ssh-fingerprint.jpg
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/config.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | containers:
3 | - name: cloud-client
4 | image: gcr.io/instruqt/cloud-client
5 | shell: /bin/bash
6 | ports:
7 | - 80
8 | gcp_projects:
9 | - name: gcpproject
10 | services:
11 | - compute.googleapis.com
12 | roles: []
13 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/track.yml:
--------------------------------------------------------------------------------
1 | slug: deliver-apps-with-citrix-adc-and-ansible
2 | id: ir3cuxfc3p9u
3 | title: Deliver Apps with Citrix ADC and Ansible
4 | teaser: Learn the Ansible workflow by deploying Citrix ADC and configuring a load
5 | balancer to front-end your web apps
6 | description: |
7 | **Citrix ADC** is an advanced application delivery, load balancing and security solution for your web apps. Ansible modules simplify Citrix ADC management, providing agility to your IT operations.
8 |
9 | In this hands-on lab, we will learn how to use Ansible to configure a load balancing service in Citrix ADC and expose your web apps over the internet. The lab will provision the Citrix ADC, a pair of web servers, and an automation controller, and then guide you through the Ansible workflow.
10 | icon: https://www.citrix.com/content/dam/citrix61/en_us/images/logos/citrix/citrix-logo-black.png
11 | level: beginner
12 | tags:
13 | - web-apps
14 | - ansible
15 | - automation
16 | - citrix
17 | - adc
18 | - gcp
19 | - load-balancer
20 | - ansible-playbook
21 | owner: netscaler
22 | developers:
23 | - sumanth.lingappa@citrix.com
24 | - giorgos.nikolopoulos@citrix.com
25 | - ravi.shekhar@citrix.com
26 | - mayur.vadhar1@citrix.com
27 | - komal.bhardwaj@citrix.com
28 | - konstantinos.kaltsas@citrix.com
29 | - mayur.mohanpatil@citrix.com
30 | - pushkar.patil@citrix.com
31 | lab_config:
32 | overlay: false
33 | width: 33
34 | position: right
35 | checksum: "7242508055715617948"
36 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-citrix-adc-and-ansible/track_scripts/cleanup-cloud-client:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netscaler/automation-toolkit/2bac93f3f93f262249fcd7e4219c5dd6bb7d23f3/labs/deliver-apps-with-citrix-adc-and-ansible/track_scripts/cleanup-cloud-client
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/01-setup-vpx/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: setup-vpx
3 | id: 0zelij1ma9b8
4 | type: challenge
5 | title: Provision NetScaler ADC VPX and Setup Terraform
6 | teaser: Environment Setup - Deploy NetScaler ADC VPX and a pair of web-apps for you
7 | notes:
8 | - type: text
9 | contents: Setup VPX
10 | tabs:
11 | - title: Bastion Host CLI
12 | type: terminal
13 | hostname: cloud-client
14 | - title: NetScaler ADC data
15 | type: service
16 | hostname: cloud-client
17 | path: /adc.html
18 | port: 80
19 | difficulty: basic
20 | timelimit: 900
21 | ---
22 |
23 | Introduction
24 | ============
25 |
26 | Welcome to the "Deliver Apps with NetScaler ADC and Terraform" lab.
27 |
28 | In this lab, we have provisioned -
29 |
30 | 1. NetScaler ADC VPX
31 | 1. `NetScaler ADC NSIP (Management login) IP` for management access
32 | 2. `NetScaler ADC VIP (load balancer) Client IP` is the frontend IP for your apps in the backend-servers.
33 | 2. Two backend servers for you.
34 |
35 | As part of the lab, we will achieve the following :
36 | 1. Install Terraform CLI binary and NetScaler ADC provider[(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest) in a bastion host with access to the target ADC instance.
37 | 2. Change NetScaler ADC VPX default password using Terraform
38 | 3. Configure NetScaler ADC with two services corresponding to the above backend servers and a load balancer to deliver them over the internet, all using Terraform.
39 |
40 | For this challenge we will need to download and set up the terraform CLI binary.
41 | Terraform provides instructions on how to download and install the
42 | terraform binary in this [page](https://www.terraform.io/downloads).
43 | All available terraform binaries are listed [here](https://releases.hashicorp.com/terraform/). Let’s get started with Terraform installation.
44 |
45 | Install terraform
46 | =================
47 |
48 | For our purposes we will be downloading a specific terraform version known to work
49 | with our current provider version.
50 |
51 | 1. Download the .zip file with the binary for the linux_amd64 platform
52 |
53 | ```bash
54 | curl https://releases.hashicorp.com/terraform/1.1.4/terraform_1.1.4_linux_amd64.zip --output terraform_1.1.4_linux_amd64.zip
55 | ```
56 |
57 | 2. Extract the executable in the current directory
58 |
59 | ```bash
60 | unzip terraform_1.1.4_linux_amd64.zip
61 | ```
62 | 3. Move the extracted binary to a location defined in the PATH variable
63 |
64 | ```bash
65 | mv ./terraform /usr/local/bin
66 | ```
67 | 4. Verify that the terraform binary is executable from the command line
68 |
69 | ```bash
70 | terraform version
71 | ```
72 | The above command should show you the version information of the terraform binary.
73 | > Ignore any out of date warning message.
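
With the version downloaded above, the output should look roughly like this:

```bash
Terraform v1.1.4
on linux_amd64
```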
74 |
75 | Conclusion
76 | ==========
77 |
78 | Having installed the terraform binary our next task will be
79 | to apply terraform configurations.
80 |
81 | Please proceed to the next challenge.
82 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/01-setup-vpx/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | if ! which terraform ; then
10 | fail-message "Terraform binary not found in PATH"
11 | exit 1
12 | fi
13 | exit 0
14 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/01-setup-vpx/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/01-setup-vpx/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 |
10 | mkdir -p /var/tmp/first_time_password_reset
11 |
12 | cd /var/tmp/first_time_password_reset
13 |
14 | ADC_INSTANCE_ID=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(id)"`
15 | ADC_INSTANCE_NSIP=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
16 |
17 | # provider.tf
18 | cat <<EOF >provider.tf
19 |
20 | terraform {
21 | required_providers {
22 | citrixadc = {
23 | source = "citrix/citrixadc"
24 | }
25 | }
26 | }
27 |
28 | provider "citrixadc" {
29 | endpoint = "http://${ADC_INSTANCE_NSIP}"
30 | username = "nsroot"
31 | password = "notnsroot"
32 | }
33 |
34 | EOF
35 |
36 | # resources.tf
37 | cat <<EOF >resources.tf
38 |
39 | resource "citrixadc_password_resetter" "tf_resetter" {
40 | username = "nsroot"
41 | password = "${ADC_INSTANCE_ID}"
42 | new_password = "notnsroot"
43 | }
44 |
45 | EOF
46 |
47 | sleep 10
48 |
49 | terraform init
50 |
51 | sleep 5
52 |
53 | terraform apply -auto-approve
54 |
55 | exit 0
56 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/01-setup-vpx/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "Installing terraform binary"
10 | mkdir -p /var/tmp/terraform_install
11 | cd /var/tmp/terraform_install
12 | curl https://releases.hashicorp.com/terraform/1.1.4/terraform_1.1.4_linux_amd64.zip --output terraform_1.1.4_linux_amd64.zip
13 | unzip terraform_1.1.4_linux_amd64.zip
14 | mv ./terraform /usr/local/bin
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/02-reset-default-password/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: reset-default-password
3 | id: yhuuhfbnqpxy
4 | type: challenge
5 | title: Reset the NetScaler ADC default password
6 | teaser: Reset the NetScaler ADC default password
7 | notes:
8 | - type: text
9 | contents: Reset the NetScaler ADC default password
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/reset-default-password
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 900
25 | ---
26 |
27 | Introduction
28 | ============
29 |
30 | ## Reset default password
31 |
32 | The NetScaler ADC instance provisioned in the Google Cloud
33 | has a default initial password. Before any configuration can be applied, we need to reset this default password. This can be done interactively through the Web GUI or
34 | through the nscli over SSH.
35 | In this challenge, we are going to reset the password using the Terraform provider.
36 |
37 | Terraform configuration
38 | =======================
39 |
40 | The configuration has already been written to the directory
41 | `/root/reset-default-password`. You can browse the files in the Code Editor tab.
42 |
43 | `provider.tf` - This file contains the provider connection information.
44 | You will notice that we define the NITRO API endpoint to be
45 | an `https` url. This will ensure that the data exchanged with the target ADC
46 | will be encrypted.
47 | Because the target ADC's default TLS certificate is self-signed,
48 | we also need to set the option `insecure_skip_verify = true`. This prevents the HTTP requests from failing due to certificate
49 | verification errors.
50 | For production instances it is strongly recommended to replace
51 | the default TLS certificate with a properly signed one.
52 |
53 | `resources.tf`- This file contains the resource which will do the actual
54 | reset of the password. For Google Cloud the default password is the instance id.
55 | The new password is defined with the `new_password` attribute.
56 | You can change this to a value other than the provided one. If you edit the `new_password`, make sure to take note of it, because you will need to update the provider configuration for the subsequent challenges accordingly.
57 |
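The two files look roughly like this; the endpoint and the default password are filled in with your instance's values by the lab setup and are shown here as placeholders:

```hcl
# provider.tf
terraform {
  required_providers {
    citrixadc = {
      source  = "citrix/citrixadc"
      version = "1.9.0"
    }
  }
}

provider "citrixadc" {
  endpoint             = "https://<NSIP>"
  insecure_skip_verify = true
}

# resources.tf
resource "citrixadc_password_resetter" "tf_resetter" {
  username     = "nsroot"
  password     = "<instance-id>" # the default password, i.e. the Google Cloud instance id
  new_password = "verysecret"
}
```
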
58 | Apply configuration
59 | ===================
60 | Go to Bastion Host CLI and perform following operations :
61 | 1. Change current directory to the one containing the terraform configuration files
62 |
63 | ```bash
64 | cd /root/reset-default-password
65 | ```
66 |
67 | 2. Initialize the terraform configuration.
68 | ```bash
69 | terraform init
70 | ```
71 | This command will download and install the NetScaler ADC provider[(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest) which is needed to run the configuration.
72 |
73 | 3. Apply the configuration.
74 | ```bash
75 | terraform apply
76 | ```
77 | This command will present you with the configuration changes
78 | and will prompt you to verify you want to apply them.
79 |
80 | Answer `yes` and press enter.
81 |
82 | If all goes well you should see a message saying 1 resource was
83 | created without any errors.
84 |
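The last line of a successful apply looks like this:

```bash
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
```
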
85 | Conclusion
86 | ==========
87 |
88 | We have now configured the target NetScaler ADC with a new password.
89 |
90 | If you changed the new password to something other than the one
91 | supplied, please take note of it, since you will need it
92 | for the NetScaler ADC provider configuration in the subsequent challenges.
93 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/02-reset-default-password/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/reset-default-password
14 |
15 | if ! terraform show | grep citrixadc_password_resetter ; then
16 | fail-message "Terraform state does not contain a password resetter resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/02-reset-default-password/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleanup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/02-reset-default-password/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | set -e
12 |
13 | mkdir -p /root/reset-default-password
14 | cd /root/reset-default-password
15 | zone="europe-west1-b"
16 |
17 | adc_instance_nsip=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
18 |
19 | # Create provider.tf
20 | cat <<EOF >provider.tf
21 |
22 | terraform {
23 | required_providers {
24 | citrixadc = {
25 | source = "citrix/citrixadc"
26 | version = "1.9.0"
27 | }
28 | }
29 | }
30 |
31 | provider "citrixadc" {
32 | endpoint = "https://${adc_instance_nsip}"
33 | insecure_skip_verify = true
34 | }
35 |
36 | EOF
37 |
38 | instance_id=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(id)"`
39 |
40 | # Apply the password reset
41 | cat <<EOF >resources.tf
42 |
43 | resource "citrixadc_password_resetter" "tf_resetter" {
44 | username = "nsroot"
45 | password = "${instance_id}"
46 | new_password = "verysecret"
47 | }
48 |
49 | EOF
50 |
51 | exit 0
52 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/02-reset-default-password/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 | set -e
11 | cd /root/reset-default-password
12 | terraform init
13 | terraform apply -auto-approve
14 | exit 0
15 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/03-apply-load-balancing-configuration/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: apply-load-balancing-configuration
3 | id: xzlas7iv5b2e
4 | type: challenge
5 | title: Configure load balancer in NetScaler ADC
6 | teaser: Apply load balancing configuration
7 | notes:
8 | - type: text
9 | contents: Apply load balancing configuration
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/apply-lb-configuration
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 900
25 | ---
26 |
27 | Introduction
28 | ============
29 |
30 | ## Configure Load balancer in ADC
31 |
32 | In this challenge we will apply a load balancing configuration. For this we have created a configuration under the `/root/apply-lb-configuration` directory. All resources are in a single `resources.tf` file.
33 |
34 | In the file we first create a SNIP `snip` address to communicate
35 | with the backend services. Then we create the two services `tf_service1`and `tf_service2` that correspond to the backend servers.
36 |
37 | Notice that we add an explicit dependency, with the `depends_on` keyword, to the SNIP resource. This will ensure that the services are created after the
38 | SNIP address and consequently they will be reachable from the
39 | ADC instance.
40 |
41 | After that we create the lb vserver `tf_lbvserver` which also has an
42 | explicit dependency to the SNIP address.
43 |
44 | Lastly we create the bindings between the lb vserver and
45 | the services. These have an implicit dependency to the lb vserver and
46 | each service. These dependencies are created because we
47 | use references to resource attributes in the block of the binding.
48 |
49 | For example the following reference
50 | ```hcl
51 | name = citrixadc_lbvserver.tf_lbvserver.name
52 | ```
53 | establishes a dependency on the lb vserver resource in the same file.
54 |
55 | With these dependencies defined, terraform will execute the configuration
56 | in dependency order. Learn more about ADC services, servicegroup, and loadbalancing [here](https://docs.citrix.com/en-us/citrix-adc/current-release/getting-started-with-citrix-adc/communicate-with-clients-servers.html).
57 |
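A rough sketch of how these dependencies look in `resources.tf` is shown below; attribute lists are abbreviated and the exact resource arguments may differ, so check the generated file in the Code Editor for the full version:

```hcl
resource "citrixadc_service" "tf_service1" {
  # name, IP address, port and servicetype of the first backend service ...
  depends_on = [citrixadc_nsip.snip] # explicit dependency on the SNIP
}

resource "citrixadc_lbvserver" "tf_lbvserver" {
  # name, VIP address, port and lbmethod of the lb vserver ...
  depends_on = [citrixadc_nsip.snip] # explicit dependency on the SNIP
}

resource "citrixadc_lbvserver_service_binding" "tf_binding1" {
  name        = citrixadc_lbvserver.tf_lbvserver.name # implicit dependency on the lb vserver
  servicename = citrixadc_service.tf_service1.name    # implicit dependency on the service
}
```
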
58 | Apply Configuration
59 | ===================
60 | Go to the Bastion Host CLI and perform the following operations:
61 |
62 | In order to apply the configuration we first need to change to
63 | the correct directory.
64 | ```bash
65 | cd /root/apply-lb-configuration
66 | ```
67 | Then we need to initialize the configuration in order to
68 | download the NetScaler ADC provider[(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest).
69 | ```bash
70 | terraform init
71 | ```
72 | Lastly we need to apply the configuration.
73 | ```bash
74 | terraform apply
75 | ```
76 | Answer `yes` and hit `enter` to proceed. If all is well you will see a message confirming the successful
77 | creation of the resources.
78 |
79 | Verifying the configuration
80 | ===========================
81 |
82 |
83 | ## Verifying using Browser
84 |
85 | Having applied the configuration you should be able to
86 | reach the backend services through the VIP address.
87 |
88 | You can view this address in the `NetScaler ADC data` tab.
89 |
90 | Opening a new browser window with this ip address should
91 | show you a message from the backend servers.
92 |
93 |
94 | ## Inspecting local terraform state
95 |
96 | You can inspect the local terraform state of the configuration
97 | by first changing to the configuration directory
98 | ```bash
99 | cd /root/apply-lb-configuration
100 | ```
101 | and then running the following command
102 | ```bash
103 | terraform show
104 | ```
105 | You should see the resources created along with their full attributes list.
106 | The local state will reflect what is configured at the target NetScaler ADC as long as the relevant configuration changes made to it are performed through the terraform tool.
107 |
108 |
109 | ## Inspect Configuration through ADC nscli
110 |
111 | You can also inspect the remote configuration by connecting
112 | to the target NetScaler ADC nscli.
113 |
114 | To do this you need to ssh into the NSIP.
115 |
116 | ```bash
117 | ssh nsroot@<NSIP>
118 | ```
119 | replace `<NSIP>` with the `NetScaler ADC Management IP` as shown on the `NetScaler ADC data` tab.
120 | Having logged in you can run the following command to inspect
121 | the configuration
122 | ```
123 | show lb vserver tf_lbvserver
124 | ```
125 | You should see the details of the lb vserver along with the backend services statuses.
126 |
127 |
128 | ## Inspect Configuration through NetScaler ADC Web GUI
129 |
130 | You can also inspect the same information through the
131 | NetScaler ADC Web GUI.
132 | Open a browser window with the NSIP and log in with username `nsroot` and the password corresponding to the `new_password` variable. After login, head to `Traffic Management` >> `Load Balancing` >> `Virtual Servers`.
133 | You should be able to see `tf_lbvserver`, and by clicking on it
134 | you can view further details.
135 |
136 |
137 | Conclusion
138 | ==========
139 |
140 | In this challenge we demonstrated how to apply a load balancing configuration.
141 |
142 | Proceed to the next challenge.
143 |
144 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/03-apply-load-balancing-configuration/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-lb-configuration
14 |
15 | if ! terraform show | grep citrixadc_lbvserver ; then
16 | fail-message "Terraform state does not contain the lb vserver resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/03-apply-load-balancing-configuration/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/03-apply-load-balancing-configuration/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | set -e
12 | mkdir -p /root/apply-lb-configuration
13 | cd /root/apply-lb-configuration
14 |
15 | zone="europe-west1-b"
16 |
17 | adc_instance_nsip=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
18 | adc_instance_vip=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(networkInterfaces[1].networkIP)"`
19 | adc_instance_snip=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(networkInterfaces[2].networkIP)"`
20 | backend_server1_ip=`gcloud compute instances describe --zone ${zone} backend-server-1 --format="value(networkInterfaces[0].networkIP)"`
21 | backend_server2_ip=`gcloud compute instances describe --zone ${zone} backend-server-2 --format="value(networkInterfaces[0].networkIP)"`
22 |
23 | # Create provider.tf
24 | cat <<EOF >provider.tf
25 |
26 | terraform {
27 | required_providers {
28 | citrixadc = {
29 | source = "citrix/citrixadc"
30 | version = "1.9.0"
31 | }
32 | }
33 | }
34 |
35 | provider "citrixadc" {
36 | endpoint = "https://${adc_instance_nsip}"
37 | insecure_skip_verify = true
38 | password = "verysecret"
39 | }
40 |
41 | EOF
42 |
43 | # Create resources.tf
44 | cat <<EOF >resources.tf
45 |
46 | resource "citrixadc_nsip" "snip" {
47 | ipaddress = "${adc_instance_snip}"
48 | type = "SNIP"
49 | netmask = "255.255.255.0"
50 | }
51 |
52 | resource "citrixadc_service" "tf_service1" {
53 | servicetype = "HTTP"
54 | name = "tf_service1"
55 | ipaddress = "${backend_server1_ip}"
56 | ip = "${backend_server1_ip}"
57 | port = "80"
58 | depends_on = [ citrixadc_nsip.snip ]
59 | }
60 |
61 | resource "citrixadc_service" "tf_service2" {
62 | servicetype = "HTTP"
63 | name = "tf_service2"
64 | ipaddress = "${backend_server2_ip}"
65 | ip = "${backend_server2_ip}"
66 | port = "80"
67 | depends_on = [ citrixadc_nsip.snip ]
68 | }
69 |
70 | resource "citrixadc_lbvserver" "tf_lbvserver" {
71 | ipv46 = "${adc_instance_vip}"
72 | name = "tf_lbvserver"
73 | port = 80
74 | servicetype = "HTTP"
75 | lbmethod = "ROUNDROBIN"
76 |
77 | depends_on = [ citrixadc_nsip.snip ]
78 | }
79 |
80 | resource "citrixadc_lbvserver_service_binding" "tf_binding1" {
81 | name = citrixadc_lbvserver.tf_lbvserver.name
82 | servicename = citrixadc_service.tf_service1.name
83 | weight = 1
84 | }
85 |
86 | resource "citrixadc_lbvserver_service_binding" "tf_binding2" {
87 | name = citrixadc_lbvserver.tf_lbvserver.name
88 | servicename = citrixadc_service.tf_service2.name
89 | weight = 1
90 | }
91 |
92 | EOF
93 |
94 | exit 0
95 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/03-apply-load-balancing-configuration/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 |
11 | set -e
12 | cd /root/apply-lb-configuration
13 | terraform init
14 | terraform apply -auto-approve
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/04-destroy-load-balancing-configuration/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: destroy-load-balancing-configuration
3 | id: qjyh2ya2ikg2
4 | type: challenge
5 | title: Destroy load balancing configuration
6 | teaser: Destroy load balancing configuration
7 | notes:
8 | - type: text
9 | contents: Destroy load balancing configuration
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/apply-lb-configuration
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 900
25 | ---
26 |
27 | Introduction
28 | ============
29 |
30 | Having applied the lb vserver configuration in the previous
31 | step, we will now destroy that configuration.
32 |
33 | This will remove the configuration from the NetScaler ADC instance.
34 |
35 | Destroy configuration
36 | =====================
37 |
38 | First change to the configuration directory
39 | ```bash
40 | cd /root/apply-lb-configuration
41 | ```
42 | Then run the following command
43 | ```bash
44 | terraform destroy
45 | ```
46 | You will be presented with the destroy plan,
47 | which details which resources will be destroyed.
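If you want to review what will be removed without starting the destroy operation, you can also generate the same plan non-interactively:
```bash
terraform plan -destroy
```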
48 |
49 | Answer `yes` and hit `enter` to proceed. After the operation is successfully completed
50 | all configuration from the target NetScaler ADC is deleted.
51 |
52 | The backend services will no longer be reachable through the VIP
53 | address.
54 |
55 | Conclusion
56 | ==========
57 |
58 | This concludes the track. You learned how to install the Terraform CLI binary,
59 | install the NetScaler ADC provider [(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest), and apply and destroy
60 | configurations.
61 |
62 | You can find more example configurations and documentation in the NetScaler ADC [Terraform
63 | provider's documentation](https://registry.terraform.io/providers/citrix/citrixadc/latest/docs).
64 | You can experiment with them and combine them to achieve more complex configurations for advanced use cases such as web application firewall, multi-cluster deployments, and more.
65 |
66 | General documentation for the NetScaler ADC can be found
67 | at the [NetScaler ADC documentation site](https://docs.netscaler.com/en-us/citrix-adc/current-release.html).
68 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/04-destroy-load-balancing-configuration/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-lb-configuration
14 |
15 | if terraform show | grep citrixadc_lbvserver ; then
16 | fail-message "Terraform state still contains the lb vserver resource"
17 | exit 1
18 | fi
19 |
20 |
21 | exit 0
22 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/04-destroy-load-balancing-configuration/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/04-destroy-load-balancing-configuration/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/04-destroy-load-balancing-configuration/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 | set -e
11 | cd /root/apply-lb-configuration
12 | terraform destroy -auto-approve
13 |
14 | exit 0
15 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/config.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | containers:
3 | - name: cloud-client
4 | image: gcr.io/instruqt/cloud-client
5 | shell: /bin/bash
6 | ports:
7 | - 80
8 | gcp_projects:
9 | - name: gcpproject
10 | services:
11 | - compute.googleapis.com
12 | roles: []
13 |
--------------------------------------------------------------------------------
/labs/deliver-apps-with-netscaler-adc-terraform-provider/track.yml:
--------------------------------------------------------------------------------
1 | slug: deliver-apps-with-netscaler-adc-terraform-provider
2 | id: bicjh6xslkjg
3 | title: Deliver Apps with NetScaler ADC and Terraform
4 | teaser: Learn the Terraform workflow by deploying NetScaler ADC and configuring a
5 | load balancer to front-end your web apps
6 | description: |
7 |   NetScaler ADC is an advanced application delivery, load balancing and security solution for your web apps. Terraform provides an infrastructure-as-code, declarative approach to managing your ADC infrastructure.
8 |
9 |   In this hands-on lab, we will learn how to use Terraform to configure a load balancing service in NetScaler ADC and expose your public web apps over the internet. The lab will provision the ADC, a pair of web servers, and an automation controller, and then guide you on using Terraform.
10 | icon: https://storage.googleapis.com/instruqt-frontend/img/tracks/default.png
11 | tags:
12 | - web-apps
13 | - automation
14 | - citrix
15 | - adc
16 | - netscaler
17 | - terraform
18 | - load-balancer
19 | owner: netscaler
20 | developers:
21 | - giorgos.nikolopoulos@citrix.com
22 | - ravi.shekhar@citrix.com
23 | - sumanth.lingappa@citrix.com
24 | - komal.bhardwaj@citrix.com
25 | - mayur.vadhar1@citrix.com
26 | - konstantinos.kaltsas@citrix.com
27 | - mayur.mohanpatil@citrix.com
28 | lab_config:
29 | overlay: false
30 | width: 33
31 | position: right
32 | checksum: "15310452429106074660"
33 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: netscaler-adc-basic-content-switching-prerequisites
3 | id: qydwhklw1oqa
4 | type: challenge
5 | title: Prerequisites
6 | teaser: Provision the base Infrastructure and Setup Terraform CLI
7 | notes:
8 | - type: text
9 | contents: Setup VPX
10 | tabs:
11 | - title: Bastion Host CLI
12 | type: terminal
13 | hostname: cloud-client
14 | - title: NetScaler ADC data
15 | type: service
16 | hostname: cloud-client
17 | path: /adc.html
18 | port: 80
19 | difficulty: basic
20 | timelimit: 3600
21 | ---
22 |
23 | Introduction
24 | ============
25 |
26 | Welcome to the lab.
27 |
28 | In this lab, we have provisioned the following for you:
29 | 1. A NetScaler ADC VPX
30 | 2. Two back-end servers
31 |
32 | You can visit the `NetScaler ADC data` tab to find more details about the IPs that have been assigned to these.
33 |
34 | As part of the lab, we will achieve the following:
35 | 1. Install the Terraform CLI binary and the NetScaler ADC provider [(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest) in a bastion host with access to the target ADC instance.
36 | 2. Configure the ADC to route traffic to our echo server. To learn more about how routing works, please check the `NetScaler ADC Basic Content Switching Configuration using Terraform` lab.
37 | 3. Show how we can create WAF policies and profiles that protect different types of applications by either blocking or logging a malicious request.
38 | 4. Show how we can apply our protection at both the load balancing and the content switching level.
39 |
40 | For this challenge we will need to download and set up the Terraform CLI binary.
41 | Terraform provides instructions on how to download and install the
42 | Terraform binary on this [page](https://www.terraform.io/downloads).
43 | All available Terraform binaries are listed [here](https://releases.hashicorp.com/terraform/). Let’s get started with the Terraform installation.
44 |
45 | Install Terraform
46 | =================
47 |
48 | First we will download Terraform:
49 |
50 | 1. Download the .zip file with the binary for the linux_amd64 platform
51 |
52 | ```bash
53 | curl https://releases.hashicorp.com/terraform/1.1.4/terraform_1.1.4_linux_amd64.zip --output terraform_1.1.4_linux_amd64.zip
54 | ```
55 |
56 | 2. Extract the executable in the current directory
57 |
58 | ```bash
59 | unzip terraform_1.1.4_linux_amd64.zip
60 | ```
61 | 3. Move the extracted binary to a location defined in the PATH variable
62 |
63 | ```bash
64 | mv ./terraform /usr/local/bin
65 | ```
66 | 4. Verify that the Terraform binary is executable from the command line
67 |
68 | ```bash
69 | terraform version
70 | ```
71 | The above command should show you the version information of the Terraform binary.
72 | > Ignore any out of date warning message.
73 |
74 | Conclusion
75 | ==========
76 |
77 | Having installed the Terraform binary our next task will be
78 | to start using Terraform for configuration management.
79 |
80 | Please proceed to the next challenge.
81 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | if ! which terraform ; then
10 | fail-message "Terraform binary not found in PATH"
11 | exit 1
12 | fi
13 | exit 0
14 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 |
10 | mkdir -p /var/tmp/first_time_password_reset
11 |
12 | cd /var/tmp/first_time_password_reset
13 |
14 | ADC_INSTANCE_ID=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(id)"`
15 | ADC_INSTANCE_NSIP=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
16 |
17 | # provider.tf
18 | cat <<EOF >provider.tf
19 |
20 | terraform {
21 | required_providers {
22 | citrixadc = {
23 | source = "citrix/citrixadc"
24 | }
25 | }
26 | }
27 |
28 | provider "citrixadc" {
29 | endpoint = "http://${ADC_INSTANCE_NSIP}"
30 | username = "nsroot"
31 | password = "notnsroot"
32 | }
33 |
34 | EOF
35 |
36 | # resources.tf
37 | cat <<EOF >resources.tf
38 |
39 | resource "citrixadc_password_resetter" "tf_resetter" {
40 | username = "nsroot"
41 | password = "${ADC_INSTANCE_ID}"
42 | new_password = "notnsroot"
43 | }
44 |
45 | EOF
46 |
47 | sleep 10
48 |
49 | terraform init
50 |
51 | sleep 5
52 |
53 | terraform apply -auto-approve
54 |
55 | exit 0
56 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/01-netscaler-adc-basic-content-switching-prerequisites/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "Installing terraform binary"
10 | mkdir -p /var/tmp/terraform_install
11 | cd /var/tmp/terraform_install
12 | curl https://releases.hashicorp.com/terraform/1.1.4/terraform_1.1.4_linux_amd64.zip --output terraform_1.1.4_linux_amd64.zip
13 | unzip terraform_1.1.4_linux_amd64.zip
14 | mv ./terraform /usr/local/bin
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/02-reset-default-password/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: reset-default-password
3 | id: pbmgc94ju9ce
4 | type: challenge
5 | title: Reset the NetScaler ADC default password
6 | teaser: Reset the NetScaler ADC default password
7 | notes:
8 | - type: text
9 | contents: Reset NetScaler ADC default password
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/reset-default-password
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 3600
25 | ---
26 |
27 | Introduction
28 | ============
29 |
30 | ## Reset default password
31 |
32 | The NetScaler ADC instance provisioned in Google Cloud
33 | has a default initial password. Before any configuration can be applied, we need to reset this default password. This can be done interactively through the Web GUI or
34 | through the nscli over SSH.
35 |
36 | In this challenge, we are going to reset the password using the Terraform provider.
37 |
38 | Terraform configuration
39 | =======================
40 |
41 | The configuration has already been written to the directory
42 | `/root/reset-default-password`. You can browse the files in the Code Editor tab.
43 |
44 | `provider.tf` - This file contains the provider connection information.
45 | You will notice that we define the endpoint as
46 | an `https` URL. This ensures that the data exchanged with the target ADC
47 | is encrypted.
48 | Because the target ADC's default TLS certificate is self-signed,
49 | we also need to set the option `insecure_skip_verify = true`. This prevents the HTTP requests from failing due to certificate
50 | verification errors.
51 | For production instances it is strongly recommended to replace
52 | the default TLS certificate with a properly signed one.
53 |
54 | `resources.tf` - This file contains the resource which will do the actual
55 | reset of the password. For Google Cloud the default password is the instance id.
56 | The new password is defined with the `new_password` attribute.
57 | You can edit this to something other than the provided value. If you do, make sure to take note of it, because you will need to change the resource files for the subsequent challenges.
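For reference, the generated files look similar to the following sketch; the endpoint and password values below are placeholders for the environment-specific values written by the lab setup:
```hcl
provider "citrixadc" {
  endpoint             = "https://<NSIP>" # management address of the target ADC
  insecure_skip_verify = true             # accept the default self-signed certificate
}

resource "citrixadc_password_resetter" "tf_resetter" {
  username     = "nsroot"
  password     = "<instance id>"       # default password on Google Cloud
  new_password = "<your new password>"
}
```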
58 |
59 | Apply configuration
60 | ===================
61 | Go to the Bastion Host CLI and perform the following operations:
62 | 1. Change current directory to the one containing the terraform configuration files
63 |
64 | ```bash
65 | cd /root/reset-default-password
66 | ```
67 |
68 | 2. Initialize the terraform configuration.
69 | ```bash
70 | terraform init
71 | ```
72 | This command will download and install the NetScaler ADC provider[(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest)
73 | which is needed to run the configuration.
74 |
75 | 3. Apply the configuration.
76 | ```bash
77 | terraform apply
78 | ```
79 | This command will present you with the configuration changes
80 | and will prompt you to verify you want to apply them.
81 |
82 | Answer `yes` and press enter.
83 |
84 | If all goes well you should see a message saying 1 resource was
85 | created without any errors.
86 |
87 | Conclusion
88 | ==========
89 |
90 | We have now configured the target ADC with a new password.
91 |
92 | If you changed the new password to something other than the one
93 | supplied, please take note of it, since you will need it
94 | for the NetScaler ADC provider [(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest) configuration in the subsequent challenges.
95 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/02-reset-default-password/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/reset-default-password
14 |
15 | if ! terraform show | grep citrixadc_password_resetter ; then
16 | fail-message "Terraform state does not contain a password resetter resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/02-reset-default-password/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleanup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/02-reset-default-password/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform setup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | set -e
12 |
13 | mkdir -p /root/reset-default-password
14 | cd /root/reset-default-password
15 | zone="europe-west1-b"
16 |
17 | adc_instance_nsip=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
18 |
19 | # Create provider.tf
20 | cat <<EOF >provider.tf
21 |
22 | terraform {
23 | required_providers {
24 | citrixadc = {
25 | source = "citrix/citrixadc"
26 | version = "1.9.0"
27 | }
28 | }
29 | }
30 |
31 | provider "citrixadc" {
32 | endpoint = "https://${adc_instance_nsip}"
33 | insecure_skip_verify = true
34 | }
35 |
36 | EOF
37 |
38 | instance_id=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(id)"`
39 |
40 | # Apply the password reset
41 | cat <<EOF >resources.tf
42 |
43 | resource "citrixadc_password_resetter" "tf_resetter" {
44 | username = "nsroot"
45 | password = "${instance_id}"
46 | new_password = "verysecret"
47 | }
48 |
49 | EOF
50 |
51 | exit 0
52 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/02-reset-default-password/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 | set -e
11 | cd /root/reset-default-password
12 | terraform init
13 | terraform apply -auto-approve
14 | exit 0
15 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/03-netscaler-adc-basic-content-switching-csvserver/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-waf-configuration
14 |
15 | if ! terraform show | grep citrixadc_cspolicy ; then
16 | fail-message "Terraform state does not contain the cs policy resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/03-netscaler-adc-basic-content-switching-csvserver/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleanup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/03-netscaler-adc-basic-content-switching-csvserver/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 |
11 | set -e
12 | cd /root/apply-waf-configuration
13 | terraform init
14 | terraform apply -var-file example.tfvars -auto-approve
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/04-netscaler-adc-basic-waf-policy1/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-waf-configuration
14 |
15 | if ! terraform show | grep citrixadc_appfwpolicy ; then
16 | fail-message "Terraform state does not contain the appfw policy resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/04-netscaler-adc-basic-waf-policy1/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleanup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/04-netscaler-adc-basic-waf-policy1/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 |
11 | set -e
12 | cd /root/apply-waf-configuration
13 | terraform init
14 | terraform apply -var-file example.tfvars -auto-approve
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/06-netscaler-adc-basic-content-switching-destroy/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: netscaler-adc-basic-content-switching-destroy
3 | id: z1wzbxiblkp8
4 | type: challenge
5 | title: Destroy Configuration
6 | teaser: Destroy Terraform Managed Configuration
7 | notes:
8 | - type: text
9 | contents: Destroy Terraform managed configuration
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/apply-waf-configuration
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 3600
25 | ---
26 | Introduction
27 | ============
28 |
29 | As we have already applied all configurations for our use cases in the previous
30 | steps we will now destroy that configuration.
31 |
32 | This will remove all configuration from the NetScaler ADC instance that is managed from Terraform.
33 |
34 | Destroy configuration
35 | =====================
36 |
37 | First change to the configuration directory
38 | ```bash
39 | cd /root/apply-waf-configuration
40 | ```
41 | Then run the following command
42 | ```bash
43 | terraform destroy -var-file example.tfvars
44 | ```
45 | You will be prompted with the destroy plan
46 | which will detail which resources will be destroyed.
47 |
48 | Answer `yes` and hit `enter` to proceed. After the operation is successfully completed,
49 | all configuration from the target NetScaler ADC is deleted.
50 |
51 | The back-end services will not be reachable through the VIP
52 | address.
53 |
54 |
55 | Conclusion
56 | ==========
57 | This concludes our track.
58 |
59 | In this track we focused on some example configurations that can cover specific use cases. You can find more example configurations of how to configure ADC using Terraform on the NetScaler ADC provider's [Github repository](https://github.com/citrix/terraform-provider-citrixadc/tree/master/examples).
60 |
61 | You can experiment with them and combine them to achieve more complex configurations for advanced use cases such as Application Protection, High Availability and Global Server Load Balancing and more.
62 |
63 | General documentation for the NetScaler ADC can be found
64 | at the [NetScaler ADC documentation site](https://docs.netscaler.com/en-us/citrix-adc/current-release.html).
65 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/06-netscaler-adc-basic-content-switching-destroy/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-waf-configuration
14 |
15 | if terraform show | grep citrixadc_csvserver ; then
16 | fail-message "Terraform state still contains the cs vserver resource"
17 | exit 1
18 | fi
19 |
20 |
21 | exit 0
22 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/06-netscaler-adc-basic-content-switching-destroy/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleanup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/06-netscaler-adc-basic-content-switching-destroy/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/06-netscaler-adc-basic-content-switching-destroy/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 | set -e
11 | cd /root/apply-waf-configuration
12 | terraform destroy -var-file example.tfvars -auto-approve
13 |
14 | exit 0
15 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/config.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | containers:
3 | - name: cloud-client
4 | image: gcr.io/instruqt/cloud-client
5 | shell: /bin/bash
6 | ports:
7 | - 80
8 | gcp_projects:
9 | - name: gcpproject
10 | services:
11 | - compute.googleapis.com
12 | roles: []
13 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-application-protection-configuration-waf-using-terraform/track.yml:
--------------------------------------------------------------------------------
1 | slug: netscaler-adc-basic-application-protection-configuration-waf-using-terraform
2 | id: uxqrymyfpftk
3 | title: NetScaler ADC Basic Application Protection Configuration (WAF) using Terraform
4 | teaser: Learn how to leverage WAF Policies for protecting your Applications.
5 | description: |+
6 | ## Learn how to leverage WAF Policies for protecting your Applications.
7 |
8 |   In this track we will leverage infrastructure-as-code templates to demonstrate:
9 | - How to create WAF policies and profiles.
10 | - How to enable WAF policies on load balancing or content switching virtual server level.
11 | - How to block or log malicious requests based on different criteria.
12 |
13 |   The lab will provision a NetScaler ADC and a simple echo server for you. Then it will guide you on using Terraform to apply your configuration. The echo server is a simple application that echoes back the request it receives. We will leverage it to showcase how our policies are applied based on the information contained in the request / response.
14 |
15 | During this lab we are going to send some malicious requests and see how NetScaler can either block or log these (by acting as a Web Application Firewall) based on the policies that are configured.
16 |
17 | icon: https://storage.googleapis.com/instruqt-frontend/img/tracks/default.png
18 | tags:
19 | - web-apps
20 | - automation
21 | - citrix
22 | - adc
23 | - terraform
24 | - load-balancer
25 | owner: netscaler
26 | developers:
27 | - kkaltsas@tibco.com
28 | - rohit.myali@gmail.com
29 | - sumanth.lingappa@citrix.com
30 | - chris.chau@cloud.com
31 | timelimit: 3600
32 | lab_config:
33 | overlay: false
34 | width: 33
35 | position: right
36 | checksum: "12057409886485792925"
37 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/01-netscaler-adc-basic-rewrite-responder-policies-prerequisites/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: netscaler-adc-basic-rewrite-responder-policies-prerequisites
3 | id: ox5jvix0dzja
4 | type: challenge
5 | title: Prerequisites
6 | teaser: Provision the base Infrastructure and Setup Terraform CLI
7 | notes:
8 | - type: text
9 | contents: Setup VPX
10 | tabs:
11 | - title: Bastion Host CLI
12 | type: terminal
13 | hostname: cloud-client
14 | - title: NetScaler ADC data
15 | type: service
16 | hostname: cloud-client
17 | path: /adc.html
18 | port: 80
19 | difficulty: basic
20 | timelimit: 3600
21 | ---
22 |
23 | Introduction
24 | ============
25 |
26 | Welcome to the lab.
27 |
28 | The lab will provision a NetScaler ADC and a simple echo server for you. Then it will guide you on using Terraform to apply your configuration. The echo server is a simple application that echoes back the request it receives. We will leverage it to showcase how our policies manipulate the information contained in the request or response.
29 |
30 | You can visit the `NetScaler ADC data` tab to find more details about the IPs that have been assigned to these.
31 |
32 |
33 | As part of the lab, we will achieve the following:
34 | 1. Install the Terraform CLI binary and the NetScaler ADC provider [(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest) in a bastion host with access to the target ADC instance.
35 | 2. Configure the ADC to route traffic to our echo server. To learn more about how routing works, please check the `NetScaler ADC Basic Content Switching Configuration using Terraform` lab.
36 | 3. Show how we can manipulate an incoming request by adding an additional HTTP header based on certain criteria.
37 | 4. Show how we can redirect a request to another URL using a responder policy.
38 |
39 |
40 | For this challenge we will need to download and set up the Terraform CLI binary.
41 | Terraform provides instructions on how to download and install the
42 | Terraform binary on this [page](https://www.terraform.io/downloads).
43 | All available Terraform binaries are listed [here](https://releases.hashicorp.com/terraform/). Let's get started with the Terraform installation.
44 |
45 | Install Terraform
46 | =================
47 |
48 | First we will download Terraform:
49 |
50 | 1. Download the .zip file with the binary for the linux_amd64 platform
51 |
52 | ```bash
53 | curl https://releases.hashicorp.com/terraform/1.1.4/terraform_1.1.4_linux_amd64.zip --output terraform_1.1.4_linux_amd64.zip
54 | ```
55 |
56 | 2. Extract the executable in the current directory
57 |
58 | ```bash
59 | unzip terraform_1.1.4_linux_amd64.zip
60 | ```
61 | 3. Move the extracted binary to a location defined in the PATH variable
62 |
63 | ```bash
64 | mv ./terraform /usr/local/bin
65 | ```
66 | 4. Verify that the Terraform binary is executable from the command line
67 |
68 | ```bash
69 | terraform version
70 | ```
71 | The above command should show you the version information of the Terraform binary.
72 | > Ignore any out of date warning message.
73 |
74 | Conclusion
75 | ==========
76 |
77 | Having installed the Terraform binary our next task will be
78 | to start using Terraform for configuration management.
79 |
80 | Please proceed to the next challenge.
81 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/01-netscaler-adc-basic-rewrite-responder-policies-prerequisites/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | if ! which terraform ; then
10 | fail-message "Terraform binary not found in PATH"
11 | exit 1
12 | fi
13 | exit 0
14 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/01-netscaler-adc-basic-rewrite-responder-policies-prerequisites/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/01-netscaler-adc-basic-rewrite-responder-policies-prerequisites/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 |
10 | mkdir -p /var/tmp/first_time_password_reset
11 |
12 | cd /var/tmp/first_time_password_reset
13 |
14 | ADC_INSTANCE_ID=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(id)"`
15 | ADC_INSTANCE_NSIP=`gcloud compute instances describe --zone europe-west1-b adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
16 |
17 | # provider.tf
18 | cat <<EOF >provider.tf
19 |
20 | terraform {
21 | required_providers {
22 | citrixadc = {
23 | source = "citrix/citrixadc"
24 | }
25 | }
26 | }
27 |
28 | provider "citrixadc" {
29 | endpoint = "http://${ADC_INSTANCE_NSIP}"
30 | username = "nsroot"
31 | password = "notnsroot"
32 | }
33 |
34 | EOF
35 |
36 | # resources.tf
37 | cat <<EOF >resources.tf
38 |
39 | resource "citrixadc_password_resetter" "tf_resetter" {
40 | username = "nsroot"
41 | password = "${ADC_INSTANCE_ID}"
42 | new_password = "notnsroot"
43 | }
44 |
45 | EOF
46 |
47 | sleep 10
48 |
49 | terraform init
50 |
51 | sleep 5
52 |
53 | terraform apply -auto-approve
54 |
55 | exit 0
56 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/01-netscaler-adc-basic-rewrite-responder-policies-prerequisites/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solve the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "Installing terraform binary"
10 | mkdir -p /var/tmp/terraform_install
11 | cd /var/tmp/terraform_install
12 | curl https://releases.hashicorp.com/terraform/1.1.4/terraform_1.1.4_linux_amd64.zip --output terraform_1.1.4_linux_amd64.zip
13 | unzip terraform_1.1.4_linux_amd64.zip
14 | mv ./terraform /usr/local/bin
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/02-reset-default-password/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: reset-default-password
3 | id: bf9j8vxeclbw
4 | type: challenge
5 | title: Reset the NetScaler ADC default password
6 | teaser: Reset the NetScaler ADC default password
7 | notes:
8 | - type: text
9 | contents: Reset ADC password
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/reset-default-password
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 3600
25 | ---
26 |
27 | Introduction
28 | ============
29 |
30 | ## Reset default password
31 |
32 | The ADC instance provisioned in Google Cloud
33 | has a default initial password. Before any configuration can be applied, we need to reset this default password. This can be done interactively through the Web GUI or
34 | through the nscli over SSH.
35 |
36 | In this challenge, we are going to reset the password using the Terraform provider.
37 |
38 | Terraform configuration
39 | =======================
40 |
41 | The configuration has already been written to the directory
42 | `/root/reset-default-password`. You can browse the files in the Code Editor tab.
43 |
44 | `provider.tf` - This file contains the provider connection information.
45 | You will notice that we define the endpoint as
46 | an `https` URL. This ensures that the data exchanged with the target ADC
47 | is encrypted.
48 | Because the target ADC's default TLS certificate is self-signed,
49 | we also need to set the option `insecure_skip_verify = true`. This prevents the HTTP requests from failing due to certificate
50 | verification errors.
51 | For production instances it is strongly recommended to replace
52 | the default TLS certificate with a properly signed one.
53 |
54 | `resources.tf` - This file contains the resource which will do the actual
55 | reset of the password. For Google Cloud the default password is the instance id.
56 | The new password is defined with the `new_password` attribute.
57 | You can edit this to something other than the provided value. If you do, make sure to take note of it, because you will need to change the resource files for the subsequent challenges.
58 |
59 | Apply configuration
60 | ===================
61 | Go to the Bastion Host CLI and perform the following operations:
62 | 1. Change current directory to the one containing the terraform configuration files
63 |
64 | ```bash
65 | cd /root/reset-default-password
66 | ```
67 |
68 | 2. Initialize the terraform configuration.
69 | ```bash
70 | terraform init
71 | ```
72 | This command will download and install the NetScaler ADC provider[(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest)
73 | which is needed to run the configuration.
74 |
75 | 3. Apply the configuration.
76 | ```bash
77 | terraform apply
78 | ```
79 | This command will present you with the configuration changes
80 | and will prompt you to verify you want to apply them.
81 |
82 | Answer `yes` and press enter.
83 |
84 | If all goes well you should see a message saying 1 resource was
85 | created without any errors.
86 |
87 | Conclusion
88 | ==========
89 |
90 | We have now configured the target ADC with a new password.
91 |
92 | If you changed the new password to something other than the one
93 | supplied, please take note of it, since you will need it
94 | for the NetScaler ADC provider [(terraform-provider-citrixadc)](https://registry.terraform.io/providers/citrix/citrixadc/latest) configuration in the subsequent challenges.
95 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/02-reset-default-password/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/reset-default-password
14 |
15 | if ! terraform show | grep citrixadc_password_resetter ; then
16 | fail-message "Terraform state does not contain a password resetter resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/02-reset-default-password/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleanup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/02-reset-default-password/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform setup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | set -e
12 |
13 | mkdir -p /root/reset-default-password
14 | cd /root/reset-default-password
15 | zone="europe-west1-b"
16 |
17 | adc_instance_nsip=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(networkInterfaces[0].accessConfigs[0].natIP)"`
18 |
19 | # Create provider.tf
20 | cat <<EOF >provider.tf
21 |
22 | terraform {
23 | required_providers {
24 | citrixadc = {
25 | source = "citrix/citrixadc"
26 | version = "1.9.0"
27 | }
28 | }
29 | }
30 |
31 | provider "citrixadc" {
32 | endpoint = "https://${adc_instance_nsip}"
33 | insecure_skip_verify = true
34 | }
35 |
36 | EOF
37 |
38 | instance_id=`gcloud compute instances describe --zone ${zone} adc-demo --format="value(id)"`
39 |
40 | # Apply the password reset
41 | cat <<EOF >resources.tf
42 |
43 | resource "citrixadc_password_resetter" "tf_resetter" {
44 | username = "nsroot"
45 | password = "${instance_id}"
46 | new_password = "verysecret"
47 | }
48 |
49 | EOF
50 |
51 | exit 0
52 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/02-reset-default-password/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 | set -e
11 | cd /root/reset-default-password
12 | terraform init
13 | terraform apply -auto-approve
14 | exit 0
15 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/03-netscaler-adc-basic-virtual-servers-config/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-rewrite-configuration
14 |
15 | if ! terraform show | grep citrixadc_cspolicy ; then
16 | fail-message "Terraform state does not contain the cs policy resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/03-netscaler-adc-basic-virtual-servers-config/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleanup the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/03-netscaler-adc-basic-virtual-servers-config/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 |
11 | set -e
12 | cd /root/apply-rewrite-configuration
13 | terraform init
14 | terraform apply -var-file example.tfvars -auto-approve
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/04-netscaler-adc-basic-rewrite-policies/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-rewrite-configuration
14 |
15 | if ! terraform show | grep citrixadc_rewritepolicy ; then
16 | fail-message "Terraform state does not contain the rewrite policy resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/04-netscaler-adc-basic-rewrite-policies/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/04-netscaler-adc-basic-rewrite-policies/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 |
11 | set -e
12 | cd /root/apply-rewrite-configuration
13 | terraform init
14 | terraform apply -var-file example.tfvars -auto-approve
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/05-netscaler-adc-basic-responder-policies/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-rewrite-configuration
14 |
15 | if ! terraform show | grep citrixadc_responderpolicy ; then
16 | fail-message "Terraform state does not contain the responder policy resource"
17 | exit 1
18 | fi
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/05-netscaler-adc-basic-responder-policies/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/05-netscaler-adc-basic-responder-policies/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 |
11 | set -e
12 | cd /root/apply-rewrite-configuration
13 | terraform init
14 | terraform apply -var-file example.tfvars -auto-approve
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/assignment.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: netscaler-adc-basic-content-switching-destroy
3 | id: mm42hgrqv0xc
4 | type: challenge
5 | title: Destroy Configuration
6 | teaser: Destroy Terraform Managed Configuration
7 | notes:
8 | - type: text
9 | contents: Destroy rewrite policy configuration
10 | tabs:
11 | - title: Code Editor
12 | type: code
13 | hostname: cloud-client
14 | path: /root/apply-rewrite-configuration
15 | - title: Bastion Host CLI
16 | type: terminal
17 | hostname: cloud-client
18 | - title: NetScaler ADC data
19 | type: service
20 | hostname: cloud-client
21 | path: /adc.html
22 | port: 80
23 | difficulty: basic
24 | timelimit: 3595
25 | ---
26 | Introduction
27 | ============
28 |
29 | Having already applied all the configurations for our use cases in the previous
30 | steps, we will now destroy that configuration.
31 |
32 | This will remove all configuration that is managed by Terraform from the NetScaler ADC instance.
33 |
34 | Destroy configuration
35 | =====================
36 | First, change to the configuration directory:
37 | ```bash
38 | cd /root/apply-rewrite-configuration
39 | ```
40 | Then run the following command:
41 | ```bash
42 | terraform destroy -var-file example.tfvars
43 | ```
44 | You will be presented with the destroy plan,
45 | which details the resources that will be destroyed.
46 |
47 | Answer `yes` and hit `enter` to proceed. After the operation completes successfully,
48 | all Terraform-managed configuration is deleted from the target NetScaler ADC.
49 |
50 | The backend services will no longer be reachable through the VIP
51 | address.
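If you want to double-check that nothing is left under Terraform management, you can list the resources still tracked in the state (an optional sanity check, assuming you are still in the configuration directory); after a successful destroy the command prints nothing:
```bash
terraform state list
```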
52 |
53 |
54 | Conclusion
55 | ==========
56 | This concludes our track.
57 |
58 | In this track we focused on example configurations that cover specific use cases. You can find more examples of how to configure the NetScaler ADC using Terraform in the provider's [GitHub repository](https://github.com/citrix/terraform-provider-citrixadc/tree/master/examples).
59 |
60 | You can experiment with them and combine them to achieve more complex configurations for advanced use cases such as Application Protection, High Availability, Global Server Load Balancing, and more.
61 |
62 | General documentation for the NetScaler ADC can be found
63 | at the [NetScaler ADC documentation site](https://docs.netscaler.com/en-us/citrix-adc/current-release.html).
64 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/check-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform checks the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the check script"
10 |
11 | set -e
12 |
13 | cd /root/apply-rewrite-configuration
14 |
15 | if terraform show | grep citrixadc_rewritepolicy ; then
16 | fail-message "Terraform state still contains the rewrite policy resource"
17 | exit 1
18 | fi
19 |
20 |
21 | exit 0
22 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/cleanup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform cleans up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the cleanup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/setup-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform sets up the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the setup script"
10 |
11 | exit 0
12 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/06-netscaler-adc-basic-content-switching-destroy/solve-cloud-client:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script runs when the platform solves the challenge.
4 | #
5 | # The platform determines if the script was successful using the exit code of this
6 | # script. If the exit code is not 0, the script fails.
7 | #
8 |
9 | echo "This is the solve script"
10 | set -e
11 | cd /root/apply-rewrite-configuration
12 | terraform destroy -var-file example.tfvars -auto-approve
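# -auto-approve skips the interactive confirmation that the assignment walks through manually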
13 |
14 | exit 0
15 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/config.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | containers:
3 | - name: cloud-client
4 | image: gcr.io/instruqt/cloud-client
5 | shell: /bin/bash
6 | ports:
7 | - 80
8 | gcp_projects:
9 | - name: gcpproject
10 | services:
11 | - compute.googleapis.com
12 | roles: []
13 |
--------------------------------------------------------------------------------
/labs/netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform/track.yml:
--------------------------------------------------------------------------------
1 | slug: netscaler-adc-basic-rewrite-responder-policies-configuration-using-terraform
2 | id: zlo9vicbxugi
3 | title: NetScaler ADC Basic Rewrite / Responder Policies Configuration using Terraform
4 | teaser: Learn how to leverage basic Rewrite / Responder Policies for manipulating
5 | Requests and Responses.
6 | description: |-
7 | ## Learn how to leverage basic Rewrite / Responder Policies for manipulating Requests and Responses.
8 |
9 | In this track we will leverage infrastructure-as-code templates to demonstrate:
10 |
11 | - How to create rewrite / responder policies.
12 | - The difference between the two.
13 | - How to bind a policy to a content switching virtual server.
14 | - How to manipulate an incoming request based on different criteria.
15 | - How to redirect a request based on different criteria.
16 |
17 | The lab will provision a NetScaler ADC and a simple echo server for you, and then guide you through applying your configuration with Terraform. The echo server is a simple application that echoes back the request it receives. We will use it to showcase how our policies are applied based on the information contained in the request and how we can manipulate the request / response.
18 | icon: https://storage.googleapis.com/instruqt-frontend/img/tracks/default.png
19 | tags:
20 | - web-apps
21 | - automation
22 | - citrix
23 | - adc
24 | - netscaler
25 | - terraform
26 | - load-balancer
27 | owner: netscaler
28 | developers:
29 | - kkaltsas@tibco.com
30 | - rohit.myali@gmail.com
31 | - sumanth.lingappa@citrix.com
32 | - chris.chau@cloud.com
33 | timelimit: 3600
34 | lab_config:
35 | overlay: false
36 | width: 33
37 | position: right
38 | checksum: "13021517827523298247"
39 |
--------------------------------------------------------------------------------