├── tests ├── conftest.py ├── test_cli.py ├── test_cloud_build.py └── test_setup_command.py ├── nuke_from_orbit ├── __init__.py ├── utils │ ├── __init__.py │ ├── templates │ │ ├── config-default.yaml │ │ ├── loadtest-cert.yaml │ │ ├── prometheus-config.yaml │ │ ├── cloudwatch-config.yaml │ │ ├── loadtest-ingress.yaml │ │ ├── cloudwatch-controller.yaml │ │ ├── locust-worker-controller.yaml │ │ ├── grafana-controller.yaml │ │ ├── prometheus-controller.yaml │ │ ├── locust-controller.yaml │ │ └── grafana-config.yaml │ ├── rendered │ │ └── config_args.yaml │ ├── cloud_build.py │ ├── kubernetes_deploy.py │ ├── gke_cluster.py │ └── nuke_utils.py ├── commands │ ├── __init__.py │ ├── update_config_commands.py │ ├── update_test_commands.py │ ├── teardown_commands.py │ └── setup_commands.py ├── docker-image │ ├── realbrowserlocusts │ │ ├── realbrowserlocusts │ │ │ ├── __init__.py │ │ │ ├── core.py │ │ │ └── locusts.py │ │ ├── setup.py │ │ └── README.md │ ├── locust-tasks │ │ ├── run.sh │ │ └── tasks.py │ └── Dockerfile └── cli.py ├── credentials └── README.md ├── .gitignore ├── configs └── README.md ├── pyproject.toml ├── .github └── workflows │ └── nfo_build.yml ├── CONTRIBUTING.md ├── locust_test_scripts ├── default_dashboard_loadtest.py ├── scenario2.py └── multiple_content.py ├── CODE_OF_CONDUCT.md └── README.md /tests/conftest.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /nuke_from_orbit/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /nuke_from_orbit/commands/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /credentials/README.md: -------------------------------------------------------------------------------- 1 | ## Credentials 2 | 3 | Place your service account json files in this directory and refer to them in your config. Reference the file name only. 4 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/templates/config-default.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cloud.google.com/v1 3 | kind: BackendConfig 4 | metadata: 5 | name: config-default 6 | spec: 7 | iap: 8 | enabled: true 9 | oauthclientCredentials: 10 | secretName: iap-secret 11 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/templates/loadtest-cert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.gke.io/v1beta2 3 | kind: ManagedCertificate 4 | metadata: 5 | name: loadtest-cert 6 | spec: 7 | domains: 8 | - locust.{{loadtest_dns_domain}} 9 | {% if external -%} 10 | - locust-metrics.{{loadtest_dns_domain}} 11 | {% else -%} 12 | - prometheus.{{loadtest_dns_domain}} 13 | - grafana.{{loadtest_dns_domain}} 14 | {% endif %} 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | docker-compose.yml 2 | .env 3 | configs/*.yaml 4 | credentials/*.json 5 | locust_test_scripts/default_dashboard_loadtest.py 6 | nuke_from_orbit/utils/rendered/grafana*.yaml 7 | nuke_from_orbit/utils/rendered/loadtest*.yaml 8 | nuke_from_orbit/utils/rendered/locust*.yaml 9 | nuke_from_orbit/utils/rendered/prometheus*.yaml 10 | nuke_from_orbit/utils/rendered/config-default.yaml 11 | 
nuke_from_orbit/utils/rendered/kubeconfig.yaml 12 | .coverage 13 | -------------------------------------------------------------------------------- /nuke_from_orbit/docker-image/realbrowserlocusts/realbrowserlocusts/__init__.py: -------------------------------------------------------------------------------- 1 | # pylint:disable=undefined-all-variable 2 | """ Expose RealBrowserLocust subclasses at package level """ 3 | from realbrowserlocusts.locusts import FirefoxLocust, PhantomJSLocust, \ 4 | ChromeLocust, HeadlessChromeLocust 5 | 6 | __all__ = [ 7 | 'FirefoxLocust', 8 | 'PhantomJSLocust', 9 | 'ChromeLocust', 10 | 'HeadlessChromeLocust' 11 | ] 12 | 13 | __version__ = "0.2" 14 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/rendered/config_args.yaml: -------------------------------------------------------------------------------- 1 | required_args: 2 | - gcp_project_id 3 | - loadtest_name 4 | - loadtest_step_load 5 | - loadtest_worker_count 6 | - loadtest_script_name 7 | - gcp_zone 8 | - gcp_cluster_node_count 9 | - gcp_cluster_machine_type 10 | - gcp_service_account_file 11 | - looker_host 12 | required_external_args: 13 | - gcp_oauth_client_id 14 | - gcp_oauth_client_secret 15 | - loadtest_dns_domain 16 | optional_args: 17 | - looker_user 18 | - looker_pass 19 | - looker_api_client_id 20 | - looker_api_client_secret 21 | -------------------------------------------------------------------------------- /nuke_from_orbit/docker-image/locust-tasks/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LOCUST="/usr/local/bin/locust" 4 | LOCUS_OPTS="-f /locust-tasks/tasks.py --host=$TARGET_HOST" 5 | LOCUST_MODE=${LOCUST_MODE:-standalone} 6 | 7 | if [[ "$LOCUST_MODE" = "master" ]]; then 8 | LOCUS_OPTS="$LOCUS_OPTS --master" 9 | if [[ "$LOCUST_STEP" = true ]]; then 10 | LOCUS_OPTS="$LOCUS_OPTS --step-load" 11 | fi 12 | elif [[ "$LOCUST_MODE" = "worker" ]]; 
then 13 | LOCUS_OPTS="$LOCUS_OPTS --slave --master-host=$LOCUST_MASTER_HOST" 14 | fi 15 | 16 | echo "$LOCUST $LOCUS_OPTS" 17 | 18 | $LOCUST $LOCUS_OPTS 19 | -------------------------------------------------------------------------------- /nuke_from_orbit/docker-image/Dockerfile: -------------------------------------------------------------------------------- 1 | # Start with a base Python 3.7 image that has chromedriver installed 2 | FROM joyzoursky/python-chromedriver:3.7 3 | 4 | # Add the external tasks directory into /tasks 5 | ADD locust-tasks /locust-tasks 6 | ADD realbrowserlocusts /realbrowserlocusts 7 | 8 | # Install the modified realbrowserlocusts package 9 | RUN pip install realbrowserlocusts/. 10 | 11 | # Expose the required Locust ports 12 | EXPOSE 5557 5558 8089 13 | 14 | # Set script to be executable 15 | RUN chmod 755 /locust-tasks/run.sh 16 | 17 | # Start Locust using LOCUS_OPTS environment variable 18 | ENTRYPOINT ["/locust-tasks/run.sh"] 19 | -------------------------------------------------------------------------------- /nuke_from_orbit/docker-image/realbrowserlocusts/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | NAME = "realbrowserlocusts" 4 | VERSION = "0.4.1" 5 | REQUIRES = ["greenlet==0.4.16", "locustio==0.14.6", "selenium==3.141.0"] 6 | 7 | setup( 8 | name=NAME, 9 | packages=["realbrowserlocusts"], 10 | version=VERSION, 11 | description="Minimal set of real browser locusts to be used in conjuntion with locust.io", 12 | install_requires=REQUIRES, 13 | author="Nick Bocuart", 14 | author_email="nboucart@gmail.com", 15 | url="https://github.com/nickboucart/realbrowserlocusts", 16 | download_url="https://github.com/nickboucart/realbrowserlocusts/tarball/0.3", 17 | keywords=["testing", "locust"], 18 | classifiers=[], 19 | ) 20 | -------------------------------------------------------------------------------- 
/nuke_from_orbit/utils/templates/prometheus-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: prom-conf 6 | labels: 7 | name: prom-conf 8 | data: 9 | prometheus.yml: |- 10 | global: 11 | scrape_interval: 5s 12 | evaluation_interval: 5s 13 | scrape_configs: 14 | - job_name: 'locust' 15 | scrape_interval: 2s 16 | static_configs: 17 | - targets: ['le-pod:80'] 18 | - job_name: 'looker' 19 | scrape_interval: 2s 20 | static_configs: 21 | - targets: [{% for l in looker_nodes %}{{ l }}:9810{{ ", " if not loop.last }}{% endfor %}] 22 | - job_name: 'cloudwatch' 23 | scrape_interval: 2s 24 | static_configs: 25 | - targets: ['cloudwatch-pod:9106'] 26 | -------------------------------------------------------------------------------- /configs/README.md: -------------------------------------------------------------------------------- 1 | ## Configs 2 | 3 | Place your config yaml files here! You'll refer to them via the `--config-file` argument in your commands. 
4 | 5 | An example config yaml may look something like this: 6 | 7 | ``` 8 | gke_cluster: 9 | gcp_project_id: my-gcp-project 10 | gcp_zone: us-central1-c 11 | gcp_cluster_node_count: 3 12 | gcp_cluster_machine_type: c2-standard-8 13 | gcp_service_account_file: my-service-account-file.json 14 | loadtester: 15 | loadtest_name: demo-loadtest 16 | loadtest_step_load: "true" 17 | loadtest_worker_count: 20 18 | loadtest_script_name: default_dashboard_loadtest.py 19 | looker_credentials: 20 | looker_host: https://looker.company.com 21 | looker_user: me@company.com 22 | looker_pass: abc123fakepassword 23 | external: 24 | gcp_oauth_client_id: abc123.apps.googleusercontent.com 25 | gcp_oauth_client_secret: 789xzyfakeclient 26 | loadtest_dns_domain: py-loadtest.colinpistell.com 27 | ``` 28 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "nuke_from_orbit" 3 | version = "0.11.0" 4 | description = "A distributed load testing tool for running api and browser based tests on Looker" 5 | authors = ["Colin Pistell "] 6 | 7 | [tool.poetry.dependencies] 8 | python = "^3.8" 9 | click = "^7.1.2" 10 | selenium = "*" 11 | realbrowserlocusts = "*" 12 | locustio = "==0.14.6" 13 | looker-sdk = "*" 14 | jinja2 = "*" 15 | backoff-utils = "*" 16 | kubernetes = "*" 17 | google-cloud-container = "^2.2.0" 18 | google-cloud-build = "^3.0.0" 19 | google-cloud-storage = "^1.33.0" 20 | google-api-python-client = "^1.12.8" 21 | 22 | [tool.poetry.dev-dependencies] 23 | pytest = "^5.2" 24 | ipython = "*" 25 | flake8 = "*" 26 | pytest-mock = "^3.3.1" 27 | pytest-cov = "^2.10.1" 28 | 29 | [tool.poetry.scripts] 30 | nfo = "nuke_from_orbit.cli:nfo" 31 | 32 | [build-system] 33 | requires = ["poetry-core>=1.0.0"] 34 | build-backend = "poetry.core.masonry.api" 35 | -------------------------------------------------------------------------------- 
/.github/workflows/nfo_build.yml: -------------------------------------------------------------------------------- 1 | name: nuke_from_orbit 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | build_38: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v1 17 | - name: Set up Python 3.8 18 | uses: actions/setup-python@v1 19 | with: 20 | python-version: 3.8 21 | - name: Install dependencies 22 | uses: knowsuchagency/poetry-install@v1 23 | env: 24 | POETRY_VIRTUALENVS_CREATE: false 25 | - name: Lint with flake8 26 | run: | 27 | # stop the build if there are Python syntax errors or undefined names 28 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 29 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 30 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 31 | - name: Test with pytest 32 | run: | 33 | pytest 34 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to Contribute 2 | 3 | We'd love to accept your patches and contributions to this project. There are 4 | just a few small guidelines you need to follow. 5 | 6 | ## Contributor License Agreement 7 | 8 | Contributions to this project must be accompanied by a Contributor License 9 | Agreement (CLA). You (or your employer) retain the copyright to your 10 | contribution; this simply gives us permission to use and redistribute your 11 | contributions as part of the project. Head over to 12 | to see your current agreements on file or 13 | to sign a new one. 14 | 15 | You generally only need to submit a CLA once, so if you've already submitted one 16 | (even if it was for a different project), you probably don't need to do it 17 | again. 
18 | 19 | ## Code Reviews 20 | 21 | All submissions, including submissions by project members, require review. We 22 | use GitHub pull requests for this purpose. Consult 23 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more 24 | information on using pull requests. 25 | 26 | ## Community Guidelines 27 | 28 | This project follows 29 | [Google's Open Source Community Guidelines](https://opensource.google/conduct/). 30 | -------------------------------------------------------------------------------- /nuke_from_orbit/commands/update_config_commands.py: -------------------------------------------------------------------------------- 1 | import os 2 | from nuke_from_orbit.utils import nuke_utils 3 | from pathlib import Path 4 | 5 | 6 | def main(**kwargs): 7 | root_dir = Path(__file__).parent.parent.parent 8 | config_dir = root_dir.joinpath("configs") 9 | sa_dir = root_dir.joinpath("credentials") 10 | 11 | config_file = config_dir.joinpath(kwargs["config_file"]) 12 | 13 | # get the user config 14 | user_config = nuke_utils.set_variables(config_file) 15 | 16 | # set gcp service account environment variable 17 | service_account_file = sa_dir.joinpath(user_config["gcp_service_account_file"]).resolve() 18 | os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(service_account_file) 19 | 20 | # set kubernetes context 21 | nuke_utils.set_kubernetes_context(user_config) 22 | 23 | # parse and render kubernetes template files 24 | file_list = nuke_utils.collect_kube_yaml_templates() 25 | nuke_utils.render_kubernetes_templates(user_config, file_list) 26 | 27 | # deploy secrets 28 | nuke_utils.deploy_looker_secret(user_config) 29 | 30 | # deploy locust 31 | nuke_utils.deploy_locust(cycle=True) 32 | 33 | print(f"{nuke_utils.BColors.OKGREEN}Update complete!{nuke_utils.BColors.ENDC}") 34 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/templates/cloudwatch-config.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: cloudwatch-conf 6 | labels: 7 | name: cloudwatch-conf 8 | data: 9 | config.yml: |- 10 | region: {{aws_region}} 11 | metrics: 12 | - aws_namespace: AWS/ELB 13 | aws_metric_name: RequestCount 14 | aws_dimensions: [AvailabilityZone, LoadBalancerName] 15 | aws_dimension_select: 16 | LoadBalancerName: [{{looker_lb_name}}] 17 | aws_statistics: [Sum] 18 | - aws_namespace: AWS/RDS 19 | aws_metric_name: CPUUtilization 20 | aws_dimensions: [DBInstanceIdentifier] 21 | aws_dimension_select: 22 | DBInstanceIdentifier: [{{looker_db_identifier[0]}}] 23 | aws_statistics: [Sum] 24 | - aws_namespace: AWS/RDS 25 | aws_metric_name: ReadIOPS 26 | aws_dimensions: [DBInstanceIdentifier] 27 | aws_dimension_select: 28 | DBInstanceIdentifier: [{{looker_db_identifier[0]}}] 29 | aws_statistics: [Sum] 30 | - aws_namespace: AWS/RDS 31 | aws_metric_name: WriteIOPS 32 | aws_dimensions: [DBInstanceIdentifier] 33 | aws_dimension_select: 34 | DBInstanceIdentifier: [{{looker_db_identifier[0]}}] 35 | aws_statistics: [Sum] 36 | -------------------------------------------------------------------------------- /nuke_from_orbit/cli.py: -------------------------------------------------------------------------------- 1 | import click 2 | from nuke_from_orbit.commands import setup_commands, teardown_commands 3 | from nuke_from_orbit.commands import update_config_commands, update_test_commands 4 | 5 | 6 | @click.group() 7 | @click.version_option() 8 | def nfo(): 9 | pass 10 | 11 | 12 | @nfo.command() 13 | @click.option("--config-file", help="Which config file to use for the setup", required=True) 14 | @click.option("--external", is_flag=True, help="Should external ingress be set up") 15 | @click.option("--persistence/--no-persistence", default=True, help="Should persistent disk setup be skipped?") 16 | def setup(**kwargs): 17 | setup_commands.main(**kwargs) 18 
| 19 | 20 | @nfo.command() 21 | @click.option("--config-file", help="Which config file to use for the setup", required=True) 22 | @click.option("--all", is_flag=True, help="Should teardown include persistent disk") 23 | def teardown(**kwargs): 24 | teardown_commands.main(**kwargs) 25 | 26 | 27 | @nfo.group() 28 | def update(): 29 | pass 30 | 31 | 32 | @update.command() 33 | @click.option("--config-file", help="Which config file to use for the setup", required=True) 34 | def config(**kwargs): 35 | update_config_commands.main(**kwargs) 36 | 37 | 38 | @update.command() 39 | @click.option("-t", "--tag", required=True, help="How to tag the container version") 40 | @click.option("--config-file", help="Which config file to use for the setup", required=True) 41 | def test(**kwargs): 42 | update_test_commands.main(**kwargs) 43 | -------------------------------------------------------------------------------- /nuke_from_orbit/commands/update_test_commands.py: -------------------------------------------------------------------------------- 1 | import os 2 | from nuke_from_orbit.utils import nuke_utils 3 | from pathlib import Path 4 | 5 | 6 | def main(**kwargs): 7 | root_dir = Path(__file__).parent.parent.parent 8 | config_dir = root_dir.joinpath("configs") 9 | sa_dir = root_dir.joinpath("credentials") 10 | 11 | config_file = config_dir.joinpath(kwargs["config_file"]) 12 | tag = kwargs["tag"] 13 | 14 | # get the user config 15 | user_config = nuke_utils.set_variables(config_file, tag) 16 | 17 | # set gcp service account environment variable 18 | service_account_file = sa_dir.joinpath(user_config["gcp_service_account_file"]).resolve() 19 | os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(service_account_file) 20 | 21 | # set kubernetes context 22 | nuke_utils.set_kubernetes_context(user_config) 23 | 24 | # compare the provided tag with the existing tag 25 | nuke_utils.compare_tags(tag) 26 | 27 | # rebuild the test image with the new tag 28 | 
nuke_utils.deploy_test_container_image(user_config) 29 | 30 | # parse and render kubernetes template files 31 | file_list = nuke_utils.collect_kube_yaml_templates() 32 | nuke_utils.render_kubernetes_templates(user_config, file_list) 33 | 34 | # deploy secrets 35 | nuke_utils.deploy_looker_secret(user_config) 36 | 37 | # deploy locust 38 | nuke_utils.deploy_locust(cycle=True) 39 | 40 | print(f"{nuke_utils.BColors.OKGREEN}Update complete!{nuke_utils.BColors.ENDC}") 41 | -------------------------------------------------------------------------------- /nuke_from_orbit/commands/teardown_commands.py: -------------------------------------------------------------------------------- 1 | import os 2 | import concurrent.futures 3 | from nuke_from_orbit.utils import nuke_utils 4 | from pathlib import Path 5 | 6 | 7 | def main(**kwargs): 8 | root_dir = Path(__file__).parent.parent.parent 9 | config_dir = root_dir.joinpath("configs") 10 | sa_dir = root_dir.joinpath("credentials") 11 | 12 | teardown_all = kwargs["all"] 13 | 14 | config_file = config_dir.joinpath(kwargs["config_file"]) 15 | 16 | # get the user credentials 17 | user_config = nuke_utils.set_variables(config_file) 18 | 19 | # set gcp service account environment variable 20 | service_account_file = sa_dir.joinpath(user_config["gcp_service_account_file"]).resolve() 21 | os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(service_account_file) 22 | 23 | # determine if external has been triggered by testing for an ip address 24 | ip = nuke_utils.get_ip_address(user_config) 25 | 26 | # multithread the teardown 27 | with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor: 28 | tasks = [] 29 | tasks.append(executor.submit(nuke_utils.destroy_gke, user_config)) 30 | if ip: 31 | tasks.append(executor.submit(nuke_utils.destroy_ip_address, user_config)) 32 | if teardown_all: 33 | tasks.append(executor.submit(nuke_utils.destroy_persistent_disk, user_config)) 34 | 35 | for future in 
concurrent.futures.as_completed(tasks): 36 | future.result() 37 | 38 | print(f"{nuke_utils.BColors.OKGREEN}Teardown complete!{nuke_utils.BColors.ENDC}") 39 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/templates/loadtest-ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: loadtest-ingress 6 | annotations: 7 | kubernetes.io/ingress.global-static-ip-name: {{loadtest_name}} 8 | networking.gke.io/managed-certificates: loadtest-cert 9 | kubernetes.io/ingress.class: "gce" 10 | spec: 11 | rules: 12 | - host: locust.{{loadtest_dns_domain}} 13 | http: 14 | paths: 15 | - path: /* 16 | pathType: ImplementationSpecific 17 | backend: 18 | service: 19 | name: lm-pod 20 | port: 21 | number: 80 22 | {% if external -%} 23 | - host: locust-metrics.{{loadtest_dns_domain}} 24 | http: 25 | paths: 26 | - path: /* 27 | pathType: ImplementationSpecific 28 | backend: 29 | service: 30 | name: lm-pod 31 | port: 32 | number: 80 33 | {% else -%} 34 | - host: prometheus.{{loadtest_dns_domain}} 35 | http: 36 | paths: 37 | - path: /* 38 | pathType: ImplementationSpecific 39 | backend: 40 | service: 41 | name: prom-pod 42 | port: 43 | number: 80 44 | - host: grafana.{{loadtest_dns_domain}} 45 | http: 46 | paths: 47 | - path: /* 48 | pathType: ImplementationSpecific 49 | backend: 50 | service: 51 | name: grafana 52 | port: 53 | number: 80 54 | {% endif %} 55 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/templates/cloudwatch-controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | annotations: 6 | name: cloudwatch-pod 7 | labels: 8 | app: cloudwatch-pod 9 | spec: 10 | ports: 11 | - port: 9106 12 | targetPort: cw-web 13 | protocol: TCP 14 | name: cw-web 
15 | selector: 16 | app: cloudwatch-pod 17 | type: ClusterIP 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: cloudwatch-pod 23 | labels: 24 | name: cloudwatch-pod 25 | spec: 26 | replicas: 1 27 | selector: 28 | matchLabels: 29 | app: cloudwatch-pod 30 | template: 31 | metadata: 32 | labels: 33 | app: cloudwatch-pod 34 | spec: 35 | containers: 36 | - name: cloudwatch-pod 37 | image: prom/cloudwatch-exporter:cloudwatch_exporter-0.8.0 38 | env: 39 | - name: AWS_ACCESS_KEY_ID 40 | valueFrom: 41 | secretKeyRef: 42 | name: aws-creds 43 | key: aws-access-key 44 | - name: AWS_SECRET_ACCESS_KEY 45 | valueFrom: 46 | secretKeyRef: 47 | name: aws-creds 48 | key: aws-secret-key 49 | - name: AWS_SESSION_TOKEN 50 | valueFrom: 51 | secretKeyRef: 52 | name: aws-creds 53 | key: aws-session-token 54 | optional: true 55 | ports: 56 | - name: cw-web 57 | containerPort: 9106 58 | protocol: TCP 59 | volumeMounts: 60 | - name: cloudwatch-config-volume 61 | mountPath: /config/ 62 | volumes: 63 | - name: cloudwatch-config-volume 64 | configMap: 65 | defaultMode: 420 66 | name: cloudwatch-conf 67 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/templates/locust-worker-controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: "Deployment" 4 | metadata: 5 | name: lw-pod 6 | labels: 7 | name: lw-pod 8 | spec: 9 | replicas: {{loadtest_worker_count}} 10 | selector: 11 | matchLabels: 12 | app: lw-pod 13 | template: 14 | metadata: 15 | labels: 16 | app: lw-pod 17 | spec: 18 | containers: 19 | - name: lw-prod 20 | image: gcr.io/{{gcp_project_id}}/{{loadtest_name}}:{{image_tag}} 21 | resources: 22 | requests: 23 | memory: "2Gi" 24 | cpu: "1" 25 | limits: 26 | memory: "4Gi" 27 | cpu: "2" 28 | env: 29 | - name: LOCUST_MODE 30 | value: worker 31 | - name: LOCUST_MASTER_HOST 32 | value: lm-pod 33 | - name: TARGET_HOST 34 | value: 
dashboard 35 | - name: LOCUST_STEP 36 | value: "{{loadtest_step_load}}" 37 | - name: HOST 38 | valueFrom: 39 | secretKeyRef: 40 | name: website-host 41 | key: host 42 | optional: true 43 | - name: USERNAME 44 | valueFrom: 45 | secretKeyRef: 46 | name: website-creds 47 | key: username 48 | optional: true 49 | - name: PASS 50 | valueFrom: 51 | secretKeyRef: 52 | name: website-creds 53 | key: password 54 | optional: true 55 | - name: CLIENT_ID 56 | valueFrom: 57 | secretKeyRef: 58 | name: api-creds 59 | key: client_id 60 | optional: true 61 | - name: CLIENT_SECRET 62 | valueFrom: 63 | secretKeyRef: 64 | name: api-creds 65 | key: client_secret 66 | optional: true 67 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/templates/grafana-controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | annotations: 6 | beta.cloud.google.com/backend-config: '{"default": "config-default"}' 7 | name: grafana 8 | labels: 9 | app: grafana 10 | spec: 11 | ports: 12 | - port: 80 13 | targetPort: loc-grafana-web 14 | protocol: TCP 15 | name: loc-grafana-web 16 | selector: 17 | app: grafana 18 | type: NodePort 19 | --- 20 | apiVersion: apps/v1 21 | kind: "Deployment" 22 | metadata: 23 | labels: 24 | app: grafana 25 | name: grafana 26 | spec: 27 | replicas: 1 28 | selector: 29 | matchLabels: 30 | app: grafana 31 | template: 32 | metadata: 33 | labels: 34 | app: grafana 35 | spec: 36 | containers: 37 | - image: grafana/grafana:7.1.0 38 | name: grafana 39 | ports: 40 | - containerPort: 3000 41 | name: loc-grafana-web 42 | protocol: TCP 43 | readinessProbe: 44 | httpGet: 45 | path: / 46 | port: loc-grafana-web 47 | initialDelaySeconds: 5 48 | periodSeconds: 5 49 | volumeMounts: 50 | - name: grafana-config-provision 51 | mountPath: /etc/grafana 52 | - name: grafana-datasource-provision 53 | mountPath: /etc/grafana/provisioning/datasources 
54 | - name: grafana-dashboard-provision 55 | mountPath: /etc/grafana/provisioning/dashboards 56 | - name: grafana-dashboard-json 57 | mountPath: /var/lib/grafana/dashboards 58 | volumes: 59 | - name: grafana-config-provision 60 | configMap: 61 | defaultMode: 420 62 | name: grafana-config 63 | 64 | - name: grafana-datasource-provision 65 | configMap: 66 | defaultMode: 420 67 | name: grafana-datasource-conf 68 | 69 | - name: grafana-dashboard-provision 70 | configMap: 71 | defaultMode: 420 72 | name: grafana-dashboard-prov-conf 73 | 74 | - name: grafana-dashboard-json 75 | configMap: 76 | defaultMode: 420 77 | name: grafana-dashboard-json-conf 78 | -------------------------------------------------------------------------------- /nuke_from_orbit/docker-image/locust-tasks/tasks.py: -------------------------------------------------------------------------------- 1 | import os 2 | from realbrowserlocusts import HeadlessChromeLocust 3 | from selenium.webdriver.common.by import By 4 | from selenium.webdriver.support import expected_conditions as EC 5 | from selenium.common.exceptions import TimeoutException 6 | from locust import TaskSet, task, between 7 | 8 | 9 | SITE = "https://jcp-dev.lookersandbox.com" 10 | DASH_ID = 1 11 | 12 | 13 | class LocustUserBehavior(TaskSet): 14 | 15 | def on_start(self): 16 | self.login() 17 | 18 | def on_stop(self): 19 | self.logout() 20 | 21 | def login(self): 22 | self.client.get(SITE + "/login") 23 | 24 | user_entry = os.getenv("USERNAME") 25 | pass_entry = os.getenv("PASS") 26 | username = self.client.find_element_by_id("login-email") 27 | pw = self.client.find_element_by_id("login-password") 28 | box = self.client.find_element_by_class_name("checkbox") 29 | username.clear() 30 | username.send_keys(user_entry.strip()) 31 | pw.clear() 32 | pw.send_keys(pass_entry.strip()) 33 | box.click() 34 | self.client.find_element_by_id("login-submit").click() 35 | 36 | def logout(self): 37 | print("stopping session") 38 | self.client.close() 39 | 40 
| def open_dashboard(self): 41 | script = """ 42 | window.awaitPerformanceObservation("rendered").then(function() { 43 | var dash_render = document.createElement("div"); 44 | dash_render.id = "dash_listener"; 45 | document.body.appendChild(dash_render); 46 | });""" 47 | 48 | try: 49 | self.client.get(f"{SITE}/embed/dashboards/{str(DASH_ID)}") 50 | self.client.execute_script(script) 51 | self.client.wait.until( 52 | EC.presence_of_element_located( 53 | (By.ID, "dash_listener") 54 | ) 55 | ) 56 | except TimeoutException: 57 | print("hit timeout") 58 | 59 | @task(1) 60 | def simple_dashboard_loading(self): 61 | self.client.timed_event_for_locust( 62 | "Load", "dashboard", 63 | self.open_dashboard 64 | ) 65 | 66 | 67 | class LocustUser(HeadlessChromeLocust): 68 | 69 | host = "dashboard load test" 70 | timeout = 30 # in seconds in waitUntil thingies 71 | wait_time = between(2, 5) 72 | screen_width = 1200 73 | screen_height = 600 74 | task_set = LocustUserBehavior 75 | -------------------------------------------------------------------------------- /locust_test_scripts/default_dashboard_loadtest.py: -------------------------------------------------------------------------------- 1 | import os 2 | from realbrowserlocusts import HeadlessChromeLocust 3 | from selenium.webdriver.common.by import By 4 | from selenium.webdriver.support import expected_conditions as EC 5 | from selenium.common.exceptions import TimeoutException 6 | from locust import TaskSet, task, between 7 | 8 | 9 | SITE = os.getenv("HOST") 10 | DASH_ID = 1 # Change this! 
11 | 12 | 13 | class LocustUserBehavior(TaskSet): 14 | 15 | def on_start(self): 16 | self.login() 17 | 18 | def on_stop(self): 19 | self.logout() 20 | 21 | def login(self): 22 | self.client.get(SITE + "/login") 23 | 24 | user_entry = os.getenv("USERNAME") 25 | pass_entry = os.getenv("PASS") 26 | username = self.client.find_element_by_id("login-email") 27 | pw = self.client.find_element_by_id("login-password") 28 | box = self.client.find_element_by_class_name("checkbox") 29 | username.clear() 30 | username.send_keys(user_entry.strip()) 31 | pw.clear() 32 | pw.send_keys(pass_entry.strip()) 33 | box.click() 34 | self.client.find_element_by_id("login-submit").click() 35 | 36 | def logout(self): 37 | print("stopping session") 38 | self.client.close() 39 | 40 | def open_dashboard(self): 41 | script = """ 42 | window.awaitPerformanceObservation("rendered").then(function() { 43 | var dash_render = document.createElement("div"); 44 | dash_render.id = "dash_listener"; 45 | document.body.appendChild(dash_render); 46 | });""" 47 | 48 | try: 49 | self.client.get(f"{SITE}/embed/dashboards/{str(DASH_ID)}") 50 | self.client.execute_script(script) 51 | self.client.wait.until( 52 | EC.presence_of_element_located( 53 | (By.ID, "dash_listener") 54 | ) 55 | ) 56 | except TimeoutException: 57 | print("hit timeout") 58 | 59 | @task(1) 60 | def simple_dashboard_loading(self): 61 | self.client.timed_event_for_locust( 62 | "Load", "dashboard-update", 63 | self.open_dashboard 64 | ) 65 | 66 | 67 | class LocustUser(HeadlessChromeLocust): 68 | 69 | host = "dashboard load test" 70 | timeout = 30 # in seconds in waitUntil thingies 71 | wait_time = between(2, 5) 72 | screen_width = 1200 73 | screen_height = 600 74 | task_set = LocustUserBehavior 75 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/templates/prometheus-controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: 
PersistentVolume 3 | apiVersion: v1 4 | metadata: 5 | name: loadtest-prom-persist 6 | spec: 7 | storageClassName: "nfo-persistent-storage" 8 | capacity: 9 | storage: 50G 10 | accessModes: 11 | - ReadWriteOnce 12 | claimRef: 13 | namespace: default 14 | name: loadtest-prom-persist 15 | gcePersistentDisk: 16 | pdName: {{loadtest_name}} 17 | fsType: ext4 18 | --- 19 | kind: PersistentVolumeClaim 20 | apiVersion: v1 21 | metadata: 22 | name: loadtest-prom-persist 23 | spec: 24 | storageClassName: "nfo-persistent-storage" 25 | accessModes: 26 | - ReadWriteOnce 27 | resources: 28 | requests: 29 | storage: 50G 30 | --- 31 | kind: Service 32 | apiVersion: v1 33 | metadata: 34 | annotations: 35 | beta.cloud.google.com/backend-config: '{"default": "config-default"}' 36 | name: prom-pod 37 | labels: 38 | app: prom-pod 39 | spec: 40 | ports: 41 | - port: 80 42 | targetPort: loc-prom-web 43 | protocol: TCP 44 | name: loc-prom-web 45 | selector: 46 | app: prom-pod 47 | type: NodePort 48 | --- 49 | apiVersion: apps/v1 50 | kind: Deployment 51 | metadata: 52 | name: prom-pod 53 | spec: 54 | replicas: 1 55 | selector: 56 | matchLabels: 57 | app: prom-pod 58 | template: 59 | metadata: 60 | labels: 61 | app: prom-pod 62 | spec: 63 | containers: 64 | - name: prom-pod 65 | image: prom/prometheus:v2.19.1 66 | securityContext: 67 | allowPrivilegeEscalation: false 68 | runAsUser: 0 69 | args: 70 | - "--config.file=/etc/prometheus/prometheus.yml" 71 | - "--storage.tsdb.path=/prometheus/" 72 | ports: 73 | - name: loc-prom-web 74 | containerPort: 9090 75 | protocol: TCP 76 | readinessProbe: 77 | httpGet: 78 | path: /graph 79 | port: loc-prom-web 80 | initialDelaySeconds: 5 81 | periodSeconds: 5 82 | volumeMounts: 83 | - name: prometheus-config-volume 84 | mountPath: /etc/prometheus/ 85 | - name: prometheus-storage-volume 86 | mountPath: /prometheus/ 87 | volumes: 88 | - name: prometheus-config-volume 89 | configMap: 90 | defaultMode: 420 91 | name: prom-conf 92 | 93 | - name: 
This python package provides different Locusts that represent real browsers. This package is a thin wrapper around (parts of) Selenium Webdriver.

Once installed, simply make a locustfile.py as per usual, but instead of inheriting your locust from HttpLocust, instantiate a FirefoxLocust, ChromeLocust, HeadlessChromeLocust or PhantomJSLocust as you wish.

These locusts expose a self.client object, that is actually a selenium.webdriver; it will understand all the usual methods. The client also exposes a self.client.wait object, that is a selenium WebDriverWait. A last method that is exposed by the client is the self.client.timed_event_for_locust method, which can be used to group a number of browser actions together and time them in locust.
It can be especially helpful during test development.
def wrap_for_locust(request_type, name, func, *args, **kwargs):
    """Time *func* and report the outcome to Locust's event hooks.

    :param request_type: the type of request
    :param name: name to be reported to events.request_*.fire
    :param func: callable to be timed and logged
    :return result: whatever *func* returns, when it does not raise
    :raises StopLocust: when *func* raises; the failure is reported to
        Locust first, then the running locust is stopped.
    """
    start = time.time()
    try:
        result = func(*args, **kwargs)
    except Exception as event_exception:
        # Report the failure with the elapsed wall-clock time, then stop
        # this locust rather than letting the raw exception escape.
        elapsed_ms = int((time.time() - start) * 1000)
        events.request_failure.fire(
            request_type=request_type,
            name=name,
            response_time=elapsed_ms,
            response_length=0,
            exception=event_exception
        )
        raise StopLocust()
    elapsed_ms = int((time.time() - start) * 1000)
    events.request_success.fire(
        request_type=request_type,
        name=name,
        response_time=elapsed_ms,
        response_length=0
    )
    return result
import os
import concurrent.futures
from nuke_from_orbit.utils import nuke_utils
from pathlib import Path


def main(**kwargs):
    """Performs the initial end-to-end deployment of the load test stack.

    Expected kwargs:
        config_file (str): name of the YAML config inside the configs/ dir.
        external (bool): whether to expose services externally (IP, ingress,
            oauth secret).
        persistence (bool): whether to provision a persistent disk
            (used by prometheus storage).
    """
    root_dir = Path(__file__).parent.parent.parent
    config_dir = root_dir.joinpath("configs")
    sa_dir = root_dir.joinpath("credentials")

    # set the external boolean
    external = kwargs["external"]

    # set the persistence boolean (stray debug print removed)
    persistence = kwargs["persistence"]

    # setting tag to v1 for initial setup
    tag = "v1"

    config_file = config_dir.joinpath(kwargs["config_file"])

    # get the user credentials
    user_config = nuke_utils.set_variables(config_file, tag, external)

    # set gcp service account environment variable
    service_account_file = sa_dir.joinpath(user_config["gcp_service_account_file"]).resolve()
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(service_account_file)

    # multithread the gke deployment and cloud build for maximum fast
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        tasks = []
        tasks.append(executor.submit(nuke_utils.deploy_gke, user_config))
        tasks.append(executor.submit(nuke_utils.deploy_test_container_image, user_config))
        if external:
            tasks.append(executor.submit(nuke_utils.deploy_ip_address, user_config))
        if persistence:
            tasks.append(executor.submit(nuke_utils.deploy_persistent_disk, user_config))

        # .result() re-raises any exception that occurred in a worker thread
        for future in concurrent.futures.as_completed(tasks):
            future.result()

    # fetch the ip address for final output
    if external:
        ip = nuke_utils.get_ip_address(user_config)
        dns = user_config["loadtest_dns_domain"]
        ip_message = (
            f"Cluster IP is {ip}.\n"
            f"Please create an A Record in your DNS provider for *.{dns} that points to {ip}.\n\n"
        )

    # parse and render kubernetes template files
    file_list = nuke_utils.collect_kube_yaml_templates(external)
    nuke_utils.render_kubernetes_templates(user_config, file_list)

    # set kubernetes context
    nuke_utils.set_kubernetes_context(user_config)

    # deploy secrets
    nuke_utils.deploy_looker_secret(user_config)
    if external:
        nuke_utils.deploy_oauth_secret(user_config)

    # deploy external components if required
    if external:
        nuke_utils.deploy_external()

    # deploy locust
    nuke_utils.deploy_locust()

    # deploy secondary services
    nuke_utils.deploy_secondary()

    kubectl_message = (
        "To configure kubectl access please run the following command:\n"
        f"export GOOGLE_APPLICATION_CREDENTIALS={str(service_account_file)}\n\n"
    )
    port_forward_message = (
        "You can now use `kubectl port-forward` commands. "
        "This will allow you to access your load test services directly. Read more here:\n"
        "https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster\n\n"
        "All services are available on port 80. Find services with `kubectl get svc`. Then forward to a desired port.\n"
        "(e.g. `kubectl port-forward service/lm-pod 8089:80` forwards locust master to your localhost port 8089)."
    )

    print(f"{nuke_utils.BColors.OKGREEN}Setup complete!{nuke_utils.BColors.ENDC}")
    print(f"{nuke_utils.BColors.OKGREEN}{kubectl_message}{nuke_utils.BColors.ENDC}")
    if external:
        print(f"{nuke_utils.BColors.OKGREEN}{ip_message}{nuke_utils.BColors.ENDC}")
    print(f"{nuke_utils.BColors.OKGREEN}{port_forward_message}{nuke_utils.BColors.ENDC}")
class ChromeLocust(RealBrowserLocust):
    """
    Provides a Chrome webdriver that logs GET's and waits to locust
    """
    def __init__(self):
        super(ChromeLocust, self).__init__()
        options = webdriver.ChromeOptions()
        if self.proxy_server:
            _LOGGER.info('Using proxy: ' + self.proxy_server)
            options.add_argument('proxy-server={}'.format(self.proxy_server))
        # NOTE(review): chrome_options= is deprecated in newer Selenium in
        # favor of options=; presumably the pinned selenium version here
        # still accepts it — confirm before upgrading.
        self.client = RealBrowserClient(
            webdriver.Chrome(chrome_options=options),
            self.timeout,
            self.screen_width,
            self.screen_height
        )


class HeadlessChromeLocust(RealBrowserLocust):
    """
    Provides a headless Chrome webdriver that logs GET's and waits to locust
    """
    def __init__(self):
        super(HeadlessChromeLocust, self).__init__()
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        # --no-sandbox and --disable-dev-shm-usage are required for Chrome
        # to run inside a docker container (see package README).
        options.add_argument('--no-sandbox')
        options.add_argument('--disable-dev-shm-usage')
        # NOTE(review): Chrome's window-size switch expects "width,height";
        # the "WxH" form below may be silently ignored — confirm.
        options.add_argument('window-size={}x{}'.format(
            self.screen_width, self.screen_height
        ))
        options.add_argument('disable-gpu')
        if self.proxy_server:
            _LOGGER.info('Using proxy: ' + self.proxy_server)
            options.add_argument('proxy-server={}'.format(self.proxy_server))
        driver = webdriver.Chrome(chrome_options=options)
        _LOGGER.info('Actually trying to run headless Chrome')
        # set_window=False: the window size is passed via the CLI flag above
        # instead of driver.set_window_size (no window to resize headless).
        self.client = RealBrowserClient(
            driver,
            self.timeout,
            self.screen_width,
            self.screen_height,
            set_window=False
        )


class FirefoxLocust(RealBrowserLocust):
    """
    Provides a Firefox webdriver that logs GET's and waits to locust
    """
    def __init__(self):
        super(FirefoxLocust, self).__init__()
        self.client = RealBrowserClient(
            webdriver.Firefox(),
            self.timeout,
            self.screen_width,
            self.screen_height
        )


class PhantomJSLocust(RealBrowserLocust):
    """
    Provides a PhantomJS webdriver that logs GET's and waits to locust
    """
    def __init__(self):
        super(PhantomJSLocust, self).__init__()
        self.client = RealBrowserClient(
            webdriver.PhantomJS(),
            self.timeout,
            self.screen_width,
            self.screen_height
        )
| - name: LOCUST_MODE 56 | value: master 57 | - name: TARGET_HOST 58 | value: dashboard 59 | - name: LOCUST_STEP 60 | value: "{{loadtest_step_load}}" 61 | - name: HOST 62 | valueFrom: 63 | secretKeyRef: 64 | name: website-host 65 | key: host 66 | optional: true 67 | - name: USERNAME 68 | valueFrom: 69 | secretKeyRef: 70 | name: website-creds 71 | key: username 72 | optional: true 73 | - name: PASS 74 | valueFrom: 75 | secretKeyRef: 76 | name: website-creds 77 | key: password 78 | optional: true 79 | - name: CLIENT_ID 80 | valueFrom: 81 | secretKeyRef: 82 | name: api-creds 83 | key: client_id 84 | optional: true 85 | - name: CLIENT_SECRET 86 | valueFrom: 87 | secretKeyRef: 88 | name: api-creds 89 | key: client_secret 90 | optional: true 91 | ports: 92 | - name: loc-master-web 93 | containerPort: 8089 94 | protocol: TCP 95 | - name: loc-master-p1 96 | containerPort: 5557 97 | protocol: TCP 98 | - name: loc-master-p2 99 | containerPort: 5558 100 | protocol: TCP 101 | --- 102 | kind: Service 103 | apiVersion: v1 104 | metadata: 105 | name: le-pod 106 | labels: 107 | app: le-pod 108 | spec: 109 | ports: 110 | - port: 80 111 | targetPort: loc-metrics-web 112 | protocol: TCP 113 | selector: 114 | app: le-pod 115 | type: NodePort 116 | --- 117 | apiVersion: apps/v1 118 | kind: "Deployment" 119 | metadata: 120 | name: le-pod 121 | labels: 122 | name: le-pod 123 | spec: 124 | replicas: 1 125 | selector: 126 | matchLabels: 127 | app: le-pod 128 | template: 129 | metadata: 130 | labels: 131 | app: le-pod 132 | spec: 133 | containers: 134 | - name: le-pod 135 | image: containersol/locust_exporter:v0.3.0 136 | env: 137 | - name: LOCUST_EXPORTER_URI 138 | value: http://lm-pod:80 139 | ports: 140 | - name: loc-metrics-web 141 | containerPort: 9646 142 | protocol: TCP 143 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/cloud_build.py: -------------------------------------------------------------------------------- 1 | import 
def upload_source(project, storage_client):
    """Uploads data for the docker container to cloud storage.

    This makes the data available for cloud build to use. The bucket used
    is the same default that the gcloud builds submit command uses. Returns
    a tuple of the bucket name and object (blob) name.

    Args:
        project (str): GCP project id; determines the staging bucket name.
        storage_client: an authenticated google.cloud.storage.Client.

    Returns:
        tuple: (bucket_name, blob_name) of the uploaded source tarball.
    """

    # A timestamp ensures a unique blob name
    timestamp = int(time.time())
    bucket_name = f"{project}_cloudbuild"
    blob_name = f"source/loadtest-source-{timestamp}.tgz"
    bucket = get_or_create_bucket(bucket_name, storage_client)
    blob = bucket.blob(blob_name)

    # docker-image/ lives at the package root, one level above this module
    root_dir = Path(__file__).parent.parent.resolve()
    source_dir = root_dir.joinpath("docker-image")

    # Creates the tgz file in a temp directory so it auto-deletes
    with tempfile.TemporaryDirectory() as d:
        source_tar = Path(d).joinpath("source.tgz")
        with tarfile.open(source_tar, "w:gz") as tar:
            # arcname="." keeps archive paths relative to its root
            tar.add(source_dir, arcname=".")

        blob.upload_from_filename(source_tar)

    return (bucket_name, blob_name)
def build_status(build_id, project, build_client):
    """Return the current status name of a Cloud Build job.

    Intended for use inside a polling loop; returns 'SUCCESS' once the
    build completes successfully. Typical intermediate values are QUEUED
    and WORKING.
    """
    status_request = cloudbuild.GetBuildRequest(project_id=project, id=build_id)
    current_build = build_client.get_build(request=status_request)

    return current_build.status.name
def deploy_secret(secret_name, secret_data, namespace="default"):
    """Creates or updates a secret with the values specified in the secret_data param.

    This param must be a dict where the key is the entry name and the value is the secret
    value (e.g. `{"username": "dudefella"}'). If the secret already exists then it will
    be patched instead. Returns the secret object that has been created/updated.

    Args:
        secret_name (str): name of the kubernetes Secret object.
        secret_data (dict): entry-name -> secret-value mapping; passed as
            string_data so kubernetes handles the base64 encoding.
        namespace (str): target namespace. Defaults to "default".

    Raises:
        ApiException: any API error other than 409 (already exists).
    """

    # Bug fix: metadata previously hardcoded "default" and ignored the
    # namespace parameter.
    secret_metadata = {"name": secret_name, "namespace": namespace}
    api_version = "v1"
    kind = "Secret"

    config.load_kube_config()
    k8 = client.CoreV1Api()
    body = client.V1Secret(api_version=api_version, kind=kind, metadata=secret_metadata, string_data=secret_data)

    # Try the post request. A 409 means the secret exists, so patch it.
    # Bug fix: any other ApiException is re-raised — previously it was
    # swallowed, leading to an UnboundLocalError on `return resp`.
    try:
        resp = k8.create_namespaced_secret(namespace, body)
    except ApiException as e:
        if e.status != 409:
            raise
        print("Secret already exists! Updating...")
        resp = k8.patch_namespaced_secret(name=secret_name, namespace=namespace, body=body)

    return resp
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or reject 41 | comments, commits, code, wiki edits, issues, and other contributions that are 42 | not aligned to this Code of Conduct, or to ban temporarily or permanently any 43 | contributor for other behaviors that they deem inappropriate, threatening, 44 | offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | This Code of Conduct also applies outside the project spaces when the Project 56 | Steward has a reasonable belief that an individual's behavior may have a 57 | negative impact on the project or its community. 58 | 59 | ## Conflict Resolution 60 | 61 | We do not believe that all conflict is bad; healthy debate and disagreement 62 | often yield positive results. However, it is never okay to be disrespectful or 63 | to engage in behavior that violates the project’s code of conduct. 64 | 65 | If you see someone violating the code of conduct, you are encouraged to address 66 | the behavior directly with those involved. Many issues can be resolved quickly 67 | and easily, and this gives people more control over the outcome of their 68 | dispute. If you are unable to resolve the matter for any reason, or if the 69 | behavior is threatening or harassing, report it. We are dedicated to providing 70 | an environment where participants feel welcome and safe. 71 | 72 | Reports should be directed to *Colin Pistell* colinpistell@google.com, the 73 | Project Steward(s) for *looker-load-testing*. It is the Project Steward’s duty to 74 | receive and address reported violations of the code of conduct. They will then 75 | work with a committee consisting of representatives from the Open Source 76 | Programs Office and the Google Open Source Strategy team. If for any reason you 77 | are uncomfortable reaching out to the Project Steward, please email 78 | opensource@google.com. 79 | 80 | We will investigate every complaint, but you may not receive a direct response. 81 | We will use our discretion in determining when and how to follow up on reported 82 | incidents, which may range from not taking action to permanent expulsion from 83 | the project and project-sponsored spaces. 
We will notify the accused of the 84 | report and provide them an opportunity to discuss it before any action is taken. 85 | The identity of the reporter will be omitted from the details of the report 86 | supplied to the accused. In potentially harmful situations, such as ongoing 87 | harassment or threats to anyone's safety, we may take action without notice. 88 | 89 | ## Attribution 90 | 91 | This Code of Conduct is adapted from the Contributor Covenant, version 1.4, 92 | available at 93 | https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 94 | -------------------------------------------------------------------------------- /tests/test_cli.py: -------------------------------------------------------------------------------- 1 | from nuke_from_orbit import cli 2 | from nuke_from_orbit.commands import setup_commands, teardown_commands 3 | from nuke_from_orbit.commands import update_config_commands, update_test_commands 4 | from click.testing import CliRunner 5 | 6 | 7 | def test_nfo(): 8 | runner = CliRunner() 9 | result = runner.invoke(cli.nfo) 10 | assert result.exit_code == 0 11 | 12 | 13 | def test_setup_no_config_file(mocker): 14 | mocker.patch("nuke_from_orbit.commands.setup_commands.main") 15 | runner = CliRunner() 16 | result = runner.invoke(cli.setup) 17 | assert result.exit_code == 2 18 | 19 | 20 | def test_setup_config_file_only(mocker): 21 | mocker.patch("nuke_from_orbit.commands.setup_commands.main") 22 | runner = CliRunner() 23 | result = runner.invoke(cli.setup, ["--config-file", "test_config.yaml"]) 24 | assert result.exit_code == 0 25 | setup_commands.main.assert_called_with(config_file="test_config.yaml", external=False, persistence=True) 26 | 27 | 28 | def test_setup_no_persist(mocker): 29 | mocker.patch("nuke_from_orbit.commands.setup_commands.main") 30 | runner = CliRunner() 31 | result = runner.invoke(cli.setup, ["--config-file", "test_config.yaml", "--no-persistence"]) 32 | assert result.exit_code == 0 33 | 
setup_commands.main.assert_called_with(config_file="test_config.yaml", external=False, persistence=False) 34 | 35 | 36 | def test_setup_external(mocker): 37 | mocker.patch("nuke_from_orbit.commands.setup_commands.main") 38 | runner = CliRunner() 39 | result = runner.invoke(cli.setup, ["--config-file", "test_config.yaml", "--external"]) 40 | assert result.exit_code == 0 41 | setup_commands.main.assert_called_with(config_file="test_config.yaml", external=True, persistence=True) 42 | 43 | 44 | def test_setup_external_no_persistence(mocker): 45 | mocker.patch("nuke_from_orbit.commands.setup_commands.main") 46 | runner = CliRunner() 47 | result = runner.invoke(cli.setup, ["--config-file", "test_config.yaml", "--external", "--no-persistence"]) 48 | assert result.exit_code == 0 49 | setup_commands.main.assert_called_with(config_file="test_config.yaml", external=True, persistence=False) 50 | 51 | 52 | def test_teardown_no_config_file(mocker): 53 | mocker.patch("nuke_from_orbit.commands.teardown_commands.main") 54 | runner = CliRunner() 55 | result = runner.invoke(cli.teardown) 56 | assert result.exit_code == 2 57 | 58 | 59 | def test_teardown_no_all(mocker): 60 | mocker.patch("nuke_from_orbit.commands.teardown_commands.main") 61 | runner = CliRunner() 62 | result = runner.invoke(cli.teardown, ["--config-file", "test_config.yaml"]) 63 | assert result.exit_code == 0 64 | teardown_commands.main.assert_called_with(config_file="test_config.yaml", all=False) 65 | 66 | 67 | def test_teardown_all(mocker): 68 | mocker.patch("nuke_from_orbit.commands.teardown_commands.main") 69 | runner = CliRunner() 70 | result = runner.invoke(cli.teardown, ["--config-file", "test_config.yaml", "--all"]) 71 | assert result.exit_code == 0 72 | teardown_commands.main.assert_called_with(config_file="test_config.yaml", all=True) 73 | 74 | 75 | def test_update(): 76 | runner = CliRunner() 77 | result = runner.invoke(cli.update) 78 | assert result.exit_code == 0 79 | 80 | 81 | def 
test_update_config_no_config_file(mocker): 82 | mocker.patch("nuke_from_orbit.commands.update_config_commands.main") 83 | runner = CliRunner() 84 | result = runner.invoke(cli.config) 85 | assert result.exit_code == 2 86 | 87 | 88 | def test_update_config(mocker): 89 | mocker.patch("nuke_from_orbit.commands.update_config_commands.main") 90 | runner = CliRunner() 91 | result = runner.invoke(cli.config, ["--config-file", "test_config.yaml"]) 92 | assert result.exit_code == 0 93 | update_config_commands.main.assert_called_with(config_file="test_config.yaml") 94 | 95 | 96 | def test_update_test_no_config(mocker): 97 | mocker.patch("nuke_from_orbit.commands.update_test_commands.main") 98 | runner = CliRunner() 99 | result = runner.invoke(cli.test) 100 | assert result.exit_code == 2 101 | 102 | 103 | def test_update_test_no_tag(mocker): 104 | mocker.patch("nuke_from_orbit.commands.update_test_commands.main") 105 | runner = CliRunner() 106 | result = runner.invoke(cli.test, ["--config-file", "test_config.yaml"]) 107 | assert result.exit_code == 2 108 | 109 | 110 | def test_update_test(mocker): 111 | mocker.patch("nuke_from_orbit.commands.update_test_commands.main") 112 | runner = CliRunner() 113 | result = runner.invoke(cli.test, ["--config-file", "test_config.yaml", "--tag", "v2"]) 114 | assert result.exit_code == 0 115 | update_test_commands.main.assert_called_with(config_file="test_config.yaml", tag="v2") 116 | -------------------------------------------------------------------------------- /tests/test_cloud_build.py: -------------------------------------------------------------------------------- 1 | import tarfile 2 | from pathlib import Path 3 | from nuke_from_orbit.utils import cloud_build 4 | from google.cloud.devtools import cloudbuild 5 | from google.cloud import storage 6 | from google.api_core.exceptions import NotFound 7 | 8 | 9 | ROOT_DIR = Path(__file__).parent.parent.joinpath("nuke_from_orbit/") 10 | 11 | 12 | class MockBucket: 13 | def __init__(self, 
name): 14 | self.name = name 15 | 16 | @staticmethod 17 | def blob(blob_name): 18 | return MockBlob() 19 | 20 | 21 | class MockBlob: 22 | @staticmethod 23 | def upload_from_filename(filename): 24 | return filename 25 | 26 | 27 | class MockStorageClient: 28 | @staticmethod 29 | def get_bucket(): 30 | pass 31 | 32 | @staticmethod 33 | def create_bucket(): 34 | pass 35 | 36 | 37 | class MockBuildClient: 38 | @staticmethod 39 | def create_build(): 40 | pass 41 | 42 | @staticmethod 43 | def get_build(): 44 | pass 45 | 46 | 47 | def test_get_build_client(mocker): 48 | mocker.patch("google.cloud.devtools.cloudbuild.CloudBuildClient") 49 | cloudbuild.CloudBuildClient.return_value = "foo" 50 | client = cloud_build.get_build_client() 51 | assert client == "foo" 52 | 53 | 54 | def test_get_storage_client(mocker): 55 | mocker.patch("google.cloud.storage.Client") 56 | storage.Client.return_value = "bar" 57 | client = cloud_build.get_storage_client() 58 | assert client == "bar" 59 | 60 | 61 | def test_get_or_create_bucket_exists(mocker): 62 | mock_storage_client = MockStorageClient() 63 | mocker.patch.object(mock_storage_client, "get_bucket") 64 | mock_storage_client.get_bucket.return_value = MockBucket(name="foo") 65 | bucket = cloud_build.get_or_create_bucket("foo", mock_storage_client) 66 | assert bucket.__dict__ == MockBucket(name="foo").__dict__ 67 | 68 | 69 | def test_get_or_create_bucket_no_exist(mocker): 70 | mock_storage_client = MockStorageClient() 71 | mocker.patch.object(mock_storage_client, "get_bucket", side_effect=NotFound("mock message")) 72 | mocker.patch.object(mock_storage_client, "create_bucket") 73 | mock_storage_client.create_bucket.return_value = MockBucket(name="bar") 74 | 75 | bucket = cloud_build.get_or_create_bucket("bar", mock_storage_client) 76 | assert bucket.__dict__ == MockBucket(name="bar").__dict__ 77 | 78 | 79 | def test_upload_source_return(mocker): 80 | mocker.patch("time.time").return_value = 1234 81 | 
mocker.patch("nuke_from_orbit.utils.cloud_build.get_or_create_bucket").return_value = MockBucket(name="foo") 82 | mocker.patch("tempfile.TemporaryDirectory").return_value.__enter__.return_value = "tempdirname" 83 | mocker.patch("tarfile.open").return_value.__enter__.return_value.add.return_value = "mocktar" 84 | 85 | resp = cloud_build.upload_source("foo_project", "foo_client") 86 | assert resp == ("foo_project_cloudbuild", "source/loadtest-source-1234.tgz") 87 | 88 | 89 | def test_upload_source_tar_add_call(mocker): 90 | 91 | docker_path = ROOT_DIR.joinpath("docker-image").resolve() 92 | 93 | mocker.patch("time.time").return_value = 1234 94 | mocker.patch("nuke_from_orbit.utils.cloud_build.get_or_create_bucket").return_value = MockBucket(name="foo") 95 | mocker.patch("tempfile.TemporaryDirectory").return_value.__enter__.return_value = "tempdirname" 96 | mocker.patch("tarfile.open").return_value.__enter__.return_value.add 97 | 98 | cloud_build.upload_source("foo_project", "foo_client") 99 | tarfile.open.return_value.__enter__.return_value.add.assert_called_with(docker_path, arcname=".") 100 | 101 | 102 | def test_upload_source_blob_upload_call(mocker): 103 | mocker.patch("time.time").return_value = 1234 104 | mocker.patch("nuke_from_orbit.utils.cloud_build.get_or_create_bucket").return_value = MockBucket(name="foo") 105 | mocker.patch("tempfile.TemporaryDirectory").return_value.__enter__.return_value = "tempdirname" 106 | mocker.patch("tarfile.open").return_value.__enter__.return_value.add.return_value = "mocktar" 107 | 108 | mocker.patch.object(MockBlob, "upload_from_filename") 109 | 110 | cloud_build.upload_source("foo_project", "foo_client") 111 | MockBlob.upload_from_filename.assert_called_with(Path("tempdirname/source.tgz")) 112 | 113 | 114 | def test_build_test_image(mocker): 115 | mock_build_client = MockBuildClient() 116 | mocker.patch.object(mock_build_client, "create_build").return_value.metadata.build.id = "abc123" 117 | 
mocker.patch("google.cloud.devtools.cloudbuild.CreateBuildRequest").return_value = "foo" 118 | 119 | task_id = cloud_build.build_test_image("taco", "cat", "v1", "foo_bucket", "foo_blob", mock_build_client) 120 | 121 | assert task_id == "abc123" 122 | 123 | 124 | def test_build_test_image_request(mocker): 125 | test_build = { 126 | "source": { 127 | "storage_source": { 128 | "bucket": "foo_bucket", 129 | "object_": "foo_blob" 130 | } 131 | }, 132 | "images": ["gcr.io/cat/taco:v1"], 133 | "steps": [ 134 | { 135 | "name": "gcr.io/cloud-builders/docker", 136 | "args": [ 137 | "build", 138 | "--network", 139 | "cloudbuild", 140 | "--no-cache", 141 | "-t", 142 | "gcr.io/cat/taco:v1", 143 | "." 144 | ] 145 | } 146 | ] 147 | } 148 | 149 | mock_build_client = MockBuildClient() 150 | mocker.patch.object(mock_build_client, "create_build").return_value.metadata.build.id = "abc123" 151 | mocker.patch("google.cloud.devtools.cloudbuild.CreateBuildRequest") 152 | 153 | cloud_build.build_test_image("taco", "cat", "v1", "foo_bucket", "foo_blob", mock_build_client) 154 | cloudbuild.CreateBuildRequest.assert_called_with(project_id="cat", build=test_build) 155 | 156 | 157 | def test_build_status(mocker): 158 | mock_build_client = MockBuildClient() 159 | mocker.patch.object(mock_build_client, "get_build").return_value.status.name = "WORKING" 160 | mocker.patch("google.cloud.devtools.cloudbuild.GetBuildRequest").return_value = "foo" 161 | 162 | status = cloud_build.build_status("abc123", "foo_project", mock_build_client) 163 | assert status == "WORKING" 164 | 165 | 166 | def test_build_status_request(mocker): 167 | mock_build_client = MockBuildClient() 168 | mocker.patch.object(mock_build_client, "get_build").return_value.status.name = "WORKING" 169 | mocker.patch("google.cloud.devtools.cloudbuild.GetBuildRequest") 170 | 171 | cloud_build.build_status("abc123", "foo_project", mock_build_client) 172 | cloudbuild.GetBuildRequest.assert_called_with(project_id="foo_project", id="abc123") 173 
| -------------------------------------------------------------------------------- /locust_test_scripts/scenario2.py: -------------------------------------------------------------------------------- 1 | import os 2 | from realbrowserlocusts import ChromeLocust 3 | from selenium.webdriver.common.by import By 4 | from selenium.webdriver.support import expected_conditions as EC 5 | from selenium.common.exceptions import TimeoutException 6 | from locust import TaskSet, task, between 7 | import looker_sdk 8 | import random 9 | 10 | SITE = "https://sharonpbl.looker.com" 11 | ##define dashboard possibilities 12 | dash_id = [ 13 | "/19?Date=90%20days", 14 | "/19?Date=180%20days", 15 | "/19?Date=365%20days", 16 | "/17?Date=90%20days&State=New%20York", 17 | "/17?Date=180%20days&State=New%20York", 18 | "/17?Date=365%20days&qState=New%20York", 19 | "/17?Date=90%20days&State=California", 20 | "/17?Date=180%20days&State=California", 21 | "/17?Date=365%20days&State=California", 22 | "/17?Date=90%20days&State=Texas", 23 | "/17?Date=180%20days&State=Texas", 24 | "/17?Date=365%20days&State=Texas", 25 | "/18?Date=7%20days&Browser=Chrome", 26 | "/18?Date=14%20days&Browser=Chrome", 27 | "/18?Date=30%20days&Browser=Chrome", 28 | "/18?Date=7%20days&Browser=IE", 29 | "/18?Date=14%20days&Browser=IE", 30 | "/18?Date=30%20days&Browser=IE", 31 | "/18?Date=7%20days&Browser=Firefox", 32 | "/18?Date=14%20days&Browser=Firefox", 33 | "/18?Date=30%20days&Browser=Firefox", 34 | "/18?Date=7%20days&Browser=Safari", 35 | "/18?Date=14%20days&Browser=Safari", 36 | "/18?Date=30%20days&Browser=Safari" 37 | ] 38 | #for initial sso dashboard 39 | DASH_PATH= random.choice(dash_id) 40 | 41 | #explore path possibilities 42 | explore_id = [ 43 | "thelook/order_items?toggle=dat,fil,vis&qid=aLRS5ixI0q3G6Rk1yW6L99", 44 | "thelook/order_items?toggle=fil,vis,vse&qid=qry6EID7TMjxxBlfxFsCmX", 45 | "thelook/order_items?toggle=fil,vis&qid=yVX99oFCL5MvPOq2JAiuqT", 46 | "thelook/sessions?qid=EJZ99yHQkFBYBnDER9vyj1" 47 | ] 
48 | 49 | ##create unique user_id for each web session 50 | users=random.randint(0,100000) 51 | EMBED_USER_ID = str(users) 52 | 53 | ##not currently using - how to insert random value in user_attribute brand? 54 | brand = ["Calvin Klein", "Carhartt", "Allegra K","Dockers","Levi's"] 55 | 56 | sdk=looker_sdk.init31() 57 | 58 | class LocustUserBehavior(TaskSet): 59 | 60 | #automatically called by locust 61 | def on_start(self): 62 | #create sso embed user url via API 63 | embed_params =looker_sdk.models.EmbedSsoParams( 64 | target_url=SITE+"/dashboards"+f"{random.choice(dash_id)}", 65 | session_length=10000, 66 | force_logout_login=True, 67 | external_user_id= f"{str(random.randint(0,100000))}", 68 | first_name= "Embed", 69 | last_name= "User", 70 | permissions=["access_data", "see_looks", "see_user_dashboards", "see_drill_overlay","explore"], 71 | models=["thelook"], 72 | group_ids=[12058, 11], 73 | user_attributes= {"brand":f"{random.choice(brand)}"} 74 | ) 75 | 76 | #create embed user+ get embed user credentials 77 | self.embed_url=sdk.create_sso_embed_url(embed_params) 78 | #run sso url -establish valid embed session 79 | self.client.get(self.embed_url.url) 80 | # self.login() 81 | 82 | #automatically called by locust 83 | def on_stop(self): 84 | self.logout() 85 | 86 | def login(self): 87 | self.client.get(SITE + "/login") 88 | 89 | user_entry = os.getenv("USERNAME") 90 | pass_entry = os.getenv("PASS") 91 | username = self.client.find_element_by_id("login-email") 92 | pw = self.client.find_element_by_id("login-password") 93 | box = self.client.find_element_by_class_name("checkbox") 94 | username.clear() 95 | username.send_keys(user_entry) 96 | pw.clear() 97 | pw.send_keys(pass_entry) 98 | box.click() 99 | self.client.find_element_by_id("login-submit").click() 100 | 101 | def logout(self): 102 | print("stopping session") 103 | 104 | def open_dashboard(self): 105 | script = """ 106 | window.awaitPerformanceObservation("rendered").then(function() { 107 | var 
dash_render = document.createElement("div"); 108 | dash_render.id = "dash_listener"; 109 | document.body.appendChild(dash_render); 110 | });""" 111 | 112 | try: 113 | self.client.get(f"{SITE}/embed/dashboards{str(random.choice(dash_id))}") 114 | 115 | self.client.execute_script(script) 116 | 117 | self.client.wait.until( 118 | EC.presence_of_element_located( 119 | (By.ID, "dash_listener") 120 | ) 121 | ) 122 | except TimeoutException: 123 | print("hit timeout") 124 | 125 | def open_sso_dashboard(self): 126 | script = """ 127 | window.awaitPerformanceObservation("rendered").then(function() { 128 | var dash_render = document.createElement("div"); 129 | dash_render.id = "dash_listener"; 130 | document.body.appendChild(dash_render); 131 | });""" 132 | 133 | try: 134 | self.client.get(self.embed_url.url) 135 | self.client.execute_script(script) 136 | self.client.wait.until( 137 | EC.presence_of_element_located( 138 | (By.ID, "dash_listener") 139 | ) 140 | ) 141 | 142 | except TimeoutException: 143 | print("hit timeout") 144 | 145 | def open_explore(self): 146 | script = """ 147 | window.awaitPerformanceObservation("rendered").then(function() { 148 | var dash_render = document.createElement("div"); 149 | dash_render.id = "finished"; 150 | document.body.appendChild(dash_render); 151 | });""" 152 | 153 | try: 154 | self.client.get(f"{SITE}/embed/explore/{str(random.choice(explore_id))}") 155 | 156 | self.client.execute_script(script) 157 | 158 | self.client.wait.until( 159 | EC.presence_of_element_located( 160 | (By.ID, "finished") 161 | ) 162 | ) 163 | except TimeoutException: 164 | print("hit timeout") 165 | 166 | #tasks 167 | #@tasks(1) - create sso url via api; load /login/embed 168 | #@task(80) - load /embed/dashboards/(id) 169 | #@task(20) - load /embed/explore/...
170 | #(leave out for now) @task() - some background API activity 171 | 172 | #open dashboard 173 | @task(20) 174 | def embed_dashboard_loading(self): 175 | self.client.timed_event_for_locust( 176 | "Load", "embed dashboard", 177 | self.open_dashboard 178 | ) 179 | ##open sso dashboard 180 | @task(1) 181 | def sso_dashboard_loading(self): 182 | self.client.timed_event_for_locust( 183 | "Load", "sso dashboard", 184 | self.open_sso_dashboard, 185 | # self.open_dashboard  # NOTE(review): stray extra arg removed from the call - it would be forwarded to open_sso_dashboard (which takes no args) and raise TypeError 186 | ) 187 | ##open explore 188 | @task(4) 189 | def embed_explore_loading(self): 190 | self.client.timed_event_for_locust( 191 | "Load", "explore", 192 | self.open_explore 193 | ) 194 | 195 | class LocustUser(ChromeLocust): 196 | 197 | host = "dashboard load test" 198 | timeout = 15 # in seconds in waitUntil thingies 199 | wait_time = between(5, 10) 200 | screen_width = 1200 201 | screen_height = 600 202 | task_set = LocustUserBehavior 203 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/gke_cluster.py: -------------------------------------------------------------------------------- 1 | import googleapiclient.discovery 2 | import subprocess 3 | from google.cloud import container_v1 4 | 5 | 6 | def get_gke_client(credentials=None): 7 | """Creates and returns a gke client. Credentials only needed 8 | if the Auth environment variable is not set. 9 | """ 10 | 11 | client = container_v1.ClusterManagerClient(credentials=credentials) 12 | 13 | return client 14 | 15 | 16 | def get_compute_client(credentials=None): 17 | """Creates and returns a compute client. Credentials only needed if the Auth 18 | environment variable is not set. 19 | """ 20 | 21 | client = googleapiclient.discovery.build("compute", "v1", credentials=credentials) 22 | 23 | return client 24 | 25 | 26 | def create_zonal_disk(name, project, zone, client): 27 | """Creates a persistant disk in the specified zone. This is suitable for use as 28 | a persistant volume for Prometheus data.
Returns an operation id that can be 29 | used to track the job progress. 30 | """ 31 | 32 | # https://cloud.google.com/compute/docs/reference/rest/v1/disks/get 33 | 34 | body = {"name": name, "sizeGb": 50} 35 | zonal_disk = client.disks() 36 | request = zonal_disk.insert(project=project, zone=zone, body=body) 37 | response = request.execute() 38 | 39 | return response["name"] 40 | 41 | 42 | def fetch_zonal_disk(name, project, zone, client): 43 | """Attempts to fetch a specified persistant disk. Accepts the name of the 44 | disk, the project and zone. If the disk exists the name is returned. 45 | """ 46 | 47 | zonal_disk = client.disks() 48 | request = zonal_disk.get(project=project, zone=zone, disk=name) 49 | response = request.execute() 50 | 51 | return response 52 | 53 | 54 | def delete_zonal_disk(name, project, zone, client): 55 | """Deletes a persistant disk in the specified zone. Returns an operation that 56 | can be used to track the job progress. 57 | """ 58 | 59 | # https://cloud.google.com/compute/docs/reference/rest/v1/disks/get 60 | 61 | zonal_disk = client.disks() 62 | request = zonal_disk.delete(project=project, zone=zone, disk=name) 63 | response = request.execute() 64 | 65 | return response["name"] 66 | 67 | 68 | def compute_zonal_task_status(task_name, project, zone, client): 69 | """Accepts a compute zonal operation name and requests the status of the operation. 70 | Returns a string of the job status. Possible values are 'PENDING', 'RUNNING' 71 | and 'DONE' 72 | """ 73 | 74 | # https://cloud.google.com/compute/docs/reference/rest/v1/globalOperations/get 75 | zonal_operations = client.zoneOperations() 76 | request = zonal_operations.get(project=project, zone=zone, operation=task_name) 77 | response = request.execute() 78 | 79 | return response["status"] 80 | 81 | 82 | def create_global_ip(name, project, client): 83 | """Creates a global ip address suitable for use with GKE ingress controller. 
84 | Returns a job ID that can be used to track the status of the address creation. 85 | """ 86 | 87 | # https://cloud.google.com/compute/docs/reference/rest/v1/globalAddresses/insert 88 | body = {"name": name} 89 | global_address = client.globalAddresses() 90 | request = global_address.insert(project=project, body=body) 91 | response = request.execute() 92 | 93 | return response["name"] 94 | 95 | 96 | def delete_global_ip(name, project, client): 97 | """Deletes the specified global ip address. Returns a job ID that can be used to 98 | track the status of the deletion request. 99 | """ 100 | 101 | # https://cloud.google.com/compute/docs/reference/rest/v1/globalAddresses/delete 102 | global_address = client.globalAddresses() 103 | request = global_address.delete(project=project, address=name) 104 | response = request.execute() 105 | 106 | return response["name"] 107 | 108 | 109 | def compute_task_status(task_name, project, client): 110 | """Accepts a compute operation name and requests the status of the operation. 111 | Returns a string of the job status. Possible values are 'PENDING', 'RUNNING' 112 | and 'DONE' 113 | """ 114 | 115 | # https://cloud.google.com/compute/docs/reference/rest/v1/globalOperations/get 116 | global_operations = client.globalOperations() 117 | request = global_operations.get(project=project, operation=task_name) 118 | response = request.execute() 119 | 120 | return response["status"] 121 | 122 | 123 | def fetch_ip_address(name, project, client): 124 | """Fetches the actual IP address once the global address creation has successfully 125 | completed. Accepts the name provided in the initial request and returns a string of 126 | the address. 
127 | """ 128 | 129 | global_address = client.globalAddresses() 130 | request = global_address.get(project=project, address=name) 131 | response = request.execute() 132 | 133 | return response["address"] 134 | 135 | 136 | def setup_gke_cluster(name, project, zone, node_count, machine_type, client): 137 | """Creates a GKE compute cluster. Returns a job ID that can be used to track 138 | the status of the cluster creation. 139 | """ 140 | 141 | parent = f"projects/{project}/locations/{zone}" 142 | 143 | # https://googleapis.dev/python/container/latest/container_v1/types.html 144 | # https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters#Cluster 145 | cluster = { 146 | "name": name, 147 | "release_channel": { 148 | "channel": "REGULAR" 149 | }, 150 | "node_pools": [ 151 | { 152 | "name": "load-test-pool", 153 | "initial_node_count": node_count, 154 | "config": { 155 | "machine_type": machine_type, 156 | "oauth_scopes": [ 157 | "https://www.googleapis.com/auth/cloud-platform" 158 | ] 159 | } 160 | } 161 | ] 162 | } 163 | 164 | request = container_v1.types.CreateClusterRequest(parent=parent, cluster=cluster) 165 | task = client.create_cluster(request=request) 166 | 167 | return task.name 168 | 169 | 170 | def delete_gke_cluster(name, project, zone, client): 171 | """Deletes a GKE cluster. Returns a job ID that can be used to track the 172 | status of the cluster deletion. 173 | """ 174 | 175 | cluster_name = f"projects/{project}/locations/{zone}/clusters/{name}" 176 | request = container_v1.types.DeleteClusterRequest(name=cluster_name) 177 | task = client.delete_cluster(request=request) 178 | 179 | return task.name 180 | 181 | 182 | def gke_task_status(name, project, zone, client): 183 | """Returns details on the status of a specified GKE job. This can be used to 184 | poll for the results of a job. The returned object contains two useful elements: 185 | 1. `status.name` which returns a string of the current status. 
When this returns 186 | 'DONE' then the job has completed. 187 | 2. `detail` which returns a string with some extra context. 188 | """ 189 | 190 | task_name = f"projects/{project}/locations/{zone}/operations/{name}" 191 | request = container_v1.types.GetOperationRequest(name=task_name) 192 | task_status = client.get_operation(request=request) 193 | 194 | return task_status 195 | 196 | 197 | def setup_cluster_auth_file(name, project, zone, client): 198 | """Creates a kubeconfig entry suitable for use to authorize calls to the 199 | kubernetes API using GCP service account credentials. While this file sets up 200 | the use of GCP service accounts the user will still need to set the 201 | GOOGLE_APPLICATION_CREDENTIALS environment variable. Returns a string with the 202 | name of the context to use. 203 | """ 204 | 205 | cluster_name = f"projects/{project}/locations/{zone}/clusters/{name}" 206 | request = container_v1.types.GetClusterRequest(name=cluster_name) 207 | cluster = client.get_cluster(request=request) 208 | 209 | # We'll need both the CA Certificate and the correct endpoint (IP address) 210 | ca_cert = cluster.master_auth.cluster_ca_certificate 211 | endpoint = cluster.endpoint 212 | 213 | # Set the name for the entries 214 | entry_name = f"gke_{project}-{zone}-{name}" 215 | 216 | cluster_command = [ 217 | "kubectl", 218 | "config", 219 | "set-cluster", 220 | entry_name, 221 | "--server", 222 | f"https://{endpoint}" 223 | ] 224 | 225 | cluster_ca_command = [ 226 | "kubectl", 227 | "config", 228 | "set", 229 | f"clusters.{entry_name}.certificate-authority-data", 230 | ca_cert 231 | ] 232 | 233 | user_command = [ 234 | "kubectl", 235 | "config", 236 | "set-credentials", 237 | entry_name, 238 | "--auth-provider", 239 | "gcp" 240 | ] 241 | 242 | context_command = [ 243 | "kubectl", 244 | "config", 245 | "set-context", 246 | entry_name, 247 | "--cluster", 248 | entry_name, 249 | "--user", 250 | entry_name 251 | ] 252 | 253 | subprocess.run(cluster_command) 254 | 
subprocess.run(cluster_ca_command) 255 | subprocess.run(user_command) 256 | subprocess.run(context_command) 257 | 258 | return entry_name 259 | 260 | 261 | def teardown_cluster_auth_file(name, project, zone): 262 | """Removes a kubeconfig entry for the cluster.""" 263 | 264 | entry_name = f"gke_{project}-{zone}-{name}" 265 | 266 | cluster_command = [ 267 | "kubectl", 268 | "config", 269 | "delete-cluster", 270 | entry_name 271 | ] 272 | 273 | user_command = [ 274 | "kubectl", 275 | "config", 276 | "delete-credentials", 277 | entry_name 278 | ] 279 | 280 | context_command = [ 281 | "kubectl", 282 | "config", 283 | "delete-context", 284 | entry_name, 285 | ] 286 | 287 | subprocess.run(cluster_command) 288 | subprocess.run(user_command) 289 | subprocess.run(context_command) 290 | -------------------------------------------------------------------------------- /locust_test_scripts/multiple_content.py: -------------------------------------------------------------------------------- 1 | import os 2 | from realbrowserlocusts import HeadlessChromeLocust 3 | from selenium.webdriver.common.by import By 4 | from selenium.webdriver.support import expected_conditions as EC 5 | from selenium.webdriver.support.ui import WebDriverWait 6 | from selenium.common.exceptions import TimeoutException 7 | from locust import TaskSet, task, between 8 | 9 | # Change this 10 | SITE = "https://your.looker.com" 11 | 12 | 13 | class LocustUserBehavior(TaskSet): 14 | 15 | def on_start(self): 16 | self.login() 17 | 18 | def on_stop(self): 19 | self.logout() 20 | 21 | def login(self): 22 | self.client.get(SITE + "/login/email") 23 | WebDriverWait(self.client, 10).until( 24 | EC.element_to_be_clickable((By.XPATH, "//input[@name='remember']")) 25 | ) 26 | box = self.client.find_element_by_xpath("//input[@name='remember']") 27 | box.click() 28 | # These environment variables are provided by Kubernetes secrets 29 | user_entry = os.getenv("USERNAME") 30 | pass_entry = os.getenv("PASS") 31 | username = 
self.client.find_element_by_id("login-email") 32 | pw = self.client.find_element_by_id("login-password") 33 | username.clear() 34 | username.send_keys(user_entry.strip()) 35 | pw.clear() 36 | pw.send_keys(pass_entry.strip()) 37 | self.client.find_element_by_id("login-submit").click() 38 | 39 | def logout(self): 40 | print("stopping session") 41 | 42 | def open(self, content_id, stem='dashboards'): 43 | """ 44 | The script below is the appropriate way to detect when content has 45 | finished rendering in Looker. The injected Javascript makes Looker 46 | create a DOM element when finished, which Locust uses Selenium to 47 | detect and wait for. 48 | """ 49 | script = """ 50 | window.awaitPerformanceObservation("rendered").then(function() { 51 | var render = document.createElement("div"); 52 | render.id = "finished"; 53 | document.body.appendChild(render); 54 | });""" 55 | 56 | try: 57 | self.client.get(f"{SITE}/embed/{stem}/{content_id}") 58 | self.client.execute_script(script) 59 | self.client.wait.until( 60 | EC.presence_of_element_located( 61 | (By.ID, "finished") 62 | ) 63 | ) 64 | except TimeoutException: 65 | print("hit timeout") 66 | 67 | """ 68 | Each of these methods defines a load testing task to run randomly. 69 | The decorator @task(n) indicates the relative weight of the task 70 | versus all other tasks. If you decorate a method with @task(5) 71 | it will be five times more likely to run than a method decorated 72 | with @task(1). 73 | 74 | The name of the methods don't matter, but they do need to be unique. 75 | This is why there's a sequential ID appended to each method name. 76 | 77 | The tasks below result in one of about 27 different pieces of content 78 | being randomly loaded each time a Locust user executes this TaskSet. 
79 | 80 | """ 81 | 82 | # Operations 83 | @task(1) 84 | def dashboard_1(self): 85 | id = "683" 86 | self.client.timed_event_for_locust( 87 | # The first and second parameters are 88 | # used to tag and identify tasks 89 | "Operations", f"dashboard {id}", 90 | self.open, content_id=id 91 | ) 92 | 93 | @task(1) 94 | def dashboard_2(self): 95 | id = "729" 96 | self.client.timed_event_for_locust( 97 | "Operations", f"dashboard {id}", 98 | # stem can be used to override the default 99 | # 'dashboards' route in the self.open method 100 | self.open, content_id=id, stem='dashboards-next' 101 | ) 102 | 103 | @task(1) 104 | def dashboard_3(self): 105 | id = "927" 106 | self.client.timed_event_for_locust( 107 | "Operations", f"dashboard {id}", 108 | self.open, content_id=id, stem='dashboards-next' 109 | ) 110 | 111 | @task(1) 112 | def dashboard_4(self): 113 | id = "623" 114 | self.client.timed_event_for_locust( 115 | "Operations", f"dashboard {id}", 116 | self.open, content_id=id 117 | ) 118 | 119 | @task(1) 120 | def dashboard_5(self): 121 | id = "797" 122 | self.client.timed_event_for_locust( 123 | "Operations", f"dashboard {id}", 124 | self.open, content_id=id, stem='dashboards-next' 125 | ) 126 | 127 | @task(1) 128 | def dashboard_6(self): 129 | id = "858" 130 | self.client.timed_event_for_locust( 131 | "Operations", f"dashboard {id}", 132 | self.open, content_id=id, stem='dashboards-next' 133 | ) 134 | 135 | @task(1) 136 | def dashboard_7(self): 137 | id = "944" 138 | self.client.timed_event_for_locust( 139 | "Operations", f"dashboard {id}", 140 | self.open, content_id=id 141 | ) 142 | 143 | @task(1) 144 | def dashboard_8(self): 145 | id = "156" 146 | self.client.timed_event_for_locust( 147 | "Operations", f"dashboard {id}", 148 | self.open, content_id=id 149 | ) 150 | 151 | @task(1) 152 | def dashboard_9(self): 153 | id = "943" 154 | self.client.timed_event_for_locust( 155 | "Operations", f"dashboard {id}", 156 | self.open, content_id=id, stem='dashboards-next' 157 | ) 
158 | 159 | @task(1) 160 | def dashboard_10(self): 161 | id = "1014" 162 | self.client.timed_event_for_locust( 163 | "Operations", f"dashboard {id}", 164 | self.open, content_id=id, stem='dashboards-next' 165 | ) 166 | 167 | @task(1) 168 | def dashboard_11(self): 169 | id = "702" 170 | self.client.timed_event_for_locust( 171 | "Operations", f"dashboard {id}", 172 | self.open, content_id=id 173 | ) 174 | 175 | @task(1) 176 | def dashboard_12(self): 177 | id = "517" 178 | self.client.timed_event_for_locust( 179 | "Operations", f"dashboard {id}", 180 | self.open, content_id=id 181 | ) 182 | 183 | @task(1) 184 | def dashboard_13(self): 185 | id = "693" 186 | self.client.timed_event_for_locust( 187 | "Operations", f"dashboard {id}", 188 | self.open, content_id=id 189 | ) 190 | 191 | # Finance 192 | @task(1) 193 | def dashboard_14(self): 194 | id = "1180" 195 | self.client.timed_event_for_locust( 196 | "Finance", f"dashboard {id}", 197 | self.open, content_id=id 198 | ) 199 | 200 | @task(1) 201 | def look_1(self): 202 | id = "5818" 203 | self.client.timed_event_for_locust( 204 | "Finance", f"look {id}", 205 | self.open, content_id=id, stem='looks' 206 | ) 207 | 208 | @task(1) 209 | def look_2(self): 210 | id = "6448" 211 | self.client.timed_event_for_locust( 212 | "Finance", f"look {id}", 213 | self.open, content_id=id, stem='looks' 214 | ) 215 | 216 | @task(1) 217 | def look_3(self): 218 | id = "6592" 219 | self.client.timed_event_for_locust( 220 | "Finance", f"look {id}", 221 | self.open, content_id=id, stem='looks' 222 | ) 223 | 224 | @task(1) 225 | def look_4(self): 226 | id = "5817" 227 | self.client.timed_event_for_locust( 228 | "Finance", f"look {id}", 229 | self.open, content_id=id, stem='looks' 230 | ) 231 | 232 | @task(1) 233 | def look_5(self): 234 | id = "4488" 235 | self.client.timed_event_for_locust( 236 | "Finance", f"look {id}", 237 | self.open, content_id=id, stem='looks' 238 | ) 239 | 240 | @task(1) 241 | def look_6(self): 242 | id = "2857" 243 | 
self.client.timed_event_for_locust( 244 | "Finance", f"look {id}", 245 | self.open, content_id=id, stem='looks' 246 | ) 247 | 248 | # Marketing 249 | @task(1) 250 | def dashboard_15(self): 251 | id = "1114" 252 | self.client.timed_event_for_locust( 253 | "Marketing", f"dashboard {id}", 254 | self.open, content_id=id 255 | ) 256 | 257 | @task(1) 258 | def dashboard_16(self): 259 | id = "1115" 260 | self.client.timed_event_for_locust( 261 | "Marketing", f"dashboard {id}", 262 | self.open, content_id=id 263 | ) 264 | 265 | @task(1) 266 | def dashboard_17(self): 267 | id = "1126" 268 | self.client.timed_event_for_locust( 269 | "Marketing", f"dashboard {id}", 270 | self.open, content_id=id 271 | ) 272 | 273 | @task(1) 274 | def dashboard_18(self): 275 | id = "1119" 276 | self.client.timed_event_for_locust( 277 | "Marketing", f"dashboard {id}", 278 | self.open, content_id=id 279 | ) 280 | 281 | @task(1) 282 | def dashboard_19(self): 283 | id = "975" 284 | self.client.timed_event_for_locust( 285 | "Marketing", f"dashboard {id}", 286 | self.open, content_id=id 287 | ) 288 | 289 | # HR 290 | @task(1) 291 | def dashboard_20(self): 292 | id = "1087" 293 | self.client.timed_event_for_locust( 294 | "HR", f"dashboard {id}", 295 | self.open, content_id=id 296 | ) 297 | 298 | @task(1) 299 | def dashboard_21(self): 300 | id = "1176" 301 | self.client.timed_event_for_locust( 302 | "HR", f"dashboard {id}", 303 | self.open, content_id=id 304 | ) 305 | 306 | 307 | class LocustUser(HeadlessChromeLocust): 308 | host = "dashboard load test" 309 | timeout = 30 # in seconds in waitUntil thingies 310 | wait_time = between(2, 5) 311 | screen_width = 1200 312 | screen_height = 600 313 | task_set = LocustUserBehavior 314 | 315 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Introduction 2 | 3 | NFO is a framework for load testing Looker instances. 
It is designed to be easy to set up, easy to 4 | use, and approachable for just about anybody who wants to answer questions about how their infrastructure is performing. 5 | 6 | NFO is designed to perform either API-based or Browser-based load testing via distributed Kubernetes clusters in the 7 | cloud. 8 | 9 | NFO is a Python application - it makes use of the battle-tested [Locust.io](https://locust.io/) framework and adds 10 | the ability to run browser-based tests in a containerized/orchestrated environment (i.e. kubernetes). 11 | 12 | ## Status and Support 13 | 14 | NFO is NOT supported or warranted by Looker in any way. Please do not contact Looker support for issues with NFO. 15 | 16 | ## Why browser-based tests? 17 | 18 | Browser-based load testing is a relatively new concept - in the past the expense of running enough browsers to 19 | stress-test an instance was cost-prohibitive. This challenge has been mitigated by the economies of scale that cloud 20 | computing provides. 21 | 22 | Browser tests offer several clear advantages. First, the writing of tests is significantly easier - simply use browser 23 | automation tools like selenium to dictate what you want to happen in the browser - no need to simulate the same process 24 | with dozens (if not hundreds) of API calls. For example, a Looker dashboard load is comprised of many, many different API 25 | calls... but with browser based testing you simply load the dashboard URL and that's it. 26 | 27 | Second, there are some elements of Looker performance that cannot be captured by API tests. For example, loading a 28 | dashboard requires the final graphics be rendered in the page. Browser-based tests can capture this time. 29 | 30 | There is a trade-off - while cloud infrastructure makes browser-based testing affordable it is still more expensive than 31 | API-based testing.
If you can frame your tests as pure http/API calls then you will be able to generate far more 32 | simulated traffic at a much lower price. NFO can handle both types of tests (and combinations of them within the same 33 | test script!) 34 | 35 | ## A note on scaling 36 | 37 | Running headless browsers is a CPU-bound process. For this reason, if you are trying to time dashboard load times with 38 | Selenium I strongly recommend using CPU-optimized machine types for your nodes. The example below uses the CPU-optimized 39 | C2 machine types for demonstration purposes. A reading of the Kubernetes deployment config files reveals that the worker 40 | pods request 1 core. A good rule of thumb is each worker can simulate 2 real browsers with 1 core, so if you wanted to 41 | simulate 20 browsers you will need approximately >10 cores in your cluster. (slightly more to handle some overhead - 42 | e.g. The master pod itself as well as Prometheus and Grafana if you want the dashboards) Attempting to run workers with 43 | less CPU will result in degraded dashboard loading performance, leading to incorrect test results, as well as risk of 44 | pod eviction. 45 | 46 | One more note: For these tests, one browser does not equal one user - each browser can make a new dashboard request 47 | every second or two, meaning one browser can simulate the traffic of several human users. If you see an RPS value of 20 48 | during your tests, that means 20 dashboard loads per second, or 1200/minute. Assuming a person spends about 30 seconds 49 | on a dashboard this is equivalent to approximately 600 concurrent users. 50 | 51 | ## Prerequisites 52 | 53 | First, you will need access to GCP and have Editor access to a GCP Project 54 | 55 | You will need a working version of python 3.8+. I would recommend making use of [pyenv](https://github.com/pyenv/pyenv) to manage your Python 56 | installations. 
57 | 58 | For the moment you will need to use developer installation workflows (this will change soon). This means you will need 59 | [poetry](https://python-poetry.org/docs/) to handle the installation. 60 | 61 | You will also need [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), the command line tool for interacting with kubernetes 62 | 63 | 64 | Finally, in order to access your NFO instance via the web you will need to own or have control of a registered domain. You should 65 | have the ability to create an A-Record from that domain's DNS. 66 | 67 | ## Before you begin 68 | 69 | The following steps need to be completed before you begin to deploy the load tester. They should only need to be done 70 | one time per project - during subsequent deployments you won't need to repeat these steps (unless you delete any of the 71 | assets of course) 72 | 73 | ### From the GCP Console 74 | 75 | 1. Create a suitable GCP Project. I recommend creating a new unique project for the load testing tool. We don’t want to run the risk of trampling other projects you may be working on. 76 | 2. Ensure the correct APIs have been enabled. From the GCP console open a [cloud shell](https://cloud.google.com/shell) and run the following command: 77 | 78 | $ gcloud services enable \ 79 | cloudbuild.googleapis.com \ 80 | compute.googleapis.com \ 81 | container.googleapis.com \ 82 | containeranalysis.googleapis.com \ 83 | containerregistry.googleapis.com \ 84 | iap.googleapis.com 85 | 86 | 3. Create a service account in your new project: 87 | * Navigate to IAM-Admin -> Service Accounts, click Create Service Account at top of page. 88 | * Follow the instructions to create a service account: 89 | * On the second page when prompted for roles you can give it Project Editor. 90 | * On the third page you do not need to grant any user access to the Service Account. 
91 | * Back on the main page find your new service account and under the “Actions” menu choose “Create Key”: 92 | * Select JSON key and a credentials json file will be downloaded to your system. 93 | 94 | > ⚠ **WARNING: This file should be considered sensitive and should be secured appropriately.** 95 | 96 | > **Note:** These next steps are only required if you plan on using External Mode to access the load tester via the web 97 | 98 | 4. Assign IAP WebApp User Permissions to yourself: 99 | * Navigate to IAM-Admin -> IAM 100 | * Find yourself in the list of users and accounts (i.e. the email address you want to use to log in to the tool). Click the Edit icon on the right. 101 | * Click ‘Add Another Role’ and select ‘IAP-secured Web App User` 102 | 5. Create your OAuth Consent Page: 103 | * Navigate to API & Services -> Oauth Consent Screen. Create an app: 104 | * Set the type to Internal (unless you need to share permissions external to your org) 105 | * Enter an App Name, User Support Email and Developer Contact Information. 106 | * The next page should be Scopes - do not fill in anything. 107 | 6. Create Oauth Credentials: 108 | * Navigate to API & Services -> Credentials. 109 | * Click Create Credentials. 110 | * Select Oauth Client Id. 111 | * For Application Type, select Web Application. 112 | * Add a name and click Create. 113 | * You will find your Client ID and Client Secret in the upper right corner of the next page. Copy them somewhere - we’ll need them in a minute. 
114 | * On this same page add an Authorized Redirect URI using the following template (replace `{{CLIENT_ID}}` with your new Client ID): `https://iap.googleapis.com/v1/oauth/clientIds/{{CLIENT_ID}}:handleRedirect` 115 | 116 | ### Clone The Repo 117 | 118 | In your development environment, clone the load testing repo: 119 | 120 | $ git clone https://github.com/looker-open-source/looker-load-testing.git 121 | $ cd looker-load-testing 122 | 123 | ### Install Python libraries 124 | 125 | From the project root directory use poetry to install the required libraries. This will also create a virtual 126 | environment for you. 127 | 128 | $ poetry install 129 | 130 | After the install completes you can access this new virtual environment with the following command: 131 | 132 | $ poetry shell 133 | 134 | 135 | ## Deploy The Load Tester 136 | 137 | ### Write your test manifest 138 | 139 | Navigate to the `locust_test_scripts` directory and create your test script. Documentation for standard http tests can be found 140 | [here](https://docs.locust.io/en/0.14.6/writing-a-locustfile.html) 141 | 142 | Examples for browser-based tests can be found in `locust_test_scripts`. 143 | 144 | You will need to pass the relevant script name into the config file - see below for more details. 145 | 146 | > The example `default_dashboard_loadtest` outlines a standard dashboard rendering performance test. If you want to use this with 147 | > your own instance, near the top of the file you will want to modify the `DASH_ID` variables to match the Looker instance 148 | > you are testing and the relevant dashboard id. Different testing goals will require specific test code - Locust is flexible enough 149 | > to handle just about any kind of test you can think of! 150 | 151 | ### Copy service account file to credentials directory 152 | 153 | In order for NFO to authenticate to GCP correctly you must copy the service account json you created above to the 154 | `credentials` directory.
You will refer to this file in the config file you create next. 155 | 156 | ### Set Config Parameters 157 | 158 | Navigate to the configs directory and create a yaml file called ‘config.yaml’. You’ll need to add entries for the following items: 159 | 160 | * **gke_cluster** 161 | - **gcp_project_id**: The project ID of your GCP project 162 | - **gcp_zone**: The GCP zone 163 | - **gcp_cluster_node_count**: How many nodes should be included in the load test cluster 164 | - **gcp_cluster_machine_type**: What compute instance machine type should be used? (Almost certainly a C2 type instance) 165 | - **gcp_service_account_file**: The name of the service account file you generated from GCP. Just the file name, not 166 | the path 167 | * **loadtester** 168 | - **loadtest_name**: A unique identifier for your load test 169 | - **loadtest_step_load**: ("true"|"false") Should locust run in [step mode](https://docs.locust.io/en/0.14.6/running-locust-in-step-load-mode.html) 170 | - **loadtest_worker_count**: How many workers should be created 171 | - **loadtest_script_name**: The name of the script that contains your test logic. 
Only include the script's file name, not the rest of the path 172 | * **looker_credentials** 173 | - **looker_host**: The URL of the Looker instance you are testing 174 | - **looker_user**: (Optional) The username of the Looker instance you are testing 175 | - **looker_pass**: (Optional) The password of the Looker instance you are testing 176 | - **looker_api_client_id**: (Optional) The API client_id of the Looker instance you are testing 177 | - **looker_api_client_secret**: (Optional) The API client_secret of the Looker instance you are testing 178 | * **external** 179 | - **gcp_oauth_client_id**: (External Mode) The OAuth Client ID you generated earlier 180 | - **gcp_oauth_client_secret**: (External Mode) The OAuth Client Secret you generated earlier 181 | - **loadtest_dns_domain**: The DNS domain/subdomain name you want to use to access the NFO resources 182 | 183 | Your config may look something like this: 184 | 185 | ``` 186 | gke_cluster: 187 | gcp_project_id: my-gcp-project 188 | gcp_zone: us-central1-c 189 | gcp_cluster_node_count: 3 190 | gcp_cluster_machine_type: c2-standard-8 191 | gcp_service_account_file: my-service-account-file.json 192 | loadtester: 193 | loadtest_name: demo-loadtest 194 | loadtest_step_load: "true" 195 | loadtest_worker_count: 20 196 | loadtest_script_name: default_dashboard_loadtest.py 197 | looker_credentials: 198 | looker_host: https://looker.company.com 199 | looker_user: me@company.com 200 | looker_pass: abc123fakepassword 201 | external: 202 | gcp_oauth_client_id: abc123.apps.googleusercontent.com 203 | gcp_oauth_client_secret: 789xzyfakeclient 204 | loadtest_dns_domain: py-loadtest.colinpistell.com 205 | ``` 206 | 207 | > ⚠ Warning: This config contains sensitive information, so protect this file like any other credentials. 208 | 209 | ### Deploy! 210 | 211 | Navigate to the root directory and kick off the deployment! 
212 | 213 | $ nfo setup --config-file config.yaml --external 214 | 215 | > ★ Tip: The script will take around 5 minutes to complete depending on what kind of instances it’s creating. 216 | 217 | When the script concludes it will output some final instructions. If you've chosen to run in external mode you will need 218 | to set up a DNS A Record for the printed IP address and URL. 219 | 220 | Some additional instructions will be printed in case you wish to port-forward the locust services for immediate access. 221 | If you're running in external mode the google-managed SSL certificate will take 15-20 minutes to provision, but you can 222 | port-forward immediately. See the [kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) for more details. 223 | 224 | 225 | ### Updating the test 226 | 227 | Since the test script is a part of the container you build and deploy any updates to the test script will require 228 | building and deploying a new container. This process has been automated with an `update` command. Make your required 229 | changes to the test script and then run the following command: 230 | 231 | $ nfo update test --config-file config.yaml --tag v2 232 | 233 | This will rebuild the container and execute the correct steps to update the kubernetes deployment. These changes will be 234 | available immediately upon completion of the command - no need to redeploy the ingress or wait for DNS this time around! 235 | 236 | > Note: You must provide a unique tag to trigger a rebuild - attempting to use the same tag will result in an error. 237 | > Consider using a tag that includes a version number. When you first deploy the load tester it automatically creates a 238 | > tag of 'v1' so one good option is to simply increment the number, e.g. 'v2', 'v3', etc.
239 | 240 | ### Updating the config 241 | 242 | If your updates involve changes to just the config you can make use of the following command: 243 | 244 | $ nfo update config --config-file config.yaml 245 | 246 | This will redeploy the master/worker deployments with the updated config - this is even faster than the test update 247 | command since there's no need to build a new container image! 248 | 249 | 250 | #### Accessing the UI via the web 251 | 252 | For the purposes of an example, let’s say the `loadtest_dns_domain` parameter in your `config.yaml` was set to `my-loadtest.company.com`. Once everything has some time to bake 253 | you will be able to access your load tester at `https://locust.my-loadtest.company.com`. 254 | 255 | 256 | ### Scaling 257 | 258 | Scaling up the number of simulated users will require an increase in the number of Locust 259 | worker pods. To increase the number of pods deployed by the deployment, Kubernetes offers the ability to resize 260 | deployments without redeploying them. This can be done by editing the `loadtest_worker_count` field in the config 261 | file and triggering a config update (see above). You can also make use of imperative `kubectl` commands. For 262 | example, the following command scales the pool of Locust worker pods to 20: 263 | 264 | $ kubectl scale deployment/lw-pod --replicas=20 265 | 266 | ### Monitoring 267 | 268 | In addition to the locust interface itself, NFO makes available a grafana instance with a pre-configured dashboard. You 269 | can access this at `https://grafana.my-loadtest.company.com` (following the example from above - make sure you use your 270 | proper domain!) if you've deployed in external mode. If you're port forwarding you can forward the `grafana` service's port 80 to access.
271 | 272 | The preconfigured dashboard includes some locust tiles as well as preset looker monitoring for looker instances running 273 | on GCP - you'll need to create a generic Google Cloud Monitoring datasource - follow [grafana's documentation](https://grafana.com/docs/grafana/latest/datasources/cloudmonitoring/) 274 | for more details. Grafana can handle just about any standard data source so feel free to modify to suit your needs! 275 | 276 | ### Multiple load tests 277 | 278 | NFO supports deploying multiple load tests at any given time. Simply create a new config yaml in your `configs` 279 | directory with your desired configuration and deploy as normal, referencing the new config file in your `--config-file` 280 | parameter! NFO will handle setting up your kubectl context for you. 281 | 282 | ## Cleaning up 283 | 284 | Once you are done load testing and exporting data you can tear down your cluster to avoid additional costs. From the 285 | root directory: 286 | 287 | $ nfo teardown --config-file config.yaml 288 | 289 | You will likely want to clean up your DNS entry as well. 290 | 291 | To kick off another test simply rerun the `nfo setup` command and you're back in business! 292 | 293 | ## Persistent Test Data 294 | 295 | By default, NFO deploys a special storage disk that is used as a persistent volume to store locust data. This disk does 296 | not get torn down with the rest of the cluster and will get re-attached when the same config file is used to deploy a 297 | new cluster. The intention of this disk is to allow for test data to "survive" cluster teardowns without the need to 298 | keep your expensive kubernetes infrastructure running. Each test config (as defined by the config yaml) will have its 299 | own disk created. 300 | 301 | Should you wish to export your Locust test data to another source (e.g. BigQuery etc.) 
you can make use of the 302 | [Prometheus API](https://prometheus.io/docs/prometheus/latest/querying/api/) 303 | 304 | Should you wish to remove the persistent disk during teardown you can make use of the `--all` flag in the teardown 305 | command: 306 | 307 | $ nfo teardown --config-file config.yaml --all 308 | 309 | ## Local Mode 310 | 311 | You can run locust in local mode - this may be desirable during test development for rapid iteration. You will need to 312 | make sure you have a suitable version of [Chromedriver](https://chromedriver.chromium.org/downloads) installed. This 313 | **must** match the version of Chrome you have on your system... mismatches in versions will cause errors! 314 | 315 | Once you have Chromedriver installed you can start a locust instance with the following command: 316 | 317 | $ locust -f path/to/test-script.py 318 | 319 | 320 | (replace the path with the correct path to the load test you want to run) 321 | 322 | Locust will by default be made available on localhost:8089. 323 | 324 | You will very likely need to set some environment variables in order to properly run your tests - these variables will 325 | likely be: 326 | 327 | - HOST (your looker host) 328 | - USERNAME (the username you will log in with) 329 | - PASS (the password associated with the username you're using) 330 | 331 | ## Additional Reading 332 | 333 | 1. [Locust Documentation](https://docs.locust.io/en/0.14.6/) 334 | 2. [Managed Certificates GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs) 335 | 3. 
[IAP with GKE](https://cloud.google.com/iap/docs/enabling-kubernetes-howto#oauth-configure) 336 | -------------------------------------------------------------------------------- /nuke_from_orbit/utils/nuke_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | import subprocess 4 | import shutil 5 | from googleapiclient.errors import HttpError 6 | from pathlib import Path 7 | from jinja2 import Template 8 | from nuke_from_orbit.utils import gke_cluster, cloud_build, kubernetes_deploy 9 | from time import sleep 10 | 11 | SCRIPT_PATH = Path(__file__).parent 12 | APPLY_COMMAND = ["kubectl", "apply", "-f"] 13 | 14 | 15 | class BColors: 16 | """Convenience class for adding colors to output""" 17 | HEADER = '\033[95m' 18 | OKBLUE = '\033[94m' 19 | OKCYAN = '\033[96m' 20 | OKGREEN = '\033[92m' 21 | WARNING = '\033[93m' 22 | FAIL = '\033[91m' 23 | ENDC = '\033[0m' 24 | BOLD = '\033[1m' 25 | UNDERLINE = '\033[4m' 26 | 27 | 28 | class MissingRequiredArgsError(Exception): 29 | """Exception raised if user config is missing any required arguments""" 30 | def __init__(self, missing_args, message="Missing required args! Missing args:"): 31 | self.missing_args = missing_args 32 | self.message = message 33 | super().__init__(self.message) 34 | 35 | def __str__(self): 36 | return f"{self.message} {self.missing_args}" 37 | 38 | 39 | class TooManyWorkersError(Exception): 40 | """Exception raised if too many workers are requested for the underlying infrastrucure 41 | to handle. 42 | """ 43 | def __init__(self, requested_workers, max_workers, message="Cluster worker capacity exceeded!"): 44 | self.requested_workers = requested_workers 45 | self.max_workers = max_workers 46 | self.message = f"{message} Requested: {self.requested_workers}. 
Capacity: {self.max_workers}" 47 | super().__init__(self.message) 48 | 49 | def __str__(self): 50 | return f"{BColors.FAIL}{self.message}{BColors.ENDC}" 51 | 52 | 53 | class TagMatchError(Exception): 54 | """Exception raised if the provided update tag matches the existing tag.""" 55 | 56 | def __init__(self, tag, message="Provided tag already in use!"): 57 | self.tag = tag 58 | self.message = f"{message} Tag: {tag}" 59 | super().__init__(self.message) 60 | 61 | def __str__(self): 62 | return f"{BColors.FAIL}{self.message}{BColors.ENDC}" 63 | 64 | 65 | def check_required_args(user_config, external=False): 66 | """Checks a user config dict against required args and throws an error if any are missing. 67 | Returns a 1 if all required args are present. 68 | """ 69 | 70 | # parse required args 71 | config_args_path = SCRIPT_PATH.joinpath("rendered", "config_args.yaml") 72 | with open(config_args_path) as f: 73 | config_args = yaml.safe_load(f) 74 | 75 | # validate user config against required args 76 | required_args = config_args["required_args"] 77 | if external: 78 | required_args.extend(config_args["required_external_args"]) 79 | 80 | missing_args = set(required_args) - set(user_config) 81 | 82 | if missing_args: 83 | raise MissingRequiredArgsError(missing_args) 84 | 85 | # warn if any optional args are missing 86 | optional_args = config_args["optional_args"] 87 | missing_optional_args = set(optional_args) - set(user_config) 88 | if missing_optional_args: 89 | message = "Note - some optional arguments missing. This is probably fine, but you may want to double check!" 90 | print(f"{BColors.WARNING}{message} Missing args: {missing_optional_args}{BColors.ENDC}") 91 | 92 | return 1 93 | 94 | 95 | def check_worker_count(user_config): 96 | """Accepts a dict of user configs and confirms if the worker count is valid. If 97 | the specified number of workers exceeds what the cluster can handle an exception 98 | is thrown. Returns a 1 if worker count is acceptable. 
99 | """ 100 | 101 | cores = int(user_config["gcp_cluster_machine_type"].split("-")[-1]) 102 | node_count = int(user_config["gcp_cluster_node_count"]) 103 | requested_workers = int(user_config["loadtest_worker_count"]) 104 | 105 | # Leave 2 cores for master and secondary service overhead 106 | max_workers = (cores * node_count) - 2 107 | 108 | if requested_workers > max_workers: 109 | raise TooManyWorkersError(requested_workers, max_workers) 110 | 111 | return 1 112 | 113 | 114 | def set_variables(config_file, image_tag="v1", external=False): 115 | """Reads the user config file and checks for required args. Image tag is then added. 116 | Config must be in yaml format. Returns the parsed config options. 117 | """ 118 | 119 | # parse and flatten user config 120 | with open(config_file) as f: 121 | user_config = yaml.safe_load(f) 122 | 123 | flat_user_config = {} 124 | for k, v in user_config.items(): 125 | flat_user_config = {**flat_user_config, **v} 126 | 127 | # check required args 128 | check_required_args(flat_user_config, external) 129 | 130 | # check worker count 131 | check_worker_count(flat_user_config) 132 | 133 | # update config values to include image tag 134 | flat_user_config["image_tag"] = image_tag 135 | 136 | return flat_user_config 137 | 138 | 139 | def copy_test_script_to_docker(test_script): 140 | """Accepts a file name (assumed to be in the root test_scripts directory) and copies/renames it 141 | into the appropriate directory for the docker image to build. 142 | """ 143 | 144 | test_script_path = SCRIPT_PATH.parent.parent.joinpath("locust_test_scripts", test_script) 145 | target_path = SCRIPT_PATH.parent.joinpath("docker-image", "locust-tasks", "tasks.py") 146 | 147 | shutil.copy(test_script_path, target_path) 148 | 149 | 150 | def collect_kube_yaml_templates(external=False): 151 | """Assembles and returns the appropriate list of template kubernetes yamls 152 | for rendering. Returns the list of files. 
153 | """ 154 | 155 | template_path = SCRIPT_PATH.joinpath("templates") 156 | 157 | external_yamls = [ 158 | template_path.joinpath("loadtest-cert.yaml"), 159 | template_path.joinpath("loadtest-ingress.yaml"), 160 | template_path.joinpath("config-default.yaml") 161 | ] 162 | 163 | yamls = [ 164 | template_path.joinpath("locust-controller.yaml"), 165 | template_path.joinpath("locust-worker-controller.yaml"), 166 | template_path.joinpath("prometheus-config.yaml"), 167 | template_path.joinpath("prometheus-controller.yaml"), 168 | template_path.joinpath("grafana-config.yaml"), 169 | template_path.joinpath("grafana-controller.yaml") 170 | ] 171 | 172 | # external first, if required... order matters a little 173 | if external: 174 | file_list = external_yamls + yamls 175 | else: 176 | file_list = yamls 177 | 178 | return file_list 179 | 180 | 181 | def render_kubernetes_templates(values_dict, files): 182 | """Accepts a dict of user config values and a list of files and then renders those 183 | files and writes them to the `rendered` directory. 184 | """ 185 | 186 | for file in files: 187 | print(f"Rendering {file}") 188 | dest_file = SCRIPT_PATH.joinpath("rendered", file.name) 189 | template = file.read_text() 190 | rendered = Template(template).render(**values_dict) 191 | 192 | with open(dest_file, "w") as f: 193 | f.write(rendered) 194 | 195 | 196 | def deploy_persistent_disk(user_config): 197 | """Accepts a dict of validated user configs and uses them to build a GCE persistent disk. 198 | First we check to see if the disk currently exists (i.e. persisted from last session) If 199 | the disk does not exist it is created. This disk will be used as a persistent volume for 200 | prometheus to retain data. 201 | """ 202 | 203 | # set variables from user config 204 | name = user_config["loadtest_name"] 205 | project = user_config["gcp_project_id"] 206 | zone = user_config["gcp_zone"] 207 | 208 | # create the compute client. 
we're relying on the environment variable to be set for credentials 209 | client = gke_cluster.get_compute_client() 210 | 211 | try: 212 | gke_cluster.fetch_zonal_disk(name, project, zone, client) 213 | print(f"Found persistent disk {name}. Attaching to cluster...") 214 | except HttpError: 215 | print("No existing persistent disk found. Creating...") 216 | disk_task = gke_cluster.create_zonal_disk(name, project, zone, client) 217 | 218 | running = True 219 | while running: 220 | status = gke_cluster.compute_zonal_task_status(disk_task, project, zone, client) 221 | print(f"Creating persistent disk {name}: {status}") 222 | if status == "DONE": 223 | running = False 224 | else: 225 | sleep(2) 226 | 227 | 228 | def destroy_persistent_disk(user_config): 229 | """Accepts a dict of validated user configs and uses them to destroy a GCE persistent disk. 230 | Tracks the status of the job and confirms successful deletion. 231 | """ 232 | 233 | # set variables from user config 234 | name = user_config["loadtest_name"] 235 | project = user_config["gcp_project_id"] 236 | zone = user_config["gcp_zone"] 237 | 238 | # create the compute client. we're relying on the environment variable to be set for credentials 239 | client = gke_cluster.get_compute_client() 240 | 241 | try: 242 | gke_cluster.fetch_zonal_disk(name, project, zone, client) 243 | print(f"Found persistent disk {name}. Deleting...") 244 | disk_task = gke_cluster.delete_zonal_disk(name, project, zone, client) 245 | 246 | running = True 247 | while running: 248 | status = gke_cluster.compute_zonal_task_status(disk_task, project, zone, client) 249 | print(f"Deleting persistent disk {name}: {status}") 250 | if status == "DONE": 251 | running = False 252 | else: 253 | sleep(2) 254 | except HttpError: 255 | print("No persistent disk exists. 
Moving on!") 256 | 257 | 258 | def deploy_ip_address(user_config): 259 | """Accepts a dict of validated user configs and uses them to deploy a global ip 260 | address that is used for the gke ingress controller (if appropriate). 261 | """ 262 | 263 | # set variables from user config 264 | name = user_config["loadtest_name"] 265 | project = user_config["gcp_project_id"] 266 | 267 | # create the compute client. we're relying on the environment variable to be set for credentials 268 | client = gke_cluster.get_compute_client() 269 | 270 | address_task = gke_cluster.create_global_ip(name, project, client) 271 | 272 | running = True 273 | while running: 274 | status = gke_cluster.compute_task_status(address_task, project, client) 275 | print(f"Global IP Address {name}: {status}") 276 | if status == "DONE": 277 | running = False 278 | else: 279 | sleep(2) 280 | 281 | 282 | def get_ip_address(user_config): 283 | """Accepts a dict of validated user configs and uses them to attempt a get 284 | of the specified global ip address. If successful, returns the ip address as 285 | a string. If an exception is thrown it will return False. 286 | """ 287 | 288 | # set variables from user config 289 | name = user_config["loadtest_name"] 290 | project = user_config["gcp_project_id"] 291 | 292 | # create the compute client. we're relying on the environment variable to be set for credentials 293 | client = gke_cluster.get_compute_client() 294 | 295 | try: 296 | ip = gke_cluster.fetch_ip_address(name, project, client) 297 | return ip 298 | except HttpError: 299 | return False 300 | 301 | 302 | def destroy_ip_address(user_config): 303 | """Accepts a dict of validated user configs and uses them to destroy a global 304 | ip address. Tracks the status of the job and confirms successful deletion. 305 | """ 306 | 307 | # set variables from user config 308 | name = user_config["loadtest_name"] 309 | project = user_config["gcp_project_id"] 310 | 311 | # create the compute client. 
we're relying on the environment to be set for credentials 312 | client = gke_cluster.get_compute_client() 313 | 314 | address_delete_task = gke_cluster.delete_global_ip(name, project, client) 315 | 316 | running = True 317 | while running: 318 | status = gke_cluster.compute_task_status(address_delete_task, project, client) 319 | print(f"Delete Global IP Address {name}: {status}") 320 | if status == "DONE": 321 | running = False 322 | else: 323 | sleep(2) 324 | 325 | 326 | def deploy_gke(user_config): 327 | """Accepts a dict of validated user configs and uses them to configure and deploy 328 | a GKE cluster. Awaits the results and, upon success, configures the kubeconfig file 329 | for kubernetes API validation. 330 | """ 331 | 332 | # set variables from user config 333 | name = user_config["loadtest_name"] 334 | project = user_config["gcp_project_id"] 335 | zone = user_config["gcp_zone"] 336 | node_count = user_config["gcp_cluster_node_count"] 337 | machine_type = user_config["gcp_cluster_machine_type"] 338 | 339 | # create the gke client. we're relying on the environment variable to be set for credentials 340 | client = gke_cluster.get_gke_client() 341 | 342 | gke_task = gke_cluster.setup_gke_cluster(name, project, zone, node_count, machine_type, client) 343 | 344 | running = True 345 | while running: 346 | status = gke_cluster.gke_task_status(gke_task, project, zone, client) 347 | print(f"GKE Status {name}: {status.status.name}. {status.detail}") 348 | if status.status.name == "DONE": 349 | running = False 350 | else: 351 | sleep(2) 352 | 353 | # create entry for kubeconfig file 354 | gke_cluster.setup_cluster_auth_file(name, project, zone, client) 355 | 356 | 357 | def destroy_gke(user_config): 358 | """Accepts a dict of validated user configs and uses it to destroy the specified 359 | GKE cluster. Awaits the results and confirms successful deletion. 
360 | """ 361 | 362 | # set variables from user config 363 | name = user_config["loadtest_name"] 364 | project = user_config["gcp_project_id"] 365 | zone = user_config["gcp_zone"] 366 | 367 | # create the gke client. we're relying on the environment variable to be set for credentials 368 | client = gke_cluster.get_gke_client() 369 | 370 | gke_delete_task = gke_cluster.delete_gke_cluster(name, project, zone, client) 371 | 372 | running = True 373 | while running: 374 | status = gke_cluster.gke_task_status(gke_delete_task, project, zone, client) 375 | print(f"GKE Delete Status {name}: {status.status.name}. {status.detail}") 376 | if status.status.name == "DONE": 377 | running = False 378 | else: 379 | sleep(2) 380 | 381 | # delete entry in kubeconfig file 382 | gke_cluster.teardown_cluster_auth_file(name, project, zone) 383 | 384 | 385 | def set_kubernetes_context(user_config): 386 | """Sets the kubernetes context to the provided name.""" 387 | 388 | # set variables from user config 389 | name = user_config["loadtest_name"] 390 | project = user_config["gcp_project_id"] 391 | zone = user_config["gcp_zone"] 392 | 393 | context_name = f"gke_{project}-{zone}-{name}" 394 | 395 | set_context_command = [ 396 | "kubectl", 397 | "config", 398 | "use-context", 399 | context_name 400 | ] 401 | 402 | subprocess.run(set_context_command) 403 | 404 | 405 | def deploy_test_container_image(user_config): 406 | """Accepts a dict of validated user configs and uses them to configure and send 407 | a cloud build job that copies the test script into the docker directory, creates 408 | the load test container image and uploads it to your GCP project's Container Registry. 
409 | """ 410 | 411 | # set variables from user config 412 | name = user_config["loadtest_name"] 413 | project = user_config["gcp_project_id"] 414 | image_tag = user_config["image_tag"] 415 | test_script = user_config["loadtest_script_name"] 416 | 417 | # copy test script into docker directory in prep for building 418 | copy_test_script_to_docker(test_script) 419 | 420 | # create build and storage clients 421 | build_client = cloud_build.get_build_client() 422 | storage_client = cloud_build.get_storage_client() 423 | 424 | # upload the tgz of the docker image directory to cloud storage 425 | bucket, blob = cloud_build.upload_source(project, storage_client) 426 | 427 | # trigger the build 428 | build_task = cloud_build.build_test_image(name, project, image_tag, bucket, blob, build_client) 429 | 430 | running = True 431 | while running: 432 | status = cloud_build.build_status(build_task, project, build_client) 433 | print(f"Cloud Build {name} {build_task}: {status}") 434 | if status == "SUCCESS": 435 | running = False 436 | else: 437 | sleep(2) 438 | 439 | 440 | def deploy_looker_secret(user_config): 441 | """Accepts a dict of validated user configs and uses them to deploy looker website 442 | secrets. Since some of these values are optional if they are not present in the user 443 | config then that secret will be gracefully skipped. 
444 | """ 445 | 446 | # set host variable 447 | looker_host = user_config["looker_host"] 448 | 449 | # set variables if present 450 | looker_user = user_config.get("looker_user") 451 | looker_pass = user_config.get("looker_pass") 452 | looker_api_client_id = user_config.get("looker_api_client_id") 453 | looker_api_client_secret = user_config.get("looker_api_client_secret") 454 | 455 | # set host secret 456 | host_secret = "website-host" 457 | host_secret_value = {"host": looker_host} 458 | kubernetes_deploy.deploy_secret(host_secret, host_secret_value) 459 | 460 | # conditionally set secrets 461 | if looker_user and looker_pass: 462 | creds_secret = "website-creds" 463 | creds_secret_value = {"username": looker_user, "password": looker_pass} 464 | kubernetes_deploy.deploy_secret(creds_secret, creds_secret_value) 465 | 466 | if looker_api_client_id and looker_api_client_secret: 467 | api_secret = "api-creds" 468 | api_secret_value = {"client_id": looker_api_client_id, "client_secret": looker_api_client_secret} 469 | kubernetes_deploy.deploy_secret(api_secret, api_secret_value) 470 | 471 | 472 | def deploy_oauth_secret(user_config): 473 | """Accepts a dict of validated user configs and uses them to deploy gcp oauth 474 | secrets. These are used to set up ingress for external deployments. 475 | """ 476 | 477 | # set variables 478 | gcp_oauth_client_id = user_config["gcp_oauth_client_id"] 479 | gcp_oauth_client_secret = user_config["gcp_oauth_client_secret"] 480 | 481 | # set secrets 482 | oauth_secret = "iap-secret" 483 | oauth_secret_value = {"client_id": gcp_oauth_client_id, "client_secret": gcp_oauth_client_secret} 484 | kubernetes_deploy.deploy_secret(oauth_secret, oauth_secret_value) 485 | 486 | 487 | def compare_tags(new_tag): 488 | """Accepts a container tag and compares it to the existing tag in the locust deployment. 489 | If the tags are the same then an exception is raised. Returns 1 if the tags are distinct. 
490 | """ 491 | 492 | # fetch the current lm-pod deployment info 493 | locust_deployment = kubernetes_deploy.get_deployment("lm-pod") 494 | 495 | # parse the image tag from the container 496 | image = locust_deployment.spec.template.spec.containers[0].image 497 | current_tag = image.split(":")[-1] 498 | 499 | if new_tag == current_tag: 500 | raise TagMatchError(new_tag) 501 | 502 | return 1 503 | 504 | 505 | def deploy_locust(cycle=False): 506 | """Deploys the locust services and deployments to kubernetes. If the cycle argument is 507 | set to True then the deployments will be deleted prior to deployment (to be used during 508 | update commands). 509 | """ 510 | 511 | render_path = SCRIPT_PATH.joinpath("rendered") 512 | 513 | # conditionally delete deployments 514 | if cycle: 515 | kubernetes_deploy.delete_deployment("lw-pod") 516 | kubernetes_deploy.delete_deployment("lm-pod") 517 | 518 | # roll out locust services and deployments 519 | locust_master = str(render_path.joinpath("locust-controller.yaml")) 520 | locust_worker = str(render_path.joinpath("locust-worker-controller.yaml")) 521 | 522 | wait_command = ["kubectl", "rollout", "status"] 523 | 524 | lm_command = APPLY_COMMAND + [locust_master] 525 | lm_wait_command = wait_command + ["deployment/lm-pod"] 526 | lw_command = APPLY_COMMAND + [locust_worker] 527 | lw_wait_command = wait_command + ["deployment/lw-pod"] 528 | 529 | locust_commands = [lm_command, lm_wait_command, lw_command, lw_wait_command] 530 | win_exec = ["cmd.exe", "/c"] 531 | for command in locust_commands: 532 | if os.name == "nt": 533 | command = win_exec + command 534 | subprocess.run(command) 535 | 536 | 537 | def deploy_external(): 538 | """Deploys the external services to kubernetes""" 539 | 540 | render_path = SCRIPT_PATH.joinpath("rendered") 541 | 542 | external_yamls = [ 543 | str(render_path.joinpath("loadtest-cert.yaml")), 544 | str(render_path.joinpath("loadtest-ingress.yaml")), 545 | str(render_path.joinpath("config-default.yaml")) 
546 | ] 547 | 548 | win_exec = ["cmd.exe", "/c"] 549 | for yml in external_yamls: 550 | command = APPLY_COMMAND + [yml] 551 | if os.name == "nt": 552 | command = win_exec + command 553 | subprocess.run(command) 554 | 555 | 556 | def deploy_secondary(): 557 | """Deploys the secondary services (prometheus, grafana, etc.) to kubernetes""" 558 | 559 | render_path = SCRIPT_PATH.joinpath("rendered") 560 | 561 | secondary_yamls = [ 562 | str(render_path.joinpath("prometheus-config.yaml")), 563 | str(render_path.joinpath("prometheus-controller.yaml")), 564 | str(render_path.joinpath("grafana-config.yaml")), 565 | str(render_path.joinpath("grafana-controller.yaml")) 566 | ] 567 | 568 | win_exec = ["cmd.exe", "/c"] 569 | for yml in secondary_yamls: 570 | command = APPLY_COMMAND + [yml] 571 | if os.name == "nt": 572 | command = win_exec + command 573 | subprocess.run(command) 574 | -------------------------------------------------------------------------------- /tests/test_setup_command.py: -------------------------------------------------------------------------------- 1 | from nuke_from_orbit.commands import setup_commands 2 | from nuke_from_orbit.utils import nuke_utils 3 | from pathlib import Path 4 | 5 | 6 | CONFIG_DIR = Path(__file__).parent.parent.joinpath("configs") 7 | MOCK_USER_CONFIG = { 8 | "gcp_service_account_file": "mock_sa_file.json", 9 | "loadtest_dns_domain": "mockdomain.com" 10 | } 11 | 12 | 13 | def test_main_set_variables(mocker): 14 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables") 15 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 16 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 17 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 18 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 19 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 20 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 21 | 
mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 22 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 23 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 24 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 25 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 26 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 27 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 28 | 29 | mock_config_path = CONFIG_DIR.joinpath("mock_config.yaml") 30 | 31 | setup_commands.main(config_file="mock_config.yaml", external=False, persistence=True) 32 | 33 | nuke_utils.set_variables.assert_called_with(mock_config_path, "v1", False) 34 | 35 | 36 | def test_main_run_threads_persistence_no_external(mocker): 37 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 38 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 39 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 40 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 41 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 42 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 43 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 44 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 45 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 46 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 47 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 48 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 49 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 50 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 51 | 52 | setup_commands.main(config_file="mock_config.yaml", external=False, persistence=True) 53 
| 54 | nuke_utils.deploy_gke.assert_called_with(MOCK_USER_CONFIG) 55 | nuke_utils.deploy_test_container_image.assert_called_with(MOCK_USER_CONFIG) 56 | nuke_utils.deploy_persistent_disk.assert_called_with(MOCK_USER_CONFIG) 57 | # deploy ip shouldn't be called in multithread 58 | nuke_utils.deploy_ip_address.assert_not_called() 59 | 60 | 61 | def test_main_run_threads_no_persistence_no_external(mocker): 62 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 63 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 64 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 65 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 66 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 67 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 68 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 69 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 70 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 71 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 72 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 73 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 74 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 75 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 76 | 77 | setup_commands.main(config_file="mock_config.yaml", external=False, persistence=False) 78 | 79 | nuke_utils.deploy_gke.assert_called_with(MOCK_USER_CONFIG) 80 | nuke_utils.deploy_test_container_image.assert_called_with(MOCK_USER_CONFIG) 81 | # deploy ip and persistent disk shouldn't be called in multithread 82 | nuke_utils.deploy_ip_address.assert_not_called() 83 | nuke_utils.deploy_persistent_disk.assert_not_called() 84 | 85 | 86 | def test_main_run_threads_persistence_external(mocker): 87 | 
mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 88 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 89 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 90 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 91 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 92 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 93 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 94 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 95 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 96 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 97 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 98 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 99 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 100 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 101 | 102 | setup_commands.main(config_file="mock_config.yaml", external=True, persistence=True) 103 | 104 | nuke_utils.deploy_gke.assert_called_with(MOCK_USER_CONFIG) 105 | nuke_utils.deploy_test_container_image.assert_called_with(MOCK_USER_CONFIG) 106 | nuke_utils.deploy_persistent_disk.assert_called_with(MOCK_USER_CONFIG) 107 | nuke_utils.deploy_ip_address.assert_called_with(MOCK_USER_CONFIG) 108 | 109 | 110 | def test_main_run_threads_no_persistence_external(mocker): 111 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 112 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 113 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 114 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 115 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 116 | 
mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 117 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 118 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 119 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 120 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 121 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 122 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 123 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 124 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 125 | 126 | setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False) 127 | 128 | nuke_utils.deploy_gke.assert_called_with(MOCK_USER_CONFIG) 129 | nuke_utils.deploy_test_container_image.assert_called_with(MOCK_USER_CONFIG) 130 | nuke_utils.deploy_ip_address.assert_called_with(MOCK_USER_CONFIG) 131 | # persistent disk shouldn't be called 132 | nuke_utils.deploy_persistent_disk.assert_not_called() 133 | 134 | 135 | def test_main_collect_yaml_templates_call(mocker): 136 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables") 137 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 138 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 139 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 140 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 141 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 142 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 143 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 144 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 145 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 146 | 
mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 147 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 148 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 149 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 150 | 151 | setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False) 152 | 153 | nuke_utils.collect_kube_yaml_templates.assert_called_with(True) 154 | 155 | 156 | def test_main_collect_yaml_templates_call_no_external(mocker): 157 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables") 158 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 159 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 160 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 161 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 162 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 163 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 164 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 165 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 166 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 167 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 168 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 169 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 170 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 171 | 172 | setup_commands.main(config_file="mock_config.yaml", external=False, persistence=False) 173 | 174 | nuke_utils.collect_kube_yaml_templates.assert_called_with(False) 175 | 176 | 177 | def test_main_render_templates(mocker): 178 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 179 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 180 | 
mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 181 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 182 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 183 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 184 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates").return_value = ["mock_file_list"] 185 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 186 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 187 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 188 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 189 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 190 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 191 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 192 | 193 | setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False) 194 | 195 | nuke_utils.render_kubernetes_templates.assert_called_with(MOCK_USER_CONFIG, ["mock_file_list"]) 196 | 197 | 198 | def test_main_set_k8s_context(mocker): 199 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 200 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 201 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 202 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 203 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 204 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 205 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 206 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 207 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 208 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 209 | 
mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 210 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 211 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 212 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 213 | 214 | setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False) 215 | 216 | nuke_utils.set_kubernetes_context.assert_called_with(MOCK_USER_CONFIG) 217 | 218 | 219 | def test_main_deploy_looker_secret(mocker): 220 | 221 | parent_mock = mocker.Mock() 222 | 223 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 224 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 225 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 226 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 227 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 228 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 229 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 230 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 231 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 232 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 233 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 234 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 235 | 236 | context_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 237 | deploy_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 238 | 239 | parent_mock.attach_mock(context_mock, "context_mock") 240 | parent_mock.attach_mock(deploy_mock, "deploy_mock") 241 | 242 | setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False) 243 | 244 | # determine if context call occurs before deployment 245 | expected_call_order = 
[mocker.call.context_mock(MOCK_USER_CONFIG), mocker.call.deploy_mock(MOCK_USER_CONFIG)] 246 | assert parent_mock.mock_calls == expected_call_order 247 | 248 | 249 | def test_main_deploy_oauth_secret_external(mocker): 250 | 251 | parent_mock = mocker.Mock() 252 | 253 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 254 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 255 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 256 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 257 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 258 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 259 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 260 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 261 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 262 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 263 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 264 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 265 | 266 | context_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 267 | deploy_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 268 | 269 | parent_mock.attach_mock(context_mock, "context_mock") 270 | parent_mock.attach_mock(deploy_mock, "deploy_mock") 271 | 272 | setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False) 273 | 274 | # determine if context call occurs before deployment 275 | expected_call_order = [mocker.call.context_mock(MOCK_USER_CONFIG), mocker.call.deploy_mock(MOCK_USER_CONFIG)] 276 | assert parent_mock.mock_calls == expected_call_order 277 | 278 | 279 | def test_main_deploy_oauth_secret_no_external(mocker): 280 | 281 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value 
= MOCK_USER_CONFIG 282 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 283 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 284 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 285 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 286 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 287 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 288 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 289 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context") 290 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 291 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret") 292 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external") 293 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust") 294 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary") 295 | 296 | setup_commands.main(config_file="mock_config.yaml", external=False, persistence=False) 297 | 298 | nuke_utils.deploy_oauth_secret.assert_not_called() 299 | 300 | 301 | def test_main_deploy_external(mocker): 302 | 303 | parent_mock = mocker.Mock() 304 | 305 | mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG 306 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke") 307 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image") 308 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address") 309 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk") 310 | mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address") 311 | mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates") 312 | mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates") 313 | mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret") 314 | 
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary")

    # Patch the context call and the external deployment individually so both
    # can be attached to the same parent mock for ordering verification.
    context_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context")
    deploy_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external")

    parent_mock.attach_mock(context_mock, "context_mock")
    parent_mock.attach_mock(deploy_mock, "deploy_mock")

    setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False)

    # determine if context call occurs before deployment
    expected_call_order = [mocker.call.context_mock(MOCK_USER_CONFIG), mocker.call.deploy_mock()]
    assert parent_mock.mock_calls == expected_call_order


def test_main_deploy_external_no_external(mocker):
    """main() with external=False must never deploy the external components."""

    # Stub out every deployment step so main() runs without touching
    # GCP, docker, or kubectl. set_variables feeds the mocked user config
    # into the rest of the pipeline.
    mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary")

    setup_commands.main(config_file="mock_config.yaml", external=False, persistence=False)

    # external=False: the external-services deployment must be skipped entirely
    nuke_utils.deploy_external.assert_not_called()


def test_main_deploy_locust(mocker):
    """main() must set the kubernetes context before deploying locust."""

    parent_mock = mocker.Mock()

    # Stub out every deployment step so main() runs without touching
    # GCP, docker, or kubectl.
    mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary")

    # Patch the context call and the locust deployment individually so both
    # can be attached to the same parent mock for ordering verification.
    context_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context")
    deploy_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust")

    parent_mock.attach_mock(context_mock, "context_mock")
    parent_mock.attach_mock(deploy_mock, "deploy_mock")

    setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False)

    # determine if context call occurs before deployment
    expected_call_order = [mocker.call.context_mock(MOCK_USER_CONFIG), mocker.call.deploy_mock()]
    assert parent_mock.mock_calls == expected_call_order


def test_main_deploy_secondary(mocker):
    """main() must set the kubernetes context before deploying secondary resources."""

    parent_mock = mocker.Mock()

    # Stub out every deployment step so main() runs without touching
    # GCP, docker, or kubectl.
    mocker.patch("nuke_from_orbit.utils.nuke_utils.set_variables").return_value = MOCK_USER_CONFIG
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_gke")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_test_container_image")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_ip_address")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_persistent_disk")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.get_ip_address")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.collect_kube_yaml_templates")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.render_kubernetes_templates")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_looker_secret")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_oauth_secret")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_external")
    mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_locust")

    # Patch the context call and the secondary deployment individually so both
    # can be attached to the same parent mock for ordering verification.
    context_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.set_kubernetes_context")
    deploy_mock = mocker.patch("nuke_from_orbit.utils.nuke_utils.deploy_secondary")

    parent_mock.attach_mock(context_mock, "context_mock")
    parent_mock.attach_mock(deploy_mock, "deploy_mock")

    setup_commands.main(config_file="mock_config.yaml", external=True, persistence=False)

    # determine if context call occurs before deployment
    expected_call_order = [mocker.call.context_mock(MOCK_USER_CONFIG), mocker.call.deploy_mock()]
    assert parent_mock.mock_calls == expected_call_order
--------------------------------------------------------------------------------
/nuke_from_orbit/utils/templates/grafana-config.yaml:
--------------------------------------------------------------------------------
---
# Grafana server config: anonymous access is enabled with the Admin role,
# so the dashboard is fully usable without a login.
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-config
  labels:
    name: grafana-config
data:
  grafana.ini: |-
    [auth.anonymous]
    enabled = true
    org_role = Admin
--- 14 | apiVersion: v1 15 | kind: ConfigMap 16 | metadata: 17 | name: grafana-datasource-conf 18 | labels: 19 | name: grafana-datasource-conf 20 | data: 21 | datasource.yml: |- 22 | apiVersion: 1 23 | datasources: 24 | - name: Prometheus 25 | type: prometheus 26 | access: proxy 27 | url: 'http://prom-pod:80' 28 | --- 29 | apiVersion: v1 30 | kind: ConfigMap 31 | metadata: 32 | name: grafana-dashboard-prov-conf 33 | labels: 34 | name: grafana-dashboard-prov-conf 35 | data: 36 | dashboards.yml: |- 37 | apiVersion: 1 38 | providers: 39 | - name: 'locust_dashes' 40 | orgId: 1 41 | folder: '' 42 | folderUid: '' 43 | type: file 44 | disableDeletion: false 45 | editable: true 46 | updateIntervalSeconds: 30 47 | allowUiUpdates: false 48 | options: 49 | path: /var/lib/grafana/dashboards 50 | --- 51 | apiVersion: v1 52 | kind: ConfigMap 53 | metadata: 54 | name: grafana-dashboard-json-conf 55 | labels: 56 | name: grafana-dashboard-json-conf 57 | data: 58 | dashboard.json: |- 59 | { "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "limit": 100, "name": "Annotations & Alerts", "showIn": 0, "type": "dashboard" } ] }, "description": "Locust Exporter", "editable": true, "gnetId": 11985, "graphTooltip": 0, "links": [], "panels": [ { "cacheTimeout": null, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "mappings": [ { "id": 0, "op": "=", "text": "Hatching", "type": 1, "value": "1" }, { "id": 1, "op": "=", "text": "Stop", "type": 1, "value": "0" }, { "id": 2, "op": "=", "text": "Running", "type": 1, "value": "2" } ], "nullValueMode": "connected", "thresholds": { "mode": "absolute", "steps": [ { "color": "rgba(245, 54, 54, 0.9)", "value": null }, { "color": "rgba(237, 129, 40, 0.89)", "value": 0 }, { "color": "rgba(50, 172, 45, 0.97)", "value": 2 } ] }, "unit": "none" }, "overrides": [] }, "gridPos": { "h": 4, "w": 4, "x": 0, "y": 0 }, "id": 1, "interval": null, 
"links": [], "maxDataPoints": 100, "options": { "colorMode": "value", "fieldOptions": { "calcs": [ "mean" ] }, "graphMode": "area", "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ "last" ], "fields": "", "values": false }, "textMode": "auto" }, "pluginVersion": "7.1.0", "targets": [ { "expr": "locust_running", "interval": "", "intervalFactor": 2, "legendFormat": "", "metric": "", "refId": "A", "step": 20 } ], "title": "Locust Status", "type": "stat" }, { "cacheTimeout": null, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "mappings": [ { "id": 0, "op": "=", "text": "N/A", "type": 1, "value": "null" } ], "nullValueMode": "connected", "thresholds": { "mode": "absolute", "steps": [ { "color": "blue", "value": null } ] }, "unit": "none" }, "overrides": [] }, "gridPos": { "h": 4, "w": 4, "x": 4, "y": 0 }, "id": 2, "interval": null, "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", "fieldOptions": { "calcs": [ "mean" ] }, "graphMode": "area", "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ "last" ], "fields": "", "values": false }, "textMode": "auto" }, "pluginVersion": "7.1.0", "targets": [ { "expr": "locust_users", "interval": "", "intervalFactor": 2, "legendFormat": "", "refId": "A", "step": 20 } ], "title": "Swarmed users", "type": "stat" }, { "cacheTimeout": null, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "decimals": 2, "mappings": [ { "id": 0, "op": "=", "text": "N/A", "type": 1, "value": "null" } ], "nullValueMode": "connected", "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 8, "w": 6, "x": 8, "y": 0 }, "id": 4, "interval": null, "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", "fieldOptions": { "calcs": [ "mean" ] }, "graphMode": "area", "justifyMode": "auto", 
"orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "textMode": "auto" }, "pluginVersion": "7.1.0", "targets": [ { "expr": "locust_requests_current_rps{job=\"locust\", name=\"Aggregated\"}", "format": "time_series", "interval": "", "intervalFactor": 2, "legendFormat": "", "refId": "A", "step": 20 } ], "timeFrom": null, "timeShift": null, "title": "Current RPS", "type": "stat" }, { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null } ] } }, "overrides": [] }, "gridPos": { "h": 5, "w": 4, "x": 14, "y": 0 }, "id": 18, "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "reduceOptions": { "calcs": [ "last" ], "fields": "", "values": false }, "textMode": "auto" }, "pluginVersion": "7.1.0", "targets": [ { "expr": "locust_requests_num_requests{method=~\"\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Requests", "type": "stat" }, { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": { "align": null }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 3000 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 6, "x": 18, "y": 0 }, "id": 6, "links": [], "options": { "orientation": "auto", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "showThresholdLabels": false, "showThresholdMarkers": true }, "pluginVersion": "7.1.0", "targets": [ { "expr": "avg(locust_requests_max_response_time)", "intervalFactor": 2, "legendFormat": "AVG MAX", "refId": "A", "step": 2 }, { "expr": "avg(locust_requests_min_response_time)", "intervalFactor": 2, "legendFormat": "AVG MIN", "metric": "", "refId": "B", "step": 2 }, { "expr": "avg(locust_requests_avg_response_time)", "intervalFactor": 2, "legendFormat": 
"AVG AVG", "metric": "", "refId": "C", "step": 2 }, { "expr": "avg(locust_requests_median_response_time)", "intervalFactor": 2, "legendFormat": "AVG MEDIAN", "refId": "D", "step": 2 } ], "timeFrom": null, "timeShift": null, "title": "Response Times", "type": "gauge" }, { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": { "align": null }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "yellow", "value": 1.8 }, { "color": "red", "value": 2.0005 } ] } }, "overrides": [] }, "gridPos": { "h": 4, "w": 4, "x": 0, "y": 4 }, "id": 27, "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "reduceOptions": { "calcs": [ "last" ], "fields": "", "values": false }, "textMode": "auto" }, "pluginVersion": "7.1.0", "targets": [ { "expr": "locust_users", "interval": "", "legendFormat": "", "refId": "A" }, { "expr": "locust_slaves_count", "interval": "", "legendFormat": "", "refId": "B" } ], "timeFrom": null, "timeShift": null, "title": "Users/Worker Ratio", "transformations": [ { "id": "seriesToColumns", "options": {} }, { "id": "calculateField", "options": { "alias": "Users/Worker Pods", "binary": { "left": "locust_users{instance=\"le-pod:80\", job=\"locust\"}", "operator": "/", "reducer": "sum", "right": "locust_slaves_count{instance=\"le-pod:80\", job=\"locust\"}" }, "mode": "binary", "reduce": { "reducer": "sum" } } }, { "id": "organize", "options": { "excludeByName": { "Time": false, "locust_slaves_count{instance=\"le-pod:80\", job=\"locust\"}": true, "locust_users{instance=\"le-pod:80\", job=\"locust\"}": true }, "indexByName": {}, "renameByName": {} } } ], "type": "stat" }, { "cacheTimeout": null, "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "mappings": [ { "id": 0, "op": "=", "text": "N/A", "type": 1, "value": "" } ], "nullValueMode": "connected", "thresholds": { "mode": "absolute", "steps": [ { "color": "blue", 
"value": null } ] }, "unit": "none" }, "overrides": [] }, "gridPos": { "h": 4, "w": 4, "x": 4, "y": 4 }, "id": 3, "interval": null, "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", "fieldOptions": { "calcs": [ "mean" ] }, "graphMode": "area", "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "textMode": "auto" }, "pluginVersion": "7.1.0", "targets": [ { "expr": "locust_slaves_count", "interval": "", "intervalFactor": 2, "legendFormat": "", "refId": "A", "step": 20 } ], "title": "Connected workers", "type": "stat" }, { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "red", "value": null } ] } }, "overrides": [] }, "gridPos": { "h": 3, "w": 4, "x": 14, "y": 5 }, "id": 19, "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "reduceOptions": { "calcs": [ "mean" ], "fields": "", "values": false }, "textMode": "auto" }, "pluginVersion": "7.1.0", "targets": [ { "expr": "locust_requests_num_failures{method=~\"\"}", "interval": "", "legendFormat": "", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Failures", "type": "stat" }, { "columns": [], "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fontSize": "100%", "gridPos": { "h": 6, "w": 24, "x": 0, "y": 8 }, "id": 14, "pageSize": null, "showHeader": true, "sort": { "col": 0, "desc": true }, "styles": [ { "alias": "Time", "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "Time", "type": "hidden" }, { "alias": "Method", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, "pattern": "method", "preserveFormat": false, "thresholds": [], "type": "string", 
"unit": "short" }, { "alias": "URL", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, "pattern": "name", "thresholds": [], "type": "string", "unit": "short" }, { "alias": "MIN RT", "align": "", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 0, "mappingType": 1, "pattern": "Value #A", "thresholds": [], "type": "number", "unit": "ms" }, { "alias": "Errors", "align": "", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "decimals": 0, "pattern": "Value #B", "thresholds": [], "type": "number", "unit": "none" }, { "alias": "MAX RT", "align": "", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 0, "mappingType": 1, "pattern": "Value #C", "thresholds": [], "type": "number", "unit": "ms" }, { "alias": "MEDIAN RT", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 0, "mappingType": 1, "pattern": "Value #D", "thresholds": [], "type": "number", "unit": "ms" }, { "alias": "AVG RT", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, "pattern": "Value #E", "thresholds": [], "type": "number", "unit": "ms" }, { "alias": "Errors Ratio", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, "pattern": "Value #H", "thresholds": 
[], "type": "number", "unit": "reqps" }, { "alias": "Requests", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 0, "mappingType": 1, "pattern": "Value #F", "thresholds": [], "type": "number", "unit": "none" }, { "alias": "Requests Ratio", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, "pattern": "Value #G", "thresholds": [], "type": "number", "unit": "reqps" }, { "alias": "Content", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, "pattern": "Value #I", "thresholds": [], "type": "number", "unit": "bytes" } ], "targets": [ { "expr": "sum(locust_requests_avg_content_length{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "I" }, { "expr": "sum(locust_requests_min_response_time{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "A" }, { "expr": "sum(locust_requests_max_response_time{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "C" }, { "expr": "sum(locust_requests_avg_response_time{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "E" }, { "expr": "sum(locust_requests_median_response_time{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "D" }, { "expr": "sum(locust_requests_num_failures{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", 
"refId": "B" }, { "expr": "sum(locust_requests_current_fail_per_sec{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "H" }, { "expr": "sum(locust_requests_num_requests{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "F" }, { "expr": "sum(locust_requests_current_rps{method=~\".+\"}) by (method, name)", "format": "table", "instant": true, "interval": "", "legendFormat": "", "refId": "G" } ], "timeFrom": null, "timeShift": null, "title": "Endpoints", "transform": "table", "type": "table-old" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Prometheus", "editable": true, "error": false, "fieldConfig": { "defaults": { "custom": {}, "links": [] }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 14 }, "hiddenSeries": false, "id": 7, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "connected", "paceLength": 10, "percentage": false, "pluginVersion": "7.1.0", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "fail_ratio [%]", "yaxis": 2 } ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "rate(locust_requests_num_requests{method=~\".+\"}[1m])", "format": "time_series", "interval": "", "intervalFactor": 2, "legendFormat": " - ", "refId": "A", "step": 2 }, { "expr": " sum(rate(locust_requests_num_requests{method=~\".+\"}[1m]))", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "total", "refId": "C" }, { "expr": "locust_requests_fail_ratio * 100", "format": "time_series", "interval": "", "intervalFactor": 2, "legendFormat": "fail_ratio [%]", "refId": "B", "step": 2 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], 
"timeShift": null, "title": "Requests per endpoint / s", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "reqps", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "percent", "label": null, "logBase": 1, "max": null, "min": "0", "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Prometheus", "decimals": 0, "editable": true, "error": false, "fieldConfig": { "defaults": { "custom": {}, "links": [] }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 14 }, "hiddenSeries": false, "id": 15, "interval": "", "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "connected", "paceLength": 10, "percentage": false, "pluginVersion": "7.1.0", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "AVG MAX", "yaxis": 2 }, { "alias": "locust_users{instance=\"le-pod:80\", job=\"locust\"}", "yaxis": 2 }, { "alias": "Users", "yaxis": 2 } ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "locust_requests_current_response_time_percentile_95", "interval": "", "intervalFactor": 2, "legendFormat": "P95", "refId": "D", "step": 2 }, { "expr": "locust_requests_current_response_time_percentile_50", "interval": "", "legendFormat": "P50", "refId": "A" }, { "expr": "locust_users", "interval": "", "legendFormat": "Users", "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Response Times", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { 
"buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 0, "format": "ms", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "decimals": 0, "format": "none", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Google Cloud Monitoring", "fieldConfig": { "defaults": { "custom": {}, "links": [] }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 22 }, "hiddenSeries": false, "id": 23, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.0", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "metricQuery": { "aliasBy": "", "alignmentPeriod": "cloud-monitoring-auto", "crossSeriesReducer": "REDUCE_MEAN", "filters": [], "groupBys": [ "resource.label.instance_id" ], "metricKind": "GAUGE", "metricType": "compute.googleapis.com/instance/cpu/utilization", "perSeriesAligner": "ALIGN_INTERPOLATE", "projectName": "jcpistell-testspace", "unit": "10^2.%", "valueType": "DOUBLE" }, "queryType": "metrics", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Looker CPU", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "percentunit", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": 
{}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Google Cloud Monitoring", "fieldConfig": { "defaults": { "custom": {}, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 22 }, "hiddenSeries": false, "id": 25, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.0", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "metricQuery": { "aliasBy": "", "alignmentPeriod": "cloud-monitoring-auto", "crossSeriesReducer": "REDUCE_NONE", "filters": [], "groupBys": [ "resource.label.instance_id" ], "metricKind": "GAUGE", "metricType": "custom.googleapis.com/looker/puma_threads", "perSeriesAligner": "ALIGN_INTERPOLATE", "projectName": "jcpistell-testspace", "valueType": "DOUBLE" }, "queryType": "metrics", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Puma Thread Count", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "$$hashKey": "object:21715", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "$$hashKey": "object:21716", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Google Cloud Monitoring", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 
8, "w": 12, "x": 0, "y": 30 }, "hiddenSeries": false, "id": 37, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.0", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "metricQuery": { "aliasBy": "", "alignmentPeriod": "cloud-monitoring-auto", "crossSeriesReducer": "REDUCE_MEAN", "filters": [], "groupBys": [], "metricKind": "DELTA", "metricType": "cloudsql.googleapis.com/database/disk/write_ops_count", "perSeriesAligner": "ALIGN_DELTA", "projectName": "jcpistell-testspace", "unit": "1", "valueType": "INT64" }, "queryType": "metrics", "refId": "A" }, { "metricQuery": { "aliasBy": "", "alignmentPeriod": "cloud-monitoring-auto", "crossSeriesReducer": "REDUCE_MEAN", "filters": [], "groupBys": [], "metricKind": "DELTA", "metricType": "cloudsql.googleapis.com/database/disk/read_ops_count", "perSeriesAligner": "ALIGN_DELTA", "projectName": "jcpistell-testspace", "unit": "1", "valueType": "INT64" }, "queryType": "metrics", "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Database IOPS", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "$$hashKey": "object:19890", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "$$hashKey": "object:19891", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Google Cloud Monitoring", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, 
"fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 30 }, "hiddenSeries": false, "id": 33, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.0", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "metricQuery": { "aliasBy": "", "alignmentPeriod": "cloud-monitoring-auto", "crossSeriesReducer": "REDUCE_MEAN", "filters": [], "groupBys": [ "resource.label.database_id" ], "metricKind": "DELTA", "metricType": "cloudsql.googleapis.com/database/cpu/usage_time", "perSeriesAligner": "ALIGN_DELTA", "projectName": "jcpistell-testspace", "unit": "s{CPU}", "valueType": "DOUBLE" }, "queryType": "metrics", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Database CPU", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "percent", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Google Cloud Monitoring", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 38 }, "hiddenSeries": false, "id": 31, "legend": { "alignAsTable": false, "avg": false, "current": false, "max": false, "min": false, "rightSide": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.0", "pointradius": 2, 
"points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "metricQuery": { "aliasBy": "", "alignmentPeriod": "cloud-monitoring-auto", "crossSeriesReducer": "REDUCE_NONE", "filters": [], "groupBys": [], "metricKind": "GAUGE", "metricType": "agent.googleapis.com/jvm/memory/usage", "perSeriesAligner": "ALIGN_INTERPOLATE", "projectName": "jcpistell-testspace", "unit": "By", "valueType": "DOUBLE" }, "queryType": "metrics", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "JVM Heap Memory", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "$$hashKey": "object:21917", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "$$hashKey": "object:21918", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "Google Cloud Monitoring", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 38 }, "hiddenSeries": false, "id": 29, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "percentage": false, "pluginVersion": "7.1.0", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "metricQuery": { "aliasBy": "", "alignmentPeriod": "cloud-monitoring-auto", "crossSeriesReducer": "REDUCE_NONE", "filters": [], "groupBys": [], "metricKind": "CUMULATIVE", "metricType": "agent.googleapis.com/jvm/gc/count", 
"perSeriesAligner": "ALIGN_RATE", "projectName": "jcpistell-testspace", "unit": "1", "valueType": "INT64" }, "queryType": "metrics", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Garbage Collections", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {}, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "semi-dark-red", "value": null } ] }, "unit": "percentunit" }, "overrides": [] }, "gridPos": { "h": 7, "w": 6, "x": 0, "y": 46 }, "id": 21, "options": { "orientation": "auto", "reduceOptions": { "calcs": [ "last" ], "fields": "", "values": false }, "showThresholdLabels": false, "showThresholdMarkers": true }, "pluginVersion": "7.1.0", "targets": [ { "expr": "locust_requests_fail_ratio", "interval": "", "legendFormat": "", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Fails", "type": "gauge" }, { "columns": [], "datasource": "Prometheus", "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fontSize": "100%", "gridPos": { "h": 7, "w": 18, "x": 6, "y": 46 }, "id": 16, "pageSize": null, "showHeader": true, "sort": { "col": 0, "desc": true }, "styles": [ { "alias": "Time", "align": "auto", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "Time", "type": "hidden" }, { "alias": "Requests", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 0, "mappingType": 1, "pattern": "Value", "thresholds": [], 
"type": "number", "unit": "none" }, { "alias": "Method", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, "pattern": "method", "thresholds": [], "type": "string", "unit": "short" }, { "alias": "URL", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, "pattern": "name", "thresholds": [], "type": "string", "unit": "short" }, { "alias": "Error", "align": "auto", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "link": false, "mappingType": 1, "pattern": "error", "thresholds": [], "type": "string", "unit": "short" }, { "alias": "", "align": "left", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "decimals": 2, "pattern": "/.*/", "thresholds": [], "type": "number", "unit": "short" } ], "targets": [ { "expr": "sum(locust_errors) by (method, name, error)", "format": "table", "hide": false, "instant": true, "interval": "", "intervalFactor": 1, "legendFormat": "", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Errors", "transform": "table", "type": "table-old" } ], "refresh": "10s", "schemaVersion": 26, "style": "dark", "tags": [], "templating": { "list": [] }, "time": { "from": "now-3h", "to": "now" }, "timepicker": { "refresh_intervals": [ "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "timezone": "browser", "title": "Locust GCP Looker", "uid": "RGRtGt7Mk", "version": 1 } 60 | --------------------------------------------------------------------------------