├── .dockerignore
├── .gitattributes
├── .gitignore
├── ANALYTICS_LICENSE.txt
├── CODEOWNERS
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── FEEDBACK.md
├── LICENSE.txt
├── README.md
├── app
├── bamboo.yml
├── bitbucket.yml
├── confluence.yml
├── crowd.yml
├── datasets
│ ├── bitbucket
│ │ └── examples
│ │ │ ├── projects.csv
│ │ │ ├── pull_requests.csv
│ │ │ ├── repos.csv
│ │ │ └── users.csv
│ ├── confluence
│ │ └── static-content
│ │ │ ├── emojis_upload.csv
│ │ │ ├── files_upload.csv
│ │ │ ├── upload
│ │ │ │ ├── emoji1.jpg
│ │ │ │ ├── emoji2.png
│ │ │ │ ├── emoji3.gif
│ │ │ │ ├── emoji4.gif
│ │ │ │ ├── emoji5.gif
│ │ │ │ ├── test.pdf
│ │ │ │ ├── test1.png
│ │ │ │ ├── test2.jpg
│ │ │ │ ├── test3.png
│ │ │ │ ├── test4.png
│ │ │ │ └── test5.jpg
│ │ │ └── words.csv
│ ├── crowd
│ │ └── examples
│ │ │ └── users.csv
│ ├── jira
│ │ └── examples
│ │ │ ├── issues.csv
│ │ │ ├── jqls.csv
│ │ │ ├── kanban-boards.csv
│ │ │ ├── projects.csv
│ │ │ ├── scrum-boards.csv
│ │ │ └── users.csv
│ └── jsm
│ │ └── examples
│ │ ├── agents.csv
│ │ ├── customers.csv
│ │ ├── requests.csv
│ │ ├── service_desks_large.csv
│ │ ├── service_desks_medium.csv
│ │ └── service_desks_small.csv
├── extension
│ ├── __init__.py
│ ├── bamboo
│ │ ├── __init__.py
│ │ ├── extension_locust.py
│ │ └── extension_ui.py
│ ├── bitbucket
│ │ ├── __init__.py
│ │ └── extension_ui.py
│ ├── confluence
│ │ ├── __init__.py
│ │ ├── extension_locust.py
│ │ └── extension_ui.py
│ ├── jira
│ │ ├── __init__.py
│ │ ├── extension_locust.py
│ │ └── extension_ui.py
│ └── jsm
│ │ ├── __init__.py
│ │ ├── extension_locust_agents.py
│ │ ├── extension_locust_customers.py
│ │ ├── extension_ui_agents.py
│ │ └── extension_ui_customers.py
├── jira.yml
├── jmeter
│ ├── bamboo.jmx
│ ├── bitbucket.jmx
│ ├── confluence.jmx
│ ├── crowd.jmx
│ ├── jira.jmx
│ ├── jsm_agents.jmx
│ └── jsm_customers.jmx
├── jsm.yml
├── locustio
│ ├── bamboo
│ │ ├── http_actions.py
│ │ ├── locustfile.py
│ │ ├── locustfile_app_specific.py
│ │ └── requests_params.py
│ ├── common_utils.py
│ ├── confluence
│ │ ├── http_actions.py
│ │ ├── locustfile.py
│ │ ├── requests_params.py
│ │ └── resources.json
│ ├── jira
│ │ ├── http_actions.py
│ │ ├── locustfile.py
│ │ ├── requests_params.py
│ │ └── resources.json
│ └── jsm
│ │ ├── agents
│ │ │ ├── agents_http_actions.py
│ │ │ ├── agents_requests_params.py
│ │ │ └── agents_resources.json
│ │ ├── agents_locustfile.py
│ │ ├── customers
│ │ │ ├── customers_http_actions.py
│ │ │ ├── customers_requests_params.py
│ │ │ └── customers_resources.json
│ │ └── customers_locustfile.py
├── reports_generation
│ ├── README.md
│ ├── __init__.py
│ ├── bamboo_profile.yml
│ ├── csv_chart_generator.py
│ ├── performance_profile.yml
│ ├── scale_profile.yml
│ └── scripts
│ │ ├── __init__.py
│ │ ├── chart_generator.py
│ │ ├── config_provider.py
│ │ ├── csv_aggregator.py
│ │ ├── results_archivator.py
│ │ ├── summary_aggregator.py
│ │ └── utils.py
├── selenium_ui
│ ├── __init__.py
│ ├── bamboo
│ │ ├── __init__.py
│ │ ├── modules.py
│ │ └── pages
│ │ │ ├── pages.py
│ │ │ └── selectors.py
│ ├── bamboo_ui.py
│ ├── base_page.py
│ ├── bitbucket
│ │ ├── __init__.py
│ │ ├── modules.py
│ │ └── pages
│ │ │ ├── pages.py
│ │ │ └── selectors.py
│ ├── bitbucket_ui.py
│ ├── confluence
│ │ ├── __init__.py
│ │ ├── modules.py
│ │ └── pages
│ │ │ ├── pages.py
│ │ │ └── selectors.py
│ ├── confluence_ui.py
│ ├── conftest.py
│ ├── jira
│ │ ├── __init__.py
│ │ ├── modules.py
│ │ └── pages
│ │ │ ├── pages.py
│ │ │ └── selectors.py
│ ├── jira_ui.py
│ ├── jsm
│ │ ├── __init__.py
│ │ ├── modules_agents.py
│ │ ├── modules_customers.py
│ │ └── pages
│ │ │ ├── agent_pages.py
│ │ │ ├── agent_selectors.py
│ │ │ ├── customer_pages.py
│ │ │ └── customer_selectors.py
│ ├── jsm_ui_agents.py
│ └── jsm_ui_customers.py
└── util
│ ├── __init__.py
│ ├── analytics
│ ├── __init__.py
│ ├── analytics.py
│ ├── analytics_utils.py
│ ├── application_info.py
│ ├── bamboo_post_run_collector.py
│ └── log_reader.py
│ ├── api
│ ├── __init__.py
│ ├── abstract_clients.py
│ ├── bamboo_clients.py
│ ├── bitbucket_clients.py
│ ├── confluence_clients.py
│ ├── crowd_clients.py
│ ├── jira_clients.py
│ └── jsm_clients.py
│ ├── bamboo
│ └── bamboo_dataset_generator
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── pom.xml
│ │ ├── run.bat
│ │ ├── run.sh
│ │ └── src
│ │ └── main
│ │ ├── java
│ │ └── bamboogenerator
│ │ │ ├── Main.java
│ │ │ ├── model
│ │ │ └── PlanInfo.java
│ │ │ └── service
│ │ │ ├── BambooClient.java
│ │ │ ├── BambooServerInitializer.java
│ │ │ ├── PlansPublisher.java
│ │ │ └── generator
│ │ │ └── plan
│ │ │ ├── InlineBodies.java
│ │ │ ├── PlanGenerator.java
│ │ │ └── PlanInfoGenerator.java
│ │ └── resources
│ │ └── log4j2.xml
│ ├── common_util.py
│ ├── conf.py
│ ├── confluence
│ └── browser_metrics.py
│ ├── data_preparation
│ ├── __init__.py
│ ├── bamboo_prepare_data.py
│ ├── bitbucket_prepare_data.py
│ ├── confluence_prepare_data.py
│ ├── crowd_prepare_data.py
│ ├── crowd_sync_check.py
│ ├── jira_prepare_data.py
│ ├── jsm_prepare_data.py
│ └── prepare_data_common.py
│ ├── default_test_actions.json
│ ├── exceptions.py
│ ├── jmeter
│ ├── README.md
│ ├── __init__.py
│ └── start_jmeter_ui.py
│ ├── jtl_convertor
│ ├── __init__.py
│ ├── jtl_validator.py
│ ├── jtls-to-csv.py
│ ├── validation_exception.py
│ └── validation_funcs.py
│ ├── k8s
│ ├── README.MD
│ ├── aws_envs
│ ├── bzt_on_pod.sh
│ ├── copy_run_results.sh
│ ├── dcapt-small.tfvars
│ ├── dcapt-snapshots.json
│ ├── dcapt.tfvars
│ ├── script-runner.yml
│ └── terminate_cluster.py
│ ├── post_run
│ ├── __init__.py
│ ├── cleanup_results_dir.py
│ └── jmeter_post_check.py
│ ├── pre_run
│ ├── __init__.py
│ ├── environment_checker.py
│ ├── environment_compliance_check.py
│ └── git_client_check.py
│ └── project_paths.py
├── docs
├── bitbucket
│ └── README.md
├── confluence
│ └── README.md
├── crowd
│ └── README.md
├── dc-apps-performance-toolkit-user-guide-bamboo.md
├── dc-apps-performance-toolkit-user-guide-bitbucket.md
├── dc-apps-performance-toolkit-user-guide-confluence.md
├── dc-apps-performance-toolkit-user-guide-crowd.md
├── dc-apps-performance-toolkit-user-guide-jira.md
├── dc-apps-performance-toolkit-user-guide-jsm.md
├── jira
│ └── README.md
└── jsm
│ └── README.md
├── renovate.json
└── requirements.txt
/.dockerignore:
--------------------------------------------------------------------------------
1 | *
2 | !requirements.txt
3 | !Dockerfile
4 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.sh text eol=lf
2 | dcapt-snapshots.json text eol=lf
3 | *.tfvars text eol=lf
--------------------------------------------------------------------------------
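The attributes above force LF line endings for shell scripts, `dcapt-snapshots.json`, and `*.tfvars` files. As a quick sanity check (a minimal sketch; the paths are taken from the tree above), `git check-attr` prints the attributes Git resolves for a given path:

```bash
# Show how the text/eol attributes resolve for a script and a tfvars file
git check-attr text eol -- app/util/k8s/bzt_on_pod.sh app/util/k8s/dcapt.tfvars
```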
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | outputs/
3 | **/results/
4 | **/datasets/*/*.csv
5 | *venv*
6 | .DS_Store
7 | *.swp
8 | *.iml
9 | *.log
10 | *.pyc
11 | local_*.yml
12 | **jmeter.properties
13 |
--------------------------------------------------------------------------------
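To see which of the rules above hides a given path (handy when run results or generated dataset CSVs do not show up in `git status`), `git check-ignore -v` reports the matching pattern; the paths below are illustrative only:

```bash
# Print the .gitignore rule (source:line:pattern) matching each path
git check-ignore -v app/results/ app/datasets/jira/issues.csv
```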
/ANALYTICS_LICENSE.txt:
--------------------------------------------------------------------------------
1 | Atlassian is always striving to improve the Data Center App Performance Toolkit.
2 | In order to do so, Atlassian uses analytics techniques to better understand how the Data Center App Performance Toolkit is being used.
3 | For more information on these techniques and the type of data collected, please read our Privacy Policy (https://www.atlassian.com/legal/privacy-policy).
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # Default code owners - Atlassian Data Center App Performance Toolkit
2 | * @ometelytsia @SergeyMoroz0703 @OlehStefanyshyn
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Code of Conduct
2 |
3 | As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities.
4 |
5 | We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality.
6 |
7 | Examples of unacceptable behavior by participants include:
8 |
9 | * The use of sexualized language or imagery
10 | * Personal attacks
11 | * Trolling or insulting/derogatory comments
12 | * Public or private harassment
13 | * Publishing others' private information, such as physical or electronic addresses, without explicit permission
14 | * Submitting contributions or comments that you know to violate the intellectual property or privacy rights of others
15 | * Other unethical or unprofessional conduct
16 |
17 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
18 | By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team.
19 |
20 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community.
21 |
22 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a project maintainer. Complaints will be reviewed and investigated, and will result in a response that is deemed necessary and appropriate to the circumstances. Maintainers are obligated to maintain confidentiality with regard to the reporter of an incident.
23 |
24 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.3.0, available at [http://contributor-covenant.org/version/1/3/0/][version]
25 |
26 | [homepage]: http://contributor-covenant.org
27 | [version]: http://contributor-covenant.org/version/1/3/0/
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Thanks for taking the time to contribute!
4 |
5 | The following is a set of guidelines for contributing to [DC App Performance Toolkit](README.md).
6 | All changes are welcome. Please help us improve the code, examples, and documentation.
7 |
8 |
9 | ## Submitting changes
10 |
11 | Pull requests, issues and comments are welcome. For pull requests:
12 |
13 | - Create your own fork of the repository and raise a pull request targeting the `dev` branch of the main repository
14 | - Separate unrelated changes into multiple pull requests
15 |
16 | See the [existing issues](https://ecosystem.atlassian.net/projects/DAPT/issues) for ideas on where to start contributing.
17 |
18 | For bigger changes, make sure you start a discussion first by creating
19 | an issue and explaining the intended change.
20 |
21 | All pull requests and other changes are reviewed and merged by Atlassians.
22 |
23 | Atlassian requires contributors to sign a Contributor License Agreement,
24 | known as a CLA. This serves as a record stating that the contributor is
25 | entitled to contribute the code/documentation/translation to the project
26 | and is willing to have it used in distributions and derivative works
27 | (or is willing to transfer ownership).
28 |
29 | Prior to accepting your contributions we ask that you please follow the appropriate
30 | link below to digitally sign the CLA. The Corporate CLA is for those who are
31 | contributing as a member of an organization and the individual CLA is for
32 | those contributing as an individual.
33 |
34 | * [CLA for corporate contributors](https://opensource.atlassian.com/corporate)
35 | * [CLA for individuals](https://opensource.atlassian.com/individual)
--------------------------------------------------------------------------------
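A minimal sketch of the fork-and-pull-request flow described above (the fork URL and the branch name `my-change` are placeholders; the pull request should target the `dev` branch of the main repository):

```bash
# Clone your fork and branch off dev
git clone git@github.com:<your-username>/dc-app-performance-toolkit.git
cd dc-app-performance-toolkit
git checkout -b my-change origin/dev

# ...make and commit your changes...

# Push the branch and open a pull request against the dev branch of atlassian/dc-app-performance-toolkit
git push -u origin my-change
```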
/Dockerfile:
--------------------------------------------------------------------------------
1 | # name: atlassian/dcapt
2 | # working dir: dc-app-performance-toolkit
3 | # build: docker build -t atlassian/dcapt .
4 | # bzt run: docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml
5 | # interactive run: docker run -it --entrypoint="/bin/bash" -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt
6 |
7 | FROM python:3.13-slim-bookworm
8 |
9 | ENV APT_INSTALL="apt-get -y install --no-install-recommends"
10 |
11 | ARG CHROME_VERSION="latest"
12 | ARG INCLUDE_BZT_TOOLS="false"
13 |
14 | ENV CHROME_LATEST_URL="https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"
15 | ENV CHROME_VERSION_URL="https://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_${CHROME_VERSION}_amd64.deb"
16 |
17 | RUN apt-get -y update \
18 | && $APT_INSTALL vim git openssh-server wget \
19 | && python -m pip install --upgrade pip \
20 | && apt-get clean
21 |
22 | RUN wget https://download.oracle.com/java/21/latest/jdk-21_linux-x64_bin.deb \
23 | && $APT_INSTALL ./jdk-21_linux-x64_bin.deb \
24 | && rm -rf ./jdk-21_linux-x64_bin.deb
25 |
26 | RUN if [ "$CHROME_VERSION" = "latest" ]; then wget -O google-chrome.deb $CHROME_LATEST_URL; else wget -O google-chrome.deb $CHROME_VERSION_URL; fi \
27 | && $APT_INSTALL ./google-chrome.deb \
28 | && rm -rf ./google-chrome.deb
29 |
30 | COPY requirements.txt /tmp/requirements.txt
31 | RUN pip install --no-cache-dir -r /tmp/requirements.txt
32 |
33 | RUN if [ "$INCLUDE_BZT_TOOLS" = "true" ]; then \
34 | wget https://blazemeter-tools.s3.us-east-2.amazonaws.com/bzt.tar.gz -O /tmp/bzt.tar.gz && \
35 | tar -xzf /tmp/bzt.tar.gz -C /root && \
36 | rm /tmp/bzt.tar.gz; \
37 | fi
38 |
39 | WORKDIR /dc-app-performance-toolkit/app
40 |
41 | ENTRYPOINT ["bzt", "-o", "modules.console.disable=true"]
42 |
--------------------------------------------------------------------------------
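The header comments show the default build and run commands. The `CHROME_VERSION` and `INCLUDE_BZT_TOOLS` build arguments declared above can also be set explicitly; a sketch (the pinned Chrome version is illustrative and must exist in Google's deb pool for the download to succeed):

```bash
# Build with a pinned Chrome version and the optional BZT tools bundle
docker build -t atlassian/dcapt \
  --build-arg CHROME_VERSION=135.0.7049.114-1 \
  --build-arg INCLUDE_BZT_TOOLS=true .

# Run a toolkit configuration with the resulting image
docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml
```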
/FEEDBACK.md:
--------------------------------------------------------------------------------
1 | # Providing feedback on DC App Performance Toolkit
2 |
3 | Thank you for taking the time to provide feedback on DAPT.
4 |
5 | ## Chat
6 | Come to our public [Atlassian Performance Tools][slack-invite] Slack channel and chat.
7 | Feel free to ask any questions, discuss or lurk.
8 |
9 | ## Jira
10 |
11 | Issues are tracked in the [public Ecosystem Jira instance][ecosystem-jira],
12 | in the DC App Performance Toolkit (DAPT) project.
13 |
14 | ## Raising bugs
15 |
16 | Before you raise a bug, take a look at the [known bug list][bug-list].
17 |
18 | If you find your bug there, vote for it and watch it. Add comments if it's missing
19 | some important context, or [contribute][article-contributing] a solution.
20 | We'll provide further updates in the ticket.
21 |
22 | If you don't find it, [create a new bug][bug-create]. When you create a
23 | new bug, please include the following information:
24 |
25 | * description of the problem
26 | * requirements and steps to reproduce the issue
27 | * expected behavior
28 | * actual behavior
29 | * attached log files
30 |
31 | ## Adding suggestions
32 |
33 | Before you share a suggestion, take a look at the
34 | [existing suggestions list][suggestion-list].
35 |
36 | If you find your suggestion there, vote for it and watch it. We'll provide further updates
37 | in the ticket.
38 |
39 | If you don't find it, [create a new suggestion][suggestion-create]. When
40 | you create a new suggestion, make sure you describe:
41 |
42 | * the problem you would like to solve,
43 | * optionally, an example,
44 | * optionally, how you think it should be solved.
45 |
46 | [slack-invite]: http://go.atlassian.com/jpt-slack
47 | [article-contributing]: CONTRIBUTING.md
48 | [ecosystem-jira]: https://ecosystem.atlassian.net/projects/DAPT/issues
49 | [suggestion-list]: https://ecosystem.atlassian.net/issues/?filter=62045
50 | [suggestion-create]: https://ecosystem.atlassian.net/secure/CreateIssue!default.jspa?projectKey=DAPT&issuetype=11500
51 | [bug-list]: https://ecosystem.atlassian.net/issues/?filter=62046
52 | [bug-create]: https://ecosystem.atlassian.net/secure/CreateIssue!default.jspa?projectKey=DAPT&issuetype=1
--------------------------------------------------------------------------------
/app/bitbucket.yml:
--------------------------------------------------------------------------------
1 | ---
2 | settings:
3 | artifacts-dir: results/bitbucket/%Y-%m-%d_%H-%M-%S
4 | aggregator: consolidator
5 | verbose: false
6 | check-updates: false # disable bzt check for updates
7 | env:
8 | application_hostname: test_bitbucket_instance.atlassian.com # Bitbucket DC hostname without protocol and port e.g. test-bitbucket.atlassian.com or localhost
9 | application_protocol: http # http or https
10 | application_port: 80 # 80, 443, 8080, 7990 etc
11 | secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate
12 | application_postfix: /bitbucket # e.g. /bitbucket for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. Leave this value blank for url without postfix.
13 | admin_login: admin
14 | admin_password: admin
15 | load_executor: jmeter # only jmeter executor is supported
16 | concurrency: 20 # number of concurrent virtual users for jmeter scenario
17 | test_duration: 50m
18 | ramp-up: 10m # time to spin up all concurrent users
19 | total_actions_per_hour: 32700
20 | WEBDRIVER_VISIBLE: False
21 | JMETER_VERSION: 5.6.3
22 | LANGUAGE: en_US.utf8
23 | allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README.
24 | environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it.
25 | services:
26 | - module: shellexec
27 | prepare:
28 | - python util/pre_run/environment_checker.py
29 | - python util/pre_run/environment_compliance_check.py bitbucket
30 | - python util/pre_run/git_client_check.py
31 | - python util/data_preparation/bitbucket_prepare_data.py
32 | shutdown:
33 | - python util/post_run/jmeter_post_check.py
34 | - python util/jtl_convertor/jtls-to-csv.py kpi.jtl selenium.jtl
35 | post-process:
36 | - python util/analytics/analytics.py bitbucket
37 | - python util/post_run/cleanup_results_dir.py
38 | - module: pip-install
39 | packages:
40 | - selenium==4.31.0
41 | execution:
42 | - scenario: ${load_executor}
43 | concurrency: ${concurrency}
44 | hold-for: ${test_duration}
45 | ramp-up: ${ramp-up}
46 | - scenario: selenium
47 | executor: selenium
48 | runner: pytest
49 | hold-for: ${test_duration}
50 | scenarios:
51 | selenium:
52 | script: selenium_ui/bitbucket_ui.py
53 | jmeter:
54 | script: jmeter/bitbucket.jmx
55 | properties:
56 | admin_login: ${admin_login}
57 | admin_password: ${admin_password}
58 | application_hostname: ${application_hostname}
59 | application_protocol: ${application_protocol}
60 | application_port: ${application_port}
61 | application_postfix: ${application_postfix}
62 | total_actions_per_hr: ${total_actions_per_hour}
63 | tmp_dir: /tmp
64 | ssh_key_url: https://centaurus-datasets.s3.us-east-2.amazonaws.com/bitbucket/ssh/id_rsa
65 | modules:
66 | consolidator:
67 | rtimes-len: 0 # CONFSRVDEV-7631 reduce sampling
68 | percentiles: [] # CONFSRVDEV-7631 disable all percentiles due to Taurus's excessive memory usage
69 | jmeter:
70 | version: ${JMETER_VERSION}
71 | detect-plugins: true
72 | memory-xmx: 8G # allow JMeter to use up to 8G of memory
73 | plugins:
74 | - jpgc-casutg=2.10
75 | - jpgc-dummy=0.4
76 | - jpgc-ffw=2.0
77 | - jpgc-fifo=0.2
78 | - jpgc-functions=2.2
79 | - jpgc-json=2.7
80 | - jpgc-perfmon=2.1
81 | - jpgc-prmctl=0.4
82 | - jpgc-tst=2.6
83 | - bzm-random-csv=0.8 # not used in the default jmx file
84 | system-properties:
85 | server.rmi.ssl.disable: true
86 | java.rmi.server.hostname: localhost
87 | httpsampler.ignore_failed_embedded_resources: "true"
88 | selenium:
89 | chromedriver:
90 | version: "135.0.7049.114" # Supports Chrome version 135. You can refer to https://googlechromelabs.github.io/chrome-for-testing
91 | reporting:
92 | - data-source: sample-labels
93 | module: junit-xml
94 |
--------------------------------------------------------------------------------
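Instead of editing the file, any of the `settings.env` values above should also be overridable at run time with Taurus `-o` switches (the same mechanism the Dockerfile entrypoint uses for `modules.console.disable`); a sketch with illustrative values for a short smoke run:

```bash
# Override concurrency and duration without touching bitbucket.yml
bzt bitbucket.yml \
  -o settings.env.concurrency=2 \
  -o settings.env.test_duration=5m \
  -o settings.env.ramp-up=1m
```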
/app/crowd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | settings:
3 | artifacts-dir: results/crowd/%Y-%m-%d_%H-%M-%S
4 | aggregator: consolidator
5 | verbose: false
6 | check-updates: false # disable bzt check for updates
7 | env:
8 | application_hostname: test_crowd_instance.atlassian.com # Crowd DC hostname without protocol and port e.g. test-crowd.atlassian.com or localhost
9 | application_protocol: http # http or https
10 | application_port: 80 # 80, 443, 8080, 4990, etc
11 | secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate
12 | application_postfix: /crowd # e.g. /crowd in case of url like http://localhost:4990/crowd
13 | admin_login: admin
14 | admin_password: admin
15 | application_name: crowd
16 | application_password: 1111
17 | load_executor: jmeter
18 | concurrency: 1000 # number of concurrent threads to authenticate random users
19 | test_duration: 45m
20 |
21 | # 1 node scenario parameters
22 | ramp-up: 20s # time to spin up all concurrent threads
23 | total_actions_per_hour: 180000 # number of total JMeter actions per hour
24 |
25 | # 2 nodes scenario parameters
26 | # ramp-up: 10s # time to spin up all concurrent threads
27 | # total_actions_per_hour: 360000 # number of total JMeter actions per hour
28 |
29 | # 4 nodes scenario parameters
30 | # ramp-up: 5s # time to spin up all concurrent threads
31 | # total_actions_per_hour: 720000 # number of total JMeter actions per hour
32 |
33 | JMETER_VERSION: 5.6.3
34 | LANGUAGE: en_US.utf8
35 | allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README.
36 | environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it.
37 | services:
38 | - module: shellexec
39 | prepare:
40 | - python util/pre_run/environment_checker.py
41 | - python util/pre_run/environment_compliance_check.py crowd
42 | - python util/data_preparation/crowd_prepare_data.py
43 | - python util/data_preparation/crowd_sync_check.py
44 | shutdown:
45 | - python util/post_run/jmeter_post_check.py
46 | - python util/jtl_convertor/jtls-to-csv.py kpi.jtl
47 | post-process:
48 | - python util/analytics/analytics.py crowd
49 | - python util/post_run/cleanup_results_dir.py
50 | execution:
51 | - scenario: ${load_executor}
52 | executor: ${load_executor}
53 | concurrency: ${concurrency}
54 | hold-for: ${test_duration}
55 | ramp-up: ${ramp-up}
56 | scenarios:
57 | jmeter:
58 | script: jmeter/crowd.jmx
59 | properties:
60 | application_hostname: ${application_hostname}
61 | application_protocol: ${application_protocol}
62 | application_port: ${application_port}
63 | application_postfix: ${application_postfix}
64 | application_name: ${application_name}
65 | application_password: ${application_password}
66 | # Workload model
67 | total_actions_per_hr: ${total_actions_per_hour}
68 | modules:
69 | consolidator:
70 | rtimes-len: 0 # CONFSRVDEV-7631 reduce sampling
71 | percentiles: [] # CONFSRVDEV-7631 disable all percentiles due to Taurus's excessive memory usage
72 | jmeter:
73 | version: ${JMETER_VERSION}
74 | detect-plugins: true
75 | memory-xmx: 8G # allow JMeter to use up to 8G of memory
76 | plugins:
77 | - jpgc-casutg=2.10
78 | - jpgc-dummy=0.4
79 | - jpgc-ffw=2.0
80 | - jpgc-fifo=0.2
81 | - jpgc-functions=2.2
82 | - jpgc-json=2.7
83 | - jpgc-perfmon=2.1
84 | - jpgc-prmctl=0.4
85 | - jpgc-tst=2.6
86 | - bzm-random-csv=0.8 # not used in the default jmx file
87 | system-properties:
88 | server.rmi.ssl.disable: true
89 | java.rmi.server.hostname: localhost
90 | httpsampler.ignore_failed_embedded_resources: "true"
91 | reporting:
92 | - data-source: sample-labels
93 | module: junit-xml
--------------------------------------------------------------------------------
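Following the run pattern from the Dockerfile header, the Crowd configuration is executed the same way; the commented 1/2/4-node blocks above are selected by uncommenting the matching `ramp-up` and `total_actions_per_hour` pair. A containerized run might look like:

```bash
# Run the Crowd scenario from the repository root inside the dcapt image
docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml
```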
/app/datasets/bitbucket/examples/projects.csv:
--------------------------------------------------------------------------------
1 | PRJ-1,5
2 |
--------------------------------------------------------------------------------
/app/datasets/bitbucket/examples/pull_requests.csv:
--------------------------------------------------------------------------------
1 | prj-1000-repo-1,PRJ-1000,perf-branch-56,master,perf-branch-55,master
2 |
--------------------------------------------------------------------------------
/app/datasets/bitbucket/examples/repos.csv:
--------------------------------------------------------------------------------
1 | prj-1000-repo-1,PRJ-1000
2 |
--------------------------------------------------------------------------------
/app/datasets/bitbucket/examples/users.csv:
--------------------------------------------------------------------------------
1 | 3,user-1,user-1
2 |
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/emojis_upload.csv:
--------------------------------------------------------------------------------
1 | datasets/confluence/static-content/upload/emoji1.jpg,image/jpeg,emoji1.jpg
2 | datasets/confluence/static-content/upload/emoji2.png,image/png,emoji2.png
3 | datasets/confluence/static-content/upload/emoji3.gif,image/gif,emoji3.gif
4 | datasets/confluence/static-content/upload/emoji4.gif,image/gif,emoji4.gif
5 | datasets/confluence/static-content/upload/emoji5.gif,image/gif,emoji5.gif
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/files_upload.csv:
--------------------------------------------------------------------------------
1 | datasets/confluence/static-content/upload/test1.png,image/png,test1.png
2 | datasets/confluence/static-content/upload/test2.jpg,image/jpeg,test2.jpg
3 | datasets/confluence/static-content/upload/test3.png,image/png,test3.png
4 | datasets/confluence/static-content/upload/test4.png,image/png,test4.png
5 | datasets/confluence/static-content/upload/test5.jpg,image/jpeg,test5.jpg
6 | datasets/confluence/static-content/upload/test.pdf,application/pdf,test.pdf
7 |
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/emoji1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/emoji1.jpg
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/emoji2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/emoji2.png
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/emoji3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/emoji3.gif
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/emoji4.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/emoji4.gif
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/emoji5.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/emoji5.gif
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/test.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/test.pdf
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/test1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/test1.png
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/test2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/test2.jpg
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/test3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/test3.png
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/test4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/test4.png
--------------------------------------------------------------------------------
/app/datasets/confluence/static-content/upload/test5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/test5.jpg
--------------------------------------------------------------------------------
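The binaries referenced by `emojis_upload.csv` and `files_upload.csv` live in the repository at the raw URLs listed above. Purely as an illustration, a single asset could be re-fetched into the path the CSV files expect:

```bash
# Re-download one upload asset to the location referenced by the CSV files
curl -L -o app/datasets/confluence/static-content/upload/emoji1.jpg \
  https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/datasets/confluence/static-content/upload/emoji1.jpg
```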
/app/datasets/crowd/examples/users.csv:
--------------------------------------------------------------------------------
1 | performance_username,password
--------------------------------------------------------------------------------
/app/datasets/jira/examples/issues.csv:
--------------------------------------------------------------------------------
1 | TEST-1,10000,TEST
2 |
--------------------------------------------------------------------------------
/app/datasets/jira/examples/jqls.csv:
--------------------------------------------------------------------------------
1 | text ~ "a*" order by key
--------------------------------------------------------------------------------
/app/datasets/jira/examples/kanban-boards.csv:
--------------------------------------------------------------------------------
1 | 2
--------------------------------------------------------------------------------
/app/datasets/jira/examples/projects.csv:
--------------------------------------------------------------------------------
1 | ABC,10000
--------------------------------------------------------------------------------
/app/datasets/jira/examples/scrum-boards.csv:
--------------------------------------------------------------------------------
1 | 1
--------------------------------------------------------------------------------
/app/datasets/jira/examples/users.csv:
--------------------------------------------------------------------------------
1 | admin,password
--------------------------------------------------------------------------------
/app/datasets/jsm/examples/agents.csv:
--------------------------------------------------------------------------------
1 | test_username,password
2 |
--------------------------------------------------------------------------------
/app/datasets/jsm/examples/customers.csv:
--------------------------------------------------------------------------------
1 | testusername, password
--------------------------------------------------------------------------------
/app/datasets/jsm/examples/requests.csv:
--------------------------------------------------------------------------------
1 | 1,PRJ-1,1,10000,PRJ
2 |
--------------------------------------------------------------------------------
/app/datasets/jsm/examples/service_desks_large.csv:
--------------------------------------------------------------------------------
1 | # Projects with > 100k requests.
2 | service_desk_id,project_id,project_key,total_requests_count,all_open_queue_id,created_vs_resolved_id_report_id,time_to_resolution_id_report_id
3 | 1,1,TESTPRJ,350000,10,11,12
--------------------------------------------------------------------------------
/app/datasets/jsm/examples/service_desks_medium.csv:
--------------------------------------------------------------------------------
1 | # Projects with > 10k and < 100k requests.
2 | service_desk_id,project_id,project_key,total_requests_count,all_open_queue_id,created_vs_resolved_id_report_id,time_to_resolution_id_report_id
3 | 1,1,TESTPRJ,350000,10,11,12
--------------------------------------------------------------------------------
/app/datasets/jsm/examples/service_desks_small.csv:
--------------------------------------------------------------------------------
1 | # Projects with < 10k requests.
2 | service_desk_id,project_id,project_key,total_requests_count,all_open_queue_id,created_vs_resolved_id_report_id,time_to_resolution_id_report_id
3 | 1,1,TESTPRJ,350000,10,11,12
--------------------------------------------------------------------------------
/app/extension/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/extension/__init__.py
--------------------------------------------------------------------------------
/app/extension/bamboo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/extension/bamboo/__init__.py
--------------------------------------------------------------------------------
/app/extension/bamboo/extension_locust.py:
--------------------------------------------------------------------------------
1 | import re
2 | from locustio.common_utils import init_logger, bamboo_measure, run_as_specific_user # noqa F401
3 |
4 | logger = init_logger(app_type='bamboo')
5 |
6 |
7 | @bamboo_measure("locust_app_specific_action")
8 | # @run_as_specific_user(username='admin', password='admin') # run as specific user
9 | def app_specific_action(locust):
10 | r = locust.get('/app/get_endpoint', catch_response=True) # call app-specific GET endpoint
11 | content = r.content.decode('utf-8') # decode response content
12 |
13 | token_pattern_example = '"token":"(.+?)"'
14 | id_pattern_example = '"id":"(.+?)"'
15 | token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
16 | id = re.findall(id_pattern_example, content) # get ID from response using regexp
17 |
18 | logger.locust_info(f'token: {token}, id: {id}') # log info for debug when verbose is true in bamboo.yml file
19 | if 'assertion string' not in content:
20 | logger.error(f"'assertion string' was not found in {content}")
21 | assert 'assertion string' in content # assert specific string in response content
22 |
23 | body = {"id": id, "token": token} # include parsed variables to POST request body
24 | headers = {'content-type': 'application/json'}
25 | r = locust.post('/app/post_endpoint', body, headers, catch_response=True) # call app-specific POST endpoint
26 | content = r.content.decode('utf-8')
27 | if 'assertion string after successful POST request' not in content:
28 | logger.error(f"'assertion string after successful POST request' was not found in {content}")
29 | assert 'assertion string after successful POST request' in content # assertion after POST request
30 |
--------------------------------------------------------------------------------
/app/extension/bamboo/extension_ui.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from selenium.webdriver.common.by import By
4 |
5 | from selenium_ui.base_page import BasePage
6 | from selenium_ui.conftest import print_timing
7 | from selenium_ui.bamboo.pages.pages import Login
8 | from util.conf import BAMBOO_SETTINGS
9 |
10 |
11 | def app_specific_action(webdriver, datasets):
12 | page = BasePage(webdriver)
13 | rnd_plan = random.choice(datasets["build_plans"])
14 |
15 | build_plan_id = rnd_plan[1]
16 |
17 | # To run the action as a specific user, uncomment the code below.
18 | # NOTE: If app_specific_action is running as specific user, make sure that app_specific_action is running
19 | # just before test_2_selenium_z_log_out action
20 | #
21 | # @print_timing("selenium_app_specific_user_login")
22 | # def measure():
23 | # def app_specific_user_login(username='admin', password='admin'):
24 | # login_page = Login(webdriver)
25 | # login_page.delete_all_cookies()
26 | # login_page.go_to()
27 | # login_page.set_credentials(username=username, password=password)
28 | # login_page.click_login_button()
29 | # app_specific_user_login(username='admin', password='admin')
30 | # measure()
31 |
32 | @print_timing("selenium_app_custom_action")
33 | def measure():
34 | @print_timing("selenium_app_custom_action:view_plan_summary_page")
35 | def sub_measure():
36 | page.go_to_url(f"{BAMBOO_SETTINGS.server_url}/browse/{build_plan_id}")
37 | page.wait_until_visible((By.ID, "buildResultsTable")) # Wait for the build results table to be visible
38 | # Wait for your app-specific UI element by ID selector
39 | page.wait_until_visible((By.ID, "ID_OF_YOUR_APP_SPECIFIC_UI_ELEMENT"))
40 | sub_measure()
41 | measure()
42 |
--------------------------------------------------------------------------------
/app/extension/bitbucket/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/extension/bitbucket/__init__.py
--------------------------------------------------------------------------------
/app/extension/bitbucket/extension_ui.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from selenium.webdriver.common.by import By
4 |
5 | from selenium_ui.base_page import BasePage
6 | from selenium_ui.conftest import print_timing
7 | from selenium_ui.bitbucket.pages.pages import LoginPage, GetStarted, AdminPage, PopupManager
8 | from util.conf import BITBUCKET_SETTINGS
9 |
10 |
11 | def app_specific_action(webdriver, datasets):
12 | page = BasePage(webdriver)
13 | rnd_repo = random.choice(datasets["repos"])
14 |
15 | project_key = rnd_repo[1]
16 | repo_slug = rnd_repo[0]
17 |
18 | # To run the action as a specific user, uncomment the code below.
19 | # NOTE: If app_specific_action is running as specific user, make sure that app_specific_action is running
20 | # just before test_2_selenium_logout action
21 |
22 | # @print_timing("selenium_app_specific_user_login")
23 | # def measure():
24 | # def app_specific_user_login(username='admin', password='admin'):
25 | # login_page = LoginPage(webdriver)
26 | # login_page.delete_all_cookies()
27 | # login_page.go_to()
28 | # login_page.wait_for_page_loaded()
29 | # login_page.set_credentials(username=username, password=password)
30 | # login_page.submit_login()
31 | # get_started_page = GetStarted(webdriver)
32 | # get_started_page.wait_for_page_loaded()
33 | # PopupManager(webdriver).dismiss_default_popup()
34 | # get_started_page.close_whats_new_window()
35 | #
36 | # # uncomment below line to do web_sudo and authorise access to admin pages
37 | # # AdminPage(webdriver).go_to(password=password)
38 | # app_specific_user_login(username='admin', password='admin')
39 | # measure()
40 |
41 | @print_timing("selenium_app_custom_action")
42 | def measure():
43 |
44 | @print_timing("selenium_app_custom_action:view_repo_page")
45 | def sub_measure():
46 | page.go_to_url(f"{BITBUCKET_SETTINGS.server_url}/projects/{project_key}/repos/{repo_slug}/browse")
47 | page.wait_until_visible((By.CSS_SELECTOR, '.aui-navgroup-vertical>.aui-navgroup-inner')) # Wait for the repo navigation panel to be visible
48 | page.wait_until_visible((By.ID, 'ID_OF_YOUR_APP_SPECIFIC_UI_ELEMENT')) # Wait for your app-specific UI element by ID selector
49 | sub_measure()
50 | measure()
51 |
--------------------------------------------------------------------------------
/app/extension/confluence/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/extension/confluence/__init__.py
--------------------------------------------------------------------------------
/app/extension/confluence/extension_locust.py:
--------------------------------------------------------------------------------
1 | import re
2 | from locustio.common_utils import init_logger, confluence_measure, run_as_specific_user # noqa F401
3 |
4 | logger = init_logger(app_type='confluence')
5 |
6 |
7 | @confluence_measure("locust_app_specific_action")
8 | # @run_as_specific_user(username='admin', password='admin') # run as specific user
9 | def app_specific_action(locust):
10 | r = locust.get('/app/get_endpoint', catch_response=True) # call app-specific GET endpoint
11 | content = r.content.decode('utf-8') # decode response content
12 |
13 | token_pattern_example = '"token":"(.+?)"'
14 | id_pattern_example = '"id":"(.+?)"'
15 | token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
16 | id = re.findall(id_pattern_example, content) # get ID from response using regexp
17 |
18 | logger.locust_info(f'token: {token}, id: {id}') # log info for debug when verbose is true in confluence.yml file
19 | if 'assertion string' not in content:
20 | logger.error(f"'assertion string' was not found in {content}")
21 | assert 'assertion string' in content # assert specific string in response content
22 |
23 | body = {"id": id, "token": token} # include parsed variables to POST request body
24 | headers = {'content-type': 'application/json'}
25 | r = locust.post('/app/post_endpoint', body, headers, catch_response=True) # call app-specific POST endpoint
26 | content = r.content.decode('utf-8')
27 | if 'assertion string after successful POST request' not in content:
28 | logger.error(f"'assertion string after successful POST request' was not found in {content}")
29 | assert 'assertion string after successful POST request' in content # assertion after POST request
30 |
--------------------------------------------------------------------------------
/app/extension/confluence/extension_ui.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from selenium.webdriver.common.by import By
4 |
5 | from selenium_ui.base_page import BasePage
6 | from selenium_ui.conftest import print_timing
7 | from selenium_ui.confluence.pages.pages import Login, AllUpdates, AdminPage
8 | from util.conf import CONFLUENCE_SETTINGS
9 |
10 |
11 | def app_specific_action(webdriver, datasets):
12 | page = BasePage(webdriver)
13 | if datasets['custom_pages']:
14 | app_specific_page_id = datasets['custom_page_id']
15 |
16 | # To run the action as a specific user, uncomment the code below.
17 | # NOTE: If app_specific_action is running as specific user, make sure that app_specific_action is running
18 | # just before test_2_selenium_z_log_out
19 | # @print_timing("selenium_app_specific_user_login")
20 | # def measure():
21 | # def app_specific_user_login(username='admin', password='admin'):
22 | # login_page = Login(webdriver)
23 | # login_page.delete_all_cookies()
24 | # login_page.go_to()
25 | # login_page.wait_for_page_loaded()
26 | # login_page.set_credentials(username=username, password=password)
27 | # login_page.click_login_button()
28 | # if login_page.is_first_login():
29 | # login_page.first_user_setup()
30 | # all_updates_page = AllUpdates(webdriver)
31 | # all_updates_page.wait_for_page_loaded()
32 | # # uncomment below line to do web_sudo and authorise access to admin pages
33 | # # AdminPage(webdriver).go_to(password=password)
34 | #
35 | # app_specific_user_login(username='admin', password='admin')
36 | # measure()
37 |
38 | @print_timing("selenium_app_custom_action")
39 | def measure():
40 |
41 | @print_timing("selenium_app_custom_action:view_page")
42 | def sub_measure():
43 | page.go_to_url(f"{CONFLUENCE_SETTINGS.server_url}/pages/viewpage.action?pageId={app_specific_page_id}")
44 | page.wait_until_visible((By.ID, "title-text")) # Wait for title field visible
45 | page.wait_until_visible((By.ID, "ID_OF_YOUR_APP_SPECIFIC_UI_ELEMENT")) # Wait for your app-specific UI element by ID selector
46 | sub_measure()
47 | measure()
48 |
--------------------------------------------------------------------------------
/app/extension/jira/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/extension/jira/__init__.py
--------------------------------------------------------------------------------
/app/extension/jira/extension_locust.py:
--------------------------------------------------------------------------------
1 | import re
2 | from locustio.common_utils import init_logger, jira_measure, run_as_specific_user # noqa F401
3 |
4 | logger = init_logger(app_type='jira')
5 |
6 |
7 | @jira_measure("locust_app_specific_action")
8 | # @run_as_specific_user(username='admin', password='admin') # run as specific user
9 | def app_specific_action(locust):
10 | r = locust.get('/app/get_endpoint', catch_response=True) # call app-specific GET endpoint
11 | content = r.content.decode('utf-8') # decode response content
12 |
13 | token_pattern_example = '"token":"(.+?)"'
14 | id_pattern_example = '"id":"(.+?)"'
15 | token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
16 | id = re.findall(id_pattern_example, content) # get ID from response using regexp
17 |
18 | logger.locust_info(f'token: {token}, id: {id}') # log info for debug when verbose is true in jira.yml file
19 | if 'assertion string' not in content:
20 | logger.error(f"'assertion string' was not found in {content}")
21 | assert 'assertion string' in content # assert specific string in response content
22 |
23 | body = {"id": id, "token": token} # include parsed variables to POST request body
24 | headers = {'content-type': 'application/json'}
25 | r = locust.post('/app/post_endpoint', body, headers, catch_response=True) # call app-specific POST endpoint
26 | content = r.content.decode('utf-8')
27 | if 'assertion string after successful POST request' not in content:
28 | logger.error(f"'assertion string after successful POST request' was not found in {content}")
29 | assert 'assertion string after successful POST request' in content # assertion after POST request
30 |
--------------------------------------------------------------------------------
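The Locust extension above is only exercised when the Locust executor runs; `jira.yml` notes that both `jmeter` and `locust` are supported for `load_executor`. As a sketch (using a Taurus `-o` override, as with the bitbucket.yml example above), the executor could be switched at run time rather than in the file:

```bash
# Run the Jira scenario with the Locust executor instead of JMeter
bzt jira.yml -o settings.env.load_executor=locust
```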
/app/extension/jira/extension_ui.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from selenium.webdriver.common.by import By
4 |
5 | from selenium_ui.base_page import BasePage
6 | from selenium_ui.conftest import print_timing
7 | from selenium_ui.jira.pages.pages import Login, AdminPage
8 | from util.conf import JIRA_SETTINGS
9 |
10 |
11 | def app_specific_action(webdriver, datasets):
12 | page = BasePage(webdriver)
13 | if datasets['custom_issues']:
14 | issue_key = datasets['custom_issue_key']
15 |
16 | # To run the action as a specific user, uncomment the code below.
17 | # NOTE: If app_specific_action is running as specific user, make sure that app_specific_action is running
18 | # just before test_2_selenium_z_log_out action
19 | #
20 | # @print_timing("selenium_app_specific_user_login")
21 | # def measure():
22 | # def app_specific_user_login(username='admin', password='admin'):
23 | # login_page = Login(webdriver)
24 | # login_page.delete_all_cookies()
25 | # login_page.go_to()
26 | # login_page.wait_for_login_page_loaded()
27 | # login_page.set_credentials(username=username, password=password)
28 | # login_page.wait_for_dashboard_or_first_login_loaded()
29 | # if login_page.is_first_login():
30 | # login_page.first_login_setup()
31 | # if login_page.is_first_login_second_page():
32 | # login_page.first_login_second_page_setup()
33 | # login_page.wait_for_page_loaded()
34 | # # uncomment below line to do web_sudo and authorise access to admin pages
35 | # # AdminPage(webdriver).go_to(password=password)
36 | #
37 | # app_specific_user_login(username='admin', password='admin')
38 | # measure()
39 |
40 | @print_timing("selenium_app_custom_action")
41 | def measure():
42 | @print_timing("selenium_app_custom_action:view_issue")
43 | def sub_measure():
44 | page.go_to_url(f"{JIRA_SETTINGS.server_url}/browse/{issue_key}")
45 | page.wait_until_visible((By.ID, "summary-val")) # Wait for summary field visible
46 | page.wait_until_visible((By.ID, "ID_OF_YOUR_APP_SPECIFIC_UI_ELEMENT")) # Wait for your app-specific UI element by ID selector
47 | sub_measure()
48 | measure()
49 |
50 |
--------------------------------------------------------------------------------
/app/extension/jsm/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/extension/jsm/__init__.py
--------------------------------------------------------------------------------
/app/extension/jsm/extension_locust_agents.py:
--------------------------------------------------------------------------------
1 | import re
2 | from locustio.common_utils import init_logger, jsm_agent_measure, run_as_specific_user # noqa F401
3 |
4 | logger = init_logger(app_type='jsm')
5 |
6 |
7 | @jsm_agent_measure('locust_agent_app_specific_action')
8 | # @run_as_specific_user(username='admin', password='admin') # run as specific user
9 | def app_specific_action(locust):
10 | r = locust.get('/app/get_endpoint', catch_response=True) # call app-specific GET endpoint
11 | content = r.content.decode('utf-8') # decode response content
12 |
13 | token_pattern_example = '"token":"(.+?)"'
14 | id_pattern_example = '"id":"(.+?)"'
15 | token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
16 | id = re.findall(id_pattern_example, content) # get ID from response using regexp
17 |
18 | logger.locust_info(f'token: {token}, id: {id}') # log info for debug when verbose is true in jsm.yml file
19 | if 'assertion string' not in content:
20 | logger.error(f"'assertion string' was not found in {content}")
21 | assert 'assertion string' in content # assert specific string in response content
22 |
23 | body = {"id": id, "token": token} # include parsed variables to POST request body
24 | headers = {'content-type': 'application/json'}
25 | r = locust.post('/app/post_endpoint', body, headers, catch_response=True) # call app-specific POST endpoint
26 | content = r.content.decode('utf-8')
27 | if 'assertion string after successful POST request' not in content:
28 | logger.error(f"'assertion string after successful POST request' was not found in {content}")
29 | assert 'assertion string after successful POST request' in content # assertion after POST request
30 |
--------------------------------------------------------------------------------
/app/extension/jsm/extension_locust_customers.py:
--------------------------------------------------------------------------------
1 | import re
2 | from locustio.common_utils import init_logger, jsm_customer_measure, run_as_specific_user # noqa F401
3 |
4 | logger = init_logger(app_type='jsm')
5 |
6 |
7 | @jsm_customer_measure('locust_customer_app_specific_action')
8 | # @run_as_specific_user(username='admin', password='admin') # run as specific user
9 | def app_specific_action(locust):
10 | r = locust.get('/app/get_endpoint', catch_response=True) # call app-specific GET endpoint
11 | content = r.content.decode('utf-8') # decode response content
12 |
13 | token_pattern_example = '"token":"(.+?)"'
14 | id_pattern_example = '"id":"(.+?)"'
15 | token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
16 | id = re.findall(id_pattern_example, content) # get ID from response using regexp
17 |
18 | logger.locust_info(f'token: {token}, id: {id}') # log info for debug when verbose is true in jsm.yml file
19 | if 'assertion string' not in content:
20 | logger.error(f"'assertion string' was not found in {content}")
21 | assert 'assertion string' in content # assert specific string in response content
22 |
23 | body = {"id": id, "token": token} # include parsed variables to POST request body
24 | headers = {'content-type': 'application/json'}
25 | r = locust.post('/app/post_endpoint', body, headers, catch_response=True) # call app-specific POST endpoint
26 | content = r.content.decode('utf-8')
27 | if 'assertion string after successful POST request' not in content:
28 | logger.error(f"'assertion string after successful POST request' was not found in {content}")
29 | assert 'assertion string after successful POST request' in content # assertion after POST request
30 |
--------------------------------------------------------------------------------
/app/extension/jsm/extension_ui_agents.py:
--------------------------------------------------------------------------------
1 | from selenium.webdriver.common.by import By
2 |
3 | from selenium_ui.base_page import BasePage
4 | from selenium_ui.conftest import print_timing
5 | from selenium_ui.jsm.pages.agent_pages import Login
6 | from util.conf import JSM_SETTINGS
7 |
8 |
9 | def app_specific_action(webdriver, datasets):
10 | page = BasePage(webdriver)
11 | if datasets['custom_issues']:
12 | issue_key = datasets['custom_issue_key']
13 |
14 | # To run the action as a specific user, uncomment the code below.
15 | # NOTE: If app_specific_action is running as specific user, make sure that app_specific_action is running
16 | # just before test_2_selenium_z_log_out action
17 |
18 | # @print_timing("selenium_app_specific_user_login")
19 | # def measure():
20 | # def app_specific_user_login(username='admin', password='admin'):
21 | # login_page = Login(webdriver)
22 | # login_page.delete_all_cookies()
23 | # login_page.go_to()
24 | # login_page.set_credentials(username=username, password=password)
25 | # if login_page.is_first_login():
26 | # login_page.first_login_setup()
27 | # if login_page.is_first_login_second_page():
28 | # login_page.first_login_second_page_setup()
29 | # login_page.wait_for_page_loaded()
30 | # app_specific_user_login(username='admin', password='admin')
31 | # measure()
32 |
33 | @print_timing("selenium_agent_app_custom_action")
34 | def measure():
35 |
36 | @print_timing("selenium_agent_app_custom_action:view_request")
37 | def sub_measure():
38 | page.go_to_url(f"{JSM_SETTINGS.server_url}/browse/{issue_key}")
39 | # Wait for summary field visible
40 | page.wait_until_visible((By.ID, "summary-val"))
41 |             # Wait for your app-specific UI element by ID selector
42 | page.wait_until_visible((By.ID, "ID_OF_YOUR_APP_SPECIFIC_UI_ELEMENT"))
43 | sub_measure()
44 | measure()
45 |
--------------------------------------------------------------------------------
/app/extension/jsm/extension_ui_customers.py:
--------------------------------------------------------------------------------
1 | from selenium.webdriver.common.by import By
2 |
3 | from selenium_ui.base_page import BasePage
4 | from selenium_ui.conftest import print_timing
5 | from selenium_ui.jsm.pages.customer_pages import Login
6 | from util.conf import JSM_SETTINGS
7 |
8 |
9 | def app_specific_action(webdriver, datasets):
10 | page = BasePage(webdriver)
11 | if datasets['custom_issues']:
12 | custom_request_key = datasets['custom_issue_key']
13 | custom_service_desk_id = datasets['custom_service_desk_id']
14 |
15 |     # To run the action as a specific user, uncomment the code below.
16 |     # NOTE: If app_specific_action is running as a specific user, make sure that app_specific_action runs
17 |     # just before the test_2_selenium_z_log_out action
18 |
19 | # @print_timing("selenium_app_specific_user_login")
20 | # def measure():
21 | # def app_specific_user_login(username='admin', password='admin'):
22 | # login_page = Login(webdriver)
23 | # login_page.delete_all_cookies()
24 | # login_page.go_to()
25 | # login_page.set_credentials(username=username, password=password)
26 | # login_page.wait_for_page_loaded()
27 | # app_specific_user_login(username='admin', password='admin')
28 | # measure()
29 |
30 | @print_timing("selenium_customer_app_custom_action")
31 | def measure():
32 |
33 | @print_timing("selenium_customer_app_custom_action:view_request")
34 | def sub_measure():
35 | page.go_to_url(f"{JSM_SETTINGS.server_url}/servicedesk/customer/portal/"
36 | f"{custom_service_desk_id}/{custom_request_key}")
37 | # Wait for options element visible
38 | page.wait_until_visible((By.CLASS_NAME, 'cv-request-options'))
39 | # Wait for you app-specific UI element by ID selector
40 | page.wait_until_visible((By.ID, "ID_OF_YOUR_APP_SPECIFIC_UI_ELEMENT"))
41 | sub_measure()
42 | measure()
43 |
--------------------------------------------------------------------------------
/app/jira.yml:
--------------------------------------------------------------------------------
1 | ---
2 | settings:
3 | artifacts-dir: results/jira/%Y-%m-%d_%H-%M-%S
4 | aggregator: consolidator
5 | verbose: false
6 | check-updates: false # disable bzt check for updates
7 | env:
8 | application_hostname: test_jira_instance.atlassian.com # Jira DC hostname without protocol and port e.g. test-jira.atlassian.com or localhost
9 | application_protocol: http # http or https
10 | application_port: 80 # 80, 443, 8080, 2990, etc
11 | secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate
12 |     application_postfix: /jira  # e.g. /jira for a Terraform deployment URL like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for a URL without a postfix.
13 | admin_login: admin
14 | admin_password: admin
15 | load_executor: jmeter # jmeter and locust are supported. jmeter by default.
16 | concurrency: 200 # number of concurrent virtual users for jmeter or locust scenario
17 | test_duration: 45m
18 |     ramp-up: 3m  # time to spin up all concurrent users
19 | total_actions_per_hour: 54500 # number of total JMeter/Locust actions per hour
20 | WEBDRIVER_VISIBLE: False
21 | JMETER_VERSION: 5.6.3
22 | LANGUAGE: en_US.utf8
23 | allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README.
24 | environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it.
25 | # Action percentage for Jmeter and Locust load executors
26 | create_issue: 4
27 | search_jql: 11
28 | view_issue: 34
29 | view_project_summary: 3
30 | view_dashboard: 10
31 | edit_issue: 5
32 | add_comment: 2
33 | browse_projects: 9
34 | view_scrum_board: 8
35 | view_kanban_board: 7
36 | view_backlog: 6
37 | browse_boards: 1
38 | standalone_extension: 0 # By default disabled
39 | # Custom dataset section.
40 | custom_dataset_query: # Write JQL query to add JQL output to the app/datasets/jira/custom-issues.csv, e.g. "summary ~ 'AppIssue*'"
41 | services:
42 | - module: shellexec
43 | prepare:
44 | - python util/pre_run/environment_checker.py
45 | - python util/pre_run/environment_compliance_check.py jira
46 | - python util/data_preparation/jira_prepare_data.py
47 | shutdown:
48 | - python util/post_run/jmeter_post_check.py
49 | - python util/jtl_convertor/jtls-to-csv.py kpi.jtl selenium.jtl
50 | post-process:
51 | - python util/analytics/analytics.py jira
52 | - python util/post_run/cleanup_results_dir.py
53 | - module: pip-install
54 | packages:
55 | - selenium==4.31.0
56 | execution:
57 | - scenario: ${load_executor}
58 | executor: ${load_executor}
59 | concurrency: ${concurrency}
60 | hold-for: ${test_duration}
61 | ramp-up: ${ramp-up}
62 | - scenario: selenium
63 | executor: selenium
64 | runner: pytest
65 | hold-for: ${test_duration}
66 | scenarios:
67 | selenium:
68 | script: selenium_ui/jira_ui.py
69 | locust:
70 | script: locustio/jira/locustfile.py
71 | jmeter:
72 | script: jmeter/jira.jmx
73 | properties:
74 | application_hostname: ${application_hostname}
75 | application_protocol: ${application_protocol}
76 | application_port: ${application_port}
77 | application_postfix: ${application_postfix}
78 | # Workload model
79 | total_actions_per_hr: ${total_actions_per_hour}
80 | perc_create_issue: ${create_issue}
81 | perc_search_jql: ${search_jql}
82 | perc_view_issue: ${view_issue}
83 | perc_view_project_summary: ${view_project_summary}
84 | perc_view_dashboard: ${view_dashboard}
85 | perc_edit_issue: ${edit_issue}
86 | perc_add_comment: ${add_comment}
87 | perc_browse_projects: ${browse_projects}
88 | perc_view_scrum_board: ${view_scrum_board}
89 | perc_view_kanban_board: ${view_kanban_board}
90 | perc_view_backlog: ${view_backlog}
91 | perc_browse_boards: ${browse_boards}
92 | perc_standalone_extension: ${standalone_extension}
93 | modules:
94 | consolidator:
95 | rtimes-len: 0 # CONFSRVDEV-7631 reduce sampling
96 | percentiles: [] # CONFSRVDEV-7631 disable all percentiles due to Taurus's excessive memory usage
97 | jmeter:
98 | version: ${JMETER_VERSION}
99 | detect-plugins: true
100 | memory-xmx: 8G # allow JMeter to use up to 8G of memory
101 | plugins:
102 | - jpgc-casutg=2.10
103 | - jpgc-dummy=0.4
104 | - jpgc-ffw=2.0
105 | - jpgc-fifo=0.2
106 | - jpgc-functions=2.2
107 | - jpgc-json=2.7
108 | - jpgc-perfmon=2.1
109 | - jpgc-prmctl=0.4
110 | - jpgc-tst=2.6
111 |       - bzm-random-csv=0.8  # not used in the default jmx file
112 | system-properties:
113 | server.rmi.ssl.disable: true
114 | java.rmi.server.hostname: localhost
115 | httpsampler.ignore_failed_embedded_resources: "true"
116 | selenium:
117 | chromedriver:
118 | version: "135.0.7049.114" # Supports Chrome version 135. You can refer to https://googlechromelabs.github.io/chrome-for-testing
119 | reporting:
120 | - data-source: sample-labels
121 | module: junit-xml
122 |
--------------------------------------------------------------------------------
/app/locustio/bamboo/locustfile.py:
--------------------------------------------------------------------------------
1 | from locust import HttpUser, task, between
2 | from locustio.bamboo.http_actions import run_build_plans
3 | from locustio.common_utils import LocustConfig, MyBaseTaskSet
4 | from util.conf import BAMBOO_SETTINGS
5 |
6 | config = LocustConfig(config_yml=BAMBOO_SETTINGS)
7 |
8 |
9 | class BambooBehavior(MyBaseTaskSet):
10 |
11 | @task()
12 | def run_build_plans(self):
13 | run_build_plans(self)
14 |
15 |
16 | class BambooUser(HttpUser):
17 | host = BAMBOO_SETTINGS.server_url
18 | tasks = [BambooBehavior]
19 | wait_time = between(0, 0)
20 |
--------------------------------------------------------------------------------
/app/locustio/bamboo/locustfile_app_specific.py:
--------------------------------------------------------------------------------
1 | from locust import HttpUser, task, between
2 | from extension.bamboo.extension_locust import app_specific_action
3 | from locustio.bamboo.http_actions import locust_bamboo_login
4 | from locustio.common_utils import LocustConfig, MyBaseTaskSet
5 | from util.conf import BAMBOO_SETTINGS
6 |
7 | config = LocustConfig(config_yml=BAMBOO_SETTINGS)
8 |
9 |
10 | class BambooBehavior(MyBaseTaskSet):
11 |
12 | def on_start(self):
13 | self.client.verify = config.secure
14 | locust_bamboo_login(self)
15 |
16 | @task(config.percentage('standalone_extension_locust'))
17 | def custom_action(self):
18 | app_specific_action(self)
19 |
20 |
21 | class BambooUser(HttpUser):
22 | host = BAMBOO_SETTINGS.server_url
23 | tasks = [BambooBehavior]
24 | wait_time = between(0, 0)
25 |
--------------------------------------------------------------------------------
/app/locustio/bamboo/requests_params.py:
--------------------------------------------------------------------------------
1 | from locustio.common_utils import read_input_file
2 | from util.project_paths import BAMBOO_USERS, BAMBOO_BUILD_PLANS
3 |
4 |
5 | class Login:
6 | action_name = 'jmeter_login_and_view_all_builds'
7 | atl_token_pattern = r'name="atlassian-token" content="(.+?)">'
8 | login_body = {
9 | 'os_username': '',
10 | 'os_password': '',
11 | 'os_destination': '',
12 | 'save': 'Log in',
13 | 'atl_token': '',
14 | }
15 |
16 |
17 | def bamboo_datasets():
18 | data_sets = dict()
19 | data_sets["users"] = read_input_file(BAMBOO_USERS)
20 | data_sets["build_plans"] = read_input_file(BAMBOO_BUILD_PLANS)
21 | return data_sets
22 |
--------------------------------------------------------------------------------
/app/locustio/confluence/locustfile.py:
--------------------------------------------------------------------------------
1 | from locust import HttpUser, task, between
2 |
3 | from extension.confluence.extension_locust import app_specific_action
4 | from locustio.common_utils import LocustConfig, MyBaseTaskSet
5 | from locustio.confluence.http_actions import login_and_view_dashboard, view_dashboard, view_blog,\
6 | open_editor_and_create_blog, create_and_edit_page, comment_page, view_attachments, \
7 | upload_attachments, like_page, view_page, search_cql_two_words_and_view_results, search_cql_three_words
8 | from util.conf import CONFLUENCE_SETTINGS
9 |
10 | config = LocustConfig(config_yml=CONFLUENCE_SETTINGS)
11 |
12 |
13 | class ConfluenceBehavior(MyBaseTaskSet):
14 |
15 | def on_start(self):
16 | self.client.verify = config.secure
17 | login_and_view_dashboard(self)
18 |
19 | @task(config.percentage('view_page'))
20 | def view_page_action(self):
21 | view_page(self)
22 |
23 | @task(config.percentage('view_dashboard'))
24 | def view_dashboard_action(self):
25 | view_dashboard(self)
26 |
27 | @task(config.percentage('view_blog'))
28 | def view_blog_action(self):
29 | view_blog(self)
30 |
31 | @task(config.percentage('search_cql'))
32 |     def search_cql_two_words_action(self):
33 | search_cql_two_words_and_view_results(self)
34 |
35 | @task(config.percentage('search_cql'))
36 |     def search_cql_three_words_action(self):
37 | search_cql_three_words(self)
38 |
39 | @task(config.percentage('create_blog'))
40 | def create_blog_action(self):
41 | open_editor_and_create_blog(self)
42 |
43 | @task(config.percentage('create_and_edit_page'))
44 | def create_and_edit_page_action(self):
45 | create_and_edit_page(self)
46 |
47 | @task(config.percentage('comment_page'))
48 | def comment_page_action(self):
49 | comment_page(self)
50 |
51 | @task(config.percentage('view_attachment'))
52 | def view_attachments_action(self):
53 | view_attachments(self)
54 |
55 | @task(config.percentage('upload_attachment'))
56 | def upload_attachments_action(self):
57 | upload_attachments(self)
58 |
59 | @task(config.percentage('like_page'))
60 | def like_page_action(self):
61 | like_page(self)
62 |
63 | @task(config.percentage('standalone_extension'))
64 | def custom_action(self):
65 | app_specific_action(self)
66 |
67 |
68 | class ConfluenceUser(HttpUser):
69 | host = CONFLUENCE_SETTINGS.server_url
70 | tasks = [ConfluenceBehavior]
71 | wait_time = between(0, 0)
72 |
--------------------------------------------------------------------------------
/app/locustio/confluence/requests_params.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 | from locustio.common_utils import read_input_file, BaseResource
3 | from util.project_paths import (CONFLUENCE_PAGES, CONFLUENCE_BLOGS, CONFLUENCE_USERS, CONFLUENCE_STATIC_CONTENT)
4 |
5 |
6 | def confluence_datasets():
7 | data_sets = dict()
8 | data_sets["pages"] = read_input_file(CONFLUENCE_PAGES)
9 | data_sets["blogs"] = read_input_file(CONFLUENCE_BLOGS)
10 | data_sets["users"] = read_input_file(CONFLUENCE_USERS)
11 | data_sets['static-content'] = read_input_file(CONFLUENCE_STATIC_CONTENT)
12 |
13 | return data_sets
14 |
15 |
16 | class ConfluenceResource(BaseResource):
17 |
18 | def __init__(self, resource_file='locustio/confluence/resources.json'):
19 | super().__init__(resource_file)
20 |
21 |
22 | class Login(ConfluenceResource):
23 | action_name = 'login_and_view_dashboard'
24 | login_body = {
25 | 'os_username': '',
26 | 'os_password': '',
27 | 'os_cookie': True,
28 | 'os_destination': '',
29 | 'login': 'Log in'
30 | }
31 | keyboard_hash_re = 'name=\"ajs-keyboardshortcut-hash\" content=\"(.*?)\">'
32 | static_resource_url_re = 'meta name=\"ajs-static-resource-url-prefix\" content=\"(.*?)/_\">'
33 | version_number_re = 'meta name=\"ajs-version-number\" content=\"(.*?)\">'
34 | build_number_re = 'meta name=\"ajs-build-number\" content=\"(.*?)\"'
35 | atl_token_pattern = r'"ajs-atl-token" content="(.+?)"'
36 |
37 |
38 | class ViewPage(ConfluenceResource):
39 | action_name = 'view_page'
40 | parent_page_id_re = 'meta name=\"ajs-parent-page-id\" content=\"(.*?)\"'
41 | page_id_re = 'meta name=\"ajs-page-id\" content=\"(.*?)\">'
42 | space_key_re = 'meta id=\"confluence-space-key\" name=\"confluence-space-key\" content=\"(.*?)\"'
43 | ancestor_ids_re = 'name=\"ancestorId\" value=\"(.*?)\"'
44 | tree_result_id_re = 'name="treeRequestId" value="(.+?)"'
45 | has_no_root_re = '"noRoot" value="(.+?)"'
46 | root_page_id_re = 'name="rootPageId" value="(.+?)"'
47 | editable_re = 'id=\"editPageLink\" href="(.+?)\?pageId=(.+?)\"'
48 | inline_comment_re = '\"id\":(.+?)\,\"'
49 |
50 |
51 | class ViewDashboard(ConfluenceResource):
52 | action_name = 'view_dashboard'
53 | keyboard_hash_re = 'name=\"ajs-keyboardshortcut-hash\" content=\"(.*?)\">'
54 | static_resource_url_re = 'meta name=\"ajs-static-resource-url-prefix\" content=\"(.*?)/_\">'
55 | version_number_re = 'meta name=\"ajs-version-number\" content=\"(.*?)\">'
56 | build_number_re = 'meta name=\"ajs-build-number\" content=\"(.*?)\"'
57 |
58 |
59 | class ViewBlog(ConfluenceResource):
60 | action_name = 'view_blog'
61 | parent_page_id_re = 'meta name=\"ajs-parent-page-id\" content=\"(.*?)\"'
62 | page_id_re = 'meta name=\"ajs-page-id\" content=\"(.*?)\">'
63 | space_key_re = 'meta id=\"confluence-space-key\" name=\"confluence-space-key\" content=\"(.*?)\"'
64 | inline_comment_re = '\"id\":(.+?)\,\"'
65 | keyboard_hash_re = 'name=\"ajs-keyboardshortcut-hash\" content=\"(.*?)\">'
66 | build_number_re = 'meta name=\"ajs-build-number\" content=\"(.*?)\"'
67 |
68 |
69 | class CreateBlog(ConfluenceResource):
70 | action_name = 'create_blog'
71 | content_id_re = 'name=\"ajs-content-id\" content=\"(.*?)\">'
72 | page_id_re = 'meta name=\"ajs-page-id\" content=\"(.*?)\">'
73 | space_key = 'createpage.action\?spaceKey=(.+?)\&'
74 | contribution_hash = '\"contributorsHash\":\"\"'
75 | parent_page_id_re = 'meta name=\"ajs-parent-page-id\" content=\"(.*?)\"'
76 |
77 | created_blog_title_re = 'anonymous_export_view.*?\"webui\":\"(.*?)\"'
78 |
79 |
80 | class CreateEditPage(ConfluenceResource):
81 | action_name = 'create_and_edit_page'
82 | content_id_re = 'meta name=\"ajs-content-id\" content=\"(.*?)\">'
83 | parent_page_id = 'meta name=\"ajs-parent-page-id\" content=\"(.*?)\"'
84 |
85 | editor_page_title_re = 'name=\"ajs-page-title\" content=\"(.*?)\"'
86 | editor_page_version_re = 'name=\"ajs-page-version\" content=\"(.*?)\">'
87 | editor_page_content_re = 'id=\"wysiwygTextarea\" name=\"wysiwygContent\" class=\
88 | "hidden tinymce-editor\">([\w\W]*?)'
89 |
90 |
91 | class CommentPage(ConfluenceResource):
92 | action_name = 'comment_page'
93 |
94 |
95 | class UploadAttachments(ConfluenceResource):
96 | action_name = 'upload_attachments'
97 |
98 | keyboard_hash_re = 'name=\"ajs-keyboardshortcut-hash\" content=\"(.*?)\">'
99 | build_number_re = 'meta name=\"ajs-build-number\" content=\"(.*?)\"'
100 | parent_page_id_re = 'meta name=\"ajs-parent-page-id\" content=\"(.*?)\"'
101 |
102 |
103 | class LikePage(ConfluenceResource):
104 | action_name = 'like_page'
105 | like_re = '\{\"likes\":\[\{"user":\{"name\"\:\"(.+?)",'
106 |
107 |
108 | class ViewAttachment(ConfluenceResource):
109 | action_name = 'view_attachment'
110 |
111 | keyboard_hash_re = 'name=\"ajs-keyboardshortcut-hash\" content=\"(.*?)\">'
112 | build_number_re = 'meta name=\"ajs-build-number\" content=\"(.*?)\"'
113 | parent_page_id_re = 'meta name=\"ajs-parent-page-id\" content=\"(.*?)\"'
114 | remote_user_key_re = 'meta name=\"ajs-remote-user-key\" content=\"(.*?)\">'
115 | data_linked_resource_id_re = 'data-linked-resource-id=\"(.*?)\"'
116 | page_id_re = 'meta name=\"ajs-page-id\" content=\"(.*?)\">'
117 |
118 |
--------------------------------------------------------------------------------
/app/locustio/jira/locustfile.py:
--------------------------------------------------------------------------------
1 | from locust import HttpUser, task, between
2 | from locustio.jira.http_actions import login_and_view_dashboard, create_issue, search_jql, view_issue, \
3 | view_project_summary, view_dashboard, edit_issue, add_comment, browse_boards, view_kanban_board, view_scrum_board, \
4 | view_backlog, browse_projects
5 | from locustio.common_utils import LocustConfig, MyBaseTaskSet
6 | from extension.jira.extension_locust import app_specific_action
7 | from util.conf import JIRA_SETTINGS
8 |
9 | config = LocustConfig(config_yml=JIRA_SETTINGS)
10 |
11 |
12 | class JiraBehavior(MyBaseTaskSet):
13 |
14 | def on_start(self):
15 | self.client.verify = config.secure
16 | login_and_view_dashboard(self)
17 |
18 | @task(config.percentage('create_issue'))
19 | def create_issue_action(self):
20 | create_issue(self)
21 |
22 | @task(config.percentage('search_jql'))
23 | def search_jql_action(self):
24 | search_jql(self)
25 |
26 | @task(config.percentage('view_issue'))
27 | def view_issue_action(self):
28 | view_issue(self)
29 |
30 | @task(config.percentage('view_project_summary'))
31 | def view_project_summary_action(self):
32 | view_project_summary(self)
33 |
34 | @task(config.percentage('view_dashboard'))
35 | def view_dashboard_action(self):
36 | view_dashboard(self)
37 |
38 | @task(config.percentage('edit_issue'))
39 | def edit_issue_action(self):
40 | edit_issue(self)
41 |
42 | @task(config.percentage('add_comment'))
43 | def add_comment_action(self):
44 | add_comment(self)
45 |
46 | @task(config.percentage('browse_projects'))
47 | def browse_projects_action(self):
48 | browse_projects(self)
49 |
50 | @task(config.percentage('view_kanban_board'))
51 | def view_kanban_board_action(self):
52 | view_kanban_board(self)
53 |
54 | @task(config.percentage('view_scrum_board'))
55 | def view_scrum_board_action(self):
56 | view_scrum_board(self)
57 |
58 | @task(config.percentage('view_backlog'))
59 | def view_backlog_action(self):
60 | view_backlog(self)
61 |
62 | @task(config.percentage('browse_boards'))
63 | def browse_boards_action(self):
64 | browse_boards(self)
65 |
66 | @task(config.percentage('standalone_extension')) # By default disabled
67 | def custom_action(self):
68 | app_specific_action(self)
69 |
70 |
71 | class JiraUser(HttpUser):
72 | host = JIRA_SETTINGS.server_url
73 | tasks = [JiraBehavior]
74 | wait_time = between(0, 0)
75 |
--------------------------------------------------------------------------------
/app/locustio/jsm/agents/agents_requests_params.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from locustio.common_utils import read_input_file, BaseResource
4 | from util.project_paths import JSM_DATASET_AGENTS, JSM_DATASET_REQUESTS, JSM_DATASET_SERVICE_DESKS_L, \
5 | JSM_DATASET_SERVICE_DESKS_M, JSM_DATASET_SERVICE_DESKS_S
6 |
7 |
8 | def jsm_agent_datasets():
9 | data_sets = dict()
10 | data_sets['agents'] = read_input_file(JSM_DATASET_AGENTS)
11 | data_sets['requests'] = read_input_file(JSM_DATASET_REQUESTS)
12 | data_sets['s_project'] = read_input_file(JSM_DATASET_SERVICE_DESKS_S)
13 | data_sets['m_project'] = read_input_file(JSM_DATASET_SERVICE_DESKS_M)
14 | data_sets['l_project'] = read_input_file(JSM_DATASET_SERVICE_DESKS_L)
15 |
16 | return data_sets
17 |
18 |
19 | class JsmAgentsResource(BaseResource):
20 |
21 | def __init__(self, resource_file='locustio/jsm/agents/agents_resources.json'):
22 | super().__init__(resource_file)
23 |
24 |
25 | class Login(JsmAgentsResource):
26 | action_name = 'login_and_view_dashboard'
27 | atl_token_pattern = r'name="atlassian-token" content="(.+?)">'
28 | login_body = {
29 | 'os_username': '',
30 | 'os_password': '',
31 | 'os_destination': '',
32 | 'user_role': '',
33 | 'atl_token': '',
34 | 'login': 'Log in'
35 | }
36 |
37 |
38 | class AllOpenQueue(JsmAgentsResource):
39 | action_name = 'view_all_open_queue'
40 | last_visited_project_body = {"id": "com.atlassian.servicedesk.project-ui:sd-project-sidebar-queues"}
41 |
42 |
43 | class BrowseProjects(JsmAgentsResource):
44 | action_name = 'browse_jsm_projects'
45 |
46 |
47 | class ViewRequest(JsmAgentsResource):
48 | action_name = 'view_request'
49 |
50 |
51 | class AddComment(JsmAgentsResource):
52 | action_name = 'add_comment'
53 |
54 |
55 | class ViewWorkloadReport(JsmAgentsResource):
56 | action_name = 'view_workload'
57 | last_visited_project_body = {"id": "com.atlassian.servicedesk.project-ui:sd-project-sidebar-queues"}
58 |
59 |
60 | class ViewTimeToResolutionReport(JsmAgentsResource):
61 | action_name = 'view_time_to_resolution'
62 | last_visited_body = {"id": "com.atlassian.servicedesk.project-ui:sd-project-sidebar-reports"}
63 |
64 |
65 | class ViewReportCreatedVsResolved(JsmAgentsResource):
66 | action_name = 'view_created_vs_resolved'
67 | last_visited_body = {"id": "com.atlassian.servicedesk.project-ui:sd-project-sidebar-reports"}
68 |
69 |
70 | class ViewCustomers(JsmAgentsResource):
71 | action_name = 'view_customers'
72 | last_visited_body = {"id": "com.atlassian.servicedesk.project-ui:sd-project-sidebar-customers"}
73 |
--------------------------------------------------------------------------------
/app/locustio/jsm/agents_locustfile.py:
--------------------------------------------------------------------------------
1 | from locust import HttpUser, task, between
2 | from locustio.jsm.agents import agents_http_actions
3 | from locustio.common_utils import LocustConfig, MyBaseTaskSet
4 | from locustio.jsm.agents.agents_requests_params import jsm_agent_datasets
5 | from extension.jsm.extension_locust_agents import app_specific_action
6 | from util.conf import JSM_SETTINGS
7 |
8 | config = LocustConfig(config_yml=JSM_SETTINGS)
9 | jsm_agent_dataset = jsm_agent_datasets()
10 |
11 |
12 | class JsmAgentBehavior(MyBaseTaskSet):
13 |
14 | def on_start(self):
15 | self.client.verify = config.secure
16 | agents_http_actions.agent_login_and_view_dashboard(self, jsm_agent_dataset)
17 |
18 | @task(config.percentage('agent_view_queues_small') // 2)
19 | def agent_view_queues_small(self):
20 | agents_http_actions.agent_view_queue_all_open_small(self)
21 | agents_http_actions.agent_view_queue_random_small(self)
22 |
23 | @task(config.percentage('agent_view_queues_medium') // 2)
24 | def agent_view_queues_medium(self):
25 | if jsm_agent_dataset['m_project']:
26 | agents_http_actions.agent_view_queue_all_open_medium(self)
27 | agents_http_actions.agent_view_queue_random_medium(self)
28 |
29 | @task(config.percentage('agent_browse_projects'))
30 | def agent_browse_projects(self):
31 | agents_http_actions.agent_browse_projects(self)
32 |
33 | @task(config.percentage('agent_view_request'))
34 | def agent_view_request(self):
35 | agents_http_actions.agent_view_request(self)
36 |
37 | @task(config.percentage('agent_add_comment'))
38 | def agent_add_comment(self):
39 | agents_http_actions.agent_add_comment(self)
40 |
41 | @task(config.percentage('agent_view_report_workload_small'))
42 | def agent_view_report_workload_small(self):
43 | agents_http_actions.agent_view_report_workload_small(self)
44 |
45 | @task(config.percentage('agent_view_report_workload_medium'))
46 | def agent_view_report_workload_medium(self):
47 | if jsm_agent_dataset['m_project']:
48 | agents_http_actions.agent_view_report_workload_medium(self)
49 |
50 | @task(config.percentage('agent_view_report_created_vs_resolved_small'))
51 | def agent_view_report_created_vs_resolved_small(self):
52 | agents_http_actions.agent_view_report_created_vs_resolved_small(self)
53 |
54 | @task(config.percentage('agent_view_report_created_vs_resolved_medium'))
55 | def agent_view_report_created_vs_resolved_medium(self):
56 | if jsm_agent_dataset['m_project']:
57 | agents_http_actions.agent_view_report_created_vs_resolved_medium(self)
58 |
59 | @task(config.percentage('agent_view_customers'))
60 | def agent_view_customers(self):
61 | agents_http_actions.agent_view_customers(self)
62 |
63 | @task(config.percentage('agent_standalone_extension')) # By default disabled
64 | def custom_action(self):
65 | app_specific_action(self)
66 |
67 |
68 | class JsmAgent(HttpUser):
69 | host = JSM_SETTINGS.server_url
70 | tasks = [JsmAgentBehavior]
71 | wait_time = between(0, 0)
72 |
--------------------------------------------------------------------------------
/app/locustio/jsm/customers/customers_requests_params.py:
--------------------------------------------------------------------------------
1 | from locustio.common_utils import read_input_file, BaseResource
2 | import json
3 | from util.project_paths import JSM_DATASET_CUSTOMERS, JSM_DATASET_REQUEST_TYPES, JSM_DATASET_SERVICE_DESKS_S
4 |
5 |
6 | def jsm_customer_datasets():
7 | data_sets = dict()
8 | data_sets['customers'] = read_input_file(JSM_DATASET_CUSTOMERS)
9 | data_sets['s_portal'] = read_input_file(JSM_DATASET_SERVICE_DESKS_S)
10 | data_sets['request_types'] = read_input_file(JSM_DATASET_REQUEST_TYPES)
11 |
12 | return data_sets
13 |
14 |
15 | class JsmCustomersResource(BaseResource):
16 |
17 | def __init__(self, resource_file='locustio/jsm/customers/customers_resources.json'):
18 | super().__init__(resource_file)
19 |
20 |
21 | class Login(JsmCustomersResource):
22 | action_name = 'login_and_view_portals'
23 | login_body = {
24 | 'os_username': '',
25 | 'os_password': '',
26 | 'os_captcha': '',
27 | 'os_cookie': 'true',
28 | }
29 |
30 |
31 | class ViewPortal(JsmCustomersResource):
32 | action_name = 'view_portal'
33 |
34 |
35 | class ViewRequests(JsmCustomersResource):
36 | action_name = 'view_requests'
37 |
38 |
39 | class ViewRequest(JsmCustomersResource):
40 | action_name = 'view_request'
41 |
42 |
43 | class AddComment(JsmCustomersResource):
44 | action_name = 'add_comment'
45 |
46 |
47 | class ShareRequest(JsmCustomersResource):
48 | action_name = 'share_request'
49 |
50 |
51 | class ShareRequestOrg(JsmCustomersResource):
52 | action_name = 'share_request_org'
53 |
54 |
55 | class CreateRequest(JsmCustomersResource):
56 | action_name = 'create_request'
57 |
--------------------------------------------------------------------------------
/app/locustio/jsm/customers_locustfile.py:
--------------------------------------------------------------------------------
1 | from locust import HttpUser, task, between
2 | from locustio.jsm.customers import customers_http_actions
3 | from locustio.common_utils import LocustConfig, MyBaseTaskSet
4 | from extension.jsm.extension_locust_customers import app_specific_action
5 | from util.conf import JSM_SETTINGS
6 |
7 | config = LocustConfig(config_yml=JSM_SETTINGS)
8 |
9 |
10 | class JsmCustomerBehavior(MyBaseTaskSet):
11 |
12 | def on_start(self):
13 | self.client.verify = config.secure
14 | customers_http_actions.customer_login_and_view_portals(self)
15 |
16 | @task(config.percentage('customer_view_portal'))
17 | def customer_view_portal(self):
18 | customers_http_actions.customer_view_portal(self)
19 |
20 | @task(config.percentage('customer_view_requests'))
21 | def customer_view_requests(self):
22 | customers_http_actions.customer_view_requests(self)
23 |
24 | @task(config.percentage('customer_view_request'))
25 | def customer_view_request(self):
26 | customers_http_actions.customer_view_request(self)
27 |
28 | @task(config.percentage('customer_add_comment'))
29 | def customer_add_comment(self):
30 | customers_http_actions.customer_add_comment(self)
31 |
32 | @task(config.percentage('customer_share_request_with_customer'))
33 | def customer_share_request_with_customer(self):
34 | customers_http_actions.customer_share_request_with_customer(self)
35 |
36 | @task(config.percentage('customer_share_request_with_org'))
37 | def customer_share_request_with_org(self):
38 | customers_http_actions.customer_share_request_with_org(self)
39 |
40 | @task(config.percentage('customer_create_request'))
41 | def customer_create_request(self):
42 | customers_http_actions.customer_create_request(self)
43 |
44 | @task(config.percentage('customer_standalone_extension')) # By default disabled
45 | def custom_action(self):
46 | app_specific_action(self)
47 |
48 |
49 | class JsmCustomer(HttpUser):
50 | host = JSM_SETTINGS.server_url
51 | tasks = [JsmCustomerBehavior]
52 | wait_time = between(0, 0)
53 |
--------------------------------------------------------------------------------
/app/reports_generation/README.md:
--------------------------------------------------------------------------------
1 | ## Reports generator - a tool that creates an aggregated .csv file, a chart and a summary report from multiple run results.
2 | Before you start, make sure you have installed the Python packages from [requirements.txt](../../requirements.txt).
3 |
4 | Otherwise, run the `pip install -r requirements.txt` command from the DCAPT [root](../..) directory to install the necessary packages into your virtual environment.
5 |
6 | To create reports, run the
7 | `python csv_chart_generator.py [performance_profile.yml or scale_profile.yml]` command from the `reports_generation` folder.
8 |
9 | The aggregated .csv files, charts and summary report are stored in the `results/reports` directory.
10 | Before running, edit `performance_profile.yml` or `scale_profile.yml` and set the appropriate `relativePath` values.
11 |
12 | **Configuration**
13 | - `column_name` - column name from results.csv used for aggregation
14 | - `runName` - label for a specific run
15 | - `runType` - label for the run type
16 | - `relativePath` - the relative path to the results folder of a specific run, starting from the dc-app-performance-toolkit folder
17 | - `index_col` - index column
18 | - `title` - chart title (this value is also used to generate the file name)
19 | - `image_height_px` - chart image height in pixels
20 | - `image_width_px` - chart image width in pixels
21 | - `check_actions_count` - [optional] check that the actions count is the same for all runs. Default value is `true`
22 | - `judge` - [optional] compare results by measuring the performance deviation of the experiment runs from the baseline
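
For example, a filled-in `performance_profile.yml` for two Jira runs might look like the sketch below (the timestamped paths are placeholders; point them at your own results directories):

```yaml
column_name: "90% Line"
runs:
  - runName: "without app"
    runType: "baseline"
    relativePath: "./app/results/jira/2024-01-01_10-10-10"
  - runName: "with app"
    runType: "experiment"
    relativePath: "./app/results/jira/2024-01-02_12-30-00"

index_col: "Action"
title: "DCAPT Performance Testing"
image_height_px: 1000
image_width_px: 1600
check_actions_count: false
```

With this configuration, running `python csv_chart_generator.py performance_profile.yml` from the `reports_generation` folder aggregates the `90% Line` values of both runs into a single .csv file, chart and summary report.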
23 |
--------------------------------------------------------------------------------
/app/reports_generation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/reports_generation/__init__.py
--------------------------------------------------------------------------------
/app/reports_generation/bamboo_profile.yml:
--------------------------------------------------------------------------------
1 | # Defines which column from test runs is used for aggregated report. Default is "90% Line"
2 | column_name: "90% Line"
3 | runs:
4 | # relativePath should contain a relative path to the directory with run results, starting from the dc-app-performance-toolkit folder.
5 | # E.g. relativePath: "./app/results/bamboo/2024-01-01_10-10-10"
6 | - runName: "without app"
7 | runType: "baseline"
8 | relativePath: "./app/results/bamboo/{TIMESTAMP}"
9 | - runName: "with app"
10 | runType: "experiment"
11 | relativePath: "./app/results/bamboo/{TIMESTAMP}"
12 | - runName: "with app and app-specific actions"
13 | runType: "experiment"
14 | relativePath: "./app/results/bamboo/{TIMESTAMP}"
15 |
16 | # Chart generation config
17 | index_col: "Action"
18 | title: "DCAPT Performance Testing for Bamboo"
19 | image_height_px: 1000
20 | image_width_px: 1600
21 | check_actions_count: false
22 |
--------------------------------------------------------------------------------
/app/reports_generation/csv_chart_generator.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | from pathlib import Path
3 |
4 | from scripts import (config_provider, csv_aggregator, chart_generator,
5 | summary_aggregator, results_archivator)
6 |
7 |
8 | def main():
9 | config = config_provider.get_config()
10 | product_name = summary_aggregator.__get_product_name(config)
11 | results_dir = __get_results_dir(config, product_name)
12 |
13 | agg_csv = csv_aggregator.aggregate(config, results_dir)
14 | agg, scenario_status = summary_aggregator.aggregate(config, results_dir)
15 | chart_generator_config = config_provider.get_chart_generator_config(config, agg_csv)
16 | chart_generator.perform_chart_creation(chart_generator_config, results_dir, scenario_status)
17 | results_archivator.archive_results(config, results_dir)
18 |
19 |
20 | def __get_results_dir(config, product_name) -> Path:
21 | path = (Path(__file__).absolute().parents[1] / "results" / "reports" /
22 | f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{product_name}_{config['profile']}")
23 | print(f"Results dir: {path}")
24 | path.mkdir(parents=True, exist_ok=True)
25 | return path
26 |
27 |
28 | if __name__ == "__main__":
29 | main()
30 |
--------------------------------------------------------------------------------
/app/reports_generation/performance_profile.yml:
--------------------------------------------------------------------------------
1 | # Defines which column from test runs is used for aggregated report. Default is "90% Line"
2 | column_name: "90% Line"
3 | runs:
4 | # relativePath should contain a relative path to the directory with run results, starting from the dc-app-performance-toolkit folder.
5 | # E.g. relativePath: "./app/results/jira/2024-01-01_10-10-10"
6 | - runName: "without app"
7 | runType: "baseline"
8 | relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}"
9 | - runName: "with app"
10 | runType: "experiment"
11 | relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}"
12 |
13 | # Chart generation config
14 | index_col: "Action"
15 | title: "DCAPT Performance Testing"
16 | image_height_px: 1000
17 | image_width_px: 1600
18 | check_actions_count: false
19 |
--------------------------------------------------------------------------------
/app/reports_generation/scale_profile.yml:
--------------------------------------------------------------------------------
1 | # Defines which column from test runs is used for aggregated report. Default is "90% Line"
2 | column_name: "90% Line"
3 | runs:
4 | # relativePath should contain a relative path to the directory with run results, starting from the dc-app-performance-toolkit folder.
5 | # E.g. relativePath: "./app/results/jira/2024-01-01_10-10-10"
6 | - runName: "1 Node"
7 | runType: "baseline"
8 | relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}"
9 | - runName: "2 Nodes"
10 | runType: "experiment"
11 | relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}"
12 | - runName: "4 Nodes"
13 | runType: "experiment"
14 | relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}"
15 |
16 | # Chart generation config
17 | index_col: "Action"
18 | title: "DCAPT Scale Testing"
19 | image_height_px: 1000
20 | image_width_px: 1600
21 |
--------------------------------------------------------------------------------
/app/reports_generation/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/reports_generation/scripts/__init__.py
--------------------------------------------------------------------------------
/app/reports_generation/scripts/chart_generator.py:
--------------------------------------------------------------------------------
1 | import re
2 | from pathlib import Path
3 |
4 | import matplotlib.pyplot as plt
5 | import pandas as pd
6 | from pandas import DataFrame
7 |
8 | from scripts.utils import validate_file_exists, validate_str_is_not_blank, validate_is_number, get_app_specific_actions
9 |
10 |
11 | def __normalize_file_name(s) -> str:
12 | s = s.lower()
13 | # Remove all non-word characters (everything except numbers, letters and '-')
14 | s = re.sub(r"[^\w\s-]", '', s)
15 | # Replace all runs of whitespace with a single dash
16 | s = re.sub(r"\s+", '_', s)
17 |
18 | return s
19 |
20 |
21 | def __resolve_and_expand_user_path(path: Path) -> Path:
22 | return path.resolve().expanduser()
23 |
24 |
25 | def __read_file_as_data_frame(file_path: Path, index_col: str) -> DataFrame:
26 | if not file_path.exists():
27 | raise SystemExit(f"File {file_path} does not exist")
28 | return pd.read_csv(file_path, index_col=index_col)
29 |
30 |
31 | def __generate_image_name(title: str) -> Path:
32 | return Path(f"{__normalize_file_name(title)}.png")
33 |
34 |
35 | def validate_config(config: dict):
36 | validate_str_is_not_blank(config, "aggregated_csv_path")
37 | validate_str_is_not_blank(config, "index_col")
38 | validate_str_is_not_blank(config, "title")
39 | validate_is_number(config, "image_height_px")
40 | validate_is_number(config, "image_width_px")
41 |
42 |
43 | def make_chart(config: dict, results_dir: Path, scenario_status: str) -> Path:
44 | csv_path_str = config["aggregated_csv_path"]
45 | index_col = config["index_col"]
46 | title = config["title"] + f" | Scenario status: {scenario_status}"
47 | image_height_px = config["image_height_px"]
48 | image_width_px = config["image_width_px"]
49 |
50 | image_height = image_height_px / 100
51 | image_width = image_width_px / 100
52 |
53 | file_path = __resolve_and_expand_user_path(Path(csv_path_str))
54 | data_frame = __read_file_as_data_frame(file_path, index_col)
55 | print(f"Input data file {file_path} successfully read")
56 |
57 | # Set app-specific mark
58 | app_specific_actions_list = get_app_specific_actions(file_path)
59 | for action in app_specific_actions_list:
60 | data_frame = data_frame.rename(index={action: f"\u2714{action}"})
61 |
62 | data_frame = data_frame.sort_index()
63 | data_frame.plot.barh(figsize=(image_width, image_height))
64 | plt.xlabel('Time, ms')
65 | plt.title(title)
66 | plt.tight_layout()
67 |
68 | image_path = results_dir / __generate_image_name(Path(csv_path_str).stem)
69 | plt.savefig(image_path)
70 | validate_file_exists(image_path, f"Result file {image_path} is not created")
71 | print(f"Chart file: {image_path.absolute()} successfully created")
72 |
73 | return image_path
74 |
75 |
76 | def perform_chart_creation(config: dict, results_dir: Path, scenario_status: str) -> Path:
77 | validate_config(config)
78 | output_file_path = make_chart(config, results_dir, scenario_status)
79 | return output_file_path
80 |
--------------------------------------------------------------------------------
/app/reports_generation/scripts/config_provider.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | import yaml
5 |
6 |
7 | def get_config() -> dict:
8 | config_path = resolve_file_path(__get_config_file())
9 | config = __read_config_file(config_path)
10 | config['profile'] = config_path.stem
11 | return config
12 |
13 |
14 | def __get_config_file() -> str:
15 | config_file = ''.join(sys.argv[1:])
16 | if not len(config_file) > 0:
17 | raise SystemExit("Please provide configuration file path as input parameter")
18 |
19 | return config_file
20 |
21 |
22 | def __read_config_file(config_file_path: Path) -> dict:
23 | if not config_file_path.exists():
24 | raise SystemExit(f"File {config_file_path} does not exist")
25 |
26 | with config_file_path.open(mode="r") as stream:
27 | try:
28 | return yaml.safe_load(stream)
29 | except yaml.YAMLError as exc:
30 | raise SystemExit(exc, f"Error while parsing configuration file: {config_file_path}")
31 |
32 |
33 | def resolve_file_path(path: str):
34 | resolved_file_path = __resolve_and_expand_user_path(Path(path))
35 | return resolved_file_path
36 |
37 |
38 | def __resolve_and_expand_user_path(path: Path) -> Path:
39 | return path.resolve().expanduser()
40 |
41 |
42 | def get_chart_generator_config(config: dict, agg_csv_path: Path) -> dict:
43 | config["aggregated_csv_path"] = str(agg_csv_path)
44 | return config
45 |
--------------------------------------------------------------------------------
/app/reports_generation/scripts/csv_aggregator.py:
--------------------------------------------------------------------------------
1 | import csv
2 | from pathlib import Path
3 | from typing import List
4 |
5 | from scripts.utils import validate_file_exists, resolve_relative_path, validate_config
6 |
7 | RESULTS_CSV_FILE_NAME = "results.csv"
8 |
9 |
10 | class ResultsCSV:
11 |
12 | def __init__(self, absolute_file_path, actions: dict):
13 | self.absolute_file_path = absolute_file_path
14 | self.actions = actions
15 |
16 |
17 | def __create_header(config) -> List[str]:
18 | header = ['Action']
19 |     header.extend(run['runName'] for run in config['runs'])
20 | # Append 'App-specific' header
21 | header.append('App-specific')
22 |
23 | return header
24 |
25 |
26 | def __validate_count_of_actions(tests_results: List[ResultsCSV]):
27 | if any(len(tests_results[0].actions) != len(actions_count.actions) for actions_count in tests_results):
28 | for file in tests_results:
29 | print(f'Result file {file.absolute_file_path} has {len(file.actions)} actions\n')
30 | raise SystemExit('Incorrect number of actions. '
31 | 'The number of actions should be the same for each results.csv.')
32 |
33 |
34 | def __get_tests_results(config: dict) -> List[ResultsCSV]:
35 | results_files_list = []
36 | column_name = config['column_name']
37 | for run in config['runs']:
38 | value_by_action = {}
39 | absolute_file_path = resolve_relative_path(run['relativePath']) / RESULTS_CSV_FILE_NAME
40 | with absolute_file_path.open(mode='r') as fs:
41 | for row in csv.DictReader(fs):
42 | value_by_action[row['Label']] = {column_name: row[column_name], 'App-specific': row['App specific']}
43 | results_files_list.append(ResultsCSV(absolute_file_path=absolute_file_path, actions=value_by_action))
44 |
45 | return results_files_list
46 |
47 |
48 | def __write_list_to_csv(header: List[str], tests_results: List[ResultsCSV], output_filename: Path, config: dict):
49 | actions = []
50 | for test_result in tests_results:
51 | for action in test_result.actions:
52 | if action not in actions:
53 | actions.append(action)
54 |
55 | with output_filename.open(mode='w', newline='') as file_stream:
56 | writer = csv.writer(file_stream)
57 | writer.writerow(header)
58 | for action in actions:
59 | row = [action]
60 | app_specific = False
61 | for test_result in tests_results:
62 | if test_result.actions.get(action):
63 | row.append(test_result.actions.get(action)[config['column_name']])
64 | app_specific = test_result.actions.get(action)['App-specific']
65 | else:
66 | row.append(None)
67 | row.append(app_specific)
68 | writer.writerow(row)
69 |
70 |
71 | def __get_output_file_path(config, results_dir) -> Path:
72 | return results_dir / f"{config['profile']}.csv"
73 |
74 |
75 | def aggregate(config: dict, results_dir: Path) -> Path:
76 | validate_config(config)
77 | tests_results = __get_tests_results(config)
78 | if config.get('check_actions_count', True):
79 | __validate_count_of_actions(tests_results)
80 | output_file_path = __get_output_file_path(config, results_dir)
81 | header = __create_header(config)
82 | __write_list_to_csv(header, tests_results, output_file_path, config)
83 |
84 | validate_file_exists(output_file_path, f"Result file {output_file_path} is not created")
85 | print(f'Results file {output_file_path.absolute()} is created')
86 | return output_file_path
87 |
--------------------------------------------------------------------------------
/app/reports_generation/scripts/results_archivator.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from shutil import make_archive
3 |
4 | from scripts.utils import validate_config, clean_str, resolve_relative_path
5 |
6 |
7 | def __zip_folder(folder_path: Path, destination_path: Path) -> Path:
8 | archive_path = make_archive(destination_path, 'zip', folder_path)
9 | return Path(archive_path)
10 |
11 |
12 | def archive_results(config: dict, results_dir: Path):
13 | validate_config(config)
14 | for run in config['runs']:
15 | results_folder_path = resolve_relative_path(run['relativePath'])
16 | destination_name = f"{config['profile']}_run_{clean_str(run['runName'])}_{results_folder_path.name}"
17 | destination_path = results_dir / destination_name
18 | archive_path = __zip_folder(results_folder_path, destination_path)
19 | print(f'Folder {results_folder_path} is zipped to {archive_path}')
20 |
--------------------------------------------------------------------------------
/app/reports_generation/scripts/summary_aggregator.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import List, Tuple
3 |
4 | from scripts.utils import validate_file_exists, resolve_relative_path, validate_config
5 |
6 | SUPPORTED_TEST_ATLASSIAN_PRODUCTS = ("bamboo", "bitbucket", "confluence", "crowd", "jira", "jsm", )
7 | SUMMARY_FILE_NAME = "results_summary.log"
8 | DELIMITER = ('\n================================================================================'
9 | '========================================\n')
10 |
11 |
12 | def __get_summary_files(config: dict) -> List[Path]:
13 | summary_files = []
14 | for run in config['runs']:
15 | file_path = resolve_relative_path(run['relativePath']) / SUMMARY_FILE_NAME
16 |         validate_file_exists(file_path, f"File {file_path} does not exist")
17 |         summary_files.append(file_path)
18 | return summary_files
19 |
20 |
21 | def __get_product_name(config):
22 | summary_files = __get_summary_files(config)
23 | for file in summary_files:
24 | with file.open('r') as f:
25 | for line in f:
26 | if "Application" in line:
27 | file_content = line
28 | for product in SUPPORTED_TEST_ATLASSIAN_PRODUCTS:
29 | if product in file_content:
30 | return product
31 | print("WARNING: No product name found in log files.")
32 |
33 |
34 | def __get_run_names(config: dict) -> list:
35 | run_names = []
36 | for run in config['runs']:
37 | run_names.append(run['runName'])
38 | return run_names
39 |
40 |
41 | def __write_to_summary_report(file_names: List[Path], run_names: List, status: str, output_filename: Path):
42 | with output_filename.open('a') as outfile:
43 | outfile.write(f"Scenario status: {status}")
44 | outfile.write(DELIMITER)
45 | for file, run_name in zip(file_names, run_names):
46 | with file.open('r') as infile:
47 | outfile.write(f"Run name: {run_name}\n\n")
48 | outfile.write(infile.read())
49 | outfile.write(DELIMITER)
50 |
51 |
52 | def __get_output_file_path(config, results_dir) -> Path:
53 | return results_dir / f"{config['profile']}_summary.log"
54 |
55 |
56 | def __get_overall_status(files: List[Path]) -> bool:
57 | for file in files:
58 | with file.open('r') as f:
59 | first_line = f.readline()
60 | if 'FAIL' in first_line:
61 | return False
62 | return True
63 |
64 |
65 | def aggregate(config: dict, results_dir: Path) -> Tuple[Path, str]:
66 | validate_config(config)
67 | output_file_path = __get_output_file_path(config, results_dir)
68 | summary_files = __get_summary_files(config)
69 | run_names = __get_run_names(config)
70 | status_message = 'OK' if __get_overall_status(summary_files) else "FAIL"
71 | __write_to_summary_report(summary_files, run_names, status_message, output_file_path)
72 | validate_file_exists(output_file_path, f"Results file {output_file_path} is not created")
73 | print(f'Results file {output_file_path.absolute()} is created')
74 | return output_file_path, status_message
75 |
--------------------------------------------------------------------------------
/app/reports_generation/scripts/utils.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import numbers
3 | from pathlib import Path
4 |
5 |
6 | def resolve_relative_path(str_path: str) -> Path:
7 | """
8 | Resolve relative path from .yml scenario configuration file.
9 | Expected working dir for csv_chart_generator.py: ./dc-app-performance-toolkit/app/reports_generation
10 | Expected relative path starting from ./dc-app-performance-toolkit folder.
11 | """
12 | expected_working_dir_name = 'reports_generation'
13 | working_dir = Path().resolve().expanduser()
14 | if working_dir.name != expected_working_dir_name:
15 | raise SystemExit(f"ERROR: expected working dir name: {expected_working_dir_name}, actual: {working_dir.name}")
16 | return Path().resolve().expanduser().parents[1] / str_path
17 |
18 |
19 | def validate_str_is_not_blank(config: dict, key: str):
20 | value = config.get(key)
21 | if (value is None) or (not value.strip()):
22 | raise SystemExit(f"Config [{key}] is not present in config file or its value is empty")
23 | pass
24 |
25 |
26 | def validate_is_number(config: dict, key: str):
27 | value = config.get(key)
28 | if value is None:
29 | raise SystemExit(f"Config [{key}] is not present in config file or its value is empty")
30 |
31 | if not isinstance(value, numbers.Number):
32 | raise SystemExit(f"Value [{value}] is not a number")
33 |
34 |
35 | def validate_file_exists(file: Path, msg: str):
36 | if not file.exists():
37 | raise SystemExit(msg)
38 |
39 |
40 | def read_csv_by_line(file: Path) -> list:
41 | lines = []
42 | with open(file, 'r') as data:
43 | for line in csv.DictReader(data):
44 | lines.append(line)
45 | return lines
46 |
47 |
48 | def string_to_bool(val):
49 | """
50 | Convert a string representation of truth to a boolean.
51 | True values are 'y', 'yes', 't', 'true', 'on', and '1';
52 | False values are 'n', 'no', 'f', 'false', 'off', and '0'.
53 | Raises ValueError if 'val' is anything else.
54 | """
55 | val = val.strip().lower()
56 | if val in ('y', 'yes', 't', 'true', 'on', '1'):
57 | return True
58 | elif val in ('n', 'no', 'f', 'false', 'off', '0'):
59 | return False
60 | else:
61 | raise ValueError(f"Invalid truth value: {val}")
62 |
63 |
64 | def get_app_specific_actions(file: Path) -> list:
65 | app_specific_list = []
66 | actions = read_csv_by_line(file)
67 | for action in actions:
68 | if string_to_bool(action['App-specific']):
69 | app_specific_list.append(action['Action'])
70 | return app_specific_list
71 |
72 |
73 | def validate_config(config: dict):
74 | validate_str_is_not_blank(config, 'column_name')
75 | validate_str_is_not_blank(config, 'profile')
76 |
77 | runs = config.get('runs')
78 | if not isinstance(runs, list):
79 | raise SystemExit('Config key "runs" should be a list')
80 |
81 | for run in runs:
82 | if not isinstance(run, dict):
83 | raise SystemExit('Config key "run" should be a dictionary')
84 |
85 | validate_str_is_not_blank(run, 'runName')
86 | validate_str_is_not_blank(run, 'relativePath')
87 |
88 |
89 | def clean_str(string: str):
90 | # replace spaces with "_"
91 | string = string.replace(" ", "_")
92 |     # Keep only alphanumeric characters and "_"
93 | return ''.join(e for e in string if e.isalnum() or e == "_")
94 |
--------------------------------------------------------------------------------
/app/selenium_ui/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/selenium_ui/__init__.py
--------------------------------------------------------------------------------
/app/selenium_ui/bamboo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/selenium_ui/bamboo/__init__.py
--------------------------------------------------------------------------------
/app/selenium_ui/bamboo/modules.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from selenium_ui.bamboo.pages.pages import Login, Logout, ProjectList, BuildList, PlanConfiguration, BuildActivity, \
4 | PlanSummary, BuildSummary, BuildLogs, PlanHistory, JobConfiguration
5 | from selenium_ui.conftest import print_timing
6 |
7 | USERS = "users"
8 | BUILD_PLANS = "build_plans"
9 |
10 |
11 | def setup_run_data(datasets):
12 | user = random.choice(datasets[USERS])
13 | build_plan = random.choice(datasets[BUILD_PLANS])
14 | datasets['username'] = user[0]
15 | datasets['password'] = user[1]
16 | datasets['build_plan_id'] = build_plan[1]
17 |
18 |
19 | def login(webdriver, datasets):
20 | setup_run_data(datasets)
21 | login_page = Login(webdriver)
22 |
23 | @print_timing("selenium_login")
24 | def measure():
25 | @print_timing("selenium_login:open_login_page")
26 | def sub_measure():
27 | login_page.go_to()
28 | login_page.wait_for_page_loaded()
29 |
30 | sub_measure()
31 | login_page.set_credentials(username=datasets['username'], password=datasets['password'])
32 | login_page.click_login_button()
33 |
34 | measure()
35 |
36 |
37 | def view_all_projects(webdriver, datasets):
38 | @print_timing("selenium_view_all_projects")
39 | def measure():
40 | projects_page = ProjectList(webdriver)
41 | projects_page.click_projects_button()
42 | projects_page.wait_for_page_loaded()
43 | measure()
44 |
45 |
46 | def view_all_builds(webdriver, datasets):
47 | @print_timing("selenium_view_all_builds")
48 | def measure():
49 | builds_page = BuildList(webdriver)
50 | builds_page.click_all_builds_button()
51 | builds_page.wait_for_page_loaded()
52 |
53 | measure()
54 |
55 |
56 | def config_plan(webdriver, datasets):
57 | @print_timing("selenium_config_plan")
58 | def measure():
59 | config_page = PlanConfiguration(webdriver)
60 | config_page.click_config_plan_button()
61 | config_page.wait_for_page_loaded()
62 |
63 | measure()
64 |
65 |
66 | def builds_queue_page(webdriver, datasets):
67 | @print_timing("selenium_view_build_activity")
68 | def measure():
69 | activity_page = BuildActivity(webdriver)
70 | activity_page.open_build_dropdown()
71 | activity_page.wait_for_page_loaded()
72 |
73 | measure()
74 |
75 |
76 | def view_plan_summary(webdriver, datasets):
77 | plan_summary = PlanSummary(webdriver, build_plan_id=datasets['build_plan_id'])
78 |
79 | @print_timing("selenium_view_plan_summary")
80 | def measure():
81 | plan_summary.go_to_summary_plan_page()
82 | plan_summary.wait_for_page_loaded()
83 |
84 | measure()
85 |
86 |
87 | def view_build_summary(webdriver, datasets):
88 | build_summary = BuildSummary(webdriver, build_plan_id=datasets['build_plan_id'])
89 |
90 | @print_timing("selenium_view_build_summary")
91 | def measure():
92 | build_summary.go_to_build_summary_page()
93 | build_summary.wait_for_page_loaded()
94 |
95 | measure()
96 |
97 |
98 | def view_plan_history_page(webdriver, datasets):
99 | plan_history = PlanHistory(webdriver, build_plan_id=datasets['build_plan_id'])
100 |
101 | @print_timing("selenium_view_plan_history")
102 | def measure():
103 | plan_history.go_to_plan_history_page()
104 | plan_history.wait_for_page_loaded()
105 |
106 | measure()
107 |
108 |
109 | def view_build_logs(webdriver, datasets):
110 | @print_timing("selenium_view_build_logs")
111 | def measure():
112 | view_logs = BuildLogs(webdriver)
113 | view_logs.go_to_build_logs()
114 | view_logs.wait_for_page_loaded()
115 |
116 | measure()
117 |
118 |
119 | def view_job_configuration(webdriver, datasets):
120 |
121 | @print_timing("selenium_view_job_configuration")
122 | def measure():
123 | view_job_configuration_page = JobConfiguration(webdriver)
124 | view_job_configuration_page.click_config_plan_button()
125 | view_job_configuration_page.click_job_config_button()
126 | view_job_configuration_page.wait_for_page_loaded()
127 | measure()
128 |
129 |
130 | def log_out(webdriver, datasets):
131 | logout = Logout(webdriver)
132 |
133 | @print_timing("selenium_log_out")
134 | def measure():
135 | logout.go_to()
136 | logout.wait_for_page_loaded()
137 | measure()
138 |
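A note on the data shapes consumed above: `setup_run_data` assumes each user row exposes the username at index 0 and the password at index 1, and each build-plan row exposes the plan key at index 1. A minimal sketch of that mapping with hypothetical row values (the real rows come from the toolkit's generated Bamboo dataset):

# Hypothetical illustration only; real rows are produced by the Bamboo data preparation step.
datasets = {
    "users": [["performance_user_1", "password1"]],      # user[0] -> username, user[1] -> password
    "build_plans": [["Some plan name", "PROJ1-PLAN1"]],  # build_plan[1] -> plan key used in page URLs
}

setup_run_data(datasets)
assert datasets["username"] == "performance_user_1"
assert datasets["build_plan_id"] == "PROJ1-PLAN1"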
--------------------------------------------------------------------------------
/app/selenium_ui/bamboo/pages/pages.py:
--------------------------------------------------------------------------------
1 | from selenium_ui.base_page import BasePage
2 |
3 | from selenium_ui.bamboo.pages.selectors import UrlManager, LoginPageLocators, AllProjectsLocators, AllBuildsLocators, \
4 | PlanConfigurationLocators, BuildActivityLocators, PlanSummaryLocators, PlanHistoryLocators, BuildSummaryLocators, \
5 | BuildLogsLocators, JobConfigLocators, LogoutLocators
6 |
7 |
8 | class Login(BasePage):
9 | page_url = LoginPageLocators.login_page_url
10 |
11 | def __init__(self, driver):
12 | super().__init__(driver)
13 | self.is_2sv_login = False
14 |
15 | def wait_for_page_loaded(self):
16 | self.wait_until_visible(LoginPageLocators.login_page_content)
17 | if not self.get_elements(LoginPageLocators.login_button):
18 | self.is_2sv_login = True
19 | self.wait_until_visible(LoginPageLocators.login_button_2sv)
20 | print("INFO: 2sv login form")
21 |
22 | def click_login_button(self):
23 | if self.is_2sv_login:
24 | self.wait_until_visible(LoginPageLocators.login_button_2sv).click()
25 | self.wait_until_invisible(LoginPageLocators.login_button_2sv)
26 | else:
27 | self.wait_until_visible(LoginPageLocators.login_button).click()
28 | self.wait_until_invisible(LoginPageLocators.login_button)
29 |
30 | def set_credentials(self, username, password):
31 | if self.is_2sv_login:
32 | self.get_element(LoginPageLocators.login_username_field_2sv).send_keys(username)
33 | self.get_element(LoginPageLocators.login_password_field_2sv).send_keys(password)
34 | else:
35 | self.get_element(LoginPageLocators.login_username_field).send_keys(username)
36 | self.get_element(LoginPageLocators.login_password_field).send_keys(password)
37 |
38 |
39 | class ProjectList(BasePage):
40 | page_loaded_selector = [AllProjectsLocators.project_table, AllProjectsLocators.project_name_column]
41 |
42 | def click_projects_button(self):
43 | self.wait_until_visible(AllProjectsLocators.projects_button).click()
44 |
45 |
46 | class BuildList(BasePage):
47 | page_loaded_selector = AllBuildsLocators.builds_table
48 |
49 | def click_all_builds_button(self):
50 | self.wait_until_visible(AllBuildsLocators.all_builds_button).click()
51 |
52 |
53 | class PlanConfiguration(BasePage):
54 | page_loaded_selector = [PlanConfigurationLocators.config_plan_page,
55 | PlanConfigurationLocators.config_plan_page_content]
56 |
57 | def click_config_plan_button(self):
58 | self.wait_until_visible(PlanConfigurationLocators.edit_config_button).click()
59 |
60 |
61 | class BuildActivity(BasePage):
62 | page_loaded_selector = [BuildActivityLocators.build_activity_page, BuildActivityLocators.build_dashboard]
63 |
64 | def open_build_dropdown(self):
65 | self.wait_until_clickable(BuildActivityLocators.build_dropdown).click()
66 | self.wait_until_clickable(BuildActivityLocators.build_activity_button).click()
67 |
68 |
69 | class PlanSummary(BasePage):
70 | page_loaded_selector = [PlanSummaryLocators.plan_details_summary, PlanSummaryLocators.plan_stats_summary]
71 |
72 | def __init__(self, driver, build_plan_id=None):
73 | BasePage.__init__(self, driver)
74 | plan_summary = UrlManager(build_plan_id=build_plan_id)
75 | self.plan_summary_url = plan_summary.plan_summary_url()
76 |
77 | def go_to_summary_plan_page(self):
78 | self.go_to_url(self.plan_summary_url)
79 |
80 |
81 | class PlanHistory(BasePage):
82 | page_loaded_selector = PlanHistoryLocators.build_results
83 |
84 | def __init__(self, driver, build_plan_id=None):
85 | BasePage.__init__(self, driver)
86 | plan_history = UrlManager(build_plan_id=build_plan_id)
87 | self.plan_history_url = plan_history.plan_history_url()
88 |
89 | def go_to_plan_history_page(self):
90 | self.go_to_url(self.plan_history_url)
91 |
92 |
93 | class BuildSummary(BasePage):
94 | page_loaded_selector = BuildSummaryLocators.build_summary_status
95 |
96 | def __init__(self, driver, build_plan_id=None):
97 | BasePage.__init__(self, driver)
98 | build_summary = UrlManager(build_plan_id=build_plan_id)
99 | self.build_summary_url = build_summary.build_summary_url()
100 |
101 | def go_to_build_summary_page(self):
102 | self.go_to_url(self.build_summary_url)
103 |
104 |
105 | class BuildLogs(BasePage):
106 | page_loaded_selector = BuildLogsLocators.view_logs
107 |
108 | def go_to_build_logs(self):
109 | self.wait_until_clickable(BuildLogsLocators.logs_button).click()
110 |
111 |
112 | class JobConfiguration(BasePage):
113 |     page_loaded_selector = [JobConfigLocators.edit_panel, JobConfigLocators.edit_panel_list]
114 |
115 | def click_config_plan_button(self):
116 | self.wait_until_clickable(AllBuildsLocators.all_builds_button).click()
117 | self.wait_until_visible(PlanConfigurationLocators.edit_config_button).click()
118 |
119 | def click_job_config_button(self):
120 | self.wait_until_clickable(JobConfigLocators.job_config).click()
121 |
122 |
123 | class Logout(BasePage):
124 | page_url = LogoutLocators.logout_url
125 |
126 | def wait_for_page_loaded(self):
127 | self.wait_until_visible(LogoutLocators.login_button_link)
128 |
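New page objects follow the same shape as the classes above: subclass `BasePage`, point `page_loaded_selector` at one or more locators that appear only once the page has rendered, and expose small action methods. A minimal sketch using a hypothetical locator class (not part of the toolkit's selectors.py):

# Hypothetical example; CustomPageLocators would need to be defined in selectors.py.
class CustomPage(BasePage):
    page_loaded_selector = [CustomPageLocators.page_container]  # a single locator or a list of locators

    def click_custom_button(self):
        self.wait_until_visible(CustomPageLocators.custom_button).click()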
--------------------------------------------------------------------------------
/app/selenium_ui/bamboo/pages/selectors.py:
--------------------------------------------------------------------------------
1 | from selenium.webdriver.common.by import By
2 | from util.conf import BAMBOO_SETTINGS
3 |
4 |
5 | class UrlManager:
6 |
7 | def __init__(self, build_plan_id=None):
8 | self.host = BAMBOO_SETTINGS.server_url
9 | self.login_params = '/userlogin.action'
10 | self.logout_params = '/userLogout.action'
11 | self.all_projects_params = '/allProjects.action'
12 | self.plan_summary_params = f'/browse/{build_plan_id}'
13 | self.plan_history_params = f'/browse/{build_plan_id}/history'
14 | self.build_summary_params = f'/browse/{build_plan_id}-1'
15 |
16 | def login_url(self):
17 | return f"{self.host}{self.login_params}"
18 |
19 | def all_projects_url(self):
20 | return f"{self.host}{self.all_projects_params}"
21 |
22 | def plan_summary_url(self):
23 | return f"{self.host}{self.plan_summary_params}"
24 |
25 | def plan_history_url(self):
26 | return f"{self.host}{self.plan_history_params}"
27 |
28 | def build_summary_url(self):
29 | return f"{self.host}{self.build_summary_params}"
30 |
31 | def logout_url(self):
32 | return f"{self.host}{self.logout_params}"
33 |
34 |
35 | class LoginPageLocators:
36 | login_page_url = UrlManager().login_url()
37 |
38 | login_page_content = (By.ID, "content")
39 |
40 | # legacy login form
41 | login_username_field = (By.ID, "loginForm_os_username")
42 | login_password_field = (By.ID, "loginForm_os_password")
43 | login_button = (By.ID, "loginForm_save")
44 |
45 | # 2sv login form
46 | login_button_2sv = (By.ID, "login-button")
47 | login_username_field_2sv = (By.ID, "username-field")
48 | login_password_field_2sv = (By.ID, "password-field")
49 |
50 |
51 | class AllProjectsLocators:
52 | view_all_projects_url = UrlManager().all_projects_url()
53 | project_table = (By.ID, "projectsTable")
54 | project_name_column = (By.ID, "projectsTable")
55 | projects_button = (By.ID, "allProjects")
56 |
57 |
58 | class AllBuildsLocators:
59 | all_builds_button = (By.ID, "logo")
60 | builds_table = (By.ID, "dashboard")
61 |
62 |
63 | class PlanConfigurationLocators:
64 | edit_config_button = (By.XPATH, "//span[@title='Configure plan']")
65 | config_plan_page = (By.ID, "config-sidebar")
66 | config_plan_page_content = (By.ID, "content")
67 |
68 |
69 | class BuildActivityLocators:
70 | build_dropdown = (By.ID, "system_build_menu")
71 | build_activity_button = (By.ID, "currentTab")
72 | build_activity_page = (By.ID, "page")
73 | build_dashboard = (By.ID, "dashboard-instance-name")
74 |
75 |
76 | class PlanSummaryLocators:
77 | plan_details_summary = (By.ID, "planDetailsSummary")
78 | plan_stats_summary = (By.ID, "planStatsSummary")
79 |
80 |
81 | class PlanHistoryLocators:
82 | build_results = (By.CLASS_NAME, "aui-page-panel-content")
83 |
84 |
85 | class BuildSummaryLocators:
86 | build_summary_status = (By.ID, "status-ribbon")
87 |
88 |
89 | class BuildLogsLocators:
90 | logs_button = (By.XPATH, "//strong[contains(text(),'Logs')]")
91 | view_logs = (By.CLASS_NAME, "log-trace")
92 |
93 |
94 | class JobConfigLocators:
95 | edit_panel = (By.ID, "panel-editor-setup")
96 | edit_panel_list = (By.ID, "panel-editor-list")
97 | job_config = (By.CLASS_NAME, "job")
98 |
99 |
100 | class LogoutLocators:
101 | logout_url = UrlManager().logout_url()
102 | login_button_link = (By.ID, "login")
103 |
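For orientation, `UrlManager` only concatenates the configured `server_url` (taken from `BAMBOO_SETTINGS`) with action-specific paths. With a hypothetical host and plan key, the generated URLs look like this:

# Illustration of the URL shapes produced by UrlManager (hypothetical host and plan key).
manager = UrlManager(build_plan_id="PROJ1-PLAN1")

# Assuming BAMBOO_SETTINGS.server_url == "http://bamboo.example.com"
manager.plan_summary_url()   # -> "http://bamboo.example.com/browse/PROJ1-PLAN1"
manager.plan_history_url()   # -> "http://bamboo.example.com/browse/PROJ1-PLAN1/history"
manager.build_summary_url()  # -> "http://bamboo.example.com/browse/PROJ1-PLAN1-1"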
--------------------------------------------------------------------------------
/app/selenium_ui/bamboo_ui.py:
--------------------------------------------------------------------------------
1 | from selenium_ui.bamboo import modules
2 | from extension.bamboo import extension_ui # noqa F401
3 |
4 |
5 | # this action should be the first one
6 | def test_0_selenium_a_login(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
7 | modules.login(bamboo_webdriver, bamboo_datasets)
8 |
9 |
10 | def test_1_selenium_view_all_projects(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
11 | modules.view_all_projects(bamboo_webdriver, bamboo_datasets)
12 |
13 |
14 | def test_1_selenium_view_all_builds(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
15 | modules.view_all_builds(bamboo_webdriver, bamboo_datasets)
16 |
17 |
18 | def test_1_selenium_config_plan_page(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
19 | modules.config_plan(bamboo_webdriver, bamboo_datasets)
20 |
21 |
22 | def test_1_selenium_build_activity_queue(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
23 | modules.builds_queue_page(bamboo_webdriver, bamboo_datasets)
24 |
25 |
26 | def test_1_selenium_view_plan_summary(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
27 | modules.view_plan_summary(bamboo_webdriver, bamboo_datasets)
28 |
29 |
30 | def test_1_selenium_view_plan_history(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
31 | modules.view_plan_history_page(bamboo_webdriver, bamboo_datasets)
32 |
33 |
34 | def test_1_selenium_view_build_summary(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
35 | modules.view_build_summary(bamboo_webdriver, bamboo_datasets)
36 |
37 |
38 | def test_1_selenium_view_build_logs(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
39 | modules.view_build_logs(bamboo_webdriver, bamboo_datasets)
40 |
41 |
42 | def test_1_selenium_view_job_configuration(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
43 | modules.view_job_configuration(bamboo_webdriver, bamboo_datasets)
44 |
45 |
46 | """
47 | Add custom actions anywhere between login and log out action. Move this to a different line as needed.
48 | Write your custom selenium scripts in `app/extension/bamboo/extension_ui.py`.
49 | Refer to `app/selenium_ui/bamboo/modules.py` for examples.
50 | """
51 | # def test_1_selenium_custom_action(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
52 | # extension_ui.app_specific_action(bamboo_webdriver, bamboo_datasets)
53 |
54 |
55 | # this action should be the last one
56 | def test_2_selenium_z_log_out(bamboo_webdriver, bamboo_datasets, bamboo_screen_shots):
57 | modules.log_out(bamboo_webdriver, bamboo_datasets)
58 |
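The commented-out custom test above expects an `app_specific_action` function in `app/extension/bamboo/extension_ui.py`. A minimal sketch of such a function, following the `print_timing` pattern from `modules.py`; the servlet path and element id below are placeholders, not real toolkit values:

# Hypothetical sketch for app/extension/bamboo/extension_ui.py
from selenium.webdriver.common.by import By

from selenium_ui.conftest import print_timing
from util.conf import BAMBOO_SETTINGS


def app_specific_action(webdriver, datasets):
    @print_timing("selenium_app_custom_action")
    def measure():
        # Placeholder page and locator; replace with your app's real URL and element.
        webdriver.get(f"{BAMBOO_SETTINGS.server_url}/plugins/servlet/some-app-page")
        webdriver.find_element(By.ID, "some-app-element")

    measure()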
--------------------------------------------------------------------------------
/app/selenium_ui/bitbucket/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/selenium_ui/bitbucket/__init__.py
--------------------------------------------------------------------------------
/app/selenium_ui/bitbucket_ui.py:
--------------------------------------------------------------------------------
1 | from selenium_ui.bitbucket import modules
2 | from extension.bitbucket import extension_ui # noqa F401
3 |
4 |
5 | # this action should be the first one
6 | def test_0_selenium_a_login(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
7 | modules.login(bitbucket_webdriver, bitbucket_datasets)
8 |
9 |
10 | def test_1_selenium_view_list_pull_requests(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
11 | modules.view_list_pull_requests(bitbucket_webdriver, bitbucket_datasets)
12 |
13 |
14 | def test_1_selenium_view_pull_request_overview(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
15 | modules.view_pull_request_overview_tab(bitbucket_webdriver, bitbucket_datasets)
16 |
17 |
18 | def test_1_selenium_view_pull_request_diff(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
19 | modules.view_pull_request_diff_tab(bitbucket_webdriver, bitbucket_datasets)
20 |
21 |
22 | def test_1_selenium_view_pull_request_commits(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
23 | modules.view_pull_request_commits_tab(bitbucket_webdriver, bitbucket_datasets)
24 |
25 |
26 | def test_1_selenium_comment_pull_request_diff(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
27 | modules.comment_pull_request_diff(bitbucket_webdriver, bitbucket_datasets)
28 |
29 |
30 | def test_1_selenium_comment_pull_request_overview(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
31 | modules.comment_pull_request_overview(bitbucket_webdriver, bitbucket_datasets)
32 |
33 |
34 | def test_1_selenium_view_dashboard(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
35 | modules.view_dashboard(bitbucket_webdriver, bitbucket_datasets)
36 |
37 |
38 | def test_1_selenium_create_pull_request(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
39 | modules.create_pull_request(bitbucket_webdriver, bitbucket_datasets)
40 |
41 |
42 | def test_1_selenium_view_projects(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
43 | modules.view_projects(bitbucket_webdriver, bitbucket_datasets)
44 |
45 |
46 | def test_1_selenium_view_project_repositories(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
47 | modules.view_project_repos(bitbucket_webdriver, bitbucket_datasets)
48 |
49 |
50 | def test_1_selenium_view_repo(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
51 | modules.view_repo(bitbucket_webdriver, bitbucket_datasets)
52 |
53 |
54 | def test_1_selenium_view_branches(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
55 | modules.view_branches(bitbucket_webdriver, bitbucket_datasets)
56 |
57 |
58 | def test_1_selenium_view_commits(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
59 | modules.view_commits(bitbucket_webdriver, bitbucket_datasets)
60 |
61 |
62 | """
63 | Add custom actions anywhere between login and log out action. Move this to a different line as needed.
64 | Write your custom selenium scripts in `app/extension/bitbucket/extension_ui.py`.
65 | Refer to `app/selenium_ui/bitbucket/modules.py` for examples.
66 | """
67 | # def test_1_selenium_custom_action(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
68 | # extension_ui.app_specific_action(bitbucket_webdriver, bitbucket_datasets)
69 |
70 |
71 | def test_2_selenium_logout(bitbucket_webdriver, bitbucket_datasets, bitbucket_screen_shots):
72 | modules.logout(bitbucket_webdriver, bitbucket_datasets)
73 |
--------------------------------------------------------------------------------
/app/selenium_ui/confluence/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/selenium_ui/confluence/__init__.py
--------------------------------------------------------------------------------
/app/selenium_ui/confluence/pages/selectors.py:
--------------------------------------------------------------------------------
1 | from selenium.webdriver.common.by import By
2 | from util.conf import CONFLUENCE_SETTINGS
3 |
4 |
5 | class UrlManager:
6 |
7 | def __init__(self, page_id=None):
8 | self.host = CONFLUENCE_SETTINGS.server_url
9 | self.login_params = '/login.action'
10 | self.page_params = f"/pages/viewpage.action?pageId={page_id}&noRedirect=true"
11 | self.dashboard_params = '/dashboard.action#all-updates'
12 | self.edit_page_params = f'/pages/editpage.action?pageId={page_id}'
13 | self.logout_params = "/logout.action"
14 |         self.admin_system_params = "/admin/viewgeneralconfig.action"
15 |
16 | def login_url(self):
17 | return f"{self.host}{self.login_params}"
18 |
19 | def dashboard_url(self):
20 | return f"{self.host}{self.dashboard_params}"
21 |
22 | def page_url(self):
23 | return f"{self.host}{self.page_params}"
24 |
25 | def edit_page_url(self):
26 | return f"{self.host}{self.edit_page_params}"
27 |
28 | def logout_url(self):
29 | return f"{self.host}{self.logout_params}"
30 |
31 | def admin_system_url(self):
32 | return f"{self.host}{self.admin_system_params}"
33 |
34 |
35 | class PopupLocators:
36 | popup_selectors = [
37 | (By.CSS_SELECTOR, ".button-panel-button .set-timezone-button"),
38 | (By.CSS_SELECTOR, ".aui-button aui-button-link .skip-onboarding"),
39 | (By.CSS_SELECTOR, ".aui-button.aui-button-link.skip-onboarding"),
40 | (By.CSS_SELECTOR, "#closeDisDialog"),
41 | (By.CSS_SELECTOR, ".aui-button.aui-button-primary.show-onboarding"),
42 | (By.CSS_SELECTOR, 'button[aria-label="Close this modal"]')
43 | ]
44 |
45 |
46 | class LoginPageLocators:
47 |
48 | sidebar = (By.ID, "sidebar-container")
49 |
50 | # legacy login form
51 | login_button = (By.ID, "loginButton")
52 | login_username_field = (By.ID, "os_username")
53 | login_password_field = (By.ID, "os_password")
54 |
55 | # 2sv login form
56 | login_button_2sv = (By.ID, "login-button")
57 | login_username_field_2sv = (By.ID, "username-field")
58 | login_password_field_2sv = (By.ID, "password-field")
59 |
60 | login_page_url = UrlManager().login_url()
61 | footer_build_info = (By.ID, "footer-build-information")
62 | footer_node_info = (By.ID, "footer-cluster-node")
63 |
64 | # Setup user page per first login
65 | first_login_setup_page = (By.ID, "grow-ic-nav-container")
66 | current_step_sel = (By.CLASS_NAME, "grow-aui-progress-tracker-step-current")
67 | skip_welcome_button = (By.ID, "grow-intro-video-skip-button")
68 | skip_photo_upload = (By.CSS_SELECTOR, ".aui-button-link")
69 | skip_find_content = (By.CSS_SELECTOR, ".intro-find-spaces-space>.space-checkbox")
70 | finish_setup = (By.CSS_SELECTOR, ".intro-find-spaces-button-continue")
71 |
72 | # logout
73 | logout = (By.XPATH, "//a[@href='logout.action']")
74 |
75 |
76 | class AllUpdatesLocators:
77 | updates_content = (By.CLASS_NAME, "list-container-all-updates")
78 |
79 |
80 | class PageLocators:
81 | page_title = (By.ID, "title-text")
82 | comment_text_field = (By.CSS_SELECTOR, ".quick-comment-prompt")
83 | edit_page_button = (By.ID, "editPageLink")
84 | search_box = (By.ID, "quick-search-query")
85 | search_results = (By.ID, "search-result-container")
86 | close_search_button = (By.ID, "search-drawer-close")
87 | empty_search_results = (By.CLASS_NAME, "captioned-image-component")
88 |
89 |
90 | class DashboardLocators:
91 | dashboard_url = UrlManager().dashboard_url()
92 | all_updates = (By.CLASS_NAME, "content-header-all-updates")
93 |
94 |
95 | class TopPanelLocators:
96 | create_button = (By.ID, "quick-create-page-button")
97 |
98 |
99 | class EditorLocators:
100 | publish_button = (By.ID, "rte-button-publish")
101 | confirm_publishing_button = (By.ID, "qed-publish-button")
102 | title_field = (By.ID, "content-title")
103 | page_content_field = (By.ID, "wysiwygTextarea_ifr")
104 | tinymce_page_content_field = (By.ID, "tinymce")
105 | tinymce_page_content_parahraph = (By.TAG_NAME, 'p')
106 |
107 | status_indicator = (By.CLASS_NAME, "status-indicator-message")
108 | save_spinner = (By.ID, "rte-spinner")
109 |
110 |
111 | class LogoutLocators:
112 | logout_msg = (By.ID, "logout-message")
113 |
114 |
115 | class XsrfTokenLocators:
116 | xsrf_token = (By.ID, "atlassian-token")
117 |
118 | class AdminLocators:
119 | admin_system_page_url = UrlManager().admin_system_url()
120 | web_sudo_password = (By.ID, 'password')
121 | web_sudo_submit_btn = (By.ID, 'authenticateButton')
122 | login_form = (By.ID, 'login-container')
123 | edit_baseurl = (By.ID, 'editbaseurl')
124 |
--------------------------------------------------------------------------------
/app/selenium_ui/confluence_ui.py:
--------------------------------------------------------------------------------
1 | from selenium_ui.confluence import modules
2 | from extension.confluence import extension_ui # noqa F401
3 |
4 |
5 | # this action should be the first one
6 | def test_0_selenium_a_login(confluence_webdriver, confluence_datasets, confluence_screen_shots):
7 | modules.login(confluence_webdriver, confluence_datasets)
8 |
9 |
10 | def test_1_selenium_view_blog(confluence_webdriver, confluence_datasets, confluence_screen_shots):
11 | modules.view_blog(confluence_webdriver, confluence_datasets)
12 |
13 |
14 | def test_1_selenium_view_dashboard(confluence_webdriver, confluence_datasets, confluence_screen_shots):
15 | modules.view_dashboard(confluence_webdriver, confluence_datasets)
16 |
17 |
18 | def test_1_selenium_view_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
19 | modules.view_page(confluence_webdriver, confluence_datasets)
20 |
21 |
22 | def test_1_selenium_view_page_from_cache(confluence_webdriver, confluence_datasets, confluence_screen_shots):
23 | modules.view_page_from_cache(confluence_webdriver, confluence_datasets)
24 |
25 | def test_1_selenium_create_page(confluence_webdriver, confluence_datasets, confluence_screen_shots):
26 | modules.create_confluence_page(confluence_webdriver, confluence_datasets)
27 |
28 |
29 | def test_1_selenium_edit_by_url(confluence_webdriver, confluence_datasets, confluence_screen_shots):
30 | modules.edit_confluence_page_by_url(confluence_webdriver, confluence_datasets)
31 |
32 |
33 | def test_1_selenium_cql_search_two_words(confluence_webdriver, confluence_datasets, confluence_screen_shots):
34 | modules.cql_search_two_words(confluence_webdriver)
35 |
36 |
37 | def test_1_selenium_edit_page_quick_edit(confluence_webdriver, confluence_datasets, confluence_screen_shots):
38 | modules.edit_confluence_page_quick_edit(confluence_webdriver, confluence_datasets)
39 |
40 |
41 | def test_1_selenium_create_inline_comment(confluence_webdriver, confluence_datasets, confluence_screen_shots):
42 | modules.create_inline_comment(confluence_webdriver, confluence_datasets)
43 |
44 |
45 | def test_1_selenium_cql_search_three_words(confluence_webdriver, confluence_datasets, confluence_screen_shots):
46 | modules.cql_search_three_words(confluence_webdriver)
47 |
48 |
49 |
50 | """
51 | Add custom actions anywhere between login and log out action. Move this to a different line as needed.
52 | Write your custom selenium scripts in `app/extension/confluence/extension_ui.py`.
53 | Refer to `app/selenium_ui/confluence/modules.py` for examples.
54 | """
55 | # def test_1_selenium_custom_action(confluence_webdriver, confluence_datasets, confluence_screen_shots):
56 | # extension_ui.app_specific_action(confluence_webdriver, confluence_datasets)
57 |
58 |
59 | # this action should be the last one
60 | def test_2_selenium_z_log_out(confluence_webdriver, confluence_datasets, confluence_screen_shots):
61 | modules.log_out(confluence_webdriver, confluence_datasets)
62 |
--------------------------------------------------------------------------------
/app/selenium_ui/jira/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/selenium_ui/jira/__init__.py
--------------------------------------------------------------------------------
/app/selenium_ui/jira_ui.py:
--------------------------------------------------------------------------------
1 | from selenium_ui.jira import modules
2 | from extension.jira import extension_ui # noqa F401
3 |
4 |
5 | # this action should be the first one
6 | def test_0_selenium_a_login(jira_webdriver, jira_datasets, jira_screen_shots):
7 | modules.login(jira_webdriver, jira_datasets)
8 |
9 |
10 | def test_1_selenium_view_project_summary(jira_webdriver, jira_datasets, jira_screen_shots):
11 | modules.view_project_summary(jira_webdriver, jira_datasets)
12 |
13 |
14 | def test_1_selenium_browse_projects_list(jira_webdriver, jira_datasets, jira_screen_shots):
15 | modules.browse_projects_list(jira_webdriver, jira_datasets)
16 |
17 |
18 | def test_1_selenium_browse_boards_list(jira_webdriver, jira_datasets, jira_screen_shots):
19 | modules.browse_boards_list(jira_webdriver, jira_datasets)
20 |
21 |
22 | def test_1_selenium_create_issue(jira_webdriver, jira_datasets, jira_screen_shots):
23 | modules.create_issue(jira_webdriver, jira_datasets)
24 |
25 |
26 | def test_1_selenium_edit_issue(jira_webdriver, jira_datasets, jira_screen_shots):
27 | modules.edit_issue(jira_webdriver, jira_datasets)
28 |
29 |
30 | def test_1_selenium_save_comment(jira_webdriver, jira_datasets, jira_screen_shots):
31 | modules.save_comment(jira_webdriver, jira_datasets)
32 |
33 |
34 | def test_1_selenium_search_jql(jira_webdriver, jira_datasets, jira_screen_shots):
35 | modules.search_jql(jira_webdriver, jira_datasets)
36 |
37 |
38 | def test_1_selenium_view_backlog_for_scrum_board(jira_webdriver, jira_datasets, jira_screen_shots):
39 | modules.view_backlog_for_scrum_board(jira_webdriver, jira_datasets)
40 |
41 |
42 | def test_1_selenium_view_scrum_board(jira_webdriver, jira_datasets, jira_screen_shots):
43 | modules.view_scrum_board(jira_webdriver, jira_datasets)
44 |
45 |
46 | def test_1_selenium_view_kanban_board(jira_webdriver, jira_datasets, jira_screen_shots):
47 | modules.view_kanban_board(jira_webdriver, jira_datasets)
48 |
49 |
50 | def test_1_selenium_view_dashboard(jira_webdriver, jira_datasets, jira_screen_shots):
51 | modules.view_dashboard(jira_webdriver, jira_datasets)
52 |
53 |
54 | def test_1_selenium_view_issue(jira_webdriver, jira_datasets, jira_screen_shots):
55 | modules.view_issue(jira_webdriver, jira_datasets)
56 |
57 |
58 | """
59 | Add custom actions anywhere between login and log out action. Move this to a different line as needed.
60 | Write your custom selenium scripts in `app/extension/jira/extension_ui.py`.
61 | Refer to `app/selenium_ui/jira/modules.py` for examples.
62 | """
63 |
64 |
65 | # def test_1_selenium_custom_action(jira_webdriver, jira_datasets, jira_screen_shots):
66 | # extension_ui.app_specific_action(jira_webdriver, jira_datasets)
67 |
68 |
69 | # this action should be the last one
70 | def test_2_selenium_z_log_out(jira_webdriver, jira_datasets, jira_screen_shots):
71 | modules.log_out(jira_webdriver, jira_datasets)
72 |
--------------------------------------------------------------------------------
/app/selenium_ui/jsm/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/selenium_ui/jsm/__init__.py
--------------------------------------------------------------------------------
/app/selenium_ui/jsm_ui_agents.py:
--------------------------------------------------------------------------------
1 | from selenium_ui.jsm import modules_agents
2 | import pytest
3 | from extension.jsm import extension_ui_agents # noqa F401
4 | from util.conf import JSM_SETTINGS
5 |
6 |
7 | def is_dataset_small(jsm_datasets):
8 | return len(jsm_datasets[modules_agents.SERVICE_DESKS_MEDIUM]) == 0
9 |
10 |
11 | # this action should be the first one
12 | def test_0_selenium_agent_a_login(jsm_webdriver, jsm_datasets, jsm_screen_shots):
13 | modules_agents.login(jsm_webdriver, jsm_datasets)
14 |
15 |
16 | def test_1_selenium_agent_browse_projects(jsm_webdriver, jsm_datasets, jsm_screen_shots):
17 | modules_agents.agent_browse_projects(jsm_webdriver, jsm_datasets)
18 |
19 |
20 | def test_1_selenium_agent_view_customers(jsm_webdriver, jsm_datasets, jsm_screen_shots):
21 | modules_agents.view_customers(jsm_webdriver, jsm_datasets)
22 |
23 |
24 | def test_1_selenium_agent_view_request(jsm_webdriver, jsm_datasets, jsm_screen_shots):
25 | modules_agents.view_request(jsm_webdriver, jsm_datasets)
26 |
27 |
28 | def test_1_selenium_agent_view_report_workload_medium(jsm_webdriver, jsm_datasets, jsm_screen_shots):
29 | if is_dataset_small(jsm_datasets):
30 | pytest.skip("Dataset does not have medium (10k-100k requests) service desk. Skipping action.")
31 | modules_agents.view_report_workload_medium(jsm_webdriver, jsm_datasets)
32 |
33 |
34 | def test_1_selenium_agent_view_report_created_vs_resolved_medium(jsm_webdriver, jsm_datasets, jsm_screen_shots):
35 | if is_dataset_small(jsm_datasets):
36 | pytest.skip("Dataset does not have medium (10k-100k requests) service desk. Skipping action.")
37 | modules_agents.view_report_created_vs_resolved_medium(jsm_webdriver, jsm_datasets)
38 |
39 |
40 | def test_1_selenium_agent_view_report_workload_small(jsm_webdriver, jsm_datasets, jsm_screen_shots):
41 | modules_agents.view_report_workload_small(jsm_webdriver, jsm_datasets)
42 |
43 |
44 | def test_1_selenium_agent_view_report_created_vs_resolved_small(jsm_webdriver, jsm_datasets, jsm_screen_shots):
45 | modules_agents.view_report_created_vs_resolved_small(jsm_webdriver, jsm_datasets)
46 |
47 |
48 | def test_1_selenium_agent_add_comment(jsm_webdriver, jsm_datasets, jsm_screen_shots):
49 | modules_agents.add_comment(jsm_webdriver, jsm_datasets)
50 |
51 |
52 | def test_1_selenium_agent_view_queues_medium(jsm_webdriver, jsm_datasets, jsm_screen_shots):
53 | if is_dataset_small(jsm_datasets):
54 | pytest.skip("Dataset does not have medium (10k-100k requests) service desk. Skipping action.")
55 | modules_agents.view_queues_medium(jsm_webdriver, jsm_datasets)
56 |
57 |
58 | def test_1_selenium_agent_view_queues_small(jsm_webdriver, jsm_datasets, jsm_screen_shots):
59 | modules_agents.view_queues_small(jsm_webdriver, jsm_datasets)
60 |
61 |
62 | """
63 | Add custom actions anywhere between login and log out action. Move this to a different line as needed.
64 | Write your custom selenium scripts in `app/extension/jsm/extension_ui_agents.py`.
65 | Refer to `app/selenium_ui/jsm/modules_agents.py` for examples.
66 | """
67 | # def test_1_selenium_agent_custom_action(jsm_webdriver, jsm_datasets, jsm_screen_shots):
68 | # extension_ui_agents.app_specific_action(jsm_webdriver, jsm_datasets)
69 |
70 | """
71 | To enable the Insight-specific tests below, set the `insight` variable to True (False by default) in `app/jsm.yml`
72 | """
73 |
74 |
75 | def test_1_selenium_agent_insight_main_page(jsm_webdriver, jsm_datasets, jsm_screen_shots):
76 | if not JSM_SETTINGS.insight:
77 | pytest.skip()
78 | modules_agents.insight_main_page(jsm_webdriver, jsm_datasets)
79 |
80 |
81 | def test_1_selenium_agent_insight_create_new_schema(jsm_webdriver, jsm_datasets, jsm_screen_shots):
82 | if not JSM_SETTINGS.insight:
83 | pytest.skip()
84 | modules_agents.insight_create_new_schema(jsm_webdriver, jsm_datasets)
85 |
86 |
87 | def test_1_selenium_agent_insight_create_new_object(jsm_webdriver, jsm_datasets, jsm_screen_shots):
88 | if not JSM_SETTINGS.insight:
89 | pytest.skip()
90 | modules_agents.insight_create_new_object(jsm_webdriver, jsm_datasets)
91 |
92 |
93 | def test_1_selenium_agent_insight_delete_new_schema(jsm_webdriver, jsm_datasets, jsm_screen_shots):
94 | if not JSM_SETTINGS.insight:
95 | pytest.skip()
96 | modules_agents.insight_delete_new_schema(jsm_webdriver, jsm_datasets)
97 |
98 |
99 | def test_1_selenium_agent_insight_view_queue_with_insight_column(jsm_webdriver, jsm_datasets, jsm_screen_shots):
100 | if not JSM_SETTINGS.insight:
101 | pytest.skip()
102 | modules_agents.insight_view_queue_insight_column(jsm_webdriver, jsm_datasets)
103 |
104 |
105 | def test_1_selenium_agent_insight_search_object_by_iql(jsm_webdriver, jsm_datasets, jsm_screen_shots):
106 | if not JSM_SETTINGS.insight:
107 | pytest.skip()
108 | modules_agents.insight_search_object_by_iql(jsm_webdriver, jsm_datasets)
109 |
110 |
111 | def test_1_selenium_agent_insight_view_issue_with_objects(jsm_webdriver, jsm_datasets, jsm_screen_shots):
112 | if not JSM_SETTINGS.insight:
113 | pytest.skip()
114 | modules_agents.view_issue_with_insight_objects(jsm_webdriver, jsm_datasets)
115 |
116 |
117 | # this action should be the last one
118 | def test_2_selenium_agent_z_logout(jsm_webdriver, jsm_datasets, jsm_screen_shots):
119 | modules_agents.logout(jsm_webdriver, jsm_datasets)
120 |
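Custom agent actions can reuse the same guards shown above: skip when the dataset has no medium service desk via `is_dataset_small`, and skip when Insight is disabled via `JSM_SETTINGS.insight`. A minimal sketch of such wiring; `app_specific_action` would live in `app/extension/jsm/extension_ui_agents.py` and is assumed here:

# Hypothetical custom test combining the existing skip guards.
def test_1_selenium_agent_custom_insight_action(jsm_webdriver, jsm_datasets, jsm_screen_shots):
    if not JSM_SETTINGS.insight:
        pytest.skip("Insight is disabled in app/jsm.yml. Skipping action.")
    if is_dataset_small(jsm_datasets):
        pytest.skip("Dataset does not have medium (10k-100k requests) service desk. Skipping action.")
    extension_ui_agents.app_specific_action(jsm_webdriver, jsm_datasets)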
--------------------------------------------------------------------------------
/app/selenium_ui/jsm_ui_customers.py:
--------------------------------------------------------------------------------
1 | from selenium_ui.jsm import modules_customers
2 | from extension.jsm import extension_ui_customers # noqa F401
3 | from util.conf import JSM_SETTINGS
4 | import pytest
5 |
6 |
7 | # this action should be the first one
8 | def test_0_selenium_customer_a_login(jsm_webdriver, jsm_datasets, jsm_screen_shots):
9 | modules_customers.login(jsm_webdriver, jsm_datasets)
10 |
11 |
12 | def test_1_selenium_customer_create_request(jsm_webdriver, jsm_datasets, jsm_screen_shots):
13 | modules_customers.create_request(jsm_webdriver, jsm_datasets)
14 |
15 |
16 | def test_1_selenium_customer_view_request(jsm_webdriver, jsm_datasets, jsm_screen_shots):
17 | modules_customers.view_request(jsm_webdriver, jsm_datasets)
18 |
19 |
20 | def test_1_selenium_customer_view_requests(jsm_webdriver, jsm_datasets, jsm_screen_shots):
21 | modules_customers.view_requests(jsm_webdriver, jsm_datasets)
22 |
23 |
24 | def test_1_selenium_customer_view_all_requests(jsm_webdriver, jsm_datasets, jsm_screen_shots):
25 | modules_customers.view_all_requests(jsm_webdriver, jsm_datasets)
26 |
27 |
28 | def test_1_selenium_customer_share_request_with_customer(jsm_webdriver, jsm_datasets, jsm_screen_shots):
29 | modules_customers.share_request_with_customer(jsm_webdriver, jsm_datasets)
30 |
31 |
32 | def test_1_selenium_customer_add_comment(jsm_webdriver, jsm_datasets, jsm_screen_shots):
33 | modules_customers.add_comment(jsm_webdriver, jsm_datasets)
34 |
35 |
36 | """
37 | Add custom actions anywhere between login and log out action. Move this to a different line as needed.
38 | Write your custom selenium scripts in `app/extension/jsm/extension_ui_customers.py`.
39 | Refer to `app/selenium_ui/jsm/modules_customers.py` for examples.
40 | """
41 |
42 |
43 | # def test_1_selenium_customer_custom_action(jsm_webdriver, jsm_datasets, jsm_screen_shots):
44 | # extension_ui_customers.app_specific_action(jsm_webdriver, jsm_datasets)
45 |
46 | """
47 | To enable the Insight-specific test below, set the `insight` variable to True (False by default) in `app/jsm.yml`
48 | """
49 |
50 |
51 | def test_0_selenium_customer_insight_view_request(jsm_webdriver, jsm_datasets, jsm_screen_shots):
52 | if not JSM_SETTINGS.insight:
53 | pytest.skip()
54 | modules_customers.view_request_with_insight(jsm_webdriver, jsm_datasets)
55 |
56 |
57 | # this action should be the last one
58 | def test_2_selenium_customer_z_log_out(jsm_webdriver, jsm_datasets, jsm_screen_shots):
59 | modules_customers.log_out(jsm_webdriver, jsm_datasets)
60 |
--------------------------------------------------------------------------------
/app/util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/util/__init__.py
--------------------------------------------------------------------------------
/app/util/analytics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/util/analytics/__init__.py
--------------------------------------------------------------------------------
/app/util/analytics/bamboo_post_run_collector.py:
--------------------------------------------------------------------------------
1 | import re
2 | from multiprocessing.pool import ThreadPool
3 | import datetime
4 |
5 | from util.api.bamboo_clients import BambooClient
6 | from util.conf import BAMBOO_SETTINGS
7 | from multiprocessing import cpu_count
8 |
9 | pool = ThreadPool(processes=min(cpu_count() * 3, 12))
10 |
11 |
12 | class BambooPostRunCollector:
13 |
14 | def __init__(self, locust_log):
15 | self.client = BambooClient(host=BAMBOO_SETTINGS.server_url,
16 | user=BAMBOO_SETTINGS.admin_login, password=BAMBOO_SETTINGS.admin_password)
17 | self.locust_log = locust_log
18 | self.locust_build_job_results = self.parallel_get_all_builds_results()
19 | self.start_analytics_utc_time = datetime.datetime.now(datetime.timezone.utc)
20 |
21 | def parallel_get_all_builds_results(self):
22 | locust_log_lines = self.locust_log.get_locust_log()
23 | build_job_id_list = []
24 | for string in locust_log_lines:
25 | build_job_id = re.search(r"\|.*\|", string)
26 | if build_job_id:
27 | build_job_id = build_job_id.group()
28 | build_job_id_list.append(build_job_id.replace('|', ''))
29 |
30 | all_builds_job_results_lists = pool.map(self.client.get_build_job_results, [i for i in build_job_id_list])
31 | return all_builds_job_results_lists
32 |
33 | def is_build_starts_last_n_seconds(self, build_result, n_sec):
34 | build_start_time_regexp = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}"
35 | build_start_match = re.search(build_start_time_regexp, build_result['buildStartedTime'])
36 | build_start_datetime = datetime.datetime.strptime(build_start_match.group(), '%Y-%m-%dT%H:%M:%S').\
37 | replace(tzinfo=datetime.timezone.utc)
38 | build_start_in_last_n_sec = build_start_datetime > (self.start_analytics_utc_time
39 | - datetime.timedelta(seconds=n_sec))
40 |
41 | build_in_progress = build_result['lifeCycleState'] == 'InProgress' and build_result['buildDuration'] == 0
42 | return build_in_progress and build_start_in_last_n_sec
43 |
44 | @property
45 | def unexpected_status_plan_count(self):
46 | unexpected_finished_plan_count = 0
47 | for build_result in self.locust_build_job_results:
48 | plan_name = build_result['plan']['name']
49 | plan_result = build_result['state']
50 | expected_status = re.search(r'Project \d+ - \d+ - Plan (.*) - Job \d+', plan_name)
51 | if not expected_status:
52 | raise Exception(f'ERROR: Could not parse expected plan status from the plan name {plan_name}')
53 | expected_status = expected_status.group(1)
54 | if expected_status not in plan_result:
55 | if not self.is_build_starts_last_n_seconds(build_result,
56 | BAMBOO_SETTINGS.default_dataset_plan_duration*2):
57 | unexpected_finished_plan_count = unexpected_finished_plan_count + 1
58 | return unexpected_finished_plan_count
59 |
60 | def get_plan_count_with_n_queue(self, n_sec):
61 | plan_count_with_n_sec = 0
62 | for build_result in self.locust_build_job_results:
63 | if build_result['queueTimeInSeconds'] >= n_sec:
64 | plan_count_with_n_sec = plan_count_with_n_sec + 1
65 | return plan_count_with_n_sec
66 |
67 | @property
68 | def unexpected_duration_plan_count(self):
69 | possible_diff_perc = 10
70 | expected_yml_build_duration = BAMBOO_SETTINGS.default_dataset_plan_duration
71 | expected_min_duration = expected_yml_build_duration - expected_yml_build_duration*possible_diff_perc/100
72 | expected_max_duration = expected_yml_build_duration + expected_yml_build_duration*possible_diff_perc/100
73 | unexpected_duration_plans_count = 0
74 | for build_result in self.locust_build_job_results:
75 | if not expected_min_duration <= build_result['buildDuration']/1000 <= expected_max_duration:
76 | if not self.is_build_starts_last_n_seconds(build_result,
77 | BAMBOO_SETTINGS.default_dataset_plan_duration * 2):
78 | unexpected_duration_plans_count = unexpected_duration_plans_count + 1
79 | return unexpected_duration_plans_count
80 |
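The `unexpected_status_plan_count` property relies on the generated plan names encoding their expected outcome. A quick illustration of the regex with a hypothetical plan name in that format:

import re

# Hypothetical plan name in the "Project N - M - Plan <STATUS> - Job K" format checked above.
plan_name = "Project 1 - 5 - Plan FAILED - Job 1"
expected_status = re.search(r'Project \d+ - \d+ - Plan (.*) - Job \d+', plan_name).group(1)
# expected_status == "FAILED"; the plan is only counted as unexpected if this string
# is absent from the build state reported by Bamboo.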
--------------------------------------------------------------------------------
/app/util/api/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/util/api/__init__.py
--------------------------------------------------------------------------------
/app/util/api/crowd_clients.py:
--------------------------------------------------------------------------------
1 | from util.api.abstract_clients import RestClient, LOGIN_POST_HEADERS
2 | from selenium_ui.conftest import retry
3 |
4 |
5 | BATCH_SIZE_USERS = 1000
6 |
7 |
8 | class CrowdRestClient(RestClient):
9 |
10 | def add_user(self,
11 | name: str,
12 | password: str,
13 | first_name: str,
14 | last_name: str,
15 | display_name: str = None,
16 | email: str = None,
17 | active: bool = True):
18 | api_url = self.host + "/rest/usermanagement/1/user"
19 | payload = {
20 | "name": name,
21 | "password": {"value": password},
22 | "active": active,
23 | "first-name": first_name,
24 | "last-name": last_name,
25 | "display-name": display_name or f"{first_name} {last_name}",
26 | "email": email or name + '@localdomain.com'
27 | }
28 |
29 | response = self.post(api_url, "Could not create crowd user", body=payload)
30 |
31 | return response.json()
32 |
33 | def search(self, entity_type: str = "user", start_index: int = 0, max_results: int = 1000, restriction: str = '',
34 | expand: str = 'user'):
35 | api_url = self.host + f"/rest/usermanagement/1/search" \
36 | f"?entity-type={entity_type}" \
37 | f"&start-index={start_index}&max-results={max_results}&restriction={restriction}" \
38 | f"&expand={expand}"
39 | response = self.get(api_url, "Search failed")
40 |
41 | return [i['name'] for i in response.json()[f'{entity_type}s']]
42 |
43 | def users_search_parallel(self, cql: str = '', max_results: int = 1000):
44 | """
45 | Parallel version
46 | """
47 | from multiprocessing import cpu_count
48 | from multiprocessing.pool import ThreadPool
49 | print("Users parallel search")
50 |
51 | if max_results % BATCH_SIZE_USERS == 0:
52 | loop_count = max_results // BATCH_SIZE_USERS
53 | last_BATCH_SIZE_USERS = BATCH_SIZE_USERS
54 | else:
55 | loop_count = max_results // BATCH_SIZE_USERS + 1
56 | last_BATCH_SIZE_USERS = max_results % BATCH_SIZE_USERS
57 |
58 | def search_users(i):
59 | nonlocal loop_count, last_BATCH_SIZE_USERS
60 | if i == loop_count - 1:
61 | loop_max_results = last_BATCH_SIZE_USERS
62 | else:
63 | loop_max_results = BATCH_SIZE_USERS
64 |
65 | start_index = BATCH_SIZE_USERS * i
66 |
67 | loop_users = self.search(
68 | entity_type='user', start_index=start_index, max_results=loop_max_results, restriction=cql)
69 |
70 | print(".", end="", flush=True)
71 | return loop_users
72 |
73 | num_cores = cpu_count()
74 | pool = ThreadPool(processes=num_cores*2)
75 | loop_users_list = pool.map(search_users, [i for i in range(loop_count)])
76 | print("") # new line
77 | users = [user for loop_users in loop_users_list for user in loop_users]
78 | return users
79 |
80 | def group_members(self, group_name: str, start_index: int = 0, max_results: int = 1000):
81 | api_url = self.host + f"/rest/usermanagement/1/group/user/direct" \
82 | f"?groupname={group_name}&start-index={start_index}&max-results={max_results}"
83 | r = self.get(api_url, "Group members call failed")
84 | return r.json()
85 |
86 | def get_group_membership(self):
87 | api_url = self.host + '/rest/usermanagement/1/group/membership'
88 | self.headers = {'Accept': 'application/xml', 'Content-Type': 'application/xml'}
89 | response = self.get(api_url, 'Can not get group memberships')
90 | return response.content
91 |
92 | def get_server_info(self):
93 | api_url = self.host + '/rest/admin/1.0/server-info'
94 | response = self.get(api_url, 'Can not get Crowd server info')
95 | return response.json()
96 |
97 | def get_cluster_nodes(self):
98 | api_url = self.host + '/rest/atlassian-cluster-monitoring/cluster/nodes'
99 | response = self.get(api_url, 'Can not get Crowd cluster nodes information')
100 | return response.json()
101 |
102 | @retry()
103 | def get_status(self):
104 | api_url = f'{self.host}/status'
105 | status = self.get(api_url, "Could not get status")
106 | if status.ok:
107 | return status.text
108 | else:
109 |             print(f"Warning: failed to get {api_url}. Response status code: {status.status_code}")
110 | return False
111 |
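`users_search_parallel` splits `max_results` into `BATCH_SIZE_USERS`-sized pages, and only the last page may be smaller. A worked example of that batch arithmetic with illustrative values:

# Worked example of the batching in users_search_parallel() (illustrative values).
BATCH_SIZE_USERS = 1000
max_results = 2500

loop_count = max_results // BATCH_SIZE_USERS + 1                    # 3 calls to search(), since 2500 is not a multiple of 1000
last_batch_size = max_results % BATCH_SIZE_USERS                    # the final call fetches 500 users
start_indexes = [BATCH_SIZE_USERS * i for i in range(loop_count)]   # [0, 1000, 2000]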
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/.gitignore:
--------------------------------------------------------------------------------
1 | .gradle/
2 | .idea/
3 | .DS_Store
4 | **/*.iml
5 | out
6 | build
7 | **/build/
8 | **/node_modules
9 | **/target/
10 | **/*.bak
11 | __pycache__/
12 | # Bamboo Spec Ignored Files
13 | .credentials
14 |
15 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/README.md:
--------------------------------------------------------------------------------
1 | ## Bamboo dataset generator - a tool that generates plans on a Bamboo instance
2 |
3 | Before you start, make sure you have installed [Maven](https://maven.apache.org/install.html).
4 |
5 | The configuration is located in: [src/main/java/bamboogenerator/Main.java](src/main/java/bamboogenerator/Main.java)
6 |
7 | **POM Configuration**
8 |
9 | - Set the `bamboo-specs-parent` version in the pom.xml file to match your Bamboo version.
10 |
11 | e.g. configuration for version 10.0.0:
12 | ```
13 | <artifactId>bamboo-specs-parent</artifactId>
14 | <version>10.0.0</version>
15 | ```
16 | pom.xml file location: [pom.xml](pom.xml)
17 |
18 | **Client Configuration**
19 |
20 | - `BAMBOO_SERVER_URL` - the URL of the Bamboo instance
21 |
22 | For a Terraform deployment, the URL should include the port and the context path:
23 | ```
24 | BAMBOO_SERVER_URL = "http://my-bamboo.amazonaws.com:80/bamboo"
25 | ```
26 | - `ADMIN_USER_NAME` - the username of the admin account
27 |
28 |
29 | **Generator Configuration**
30 | - `PROJECTS_NUMBER` - the number of projects to generate
31 | - `PLANS` - the total number of plans to generate (plans per project = PLANS / PROJECTS_NUMBER)
32 | - `PERCENT_OF_FAILED_PLANS` - the percentage of plans to generate as failed
33 |
34 | ---
35 |
36 | **NOTE**
37 |
38 | Please make sure you do not change the `Generator Configuration` after the initial generation.
39 | If you need a different configuration, start from a clean dataset.
40 |
41 | The generator checks whether the Bamboo server contains plans outside the generated set
42 | and fails execution if such plans exist.
43 |
44 | ---
45 | **Generate Bamboo token**
46 |
47 | Log in as the admin user, go to **Profile > Personal access tokens** and create a new token with the same
48 | permissions as the admin user.
49 |
50 | **Run on Linux/Mac:**
51 |
52 |     export BAMBOO_TOKEN=newly_generated_token
53 | ./run.sh
54 |
55 | **Run on Windows:**
56 |
57 |     set BAMBOO_TOKEN=newly_generated_token
58 | run
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
3 |     <modelVersion>4.0.0</modelVersion>
4 |
5 |     <parent>
6 |         <groupId>com.atlassian.bamboo</groupId>
7 |         <artifactId>bamboo-specs-parent</artifactId>
8 |         <version>10.2.2</version>
9 |     </parent>
10 |
11 |     <artifactId>bamboo-dataset-generator</artifactId>
12 |     <version>1.0.0</version>
13 |     <packaging>jar</packaging>
14 |
15 |     <build>
16 |         <plugins>
17 |             <plugin>
18 |                 <groupId>org.codehaus.mojo</groupId>
19 |                 <artifactId>exec-maven-plugin</artifactId>
20 |                 <version>3.5.0</version>
21 |                 <executions>
22 |                     <execution>
23 |                         <goals>
24 |                             <goal>exec</goal>
25 |                         </goals>
26 |                     </execution>
27 |                 </executions>
28 |                 <configuration>
29 |                     <executable>java</executable>
30 |                     <includeProjectDependencies>true</includeProjectDependencies>
31 |                     <includePluginDependencies>true</includePluginDependencies>
32 |                     <classpathScope>compile</classpathScope>
33 |                 </configuration>
34 |             </plugin>
35 |         </plugins>
36 |     </build>
37 |
38 |     <dependencies>
39 |         <dependency>
40 |             <groupId>com.atlassian.bamboo</groupId>
41 |             <artifactId>bamboo-specs-api</artifactId>
42 |         </dependency>
43 |         <dependency>
44 |             <groupId>com.atlassian.bamboo</groupId>
45 |             <artifactId>bamboo-specs</artifactId>
46 |         </dependency>
47 |         <dependency>
48 |             <groupId>com.atlassian.buildeng</groupId>
49 |             <artifactId>bamboo-plan-ownership-specs</artifactId>
50 |             <version>2.1.11</version>
51 |         </dependency>
52 |         <dependency>
53 |             <groupId>com.atlassian.buildeng</groupId>
54 |             <artifactId>bamboo-pbc-specs</artifactId>
55 |             <version>2.1.11</version>
56 |         </dependency>
57 |         <dependency>
58 |             <groupId>com.google.guava</groupId>
59 |             <artifactId>guava</artifactId>
60 |             <version>33.4.7-android</version>
61 |         </dependency>
62 |         <dependency>
63 |             <groupId>com.fasterxml.jackson.core</groupId>
64 |             <artifactId>jackson-core</artifactId>
65 |             <version>2.18.3</version>
66 |         </dependency>
67 |         <dependency>
68 |             <groupId>com.fasterxml.jackson.core</groupId>
69 |             <artifactId>jackson-databind</artifactId>
70 |             <version>2.18.3</version>
71 |         </dependency>
72 |         <dependency>
73 |             <groupId>com.jayway.jsonpath</groupId>
74 |             <artifactId>json-path</artifactId>
75 |             <version>2.9.0</version>
76 |         </dependency>
77 |         <dependency>
78 |             <groupId>net.minidev</groupId>
79 |             <artifactId>json-smart</artifactId>
80 |             <version>2.5.2</version>
81 |         </dependency>
82 |         <dependency>
83 |             <groupId>org.slf4j</groupId>
84 |             <artifactId>slf4j-api</artifactId>
85 |             <version>1.7.36</version>
86 |         </dependency>
87 |         <dependency>
88 |             <groupId>commons-codec</groupId>
89 |             <artifactId>commons-codec</artifactId>
90 |             <version>1.18.0</version>
91 |         </dependency>
92 |         <dependency>
93 |             <groupId>org.apache.logging.log4j</groupId>
94 |             <artifactId>log4j-api</artifactId>
95 |             <version>2.24.3</version>
96 |         </dependency>
97 |         <dependency>
98 |             <groupId>org.apache.logging.log4j</groupId>
99 |             <artifactId>log4j-core</artifactId>
100 |             <version>2.24.3</version>
101 |         </dependency>
102 |         <dependency>
103 |             <groupId>org.apache.logging.log4j</groupId>
104 |             <artifactId>log4j-slf4j-impl</artifactId>
105 |             <version>2.24.3</version>
106 |         </dependency>
107 |     </dependencies>
108 |
109 |     <repositories>
110 |         <repository>
111 |             <id>atlassian-public</id>
112 |             <url>https://packages.atlassian.com/mvn/maven-external/</url>
113 |         </repository>
114 |     </repositories>
115 |
116 |     <pluginRepositories>
117 |         <pluginRepository>
118 |             <id>atlassian-public</id>
119 |             <url>https://packages.atlassian.com/mvn/maven-external/</url>
120 |         </pluginRepository>
121 |     </pluginRepositories>
122 | </project>
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/run.bat:
--------------------------------------------------------------------------------
1 | @if [%DEBUG%] == [] @echo off
2 | IF "%BAMBOO_TOKEN%"=="" (echo "BAMBOO_TOKEN is not set" && exit /b)
3 | mvn compile exec:java -Dexec.mainClass="bamboogenerator.Main"
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | [ -z "$BAMBOO_TOKEN" ] && echo "BAMBOO_TOKEN is not set" && exit
3 | mvn compile exec:java -Dexec.mainClass="bamboogenerator.Main" -Dexec.cleanupDaemonThreads=false
4 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java:
--------------------------------------------------------------------------------
1 | package bamboogenerator;
2 |
3 | import bamboogenerator.model.PlanInfo;
4 | import bamboogenerator.service.BambooClient;
5 | import bamboogenerator.service.PlansPublisher;
6 | import bamboogenerator.service.generator.plan.PlanGenerator;
7 | import bamboogenerator.service.generator.plan.PlanInfoGenerator;
8 | import com.atlassian.bamboo.specs.api.BambooSpec;
9 | import com.atlassian.bamboo.specs.api.builders.plan.Plan;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import java.util.List;
14 | import java.util.Set;
15 | import java.util.stream.Collectors;
16 |
17 | import static bamboogenerator.service.BambooServerInitializer.getToken;
18 | import static java.lang.System.currentTimeMillis;
19 |
20 |
21 | /**
22 | * Plan configuration for Bamboo.
23 | *
24 | * @see Bamboo Specs
25 | */
26 | @BambooSpec
27 | public class Main {
28 | private static final Logger LOG = LoggerFactory.getLogger(Main.class);
29 |
30 |     // e.g. for Terraform deployment: BAMBOO_SERVER_URL = "http://my-bamboo.amazonaws.com:80/bamboo"
31 |     // e.g. for localhost deployment: BAMBOO_SERVER_URL = "http://0.0.0.0:8085"
32 |     private static final String BAMBOO_SERVER_URL = "http://my-bamboo.amazonaws.com:80/bamboo";
33 | private static final String ADMIN_USER_NAME = "admin";
34 |
35 | // NOTE: Please make sure you haven't changed these values after initial run
36 | // in case you need another configuration you have to start from clean dataset
37 | private static final int PROJECTS_NUMBER = 100;
38 | private static final int PLANS = 2000; // plans per project = PLANS/PROJECTS_NUMBER
39 | private static final int PERCENT_OF_FAILED_PLANS = 20;
40 |
41 | public static void main(String[] args) throws Exception {
42 | long start = currentTimeMillis();
43 | LOG.info("Started Bamboo dataset generator");
44 | LOG.info("{} build plans will be generated", PLANS);
45 |
46 |         List<PlanInfo> planInfoList = PlanInfoGenerator.generate(PROJECTS_NUMBER, PLANS, PERCENT_OF_FAILED_PLANS);
47 |         checkIfThereAreOtherPlansOnServer(planInfoList);
48 |         List<Plan> plans = PlanGenerator.generate(planInfoList);
49 |
50 | PlansPublisher plansPublisher = new PlansPublisher(BAMBOO_SERVER_URL, ADMIN_USER_NAME);
51 | plansPublisher.publish(plans);
52 |
53 | LOG.info("----------------------------------------------------\n");
54 | LOG.info("Elapsed Time in seconds: {}", ((currentTimeMillis() - start) / 1000));
55 | }
56 |
57 |     private static void checkIfThereAreOtherPlansOnServer(List<PlanInfo> planInfoList) throws Exception {
58 |         Set<String> generatedKeys = planInfoList.stream()
59 | .map(PlanInfo::getPlanKey)
60 | .collect(Collectors.toSet());
61 |
62 |         List<String> keysFromServer = new BambooClient(BAMBOO_SERVER_URL, getToken()).getAllPlanKeys();
63 | if (keysFromServer.isEmpty()) {
64 | return;
65 | }
66 |
67 | keysFromServer.removeAll(generatedKeys);
68 | if (!keysFromServer.isEmpty()) {
69 | throw new RuntimeException("There are " + keysFromServer.size()
70 | + " plans on server that were not generated."
71 | + " Keys " + keysFromServer);
72 | }
73 | }
74 |
75 | }
76 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/model/PlanInfo.java:
--------------------------------------------------------------------------------
1 | package bamboogenerator.model;
2 |
3 | public class PlanInfo {
4 | private final String planName;
5 | private final boolean failed;
6 | private final String planKey;
7 | private final String projectName;
8 | private final String projectKey;
9 |
10 | public PlanInfo(String planName, boolean failed, String planKey, String projectName, String projectKey) {
11 | this.planName = planName;
12 | this.failed = failed;
13 | this.planKey = planKey;
14 | this.projectName = projectName;
15 | this.projectKey = projectKey;
16 | }
17 |
18 | public String getPlanName() {
19 | return planName;
20 | }
21 | public boolean isFailed() {
22 | return failed;
23 | }
24 |
25 | public String getPlanKey() {
26 | return planKey;
27 | }
28 |
29 | public String getProjectName() {
30 | return projectName;
31 | }
32 |
33 | public String getProjectKey() {
34 | return projectKey;
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/service/BambooClient.java:
--------------------------------------------------------------------------------
1 | package bamboogenerator.service;
2 |
3 | import com.jayway.jsonpath.DocumentContext;
4 | import com.jayway.jsonpath.JsonPath;
5 | import org.apache.http.client.methods.HttpGet;
6 | import org.apache.http.client.utils.URIBuilder;
7 | import org.apache.http.impl.client.CloseableHttpClient;
8 | import org.apache.http.impl.client.HttpClients;
9 | import org.apache.http.util.EntityUtils;
10 |
11 | import java.net.URI;
12 | import java.net.URISyntaxException;
13 | import java.util.ArrayList;
14 | import java.util.List;
15 |
16 | public class BambooClient {
17 | private static final String AUTH_TEMPLATE = "Bearer %s";
18 | private static final int MAX_RESULT = 25;
19 |
20 | private final String serverUrl;
21 | private final String token;
22 | private final CloseableHttpClient client = HttpClients.createDefault();
23 |
24 | public BambooClient(String serverUrl, String token) {
25 | this.serverUrl = serverUrl;
26 | this.token = token;
27 | }
28 |
29 |     public List<String> getAllPlanKeys() throws Exception {
30 |         List<String> keys = new ArrayList<>();
31 | int index = 0;
32 | boolean hasMore = true;
33 | while (hasMore) {
34 | HttpGet request = preparePlanRequest(index);
35 | String body = EntityUtils.toString(client.execute(request).getEntity());
36 |
37 | DocumentContext context = JsonPath.parse(body);
38 | int size = context.read("$.plans.size");
39 | keys.addAll(context.read("$.plans..shortKey"));
40 |
41 | hasMore = keys.size() < size;
42 | if (hasMore) {
43 | index += MAX_RESULT;
44 | }
45 | }
46 |
47 | return keys;
48 | }
49 |
50 | private HttpGet preparePlanRequest(int index) throws URISyntaxException {
51 | HttpGet request = new HttpGet(buildPlanURI(index));
52 | request.addHeader("Authorization", String.format(AUTH_TEMPLATE, token));
53 | request.addHeader("Accept", "application/json");
54 | return request;
55 | }
56 |
57 | private URI buildPlanURI(int value) throws URISyntaxException {
58 | return new URIBuilder(serverUrl + "/rest/api/latest/plan")
59 | .addParameter("start-index", String.valueOf(value))
60 | .addParameter("max-result", String.valueOf(MAX_RESULT))
61 | .build();
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/service/BambooServerInitializer.java:
--------------------------------------------------------------------------------
1 | package bamboogenerator.service;
2 |
3 | import com.atlassian.bamboo.specs.util.BambooServer;
4 | import com.atlassian.bamboo.specs.util.SimpleTokenCredentials;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import static org.apache.commons.lang3.StringUtils.isBlank;
9 |
10 | public class BambooServerInitializer {
11 | private static final Logger LOG = LoggerFactory.getLogger(BambooServerInitializer.class);
12 | private static final String BAMBOO_TOKEN = "BAMBOO_TOKEN";
13 |
14 | public static BambooServer initBambooServer(String serverUrl) {
15 | String token = getToken();
16 | if (token == null) {
17 | return new BambooServer(serverUrl);
18 | }
19 |
20 | return new BambooServer(serverUrl, new SimpleTokenCredentials(token));
21 | }
22 |
23 | public static String getToken() {
24 | String token = System.getenv(BAMBOO_TOKEN);
25 | if (isBlank(token)) {
26 | LOG.warn("Env variable " + BAMBOO_TOKEN + " is not set or empty");
27 | return null;
28 | }
29 |
30 | return token;
31 |
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/service/PlansPublisher.java:
--------------------------------------------------------------------------------
1 | package bamboogenerator.service;
2 |
3 | import com.atlassian.bamboo.specs.api.builders.permission.PermissionType;
4 | import com.atlassian.bamboo.specs.api.builders.permission.Permissions;
5 | import com.atlassian.bamboo.specs.api.builders.permission.PlanPermissions;
6 | import com.atlassian.bamboo.specs.api.builders.plan.Plan;
7 | import com.atlassian.bamboo.specs.api.builders.plan.PlanIdentifier;
8 | import com.atlassian.bamboo.specs.util.BambooServer;
9 |
10 | import java.util.List;
11 |
12 | import static bamboogenerator.service.BambooServerInitializer.initBambooServer;
13 |
14 | public class PlansPublisher {
15 | private final BambooServer bambooServer;
16 | private final String userName;
17 |
18 | public PlansPublisher(String serverUrl, String userName) {
19 | this.bambooServer = initBambooServer(serverUrl);
20 | this.userName = userName;
21 | }
22 |
23 | public void publish(List<Plan> plans) {
24 | plans.forEach(this::publishPlan);
25 | }
26 |
27 | private void publishPlan(Plan plan) {
28 | bambooServer.publish(plan);
29 | PlanPermissions planPermission = createPlanPermission(plan.getIdentifier());
30 | bambooServer.publish(planPermission);
31 | }
32 |
33 | private PlanPermissions createPlanPermission(PlanIdentifier planIdentifier) {
34 | Permissions permissions = new Permissions()
35 | .userPermissions(userName, PermissionType.ADMIN, PermissionType.EDIT)
36 | .loggedInUserPermissions(PermissionType.EDIT)
37 | .loggedInUserPermissions(PermissionType.BUILD);
38 |
39 | return new PlanPermissions(planIdentifier)
40 | .permissions(permissions);
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/service/generator/plan/InlineBodies.java:
--------------------------------------------------------------------------------
1 | package bamboogenerator.service.generator.plan;
2 |
3 | class InlineBodies {
4 | static final String BODY_SUCCESS = "rm -f *.xml\n" +
5 | "cat << EOF >> success.xml\n" +
6 | "\n" +
7 | "\n" +
8 | "\n" +
9 | "EOF\n" +
10 | "i=1\n" +
11 | "while [ \"$i\" -le %d ]; do echo \"\" >> success.xml;i=$(( i + 1 )); done\n" +
12 | "cat << EOF >> success.xml\n" +
13 | "\n" +
14 | "\n" +
15 | "EOF";
16 |
17 | static final String BODY_FAIL = "rm -f *.xml\n" +
18 | "cat << EOF >> failed.xml\n" +
19 | " \n" +
20 | "\n" +
21 | "\n" +
22 | "EOF\n" +
23 | "i=1\n" +
24 | "while [ \"$i\" -le %d ]; do\n" +
25 | "if [ $i -eq %d ]; then\n" +
26 | "echo \"\" >> failed.xml;i=$(( i + 1 ));\n" +
27 | "else\n" +
28 | "echo \"\" >> failed.xml;i=$(( i + 1 ));\n" +
29 | "fi\n" +
30 | "done\n" +
31 | "cat << EOF >> failed.xml\n" +
32 | "Test assertion error\n" +
33 | "\n" +
34 | "\n" +
35 | "EOF";
36 | }
37 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/service/generator/plan/PlanGenerator.java:
--------------------------------------------------------------------------------
1 | package bamboogenerator.service.generator.plan;
2 |
3 | import bamboogenerator.model.PlanInfo;
4 | import com.atlassian.bamboo.specs.api.builders.Variable;
5 | import com.atlassian.bamboo.specs.api.builders.plan.Job;
6 | import com.atlassian.bamboo.specs.api.builders.plan.Plan;
7 | import com.atlassian.bamboo.specs.api.builders.plan.Stage;
8 | import com.atlassian.bamboo.specs.api.builders.plan.artifact.Artifact;
9 | import com.atlassian.bamboo.specs.api.builders.project.Project;
10 | import com.atlassian.bamboo.specs.builders.repository.git.GitRepository;
11 | import com.atlassian.bamboo.specs.builders.task.CheckoutItem;
12 | import com.atlassian.bamboo.specs.builders.task.ScriptTask;
13 | import com.atlassian.bamboo.specs.builders.task.TestParserTask;
14 | import com.atlassian.bamboo.specs.builders.task.VcsCheckoutTask;
15 | import com.atlassian.bamboo.specs.model.task.TestParserTaskProperties;
16 |
17 | import java.util.List;
18 | import java.util.stream.Collectors;
19 |
20 | import static bamboogenerator.service.generator.plan.InlineBodies.BODY_FAIL;
21 | import static bamboogenerator.service.generator.plan.InlineBodies.BODY_SUCCESS;
22 |
23 | public class PlanGenerator {
24 | private static final int TEST_COUNT = 1000;
25 | private static final String RESULT_NAME_FAIL = "failed.xml";
26 | private static final String RESULT_NAME_SUCCESS = "success.xml";
27 |
28 | public static List<Plan> generate(List<PlanInfo> planInfoList) {
29 | return planInfoList.stream()
30 | .map(PlanGenerator::createPlan)
31 | .collect(Collectors.toList());
32 | }
33 |
34 | private static Plan createPlan(PlanInfo planInfo) {
35 | boolean isFailedPlan = planInfo.isFailed();
36 | return new Plan(new Project().name(planInfo.getProjectName())
37 | .key(planInfo.getProjectKey()), planInfo.getPlanName(), planInfo.getPlanKey())
38 | .description("DCAPT Bamboo test build plan")
39 | .planRepositories(new GitRepository()
40 | .name("dcapt-test-repo")
41 | .url("https://bitbucket.org/atlassianlabs/dcapt-bamboo-test-repo.git")
42 | .branch("master"))
43 | .variables(new Variable("stack_name", ""))
44 | .stages(new Stage("Stage 1")
45 | .jobs(new Job("Job 1", "JB1")
46 | .tasks(
47 | new VcsCheckoutTask()
48 | .description("Checkout repository task")
49 | .cleanCheckout(true)
50 | .checkoutItems(new CheckoutItem()
51 | .repository("dcapt-test-repo").path("dcapt-test-repo")),
52 | new ScriptTask()
53 | .description("Run Bash code")
54 | .interpreterBinSh()
55 | .inlineBody("for i in $(seq 1 1000); do date=$(date -u); echo $date >> results.log; echo $date; sleep 0.06; done"),
56 | new ScriptTask()
57 | .description("Write XML test results")
58 | .interpreterBinSh()
59 | .inlineBody(isFailedPlan
60 | ? String.format(BODY_FAIL, TEST_COUNT, TEST_COUNT, TEST_COUNT)
61 | : String.format(BODY_SUCCESS, TEST_COUNT, TEST_COUNT))
62 | )
63 | .finalTasks(new TestParserTask(TestParserTaskProperties.TestType.JUNIT)
64 | .description("Unit test results parser task")
65 | .resultDirectories(isFailedPlan ? RESULT_NAME_FAIL : RESULT_NAME_SUCCESS)
66 | )
67 | .artifacts(new Artifact("Test Reports")
68 | .location(".")
69 | .copyPattern("*.log"))));
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/service/generator/plan/PlanInfoGenerator.java:
--------------------------------------------------------------------------------
1 | package bamboogenerator.service.generator.plan;
2 |
3 | import bamboogenerator.model.PlanInfo;
4 | import org.slf4j.Logger;
5 | import org.slf4j.LoggerFactory;
6 |
7 | import java.util.ArrayList;
8 | import java.util.HashMap;
9 | import java.util.List;
10 | import java.util.Map;
11 | import java.util.Random;
12 | import java.util.stream.Collectors;
13 |
14 | import static java.lang.Boolean.FALSE;
15 | import static java.lang.Boolean.TRUE;
16 |
17 | public class PlanInfoGenerator {
18 | private static final Logger LOG = LoggerFactory.getLogger(PlanInfoGenerator.class);
19 | private static final Map<Integer, String> LETTERS_BY_NUMBER = prepareLettersByNumber();
20 |
21 | public static List<PlanInfo> generate(int projectsNumber, int plansNumber, int failedPercent) {
22 | List<ArrayList<String>> projectNamesKeys = generateProjectsNameKeys(projectsNumber, plansNumber);
23 | List<Integer> failedPlansIndexes = generateFailedPlansIndexes(plansNumber, failedPercent);
24 | LOG.info("Project name keys {}", projectNamesKeys);
25 | LOG.info("Indexes of failed plans {}", failedPlansIndexes);
26 |
27 | List<PlanInfo> plans = new ArrayList<>();
28 | for (int i = 0; i < plansNumber; i++) {
29 | String formatted = String.format("%03d", i);
30 | String planName = formatted + " - Plan Success";
31 | boolean planIsFailed = FALSE;
32 | if (failedPlansIndexes.contains(i)) {
33 | planIsFailed = TRUE;
34 | planName = formatted + " - Plan Fail";
35 | }
36 | String planKey = "PLANKEY" + generatePlanKeySuffix(i);
37 | String projectName = projectNamesKeys.get(i).get(0);
38 | String projectKey = projectNamesKeys.get(i).get(1);
39 | LOG.info("Generating plan: PlanName: " + planName + ". PlanKey: " + planKey + ". " +
40 | "Into the project: ProjectName: " + projectName + ". ProjectKey: " + projectKey);
41 | plans.add(new PlanInfo(planName, planIsFailed, planKey, projectName, projectKey));
42 | }
43 |
44 | return plans;
45 | }
46 |
47 | private static List<ArrayList<String>> generateProjectsNameKeys(int projectsNumber, int plansToGenerate) {
48 | while (plansToGenerate % projectsNumber != 0) {
49 | projectsNumber = projectsNumber - 1;
50 | }
51 |
52 | int planPerProject = plansToGenerate / projectsNumber;
53 | List<ArrayList<String>> projectNameKey = new ArrayList<>();
54 |
55 | for (int i = 1; i < projectsNumber + 1; i++) {
56 | String projectName = "Project " + i;
57 | String projectKey = "PRJ" + i;
58 | ArrayList<String> arr = new ArrayList<>();
59 | arr.add(projectName);
60 | arr.add(projectKey);
61 | projectNameKey.add(arr);
62 | }
63 |
64 | // Copy projects data to create planPerProject logic
65 | List<ArrayList<String>> projectNameKeys = new ArrayList<>();
66 | for (ArrayList<String> element : projectNameKey) {
67 | for (int i = 0; i < planPerProject; i++) {
68 | projectNameKeys.add(element);
69 | }
70 | }
71 |
72 | return projectNameKeys;
73 | }
74 |
75 | private static List<Integer> generateFailedPlansIndexes(int planToGenerate, int percentOfFailed) {
76 | int numberOfFailed = Math.round((percentOfFailed * planToGenerate) / 100f);
77 | List<Integer> failedPlansIndexes = new ArrayList<>();
78 | for (int i = 0; i <= numberOfFailed; i++) {
79 | while (failedPlansIndexes.size() < numberOfFailed) {
80 | int randomIndex = new Random().nextInt(planToGenerate - 1) + 1;
81 | if (!failedPlansIndexes.contains(randomIndex)) {
82 | failedPlansIndexes.add(randomIndex);
83 | }
84 | }
85 | }
86 |
87 | return failedPlansIndexes;
88 | }
89 |
90 | private static Map<Integer, String> prepareLettersByNumber() {
91 | Map<Integer, String> map = new HashMap<>();
92 | map.put(0, "A");
93 | map.put(1, "B");
94 | map.put(2, "C");
95 | map.put(3, "D");
96 | map.put(4, "E");
97 | map.put(5, "F");
98 | map.put(6, "G");
99 | map.put(7, "H");
100 | map.put(8, "I");
101 | map.put(9, "J");
102 |
103 | return map;
104 | }
105 |
106 | private static String generatePlanKeySuffix(int number) {
107 | return String.valueOf(number)
108 | .chars()
109 | .mapToObj(Character::getNumericValue)
110 | .map(LETTERS_BY_NUMBER::get)
111 | .collect(Collectors.joining());
112 | }
113 | }
114 |
--------------------------------------------------------------------------------
/app/util/bamboo/bamboo_dataset_generator/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/app/util/common_util.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import functools
3 | import requests
4 | from datetime import timedelta
5 | from timeit import default_timer as timer
6 | from packaging import version
7 | from util.conf import TOOLKIT_VERSION
8 |
9 | CONF_URL = "https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/conf.py"
10 |
11 |
12 | def get_latest_version(supported=True):
13 | """
14 | Get the latest version of DCAPT from the master branch of the Git repository.
15 |
16 | :param supported: if True, read the TOOLKIT_VERSION marker; otherwise read UNSUPPORTED_VERSION.
17 | :return: latest version.
18 | """
19 | VERSION_STR = "TOOLKIT_VERSION" if supported else "UNSUPPORTED_VERSION"
20 | try:
21 | r = requests.get(CONF_URL)
22 | r.raise_for_status()
23 | conf = r.text.splitlines()
24 | version_line = next((line for line in conf if VERSION_STR in line))
25 | latest_version_str = version_line.split(
26 | '=')[1].replace("'", "").replace('"', "").strip()
27 | latest_version = version.parse(latest_version_str)
28 | return latest_version
29 | except requests.exceptions.RequestException as e:
30 | print(f"Warning: DCAPT check for update failed - {e}")
31 | except StopIteration:
32 | print(f"Warning: failed to find {VERSION_STR} in the remote conf file")
33 |
34 |
35 | def get_unsupported_version():
36 | """
37 | Get the latest unsupported version of DCAPT from the master branch of the Git repository.
38 |
39 | :return: latest unsupported version.
40 | """
41 | unsupported_version_str = get_latest_version(supported=False)
42 |
43 | return unsupported_version_str
44 |
45 |
46 | def get_current_version():
47 | """
48 | Get the DCAPT version from the local repository that the tests were run from.
49 |
50 | :return: local DCAPT version.
51 | """
52 | return version.parse(TOOLKIT_VERSION)
53 |
54 |
55 | def print_timing(message, sep='-'):
56 | assert message is not None, "Message is not passed to print_timing decorator"
57 |
58 | def deco_wrapper(func):
59 | @functools.wraps(func)
60 | def wrapper(*args, **kwargs):
61 | start = timer()
62 | print(sep * 20)
63 | print(f'{message} started {datetime.datetime.now().strftime("%H:%M:%S")}')
64 | result = func(*args, **kwargs)
65 | end = timer()
66 | print(f"{message} finished in {timedelta(seconds=end - start)}")
67 | print(sep * 20)
68 | return result
69 |
70 | return wrapper
71 |
72 | return deco_wrapper
73 |
74 |
75 | def webdriver_pretty_debug(webdriver, additional_field):
76 | debug_message = {}
77 | for key, value in additional_field.items():
78 | debug_message[key] = value
79 |
80 | if 'debug_info' in dir(webdriver):
81 | webdriver.debug_info['current_url'] = webdriver.current_url
82 | webdriver.debug_info['session_id'] = webdriver.session_id
83 | debug_message.update(webdriver.debug_info)
84 | list_to_print = '\n'.join(
85 | [f'{key}: {value}' for key, value in debug_message.items()])
86 | pretty_formatted_string = f"""=============== WEBDRIVER DEBUG INFORMATION ===============""" + \
87 | f'\n{list_to_print}' + """\n===========================================================\n"""
88 | return pretty_formatted_string
89 |
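
As a usage illustration, here is a minimal sketch of how these helpers could be combined in a standalone script; the decorated `prepare_dataset` function and the version-warning logic are hypothetical and not part of the toolkit itself:

```python
# Hypothetical standalone script; assumes PYTHONPATH points to the app folder
from util.common_util import print_timing, get_current_version, get_latest_version


@print_timing("Dataset preparation")  # prints start time and elapsed duration around the call
def prepare_dataset():
    return sum(range(1_000_000))  # placeholder for real work


if __name__ == "__main__":
    prepare_dataset()

    latest = get_latest_version()   # may return None if the remote check fails
    current = get_current_version()
    if latest is not None and current < latest:
        print(f"Warning: local DCAPT version {current} is older than the latest released {latest}")
```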
--------------------------------------------------------------------------------
/app/util/confluence/browser_metrics.py:
--------------------------------------------------------------------------------
1 |
2 | browser_metrics = {
3 | "selenium_login": ["confluence.dashboard.view"],
4 | "selenium_view_blog": ["confluence.blogpost.view"],
5 | "selenium_view_dashboard": ["confluence.dashboard.view"],
6 | "selenium_view_page": ["confluence.page.view"],
7 | "selenium_view_page_from_cache": ["confluence.page.view"],
8 | "selenium_create_page": ["confluence.page.create.collaborative.view",
9 | "confluence.page.create.collaborative.view.connected"],
10 | "selenium_edit_page_by_url": ["confluence.page.edit.collaborative.view",
11 | "confluence.page.edit.collaborative.view.connected"],
12 | "selenium_quick_edit_page_click": ["confluence.page.edit.collaborative.quick-view",
13 | "confluence.page.edit.collaborative.quick-view.connected"],
14 | }
15 |
--------------------------------------------------------------------------------
/app/util/data_preparation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/util/data_preparation/__init__.py
--------------------------------------------------------------------------------
/app/util/data_preparation/bamboo_prepare_data.py:
--------------------------------------------------------------------------------
1 | from prepare_data_common import __generate_random_string, __write_to_file, __warnings_filter
2 | from util.api.bamboo_clients import BambooClient
3 | from util.conf import BAMBOO_SETTINGS
4 | from util.project_paths import BAMBOO_BUILD_PLANS, BAMBOO_USERS
5 |
6 | __warnings_filter()
7 |
8 | BUILD_PLANS = 'plans'
9 | USERS = 'users'
10 | PROJECTS = 'projects'
11 | DEFAULT_PASSWORD = 'password'
12 | AGENTS_BUILD_PLANS_PERCENT = 15
13 |
14 |
15 | def get_users(client, users_count):
16 | existing_users = client.get_users(users_count)
17 | existing_users = [user for user in existing_users if user['name'] != 'admin']
18 |
19 | users_to_generate = 0
20 | users = []
21 | if len(existing_users) <= users_count:
22 | users_to_generate = users_count - len(existing_users)
23 | users.extend(existing_users)
24 | if users_to_generate:
25 | for i in range(0, users_to_generate):
26 | username = f'performance_user_{__generate_random_string(5)}'
27 | password = DEFAULT_PASSWORD
28 | generated_user = client.create_user(name=username, password=password)
29 | users.append(generated_user)
30 | return users
31 |
32 |
33 | def generate_project_name_keys_dict(client):
34 | projects_name_key_dict = {}
35 | projects = client.get_projects()
36 | for project in projects:
37 | projects_name_key_dict[project['name']] = project['key']
38 | return projects_name_key_dict
39 |
40 |
41 | def assert_number_of_agents(client):
42 | number_of_online_agents = len(client.get_remote_agents(online=True))
43 | if number_of_online_agents < BAMBOO_SETTINGS.number_of_agents:
44 | raise Exception(f'ERROR: There are {number_of_online_agents} online agents out of the desired '
45 | f'{BAMBOO_SETTINGS.number_of_agents}. Please review the number of online agents.')
46 | print(f'There are {number_of_online_agents} online agents.')
47 |
48 |
49 | def verify_agents_plans_setup():
50 | parallel_plans_count = BAMBOO_SETTINGS.parallel_plans_count
51 | number_of_agents = BAMBOO_SETTINGS.number_of_agents
52 | agents_plans_diff_in_perc = 100 * float(number_of_agents) / float(parallel_plans_count) - 100
53 |
54 | if agents_plans_diff_in_perc < AGENTS_BUILD_PLANS_PERCENT:
55 | raise Exception(f'ERROR: The number of online agents should exceed the number of parallel running '
56 | f'plans by at least {AGENTS_BUILD_PLANS_PERCENT}%. '
57 | f'There are {number_of_agents} agents and {parallel_plans_count} parallel plans '
58 | f'configured, which is a {agents_plans_diff_in_perc}% difference.')
59 |
60 |
61 | def __create_dataset(client):
62 | dataset = dict()
63 | dataset[BUILD_PLANS] = client.get_build_plans(max_result=2000)
64 | dataset[PROJECTS] = generate_project_name_keys_dict(client)
65 | dataset[USERS] = get_users(client, BAMBOO_SETTINGS.concurrency)
66 |
67 | return dataset
68 |
69 |
70 | def write_test_data_to_files(dataset):
71 | build_plans = [f"{dataset[PROJECTS][build_plan['searchEntity']['projectName']]},{build_plan['id']}" for
72 | build_plan in dataset[BUILD_PLANS]]
73 | __write_to_file(BAMBOO_BUILD_PLANS, build_plans)
74 | users = [f"{user['name']},{DEFAULT_PASSWORD}" for user in dataset[USERS]]
75 | __write_to_file(BAMBOO_USERS, users)
76 |
77 |
78 | def main():
79 | print("Started preparing data")
80 | verify_agents_plans_setup()
81 |
82 | url = BAMBOO_SETTINGS.server_url
83 | print("Server url: ", url)
84 |
85 | client = BambooClient(url, BAMBOO_SETTINGS.admin_login, BAMBOO_SETTINGS.admin_password,
86 | verify=BAMBOO_SETTINGS.secure)
87 |
88 | dataset = __create_dataset(client)
89 | write_test_data_to_files(dataset)
90 | assert_number_of_agents(client)
91 |
92 | print("Finished preparing data")
93 |
94 |
95 | if __name__ == "__main__":
96 | main()
97 |
--------------------------------------------------------------------------------
/app/util/data_preparation/crowd_prepare_data.py:
--------------------------------------------------------------------------------
1 | from prepare_data_common import __write_to_file, __warnings_filter
2 | from util.api.crowd_clients import CrowdRestClient
3 | from util.conf import CROWD_SETTINGS
4 | from util.project_paths import CROWD_USERS
5 |
6 | __warnings_filter()
7 |
8 |
9 | USERS = "users"
10 | DEFAULT_USER_PASSWORD = 'password'
11 | DEFAULT_USER_PREFIX = 'performance_'
12 | USER_SEARCH_CQL = f'name={DEFAULT_USER_PREFIX}*'
13 | ERROR_LIMIT = 10
14 |
15 | USERS_COUNT = 100000
16 |
17 |
18 | def __get_users(crowd_api, count):
19 | cur_perf_users = crowd_api.users_search_parallel(cql=USER_SEARCH_CQL, max_results=count)
20 | if len(cur_perf_users) >= count:
21 | print(f'{USERS_COUNT} performance test users were found')
22 | return cur_perf_users
23 | else:
24 | raise SystemExit(f'Your Atlassian Crowd instance does not have enough users. '
25 | f'Current users count {len(cur_perf_users)} out of {count}.')
26 |
27 |
28 | def __create_data_set(crowd_api):
29 | dataset = dict()
30 | dataset[USERS] = __get_users(crowd_api, USERS_COUNT)
31 |
32 | print(f'Users count: {len(dataset[USERS])}')
33 |
34 | return dataset
35 |
36 |
37 | def write_test_data_to_files(dataset):
38 |
39 | users = [f"{user},{DEFAULT_USER_PASSWORD}" for user in dataset[USERS]]
40 | __write_to_file(CROWD_USERS, users)
41 |
42 |
43 | def main():
44 | print("Started preparing data")
45 |
46 | url = CROWD_SETTINGS.server_url
47 | print("Server url: ", url)
48 |
49 | client = CrowdRestClient(url, CROWD_SETTINGS.application_name,
50 | CROWD_SETTINGS.application_password, verify=CROWD_SETTINGS.secure)
51 |
52 | dataset = __create_data_set(client)
53 | write_test_data_to_files(dataset)
54 |
55 | print("Finished preparing data")
56 |
57 |
58 | if __name__ == "__main__":
59 | main()
60 |
--------------------------------------------------------------------------------
/app/util/data_preparation/crowd_sync_check.py:
--------------------------------------------------------------------------------
1 | import functools
2 |
3 | from timeit import default_timer as timer
4 | from prepare_data_common import __warnings_filter
5 | from datetime import timedelta
6 | from util.conf import CROWD_SETTINGS
7 | from util.api.crowd_clients import CrowdRestClient
8 |
9 | __warnings_filter()
10 |
11 |
12 | def print_timing(message):
13 | assert message is not None, "Message is not passed to print_timing decorator"
14 |
15 | def deco_wrapper(func):
16 | @functools.wraps(func)
17 | def wrapper(*args, **kwargs):
18 | start = timer()
19 | result = func(*args, **kwargs)
20 | end = timer()
21 | print(f"{message}: {timedelta(seconds=end - start)} seconds")
22 | return result
23 |
24 | return wrapper
25 |
26 | return deco_wrapper
27 |
28 |
29 | @print_timing('Users synchronization')
30 | def get_users(client):
31 | users = client.search(start_index=0, max_results='-1', expand='user')
32 | return users
33 |
34 |
35 | @print_timing('Users membership synchronization')
36 | def get_users_membership(client):
37 | membership = client.get_group_membership()
38 | return membership
39 |
40 |
41 | if __name__ == "__main__":
42 | client = CrowdRestClient(CROWD_SETTINGS.server_url, CROWD_SETTINGS.application_name,
43 | CROWD_SETTINGS.application_password, verify=CROWD_SETTINGS.secure)
44 | get_users(client)
45 | get_users_membership(client)
46 |
--------------------------------------------------------------------------------
/app/util/data_preparation/prepare_data_common.py:
--------------------------------------------------------------------------------
1 | import random
2 | import string
3 | import warnings
4 |
5 | from os import makedirs
6 |
7 |
8 | def __warnings_filter():
9 | warnings.filterwarnings('ignore', message='Unverified HTTPS request')
10 |
11 |
12 | def __generate_random_string(length=20):
13 | return "".join([random.choice(string.ascii_lowercase) for _ in range(length)])
14 |
15 |
16 | def __write_to_file(file_path, items):
17 | makedirs(file_path.parent, exist_ok=True)
18 | with open(file_path, 'w') as f:
19 | for item in items:
20 | f.write(f"{item}\n")
21 |
22 |
23 | def __read_file(file):
24 | with file.open('r') as f:
25 | lines = f.read().splitlines()
26 | return lines
27 |
--------------------------------------------------------------------------------
/app/util/exceptions.py:
--------------------------------------------------------------------------------
1 | """Module with all custom exceptions"""
2 |
3 |
4 | class WebDriverExceptionPostpone(Exception):
5 | """
6 | Postpones an exception raised from the webdriver until the first webdriver.get call. Thanks to that, BZT
7 | versions >1.16.3 recognize tests with a webdriver exception as failed.
8 | """
9 |
10 | def __init__(self, msg: str):
11 | self.msg = msg
12 |
13 | def get(self, *args, **kwargs):
14 | """
15 | Simple method called instead of webdriver.get; raises an exception with the message from the original driver error.
16 |
17 | :param args: args passed to the get method
18 | :param kwargs: kwargs passed to the get method
19 | :return: never returns; an Exception with the driver's message is raised
20 | """
21 | raise Exception(self.msg)
22 |
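
A minimal usage sketch, assuming a driver-factory wrapper around Selenium (the `create_driver_or_postpone` helper below is hypothetical and only illustrates the intent described in the docstrings above):

```python
# Hypothetical helper: return a real driver, or a postponed-exception stand-in on failure
from selenium import webdriver

from util.exceptions import WebDriverExceptionPostpone


def create_driver_or_postpone(options=None):
    try:
        return webdriver.Chrome(options=options)
    except Exception as e:  # e.g. WebDriverException during driver start-up
        return WebDriverExceptionPostpone(str(e))


driver = create_driver_or_postpone()
# If driver creation failed, the original error is raised here, inside the test body,
# so BZT marks the test itself as failed instead of silently skipping it.
driver.get("https://example.com")
```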
--------------------------------------------------------------------------------
/app/util/jmeter/README.md:
--------------------------------------------------------------------------------
1 | ## Start JMeter UI
2 | Utility script to launch the JMeter UI with all settings taken from the product `.yml` file.
3 | Useful for JMeter debugging or developing JMeter-based app-specific actions.
4 |
5 | 1. Make sure you have run the `bzt your_product.yml` command locally at least once to automatically install JMeter and JMeter plugins on the local computer.
6 | 1. Check that the `.yml` file has correct settings for `application_hostname`, `application_protocol`, `application_port`, `application_postfix`, etc.
7 | 1. Set the desired execution percentage for `standalone_extension`. The default value is `0`, which means the `standalone_extension` action will not be executed.
8 | Similarly, if you want to debug a specific action, set its percentage to 100 and all others to 0.
9 | 1. Activate toolkit virtualenv. See [toolkit README](../../../README.md) file for more details.
10 | 1. Navigate to `app` folder and run command depending on your application type:
11 | ```
12 | cd app
13 | python util/jmeter/start_jmeter_ui.py --app jira
14 | # or
15 | python util/jmeter/start_jmeter_ui.py --app confluence
16 | # or
17 | python util/jmeter/start_jmeter_ui.py --app bitbucket
18 | # or
19 | python util/jmeter/start_jmeter_ui.py --app jsm --type agents
20 | # or
21 | python util/jmeter/start_jmeter_ui.py --app jsm --type customers
22 | ```
23 | 1. Right-click on `View Results Tree` controller and select `Enable` option.
24 | 1. Click `Start` button.
25 | 1. Disable `View Results Tree` controller before full-scale results generation.
--------------------------------------------------------------------------------
/app/util/jmeter/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/util/jmeter/__init__.py
--------------------------------------------------------------------------------
/app/util/jtl_convertor/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/util/jtl_convertor/__init__.py
--------------------------------------------------------------------------------
/app/util/jtl_convertor/jtl_validator.py:
--------------------------------------------------------------------------------
1 | import time
2 | from csv import DictReader
3 | from pathlib import Path
4 | from types import FunctionType
5 | from typing import List, Dict
6 |
7 | from util.jtl_convertor.validation_exception import ValidationException
8 | from util.jtl_convertor.validation_funcs import is_not_none, is_number, is_not_blank
9 |
10 | CONNECT = 'Connect'
11 | HOSTNAME = 'Hostname'
12 | LATENCY = 'Latency'
13 | ALL_THREADS = 'allThreads'
14 | GRP_THREADS = 'grpThreads'
15 | BYTES = 'bytes'
16 | SUCCESS = 'success'
17 | THREAD_NAME = 'threadName'
18 | RESPONSE_MESSAGE = 'responseMessage'
19 | RESPONSE_CODE = 'responseCode'
20 | LABEL = 'label'
21 | ELAPSED = 'elapsed'
22 | TIME_STAMP = 'timeStamp'
23 | METHOD = 'method'
24 |
25 | SUPPORTED_JTL_HEADER: List[str] = [TIME_STAMP, ELAPSED, LABEL, SUCCESS]
26 |
27 | VALIDATION_FUNCS_BY_COLUMN: Dict[str, List[FunctionType]] = {
28 | TIME_STAMP: [is_not_none, is_number],
29 | ELAPSED: [is_not_none, is_number],
30 | LABEL: [is_not_blank],
31 | RESPONSE_CODE: [],
32 | RESPONSE_MESSAGE: [],
33 | THREAD_NAME: [],
34 | SUCCESS: [],
35 | BYTES: [is_not_none, is_number],
36 | GRP_THREADS: [is_not_none, is_number],
37 | ALL_THREADS: [is_not_none, is_number],
38 | LATENCY: [],
39 | HOSTNAME: [],
40 | CONNECT: [],
41 | METHOD: [],
42 | }
43 |
44 |
45 | def get_validation_func(column: str) -> List[FunctionType]:
46 | validation_funcs = VALIDATION_FUNCS_BY_COLUMN.get(column)
47 | if validation_funcs is None:
48 | raise Exception(f"There is no validation function for column: [{column}]")
49 |
50 | return validation_funcs
51 |
52 |
53 | def __validate_value(column: str, value: str) -> None:
54 | validation_funcs = get_validation_func(column)
55 | try:
56 | for validation_func in validation_funcs:
57 | validation_func(value)
58 | except ValidationException as e:
59 | raise ValidationException(f"Column: [{column}]. Validation message: {str(e)}")
60 |
61 |
62 | def __validate_row(jtl_row: Dict) -> None:
63 | for column, value in jtl_row.items():
64 | __validate_value(column, str(value))
65 |
66 |
67 | def __validate_header(headers: List) -> None:
68 | for header in SUPPORTED_JTL_HEADER:
69 | if header not in headers:
70 | __raise_validation_error(f"Headers are not correct. Required headers are {SUPPORTED_JTL_HEADER}. "
71 | f"{header} is missing")
72 |
73 |
74 | def __raise_validation_error(error_msg: str) -> None:
75 | raise ValidationException(error_msg)
76 |
77 |
78 | def __validate_rows(reader) -> None:
79 | for file_row_num, jtl_row in enumerate(reader, 2):
80 | try:
81 | __validate_row(jtl_row)
82 | except ValidationException as e:
83 | __raise_validation_error(f"File row number: {file_row_num}. {str(e)}")
84 |
85 |
86 | def validate(file_path: Path) -> None:
87 | print(f'Started validating jtl file: {file_path}')
88 | start_time = time.time()
89 | try:
90 | with file_path.open(mode='r') as f:
91 | reader: DictReader = DictReader(f)
92 | __validate_header(reader.fieldnames)
93 | __validate_rows(reader)
94 |
95 | except (ValidationException, FileNotFoundError) as e:
96 | raise SystemExit(f"ERROR: Validation failed. File path: [{file_path}]. Validation details: {str(e)}")
97 |
98 | print(f'File: {file_path} validated in {time.time() - start_time} seconds')
99 |
--------------------------------------------------------------------------------
/app/util/jtl_convertor/validation_exception.py:
--------------------------------------------------------------------------------
1 | class ValidationException(Exception):
2 | pass
3 |
--------------------------------------------------------------------------------
/app/util/jtl_convertor/validation_funcs.py:
--------------------------------------------------------------------------------
1 | from util.jtl_convertor.validation_exception import ValidationException
2 |
3 |
4 | def is_not_none(value: str) -> None:
5 | if value is None:
6 | raise ValidationException("Value is empty")
7 |
8 |
9 | def is_number(value: str) -> None:
10 | if not value.isdigit():
11 | raise ValidationException(f"Value [{value}] is not a digit")
12 |
13 |
14 | def is_not_blank(value: str) -> None:
15 | if (value is None) or (not value.strip()):
16 | raise ValidationException("Value is blank")
17 |
--------------------------------------------------------------------------------
/app/util/k8s/aws_envs:
--------------------------------------------------------------------------------
1 | # aws_envs file should contain AWS variables needed for authorization (without quotes)
2 | AWS_ACCESS_KEY_ID=abc
3 | AWS_SECRET_ACCESS_KEY=efg
4 |
--------------------------------------------------------------------------------
/app/util/k8s/bzt_on_pod.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DCAPT_DOCKER_IMAGE="atlassian/dcapt"
4 | echo "INFO: DCAPT docker image: $DCAPT_DOCKER_IMAGE"
5 |
6 | if [[ -z "$ENVIRONMENT_NAME" ]]; then
7 | echo "ERROR: ENVIRONMENT_NAME variable is not set."
8 | exit 1
9 | fi
10 | echo "INFO: Environment name: $ENVIRONMENT_NAME"
11 |
12 | if [[ -z "$REGION" ]]; then
13 | echo "ERROR: REGION variable is not set."
14 | exit 1
15 | fi
16 | echo "INFO: AWS REGION: $REGION"
17 |
18 | if [ $# -eq 0 ]; then
19 | echo "ERROR: No arguments supplied. Product .yml file needs to be passed as the first argument. E.g. jira.yml"
20 | exit 1
21 | fi
22 |
23 | if [[ $1 =~ "yml" ]]; then
24 | echo "INFO: Product .yml: $1"
25 | else
26 | echo "ERROR: first argument should be product.yml, e.g. jira.yml"
27 | echo "ERROR: provided first argument: $1"
28 | exit 1
29 | fi
30 |
31 |
32 | echo "INFO: Update kubeconfig"
33 | aws eks update-kubeconfig --name atlas-"$ENVIRONMENT_NAME"-cluster --region "$REGION"
34 |
35 | echo "INFO: Get execution environment pod name"
36 | exec_pod_name=$(kubectl get pods -n atlassian -l=exec=true --no-headers -o custom-columns=":metadata.name")
37 |
38 | if [[ -z "$exec_pod_name" ]]; then
39 | echo "ERROR: Current cluster does not have an execution environment pod. Check what environment type is used.
40 | The development environment does not have an execution environment pod by default because it is dedicated to local app-specific actions development only."
41 | exit 1
42 | fi
43 |
44 | echo "INFO: Execution environment pod name: $exec_pod_name"
45 |
46 | echo "INFO: Cleanup dc-app-performance-toolkit folder on the exec env pod"
47 | kubectl exec -it "$exec_pod_name" -n atlassian -- rm -rf /dc-app-performance-toolkit
48 |
49 | echo "INFO: Copy latest dc-app-performance-toolkit folder to the exec env pod"
50 | start=$(date +%s)
51 | # tar only app folder, exclude results and util/k8s folder
52 | tar -czf dcapt.tar.gz -C dc-app-performance-toolkit --exclude results --exclude util/k8s app Dockerfile requirements.txt
53 | kubectl exec -it "$exec_pod_name" -n atlassian -- mkdir /dc-app-performance-toolkit
54 | cat dcapt.tar.gz | kubectl exec -i -n atlassian "$exec_pod_name" -- tar xzf - -C /dc-app-performance-toolkit
55 | rm -rf dcapt.tar.gz
56 | end=$(date +%s)
57 | runtime=$((end-start))
58 | echo "INFO: Copy finished in $runtime seconds"
59 |
60 | if [[ $2 == "--docker_image_rebuild" ]]; then
61 | echo "INFO: Rebuild docker image"
62 | kubectl exec -it "$exec_pod_name" -n atlassian -- docker build -t $DCAPT_DOCKER_IMAGE dc-app-performance-toolkit
63 | fi
64 |
65 | echo "INFO: Run bzt on the exec env pod"
66 | kubectl exec -it "$exec_pod_name" -n atlassian -- docker run --shm-size=4g -v "/dc-app-performance-toolkit:/dc-app-performance-toolkit" $DCAPT_DOCKER_IMAGE "$1"
67 | sleep 10
68 |
69 | echo "INFO: Copy results folder from the exec env pod to local"
70 | # Ensure the local results directory exists
71 | local_results_dir="/data-center-terraform/dc-app-performance-toolkit/app/results"
72 | mkdir -p "$local_results_dir"
73 |
74 | copy_success=false
75 | for _ in {1..3}; do
76 | if kubectl exec -n atlassian "$exec_pod_name" --request-timeout=60s -- tar czf - -C /dc-app-performance-toolkit/app results | tar xzf - -C "$local_results_dir" --strip-components=1; then
77 | copy_success=true
78 | break
79 | else
80 | echo "Copying failed, retrying..."
81 | sleep 5
82 | fi
83 | done
84 |
85 | if [[ "$copy_success" != "true" ]]; then
86 | echo "ERROR: Copy results folder failed"
87 | exit 1
88 | fi
--------------------------------------------------------------------------------
/app/util/k8s/copy_run_results.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DCAPT_DOCKER_IMAGE="atlassian/dcapt"
4 | echo "INFO: DCAPT docker image: $DCAPT_DOCKER_IMAGE"
5 |
6 | if [[ -z "$ENVIRONMENT_NAME" ]]; then
7 | echo "ERROR: ENVIRONMENT_NAME variable is not set."
8 | exit 1
9 | fi
10 | echo "INFO: Environment name: $ENVIRONMENT_NAME"
11 |
12 | if [[ -z "$REGION" ]]; then
13 | echo "ERROR: REGION variable is not set."
14 | exit 1
15 | fi
16 | echo "INFO: AWS REGION: $REGION"
17 |
18 | echo "INFO: Update kubeconfig"
19 | aws eks update-kubeconfig --name atlas-"$ENVIRONMENT_NAME"-cluster --region "$REGION"
20 |
21 | echo "INFO: Get execution environment pod name"
22 | exec_pod_name=$(kubectl get pods -n atlassian -l=exec=true --no-headers -o custom-columns=":metadata.name")
23 |
24 | if [[ -z "$exec_pod_name" ]]; then
25 | echo "ERROR: Current cluster does not have an execution environment pod. Check what environment type is used.
26 | The development environment does not have an execution environment pod by default because it is dedicated to local app-specific actions development only."
27 | exit 1
28 | fi
29 |
30 | echo "INFO: Execution environment pod name: $exec_pod_name"
31 |
32 | echo "INFO: Copy results folder from the exec env pod to local"
33 | # Ensure the local results directory exists
34 | local_results_dir="/data-center-terraform/dc-app-performance-toolkit/app/results"
35 | mkdir -p "$local_results_dir"
36 |
37 | copy_success=false
38 | for _ in {1..3}; do
39 | if kubectl exec -n atlassian "$exec_pod_name" --request-timeout=60s -- tar czf - -C /dc-app-performance-toolkit/app results | tar xzf - -C "$local_results_dir" --strip-components=1; then
40 | copy_success=true
41 | break
42 | else
43 | echo "Copying failed, retrying..."
44 | sleep 5
45 | fi
46 | done
47 |
48 | if [[ "$copy_success" != "true" ]]; then
49 | echo "ERROR: Copy results folder failed"
50 | exit 1
51 | fi
--------------------------------------------------------------------------------
/app/util/k8s/script-runner.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: script-runner
5 | labels:
6 | app: script-runner
7 | spec:
8 | containers:
9 | - name: script-runner
10 | image: ubuntu:latest
11 | command:
12 | - /bin/sh
13 | - -c
14 | - |
15 | echo "running below scripts"
16 | apt update;
17 | apt install postgresql-client curl wget -y;
18 | /bin/sleep 3650d;
19 | imagePullPolicy: IfNotPresent
20 | restartPolicy: Always
--------------------------------------------------------------------------------
/app/util/post_run/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/util/post_run/__init__.py
--------------------------------------------------------------------------------
/app/util/post_run/cleanup_results_dir.py:
--------------------------------------------------------------------------------
1 | import os
2 | from util.project_paths import ENV_TAURUS_ARTIFACT_DIR
3 |
4 | FILES_TO_REMOVE = ['jmeter.out',
5 | 'jmeter-bzt.properties',
6 | 'merged.json',
7 | 'merged.yml',
8 | 'PyTestExecutor.ldjson',
9 | 'system.properties',
10 | 'locust.out']
11 |
12 | for file in FILES_TO_REMOVE:
13 | file_path = ENV_TAURUS_ARTIFACT_DIR / file
14 | if file_path.exists():
15 | try:
16 | os.remove(file_path)
17 | print(f'The {file} was removed successfully')
18 | except OSError as e:
19 | print(f'Warning: Deleting the {file} failed! Error: {file_path}: {e.strerror}')
20 |
--------------------------------------------------------------------------------
/app/util/post_run/jmeter_post_check.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from shutil import rmtree
4 | from util.project_paths import ENV_TAURUS_ARTIFACT_DIR
5 |
6 |
7 | JMETER_JTL_FILE_NAME = 'kpi.jtl'
8 |
9 | jmeter_home_path = Path().home() / '.bzt' / 'jmeter-taurus'
10 | jmeter_jtl_file = ENV_TAURUS_ARTIFACT_DIR / JMETER_JTL_FILE_NAME
11 |
12 | if not os.path.exists(jmeter_jtl_file):
13 | if jmeter_home_path.exists():
14 | print(f'jmeter_post_check: removing {jmeter_home_path}')
15 | rmtree(str(jmeter_home_path))
16 | raise SystemExit(f'jmeter_post_check: ERROR - {jmeter_jtl_file} was not found. '
17 | f'JMeter folder {jmeter_home_path} was removed for recovery '
18 | f'and will be automatically downloaded on the next bzt run.')
19 |
20 | print('jmeter_post_check: PASS')
21 |
--------------------------------------------------------------------------------
/app/util/pre_run/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/be7881f8cd4fe03b7441b9dd129bbb3202c1b3e7/app/util/pre_run/__init__.py
--------------------------------------------------------------------------------
/app/util/pre_run/environment_checker.py:
--------------------------------------------------------------------------------
1 | from sys import version_info
2 |
3 | SUPPORTED_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13"]
4 |
5 | python_full_version = '.'.join(map(str, version_info[0:3]))
6 | python_short_version = '.'.join(map(str, version_info[0:2]))
7 | print("Python version: {}".format(python_full_version))
8 | if python_short_version not in SUPPORTED_PYTHON_VERSIONS:
9 | raise SystemExit("Python version {} is not supported. "
10 | "Supported versions: {}.".format(python_full_version, SUPPORTED_PYTHON_VERSIONS))
11 |
12 | # Print toolkit version after Python check
13 | from util.conf import TOOLKIT_VERSION # noqa E402
14 |
15 | print("Data Center App Performance Toolkit version: {}".format(TOOLKIT_VERSION))
16 |
--------------------------------------------------------------------------------
/app/util/pre_run/git_client_check.py:
--------------------------------------------------------------------------------
1 | from subprocess import check_output
2 |
3 |
4 | try:
5 | print("Git version: {}".format(check_output(["git", "--version"])))
6 | except Exception:
7 | raise Exception("Please check that git is installed and available from the command line.")
8 |
--------------------------------------------------------------------------------
/app/util/project_paths.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import os
3 | from pathlib import Path
4 |
5 |
6 | def __get_jira_yml():
7 | return Path(__file__).parents[1] / "jira.yml"
8 |
9 |
10 | def __get_jsm_yml():
11 | return Path(__file__).parents[1] / "jsm.yml"
12 |
13 |
14 | def __get_datasets():
15 | return Path(__file__).parents[1] / "datasets"
16 |
17 |
18 | def __get_jira_datasets():
19 | return __get_datasets() / "jira"
20 |
21 |
22 | def __get_jsm_datasets():
23 | return __get_datasets() / "jsm"
24 |
25 |
26 | def __get_jira_dataset(file_name):
27 | return __get_jira_datasets() / file_name
28 |
29 |
30 | def __get_jsm_dataset(file_name):
31 | return __get_jsm_datasets() / file_name
32 |
33 |
34 | def __get_confluence_yml():
35 | return Path(__file__).parents[1] / "confluence.yml"
36 |
37 |
38 | def __get_bitbucket_yml():
39 | return Path(__file__).parents[1] / "bitbucket.yml"
40 |
41 |
42 | def __get_bitbucket_datasets():
43 | return __get_datasets() / "bitbucket"
44 |
45 |
46 | def __get_crowd_yml():
47 | return Path(__file__).parents[1] / "crowd.yml"
48 |
49 |
50 | def __get_crowd_datasets():
51 | return __get_datasets() / "crowd"
52 |
53 |
54 | def __get_crowd_dataset(file_name):
55 | return __get_crowd_datasets() / file_name
56 |
57 |
58 | def __get_bamboo_yml():
59 | return Path(__file__).parents[1] / "bamboo.yml"
60 |
61 |
62 | def __get_bamboo_datasets():
63 | return __get_datasets() / "bamboo"
64 |
65 |
66 | def __get_confluence_datasets():
67 | return __get_datasets() / "confluence"
68 |
69 |
70 | def __get_confluence_dataset(file_name):
71 | return __get_confluence_datasets() / file_name
72 |
73 |
74 | def __get_bamboo_dataset(file_name):
75 | return __get_bamboo_datasets() / file_name
76 |
77 |
78 | def __get_bitbucket_dataset(file_name):
79 | return __get_bitbucket_datasets() / file_name
80 |
81 |
82 | def __get_taurus_artifacts_dir():
83 | if 'TAURUS_ARTIFACTS_DIR' in os.environ:
84 | return Path(os.environ.get('TAURUS_ARTIFACTS_DIR'))
85 | else:
86 | results_dir_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
87 | local_run_results = Path(f'results/local/{results_dir_name}')
88 | local_run_results.mkdir(parents=True)
89 | return local_run_results
90 |
91 |
92 | def __get_default_test_actions():
93 | return Path(__file__).parents[0] / "default_test_actions.json"
94 |
95 |
96 | JIRA_YML = __get_jira_yml()
97 | JIRA_DATASETS = __get_jira_datasets()
98 | JIRA_DATASET_JQLS = __get_jira_dataset('jqls.csv')
99 | JIRA_DATASET_SCRUM_BOARDS = __get_jira_dataset('scrum-boards.csv')
100 | JIRA_DATASET_KANBAN_BOARDS = __get_jira_dataset('kanban-boards.csv')
101 | JIRA_DATASET_USERS = __get_jira_dataset('users.csv')
102 | JIRA_DATASET_ISSUES = __get_jira_dataset('issues.csv')
103 | JIRA_DATASET_PROJECTS = __get_jira_dataset('projects.csv')
104 | JIRA_DATASET_CUSTOM_ISSUES = __get_jira_dataset('custom-issues.csv')
105 |
106 | JSM_YML = __get_jsm_yml()
107 | JSM_DATASETS = __get_jsm_datasets()
108 | JSM_DATASET_AGENTS = __get_jsm_dataset('agents.csv')
109 | JSM_DATASET_CUSTOMERS = __get_jsm_dataset('customers.csv')
110 | JSM_DATASET_REQUESTS = __get_jsm_dataset('requests.csv')
111 | JSM_DATASET_SERVICE_DESKS_L = __get_jsm_dataset('service_desks_large.csv')
112 | JSM_DATASET_SERVICE_DESKS_M = __get_jsm_dataset('service_desks_medium.csv')
113 | JSM_DATASET_SERVICE_DESKS_S = __get_jsm_dataset('service_desks_small.csv')
114 | JSM_DATASET_REQUEST_TYPES = __get_jsm_dataset('request_types.csv')
115 | JSM_DATASET_CUSTOM_ISSUES = __get_jsm_dataset('custom-issues.csv')
116 | JSM_DATASET_INSIGHT_ISSUES = __get_jsm_dataset('insight_issues.csv')
117 | JSM_DATASET_INSIGHT_SCHEMAS = __get_jsm_dataset('insight_schemas.csv')
118 |
119 | CONFLUENCE_YML = __get_confluence_yml()
120 | CONFLUENCE_DATASETS = __get_confluence_datasets()
121 | CONFLUENCE_USERS = __get_confluence_dataset('users.csv')
122 | CONFLUENCE_PAGES = __get_confluence_dataset('pages.csv')
123 | CONFLUENCE_BLOGS = __get_confluence_dataset('blogs.csv')
124 | CONFLUENCE_STATIC_CONTENT = __get_confluence_dataset('static-content/files_upload.csv')
125 | CONFLUENCE_CUSTOM_PAGES = __get_confluence_dataset('custom_pages.csv')
126 | CONFLUENCE_WORDS = __get_confluence_dataset('static-content/words.csv')
127 |
128 | BITBUCKET_YML = __get_bitbucket_yml()
129 | BITBUCKET_DATASETS = __get_bitbucket_datasets()
130 | BITBUCKET_USERS = __get_bitbucket_dataset('users.csv')
131 | BITBUCKET_PROJECTS = __get_bitbucket_dataset('projects.csv')
132 | BITBUCKET_REPOS = __get_bitbucket_dataset('repos.csv')
133 | BITBUCKET_PRS = __get_bitbucket_dataset('pull_requests.csv')
134 |
135 | CROWD_YML = __get_crowd_yml()
136 | CROWD_DATASETS = __get_crowd_datasets()
137 | CROWD_USERS = __get_crowd_dataset('users.csv')
138 |
139 | BAMBOO_YML = __get_bamboo_yml()
140 | BAMBOO_DATASETS = __get_bamboo_datasets()
141 | BAMBOO_BUILD_PLANS = __get_bamboo_dataset('build_plans.csv')
142 | BAMBOO_USERS = __get_bamboo_dataset('users.csv')
143 |
144 |
145 | DEFAULT_TEST_ACTIONS = __get_default_test_actions()
146 | ENV_TAURUS_ARTIFACT_DIR = __get_taurus_artifacts_dir()
147 |
--------------------------------------------------------------------------------
/docs/bitbucket/README.md:
--------------------------------------------------------------------------------
1 | # User Guide for Bitbucket
2 | https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bitbucket/
3 |
4 | # Running tests
5 | ## Pre-requisites
6 | * Working Bitbucket Server of supported version ([toolkit README](../../README.md) for a list of supported Bitbucket versions) with repos, etc.
7 | * Client machine with 4 CPUs and 16 GBs of RAM to run the Toolkit.
8 | * Virtual environment with Python and bzt installed. See the root [toolkit README](../../README.md) file for more details.
9 | * [Git client](https://git-scm.com/downloads)
10 |
11 | If you need performance testing results at a production level, follow instructions described
12 | in the official User Guide to set up Bitbucket DC with the corresponding dataset.
13 | For spiking, testing, or developing, your local Bitbucket instance would work well.
14 |
15 | ## Step 1: Update bitbucket.yml
16 | * `application_hostname`: test bitbucket hostname (without http).
17 | * `application_protocol`: http or https.
18 | * `application_port`: 80 (for http) or 443 (for https), 8080, 7990 or your instance-specific port.
19 | * `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate.
20 | * `application_postfix`: it is empty by default; e.g., /bitbucket for a url like http://localhost:7990/bitbucket.
21 | * `admin_login`: bitbucket admin user name (after restoring the dataset from the SQL dump, the admin user name is: admin).
22 | * `admin_password`: bitbucket admin user password (after restoring the dataset from the SQL dump, the admin user password is: admin).
23 | * `concurrency`: `20` - number of concurrent users for JMeter scenario.
24 | * `test_duration`: `50m` - duration of test execution.
25 | * `ramp-up`: `10m` - amount of time it will take JMeter to add all test users to test execution.
26 | * `total_actions_per_hour`: `32700` - number of total JMeter actions per hour.
27 | * `WEBDRIVER_VISIBLE`: visibility of Chrome browser during selenium execution (False is by default).
28 |
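For reference, a minimal illustrative set of values for the settings above (the hostname and port are placeholders and the exact layout of your bitbucket.yml may differ):
```
application_hostname: bitbucket-instance.example.com   # without http/https
application_protocol: http
application_port: 7990
secure: True
application_postfix:          # e.g. /bitbucket for http://localhost:7990/bitbucket
admin_login: admin
admin_password: admin
concurrency: 20
test_duration: 50m
ramp-up: 10m
total_actions_per_hour: 32700
WEBDRIVER_VISIBLE: False
```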
29 | ## Step 2: Run tests
30 | Run Taurus.
31 | ```
32 | bzt bitbucket.yml
33 | ```
34 |
35 | ## Results
36 | Results are located in the `results/bitbucket/YY-MM-DD-hh-mm-ss` directory:
37 | * `bzt.log` - log of bzt run
38 | * `error_artifacts` - folder with screenshots and HTMLs of Selenium fails
39 | * `jmeter.err` - JMeter errors log
40 | * `kpi.jtl` - JMeter raw data
41 | * `pytest.out` - detailed log of Selenium execution, including stacktraces of Selenium fails
42 | * `selenium.jtl` - Selenium raw data
43 | * `results.csv` - consolidated results of execution
44 | * `results_summary.log` - detailed summary of the run. Make sure that the overall run status is `OK` before moving to the
45 | next steps.
46 |
47 |
48 | # Useful information
49 |
50 | ## JMeter
51 | ### Opening JMeter scripts
52 | 1. Open JMeter UI as described in [README.md](../../app/util/jmeter/README.md).
53 | 1. On the `View Results Tree` controller, click the `Browse` button and open `error.jtl` from `app/results/bitbucket/YY-MM-DD-hh-mm-ss` folder.
54 |
55 | From this view, you can click on any failed action and see the request and response data in appropriate tabs.
56 |
57 | ## Selenium
58 | ### Debugging Selenium scripts
59 | Detailed log and stacktrace of Selenium PyTest fails are located in the `results/bitbucket/YY-MM-DD-hh-mm-ss/pytest.out` file.
60 |
61 | Also, screenshots and HTMLs of Selenium fails are stored in the `results/bitbucket/YY-MM-DD-hh-mm-ss/error_artifacts` folder.
62 |
63 | ### Running Selenium tests with Browser GUI
64 | In [bitbucket.yml](../../app/bitbucket.yml) file, set the `WEBDRIVER_VISIBLE: True`.
65 |
66 |
67 | ### Running Selenium tests locally without the Performance Toolkit
68 | 1. Activate the virtualenv for the Performance Toolkit.
69 | 1. Navigate to the selenium folder using the `cd app/selenium_ui` command.
70 | 1. In [bitbucket.yml](../../app/bitbucket.yml) file, set the `WEBDRIVER_VISIBLE: True`.
71 | 1. Run all Selenium PyTest tests with the `pytest bitbucket_ui.py` command.
72 | 1. To run one Selenium PyTest test (e.g., `test_1_selenium_view_dashboard`), execute the first login test and the required one with this command:
73 |
74 | `pytest bitbucket_ui.py::test_0_selenium_a_login bitbucket_ui.py::test_1_selenium_view_dashboard`.
75 |
76 |
77 | ### Comparing different runs
78 | Navigate to the `reports_generation` folder and follow README.md instructions to generate side-by-side comparison charts.
79 |
80 | ### Run prepare data script locally
81 | 1. Activate the virtualenv for the Performance Toolkit.
82 | 2. Navigate to the `app` folder.
83 | 3. Set PYTHONPATH to the full path of the `app` folder with the command:
84 | ```bash
85 | export PYTHONPATH=`pwd` # for mac or linux
86 | set PYTHONPATH=%cd% # for windows
87 | ```
88 |
89 | 4. Run prepare data script:
90 | ```bash
91 | python util/data_preparation/bitbucket_prepare_data.py
92 | ```
93 |
--------------------------------------------------------------------------------
/docs/crowd/README.md:
--------------------------------------------------------------------------------
1 | # User Guide for Crowd
2 | https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-crowd/
3 |
4 | # Running tests
5 | ## Pre-requisites
6 | * Working Crowd Software of supported version ([toolkit README](../../README.md) for a list of supported Crowd versions) with users, groups, etc.
7 | * Client machine with 4 CPUs and 16 GBs of RAM to run the Toolkit.
8 | * Virtual environment with Python and bzt installed. See the root [toolkit README](../../README.md) file for more details.
9 |
10 | If you need performance testing results at a production level, follow instructions described
11 | in the official User Guide to set up Crowd DC with the corresponding dataset.
12 | For spiking, testing, or developing, your local Crowd instance would work well.
13 |
14 | ## Step 1: Update crowd.yml
15 | * `application_hostname`: test crowd hostname (without http).
16 | * `application_protocol`: http or https.
17 | * `application_port`: 80 (for http) or 443 (for https), 8080, 4990 or your instance-specific port.
18 | * `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate.
19 | * `application_postfix`: it is empty by default; e.g., /crowd for a url like http://localhost:4990/crowd.
20 | * `admin_login`: crowd admin username (after restoring the dataset from the SQL dump, the admin user name is: admin).
21 | * `admin_password`: crowd admin user password (after restoring the dataset from the SQL dump, the admin user password is: admin).
22 | * `application_name`: name of crowd application.
23 | * `application_password`: password of crowd application.
24 | * `load_executor`: `jmeter`.
25 | * `concurrency`: `1000` - number of concurrent users for JMeter scenario.
26 | * `test_duration`: `45m` - duration of test execution.
27 |
28 | ## Step 2: Run tests
29 | Run Taurus.
30 | ```
31 | bzt crowd.yml
32 | ```
33 |
34 | ## Results
35 | Results are located in the `results/crowd/YY-MM-DD-hh-mm-ss` directory:
36 | * `bzt.log` - log of bzt run
37 | * `error_artifacts` - folder with screenshots and HTMLs of Selenium fails
38 | * `jmeter.err` - JMeter errors log
39 | * `kpi.jtl` - JMeter raw data
40 | * `results.csv` - consolidated results of execution
41 | * `results_summary.log` - detailed summary of the run. Make sure that the overall run status is `OK` before moving to the
42 | next steps.
43 |
44 |
45 | # Useful information
46 |
47 | ## Changing performance workload for JMeter
48 | The [crowd.yml](../../app/crowd.yml) has three pairs of parameters for different workloads, depending on the Crowd instance node count.
49 | ```
50 | # 1 node scenario parameters
51 | ramp-up: 20s # time to spin all concurrent threads
52 | total_actions_per_hour: 180000 # number of total JMeter actions per hour
53 |
54 | # 2 nodes scenario parameters
55 | # ramp-up: 10s # time to spin all concurrent threads
56 | # total_actions_per_hour: 360000 # number of total JMeter actions per hour
57 |
58 | # 4 nodes scenario parameters
59 | # ramp-up: 5s # time to spin all concurrent threads
60 | # total_actions_per_hour: 720000 # number of total JMeter actions per hour
61 | ```
62 | Uncomment the appropriate part of the config to produce the necessary instance workload.
63 | For app-specific action development and testing it is OK to reduce concurrency, test_duration, total_actions_per_hour and ramp-up.
64 |
65 | ## JMeter
66 | ### Debugging JMeter scripts
67 | 1. Open JMeter UI as described in [README.md](../../app/util/jmeter/README.md).
68 | 1. On the `View Results Tree` controller, click the `Browse` button and open `error.jtl` from `app/results/crowd/YY-MM-DD-hh-mm-ss` folder.
69 |
70 | From this view, you can click on any failed action and see the request and response data in appropriate tabs.
71 |
72 | ### Run JMeter actions via GUI
73 | 1. Open JMeter UI as described in [README.md](../../app/util/jmeter/README.md).
74 | 1. Enable the `View Results Tree` controller and click `Run` the test scenario.
75 |
76 | ### Comparing different runs
77 | Navigate to the `reports_generation` folder and follow README.md instructions to generate side-by-side comparison charts.
78 |
79 | ### Run prepare data script locally
80 | 1. Activate the virtualenv for the Performance Toolkit.
81 | 2. Navigate to the `app` folder.
82 | 3. Set PYTHONPATH to the full path of the `app` folder with the command:
83 | ```bash
84 | export PYTHONPATH=`pwd` # for mac or linux
85 | set PYTHONPATH=%cd% # for windows
86 | ```
87 | 4. Run prepare data script:
88 | ```bash
89 | python util/data_preparation/crowd_prepare_data.py
90 | ```
91 |
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "config:base"
4 | ],
5 | "baseBranches": ["dev"],
6 | "ignorePaths": ["src/test/**"]
7 | }
8 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib==3.10.1
2 | pandas==2.2.3
3 | pytest==8.3.5
4 | locust==2.35.0
5 | selenium==4.31.0
6 | filelock==3.18.0
7 | packaging==24.2
8 | prettytable==3.16.0
9 | bzt==1.16.32 # bzt 1.16.34 has pinned setuptools==65.5.0, which does not have distutils
10 | boto3==1.37.30
11 | retry==0.9.2
12 |
--------------------------------------------------------------------------------