├── .env-example
├── .github
│   └── workflows
│       ├── docker-image.yml
│       └── major-version-updater.yml
├── .gitignore
├── CODEOWNERS
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── README.md
├── action.yml
├── evaluate_workflow_runs.py
├── get_workflow_runs.py
├── test_evaluate_workflow_runs.py
├── test_get_workflow_runs.py
├── test_workflow_metrics.py
└── workflow_metrics.py
/.env-example:
--------------------------------------------------------------------------------
1 | GH_TOKEN=myPAT
2 | OWNER_NAME=myOrg
3 | START_DATE=2023-07-24
4 | END_DATE=2023-07-31
5 | REPO_NAME=myRepo
6 | DELAY_BETWEEN_QUERY=1
--------------------------------------------------------------------------------
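
The variables above are read from the environment by the scripts in this repository. For running the scripts locally, a minimal sketch like the following can load the example file into the process environment (the `load_env_example` helper is illustrative, not part of the repository):

```python
import os

def load_env_example(path: str = '.env-example') -> None:
    """Parse simple KEY=VALUE lines into os.environ (illustration only)."""
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue  # skip blank lines and comments
            key, _, value = line.partition('=')
            os.environ.setdefault(key.strip(), value.strip())

load_env_example()
print(os.environ.get('OWNER_NAME'))  # -> myOrg
```
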
/.github/workflows/docker-image.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Docker Image CI
3 |
4 | on:
5 | push:
6 | branches: [ main ]
7 |
8 | jobs:
9 |
10 | build:
11 |
12 | runs-on: ubuntu-latest
13 |
14 | permissions:
15 | contents: read
16 | packages: write
17 |
18 | steps:
19 | - uses: actions/checkout@v3
20 |
21 | - name: Log in to the Container registry
22 | uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
23 | with:
24 | registry: ghcr.io
25 | username: ${{ github.actor }}
26 | password: ${{ secrets.GITHUB_TOKEN }}
27 |
28 | - name: Build the Docker image
29 | run: |
30 | docker build . --file Dockerfile --platform linux/amd64 -t ghcr.io/kittychiu/workflow-metrics:v1
31 | docker push ghcr.io/kittychiu/workflow-metrics:v1
32 |
--------------------------------------------------------------------------------
/.github/workflows/major-version-updater.yml:
--------------------------------------------------------------------------------
1 | name: Major Version Updater
2 | # Whenever a new release is made, push a major version tag
3 | on:
4 | release:
5 | types: [ published ]
6 |
7 | jobs:
8 | update-major-version-tag:
9 | runs-on: ubuntu-latest
10 |
11 | steps:
12 | - uses: actions/checkout@v3
13 |
14 | - name: Get major version num and update tag
15 | run: |
16 | MAJOR=$(echo "$GITHUB_REF_NAME" | grep -oE '^v[0-9]+')
17 | if [ -n "$MAJOR" ]; then
18 | git config --global user.name 'Kitty Chiu'
19 | git config --global user.email '42864823+KittyChiu@users.noreply.github.com'
20 | git tag -fa $MAJOR -m "Update major version tag"
21 | git push origin $MAJOR --force
22 | else
23 | echo "No major version found in tag name. Ensure that the tag name follows the Semantic Versioning Specification (https://semver.org/)."
24 | fi
--------------------------------------------------------------------------------
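
The shell step above extracts the leading `v<major>` portion of the release tag name (e.g. `v0` from `v0.4.7`) and force-pushes it as a floating tag. A hedged Python equivalent of that extraction logic, for illustration only:

```python
import re
from typing import Optional

def major_version_tag(tag_name: str) -> Optional[str]:
    """Return the leading major-version tag, e.g. 'v0' from 'v0.4.7'."""
    match = re.match(r'^v\d+', tag_name)
    return match.group(0) if match else None

assert major_version_tag('v0.4.7') == 'v0'
assert major_version_tag('1.2.3') is None  # no leading 'v', as the workflow warns
```
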
/.gitignore:
--------------------------------------------------------------------------------
1 | # MacOS
2 | *.DS_store
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 | cover/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 | db.sqlite3-journal
66 |
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 |
71 | # Scrapy stuff:
72 | .scrapy
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | .pybuilder/
79 | target/
80 |
81 | # Jupyter Notebook
82 | .ipynb_checkpoints
83 |
84 | # IPython
85 | profile_default/
86 | ipython_config.py
87 |
88 | # pyenv
89 | # For a library or package, you might want to ignore these files since the code is
90 | # intended to run in multiple environments; otherwise, check them in:
91 | # .python-version
92 |
93 | # pipenv
94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
97 | # install all needed dependencies.
98 | #Pipfile.lock
99 |
100 | # poetry
101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
102 | # This is especially recommended for binary packages to ensure reproducibility, and is more
103 | # commonly ignored for libraries.
104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
105 | #poetry.lock
106 |
107 | # pdm
108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
109 | #pdm.lock
110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
111 | # in version control.
112 | # https://pdm.fming.dev/#use-with-ide
113 | .pdm.toml
114 |
115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116 | __pypackages__/
117 |
118 | # Celery stuff
119 | celerybeat-schedule
120 | celerybeat.pid
121 |
122 | # SageMath parsed files
123 | *.sage.py
124 |
125 | # Environments
126 | .env
127 | .venv
128 | env/
129 | venv/
130 | ENV/
131 | env.bak/
132 | venv.bak/
133 |
134 | # Spyder project settings
135 | .spyderproject
136 | .spyproject
137 |
138 | # Rope project settings
139 | .ropeproject
140 |
141 | # mkdocs documentation
142 | /site
143 |
144 | # mypy
145 | .mypy_cache/
146 | .dmypy.json
147 | dmypy.json
148 |
149 | # Pyre type checker
150 | .pyre/
151 |
152 | # pytype static type analyzer
153 | .pytype/
154 |
155 | # Cython debug symbols
156 | cython_debug/
157 |
158 | # PyCharm
159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161 | # and can be added to the global gitignore or merged into this file. For a more nuclear
162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163 | #.idea/
164 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @KittyChiu
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to `workflow-metrics`
2 |
3 | Thank you for your interest in contributing to `workflow-metrics`! We welcome contributions from everyone, and we appreciate your help in making this action better.
4 |
5 | ## Getting Started
6 |
7 | Before you start contributing, please make sure that you have read the [README.md](./README.md) file and have a basic understanding of the action.
8 |
9 | ## How to Contribute
10 |
11 | There are many ways to contribute to `workflow-metrics`, from writing code to submitting bug reports. Here are some ways that you can contribute:
12 |
13 | ### Submitting Issues
14 |
15 | If you find a bug or have a feature request, please submit an issue on the [GitHub issue tracker](https://github.com/kittychiu/workflow-metrics/issues). Please include as much detail as possible, including steps to reproduce the issue and any relevant error messages.
16 |
17 | ### Contributing Code
18 |
19 | If you would like to contribute code to `workflow-metrics`, please follow these steps:
20 |
21 | 1. Fork the repository on GitHub.
22 | 2. Clone your forked repository to your local machine.
23 | 3. Create a new branch for your changes.
24 | 4. Make your changes and commit them to your branch.
25 | 5. Push your changes to your forked repository on GitHub.
26 | 6. Submit a pull request to the main repository.
27 |
28 | Please make sure that your code follows the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide and includes tests for any new functionality.
29 |
30 | ### Contributing Documentation
31 |
32 | If you would like to contribute documentation to `workflow-metrics`, please follow these steps:
33 |
34 | 1. Fork the repository on GitHub.
35 | 2. Clone your forked repository to your local machine.
36 | 3. Create a new branch for your changes.
37 | 4. Make your changes and commit them to your branch.
38 | 5. Push your changes to your forked repository on GitHub.
39 | 6. Submit a pull request to the main repository.
40 |
41 | Please make sure that your documentation follows the [Markdown syntax](https://www.markdownguide.org/basic-syntax/) and is clear and concise.
42 |
43 | ## Code of Conduct
44 |
45 | Please note that this project is released with a [Contributor Code of Conduct](./CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
46 |
47 | ## License
48 |
49 | `workflow-metrics` is released under the [MIT License](./LICENSE).
50 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9-slim-buster
2 | LABEL org.opencontainers.image.source="https://github.com/kittychiu/workflow-metrics"
3 |
4 | COPY *.py /
5 |
6 | # Update pip
7 | RUN python -m pip install --upgrade pip
8 |
9 | # Install the GitHub CLI and jq
10 | RUN apt-get update && \
11 | apt-get install -y gnupg && \
12 | apt-get install -y curl && \
13 | curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | gpg --dearmor -o /usr/share/keyrings/githubcli-archive-keyring.gpg && \
14 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null && \
15 | apt-get update && \
16 | apt-get install -y gh && \
17 | apt-get install -y jq
18 |
19 | CMD ["python", "/workflow_metrics.py"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Kitty Chiu
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Workflow Metrics Action
2 |
3 | [](https://github.com/KittyChiu/workflow-metrics/actions/workflows/github-code-scanning/codeql) [](https://github.com/KittyChiu/workflow-metrics/actions/workflows/docker-image.yml)
4 |
5 | This GitHub Action provides a way to evaluate statistics for your GitHub Actions workflows. With this action, you can easily monitor the performance of your workflows and identify areas for improvement.
6 |
7 | Metrics that are evaluated are:
8 |
9 | | Metric | Unit of measure | Description |
10 | | --- | --- | --- |
11 | | Average duration | second | Average duration across all runs of the workflow. |
12 | | Median duration | second | Median duration across all runs of the workflow. |
13 | | Total number of runs | workflow run | Total number of workflow runs. |
14 | | Success rate | percentage | Percentage of runs for the workflow with a conclusion of either `success` or `skipped`. |
15 |
16 | ## Example use cases
17 |
18 | - As a product engineer, I want to identify areas of improvement for my process automation, so that I can improve delivery in next iteration.
19 | - As an engineering manager, I want to identify waste and inefficiencies in my SDLC process, so that I can reduce cycle time and improve velocity.
20 | - As a DevOps platform owner, I want to identify long-running workflows, so that I can right-size the runners.
21 |
22 | ## Configurations
23 |
24 | The following options are available for configuring the action:
25 |
26 | | Configuration | Required | Default | Description |
27 | | --- | --- | --- | --- |
28 | | `GH_TOKEN` | Yes | N/A | A GitHub token with access to the repository. Minimal scope is `repo` |
29 | | `OWNER_NAME` | Yes | N/A | Name of the repository owner. |
30 | | `REPO_NAME` | No | N/A | Name of the repository. If `REPO_NAME` is not provided, the action will analyse all the workflow runs in the organisation. |
31 | | `START_DATE` | Yes | N/A | Start date for the workflow runs data set. This should be in the format `YYYY-MM-DD`. |
32 | | `END_DATE` | Yes | N/A | End date for the workflow runs data set. This should be in the format `YYYY-MM-DD`. |
33 | | `DELAY_BETWEEN_QUERY` | No | N/A | No. of seconds to wait between queries to the GitHub API. This is to prevent errors from rate limiting when analysing the whole org. |
34 | | `workflow-names.txt` | No | N/A | A file that contains a list of selected workflow names to filter the result. This should be in the runner's workspace folder. |
35 |
36 | ## Outputs
37 |
38 | After the action has completed, two files will be created in the root of the runner workspace:
39 |
40 | - `runs.json` or `org-runs.json` - a JSON array of all workflow runs in the specified time range for the specified repository or organization.
41 | - `workflow-stats.csv` or `org-workflow-stats.csv` - a CSV file with workflow run statistics for the specified repository or organization.
42 |
43 | These data files can then be used for further analysis or reporting in a visualizer of your choice. For example, you can ingest them into a datastore and visualize them with Power BI. Below are some examples of generating a markdown table and a mermaid diagram from the data files.
44 |
45 | ## Example usages
46 |
47 | To use this action, simply include it in your workflow file:
48 |
49 | ### 1. Basic usage
50 |
51 | This will analyse workflow runs in the selected repository, including the durations and success rate of each workflow.
52 |
53 |
54 |
55 | ```yml
56 | name: My Workflow
57 | on: workflow_dispatch
58 | jobs:
59 | evaluate-actions-consumption:
60 | runs-on: ubuntu-latest
61 | steps:
62 | - name: Call workflow-metrics action
63 | uses: kittychiu/workflow-metrics@v0.4.7
64 | env:
65 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
66 | OWNER_NAME: "myOrg"
67 | REPO_NAME: "myRepo"
68 | START_DATE: "2023-07-01"
69 | END_DATE: "2023-08-01"
70 |
71 | - name: Upload all .txt .csv .md files to artifact
72 | uses: actions/upload-artifact@v3
73 | with:
74 | name: workflow-stats
75 | path: |
76 | workflow-stats.csv
77 | runs.json
78 | ```
79 |
80 | Below is an example of the `workflow-stats.csv` file:
81 |
82 | ```csv
83 | workflow_name,average_duration,median_duration,success_rate,total_runs
84 | workflow_1,12.33,12.00,100.00,3
85 | workflow_3,25.12,22.00,20.93,43
86 | workflow_2,15.50,15.50,50.00,2
87 | ```
88 |
89 |
90 |
91 | ### 2. Weekly report on selected repository and post to a GitHub Issue
92 |
93 | This further converts the `workflow-stats.csv` file containing workflow metrics into a markdown table and a mermaid diagram, and publishes them to a new issue. An example of the rendered outputs is [in this Issue](https://github.com/KittyChiu/workflow-metrics/issues/17).
94 |
95 |
96 |
97 | ```yml
98 | name: Weekly Retrospective Report
99 |
100 | on:
101 | schedule:
102 | - cron: '0 12 * * 5'
103 |
104 | jobs:
105 | evaluate-actions-consumption:
106 | runs-on: ubuntu-latest
107 | env:
108 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
109 | OWNER_NAME: ${{ github.repository_owner }}
110 |
111 | steps:
112 | - name: Checkout workflow-names.txt
113 | uses: actions/checkout@v3
114 |
115 | - name: Set dates and repo name
116 | run: |
117 | echo "START_DATE=$(date -d '-1 month' +%Y-%m-%d)" >> "$GITHUB_ENV"
118 | echo "END_DATE=$(date +%Y-%m-%d)" >> "$GITHUB_ENV"
119 |
120 | repo=$(echo "${{ github.repository }}" | cut -d'/' -f2)
121 | echo "REPO_NAME=${repo}" >> $GITHUB_ENV
122 |
123 | - name: Call workflow-metrics action
124 | uses: kittychiu/workflow-metrics@v0.4.7
125 | env:
126 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
127 | REPO_NAME: ${{ env.REPO_NAME }}
128 | START_DATE: ${{ env.START_DATE }}
129 | END_DATE: ${{ env.END_DATE }}
130 |
131 | - name: Convert workflow-stats.CSV to stats-table.md markdown table
132 | run: |
133 | echo -e "## Table View\n" > stats-table.md
134 | header=$(head -n 1 workflow-stats.csv | sed 's/,/|/g' | sed 's/_/ /g')
135 | echo -e "|${header}|" >> stats-table.md
136 | metadata=$(head -n 1 workflow-stats.csv | sed 's/,/|/g' | sed 's/[^|]/-/g')
137 | echo -e "|${metadata}|" >> stats-table.md
138 | tail -n +2 workflow-stats.csv | sed 's/,/|/g; s/^/|/; s/$/|/' >> stats-table.md
139 |
140 | - name: Convert workflow-stats.CSV to stream-diagram.md mermaid diagram
141 | run: |
142 | echo -e "## Value Stream View\n" > stream-diagram.md
143 | echo -e '```mermaid' >> stream-diagram.md
144 | echo -e 'timeline' >> stream-diagram.md
145 | head -n 1 workflow-stats.csv | sed 's/,/ : /g' | sed 's/_/ /g' | awk -F'|' '{for(i=1;i<=NF;i++) printf("%s%s", " ", $i, i==NF?"\n":", ")}' | sed 's/^/ /' >> stream-diagram.md
146 | tail -n +2 workflow-stats.csv | sed 's/,/ : /g' | awk -F'|' '{for(i=1;i<=NF;i++) printf("%s%s", "\n ", $i, i==NF?"\n":", ")}' | sed 's/^/ /' >> stream-diagram.md
147 | echo -e '\n```' >> stream-diagram.md
148 |
149 | - name: Combine into issue content
150 | run: |
151 | echo "Combine output files"
152 | cat stream-diagram.md stats-table.md > issue_view.md
153 |
154 | - name: Publish content to a new GitHub Issue
155 | uses: peter-evans/create-issue-from-file@v4
156 | with:
157 | title: Workflow runs summary for `${{ env.REPO_NAME }}` repo (${{ env.START_DATE }} - ${{ env.END_DATE }})
158 | content-filepath: issue_view.md
159 |
160 | - name: Upload all .txt .csv .md files to artifact
161 | uses: actions/upload-artifact@v3
162 | with:
163 | name: workflow-stats
164 | path: |
165 | stats-table.md
166 | stream-diagram.md
167 | workflow-stats.csv
168 | runs.json
169 | ```
170 |
171 | Example content of `workflow-names.txt`:
172 |
173 | ```
174 | workflow_1
175 | workflow_2
176 | workflow_3
177 | ```
178 |
179 | Below is an example of the `stats-table.md` file:
180 |
181 | ```md
182 | |workflow name|average duration|median duration|success rate|total runs|
183 | |-------------|----------------|---------------|------------|----------|
184 | |workflow_1|17.00|17.00|100.00|1|
185 | |workflow_2|36.17|36.50|53.70|54|
186 | |workflow_3|3.00|2.00|100.00|3|
187 | ```
188 |
189 | Below is an example of the `stream-diagram.md` file:
190 |
191 | ```mermaid
192 | timeline
193 | workflow name : average duration : median duration : success rate : total runs
194 | CI Build : 17.00 : 17.00 : 100.00 : 1
195 | QA & Validation : 36.17 : 36.50 : 53.70 : 54
196 | Deploy to non-prod : 3.00 : 2.00 : 100.00 : 3
197 | ```
198 |
199 |
200 |
201 | ### 3. Monthly report for the whole org and post to a GitHub Issue
202 |
203 | This will analyse workflow runs in the selected organisation, including all workflows for each repository. An example of the rendered output is [in this Issue](https://github.com/KittyChiu/workflow-metrics/issues/18).
204 |
205 |
206 |
207 |
208 |
209 | ```yml
210 | name: Monthly SLOs Report
211 |
212 | on:
213 | schedule:
214 | - cron: '0 0 1 * *'
215 |
216 | jobs:
217 | evaluate-actions-consumption:
218 | runs-on: ubuntu-latest
219 | env:
220 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
221 |
222 | steps:
223 | - name: Set dates
224 | run: |
225 | echo "START_DATE=$(date -d '-14 days' +%Y-%m-%d)" >> "$GITHUB_ENV"
226 | echo "END_DATE=$(date +%Y-%m-%d)" >> "$GITHUB_ENV"
227 |
228 | - name: Call workflow-metrics action
229 | uses: kittychiu/workflow-metrics@v0.4.7
230 | env:
231 | GH_TOKEN: ${{ env.GH_TOKEN }}
232 | OWNER_NAME: ${{ github.repository_owner }}
233 | START_DATE: ${{ env.START_DATE }}
234 | END_DATE: ${{ env.END_DATE }}
235 | DELAY_BETWEEN_QUERY: 5
236 |
237 | - name: Convert org-workflow-stats.csv to stats-table.md markdown table
238 | run: |
239 | echo -e "## Table View\n" > stats-table.md
240 | header=$(head -n 1 org-workflow-stats.csv | sed 's/,/|/g' | sed 's/_/ /g')
241 | echo -e "|${header}|" >> stats-table.md
242 | metadata=$(head -n 1 org-workflow-stats.csv | sed 's/,/|/g' | sed 's/[^|]/-/g')
243 | echo -e "|${metadata}|" >> stats-table.md
244 | tail -n +2 org-workflow-stats.csv | sed 's/,/|/g; s/^/|/; s/$/|/' >> stats-table.md
245 |
246 | - name: Publish result to a new issue
247 | uses: peter-evans/create-issue-from-file@v4
248 | with:
249 | title: Workflow runs summary for `${{ github.repository_owner }}` org (${{ env.START_DATE }} - ${{ env.END_DATE }})
250 | content-filepath: stats-table.md
251 |
252 | - name: Upload all .txt .csv .md files to artifact
253 | uses: actions/upload-artifact@v3
254 | with:
255 | name: workflow-stats
256 | path: |
257 | stats-table.md
258 | org-workflow-stats.csv
259 | org-runs.json
260 | ```
261 |
262 | Below is an example of the `stats-table.md` file:
263 |
264 | ```md
265 | |repository name|workflow name|average duration|median duration|success rate|total runs|
266 | |---------------|-------------|----------------|---------------|------------|----------|
267 | |repo_1|Test|3.00|3.00|100.00|1|
268 | |repo_1|Build|20.20|17.00|80.00|5|
269 | |repo_1|Deploy|17.00|17.00|100.00|1|
270 | |repo_2|Custom Validation|2.00|2.00|100.00|1|
271 | |repo_2|Linter|2.00|2.00|100.00|1|
272 | |repo_3|Superlinter|25.38|23.00|30.00|50|
273 | |repo_3|Long Build|36.17|36.50|53.70|54|
274 | |repo_3|Smoke Test|19.69|14.00|23.08|13|
275 | ```
276 |
277 |
278 |
279 | ## Contributing
280 |
281 | Please see the [contributing guidelines](CONTRIBUTING.md) for more information.
282 |
283 | ## License
284 |
285 | This project is licensed under the [MIT License](LICENSE).
286 |
--------------------------------------------------------------------------------
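
The sed pipelines in the README examples above rebuild `workflow-stats.csv` as a markdown table. A rough Python equivalent, as a sketch assuming `workflow-stats.csv` is present in the working directory:

```python
import csv

def csv_to_markdown(csv_path: str = 'workflow-stats.csv') -> str:
    """Render the stats CSV as a markdown table, mirroring the sed pipeline."""
    with open(csv_path, newline='') as f:
        rows = list(csv.reader(f))
    header = [col.replace('_', ' ') for col in rows[0]]
    table = ['|' + '|'.join(header) + '|',
             '|' + '|'.join('-' * len(col) for col in header) + '|']
    table += ['|' + '|'.join(row) + '|' for row in rows[1:]]
    return '\n'.join(table)

if __name__ == '__main__':
    print(csv_to_markdown())
```
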
/action.yml:
--------------------------------------------------------------------------------
1 | name: Workflow Metrics
2 | author: kittychiu
3 | description: Generate metrics for historical workflow runs
4 | runs:
5 | using: 'docker'
6 | image: 'docker://ghcr.io/kittychiu/workflow-metrics:v1'
7 | branding:
8 | icon: 'rss'
9 | color: 'orange'
--------------------------------------------------------------------------------
/evaluate_workflow_runs.py:
--------------------------------------------------------------------------------
1 | """
2 | This script evaluates the stats for each workflow in the `runs.json` file and outputs the results to a CSV file.
3 |
4 | Usage:
5 | python evaluate_workflow_runs.py
6 |
7 | Requirements:
8 | - Python 3.x
9 | - `runs.json` file containing the workflow runs to evaluate
10 |
11 | Optional:
12 | - `workflow-names.txt` file containing the unique workflow names to evaluate
13 |
14 | Description:
15 | This script reads the `runs.json` file and extracts the workflow runs for each workflow specified in the
16 | `workflow-names.txt` file, if it exists. If `workflow-names.txt` is not found, the script evaluates all workflows
17 | in `runs.json`. For each workflow, the script calculates the average and median duration of the runs, the total
18 | number of runs, and the success rate (i.e. the percentage of successful or skipped runs).
19 |
20 | The script outputs the results to a CSV file named `workflow-stats.csv`, which contains the stats for each
21 | workflow. The CSV file has the following columns:
22 |
23 | - Workflow name: The name of the workflow.
24 | - Average duration (in seconds): The average duration across all runs for the workflow.
25 | - Median duration (in seconds): The median duration across all runs for the workflow.
26 | - Total number of runs: The total number of runs for the workflow.
27 | - Success rate (in percentage): The percentage of successful runs for the workflow.
28 |
29 | To run the script, you need to have Python 3.x installed on your system. You also need to have the `runs.json`
30 | file (and optionally the `workflow-names.txt` file) in the same directory as the script.
31 |
32 | Output:
33 | The script outputs the results to a CSV file named `workflow-stats.csv` in the same directory as the script.
34 |
35 | Example:
36 | python evaluate_workflow_runs.py
37 |
38 | Note:
39 | - The script assumes that the `runs.json` file and the `workflow-names.txt` file are in the same directory as the script.
40 | - The script assumes that the `runs.json` file contains a list of workflow runs in JSON format.
41 | - The script assumes that the `workflow-names.txt` file (if it exists) contains a list of unique workflow names to evaluate, with one name per line.
42 | - The script calculates the success rate as the percentage of successful or skipped runs out of the total number of runs.
43 | - The script includes all runs, regardless of conclusion, when calculating the average and median durations.
44 | """
45 |
46 | import os
47 | import json
48 | import statistics
49 |
50 | WORKFLOW_NAMES_FILE = 'workflow-names.txt'
51 | RUNS_FILE = 'runs.json'
52 | STATS_FILE = 'workflow-stats.csv'
53 |
54 | # Check if the workflow names file exists
55 | if os.path.isfile(WORKFLOW_NAMES_FILE):
56 | print(f' Info: {WORKFLOW_NAMES_FILE} file is found. Workflow runs will be filtered by the workflow names listed in the file.')
57 | else:
58 | print(f' Warning: {WORKFLOW_NAMES_FILE} file not found')
59 | # Load the workflow names from the RUNS_FILE
60 | with open(RUNS_FILE, 'r') as f:
61 | runs = json.load(f)
62 | workflow_names = list(set(run['name'] for run in runs))
63 | # Write the workflow names to the workflow names file
64 | with open(WORKFLOW_NAMES_FILE, 'w') as f:
65 | f.write('\n'.join(workflow_names))
66 |
67 | # Load the workflow names from the workflow names file
68 | with open(WORKFLOW_NAMES_FILE, 'r') as f:
69 | workflow_names = f.read().splitlines()
70 |
71 | # Output the results to a CSV file
72 | with open(STATS_FILE, 'w') as f:
73 | f.write('workflow_name,average_duration,median_duration,success_rate,total_runs\n')
74 |
75 | # Evaluate the stats for each workflow
76 | for workflow_name in workflow_names:
77 | print(f' Evaluating: {workflow_name}')
78 |
79 | # Filter the runs by workflow name
80 | try:
81 | with open(RUNS_FILE, 'r') as f:
82 | runs = json.load(f)
83 | runs_filtered = [run for run in runs if run['name'] == workflow_name]
84 | except FileNotFoundError:
85 | print(f'Error: {RUNS_FILE} file not found')
86 | continue
87 |
88 | # Evaluate the total number of runs
89 | total_runs = len(runs_filtered)
90 | duration_data = [run['duration'] for run in runs_filtered]
91 |
92 | if total_runs > 0:
93 | # Evaluate the average duration
94 | average_duration = f'{statistics.mean(duration_data):.2f}'
95 | # Evaluate the median duration
96 | median_duration = f'{statistics.median(duration_data):.2f}'
97 | # Evaluate the percentage of successful or skipped runs
98 | success_rate = f'{statistics.mean([1 if run["conclusion"] in ["success", "skipped"] else 0 for run in runs_filtered]) * 100:.2f}'
99 | else:
100 | average_duration = '0.00'
101 | median_duration = '0.00'
102 | success_rate = '0.00'
103 |
104 | # Output the results to a CSV file
105 | with open(STATS_FILE, 'a') as f:
106 | f.write(f'{workflow_name},{average_duration},{median_duration},{success_rate},{total_runs}\n')
107 |
108 | print(f' Evaluation completed: Results are written to {STATS_FILE}')
109 | os.remove(WORKFLOW_NAMES_FILE)
110 |
--------------------------------------------------------------------------------
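
As a worked example of the stats math above (assumed data matching the unit-test fixture): three `workflow_1` runs with durations of 12, 12 and 13 seconds, all concluding `success`, produce the row `workflow_1,12.33,12.00,100.00,3`:

```python
import statistics

# Assumed sample mirroring the test fixture for workflow_1
durations = [12, 12, 13]
conclusions = ['success', 'success', 'success']

average_duration = f'{statistics.mean(durations):.2f}'   # '12.33'
median_duration = f'{statistics.median(durations):.2f}'  # '12.00'
success_rate = f'{statistics.mean([1 if c in ("success", "skipped") else 0 for c in conclusions]) * 100:.2f}'  # '100.00'

print(f'workflow_1,{average_duration},{median_duration},{success_rate},{len(durations)}')
```
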
/get_workflow_runs.py:
--------------------------------------------------------------------------------
1 | """
2 | Retrieves all workflow runs for a repository within the specified date range.
3 |
4 | Usage:
5 | python get_workflow_runs.py <repo_owner> <repo_name> <start_date> <end_date>
6 |
7 | Arguments:
8 | repo_owner (str): The owner of the repository.
9 | repo_name (str): The name of the repository.
10 | start_date (str): The start date of the date range in ISO 8601 format.
11 | end_date (str): The end date of the date range in ISO 8601 format.
12 |
13 | Returns:
14 | A list of workflow runs with the following fields:
15 | - conclusion
16 | - created_at
17 | - display_title
18 | - event
19 | - head_branch
20 | - name
21 | - run_number
22 | - run_started_at
23 | - status
24 | - updated_at
25 | - url
26 | - duration
27 |
28 | Requirements:
29 | - Python 3.x
30 | - GitHub CLI (`gh`), authenticated with `repo` scope
31 |
32 | Description:
33 | This script retrieves all workflow runs for a repository within the specified date range. The script takes four
34 | command-line arguments: the owner of the repository, the name of the repository, the start date of the date range,
35 | and the end date of the date range. The start and end dates should be in ISO 8601 format.
36 |
37 | The script uses the GitHub API to retrieve the workflow runs for the specified repository and date range. The
38 | script requires authentication with `repo` scope with the API.
39 |
40 | The script outputs a list of workflow runs in JSON format, with the following fields for each run:
41 |
42 | - conclusion
43 | - created_at
44 | - display_title
45 | - event
46 | - head_branch
47 | - name
48 | - run_number
49 | - run_started_at
50 | - status
51 | - updated_at
52 | - url
53 | - duration
54 |
55 | To run the script, you need to have Python 3.x and the GitHub CLI (`gh`) installed on your system. You also
56 | need to have a GitHub API token with the `repo` scope.
57 |
58 | Output:
59 | - A list of workflow runs in JSON format
60 |
61 | Example:
62 | python get_workflow_runs.py octocat hello-world 2022-01-01 2022-01-31
63 | """
64 |
65 | import subprocess
66 | import json
67 | import sys
68 |
69 | from datetime import datetime
70 |
71 | RUNS_FILE = 'runs.json'
72 |
73 | # Parse the command-line arguments
74 | if len(sys.argv) != 5:
75 | print('Usage: python get_workflow_runs.py <repo_owner> <repo_name> <start_date> <end_date>')
76 | sys.exit(1)
77 |
78 | repo_owner = sys.argv[1]
79 | repo_name = sys.argv[2]
80 | start_date = sys.argv[3]
81 | end_date = sys.argv[4]
82 |
83 | # Validate the start_date and end_date arguments, keeping the original strings for the jq query
84 | try:
85 |     datetime.fromisoformat(start_date)
86 |     datetime.fromisoformat(end_date)
87 | except ValueError:
88 |     print('Error: Invalid date format. Please use ISO format (YYYY-MM-DD).')
89 |     sys.exit(1)
90 |
91 | # Parse jq query for gh api command
92 | jq_query = (
93 | f'[.workflow_runs[] '
94 | f'| select(.run_started_at >= "{start_date}" and .run_started_at <= "{end_date}") '
95 | f'| {{conclusion,created_at,display_title,event,head_branch,name,run_number,run_started_at,run_attempt,status,updated_at,url}}] '
96 | f'| select(length > 0)'
97 | )
98 |
99 | # Construct the gh api command
100 | cmd = f'gh api repos/{repo_owner}/{repo_name}/actions/runs --paginate --jq \'{jq_query}\''
101 |
102 | # Send the command and retrieve the output
103 | output = subprocess.check_output(cmd, shell=True, text=True)
104 |
105 | # Parse the output as JSON and return the workflow runs
106 | workflow_runs = []
107 | for line in output.strip().split('\n'):
108 | try:
109 | data = json.loads(line)
110 | if isinstance(data, list):
111 | workflow_runs.extend(data)
112 | else:
113 | workflow_runs.append(data)
114 | except json.JSONDecodeError:
115 | pass
116 |
117 | # Add the duration field to each workflow run, calculated as the difference between the updated_at and run_started_at fields
118 | for item in workflow_runs:
119 | updated_at = datetime.fromisoformat(item['updated_at'].replace('Z', '+00:00'))
120 | run_started_at = datetime.fromisoformat(item['run_started_at'].replace('Z', '+00:00'))
121 | duration = (updated_at - run_started_at).total_seconds()
122 | item['duration'] = duration
123 |
124 | # Write the workflow runs to the runs.json file
125 | with open(RUNS_FILE, 'w') as f:
126 | json.dump(workflow_runs, f)
127 |
128 | # Print the number of workflow runs
129 | print(f'[{repo_owner}/{repo_name}]: No. of workflow runs: {len(workflow_runs)}')
130 |
--------------------------------------------------------------------------------
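
The `duration` field above is derived from the run timestamps. Note that before Python 3.11, `datetime.fromisoformat` cannot parse a trailing `Z`, hence the `replace('Z', '+00:00')`. A minimal sketch of the same calculation, using timestamps from the test fixture:

```python
from datetime import datetime

run_started_at = '2023-08-05T01:50:57Z'
updated_at = '2023-08-05T01:51:09Z'

start = datetime.fromisoformat(run_started_at.replace('Z', '+00:00'))
end = datetime.fromisoformat(updated_at.replace('Z', '+00:00'))
print((end - start).total_seconds())  # 12.0
```
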
/test_evaluate_workflow_runs.py:
--------------------------------------------------------------------------------
1 | """
2 | This file contains unit tests for the `evaluate_workflow_runs.py` script.
3 |
4 | Usage:
5 | python -m unittest test_evaluate_workflow_runs.TestEvaluateWorkflowRuns.test_evaluate_workflow_runs
6 |
7 | Requirements:
8 | - Python 3.x
9 | - `evaluate_workflow_runs.py` script to test
10 | - `runs.json` fixture (generated by the test's setUp)
11 |
12 | Description:
13 | This script contains unit tests for the `evaluate_workflow_runs.py` script. The tests verify that the script
14 | correctly calculates the average duration of the successful runs.
15 |
16 | To run the tests, you only need Python 3.x installed on your system; the tests run against a local `runs.json`
17 | fixture and do not call the GitHub API.
18 |
19 | The tests use the `unittest` module in Python to define test cases and assertions. Each test case corresponds to
20 | a specific function in the `evaluate_workflow_runs.py` script, and tests the function's behavior under different
21 | conditions.
22 |
23 | To run the tests, you can use the following command:
24 |
25 | python -m unittest test_evaluate_workflow_runs.py
26 |
27 | This command runs the `test_evaluate_workflow_runs()` function in the `TestEvaluateWorkflowRuns` class, which
28 | executes all the test cases defined in the class.
29 |
30 | Output:
31 | - Test results for the `evaluate_workflow_runs.py` script
32 |
33 | Example:
34 | python -m unittest test_evaluate_workflow_runs.TestEvaluateWorkflowRuns.test_evaluate_workflow_runs
35 | """
36 |
37 | import unittest
38 | import json
39 | import subprocess
40 | import os
41 |
42 | class TestEvaluateWorkflowRuns(unittest.TestCase):
43 | def test_evaluate_workflow_runs(self):
44 | # Create a test workflow-names.txt file
45 | with open('workflow-names.txt', 'w') as f:
46 | f.write('workflow_1\nworkflow_2\n')
47 |
48 | # Run the evaluate_workflow_runs.py script
49 | subprocess.run(['python', 'evaluate_workflow_runs.py'])
50 |
51 | # Check the contents of the workflow-stats.csv file
52 | with open('workflow-stats.csv', 'r') as f:
53 | actual_csv_contents = f.read()
54 | print(actual_csv_contents)
55 |
56 | expected_csv_contents = 'workflow_name,average_duration,median_duration,success_rate,total_runs\nworkflow_1,12.33,12.00,100.00,3\nworkflow_2,15.50,15.50,50.00,2\n'
57 | self.assertEqual(actual_csv_contents, expected_csv_contents)
58 |
59 |
60 | def test_evaluate_workflow_runs_no_workflow_names_file(self):
61 | # Run the evaluate_workflow_runs.py script
62 | subprocess.run(['python', 'evaluate_workflow_runs.py'])
63 |
64 | # Check the contents of the workflow-stats.csv file
65 | with open('workflow-stats.csv', 'r') as f:
66 | actual_csv_contents = f.read()
67 | print(actual_csv_contents)
68 |
69 | self.assertIn('workflow_1,12.33,12.00,100.00,3\n', actual_csv_contents)
70 | self.assertIn('workflow_2,15.50,15.50,50.00,2\n', actual_csv_contents)
71 | self.assertIn('workflow_3,25.12,22.00,20.93,43\n', actual_csv_contents)
72 |
73 |
74 | def tearDown(self):
75 | # Remove the test files
76 | os.remove('runs.json')
77 | os.remove('workflow-stats.csv')
78 |
79 |
80 | def setUp(self):
81 | # Create a test runs.json file
82 | runs = [
83 | {
84 | "conclusion": "success",
85 | "created_at": "2023-08-05T01:50:57Z",
86 | "display_title": "workflow_1",
87 | "event": "schedule",
88 | "head_branch": "main",
89 | "name": "workflow_1",
90 | "run_attempt": 1,
91 | "run_number": 109,
92 | "run_started_at": "2023-08-05T01:50:57Z",
93 | "status": "completed",
94 | "updated_at": "2023-08-05T01:51:09Z",
95 | "url": "https://repo-url/actions/runs/5768112009",
96 | "duration": 12
97 | },
98 | {
99 | "conclusion": "success",
100 | "created_at": "2023-08-04T01:52:27Z",
101 | "display_title": "workflow_1",
102 | "event": "schedule",
103 | "head_branch": "main",
104 | "name": "workflow_1",
105 | "run_attempt": 1,
106 | "run_number": 108,
107 | "run_started_at": "2023-08-04T01:52:27Z",
108 | "status": "completed",
109 | "updated_at": "2023-08-04T01:52:39Z",
110 | "url": "https://repo-url/actions/runs/5757521092",
111 | "duration": 12
112 | },
113 | {
114 | "conclusion": "success",
115 | "created_at": "2023-08-03T01:51:55Z",
116 | "display_title": "workflow_1",
117 | "event": "schedule",
118 | "head_branch": "main",
119 | "name": "workflow_1",
120 | "run_attempt": 1,
121 | "run_number": 107,
122 | "run_started_at": "2023-08-03T01:51:55Z",
123 | "status": "completed",
124 | "updated_at": "2023-08-03T01:52:08Z",
125 | "url": "https://repo-url/actions/runs/5745695002",
126 | "duration": 13
127 | },
128 | {
129 | "conclusion": "success",
130 | "created_at": "2023-08-02T22:40:38Z",
131 | "display_title": "workflow_3",
132 | "event": "workflow_dispatch",
133 | "head_branch": "main",
134 | "name": "workflow_3",
135 | "run_attempt": 1,
136 | "run_number": 43,
137 | "run_started_at": "2023-08-02T22:40:38Z",
138 | "status": "completed",
139 | "updated_at": "2023-08-02T22:41:05Z",
140 | "url": "https://repo-url/actions/runs/5744498681",
141 | "duration": 27
142 | },
143 | {
144 | "conclusion": "success",
145 | "created_at": "2023-08-02T17:25:51Z",
146 | "display_title": "workflow_3",
147 | "event": "workflow_dispatch",
148 | "head_branch": "main",
149 | "name": "workflow_3",
150 | "run_attempt": 1,
151 | "run_number": 42,
152 | "run_started_at": "2023-08-02T17:25:51Z",
153 | "status": "completed",
154 | "updated_at": "2023-08-02T17:26:20Z",
155 | "url": "https://repo-url/actions/runs/5741918816",
156 | "duration": 29
157 | },
158 | {
159 | "conclusion": "failure",
160 | "created_at": "2023-08-02T17:23:56Z",
161 | "display_title": "workflow_3",
162 | "event": "workflow_dispatch",
163 | "head_branch": "main",
164 | "name": "workflow_3",
165 | "run_attempt": 1,
166 | "run_number": 41,
167 | "run_started_at": "2023-08-02T17:23:56Z",
168 | "status": "completed",
169 | "updated_at": "2023-08-02T17:24:15Z",
170 | "url": "https://repo-url/actions/runs/5741899384",
171 | "duration": 19
172 | },
173 | {
174 | "conclusion": "success",
175 | "created_at": "2023-08-02T17:12:03Z",
176 | "display_title": "workflow_3",
177 | "event": "workflow_dispatch",
178 | "head_branch": "main",
179 | "name": "workflow_3",
180 | "run_attempt": 1,
181 | "run_number": 40,
182 | "run_started_at": "2023-08-02T17:12:03Z",
183 | "status": "completed",
184 | "updated_at": "2023-08-02T17:12:33Z",
185 | "url": "https://repo-url/actions/runs/5741780688",
186 | "duration": 30
187 | },
188 | {
189 | "conclusion": "failure",
190 | "created_at": "2023-08-02T17:09:03Z",
191 | "display_title": "workflow_3",
192 | "event": "workflow_dispatch",
193 | "head_branch": "main",
194 | "name": "workflow_3",
195 | "run_attempt": 1,
196 | "run_number": 39,
197 | "run_started_at": "2023-08-02T17:09:03Z",
198 | "status": "completed",
199 | "updated_at": "2023-08-02T17:09:21Z",
200 | "url": "https://repo-url/actions/runs/5741751563",
201 | "duration": 18
202 | },
203 | {
204 | "conclusion": "success",
205 | "created_at": "2023-08-02T16:59:53Z",
206 | "display_title": "workflow_3",
207 | "event": "workflow_dispatch",
208 | "head_branch": "main",
209 | "name": "workflow_3",
210 | "run_attempt": 1,
211 | "run_number": 38,
212 | "run_started_at": "2023-08-02T16:59:53Z",
213 | "status": "completed",
214 | "updated_at": "2023-08-02T17:00:17Z",
215 | "url": "https://repo-url/actions/runs/5741655896",
216 | "duration": 24
217 | },
218 | {
219 | "conclusion": "success",
220 | "created_at": "2023-08-02T16:52:58Z",
221 | "display_title": "workflow_3",
222 | "event": "workflow_dispatch",
223 | "head_branch": "main",
224 | "name": "workflow_3",
225 | "run_attempt": 1,
226 | "run_number": 37,
227 | "run_started_at": "2023-08-02T16:52:58Z",
228 | "status": "completed",
229 | "updated_at": "2023-08-02T16:53:27Z",
230 | "url": "https://repo-url/actions/runs/5741599662",
231 | "duration": 29
232 | },
233 | {
234 | "conclusion": "success",
235 | "created_at": "2023-08-02T16:40:46Z",
236 | "display_title": "workflow_3",
237 | "event": "workflow_dispatch",
238 | "head_branch": "main",
239 | "name": "workflow_3",
240 | "run_attempt": 1,
241 | "run_number": 36,
242 | "run_started_at": "2023-08-02T16:40:46Z",
243 | "status": "completed",
244 | "updated_at": "2023-08-02T16:41:14Z",
245 | "url": "https://repo-url/actions/runs/5741498070",
246 | "duration": 28
247 | },
248 | {
249 | "conclusion": "success",
250 | "created_at": "2023-08-02T16:38:05Z",
251 | "display_title": "workflow_3",
252 | "event": "workflow_dispatch",
253 | "head_branch": "main",
254 | "name": "workflow_3",
255 | "run_attempt": 1,
256 | "run_number": 35,
257 | "run_started_at": "2023-08-02T16:38:05Z",
258 | "status": "completed",
259 | "updated_at": "2023-08-02T16:38:23Z",
260 | "url": "https://repo-url/actions/runs/5741468129",
261 | "duration": 18
262 | },
263 | {
264 | "conclusion": "success",
265 | "created_at": "2023-08-02T16:18:55Z",
266 | "display_title": "workflow_3",
267 | "event": "workflow_dispatch",
268 | "head_branch": "main",
269 | "name": "workflow_3",
270 | "run_attempt": 1,
271 | "run_number": 34,
272 | "run_started_at": "2023-08-02T16:18:55Z",
273 | "status": "completed",
274 | "updated_at": "2023-08-02T16:19:15Z",
275 | "url": "https://repo-url/actions/runs/5741287077",
276 | "duration": 20
277 | },
278 | {
279 | "conclusion": "failure",
280 | "created_at": "2023-08-02T16:17:36Z",
281 | "display_title": "workflow_3",
282 | "event": "workflow_dispatch",
283 | "head_branch": "main",
284 | "name": "workflow_3",
285 | "run_attempt": 1,
286 | "run_number": 33,
287 | "run_started_at": "2023-08-02T16:17:36Z",
288 | "status": "completed",
289 | "updated_at": "2023-08-02T16:17:53Z",
290 | "url": "https://repo-url/actions/runs/5741274712",
291 | "duration": 17
292 | },
293 | {
294 | "conclusion": "failure",
295 | "created_at": "2023-08-02T16:15:30Z",
296 | "display_title": "workflow_3",
297 | "event": "workflow_dispatch",
298 | "head_branch": "main",
299 | "name": "workflow_3",
300 | "run_attempt": 1,
301 | "run_number": 32,
302 | "run_started_at": "2023-08-02T16:15:30Z",
303 | "status": "completed",
304 | "updated_at": "2023-08-02T16:15:58Z",
305 | "url": "https://repo-url/actions/runs/5741254037",
306 | "duration": 28
307 | },
308 | {
309 | "conclusion": "failure",
310 | "created_at": "2023-08-02T16:12:24Z",
311 | "display_title": "workflow_3",
312 | "event": "workflow_dispatch",
313 | "head_branch": "main",
314 | "name": "workflow_3",
315 | "run_attempt": 1,
316 | "run_number": 31,
317 | "run_started_at": "2023-08-02T16:12:24Z",
318 | "status": "completed",
319 | "updated_at": "2023-08-02T16:15:00Z",
320 | "url": "https://repo-url/actions/runs/5741224684",
321 | "duration": 156
322 | },
323 | {
324 | "conclusion": "failure",
325 | "created_at": "2023-08-02T16:10:25Z",
326 | "display_title": "workflow_3",
327 | "event": "workflow_dispatch",
328 | "head_branch": "main",
329 | "name": "workflow_3",
330 | "run_attempt": 1,
331 | "run_number": 30,
332 | "run_started_at": "2023-08-02T16:10:25Z",
333 | "status": "completed",
334 | "updated_at": "2023-08-02T16:10:50Z",
335 | "url": "https://repo-url/actions/runs/5741206030",
336 | "duration": 25
337 | },
338 | {
339 | "conclusion": "failure",
340 | "created_at": "2023-08-02T16:08:44Z",
341 | "display_title": "workflow_3",
342 | "event": "workflow_dispatch",
343 | "head_branch": "main",
344 | "name": "workflow_3",
345 | "run_attempt": 1,
346 | "run_number": 29,
347 | "run_started_at": "2023-08-02T16:08:44Z",
348 | "status": "completed",
349 | "updated_at": "2023-08-02T16:09:10Z",
350 | "url": "https://repo-url/actions/runs/5741189545",
351 | "duration": 26
352 | },
353 | {
354 | "conclusion": "failure",
355 | "created_at": "2023-08-02T16:03:20Z",
356 | "display_title": "workflow_3",
357 | "event": "workflow_dispatch",
358 | "head_branch": "main",
359 | "name": "workflow_3",
360 | "run_attempt": 1,
361 | "run_number": 28,
362 | "run_started_at": "2023-08-02T16:03:20Z",
363 | "status": "completed",
364 | "updated_at": "2023-08-02T16:03:58Z",
365 | "url": "https://repo-url/actions/runs/5741131614",
366 | "duration": 38
367 | },
368 | {
369 | "conclusion": "failure",
370 | "created_at": "2023-08-02T16:00:53Z",
371 | "display_title": "workflow_3",
372 | "event": "workflow_dispatch",
373 | "head_branch": "main",
374 | "name": "workflow_3",
375 | "run_attempt": 1,
376 | "run_number": 27,
377 | "run_started_at": "2023-08-02T16:00:53Z",
378 | "status": "completed",
379 | "updated_at": "2023-08-02T16:01:19Z",
380 | "url": "https://repo-url/actions/runs/5741103822",
381 | "duration": 26
382 | },
383 | {
384 | "conclusion": "failure",
385 | "created_at": "2023-08-02T15:55:36Z",
386 | "display_title": "workflow_3",
387 | "event": "workflow_dispatch",
388 | "head_branch": "main",
389 | "name": "workflow_3",
390 | "run_attempt": 1,
391 | "run_number": 26,
392 | "run_started_at": "2023-08-02T15:55:36Z",
393 | "status": "completed",
394 | "updated_at": "2023-08-02T15:56:01Z",
395 | "url": "https://repo-url/actions/runs/5741051958",
396 | "duration": 25
397 | },
398 | {
399 | "conclusion": "failure",
400 | "created_at": "2023-08-02T15:48:58Z",
401 | "display_title": "workflow_3",
402 | "event": "workflow_dispatch",
403 | "head_branch": "main",
404 | "name": "workflow_3",
405 | "run_attempt": 1,
406 | "run_number": 25,
407 | "run_started_at": "2023-08-02T15:48:58Z",
408 | "status": "completed",
409 | "updated_at": "2023-08-02T15:49:20Z",
410 | "url": "https://repo-url/actions/runs/5740992508",
411 | "duration": 22
412 | },
413 | {
414 | "conclusion": "failure",
415 | "created_at": "2023-08-02T15:44:42Z",
416 | "display_title": "workflow_3",
417 | "event": "workflow_dispatch",
418 | "head_branch": "main",
419 | "name": "workflow_3",
420 | "run_attempt": 1,
421 | "run_number": 24,
422 | "run_started_at": "2023-08-02T15:44:42Z",
423 | "status": "completed",
424 | "updated_at": "2023-08-02T15:45:08Z",
425 | "url": "https://repo-url/actions/runs/5740952125",
426 | "duration": 26
427 | },
428 | {
429 | "conclusion": "failure",
430 | "created_at": "2023-08-02T15:33:09Z",
431 | "display_title": "workflow_3",
432 | "event": "workflow_dispatch",
433 | "head_branch": "main",
434 | "name": "workflow_3",
435 | "run_attempt": 1,
436 | "run_number": 23,
437 | "run_started_at": "2023-08-02T15:33:09Z",
438 | "status": "completed",
439 | "updated_at": "2023-08-02T15:33:32Z",
440 | "url": "https://repo-url/actions/runs/5740835844",
441 | "duration": 23
442 | },
443 | {
444 | "conclusion": "failure",
445 | "created_at": "2023-08-02T15:30:03Z",
446 | "display_title": "workflow_3",
447 | "event": "workflow_dispatch",
448 | "head_branch": "main",
449 | "name": "workflow_3",
450 | "run_attempt": 1,
451 | "run_number": 22,
452 | "run_started_at": "2023-08-02T15:30:03Z",
453 | "status": "completed",
454 | "updated_at": "2023-08-02T15:30:21Z",
455 | "url": "https://repo-url/actions/runs/5740799895",
456 | "duration": 18
457 | },
458 | {
459 | "conclusion": "failure",
460 | "created_at": "2023-08-02T15:26:29Z",
461 | "display_title": "workflow_3",
462 | "event": "workflow_dispatch",
463 | "head_branch": "main",
464 | "name": "workflow_3",
465 | "run_attempt": 1,
466 | "run_number": 21,
467 | "run_started_at": "2023-08-02T15:26:29Z",
468 | "status": "completed",
469 | "updated_at": "2023-08-02T15:26:45Z",
470 | "url": "https://repo-url/actions/runs/5740766666",
471 | "duration": 16
472 | },
473 | {
474 | "conclusion": "failure",
475 | "created_at": "2023-08-02T15:23:25Z",
476 | "display_title": "workflow_3",
477 | "event": "workflow_dispatch",
478 | "head_branch": "main",
479 | "name": "workflow_3",
480 | "run_attempt": 1,
481 | "run_number": 20,
482 | "run_started_at": "2023-08-02T15:23:25Z",
483 | "status": "completed",
484 | "updated_at": "2023-08-02T15:24:00Z",
485 | "url": "https://repo-url/actions/runs/5740733967",
486 | "duration": 35
487 | },
488 | {
489 | "conclusion": "failure",
490 | "created_at": "2023-08-02T15:16:36Z",
491 | "display_title": "workflow_3",
492 | "event": "workflow_dispatch",
493 | "head_branch": "main",
494 | "name": "workflow_3",
495 | "run_attempt": 1,
496 | "run_number": 19,
497 | "run_started_at": "2023-08-02T15:16:36Z",
498 | "status": "completed",
499 | "updated_at": "2023-08-02T15:17:02Z",
500 | "url": "https://repo-url/actions/runs/5740658965",
501 | "duration": 26
502 | },
503 | {
504 | "conclusion": "failure",
505 | "created_at": "2023-08-02T15:11:40Z",
506 | "display_title": "workflow_3",
507 | "event": "workflow_dispatch",
508 | "head_branch": "main",
509 | "name": "workflow_3",
510 | "run_attempt": 1,
511 | "run_number": 18,
512 | "run_started_at": "2023-08-02T15:11:40Z",
513 | "status": "completed",
514 | "updated_at": "2023-08-02T15:12:06Z",
515 | "url": "https://repo-url/actions/runs/5740603562",
516 | "duration": 26
517 | },
518 | {
519 | "conclusion": "failure",
520 | "created_at": "2023-08-02T15:04:40Z",
521 | "display_title": "workflow_3",
522 | "event": "workflow_dispatch",
523 | "head_branch": "main",
524 | "name": "workflow_3",
525 | "run_attempt": 1,
526 | "run_number": 17,
527 | "run_started_at": "2023-08-02T15:04:40Z",
528 | "status": "completed",
529 | "updated_at": "2023-08-02T15:04:56Z",
530 | "url": "https://repo-url/actions/runs/5740519101",
531 | "duration": 16
532 | },
533 | {
534 | "conclusion": "success",
535 | "created_at": "2023-08-02T14:49:55Z",
536 | "display_title": "workflow_3",
537 | "event": "workflow_dispatch",
538 | "head_branch": "main",
539 | "name": "workflow_3",
540 | "run_attempt": 1,
541 | "run_number": 16,
542 | "run_started_at": "2023-08-02T14:49:55Z",
543 | "status": "completed",
544 | "updated_at": "2023-08-02T14:50:22Z",
545 | "url": "https://repo-url/actions/runs/5740361667",
546 | "duration": 27
547 | },
548 | {
549 | "conclusion": "failure",
550 | "created_at": "2023-08-02T14:48:53Z",
551 | "display_title": "workflow_3",
552 | "event": "workflow_dispatch",
553 | "head_branch": "main",
554 | "name": "workflow_3",
555 | "run_attempt": 1,
556 | "run_number": 15,
557 | "run_started_at": "2023-08-02T14:48:53Z",
558 | "status": "completed",
559 | "updated_at": "2023-08-02T14:49:15Z",
560 | "url": "https://repo-url/actions/runs/5740350689",
561 | "duration": 22
562 | },
563 | {
564 | "conclusion": "success",
565 | "created_at": "2023-08-02T14:10:32Z",
566 | "display_title": "workflow_2",
567 | "event": "workflow_dispatch",
568 | "head_branch": "main",
569 | "name": "workflow_2",
570 | "run_attempt": 1,
571 | "run_number": 48,
572 | "run_started_at": "2023-08-02T14:10:32Z",
573 | "status": "completed",
574 | "updated_at": "2023-08-02T14:10:49Z",
575 | "url": "https://repo-url/actions/runs/5739906808",
576 | "duration": 17
577 | },
578 | {
579 | "conclusion": "failure",
580 | "created_at": "2023-08-02T14:09:34Z",
581 | "display_title": "workflow_2",
582 | "event": "workflow_dispatch",
583 | "head_branch": "main",
584 | "name": "workflow_2",
585 | "run_attempt": 1,
586 | "run_number": 47,
587 | "run_started_at": "2023-08-02T14:09:34Z",
588 | "status": "completed",
589 | "updated_at": "2023-08-02T14:09:48Z",
590 | "url": "https://repo-url/actions/runs/5739895314",
591 | "duration": 14
592 | },
593 | {
594 | "conclusion": "failure",
595 | "created_at": "2023-08-02T12:33:24Z",
596 | "display_title": "workflow_3",
597 | "event": "workflow_dispatch",
598 | "head_branch": "main",
599 | "name": "workflow_3",
600 | "run_attempt": 1,
601 | "run_number": 14,
602 | "run_started_at": "2023-08-02T12:33:24Z",
603 | "status": "completed",
604 | "updated_at": "2023-08-02T12:33:39Z",
605 | "url": "https://repo-url/actions/runs/5738850142",
606 | "duration": 15
607 | },
608 | {
609 | "conclusion": "failure",
610 | "created_at": "2023-08-02T12:27:40Z",
611 | "display_title": "workflow_3",
612 | "event": "workflow_dispatch",
613 | "head_branch": "main",
614 | "name": "workflow_3",
615 | "run_attempt": 1,
616 | "run_number": 13,
617 | "run_started_at": "2023-08-02T12:27:40Z",
618 | "status": "completed",
619 | "updated_at": "2023-08-02T12:27:57Z",
620 | "url": "https://repo-url/actions/runs/5738789750",
621 | "duration": 17
622 | },
623 | {
624 | "conclusion": "failure",
625 | "created_at": "2023-08-02T12:26:26Z",
626 | "display_title": "workflow_3",
627 | "event": "workflow_dispatch",
628 | "head_branch": "main",
629 | "name": "workflow_3",
630 | "run_attempt": 1,
631 | "run_number": 12,
632 | "run_started_at": "2023-08-02T12:26:26Z",
633 | "status": "completed",
634 | "updated_at": "2023-08-02T12:26:42Z",
635 | "url": "https://repo-url/actions/runs/5738777757",
636 | "duration": 16
637 | },
638 | {
639 | "conclusion": "failure",
640 | "created_at": "2023-08-02T12:25:56Z",
641 | "display_title": "workflow_3",
642 | "event": "workflow_dispatch",
643 | "head_branch": "main",
644 | "name": "workflow_3",
645 | "run_attempt": 1,
646 | "run_number": 11,
647 | "run_started_at": "2023-08-02T12:25:56Z",
648 | "status": "completed",
649 | "updated_at": "2023-08-02T12:26:11Z",
650 | "url": "https://repo-url/actions/runs/5738773319",
651 | "duration": 15
652 | },
653 | {
654 | "conclusion": "failure",
655 | "created_at": "2023-08-02T12:23:15Z",
656 | "display_title": "workflow_3",
657 | "event": "workflow_dispatch",
658 | "head_branch": "main",
659 | "name": "workflow_3",
660 | "run_attempt": 1,
661 | "run_number": 10,
662 | "run_started_at": "2023-08-02T12:23:15Z",
663 | "status": "completed",
664 | "updated_at": "2023-08-02T12:23:32Z",
665 | "url": "https://repo-url/actions/runs/5738746190",
666 | "duration": 17
667 | },
668 | {
669 | "conclusion": "failure",
670 | "created_at": "2023-08-02T12:18:37Z",
671 | "display_title": "workflow_3",
672 | "event": "workflow_dispatch",
673 | "head_branch": "main",
674 | "name": "workflow_3",
675 | "run_attempt": 2,
676 | "run_number": 9,
677 | "run_started_at": "2023-08-02T12:19:39Z",
678 | "status": "completed",
679 | "updated_at": "2023-08-02T12:19:59Z",
680 | "url": "https://repo-url/actions/runs/5738698092",
681 | "duration": 20
682 | },
683 | {
684 | "conclusion": "failure",
685 | "created_at": "2023-08-02T12:16:58Z",
686 | "display_title": "workflow_3",
687 | "event": "workflow_dispatch",
688 | "head_branch": "main",
689 | "name": "workflow_3",
690 | "run_attempt": 2,
691 | "run_number": 8,
692 | "run_started_at": "2023-08-02T12:17:58Z",
693 | "status": "completed",
694 | "updated_at": "2023-08-02T12:18:14Z",
695 | "url": "https://repo-url/actions/runs/5738681879",
696 | "duration": 16
697 | },
698 | {
699 | "conclusion": "failure",
700 | "created_at": "2023-08-02T12:15:27Z",
701 | "display_title": "workflow_3",
702 | "event": "workflow_dispatch",
703 | "head_branch": "main",
704 | "name": "workflow_3",
705 | "run_attempt": 1,
706 | "run_number": 7,
707 | "run_started_at": "2023-08-02T12:15:27Z",
708 | "status": "completed",
709 | "updated_at": "2023-08-02T12:15:45Z",
710 | "url": "https://repo-url/actions/runs/5738667301",
711 | "duration": 18
712 | },
713 | {
714 | "conclusion": "failure",
715 | "created_at": "2023-08-02T12:11:33Z",
716 | "display_title": "workflow_3",
717 | "event": "workflow_dispatch",
718 | "head_branch": "main",
719 | "name": "workflow_3",
720 | "run_attempt": 1,
721 | "run_number": 6,
722 | "run_started_at": "2023-08-02T12:11:33Z",
723 | "status": "completed",
724 | "updated_at": "2023-08-02T12:11:55Z",
725 | "url": "https://repo-url/actions/runs/5738629026",
726 | "duration": 22
727 | },
728 | {
729 | "conclusion": "failure",
730 | "created_at": "2023-08-02T11:42:10Z",
731 | "display_title": "workflow_3",
732 | "event": "workflow_dispatch",
733 | "head_branch": "main",
734 | "name": "workflow_3",
735 | "run_attempt": 1,
736 | "run_number": 5,
737 | "run_started_at": "2023-08-02T11:42:10Z",
738 | "status": "completed",
739 | "updated_at": "2023-08-02T11:42:27Z",
740 | "url": "https://repo-url/actions/runs/5738360713",
741 | "duration": 17
742 | },
743 | {
744 | "conclusion": "failure",
745 | "created_at": "2023-08-02T11:41:10Z",
746 | "display_title": "workflow_3",
747 | "event": "workflow_dispatch",
748 | "head_branch": "main",
749 | "name": "workflow_3",
750 | "run_attempt": 1,
751 | "run_number": 4,
752 | "run_started_at": "2023-08-02T11:41:10Z",
753 | "status": "completed",
754 | "updated_at": "2023-08-02T11:41:24Z",
755 | "url": "https://repo-url/actions/runs/5738352682",
756 | "duration": 14
757 | },
758 | {
759 | "conclusion": "failure",
760 | "created_at": "2023-08-02T11:39:14Z",
761 | "display_title": "workflow_3",
762 | "event": "workflow_dispatch",
763 | "head_branch": "main",
764 | "name": "workflow_3",
765 | "run_attempt": 1,
766 | "run_number": 3,
767 | "run_started_at": "2023-08-02T11:39:14Z",
768 | "status": "completed",
769 | "updated_at": "2023-08-02T11:39:32Z",
770 | "url": "https://repo-url/actions/runs/5738337268",
771 | "duration": 18
772 | },
773 | {
774 | "conclusion": "failure",
775 | "created_at": "2023-08-02T11:33:58Z",
776 | "display_title": "workflow_3",
777 | "event": "workflow_dispatch",
778 | "head_branch": "main",
779 | "name": "workflow_3",
780 | "run_attempt": 1,
781 | "run_number": 2,
782 | "run_started_at": "2023-08-02T11:33:58Z",
783 | "status": "completed",
784 | "updated_at": "2023-08-02T11:34:15Z",
785 | "url": "https://repo-url/actions/runs/5738292428",
786 | "duration": 17
787 | },
788 | {
789 | "conclusion": "failure",
790 | "created_at": "2023-08-02T11:31:14Z",
791 | "display_title": "workflow_3",
792 | "event": "workflow_dispatch",
793 | "head_branch": "main",
794 | "name": "workflow_3",
795 | "run_attempt": 1,
796 | "run_number": 1,
797 | "run_started_at": "2023-08-02T11:31:14Z",
798 | "status": "completed",
799 | "updated_at": "2023-08-02T11:31:32Z",
800 | "url": "https://repo-url/actions/runs/5738265376",
801 | "duration": 18
802 | }
803 | ]
804 | with open('runs.json', 'w') as f:
805 | json.dump(runs, f)
806 |
807 | if __name__ == '__main__':
808 | unittest.main()
--------------------------------------------------------------------------------
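
A quick way to sanity-check the fixture above is to recompute the statistics it should yield. The sketch below is not `evaluate_workflow_runs.py` itself — it is a minimal, hypothetical recomputation of the per-workflow metrics this repo reports (average duration, median duration, success rate, total runs) from a `runs.json` shaped like the fixture:

    import json
    import statistics
    from collections import defaultdict

    # Group the fixture's run records by workflow name
    with open('runs.json') as f:
        runs = json.load(f)

    by_workflow = defaultdict(list)
    for run in runs:
        by_workflow[run['name']].append(run)

    # Print one CSV-style row per workflow: name, mean, median, success rate, count
    for name, group in sorted(by_workflow.items()):
        durations = [r['duration'] for r in group]
        successes = sum(1 for r in group if r['conclusion'] == 'success')
        print(f'{name},{statistics.mean(durations):.1f},'
              f'{statistics.median(durations):.1f},'
              f'{successes / len(group):.2f},{len(group)}')

For the `workflow_3` runs visible above, almost every `conclusion` is "failure", so the computed success rate should come out at or near 0.00.
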
/test_get_workflow_runs.py:
--------------------------------------------------------------------------------
1 | """
2 | This file contains unit tests for the `get_workflow_runs.py` script.
3 |
4 | Usage:
5 |     python -m unittest test_get_workflow_runs.TestGetWorkflowRuns
6 |
7 | Requirements:
8 | - Python 3.x with the `python-dotenv` package
9 | - `jq` command-line tool
10 | - `get_workflow_runs.py` script to test
11 |
12 | Description:
13 | This script contains unit tests for the `get_workflow_runs.py` script. The tests verify that the script correctly
14 | retrieves workflow runs from the GitHub API within the specified date range, and outputs the runs to a JSON file.
15 |
16 | To run the tests, you need to have Python 3.x and the `jq` command-line tool installed on your system. You also
17 | need to be authenticated with the GitHub API with `repo` scope.
18 |
19 | The tests use the `unittest` module in Python to define test cases and assertions. Each test case runs the
20 | `get_workflow_runs.py` script end to end via `subprocess`, and checks the script's behavior with both valid
21 | and invalid date ranges.
22 |
23 | To run the tests, you can use the following command:
24 |
25 |     python -m unittest test_get_workflow_runs.TestGetWorkflowRuns
26 |
27 | This command runs all the test cases defined in the `TestGetWorkflowRuns` class. A single test case can be
28 | selected by appending its method name, e.g. `test_get_workflow_runs.TestGetWorkflowRuns.test_get_workflow_runs_with_valid_dates`.
29 |
30 | Output:
31 | - Test results for the `get_workflow_runs.py` script
32 |
33 | Example:
34 | python -m unittest test_get_workflow_runs.py
35 | """
36 | import unittest
37 | import subprocess
38 | import json
39 | import os
40 |
41 | from dotenv import load_dotenv
42 |
43 | class TestGetWorkflowRuns(unittest.TestCase):
44 | def setUp(self):
45 | load_dotenv()
46 | self.repo_owner = os.getenv("OWNER_NAME")
47 | self.repo_name = os.getenv("REPO_NAME")
48 | self.start_date = os.getenv("START_DATE")
49 | self.end_date = os.getenv("END_DATE")
50 | self.invalid_start_date = "abc"
51 | self.invalid_end_date = "xyz"
52 |
53 | def test_get_workflow_runs_with_valid_dates(self):
54 | # Run the script to retrieve workflow runs with valid dates
55 | subprocess.run(["python", "get_workflow_runs.py", self.repo_owner, self.repo_name, self.start_date, self.end_date])
56 |
57 | # Load the workflow runs from file
58 | with open("runs.json", "r") as f:
59 | workflow_runs = json.load(f)
60 |
61 | # Check that the workflow runs are not empty
62 | self.assertGreater(len(workflow_runs), 0)
63 |
64 | # Check that each workflow run has the expected fields
65 | for run in workflow_runs:
66 | self.assertIn("conclusion", run)
67 | self.assertIn("created_at", run)
68 | self.assertIn("display_title", run)
69 | self.assertIn("event", run)
70 | self.assertIn("head_branch", run)
71 | self.assertIn("name", run)
72 | self.assertIn("run_number", run)
73 | self.assertIn("run_started_at", run)
74 | self.assertIn("run_attempt", run)
75 | self.assertIn("status", run)
76 | self.assertIn("updated_at", run)
77 | self.assertIn("url", run)
78 | self.assertIn("duration", run)
79 |
80 |         # Report the size of runs.json
81 | with open("runs.json", "r") as f:
82 | raw_json = f.read()
83 | print("Number of characters in runs.json:", len(raw_json))
84 |
85 | # Clean up the temporary file
86 | os.remove("runs.json")
87 |
88 | def test_get_workflow_runs_with_invalid_dates(self):
89 | # Run the script to retrieve workflow runs with invalid dates
90 | subprocess.run(["python", "get_workflow_runs.py", self.repo_owner, self.repo_name, self.invalid_start_date, self.invalid_end_date])
91 |
92 | # Check that the runs.json file does not exist
93 | self.assertFalse(os.path.exists("runs.json"))
94 |
95 | if __name__ == '__main__':
96 | unittest.main()
--------------------------------------------------------------------------------
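
One property of the fixture data is worth noting: each record's `duration` matches the whole seconds between `run_started_at` and `updated_at` (e.g. run 13 above: 12:27:40 → 12:27:57 is 17 seconds). Whether `get_workflow_runs.py` derives the field exactly this way is an assumption, but the relationship can be checked mechanically:

    import json
    from datetime import datetime

    def parse(ts: str) -> datetime:
        # Fixture timestamps are ISO 8601 with a trailing 'Z' (UTC)
        return datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')

    # Assert duration == updated_at - run_started_at for every record
    with open('runs.json') as f:
        for run in json.load(f):
            elapsed = parse(run['updated_at']) - parse(run['run_started_at'])
            assert run['duration'] == int(elapsed.total_seconds()), run['url']

Note that `run_started_at` (not `created_at`) is the right baseline: for re-run attempts such as run 9 above (`run_attempt: 2`), the two timestamps differ.
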
/test_workflow_metrics.py:
--------------------------------------------------------------------------------
1 | """
2 | test_workflow_metrics.py - Unit tests for the workflow_metrics.py script.
3 |
4 | This script defines a set of unit tests for the workflow_metrics.py script. The tests check that the script
5 | raises a ValueError if any of the required environment variables are not set, and that the script produces
6 | the expected output when all environment variables are set.
7 |
8 | Usage:
9 | python -m unittest test_workflow_metrics.py
10 |
11 | Environment Variables:
12 | - GH_TOKEN: A personal access token for authenticating with the GitHub API, with `repo` and `admin:org` scopes
13 | - OWNER_NAME: The name of the organization or user that owns the repository.
14 | - START_DATE: The start date for the time range to analyze, in ISO 8601 format (e.g. "2022-01-01").
15 | - END_DATE: The end date for the time range to analyze, in ISO 8601 format (e.g. "2022-01-31").
16 | - REPO_NAME: The name of the repository to analyze. If not set, the script will analyze all repositories
17 | owned by the specified organization or user.
18 | - DELAY_BETWEEN_QUERY: The number of seconds to wait between queries to the GitHub API. This is useful
19 | when running the script against a large number of repositories, to avoid hitting the GitHub API rate
20 | limit. If not set, the script will not wait between queries.
21 |
22 |
23 | Expected Output:
24 | The script should produce a file named "runs.json" or "org-runs.json" in the current directory, containing
25 | a JSON array of all workflow runs in the specified time range. It should also produce a file named
26 | "workflow-stats.csv" or "org-workflow-stats.csv" in the current directory, containing per-workflow run
27 | statistics in CSV format for the specified repository or organization.
28 |
29 |
30 | Note that the script requires the `gh` command-line tool to be installed and authenticated with the `GH_TOKEN`
31 | environment variable. The `gh` tool can be installed from https://cli.github.com/.
32 | """
33 |
34 | import os
35 | import subprocess
36 | import unittest
37 | from dotenv import load_dotenv
38 |
39 | class TestWorkflowMetrics(unittest.TestCase):
40 |
41 | def setUp(self):
42 | print('Setting up test harness...')
43 | # Load environment variables from .env file
44 | load_dotenv()
45 | print(' Environment variables loaded from .env file')
46 |
47 | def test_workflow_metrics(self):
48 |
49 |         # Fail fast if the gh CLI is not authenticated
50 |         subprocess.run(['gh', 'auth', 'status'], capture_output=True, check=True)
51 |
52 | result = subprocess.run(['python', 'workflow_metrics.py'], capture_output=True)
53 | print(result.stdout.decode())
54 | print(result.stderr.decode())
55 |
56 | # Assert that runs.json was created
57 | self.assertTrue(os.path.exists('runs.json'))
58 | self.assertEqual(result.returncode, 0)
59 |
60 |         # Assert that workflow-stats.csv contains more than one line (header plus data)
61 | with open('workflow-stats.csv', 'r') as f:
62 | lines = f.readlines()
63 | self.assertGreater(len(lines), 1)
64 |
65 |
66 | # Usage: python -m unittest test_workflow_metrics.TestWorkflowMetrics.test_org_workflow_metrics
67 | def test_org_workflow_metrics(self):
68 |         # Unset the repo-scoped variables so the script runs in org mode
69 |         os.environ.pop('REPO_NAME', None)
70 |         os.environ.pop('DELAY_BETWEEN_QUERY', None)
71 |
72 |         # Fail fast if the gh CLI is not authenticated
73 |         subprocess.run(['gh', 'auth', 'status'], capture_output=True, check=True)
74 |
75 | result = subprocess.run(['python', 'workflow_metrics.py'], capture_output=True)
76 | print(result.stdout.decode())
77 | print(result.stderr.decode())
78 | self.assertEqual(result.returncode, 0)
79 |
80 | # Assert that org-runs.json was created
81 | self.assertTrue(os.path.exists('org-runs.json'))
82 |
83 |         # Assert that org-workflow-stats.csv contains more than one line
84 | with open('org-workflow-stats.csv', 'r') as f:
85 | lines = f.readlines()
86 | self.assertGreater(len(lines), 1)
87 |
88 |
89 | def test_org_workflow_metrics_with_delay(self):
90 |         # Unset REPO_NAME so the script runs in org mode (DELAY_BETWEEN_QUERY stays set)
91 |         os.environ.pop('REPO_NAME', None)
92 |
93 |         # Fail fast if the gh CLI is not authenticated
94 |         subprocess.run(['gh', 'auth', 'status'], capture_output=True, check=True)
95 |
96 | result = subprocess.run(['python', 'workflow_metrics.py'], capture_output=True)
97 | print(result.stdout.decode())
98 | print(result.stderr.decode())
99 | self.assertEqual(result.returncode, 0)
100 |
101 | # Assert that org-runs.json contains more than one line
102 | with open('org-runs.json', 'r') as f:
103 | lines = f.readlines()
104 | self.assertGreater(len(lines), 1)
105 |
106 |         # Assert that org-workflow-stats.csv contains more than one line
107 | with open('org-workflow-stats.csv', 'r') as f:
108 | lines = f.readlines()
109 | self.assertGreater(len(lines), 1)
110 |
111 |
112 | def tearDown(self):
113 | print('Tearing down test harness...')
114 | # Remove the test files if they exist
115 | if os.path.exists('runs.json'):
116 | os.remove('runs.json')
117 | print(' runs.json removed')
118 | if os.path.exists('org-runs.json'):
119 | os.remove('org-runs.json')
120 | print(' org-runs.json removed')
121 | if os.path.exists('workflow-stats.csv'):
122 | os.remove('workflow-stats.csv')
123 | print(' workflow-stats.csv removed')
124 | if os.path.exists('org-workflow-stats.csv'):
125 | os.remove('org-workflow-stats.csv')
126 | print(' org-workflow-stats.csv removed')
127 |
128 |
129 | if __name__ == '__main__':
130 | unittest.main()
--------------------------------------------------------------------------------
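
A caveat with the suite above: the org-level tests remove variables from `os.environ` and rely on `load_dotenv()` in `setUp` to repopulate them, so the outcome of `test_workflow_metrics` can depend on execution order. If that ever bites, one common isolation pattern — sketched here with `unittest.mock.patch.dict`, which the suite does not currently use — scopes the mutation to a single test:

    import os
    import unittest
    from unittest.mock import patch

    class TestOrgModeEnv(unittest.TestCase):
        def test_org_mode_env_is_scoped(self):
            # Copy the environment minus the repo-scoped variables, then swap it
            # in only for the duration of the `with` block.
            org_env = {k: v for k, v in os.environ.items()
                       if k not in ('REPO_NAME', 'DELAY_BETWEEN_QUERY')}
            with patch.dict(os.environ, org_env, clear=True):
                self.assertNotIn('REPO_NAME', os.environ)
            # On exit the original environment is restored automatically.
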
/workflow_metrics.py:
--------------------------------------------------------------------------------
1 | """
2 | workflow_metrics.py - Retrieve and evaluate GitHub Actions workflow runs for a repository.
3 |
4 | The script uses the GitHub API to retrieve workflow runs for the specified repository, or for every repository
5 | in an organization, and calculates metrics such as the average duration, median duration, success rate, and
6 | total number of runs for each workflow.
7 |
8 | The following environment variables must be set:
9 |
10 | - OWNER_NAME: The name of the repository owner (e.g. "myorg").
11 | - START_DATE: The start date of the date range in ISO format (e.g. "2022-01-01").
12 | - END_DATE: The end date of the date range in ISO format (e.g. "2022-01-31").
13 | - REPO_NAME: Optional - The name of the repository (e.g. "myrepo").
14 | - DELAY_BETWEEN_QUERY: Optional - The number of seconds to wait between queries to the GitHub API.
15 |
16 | The script uses the following external tools:
17 |
18 | - `gh` (GitHub CLI): Used to authenticate with GitHub and retrieve workflow runs.
19 | - `jq`: Used to extract workflow names from the workflow runs JSON.
20 |
21 | The script outputs the following files:
22 |
23 | - `runs.json`: Workflow runs in JSON for a single repository, or `org-runs.json`: workflow runs in JSON for every repo in the org.
24 | - `workflow-stats.csv`: Workflow statistics in CSV for a single repository, or `org-workflow-stats.csv`: workflow statistics in CSV for every repo in the org.
25 |
26 | Usage: python workflow_metrics.py
27 | """
28 |
29 | import os
30 | import subprocess
31 | import time
32 | import json
33 |
34 | # Get environment variables
35 | gh_token = os.getenv("GH_TOKEN")
36 | if not gh_token:
37 |     raise ValueError("GH_TOKEN environment variable not set")
38 |
39 | owner_name = os.getenv("OWNER_NAME")
40 | if not owner_name:
41 | raise ValueError("OWNER_NAME environment variable not set")
42 |
43 | start_date = os.getenv("START_DATE")
44 | if not start_date:
45 | raise ValueError("START_DATE environment variable not set")
46 |
47 | end_date = os.getenv("END_DATE")
48 | if not end_date:
49 | raise ValueError("END_DATE environment variable not set")
50 |
51 | repo_name = os.getenv("REPO_NAME")
52 |
53 | sleep_time = os.getenv("DELAY_BETWEEN_QUERY")
54 |
55 |
56 | # Authenticate with GitHub CLI
57 | subprocess.run(['gh', 'auth', 'login', '--with-token'], input=gh_token.encode())
58 |
59 | # Get list of repository names if no repository name is specified
60 | if not repo_name:
61 |     # List every repository in the org; --paginate follows every page of
62 |     # results (without it, `gh api` returns only the first 30 repositories)
63 |     cmd = f'gh api --paginate orgs/{owner_name}/repos --jq \'.[] | .name\''
64 |     query_output = subprocess.check_output(cmd, shell=True, text=True)
65 |     repo_names = query_output.strip().split('\n')
66 |
67 |
68 |     with open('org-workflow-stats.csv', 'w') as f:
69 |         f.write('repository_name,workflow_name,average_duration,median_duration,success_rate,total_runs\n')
70 |
71 |     # Collect the workflow runs for every repository, then write org-runs.json once at the end
72 |     all_runs = []
73 |
74 |     # Get workflow runs for each repository
75 |     for repo in repo_names:
76 |
77 |         # Get workflow runs
78 |         subprocess.run(['python', '/get_workflow_runs.py', owner_name, repo, start_date, end_date])
79 |         # Read every JSON record in runs.json, tag it with the repository name, and collect it
80 |         with open('runs.json', 'r') as f1:
81 |             data = json.load(f1)
82 |         for record in data:
83 |             record['repository_name'] = repo
84 |         all_runs.extend(data)
85 |
86 |         # Evaluate workflow runs statistics
87 |         subprocess.run(['python', '/evaluate_workflow_runs.py'])
88 |         # Read workflow-stats.csv, skip the header, prefix the repository name, and append to org-workflow-stats.csv
89 |         with open('workflow-stats.csv', 'r') as f:
90 |             lines = f.readlines()
91 |         with open('org-workflow-stats.csv', 'a') as f2:
92 |             for line in lines[1:]:
93 |                 f2.write(f'{repo},{line}')
94 |         if sleep_time:
95 |             print(f' Sleeping for {sleep_time} seconds to prevent rate limiting...')
96 |             time.sleep(int(sleep_time))
97 |
98 |     # Write the combined runs for every repository as a single valid JSON array
99 |     with open('org-runs.json', 'w') as f:
100 |         json.dump(all_runs, f, indent=2)
101 |
102 |
103 | else:
104 | # Get workflow runs
105 | subprocess.run(['python', '/get_workflow_runs.py', owner_name, repo_name, start_date, end_date])
106 |
107 | # Evaluate workflow runs statistics
108 | subprocess.run(['python', '/evaluate_workflow_runs.py'])
109 |
--------------------------------------------------------------------------------
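
As a closing illustration, the CSV that `workflow_metrics.py` emits is straightforward to post-process. A small, hypothetical consumer — relying only on the header the script writes (`repository_name,workflow_name,average_duration,median_duration,success_rate,total_runs`) — could rank the slowest workflows across the org:

    import csv

    # Read the org-wide stats and print the five workflows with the longest
    # average duration. Column names match the header workflow_metrics.py writes.
    with open('org-workflow-stats.csv', newline='') as f:
        rows = list(csv.DictReader(f))

    rows.sort(key=lambda r: float(r['average_duration']), reverse=True)
    for row in rows[:5]:
        print(f"{row['repository_name']}/{row['workflow_name']}: "
              f"{row['average_duration']}s avg over {row['total_runs']} runs")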