├── test_results ├── python │ ├── __init__.py │ ├── test_sample.py │ ├── report.xml │ └── report_flaky.xml ├── cunit │ ├── testEmpty.xml │ └── testFailure.xml ├── tests │ ├── utils │ │ ├── src │ │ │ ├── test │ │ │ │ └── java │ │ │ │ │ └── action │ │ │ │ │ └── surefire │ │ │ │ │ └── report │ │ │ │ │ └── calc │ │ │ │ │ ├── AllOkTest.java │ │ │ │ │ ├── CalcUtilsTest.kt │ │ │ │ │ └── StringUtilsTest.java │ │ │ └── main │ │ │ │ └── java │ │ │ │ └── action │ │ │ │ └── surefire │ │ │ │ └── report │ │ │ │ └── calc │ │ │ │ ├── StringUtils.java │ │ │ │ └── CalcUtils.java │ │ ├── target │ │ │ └── surefire-reports │ │ │ │ ├── TEST-action.surefire.report.calc.AllOkTest.xml │ │ │ │ ├── TEST-action.surefire.report.calc.CalcUtilsTest.xml │ │ │ │ └── TEST-action.surefire.report.calc.StringUtilsTest.xml │ │ └── pom.xml │ ├── email │ │ ├── src │ │ │ ├── main │ │ │ │ └── java │ │ │ │ │ └── action │ │ │ │ │ └── surefire │ │ │ │ │ └── report │ │ │ │ │ └── email │ │ │ │ │ ├── InvalidEmailAddressException.java │ │ │ │ │ └── EmailAddress.java │ │ │ └── test │ │ │ │ └── java │ │ │ │ └── action │ │ │ │ └── surefire │ │ │ │ └── report │ │ │ │ └── email │ │ │ │ └── EmailAddressTest.java │ │ ├── pom.xml │ │ └── target │ │ │ └── surefire-reports │ │ │ └── TEST-action.surefire.report.email.EmailAddressTest.xml │ └── pom.xml ├── perl │ └── result.xml ├── issues │ ├── testFailedDisabled.xml │ └── testDisabled.xml ├── marathon_tests │ ├── com.mikepenz.DummyTest3#test_01.xml │ ├── com.mikepenz.DummyTest#test_02_dummy.xml │ └── com.mikepenz.DummyUtilTest#test_01_dummy.xml ├── multiple │ ├── test_10.xml │ ├── test_12.xml │ └── test_11.xml ├── container-structure │ └── test.xml ├── xunit │ ├── report.xml │ └── report_fl_on_f.xml ├── junit_flaky_failure │ └── marathon_junit_report.xml ├── mocha │ └── mocha.xml ├── catch2 │ └── report.xml ├── nested │ ├── multi-level.xml │ └── junit.xml ├── junit-server-test │ └── report.xml ├── corrupt-junit │ └── e2e-tests │ │ └── corrupt │ │ └── target │ │ └── 
sf-reports │ │ └── TEST-test.CorruptTest.xml ├── multiple_failures │ └── test_multiple_failures.xml ├── junit-web-test │ ├── expected.xml │ └── expectedRetries.xml └── nextest │ └── basic.xml ├── .prettierignore ├── .gitattributes ├── dist └── package.json ├── .devcontainer ├── postCreateCommand.sh └── devcontainer.json ├── .github ├── FUNDING.yml ├── images │ ├── action.png │ ├── annotated.png │ └── annotations.png ├── dependabot.yml ├── config │ └── configuration.json └── workflows │ ├── codeql-analysis.yml │ └── build.yml ├── vitest.config.ts ├── .prettierrc.json ├── SECURITY.md ├── __tests__ ├── utils.test.ts ├── outputs.test.ts ├── table.test.ts └── annotator.test.ts ├── tsconfig.json ├── package.json ├── .gitignore ├── eslint.config.mjs ├── src ├── utils.ts ├── table.ts ├── annotator.ts ├── main.ts └── testParser.ts ├── action.yml ├── LICENSE └── README.md /test_results/python/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | dist/ 2 | lib/ 3 | node_modules/ -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | dist/** -diff linguist-generated=true -------------------------------------------------------------------------------- /dist/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "module" 3 | } 4 | -------------------------------------------------------------------------------- /.devcontainer/postCreateCommand.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | npm install -------------------------------------------------------------------------------- /.github/FUNDING.yml: 
-------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [mikepenz] 4 | -------------------------------------------------------------------------------- /.github/images/action.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zendesk/action-junit-report/main/.github/images/action.png -------------------------------------------------------------------------------- /.github/images/annotated.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zendesk/action-junit-report/main/.github/images/annotated.png -------------------------------------------------------------------------------- /.github/images/annotations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zendesk/action-junit-report/main/.github/images/annotations.png -------------------------------------------------------------------------------- /vitest.config.ts: -------------------------------------------------------------------------------- 1 | import {defineConfig} from 'vitest/config' 2 | 3 | export default defineConfig({ 4 | test: { 5 | testTimeout: 30000 6 | } 7 | }) 8 | -------------------------------------------------------------------------------- /test_results/cunit/testEmpty.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 120, 3 | "tabWidth": 2, 4 | "useTabs": false, 5 | "semi": false, 6 | "singleQuote": true, 7 | "trailingComma": "none", 8 | "bracketSpacing": false, 9 | "arrowParens": "avoid" 10 | } 11 | 
-------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: 'npm' 4 | directory: '/' 5 | schedule: 6 | interval: 'weekly' 7 | - package-ecosystem: "github-actions" 8 | directory: "/" 9 | schedule: 10 | interval: "weekly" -------------------------------------------------------------------------------- /test_results/tests/utils/src/test/java/action/surefire/report/calc/AllOkTest.java: -------------------------------------------------------------------------------- 1 | package action.surefire.report.calc; 2 | 3 | import org.junit.Test; 4 | 5 | public class AllOkTest { 6 | 7 | @Test 8 | public void everythingIsFine() { 9 | } 10 | } -------------------------------------------------------------------------------- /test_results/perl/result.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /test_results/tests/email/src/main/java/action/surefire/report/email/InvalidEmailAddressException.java: -------------------------------------------------------------------------------- 1 | package action.surefire.report.email; 2 | 3 | public class InvalidEmailAddressException extends RuntimeException { 4 | public InvalidEmailAddressException(String message) { 5 | super(message); 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Patches will be released to the latest major version. 6 | 7 | ## Reporting a Vulnerability 8 | 9 | Please report (suspected) security vulnerabilities to mikepenz+osss@gmail.com. 
If the issue is confirmed, we will release a patch as soon as possible depending on complexity. 10 | -------------------------------------------------------------------------------- /test_results/issues/testFailedDisabled.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Assert: Boolean true check failed. 6 | 7 | 8 | -------------------------------------------------------------------------------- /test_results/marathon_tests/com.mikepenz.DummyTest3#test_01.xml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test_results/marathon_tests/com.mikepenz.DummyTest#test_02_dummy.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /test_results/multiple/test_10.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /test_results/container-structure/test.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /test_results/python/test_sample.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | 4 | def test_which_succeeds(): 5 | event = { 'attr': 'test'} 6 | assert event['attr'] == 'test' 7 | 8 | def test_which_fails(): 9 | event = { 'attr': 'test'} 10 | assert event['attr'] == 'xyz' 11 | 12 | def test_with_error(): 13 | event = { 'attr': 'test'} 14 | assert event.attr == 'test' 15 | -------------------------------------------------------------------------------- /test_results/multiple/test_12.xml: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /test_results/multiple/test_11.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | SackTrace 7 | 8 | 9 | -------------------------------------------------------------------------------- /test_results/tests/utils/target/surefire-reports/TEST-action.surefire.report.calc.AllOkTest.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /test_results/xunit/report.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /test_results/xunit/report_fl_on_f.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /test_results/marathon_tests/com.mikepenz.DummyUtilTest#test_01_dummy.xml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/config/configuration.json: -------------------------------------------------------------------------------- 1 | { 2 | "categories": [ 3 | { 4 | "title": "## 🚀 Features", 5 | "labels": ["feature"] 6 | }, 7 | { 8 | "title": "## 🐛 Fixes", 9 | "labels": ["fix"] 10 | }, 11 | { 12 | "title": "## 🧪 Tests", 13 | "labels": ["test"] 14 | }, 15 | { 16 | "title": "## 💬 Other", 17 | "labels": ["other"] 18 | }, 19 | { 20 | "title": "## 📦 Dependencies", 21 | "labels": ["dependencies"] 22 | } 23 | ], 24 | "template": "#{{CHANGELOG}}\n## Contributors:\n- 
#{{CONTRIBUTORS}}" 25 | } -------------------------------------------------------------------------------- /test_results/junit_flaky_failure/marathon_junit_report.xml: -------------------------------------------------------------------------------- 1 | 2 | 6 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /test_results/mocha/mocha.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /test_results/catch2/report.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | FAILED: 10 | REQUIRE( v == 1 ) 11 | with expansion: 12 | 0 == 1 13 | 0 14 | at /__w/futures/futures/test/unit/detail/utility/is_constant_evaluated.cpp:19 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /test_results/tests/utils/src/main/java/action/surefire/report/calc/StringUtils.java: -------------------------------------------------------------------------------- 1 | package action.surefire.report.calc; 2 | 3 | public final class StringUtils { 4 | 5 | private StringUtils() { 6 | // utility class 7 | } 8 | 9 | public static String nullIfBlank(String value) { 10 | if (org.apache.commons.lang3.StringUtils.isBlank(value)) { 11 | return null; 12 | } 13 | return value; 14 | } 15 | 16 | 17 | public static String requireNotBlank(String input) { 18 | return requireNotBlank(input, null); 19 | } 20 | 21 | public static String requireNotBlank(String input, String message) { 22 | if (!org.apache.commons.lang3.StringUtils.isBlank(input)) { 23 | return input; 24 | } else { 25 | throw new IllegalArgumentException( 26 | message != null ? 
message : "Input='" + input + "' didn't match condition."); 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /test_results/nested/multi-level.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /test_results/cunit/testFailure.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | false == something.loadXml(xml_string) 11 | File: /dumm/core/tests/testFailure.cpp 12 | Line: 77 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /__tests__/utils.test.ts: -------------------------------------------------------------------------------- 1 | import {readTransformers} from '../src/utils.js' 2 | import {describe, expect, it} from 'vitest' 3 | 4 | /** 5 | * Copyright 2024 Mike Penz 6 | */ 7 | 8 | describe('readTransformers', () => { 9 | it('should successfully parse default transformer', async () => { 10 | const transformer = readTransformers('[{"searchValue":"::","replaceValue":"/"}]') 11 | expect(transformer).toStrictEqual([ 12 | { 13 | regex: /::/gu, 14 | searchValue: '::', 15 | replaceValue: '/' 16 | } 17 | ]) 18 | }) 19 | 20 | it('should successfully parse custom transformer', async () => { 21 | const transformer = readTransformers( 22 | '[{"searchValue":"\\\\.","replaceValue":"/"},{"searchValue":"_t\\\\z","replaceValue":".t"}]' 23 | ) 24 | expect(transformer).toStrictEqual([ 25 | { 26 | regex: /\./gu, 27 | searchValue: '\\.', 28 | replaceValue: '/' 29 | }, 30 | { 31 | searchValue: '_t\\z', 32 | replaceValue: '.t' 33 | } 34 | ]) 35 | }) 36 | }) 37 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: 
-------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/typescript-node 3 | { 4 | "name": "Node.js & TypeScript", 5 | "image": "mcr.microsoft.com/devcontainers/typescript-node:24-bullseye", 6 | "features": { 7 | "ghcr.io/devcontainers/features/node:1": {}, 8 | "ghcr.io/devcontainers-extra/features/typescript:2.0.16": {} 9 | }, 10 | 11 | // Features to add to the dev container. More info: https://containers.dev/features. 12 | // "features": {}, 13 | 14 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 15 | // "forwardPorts": [], 16 | 17 | // Use 'postCreateCommand' to run commands after the container is created. 18 | "postCreateCommand": "./.devcontainer/postCreateCommand.sh", 19 | 20 | // Configure tool-specific properties. 21 | // "customizations": {}, 22 | 23 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
24 | // "remoteUser": "root" 25 | } -------------------------------------------------------------------------------- /test_results/python/report.xml: -------------------------------------------------------------------------------- 1 | def test_which_fails(): 2 | event = { 'attr': 'test'} 3 | > assert event['attr'] == 'xyz' 4 | E AssertionError: assert 'test' == 'xyz' 5 | E - xyz 6 | E + test 7 | 8 | python/test_sample.py:10: AssertionErrordef test_with_error(): 9 | event = { 'attr': 'test'} 10 | > assert event.attr == 'test' 11 | E AttributeError: 'dict' object has no attribute 'attr' 12 | 13 | python/test_sample.py:14: AttributeError -------------------------------------------------------------------------------- /test_results/python/report_flaky.xml: -------------------------------------------------------------------------------- 1 | def test_which_fails(): 2 | event = { 'attr': 'test'} 3 | > assert event['attr'] == 'xyz' 4 | E AssertionError: assert 'test' == 'xyz' 5 | E - xyz 6 | E + test 7 | 8 | python/test_sample.py:10: AssertionErrordef test_with_error(): 9 | event = { 'attr': 'test'} 10 | > assert event.attr == 'test' 11 | E AttributeError: 'dict' object has no attribute 'attr' 12 | 13 | python/test_sample.py:14: AttributeError -------------------------------------------------------------------------------- /test_results/junit-server-test/report.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | === RUN TestWebSocketReconnectRace --- FAIL: TestWebSocketReconnectRace (3.32s) 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /test_results/tests/email/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | action.surefire.report 5 | tests 6 | 0.0.1-SNAPSHOT 7 | 8 | 4.0.0 9 | email 10 | 11 | 12 | 13 | junit 14 | junit 15 | test 16 | 17 | 18 | org.apache.commons 19 | 
commons-lang3 20 | 21 | 22 | org.hamcrest 23 | hamcrest-all 24 | test 25 | 26 | 27 | org.assertj 28 | assertj-core 29 | test 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /test_results/corrupt-junit/e2e-tests/corrupt/target/sf-reports/TEST-test.CorruptTest.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 26 11 | at corrupt.core.Main$Companion.initAndRun(Main.kt:244) 12 | at corrupt.core.Main.initAndRun(Main.kt) 13 | at java.util.concurrent.FutureTask.run(FutureTask.java:266) 14 | at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) 15 | at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) 16 | at java.lang.Thread.run(Thread.java:750) 17 | ]]> 18 | { 9 | it('should format single URL correctly', () => { 10 | const checkInfos: CheckInfo[] = [ 11 | { 12 | name: 'Test Report 1', 13 | url: 'https://github.com/owner/repo/runs/123' 14 | } 15 | ] 16 | 17 | const reportUrls = checkInfos.map(info => info.url).join('\n') 18 | expect(reportUrls).toBe('https://github.com/owner/repo/runs/123') 19 | }) 20 | 21 | it('should format multiple URLs with newline separation', () => { 22 | const checkInfos: CheckInfo[] = [ 23 | { 24 | name: 'Test Report 1', 25 | url: 'https://github.com/owner/repo/runs/123' 26 | }, 27 | { 28 | name: 'Test Report 2', 29 | url: 'https://github.com/owner/repo/runs/456' 30 | }, 31 | { 32 | name: 'Test Report 3', 33 | url: 'https://github.com/owner/repo/runs/789' 34 | } 35 | ] 36 | 37 | const reportUrls = checkInfos.map(info => info.url).join('\n') 38 | expect(reportUrls).toBe( 39 | 'https://github.com/owner/repo/runs/123\nhttps://github.com/owner/repo/runs/456\nhttps://github.com/owner/repo/runs/789' 40 | ) 41 | }) 42 | 43 | it('should handle empty checkInfos array', () => { 44 | const checkInfos: CheckInfo[] = [] 45 | 46 | const reportUrls = checkInfos.map(info => 
info.url).join('\n') 47 | expect(reportUrls).toBe('') 48 | }) 49 | }) 50 | -------------------------------------------------------------------------------- /test_results/nested/junit.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /test_results/tests/utils/src/test/java/action/surefire/report/calc/StringUtilsTest.java: -------------------------------------------------------------------------------- 1 | package action.surefire.report.calc; 2 | 3 | import org.junit.Ignore; 4 | import org.junit.Rule; 5 | import org.junit.Test; 6 | import org.junit.rules.ExpectedException; 7 | 8 | import static org.hamcrest.CoreMatchers.equalTo; 9 | import static org.hamcrest.MatcherAssert.assertThat; 10 | import static org.junit.Assert.assertTrue; 11 | 12 | public class StringUtilsTest { 13 | 14 | @Rule 15 | public ExpectedException thrown = ExpectedException.none(); 16 | 17 | @Test 18 | public void require() { 19 | final String output = StringUtils.requireNotBlank("hello"); 20 | assertTrue(output.equals("wrong")); 21 | } 22 | 23 | @Test 24 | public void require_fail() { 25 | thrown.expect(IllegalArgumentException.class); 26 | thrown.expectMessage("This is unexpected"); 27 | StringUtils.requireNotBlank(""); 28 | } 29 | 30 | @Test 31 | public void require_failMsg() { 32 | thrown.expect(IllegalArgumentException.class); 33 | thrown.expectMessage("I really need that input"); 34 | StringUtils.requireNotBlank("", "I really need that input"); 35 | } 36 | 37 | @Ignore 38 | @Test 39 | public void require_fail_null() { 40 | thrown.expect(IllegalArgumentException.class); 41 | thrown.expectMessage("Input='null' didn't match condition."); 42 | StringUtils.requireNotBlank(null); 43 | } 44 | 45 | @Test 46 | public void require_withNullMsg() { 47 | 
thrown.expect(IllegalArgumentException.class); 48 | thrown.expectMessage("Input='' didn't match condition."); 49 | StringUtils.requireNotBlank(""); 50 | } 51 | } -------------------------------------------------------------------------------- /test_results/tests/utils/target/surefire-reports/TEST-action.surefire.report.calc.CalcUtilsTest.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | but was: 5 | at action.surefire.report.calc.CalcUtilsTest.test error handling(CalcUtilsTest.kt:27) 6 | Caused by: java.lang.IllegalArgumentException: Amount must have max 2 non-zero decimal places 7 | at action.surefire.report.calc.CalcUtilsTest.scale(CalcUtilsTest.kt:31) 8 | at action.surefire.report.calc.CalcUtilsTest.access$scale(CalcUtilsTest.kt:9) 9 | at action.surefire.report.calc.CalcUtilsTest.test error handling(CalcUtilsTest.kt:27) 10 | ]]> 11 | 12 | 13 | 16 | but: was <100.11> 17 | at action.surefire.report.calc.CalcUtilsTest.test scale(CalcUtilsTest.kt:15) 18 | ]]> 19 | 20 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "action-junit-report", 3 | "version": "5.3.0", 4 | "private": true, 5 | "description": "junit report action", 6 | "main": "lib/main.js", 7 | "type": "module", 8 | "scripts": { 9 | "build": "tsc", 10 | "format": "prettier --write **/*.ts", 11 | "format-check": "prettier --check **/*.ts", 12 | "format-fix": "eslint --fix src/**.ts", 13 | "lint": "eslint src/**/*.ts", 14 | "package": "ncc build --source-map --license licenses.txt", 15 | "test": "vitest --run --reporter=default", 16 | "all": "npm run build && npm run format && npm run lint && npm run package && npm test" 17 | }, 18 | "repository": { 19 | "type": "git", 20 | "url": "git+https://github.com/mikepenz/action-junit-report" 21 | }, 22 | "bugs": { 23 | "url": 
"https://github.com/mikepenz/action-junit-report/issues" 24 | }, 25 | "keywords": [ 26 | "GitHub", 27 | "Actions", 28 | "Junit", 29 | "test" 30 | ], 31 | "author": "Mike Penz", 32 | "license": "Apache 2.0", 33 | "dependencies": { 34 | "@actions/core": "^1.11.1", 35 | "@actions/github": "^6.0.1", 36 | "@actions/glob": "^0.5.0", 37 | "@octokit/rest": "^22.0.1", 38 | "xml-js": "^1.6.11" 39 | }, 40 | "devDependencies": { 41 | "@eslint/eslintrc": "^3.3.1", 42 | "@eslint/js": "^9.39.1", 43 | "@types/node": "^24.10.1", 44 | "@typescript-eslint/eslint-plugin": "^8.47.0", 45 | "@typescript-eslint/parser": "^8.47.0", 46 | "@vercel/ncc": "^0.38.4", 47 | "eslint": "^9.39.1", 48 | "eslint-import-resolver-typescript": "^4.4.4", 49 | "eslint-plugin-github": "^6.0.0", 50 | "eslint-plugin-import": "^2.32.0", 51 | "eslint-plugin-prettier": "^5.5.4", 52 | "globals": "^16.5.0", 53 | "js-yaml": "^4.1.1", 54 | "nock": "^14.0.10", 55 | "prettier": "^3.6.2", 56 | "typescript": "^5.9.3", 57 | "vitest": "^4.0.8", 58 | "webpack": "^5.103.0" 59 | }, 60 | "overrides": { 61 | "glob": "11.0.1" 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /test_results/tests/utils/src/main/java/action/surefire/report/calc/CalcUtils.java: -------------------------------------------------------------------------------- 1 | package action.surefire.report.calc; 2 | 3 | import static java.math.BigDecimal.ZERO; 4 | import static java.math.RoundingMode.HALF_EVEN; 5 | 6 | import java.math.BigDecimal; 7 | 8 | public final class CalcUtils { 9 | 10 | private CalcUtils() { 11 | // utility class 12 | } 13 | 14 | /** 15 | * Rounds value to 4 decimal places 16 | */ 17 | public static BigDecimal roundPercentageValue(BigDecimal value) { 18 | return value.setScale(4, HALF_EVEN); 19 | } 20 | 21 | public static boolean equalTo(BigDecimal value1, BigDecimal value2) { 22 | return value1.compareTo(value2) == 0; 23 | } 24 | 25 | public static boolean greaterThanEqualTo(BigDecimal 
value1, BigDecimal value2) { 26 | return value1.compareTo(value2) >= 0; 27 | } 28 | 29 | public static BigDecimal positiveOrZero(BigDecimal value) { 30 | return greaterThanEqualTo(value, ZERO) ? value : ZERO; 31 | } 32 | 33 | /** 34 | * @return zero if divisor is 0 35 | */ 36 | public static BigDecimal safeDivide(BigDecimal dividend, BigDecimal divisor, 37 | int decimalPlaces) { 38 | if (equalTo(divisor, ZERO)) { 39 | return ZERO; 40 | } else { 41 | return dividend.divide(divisor, decimalPlaces, HALF_EVEN); 42 | } 43 | } 44 | 45 | /** 46 | * Scales amount to 2 decimal places, throwing error if decimal places overflow 47 | */ 48 | public static BigDecimal scaleAmount(BigDecimal amount) { 49 | if (!(amount.stripTrailingZeros().scale() <= 2)) { 50 | throw new IllegalArgumentException("Amount must have max 2 non-zero decimal places"); 51 | } 52 | return amount.stripTrailingZeros().setScale(2, HALF_EVEN); 53 | } 54 | 55 | public static BigDecimal nullToZero(BigDecimal value) { 56 | if (value != null) { 57 | return value; 58 | } else { 59 | return ZERO; 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependency directory 2 | node_modules 3 | 4 | # Rest pulled from https://github.com/github/gitignore/blob/master/Node.gitignore 5 | # Logs 6 | logs 7 | *.log 8 | npm-debug.log* 9 | yarn-debug.log* 10 | yarn-error.log* 11 | lerna-debug.log* 12 | 13 | # Diagnostic reports (https://nodejs.org/api/report.html) 14 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 15 | 16 | # Runtime data 17 | pids 18 | *.pid 19 | *.seed 20 | *.pid.lock 21 | 22 | # Directory for instrumented libs generated by jscoverage/JSCover 23 | lib-cov 24 | 25 | # Coverage directory used by tools like istanbul 26 | coverage 27 | *.lcov 28 | 29 | # nyc test coverage 30 | .nyc_output 31 | 32 | # Grunt intermediate storage 
(https://gruntjs.com/creating-plugins#storing-task-files) 33 | .grunt 34 | 35 | # Bower dependency directory (https://bower.io/) 36 | bower_components 37 | 38 | # node-waf configuration 39 | .lock-wscript 40 | 41 | # Compiled binary addons (https://nodejs.org/api/addons.html) 42 | build/Release 43 | 44 | # Dependency directories 45 | jspm_packages/ 46 | 47 | # TypeScript v1 declaration files 48 | typings/ 49 | 50 | # TypeScript cache 51 | *.tsbuildinfo 52 | 53 | # Optional npm cache directory 54 | .npm 55 | 56 | # Optional eslint cache 57 | .eslintcache 58 | 59 | # Optional REPL history 60 | .node_repl_history 61 | 62 | # Output of 'npm pack' 63 | *.tgz 64 | 65 | # Yarn Integrity file 66 | .yarn-integrity 67 | 68 | # dotenv environment variables file 69 | .env 70 | .env.test 71 | 72 | # parcel-bundler cache (https://parceljs.org/) 73 | .cache 74 | 75 | # next.js build output 76 | .next 77 | 78 | # nuxt.js build output 79 | .nuxt 80 | 81 | # vuepress build output 82 | .vuepress/dist 83 | 84 | # Serverless directories 85 | .serverless/ 86 | 87 | # FuseBox cache 88 | .fusebox/ 89 | 90 | # DynamoDB Local files 91 | .dynamodb/ 92 | 93 | # OS metadata 94 | .DS_Store 95 | Thumbs.db 96 | 97 | # Ignore built ts files 98 | __tests__/runner/* 99 | lib/**/* 100 | 101 | # Jetbrains files 102 | .idea/ -------------------------------------------------------------------------------- /test_results/multiple_failures/test_multiple_failures.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 10 | 11 | 12 | 15 | 16 | 17 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to 
your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '15 2 * * 6' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'javascript' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v6 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v4 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v4 58 | 59 | - name: Perform CodeQL Analysis 60 | uses: github/codeql-action/analyze@v4 61 | -------------------------------------------------------------------------------- /test_results/issues/testDisabled.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /test_results/junit-web-test/expected.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | (packages/test-runner-junit-reporter/test/fixtures/multiple/simple-test.js:15:29)]]> 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /test_results/nextest/basic.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | thread 'test_failure' panicked at tests/parry3d.rs:154:5: 6 | assertion `left == right` failed: 0 must equal 1 7 | left: 0 8 | right: 1 9 | note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace 10 | 11 | running 1 test 12 | test test_failure ... FAILED 13 | 14 | failures: 15 | 16 | failures: 17 | test_failure 18 | 19 | test result: FAILED. 
0 passed; 1 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.66s 20 | 21 | 22 | thread 'test_failure' panicked at tests/parry3d.rs:154:5: 23 | assertion `left == right` failed: 0 must equal 1 24 | left: 0 25 | right: 1 26 | note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace 27 | 28 | 29 | 30 | 31 | running 1 test 32 | test test_simple_navigation ... ok 33 | 34 | test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.70s 35 | 36 | 37 | 38 | 39 | 40 | 41 | running 1 test 42 | test test_annotations ... ok 43 | 44 | test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.74s 45 | 46 | 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /test_results/tests/email/src/test/java/action/surefire/report/email/EmailAddressTest.java: -------------------------------------------------------------------------------- 1 | package action.surefire.report.email; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | import static org.junit.Assert.assertNotEquals; 5 | import static org.junit.Assert.fail; 6 | 7 | import org.junit.Test; 8 | 9 | public class EmailAddressTest { 10 | 11 | @Test 12 | public void shouldBeGoodEnoughForSalesforce() { 13 | EmailAddress.of("abcdefg.hijklmnopqrstuvwxyz!#$%&'*/=?^_+-`{|}~0123456789@host.com"); 14 | } 15 | 16 | @Test 17 | public void shouldNotBeBlank() { 18 | expectException(null); 19 | expectException(""); 20 | expectException(" "); 21 | } 22 | 23 | @Test 24 | public void shouldNotMissComponents() { 25 | expectException("user-without-host@test.com"); 26 | expectException("@host-without-user"); 27 | expectException("just-something-that-i-do-not-what-that-is"); 28 | } 29 | 30 | @Test 31 | public void shouldNotContainLocalHosts() { 32 | expectException("user@host"); 33 | expectException("user@localhost"); 34 | expectException("user@whatever-without-a-tld"); 35 | } 36 | 37 | @Test 38 | 
public void shouldNotContainInternationalizedHostNames() { 39 | expectException("user@ñandú.com.ar"); 40 | } 41 | 42 | @Test 43 | public void shouldAcceptInternationalizedDomainNamesUsingPunycode() { 44 | EmailAddress.of("user@xn--and-6ma2c.com.ar"); 45 | } 46 | 47 | @Test 48 | public void shouldBeStricterThanRfc2821() { 49 | expectException("Abc\\@def@example.com"); 50 | expectException("Fred\\ Bloggs@example.com"); 51 | expectException("Joe.\\Blow@example.com"); 52 | expectException("\"Fred Bloggs\"@example.com"); 53 | } 54 | 55 | @Test 56 | public void shouldBeStricterThanRfc2822() { 57 | expectException("aba@bab.com"); 58 | expectException("выфавы@asdasd.com"); 59 | expectException("ñañlkjdf@hotmail.com"); 60 | expectException("test+§@test.com"); 61 | expectException("可扩展@资本.com"); 62 | } 63 | 64 | @Test 65 | public void shouldNotAllowDotsInWeirdPlaces() { 66 | expectException(".user@host.com"); 67 | expectException("user.@host.com"); 68 | expectException(".user.@host.com"); 69 | expectException("user..name@host.com"); 70 | } 71 | 72 | private void expectException(String address) { 73 | try { 74 | EmailAddress.of(address); 75 | fail(String.format("Address %s should have thrown InvalidEmailAddressException", address)); 76 | } catch (IllegalArgumentException ex) { 77 | // OK 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /test_results/junit-web-test/expectedRetries.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | (packages/test-runner-junit-reporter/test/fixtures/multiple/simple-test.js:15:29)]]> 22 | 23 | 24 | (packages/test-runner-junit-reporter/test/fixtures/multiple/simple-test.js:15:29)]]> 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /test_results/tests/pom.xml: 
-------------------------------------------------------------------------------- 1 | 2 | 4 | 4.0.0 5 | 6 | action.surefire.report 7 | tests 8 | pom 9 | 10 | 0.0.1-SNAPSHOT 11 | 12 | 13 | utils 14 | email 15 | 16 | 17 | 18 | UTF-8 19 | 1.8 20 | 1.8 21 | 1.8 22 | 23 | 24 | 25 | 26 | 27 | junit 28 | junit 29 | 4.13.1 30 | 31 | 32 | org.hamcrest 33 | hamcrest-all 34 | 1.3 35 | test 36 | 37 | 38 | org.jetbrains.kotlin 39 | kotlin-stdlib-jdk8 40 | ${kotlin.version} 41 | 42 | 43 | org.jetbrains.kotlin 44 | kotlin-test-junit 45 | ${kotlin.version} 46 | test 47 | 48 | 49 | commons-io 50 | commons-io 51 | 2.7 52 | 53 | 54 | org.assertj 55 | assertj-core 56 | 3.11.1 57 | test 58 | 59 | 60 | org.apache.commons 61 | commons-lang3 62 | 3.18 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | org.apache.maven.plugins 71 | maven-surefire-plugin 72 | 2.22.2 73 | 74 | -Duser.language=en 75 | true 76 | 77 | 78 | 79 | 80 | 81 | 82 | -------------------------------------------------------------------------------- /eslint.config.mjs: -------------------------------------------------------------------------------- 1 | import typescriptEslint from "@typescript-eslint/eslint-plugin"; 2 | import globals from "globals"; 3 | import tsParser from "@typescript-eslint/parser"; 4 | import path from "node:path"; 5 | import { fileURLToPath } from "node:url"; 6 | import js from "@eslint/js"; 7 | import { FlatCompat } from "@eslint/eslintrc"; 8 | import github from "eslint-plugin-github"; 9 | 10 | const __filename = fileURLToPath(import.meta.url); 11 | const __dirname = path.dirname(__filename); 12 | const compat = new FlatCompat({ 13 | baseDirectory: __dirname, 14 | recommendedConfig: js.configs.recommended, 15 | allConfig: js.configs.all 16 | }); 17 | 18 | export default [{ 19 | ignores: ["**/dist/", "**/lib/", "**/node_modules/"] 20 | }, { 21 | 22 | files: ["src/**.ts", "__tests__/**.ts"], 23 | 24 | plugins: { 25 | github, 26 | "@typescript-eslint": typescriptEslint 27 | }, 28 | 29 | languageOptions: { 30 | 
globals: { 31 | ...globals.node 32 | }, 33 | 34 | parser: tsParser, 35 | ecmaVersion: 9, 36 | sourceType: "module", 37 | 38 | parserOptions: { 39 | project: "./tsconfig.json" 40 | } 41 | }, 42 | 43 | rules: { 44 | "filenames/match-regex": "off", 45 | "eslint-comments/no-use": "off", 46 | "import/no-namespace": "off", 47 | "import/named": "off", 48 | "no-unused-vars": "off", 49 | "sort-imports": "off", 50 | "i18n-text/no-en": "off", 51 | "@typescript-eslint/no-unused-vars": "error", 52 | 53 | "@typescript-eslint/explicit-member-accessibility": ["error", { 54 | accessibility: "no-public" 55 | }], 56 | 57 | "@typescript-eslint/no-require-imports": "error", 58 | "@typescript-eslint/array-type": "error", 59 | "@typescript-eslint/await-thenable": "error", 60 | "@typescript-eslint/ban-ts-comment": "error", 61 | camelcase: "off", 62 | "@typescript-eslint/consistent-type-assertions": "error", 63 | 64 | "@typescript-eslint/explicit-function-return-type": ["error", { 65 | allowExpressions: true 66 | }], 67 | 68 | "@typescript-eslint/no-array-constructor": "error", 69 | "@typescript-eslint/no-empty-interface": "error", 70 | "@typescript-eslint/no-explicit-any": "error", 71 | "@typescript-eslint/no-extraneous-class": "error", 72 | "@typescript-eslint/no-for-in-array": "error", 73 | "@typescript-eslint/no-inferrable-types": "error", 74 | "@typescript-eslint/no-misused-new": "error", 75 | "@typescript-eslint/no-namespace": "error", 76 | "@typescript-eslint/no-non-null-assertion": "warn", 77 | "@typescript-eslint/no-unnecessary-qualifier": "error", 78 | "@typescript-eslint/no-unnecessary-type-assertion": "error", 79 | "@typescript-eslint/no-useless-constructor": "error", 80 | "@typescript-eslint/no-var-requires": "error", 81 | "@typescript-eslint/prefer-for-of": "warn", 82 | "@typescript-eslint/prefer-function-type": "warn", 83 | "@typescript-eslint/prefer-includes": "error", 84 | "@typescript-eslint/prefer-string-starts-ends-with": "error", 85 | 
"@typescript-eslint/promise-function-async": "error", 86 | "@typescript-eslint/require-array-sort-compare": "error", 87 | "@typescript-eslint/restrict-plus-operands": "error", 88 | semi: "off", 89 | "@typescript-eslint/unbound-method": "error" 90 | }, 91 | 92 | settings: { 93 | "import/resolver": { 94 | "typescript": { 95 | "alwaysTryTypes": true 96 | }, 97 | "node": { 98 | "extensions": [".js", ".jsx", ".ts", ".tsx"], 99 | "moduleDirectory": ["src", "node_modules"] 100 | } 101 | } 102 | } 103 | }]; -------------------------------------------------------------------------------- /test_results/tests/utils/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | tests 5 | action.surefire.report 6 | 0.0.1-SNAPSHOT 7 | 8 | 4.0.0 9 | utils 10 | 11 | 12 | 1.3.72 13 | 14 | 15 | 16 | 17 | org.apache.commons 18 | commons-lang3 19 | 20 | 21 | junit 22 | junit 23 | test 24 | 25 | 26 | org.hamcrest 27 | hamcrest-all 28 | test 29 | 30 | 31 | org.jetbrains.kotlin 32 | kotlin-stdlib-jdk8 33 | ${kotlin.version} 34 | 35 | 36 | org.jetbrains.kotlin 37 | kotlin-test 38 | ${kotlin.version} 39 | test 40 | 41 | 42 | org.jetbrains.kotlin 43 | kotlin-stdlib-jdk8 44 | ${kotlin.version} 45 | 46 | 47 | 48 | 49 | 50 | 51 | org.jetbrains.kotlin 52 | kotlin-maven-plugin 53 | ${kotlin.version} 54 | 55 | 56 | compile 57 | compile 58 | 59 | compile 60 | 61 | 62 | 63 | test-compile 64 | test-compile 65 | 66 | test-compile 67 | 68 | 69 | 70 | 71 | 1.8 72 | 73 | 74 | 75 | org.apache.maven.plugins 76 | maven-compiler-plugin 77 | 78 | 79 | compile 80 | compile 81 | 82 | compile 83 | 84 | 85 | 86 | testCompile 87 | test-compile 88 | 89 | testCompile 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | -------------------------------------------------------------------------------- /test_results/tests/email/target/surefire-reports/TEST-action.surefire.report.email.EmailAddressTest.xml: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | action.surefire.report.email.InvalidEmailAddressException: Invalid email address 'user@ñandú.com.ar' 5 | at action.surefire.report.email.EmailAddressTest.expectException(EmailAddressTest.java:74) 6 | at action.surefire.report.email.EmailAddressTest.shouldNotContainInternationalizedHostNames(EmailAddressTest.java:39) 7 | 8 | 9 | 10 | action.surefire.report.email.InvalidEmailAddressException: Invalid email address 'Abc\@def@example.com' 11 | at action.surefire.report.email.EmailAddressTest.expectException(EmailAddressTest.java:74) 12 | at action.surefire.report.email.EmailAddressTest.shouldBeStricterThanRfc2821(EmailAddressTest.java:49) 13 | 😋 14 | 15 | 16 | 17 | java.lang.AssertionError: Address aba@bab.com should have thrown InvalidEmailAddressException 18 | at action.surefire.report.email.EmailAddressTest.expectException(EmailAddressTest.java:75) 19 | at action.surefire.report.email.EmailAddressTest.shouldBeStricterThanRfc2822(EmailAddressTest.java:57) 20 | 21 | 22 | 23 | 24 | action.surefire.report.email.InvalidEmailAddressException: Email address must not be null, empty, or blanks 25 | at action.surefire.report.email.EmailAddressTest.expectException(EmailAddressTest.java:74) 26 | at action.surefire.report.email.EmailAddressTest.shouldNotBeBlank(EmailAddressTest.java:18) 27 | 28 | 29 | 30 | action.surefire.report.email.InvalidEmailAddressException: Invalid email address 'user@host' 31 | at action.surefire.report.email.EmailAddressTest.expectException(EmailAddressTest.java:74) 32 | at action.surefire.report.email.EmailAddressTest.shouldNotContainLocalHosts(EmailAddressTest.java:32) 33 | 34 | 35 | 36 | java.lang.AssertionError: Address user-without-host@test.com should have thrown InvalidEmailAddressException 37 | at action.surefire.report.email.EmailAddressTest.expectException(EmailAddressTest.java:75) 38 | at 
action.surefire.report.email.EmailAddressTest.shouldNotMissComponents(EmailAddressTest.java:25) 39 | 40 | 41 | 42 | action.surefire.report.email.InvalidEmailAddressException: Invalid email address '.user@host.com' 43 | at action.surefire.report.email.EmailAddressTest.expectException(EmailAddressTest.java:74) 44 | at action.surefire.report.email.EmailAddressTest.shouldNotAllowDotsInWeirdPlaces(EmailAddressTest.java:66) 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: 'CI' 2 | on: 3 | push: 4 | tags: 5 | - '*' 6 | pull_request: 7 | workflow_dispatch: 8 | 9 | jobs: 10 | build: 11 | if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v6 16 | - uses: actions/setup-node@v6 17 | with: 18 | node-version: '24' 19 | - name: Install NPM 20 | run: | 21 | npm install 22 | - name: Run NPM 23 | run: | 24 | npm run all 25 | env: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | 28 | test: 29 | if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' 30 | runs-on: ubuntu-latest 31 | steps: 32 | - name: Checkout 33 | uses: actions/checkout@v6 34 | 35 | - uses: actions/setup-node@v6 36 | with: 37 | node-version: '24' 38 | 39 | - name: Test JUnit test import 40 | uses: ./ 41 | with: 42 | check_name: Example JUnit Test Report 43 | report_paths: '**/surefire-reports/TEST-*.xml' 44 | include_passed: true 45 | include_skipped: false 46 | detailed_summary: true 47 | summary: '
Application (src/applications)
test
' 48 | job_summary_text: | 49 | # Test Header for Example Junit Test Report 50 | 51 | This is some custom body to show in the summary. 52 | check_title_template: '{{SUITE_NAME}} | {{TEST_NAME}}' 53 | annotate_only: ${{ github.event_name == 'workflow_dispatch' }} 54 | 55 | - name: Test PyTest test import 56 | uses: ./ 57 | with: 58 | check_name: Example Pytest Report 59 | report_paths: test_results/python/report_flaky.xml 60 | include_passed: true 61 | detailed_summary: true 62 | comment: true 63 | annotate_only: ${{ github.event_name == 'workflow_dispatch' }} 64 | 65 | - name: Test Ungrouped test import 66 | uses: ./ 67 | with: 68 | check_name: Example Ungrouped Test Report 69 | report_paths: test_results/multiple/*.xml 70 | verbose_summary: false 71 | include_passed: true 72 | detailed_summary: false 73 | group_reports: false 74 | comment: true 75 | 76 | - name: Test JUnit flaky test import 77 | uses: ./ 78 | with: 79 | check_name: Example JUnit Flaky Report 80 | report_paths: test_results/junit-web-test/expectedRetries.xml 81 | include_passed: true 82 | check_retries: true 83 | job_summary: true 84 | detailed_summary: true 85 | flaky_summary: true 86 | annotate_only: ${{ github.event_name == 'workflow_dispatch' }} 87 | 88 | - name: Test Multi test import 89 | uses: ./ 90 | with: 91 | check_name: |- 92 | Example Multi JUnit Test Report 93 | Example Multi Pytest Report 94 | report_paths: |- 95 | **/surefire-reports/TEST-*.xml 96 | test_results/python/report.xml 97 | summary: |- 98 |
Application (src/applications)
multi test
99 | Custom Summary 100 | check_title_template: |- 101 | {{SUITE_NAME}} | {{TEST_NAME}} 102 | \n 103 | transformers: | 104 | [{"searchValue":"::","replaceValue":"/"}] 105 | annotate_only: ${{ github.event_name == 'workflow_dispatch' }} 106 | 107 | - name: Test Nested JUnit test import 108 | uses: ./ 109 | with: 110 | check_name: Example Nested JUnit Test Report 111 | report_paths: 'test_results/nested/multi-level.xml' 112 | include_passed: true 113 | detailed_summary: true 114 | group_suite: true 115 | comment: true 116 | skip_annotations: true 117 | check_title_template: '{{TEST_NAME}}' 118 | annotate_only: ${{ github.event_name == 'workflow_dispatch' }} 119 | 120 | release: 121 | if: startsWith(github.ref, 'refs/tags/') 122 | runs-on: ubuntu-latest 123 | steps: 124 | - name: Checkout 125 | uses: actions/checkout@v6 126 | 127 | - name: "Build Changelog" 128 | id: github_release 129 | uses: mikepenz/release-changelog-builder-action@v6 130 | with: 131 | configuration: ".github/config/configuration.json" 132 | ignorePreReleases: true 133 | env: 134 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 135 | 136 | - name: Create Release 137 | uses: mikepenz/action-gh-release@v1 138 | with: 139 | body: ${{steps.github_release.outputs.changelog}} 140 | prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-b') || contains(github.ref, '-a') }} 141 | env: 142 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 143 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import * as core from '@actions/core' 2 | import {Transformer} from './testParser.js' 3 | import {SummaryTableRow} from '@actions/core/lib/summary.js' 4 | 5 | export function retrieve(name: string, items: string[], index: number, total: number): string { 6 | if (total > 1) { 7 | if (items.length !== 0 && items.length !== total) { 8 | core.warning(`${name} has a different number of 
items than the 'reportPaths' input. This is usually a bug.`) 9 | } 10 | 11 | if (items.length === 0) { 12 | return '' 13 | } else if (items.length === 1) { 14 | return items[0].replace('\n', '') 15 | } else if (items.length > index) { 16 | return items[index].replace('\n', '') 17 | } else { 18 | core.error(`${name} has no valid config for position ${index}.`) 19 | return '' 20 | } 21 | } else if (items.length === 1) { 22 | return items[0].replace('\n', '') 23 | } else { 24 | return '' 25 | } 26 | } 27 | 28 | /** 29 | * Reads in the configuration from the JSON file 30 | */ 31 | export function readTransformers(raw: string | undefined): Transformer[] { 32 | if (!raw) { 33 | return [] 34 | } 35 | try { 36 | const transformers: Transformer[] = JSON.parse(raw) 37 | for (const transformer of transformers) { 38 | try { 39 | transformer.regex = new RegExp(transformer.searchValue.replace('\\\\', '\\'), 'gu') 40 | } catch (error: unknown) { 41 | core.warning(`⚠️ Bad replacer regex: ${transformer.searchValue} (${error})`) 42 | } 43 | } 44 | return transformers 45 | } catch (error: unknown) { 46 | core.info(`⚠️ Transformers provided, but they couldn't be parsed. Fallback to Defaults. 
(${error})`) 47 | core.debug(` Provided input: ${raw}`) 48 | return [] 49 | } 50 | } 51 | 52 | export function applyTransformer(transformer: Transformer, string: string): string { 53 | const regExp = transformer.regex 54 | if (regExp) { 55 | return string.replace(regExp, transformer.replaceValue) 56 | } else { 57 | return string.replace(transformer.searchValue, transformer.replaceValue) 58 | } 59 | } 60 | 61 | /** 62 | * Function extracted from: https://github.com/actions/toolkit/blob/main/packages/core/src/summary.ts#L229 63 | */ 64 | export function buildLink(text: string, href: string): string { 65 | return wrap('a', text, {href}) 66 | } 67 | 68 | /** 69 | * Function extracted from: https://github.com/actions/toolkit/blob/main/packages/core/src/summary.ts#L229 70 | */ 71 | export function buildList(items: string[], ordered = false): string { 72 | const tag = ordered ? 'ol' : 'ul' 73 | const listItems = items.map(item => wrap('li', item)).join('') 74 | const element = wrap(tag, listItems) 75 | return element 76 | } 77 | 78 | /** 79 | * Function extracted from: https://github.com/actions/toolkit/blob/main/packages/core/src/summary.ts#L229 80 | */ 81 | export function buildTable(rows: SummaryTableRow[]): string { 82 | const tableBody = rows 83 | .map(row => { 84 | const cells = row 85 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 86 | .map((cell: any) => { 87 | if (typeof cell === 'string') { 88 | return wrap('td', cell) 89 | } 90 | 91 | const {header, data, colspan, rowspan} = cell 92 | const tag = header ? 
'th' : 'td' 93 | const attrs = { 94 | ...(colspan && {colspan}), 95 | ...(rowspan && {rowspan}) 96 | } 97 | 98 | return wrap(tag, data, attrs) 99 | }) 100 | .join('') 101 | 102 | return wrap('tr', cells) 103 | }) 104 | .join('') 105 | 106 | return wrap('table', tableBody) 107 | } 108 | 109 | /** 110 | * Wraps content in an HTML tag, adding any HTML attributes 111 | * 112 | * @param {string} tag HTML tag to wrap 113 | * @param {string | null} content content within the tag 114 | * @param {[attribute: string]: string} attrs key-value list of HTML attributes to add 115 | * 116 | * @returns {string} content wrapped in HTML element 117 | */ 118 | function wrap(tag: string, content: string | null, attrs: {[attribute: string]: string} = {}): string { 119 | const htmlAttrs = Object.entries(attrs) 120 | .map(([key, value]) => ` ${key}="${value}"`) 121 | .join('') 122 | 123 | if (!content) { 124 | return `<${tag}${htmlAttrs}>` 125 | } 126 | 127 | return `<${tag}${htmlAttrs}>${content}` 128 | } 129 | 130 | /** 131 | * Removes a specified prefix from the beginning of a string. 132 | * 133 | * @param {string} str - The original string. 134 | * @param {string} prefix - The prefix to be removed. 135 | * @returns {string} - The string without the prefix if it was present, otherwise the original string. 136 | */ 137 | export function removePrefix(str: string, prefix: string): string { 138 | if (prefix.length === 0) return str 139 | if (str.startsWith(prefix)) { 140 | return str.slice(prefix.length) 141 | } 142 | return str 143 | } 144 | 145 | /** 146 | * Formats a time in seconds into a human-readable string representation. 147 | * If the input is 0, returns an empty string. 148 | * Otherwise, converts seconds to days, hours, minutes, seconds, and milliseconds, 149 | * and includes only non-zero units in the output. 150 | * 151 | * @param {number} timeS - The time in seconds to format. 152 | * @returns {string} A formatted string representation of the time (e.g., "1h 30m 45s"). 
153 | */ 154 | export function toFormatedTime(timeS: number): string { 155 | if (timeS === 0) return '' 156 | let ms = timeS * 1000 157 | 158 | if (ms < 0) ms = -ms 159 | const time = { 160 | day: Math.floor(ms / 86400000), 161 | h: Math.floor(ms / 3600000) % 24, 162 | m: Math.floor(ms / 60000) % 60, 163 | s: Math.floor(ms / 1000) % 60, 164 | ms: Math.floor(ms) % 1000 165 | } 166 | return Object.entries(time) 167 | .filter(val => val[1] !== 0) 168 | .map(([key, val]) => `${val}${key}${val > 0 && key === 'day' ? 's' : ''}`) 169 | .join(' ') 170 | } 171 | -------------------------------------------------------------------------------- /__tests__/table.test.ts: -------------------------------------------------------------------------------- 1 | import {parseTestReports} from '../src/testParser.js' 2 | import {buildSummaryTables} from '../src/table.js' 3 | import {describe, expect, it} from 'vitest' 4 | 5 | /** 6 | * Copyright Mike Penz 7 | */ 8 | 9 | const NORMAL_TABLE = [ 10 | [ 11 | { 12 | data: '', 13 | header: true 14 | }, 15 | { 16 | data: 'Tests', 17 | header: true 18 | }, 19 | { 20 | data: 'Passed ✅', 21 | header: true 22 | }, 23 | { 24 | data: 'Skipped', 25 | header: true 26 | }, 27 | { 28 | data: 'Failed', 29 | header: true 30 | }, 31 | { 32 | data: 'Time ⏱', 33 | header: true 34 | } 35 | ], 36 | ['checkName', '3 ran', '3 passed', '0 skipped', '0 failed', '100ms'] 37 | ] 38 | const FLAKY_TABLE = [ 39 | [ 40 | { 41 | data: 'Test', 42 | header: true 43 | }, 44 | { 45 | data: 'Retries', 46 | header: true 47 | }, 48 | { 49 | data: 'Time ⏱', 50 | header: true 51 | } 52 | ] 53 | ] 54 | 55 | describe('buildSummaryTables', () => { 56 | it('should build simple tables', async () => { 57 | const testResult = await parseTestReports( 58 | 'checkName', 59 | 'summary', 60 | 'test_results/nested/multi-level.xml', 61 | '*', 62 | true, 63 | true, 64 | true, 65 | [], 66 | '{{SUITE_NAME}}/{{TEST_NAME}}', 67 | '/' 68 | ) 69 | 70 | const [table, detailTable, flakyTable] = 
buildSummaryTables([testResult], true, true, true, true, true, false) 71 | 72 | expect(table).toStrictEqual(NORMAL_TABLE) 73 | expect(detailTable).toStrictEqual([ 74 | [ 75 | { 76 | data: 'Test', 77 | header: true 78 | }, 79 | { 80 | data: 'Result', 81 | header: true 82 | }, 83 | { 84 | data: 'Time ⏱', 85 | header: true 86 | } 87 | ], 88 | [ 89 | { 90 | data: 'checkName', 91 | colspan: '3' 92 | } 93 | ], 94 | ['ABC-0199: XMPP Ping/PingIntegrationTest.pingAsync (Normal)', '✅ passed', '54ms'], 95 | ['ABC-0199: XMPP Ping/PingIntegrationTest.pingServer (Normal)', '✅ passed', ''], 96 | [ 97 | 'ABC-0045: Multi-User Chat/MultiUserIntegrationTest.mucRoleTestForReceivingModerator (Normal)', 98 | '✅ passed', 99 | '46ms' 100 | ] 101 | ]) 102 | expect(flakyTable).toStrictEqual(FLAKY_TABLE) 103 | }) 104 | 105 | it('should skip only successful tables', async () => { 106 | const testResult = await parseTestReports( 107 | 'checkName', 108 | 'summary', 109 | 'test_results/nested/multi-level.xml', 110 | '*', 111 | true, 112 | true, 113 | true, 114 | [], 115 | '{{SUITE_NAME}}/{{TEST_NAME}}', 116 | '/' 117 | ) 118 | 119 | const [table, detailTable, flakyTable] = buildSummaryTables([testResult], true, true, true, true, true, true) 120 | expect(table).toStrictEqual([]) 121 | expect(detailTable).toStrictEqual([]) 122 | expect(flakyTable).toStrictEqual([]) 123 | }) 124 | 125 | it('should exclude skipped tests when includeSkipped is false', async () => { 126 | const testResult = await parseTestReports( 127 | 'checkName', 128 | 'summary', 129 | 'test_results/tests/utils/target/surefire-reports/TEST-action.surefire.report.calc.StringUtilsTest.xml', // This file has skipped tests 130 | '*', 131 | true, 132 | true, 133 | true, 134 | [], 135 | '{{SUITE_NAME}}/{{TEST_NAME}}', 136 | '/' 137 | ) 138 | 139 | // Test with includeSkipped = false (should exclude skipped tests from detailed table) 140 | const [, detailTable] = buildSummaryTables([testResult], true, false, true, false, false, false) 141 
| 142 | // Check that the detail table doesn't include skipped tests 143 | const flatResults = detailTable.flat() 144 | const hasSkippedTests = flatResults.some(cell => typeof cell === 'string' && cell.includes('⚠️ skipped')) 145 | expect(hasSkippedTests).toBe(false) 146 | 147 | // Test with includeSkipped = true (should include skipped tests in detailed table) 148 | const [, detailTableWithSkipped] = buildSummaryTables([testResult], true, true, true, false, false, false) 149 | 150 | // Check that the detail table includes skipped tests 151 | const flatResultsWithSkipped = detailTableWithSkipped.flat() 152 | const hasSkippedTestsIncluded = flatResultsWithSkipped.some( 153 | cell => typeof cell === 'string' && cell.includes('⚠️ skipped') 154 | ) 155 | expect(hasSkippedTestsIncluded).toBe(true) 156 | }) 157 | 158 | it('should group detail tables', async () => { 159 | const testResult = await parseTestReports( 160 | 'checkName', 161 | 'summary', 162 | 'test_results/nested/multi-level.xml', 163 | '*', 164 | true, 165 | true, 166 | true, 167 | [], 168 | '{{SUITE_NAME}}/{{TEST_NAME}}', 169 | '/' 170 | ) 171 | 172 | const [table, detailTable, flakyTable] = buildSummaryTables([testResult], true, true, true, true, true, false, true) 173 | 174 | expect(table).toStrictEqual(NORMAL_TABLE) 175 | expect(detailTable).toStrictEqual([ 176 | [ 177 | { 178 | data: 'Test', 179 | header: true 180 | }, 181 | { 182 | data: 'Result', 183 | header: true 184 | }, 185 | { 186 | data: 'Time ⏱', 187 | header: true 188 | } 189 | ], 190 | [ 191 | { 192 | data: 'checkName', 193 | colspan: '3' 194 | } 195 | ], 196 | [ 197 | { 198 | data: 'ABC-0199: XMPP Ping', 199 | colspan: '3' 200 | } 201 | ], 202 | ['ABC-0199: XMPP Ping/PingIntegrationTest.pingAsync (Normal)', '✅ passed', '54ms'], 203 | ['ABC-0199: XMPP Ping/PingIntegrationTest.pingServer (Normal)', '✅ passed', ''], 204 | [ 205 | { 206 | data: 'ABC-0045: Multi-User Chat', 207 | colspan: '3' 208 | } 209 | ], 210 | [ 211 | 'ABC-0045: Multi-User 
Chat/MultiUserIntegrationTest.mucRoleTestForReceivingModerator (Normal)', 212 | '✅ passed', 213 | '46ms' 214 | ] 215 | ]) 216 | expect(flakyTable).toStrictEqual(FLAKY_TABLE) 217 | }) 218 | }) 219 | -------------------------------------------------------------------------------- /test_results/tests/utils/target/surefire-reports/TEST-action.surefire.report.calc.StringUtilsTest.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | java.lang.AssertionError: 6 | 7 | Expected: (an instance of java.lang.IllegalArgumentException and exception with message a string containing "This is unexpected") 8 | but: exception with message a string containing "This is unexpected" message was "Input='' didn't match condition." 9 | Stacktrace was: java.lang.IllegalArgumentException: Input='' didn't match condition. 10 | at action.surefire.report.calc.StringUtils.requireNotBlank(StringUtils.java:25) 11 | at action.surefire.report.calc.StringUtils.requireNotBlank(StringUtils.java:18) 12 | at action.surefire.report.calc.StringUtilsTest.require_fail(StringUtilsTest.java:27) 13 | at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) 14 | at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) 15 | at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) 16 | at java.lang.reflect.Method.invoke(Method.java:498) 17 | at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) 18 | at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) 19 | at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) 20 | at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) 21 | at org.junit.rules.ExpectedException$ExpectedExceptionStatement.evaluate(ExpectedException.java:258) 22 | at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) 23 | at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) 24 | at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) 25 | at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) 26 | at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) 27 | at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) 28 | at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) 29 | at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 30 | at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) 31 | at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) 32 | at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) 33 | at org.junit.runners.ParentRunner.run(ParentRunner.java:413) 34 | at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:365) 35 | at org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:273) 36 | at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:238) 37 | at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:159) 38 | at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:384) 39 | at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:345) 40 | at org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:126) 41 | at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:418) 42 | 43 | 44 | 45 | 46 | 47 | java.lang.AssertionError 48 | at action.surefire.report.calc.StringUtilsTest.require(StringUtilsTest.java:20) 49 | 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /action.yml: -------------------------------------------------------------------------------- 1 | name: 'JUnit Report Action' 2 | description: 'Report JUnit test results 
as annotations on Github Pull Request [junit]' 3 | branding: 4 | icon: 'check-circle' 5 | color: 'green' 6 | inputs: 7 | token: 8 | description: 'Specify the token to use to publish the check.' 9 | required: false 10 | default: ${{ github.token }} 11 | github_token: 12 | description: 'Deprecated syntax to specify github token.' 13 | required: false 14 | report_paths: 15 | description: 'Xml report paths in glob format' 16 | required: false 17 | default: '**/junit-reports/TEST-*.xml' 18 | group_reports: 19 | description: 'Defines if reports are grouped into a combined test result. This ' 20 | required: false 21 | default: 'true' 22 | test_files_prefix: 23 | description: 'Prefix to add to test file paths from report files when annotating' 24 | required: false 25 | default: '' 26 | exclude_sources: 27 | description: 'Comma seperated list of source folders to ignore for lookup' 28 | required: false 29 | annotate_only: 30 | description: 'Enable to only annotate the results on the files, will not create a check run.' 31 | required: false 32 | default: 'false' 33 | check_annotations: 34 | description: 'Defines if the checks will include annotations (This is different than `annotate_only`).' 35 | required: false 36 | default: 'true' 37 | update_check: 38 | description: 'Defines if the active check should be updated instead' 39 | required: false 40 | default: 'false' 41 | check_name: 42 | description: 'Check name for test reports.' 43 | required: false 44 | default: 'JUnit Test Report' 45 | commit: 46 | description: 'Commit SHA to update the check status.' 47 | required: false 48 | fail_on_failure: 49 | description: 'Fail the build in case a test failure occurred.' 50 | required: false 51 | default: 'false' 52 | fail_on_parse_error: 53 | description: 'Fail the build if the test report file can not be parsed.' 54 | required: false 55 | default: 'false' 56 | require_tests: 57 | description: 'Fail if no test are found.' 
58 | required: false 59 | default: 'false' 60 | require_passed_tests: 61 | description: 'Fail if no passed test are found.' 62 | required: false 63 | default: 'false' 64 | include_passed: 65 | description: 'Include passed tests in the report' 66 | required: false 67 | default: 'false' 68 | include_skipped: 69 | description: 'Include skipped tests in the report' 70 | required: false 71 | default: 'true' 72 | check_title_template: 73 | description: |- 74 | Template to configure the title format. Placeholders: {{FILE_NAME}}, {{SUITE_NAME}}, {{TEST_NAME}}, {{CLASS_NAME}}, {{BREAD_CRUMB}}. 75 | required: false 76 | bread_crumb_delimiter: 77 | description: |- 78 | Defines the delimiter characters between the breadcrumb elements. Defaults to: `/`. 79 | required: false 80 | default: '/' 81 | summary: 82 | description: 'Additional text to summary output' 83 | required: false 84 | default: '' 85 | check_retries: 86 | description: 'If a testcase is retried, ignore the original failure.' 87 | required: false 88 | default: 'false' 89 | transformers: 90 | description: 'Provide a regex pattern and target pattern' 91 | required: false 92 | default: '[]' 93 | job_summary: 94 | description: 'Enables the publishing of a JOB_SUMMARY with the report.' 95 | required: false 96 | default: 'true' 97 | job_summary_text: 98 | description: 'Additional text to include in the job summary prior to the tables' 99 | required: false 100 | default: '' 101 | detailed_summary: 102 | description: 'Include table with all test results in summary' 103 | required: false 104 | default: 'false' 105 | flaky_summary: 106 | description: 'Include table with all flaky results in summary' 107 | required: false 108 | default: 'false' 109 | verbose_summary: 110 | description: 'Include note of missing test annotations in summary.' 
111 | required: false 112 | default: 'true' 113 | skip_success_summary: 114 | description: 'Skips summaries that would not contain failed tests' 115 | required: false 116 | default: 'false' 117 | include_empty_in_summary: 118 | description: 'Include entries in summaries that have 0 count' 119 | required: false 120 | default: 'true' 121 | include_time_in_summary: 122 | description: 'Include time in summaries' 123 | required: false 124 | default: 'false' 125 | simplified_summary: 126 | description: 'Use icons instead of text to indicate status in summary' 127 | required: false 128 | default: 'false' 129 | group_suite: 130 | description: 'If enabled, will group the testcases by test suite in the `detailed_summary`' 131 | required: false 132 | default: 'false' 133 | comment: 134 | description: 'Enables a comment being added to the PR with the summary tables (summary has to be enabled). Default: false' 135 | required: false 136 | default: 'false' 137 | updateComment: 138 | description: 'Enables updating the prior comment if one already exists. Default: true' 139 | required: false 140 | default: 'true' 141 | annotate_notice: 142 | description: 'Annotate passed tests along with warning and failed ones' 143 | required: false 144 | default: 'false' 145 | follow_symlink: 146 | description: 'Enables the file globber to follow symlinks. Default: false' 147 | required: false 148 | default: 'false' 149 | job_name: 150 | description: 'Specify the name of a check to update' 151 | required: false 152 | default: ${{ github.job }} 153 | annotations_limit: 154 | description: 'Specify the limit for annotations. This will also interrupt parsing all test-suites if the limit is reached.' 155 | required: false 156 | skip_annotations: 157 | description: 'Setting this flag will result in no annotations being added to the run.' 
158 | required: false 159 | truncate_stack_traces: 160 | description: 'Truncate stack traces from test output to 2 lines in annotations' 161 | required: false 162 | default: 'true' 163 | resolve_ignore_classname: 164 | description: 'Force ignore test case classname from the xml report (This can help fix issues with some tools/languages)' 165 | required: false 166 | default: 'false' 167 | skip_comment_without_tests: 168 | description: 'Disable commenting if no tests are detected' 169 | required: false 170 | default: 'false' 171 | pr_id: 172 | description: 'PR number to comment on (useful for workflow_run contexts)' 173 | required: false 174 | outputs: 175 | total: 176 | description: 'The total count of all checks' 177 | passed: 178 | description: 'The count of all passed tests' 179 | skipped: 180 | description: 'The count of all skipped tests' 181 | failed: 182 | description: 'The count of all failed tests' 183 | time: 184 | description: 'The total time taken for all the tests' 185 | summary: 186 | description: 'The short summary of the junit report. In html format (as also constructed by GitHub for the summary).' 187 | detailed_summary: 188 | description: 'The full table with all test results in a summary. In html format (as also constructed by GitHub for the summary).' 189 | flaky_summary: 190 | description: 'The full table with all flaky results in a summary. In html format (as also constructed by GitHub for the summary).' 191 | report_url: 192 | description: 'The URL(s) to the test report(s). If multiple reports are created, they are separated by newlines.' 
193 | runs: 194 | using: 'node24' 195 | main: 'dist/index.js' 196 | -------------------------------------------------------------------------------- /src/table.ts: -------------------------------------------------------------------------------- 1 | import * as core from '@actions/core' 2 | import {SummaryTableRow} from '@actions/core/lib/summary.js' 3 | import {ActualTestResult, TestResult} from './testParser.js' 4 | import {toFormatedTime} from './utils.js' 5 | 6 | export function buildSummaryTables( 7 | testResults: TestResult[], 8 | includePassed: boolean, 9 | includeSkipped: boolean, 10 | detailedSummary: boolean, 11 | flakySummary: boolean, 12 | verboseSummary: boolean, 13 | skipSuccessSummary: boolean, 14 | groupSuite = false, 15 | includeEmptyInSummary = true, 16 | includeTimeInSummary = true, 17 | simplifiedSummary = false 18 | ): [SummaryTableRow[], SummaryTableRow[], SummaryTableRow[]] { 19 | // only include a warning icon if there are skipped tests 20 | const hasPassed = testResults.some(testResult => testResult.passed > 0) 21 | const hasSkipped = testResults.some(testResult => testResult.skipped > 0) 22 | const hasFailed = testResults.some(testResult => testResult.failed > 0) 23 | const hasTests = testResults.some(testResult => testResult.totalCount > 0) 24 | 25 | if (skipSuccessSummary && !hasFailed) { 26 | // if we have skip success summary enabled, and we don't have any test failures, return empty tables 27 | return [[], [], []] 28 | } 29 | 30 | const passedHeader = hasTests ? (hasPassed ? (hasFailed ? 'Passed ☑️' : 'Passed ✅') : 'Passed') : 'Passed ❌️' 31 | const skippedHeader = hasSkipped ? 'Skipped ⚠️' : 'Skipped' 32 | const failedHeader = hasFailed ? 'Failed ❌️' : 'Failed' 33 | const timeHeader = 'Time ⏱' 34 | 35 | const passedIcon = simplifiedSummary ? '✅' : 'passed' 36 | const skippedIcon = simplifiedSummary ? '⚠️' : 'skipped' 37 | const failedIcon = simplifiedSummary ? '❌' : 'failed' 38 | const passedDetailIcon = simplifiedSummary ? 
'✅' : '✅ passed' 39 | const skippedDetailIcon = simplifiedSummary ? '⚠️' : '⚠️ skipped' 40 | 41 | const table: SummaryTableRow[] = [ 42 | [ 43 | {data: '', header: true}, 44 | {data: 'Tests', header: true}, 45 | {data: passedHeader, header: true}, 46 | {data: skippedHeader, header: true}, 47 | {data: failedHeader, header: true} 48 | ] 49 | ] 50 | if (includeTimeInSummary) { 51 | table[0].push({data: timeHeader, header: true}) 52 | } 53 | 54 | const detailsTable: SummaryTableRow[] = !detailedSummary 55 | ? [] 56 | : [ 57 | [ 58 | {data: 'Test', header: true}, 59 | {data: 'Result', header: true} 60 | ] 61 | ] 62 | 63 | if (detailedSummary && includeTimeInSummary) { 64 | detailsTable[0].push({data: timeHeader, header: true}) 65 | } 66 | 67 | const flakyTable: SummaryTableRow[] = !flakySummary 68 | ? [] 69 | : [ 70 | [ 71 | {data: 'Test', header: true}, 72 | {data: 'Retries', header: true} 73 | ] 74 | ] 75 | 76 | if (flakySummary && includeTimeInSummary) { 77 | flakyTable[0].push({data: timeHeader, header: true}) 78 | } 79 | 80 | const colspan = includeTimeInSummary ? '3' : '2' 81 | for (const testResult of testResults) { 82 | const row = [ 83 | `${testResult.checkName}`, 84 | includeEmptyInSummary || testResult.totalCount > 0 ? `${testResult.totalCount} ran` : ``, 85 | includeEmptyInSummary || testResult.passed > 0 ? `${testResult.passed} ${passedIcon}` : ``, 86 | includeEmptyInSummary || testResult.skipped > 0 ? `${testResult.skipped} ${skippedIcon}` : ``, 87 | includeEmptyInSummary || testResult.failed > 0 ? 
`${testResult.failed} ${failedIcon}` : `` 88 | ] 89 | if (includeTimeInSummary) { 90 | row.push(toFormatedTime(testResult.time)) 91 | } 92 | table.push(row) 93 | 94 | const annotations = testResult.globalAnnotations.filter( 95 | annotation => 96 | (includePassed || annotation.annotation_level !== 'notice') && 97 | (includeSkipped || annotation.status !== 'skipped') 98 | ) 99 | 100 | if (annotations.length === 0) { 101 | if (!includePassed) { 102 | core.info( 103 | `⚠️ No annotations found for ${testResult.checkName}. If you want to include passed results in this table please configure 'include_passed' as 'true'` 104 | ) 105 | } 106 | if (verboseSummary) { 107 | detailsTable.push([{data: `No test annotations available`, colspan}]) 108 | } 109 | } else { 110 | if (detailedSummary) { 111 | detailsTable.push([{data: `${testResult.checkName}`, colspan}]) 112 | if (!groupSuite) { 113 | for (const annotation of annotations) { 114 | const detailsRow = [ 115 | `${annotation.title}`, 116 | `${ 117 | annotation.status === 'success' 118 | ? passedDetailIcon 119 | : annotation.status === 'skipped' 120 | ? 
skippedDetailIcon 121 | : `❌ ${annotation.annotation_level}` 122 | }` 123 | ] 124 | if (includeTimeInSummary) { 125 | detailsRow.push(toFormatedTime(annotation.time)) 126 | } 127 | detailsTable.push(detailsRow) 128 | } 129 | } else { 130 | for (const internalTestResult of testResult.testResults) { 131 | appendDetailsTable( 132 | internalTestResult, 133 | detailsTable, 134 | includePassed, 135 | includeSkipped, 136 | includeTimeInSummary, 137 | passedDetailIcon, 138 | skippedDetailIcon 139 | ) 140 | } 141 | } 142 | } 143 | 144 | if (flakySummary) { 145 | const flakyAnnotations = annotations.filter(annotation => annotation.retries > 0) 146 | if (flakyAnnotations.length > 0) { 147 | flakyTable.push([{data: `${testResult.checkName}`, colspan}]) 148 | for (const annotation of flakyAnnotations) { 149 | const flakyRow = [`${annotation.title}`, `${annotation.retries}`] 150 | if (includeTimeInSummary) { 151 | flakyRow.push(toFormatedTime(annotation.time)) 152 | } 153 | flakyTable.push(flakyRow) 154 | } 155 | } 156 | } 157 | } 158 | } 159 | return [table, detailsTable, flakyTable] 160 | } 161 | 162 | function appendDetailsTable( 163 | testResult: ActualTestResult, 164 | detailsTable: SummaryTableRow[], 165 | includePassed: boolean, 166 | includeSkipped: boolean, 167 | includeTimeInSummary: boolean, 168 | passedDetailIcon: string, 169 | skippedDetailIcon: string 170 | ): void { 171 | const colspan = includeTimeInSummary ? '3' : '2' 172 | const annotations = testResult.annotations.filter( 173 | annotation => 174 | (includePassed || annotation.annotation_level !== 'notice') && (includeSkipped || annotation.status !== 'skipped') 175 | ) 176 | if (annotations.length > 0) { 177 | detailsTable.push([{data: `${testResult.name}`, colspan}]) 178 | for (const annotation of annotations) { 179 | const row = [ 180 | `${annotation.title}`, 181 | `${ 182 | annotation.status === 'success' 183 | ? passedDetailIcon 184 | : annotation.status === 'skipped' 185 | ? 
skippedDetailIcon 186 | : `❌ ${annotation.annotation_level}` 187 | }` 188 | ] 189 | if (includeTimeInSummary) { 190 | row.push(toFormatedTime(annotation.time)) 191 | } 192 | detailsTable.push(row) 193 | } 194 | } 195 | for (const childTestResult of testResult.testResults) { 196 | appendDetailsTable( 197 | childTestResult, 198 | detailsTable, 199 | includePassed, 200 | includeSkipped, 201 | includeTimeInSummary, 202 | passedDetailIcon, 203 | skippedDetailIcon 204 | ) 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /__tests__/annotator.test.ts: -------------------------------------------------------------------------------- 1 | import {vi, describe, it, expect, beforeEach, afterEach} from 'vitest' 2 | import {attachComment, buildCommentIdentifier} from '../src/annotator.js' 3 | import * as core from '@actions/core' 4 | 5 | /** 6 | * Copyright 2024 Mike Penz 7 | */ 8 | 9 | // Mock the context object with a mutable reference 10 | const mockContextData = vi.hoisted(() => ({ 11 | issue: {number: undefined as number | undefined}, 12 | repo: {owner: 'test-owner', repo: 'test-repo'} 13 | })) 14 | 15 | vi.mock('@actions/github/lib/utils.js', () => ({ 16 | context: mockContextData 17 | })) 18 | 19 | describe('attachComment', () => { 20 | let mockOctokit: any 21 | let mockWarning: any 22 | 23 | beforeEach(() => { 24 | // Reset mock context 25 | mockContextData.issue.number = undefined 26 | mockContextData.repo.owner = 'test-owner' 27 | mockContextData.repo.repo = 'test-repo' 28 | 29 | // Mock core.warning 30 | mockWarning = vi.spyOn(core, 'warning').mockImplementation(() => {}) 31 | 32 | // Mock octokit 33 | mockOctokit = { 34 | paginate: vi.fn(), 35 | rest: { 36 | issues: { 37 | listComments: vi.fn(), 38 | createComment: vi.fn(), 39 | updateComment: vi.fn() 40 | } 41 | } 42 | } 43 | }) 44 | 45 | afterEach(() => { 46 | vi.restoreAllMocks() 47 | }) 48 | 49 | it('should use pr_id when provided and context.issue.number is not 
available', async () => { 50 | // Setup: no context issue number 51 | mockContextData.issue.number = undefined 52 | 53 | mockOctokit.paginate.mockResolvedValue([]) 54 | 55 | const checkName = ['Test Check'] 56 | const table = [ 57 | ['Test', 'Result'], 58 | ['Example Test', 'Passed'] 59 | ] 60 | const prId = '123' 61 | 62 | await attachComment(mockOctokit, checkName, false, table, [], [], [], prId) 63 | 64 | // Verify comment was created with correct issue number 65 | expect(mockOctokit.rest.issues.createComment).toHaveBeenCalledWith({ 66 | owner: 'test-owner', 67 | repo: 'test-repo', 68 | issue_number: 123, 69 | body: expect.stringContaining('Example Test') 70 | }) 71 | 72 | expect(mockWarning).not.toHaveBeenCalled() 73 | }) 74 | 75 | it('should fall back to context.issue.number when pr_id is not provided', async () => { 76 | // Setup: context issue number available 77 | mockContextData.issue.number = 456 78 | 79 | mockOctokit.paginate.mockResolvedValue([]) 80 | 81 | const checkName = ['Test Check'] 82 | const table = [ 83 | ['Test', 'Result'], 84 | ['Example Test', 'Passed'] 85 | ] 86 | 87 | await attachComment(mockOctokit, checkName, false, table, [], [], []) 88 | 89 | // Verify comment was created with context issue number 90 | expect(mockOctokit.rest.issues.createComment).toHaveBeenCalledWith({ 91 | owner: 'test-owner', 92 | repo: 'test-repo', 93 | issue_number: 456, 94 | body: expect.stringContaining('Example Test') 95 | }) 96 | 97 | expect(mockWarning).not.toHaveBeenCalled() 98 | }) 99 | 100 | it('should warn and return early when no issue number is available', async () => { 101 | // Setup: no context issue number and no pr_id 102 | mockContextData.issue.number = undefined 103 | 104 | const checkName = ['Test Check'] 105 | const table = [ 106 | ['Test', 'Result'], 107 | ['Example Test', 'Passed'] 108 | ] 109 | 110 | await attachComment(mockOctokit, checkName, false, table, [], [], []) 111 | 112 | // Verify warning was called and no comment was created 113 | 
expect(mockWarning).toHaveBeenCalledWith( 114 | expect.stringContaining('Action requires a valid issue number (PR reference) or pr_id input') 115 | ) 116 | expect(mockOctokit.rest.issues.createComment).not.toHaveBeenCalled() 117 | }) 118 | 119 | it('should update existing comment when updateComment is true', async () => { 120 | // Setup: context issue number available 121 | mockContextData.issue.number = 456 122 | 123 | const existingComment = { 124 | id: 999, 125 | body: 'Existing comment ' 126 | } 127 | mockOctokit.paginate.mockResolvedValue([existingComment]) 128 | 129 | const checkName = ['Test Check'] 130 | const table = [ 131 | ['Test', 'Result'], 132 | ['Example Test', 'Updated'] 133 | ] 134 | 135 | await attachComment(mockOctokit, checkName, true, table, [], [], []) 136 | 137 | // Verify comment was updated 138 | expect(mockOctokit.rest.issues.updateComment).toHaveBeenCalledWith({ 139 | owner: 'test-owner', 140 | repo: 'test-repo', 141 | comment_id: 999, 142 | body: expect.stringContaining('Example Test') 143 | }) 144 | expect(mockOctokit.rest.issues.createComment).not.toHaveBeenCalled() 145 | }) 146 | it('should warn and return early when pr_id is invalid', async () => { 147 | // Setup: no context issue number and invalid pr_id 148 | mockContextData.issue.number = undefined 149 | 150 | const checkName = ['Test Check'] 151 | const table = [ 152 | ['Test', 'Result'], 153 | ['Example Test', 'Passed'] 154 | ] 155 | const prId = 'invalid-number' 156 | 157 | await attachComment(mockOctokit, checkName, false, table, [], [], [], prId) 158 | 159 | // Verify warning was called and no comment was created 160 | expect(mockWarning).toHaveBeenCalledWith( 161 | expect.stringContaining('Action requires a valid issue number (PR reference) or pr_id input') 162 | ) 163 | expect(mockOctokit.rest.issues.createComment).not.toHaveBeenCalled() 164 | }) 165 | 166 | it('should handle pr_id with leading/trailing whitespace', async () => { 167 | // Setup: no context issue number 168 
| mockContextData.issue.number = undefined 169 | 170 | mockOctokit.paginate.mockResolvedValue([]) 171 | 172 | const checkName = ['Test Check'] 173 | const table = [ 174 | ['Test', 'Result'], 175 | ['Example Test', 'Passed'] 176 | ] 177 | const prId = ' 123 ' 178 | 179 | await attachComment(mockOctokit, checkName, false, table, [], [], [], prId) 180 | 181 | // Verify comment was created with correct issue number (whitespace trimmed) 182 | expect(mockOctokit.rest.issues.createComment).toHaveBeenCalledWith({ 183 | owner: 'test-owner', 184 | repo: 'test-repo', 185 | issue_number: 123, 186 | body: expect.stringContaining('Example Test') 187 | }) 188 | 189 | expect(mockWarning).not.toHaveBeenCalled() 190 | }) 191 | 192 | it('should update existing comment when pr_id is provided and updateComment is true', async () => { 193 | // Setup: no context issue number but pr_id provided 194 | mockContextData.issue.number = undefined 195 | 196 | const existingComment = { 197 | id: 888, 198 | body: 'Existing comment ' 199 | } 200 | mockOctokit.paginate.mockResolvedValue([existingComment]) 201 | 202 | const checkName = ['Test Check'] 203 | const table = [ 204 | ['Test', 'Result'], 205 | ['Example Test', 'Updated'] 206 | ] 207 | const prId = '789' 208 | 209 | await attachComment(mockOctokit, checkName, true, table, [], [], [], prId) 210 | 211 | // Verify paginate was called with correct issue number 212 | expect(mockOctokit.paginate).toHaveBeenCalledWith(mockOctokit.rest.issues.listComments, { 213 | owner: 'test-owner', 214 | repo: 'test-repo', 215 | issue_number: 789 216 | }) 217 | 218 | // Verify comment was updated 219 | expect(mockOctokit.rest.issues.updateComment).toHaveBeenCalledWith({ 220 | owner: 'test-owner', 221 | repo: 'test-repo', 222 | comment_id: 888, 223 | body: expect.stringContaining('Example Test') 224 | }) 225 | expect(mockOctokit.rest.issues.createComment).not.toHaveBeenCalled() 226 | }) 227 | }) 228 | 229 | describe('buildCommentIdentifier', () => { 230 | 
it('should build correct identifier', () => { 231 | const checkName = ['Test Check'] 232 | const identifier = buildCommentIdentifier(checkName) 233 | expect(identifier).toBe('') 234 | }) 235 | }) 236 | -------------------------------------------------------------------------------- /src/annotator.ts: -------------------------------------------------------------------------------- 1 | import * as core from '@actions/core' 2 | import {Annotation, TestResult} from './testParser.js' 3 | import * as github from '@actions/github' 4 | import {SummaryTableRow} from '@actions/core/lib/summary.js' 5 | import {context, GitHub} from '@actions/github/lib/utils.js' 6 | import {buildLink, buildList, buildTable} from './utils.js' 7 | 8 | export interface CheckInfo { 9 | name: string 10 | url: string 11 | } 12 | 13 | export async function annotateTestResult( 14 | testResult: TestResult, 15 | token: string, 16 | headSha: string, 17 | checkAnnotations: boolean, 18 | annotateOnly: boolean, 19 | updateCheck: boolean, 20 | annotateNotice: boolean, 21 | jobName: string 22 | ): Promise { 23 | const annotations = testResult.globalAnnotations.filter( 24 | annotation => annotateNotice || annotation.annotation_level !== 'notice' 25 | ) 26 | const foundResults = testResult.totalCount > 0 || testResult.skipped > 0 27 | 28 | let title = 'No test results found!' 29 | if (foundResults) { 30 | title = `${testResult.totalCount} tests run, ${testResult.passed} passed, ${testResult.skipped} skipped, ${testResult.failed} failed.` 31 | } 32 | 33 | core.info(`ℹ️ - ${testResult.checkName} - ${title}`) 34 | 35 | const conclusion: 'success' | 'failure' = testResult.failed <= 0 ? 
'success' : 'failure' 36 | 37 | for (const annotation of annotations) { 38 | core.info(` 🧪 - ${annotation.path} | ${annotation.message.split('\n', 1)[0]}`) 39 | } 40 | 41 | const octokit = github.getOctokit(token) 42 | if (annotateOnly) { 43 | // only create annotaitons, no check 44 | for (const annotation of annotations) { 45 | const properties: core.AnnotationProperties = { 46 | title: annotation.title, 47 | file: annotation.path, 48 | startLine: annotation.start_line, 49 | endLine: annotation.end_line, 50 | startColumn: annotation.start_column, 51 | endColumn: annotation.end_column 52 | } 53 | if (annotation.annotation_level === 'failure') { 54 | core.error(annotation.message, properties) 55 | } else if (annotation.annotation_level === 'warning') { 56 | core.warning(annotation.message, properties) 57 | } else if (annotateNotice) { 58 | core.notice(annotation.message, properties) 59 | } 60 | } 61 | return undefined // No check created, so no URL to return 62 | } else { 63 | // check status is being created, annotations are included in this (if not diasbled by "checkAnnotations") 64 | if (updateCheck) { 65 | const checks = await octokit.rest.checks.listForRef({ 66 | ...github.context.repo, 67 | ref: headSha, 68 | check_name: jobName, 69 | status: 'in_progress', 70 | filter: 'latest' 71 | }) 72 | 73 | core.debug(JSON.stringify(checks, null, 2)) 74 | 75 | const check_run_id = checks.data.check_runs[0].id 76 | const checkUrl = `${github.context.serverUrl}/${github.context.repo.owner}/${github.context.repo.repo}/runs/${check_run_id}` 77 | 78 | if (checkAnnotations) { 79 | core.info(`ℹ️ - ${testResult.checkName} - Updating checks (Annotations: ${annotations.length})`) 80 | for (let i = 0; i < annotations.length; i = i + 50) { 81 | const sliced = annotations.slice(i, i + 50) 82 | await updateChecks(octokit, check_run_id, title, testResult.summary, sliced) 83 | } 84 | } else { 85 | core.info(`ℹ️ - ${testResult.checkName} - Updating checks (disabled annotations)`) 86 | 
await updateChecks(octokit, check_run_id, title, testResult.summary, []) 87 | } 88 | 89 | return { 90 | name: testResult.checkName, 91 | url: checkUrl 92 | } 93 | } else { 94 | const status: 'completed' | 'in_progress' | 'queued' | undefined = 'completed' 95 | // don't send annotations if disabled 96 | const adjustedAnnotations = checkAnnotations ? annotations : [] 97 | const createCheckRequest = { 98 | ...github.context.repo, 99 | name: testResult.checkName, 100 | head_sha: headSha, 101 | status, 102 | conclusion, 103 | output: { 104 | title, 105 | summary: testResult.summary, 106 | annotations: adjustedAnnotations.slice(0, 50) 107 | } 108 | } 109 | 110 | core.debug(JSON.stringify(createCheckRequest, null, 2)) 111 | 112 | core.info(`ℹ️ - ${testResult.checkName} - Creating check (Annotations: ${adjustedAnnotations.length})`) 113 | const checkResponse = await octokit.rest.checks.create(createCheckRequest) 114 | 115 | // Return the check URL for use in job summary 116 | return { 117 | name: testResult.checkName, 118 | url: `${github.context.serverUrl}/${github.context.repo.owner}/${github.context.repo.repo}/runs/${checkResponse.data.id}` 119 | } 120 | } 121 | } 122 | } 123 | 124 | async function updateChecks( 125 | octokit: InstanceType, 126 | check_run_id: number, 127 | title: string, 128 | summary: string, 129 | annotations: Annotation[] 130 | ): Promise { 131 | const updateCheckRequest = { 132 | ...github.context.repo, 133 | check_run_id, 134 | output: { 135 | title, 136 | summary, 137 | annotations 138 | } 139 | } 140 | 141 | core.debug(JSON.stringify(updateCheckRequest, null, 2)) 142 | await octokit.rest.checks.update(updateCheckRequest) 143 | } 144 | 145 | export async function attachSummary( 146 | table: SummaryTableRow[], 147 | detailsTable: SummaryTableRow[], 148 | flakySummary: SummaryTableRow[], 149 | checkInfos: CheckInfo[] = [], 150 | summaryText?: string 151 | ): Promise { 152 | // Add summary text if provided 153 | if (summaryText) { 154 | 
core.summary.addRaw(summaryText) 155 | } 156 | 157 | if (table.length > 0) { 158 | core.summary.addTable(table) 159 | } 160 | if (detailsTable.length > 1) { 161 | core.summary.addTable(detailsTable) 162 | } 163 | if (flakySummary.length > 1) { 164 | core.summary.addTable(flakySummary) 165 | } 166 | 167 | // Add check links to the job summary if any checks were created 168 | if (checkInfos.length > 0) { 169 | const links = checkInfos.map(checkInfo => { 170 | return buildLink(`View ${checkInfo.name}`, checkInfo.url) 171 | }) 172 | core.summary.addList(links) 173 | } 174 | core.summary.addSeparator() 175 | await core.summary.write() 176 | } 177 | 178 | export function buildCommentIdentifier(checkName: string[]): string { 179 | return `` 180 | } 181 | 182 | export async function attachComment( 183 | octokit: InstanceType, 184 | checkName: string[], 185 | updateComment: boolean, 186 | table: SummaryTableRow[], 187 | detailsTable: SummaryTableRow[], 188 | flakySummary: SummaryTableRow[], 189 | checkInfos: CheckInfo[] = [], 190 | prId?: string 191 | ): Promise { 192 | // Use provided prId or fall back to context issue number 193 | const issueNumber = prId ? parseInt(prId, 10) : context.issue.number 194 | 195 | if (!issueNumber) { 196 | core.warning( 197 | `⚠️ Action requires a valid issue number (PR reference) or pr_id input to be able to attach a comment..` 198 | ) 199 | return 200 | } 201 | 202 | if (table.length === 0 && detailsTable.length === 0 && flakySummary.length === 0) { 203 | core.debug(`Tables for comment were empty. 
'skip_success_summary' enabled?`) 204 | return 205 | } 206 | 207 | const identifier = buildCommentIdentifier(checkName) 208 | 209 | let comment = buildTable(table) 210 | if (detailsTable.length > 1) { 211 | comment += '\n\n' 212 | comment += buildTable(detailsTable) 213 | } 214 | if (flakySummary.length > 1) { 215 | comment += '\n\n' 216 | comment += buildTable(flakySummary) 217 | } 218 | 219 | // Add check links to the job summary if any checks were created 220 | if (checkInfos.length > 0) { 221 | const links = checkInfos.map(checkInfo => { 222 | return buildLink(`View ${checkInfo.name}`, checkInfo.url) 223 | }) 224 | comment += buildList(links) 225 | comment += `\n\n` 226 | } 227 | 228 | comment += `\n\n${identifier}` 229 | 230 | const priorComment = updateComment ? await findPriorComment(octokit, identifier, issueNumber) : undefined 231 | if (priorComment) { 232 | await octokit.rest.issues.updateComment({ 233 | owner: context.repo.owner, 234 | repo: context.repo.repo, 235 | comment_id: priorComment, 236 | body: comment 237 | }) 238 | } else { 239 | await octokit.rest.issues.createComment({ 240 | owner: context.repo.owner, 241 | repo: context.repo.repo, 242 | issue_number: issueNumber, 243 | body: comment 244 | }) 245 | } 246 | } 247 | 248 | async function findPriorComment( 249 | octokit: InstanceType, 250 | identifier: string, 251 | issueNumber: number 252 | ): Promise { 253 | const comments = await octokit.paginate(octokit.rest.issues.listComments, { 254 | owner: context.repo.owner, 255 | repo: context.repo.repo, 256 | issue_number: issueNumber 257 | }) 258 | 259 | const foundComment = comments.find(comment => comment.body?.endsWith(identifier)) 260 | return foundComment?.id 261 | } 262 | -------------------------------------------------------------------------------- /src/main.ts: -------------------------------------------------------------------------------- 1 | import * as core from '@actions/core' 2 | import * as github from '@actions/github' 3 | import 
{annotateTestResult, attachComment, attachSummary, CheckInfo} from './annotator.js' 4 | import {parseTestReports, TestResult} from './testParser.js' 5 | import {buildTable, readTransformers, retrieve} from './utils.js' 6 | import {GitHub} from '@actions/github/lib/utils.js' 7 | import {buildSummaryTables} from './table.js' 8 | 9 | export async function run(): Promise { 10 | try { 11 | core.startGroup(`📘 Reading input values`) 12 | 13 | const token = core.getInput('token') || core.getInput('github_token') || process.env.GITHUB_TOKEN 14 | if (!token) { 15 | core.setFailed('❌ A token is required to execute this action') 16 | return 17 | } 18 | 19 | const groupReports = core.getInput('group_reports') === 'true' 20 | const annotateOnly = core.getInput('annotate_only') === 'true' 21 | const updateCheck = core.getInput('update_check') === 'true' 22 | const checkAnnotations = core.getInput('check_annotations') === 'true' 23 | const commit = core.getInput('commit') 24 | const failOnFailure = core.getInput('fail_on_failure') === 'true' 25 | const failOnParseError = core.getInput('fail_on_parse_error') === 'true' 26 | const requireTests = core.getInput('require_tests') === 'true' 27 | const requirePassedTests = core.getInput('require_passed_tests') === 'true' 28 | const includePassed = core.getInput('include_passed') === 'true' 29 | const includeSkipped = core.getInput('include_skipped') === 'true' 30 | const checkRetries = core.getInput('check_retries') === 'true' 31 | const annotateNotice = core.getInput('annotate_notice') === 'true' 32 | const jobSummary = core.getInput('job_summary') === 'true' 33 | const jobSummaryText = core.getInput('job_summary_text') 34 | const detailedSummary = core.getInput('detailed_summary') === 'true' 35 | const flakySummary = core.getInput('flaky_summary') === 'true' 36 | const verboseSummary = core.getInput('verbose_summary') === 'true' 37 | const skipSuccessSummary = core.getInput('skip_success_summary') === 'true' 38 | const 
includeEmptyInSummary = core.getInput('include_empty_in_summary') === 'true' 39 | const includeTimeInSummary = core.getInput('include_time_in_summary') === 'true' 40 | const simplifiedSummary = core.getInput('simplified_summary') === 'true' 41 | const groupSuite = core.getInput('group_suite') === 'true' 42 | const comment = core.getInput('comment') === 'true' 43 | const updateComment = core.getInput('updateComment') === 'true' 44 | const jobName = core.getInput('job_name') 45 | const skipCommentWithoutTests = core.getInput('skip_comment_without_tests') === 'true' 46 | const prId = core.getInput('pr_id').trim() || undefined 47 | 48 | const reportPaths = core.getMultilineInput('report_paths') 49 | const summary = core.getMultilineInput('summary') 50 | const checkName = core.getMultilineInput('check_name') 51 | const testFilesPrefix = core.getMultilineInput('test_files_prefix') 52 | const suiteRegex = core.getMultilineInput('suite_regex') 53 | let excludeSources = core.getMultilineInput('exclude_sources') ? 
core.getMultilineInput('exclude_sources') : [] 54 | const checkTitleTemplate = core.getMultilineInput('check_title_template') 55 | const breadCrumbDelimiter = core.getInput('bread_crumb_delimiter') 56 | const transformers = readTransformers(core.getInput('transformers', {trimWhitespace: true})) 57 | const followSymlink = core.getBooleanInput('follow_symlink') 58 | const annotationsLimit = Number(core.getInput('annotations_limit') || -1) 59 | const skipAnnotations = core.getInput('skip_annotations') === 'true' 60 | const truncateStackTraces = core.getBooleanInput('truncate_stack_traces') 61 | const resolveIgnoreClassname = core.getBooleanInput('resolve_ignore_classname') 62 | 63 | if (excludeSources.length === 0) { 64 | excludeSources = ['/build/', '/__pycache__/'] 65 | } 66 | 67 | core.endGroup() 68 | core.startGroup(`📦 Process test results`) 69 | 70 | const reportsCount = reportPaths.length 71 | 72 | const testResults: TestResult[] = [] 73 | const mergedResult: TestResult = { 74 | checkName: '', 75 | summary: '', 76 | totalCount: 0, 77 | skipped: 0, 78 | failed: 0, 79 | passed: 0, 80 | retried: 0, 81 | time: 0, 82 | foundFiles: 0, 83 | globalAnnotations: [], 84 | testResults: [] 85 | } 86 | 87 | core.info(`Preparing ${reportsCount} report as configured.`) 88 | 89 | for (let i = 0; i < reportsCount; i++) { 90 | const testResult = await parseTestReports( 91 | retrieve('checkName', checkName, i, reportsCount), 92 | retrieve('summary', summary, i, reportsCount), 93 | retrieve('reportPaths', reportPaths, i, reportsCount), 94 | retrieve('suiteRegex', suiteRegex, i, reportsCount), 95 | includePassed, 96 | annotateNotice, 97 | checkRetries, 98 | excludeSources, 99 | retrieve('checkTitleTemplate', checkTitleTemplate, i, reportsCount), 100 | breadCrumbDelimiter, 101 | retrieve('testFilesPrefix', testFilesPrefix, i, reportsCount), 102 | transformers, 103 | followSymlink, 104 | annotationsLimit, 105 | truncateStackTraces, 106 | failOnParseError, 107 | resolveIgnoreClassname 
108 | ) 109 | mergedResult.totalCount += testResult.totalCount 110 | mergedResult.skipped += testResult.skipped 111 | mergedResult.failed += testResult.failed 112 | mergedResult.passed += testResult.passed 113 | mergedResult.retried += testResult.retried 114 | mergedResult.time += testResult.time 115 | 116 | if (groupReports) { 117 | testResults.push(testResult) 118 | } else { 119 | for (const actualTestResult of testResult.testResults) { 120 | testResults.push({ 121 | checkName: `${testResult.checkName} | ${actualTestResult.name}`, 122 | summary: testResult.summary, 123 | totalCount: actualTestResult.totalCount, 124 | skipped: actualTestResult.skippedCount, 125 | failed: actualTestResult.failedCount, 126 | passed: actualTestResult.passedCount, 127 | retried: actualTestResult.retriedCount, 128 | time: actualTestResult.time, 129 | foundFiles: 1, 130 | globalAnnotations: actualTestResult.annotations, 131 | testResults: actualTestResult.testResults 132 | }) 133 | } 134 | } 135 | } 136 | 137 | core.setOutput('total', mergedResult.totalCount) 138 | core.setOutput('passed', mergedResult.passed) 139 | core.setOutput('skipped', mergedResult.skipped) 140 | core.setOutput('failed', mergedResult.failed) 141 | core.setOutput('retried', mergedResult.retried) 142 | core.setOutput('time', mergedResult.time) 143 | 144 | if (!(mergedResult.totalCount > 0 || mergedResult.skipped > 0) && requireTests) { 145 | core.setFailed(`❌ No test results found for ${checkName}`) 146 | return // end if we failed due to no tests, but configured to require tests 147 | } else if (!(mergedResult.passed > 0) && requirePassedTests) { 148 | core.setFailed(`❌ No passed test results found for ${checkName}`) 149 | return // end if we failed due to no passed tests, but configured to require passed tests 150 | } 151 | 152 | const pullRequest = github.context.payload.pull_request 153 | const link = (pullRequest && pullRequest.html_url) || github.context.ref 154 | const conclusion: 'success' | 'failure' = 
mergedResult.failed <= 0 ? 'success' : 'failure' 155 | const headSha = commit || (pullRequest && pullRequest.head.sha) || github.context.sha 156 | core.info(`ℹ️ Posting with conclusion '${conclusion}' to ${link} (sha: ${headSha})`) 157 | 158 | core.endGroup() 159 | core.startGroup(`🚀 Publish results`) 160 | 161 | const checkInfos: CheckInfo[] = [] 162 | if (!skipAnnotations) { 163 | try { 164 | for (const testResult of testResults) { 165 | const checkInfo = await annotateTestResult( 166 | testResult, 167 | token, 168 | headSha, 169 | checkAnnotations, 170 | annotateOnly, 171 | updateCheck, 172 | annotateNotice, 173 | jobName 174 | ) 175 | if (checkInfo) { 176 | checkInfos.push(checkInfo) 177 | } 178 | } 179 | } catch (error) { 180 | core.error(`❌ Failed to create checks using the provided token. (${error})`) 181 | core.warning( 182 | `⚠️ This usually indicates insufficient permissions. More details: https://github.com/mikepenz/action-junit-report/issues/23` 183 | ) 184 | } 185 | } 186 | 187 | const supportsJobSummary = process.env['GITHUB_STEP_SUMMARY'] 188 | const [table, detailTable, flakyTable] = buildSummaryTables( 189 | testResults, 190 | includePassed, 191 | includeSkipped, 192 | detailedSummary, 193 | flakySummary, 194 | verboseSummary, 195 | skipSuccessSummary, 196 | groupSuite, 197 | includeEmptyInSummary, 198 | includeTimeInSummary, 199 | simplifiedSummary 200 | ) 201 | if (jobSummary && supportsJobSummary) { 202 | try { 203 | await attachSummary(table, detailTable, flakyTable, checkInfos, jobSummaryText) 204 | } catch (error) { 205 | core.error(`❌ Failed to set the summary using the provided token. 
(${error})`) 206 | } 207 | } else if (jobSummary && !supportsJobSummary) { 208 | core.warning(`⚠️ Your environment seems to not support job summaries.`) 209 | } else { 210 | core.info('⏩ Skipped creation of job summary') 211 | } 212 | 213 | if (comment && (!skipCommentWithoutTests || mergedResult.totalCount > 0)) { 214 | const octokit: InstanceType = github.getOctokit(token) 215 | await attachComment(octokit, checkName, updateComment, table, detailTable, flakyTable, checkInfos, prId) 216 | } 217 | 218 | core.setOutput('summary', buildTable(table)) 219 | core.setOutput('detailed_summary', buildTable(detailTable)) 220 | core.setOutput('flaky_summary', buildTable(flakyTable)) 221 | 222 | // Set report URLs as output (newline-separated for multiple reports) 223 | const reportUrls = checkInfos.map(info => info.url).join('\n') 224 | core.setOutput('report_url', reportUrls) 225 | 226 | if (failOnFailure && conclusion === 'failure') { 227 | core.setFailed(`❌ Tests reported ${mergedResult.failed} failures`) 228 | } 229 | 230 | core.endGroup() 231 | } catch (error: any /* eslint-disable-line @typescript-eslint/no-explicit-any */) { 232 | core.setFailed(error.message) 233 | } 234 | } 235 | 236 | run() 237 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright (C) 2022 Mike Penz 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | 204 | ----------------- 205 | 206 | # github-actions-template 207 | 208 | The MIT License (MIT) 209 | 210 | Copyright (c) 2018 GitHub, Inc. and contributors 211 | 212 | Permission is hereby granted, free of charge, to any person obtaining a copy 213 | of this software and associated documentation files (the "Software"), to deal 214 | in the Software without restriction, including without limitation the rights 215 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 216 | copies of the Software, and to permit persons to whom the Software is 217 | furnished to do so, subject to the following conditions: 218 | 219 | The above copyright notice and this permission notice shall be included in 220 | all copies or substantial portions of the Software. 221 | 222 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 223 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 224 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 225 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 226 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 227 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 228 | THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | :octocat: 3 |
4 |

5 | action-junit-report 6 |

7 | 8 |

9 | ... reports JUnit test results as GitHub pull request check. 10 |

11 | 12 |
13 | 14 |
15 | 16 |
17 | 18 | 19 | 20 |
21 |
22 | 23 | ------- 24 | 25 |

26 | What's included 🚀 • 27 | Setup 🛠️ • 28 | Sample 🖥️ • 29 | Contribute 🧬 • 30 | License 📓 31 |

32 | 33 | ------- 34 | 35 | ### What's included 🚀 36 | 37 | - Flexible JUnit parser with wide support 38 | - Supports nested test suites 39 | - Blazingly fast execution 40 | - Lighweight 41 | - Rich build log output 42 | 43 | This action processes JUnit XML test reports on pull requests and shows the result as a PR check with summary and 44 | annotations. 45 | 46 | Based on action for [Surefire Reports by ScaCap](https://github.com/ScaCap/action-surefire-report) 47 | 48 | ## Setup 49 | 50 | ### Configure the workflow 51 | 52 | ```yml 53 | name: build 54 | on: 55 | pull_request: 56 | 57 | jobs: 58 | build: 59 | name: Build and Run Tests 60 | runs-on: ubuntu-latest 61 | steps: 62 | - name: Checkout Code 63 | uses: actions/checkout@v4 64 | - name: Build and Run Tests 65 | run: # execute your tests generating test results 66 | - name: Publish Test Report 67 | uses: mikepenz/action-junit-report@v5 68 | if: success() || failure() # always run even if the previous step fails 69 | with: 70 | report_paths: '**/build/test-results/test/TEST-*.xml' 71 | ``` 72 | 73 | ### Inputs 74 | 75 | | **Input** | **Description** | 76 | |------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 77 | | `report_paths` | Optional. [Glob](https://github.com/actions/toolkit/tree/master/packages/glob) expression to junit report paths. Defaults to: `**/junit-reports/TEST-*.xml`. | 78 | | `token` | Optional. GitHub token for creating a check run. Set to `${{ github.token }}` by default. | 79 | | `group_reports` | Optional. Defines if different reports found by a single `report_paths` glob expression are grouped together. Defaults to `true`. | 80 | | `test_files_prefix` | Optional. Prepends the provided prefix to test file paths within the report when annotating on GitHub. | 81 | | `exclude_sources` | Optional. 
Provide `,` separated array of folders to ignore for source lookup. Defaults to: `/build/,/__pycache__/` | 82 | | `check_name` | Optional. Check name to use when creating a check run. The default is `JUnit Test Report`. | 83 | | `suite_regex` | REMOVED (as of v5). Instead use `check_title_template` and configure: `{{BREAD_CRUMB}}{{SUITE_NAME}}/{{TEST_NAME}}` | 84 | | `commit` | Optional. The commit SHA to update the status. This is useful when you run it with `workflow_run`. | 85 | | `fail_on_failure` | Optional. Fail the build in case of a test failure. | 86 | | `fail_on_parse_error` | Optional. Fail the build if the test report file cannot be parsed. | 87 | | `require_tests` | Optional. Fail if no tests are found. | 88 | | `require_passed_tests` | Optional. Fail if no passed tests are found. (This is stricter than `require_tests`, which accepts skipped tests). | 89 | | `include_passed` | Optional. By default the action will skip passed items for the annotations. Enable this flag to include them. | 90 | | `include_skipped` | Optional. Controls whether skipped tests are included in the detailed summary table. Defaults to `true`. | 91 | | `check_retries` | Optional. If a testcase is retried, ignore the original failure. | 92 | | `check_title_template` | Optional. Template to configure the title format. Placeholders: {{FILE_NAME}}, {{SUITE_NAME}}, {{TEST_NAME}}, {{CLASS_NAME}}, {{BREAD_CRUMB}}. | 93 | | `bread_crumb_delimiter` | Optional. Defines the delimiter characters between the breadcrumb elements. Defaults to: `/`. | 94 | | `summary` | Optional. Additional text to summary output | 95 | | `check_annotations` | Optional. Defines if the checks will include annotations. If disabled skips all annotations for the check. (This does not affect `annotate_only`, which uses no checks). | 96 | | `update_check` | Optional. Uses an alternative API to update checks, use for cases with more than 50 annotations. Default: `false`. | 97 | | `annotate_only` | Optional.
Will only annotate the results on the files, won't create a check run. Defaults to `false`. | 98 | | `transformers` | Optional. Array of `Transformer`s offering the ability to adjust the fileName. Defaults to: `[{"searchValue":"::","replaceValue":"/"}]` | 99 | | `job_summary` | Optional. Enables the publishing of the job summary for the results. Defaults to `true`. May be required to disable [Enterprise Server](https://github.com/mikepenz/action-junit-report/issues/637) | 100 | | `job_summary_text` | Optional. Additional text to include in the job summary prior to the tables. Defaults to empty string. | 101 | | `detailed_summary` | Optional. Include table with all test results in the summary (Also applies to comment). Defaults to `false`. | 102 | | `flaky_summary` | Optional. Include table with all flaky results in the summary (Also applies to comment). Defaults to `false`. | 103 | | `verbose_summary` | Optional. Detail table will note if there were no test annotations for a test suite (Also applies to comment). Defaults to `true`. | 104 | | `skip_success_summary` | Optional. Skips the summary table if only successful tests were detected (Also applies to comment). Defaults to `false`. | 105 | | `include_empty_in_summary` | Optional. Include entries in summaries that have 0 count. Defaults to `true`. | 106 | | `include_time_in_summary` | Optional. Include spent time in summaries. Defaults to `false`. | 107 | | `simplified_summary` | Optional. Use icons instead of text to indicate status in summary. Defaults to `false`. | 108 | | `group_suite` | Optional. If enabled, will group the testcases by test suite in the `detailed_summary`. Defaults to `false`. | 109 | | `comment` | Optional. Enables a comment being added to the PR with the summary tables (Respects the summary configuration flags). Defaults to `false`. | 110 | | `updateComment` | Optional. If a prior action run comment exists, it is updated. If disabled, new comments are created for each run.
Defaults to `true`. | 111 | | `annotate_notice` | Optional. Annotate passed test results along with warning/failed ones. Defaults to `false`. (Changed in v3.5.0) | 112 | | `follow_symlink` | Optional. Enables to follow symlinks when searching test files via the globber. Defaults to `false`. | 113 | | `job_name` | Optional. Specify the name of a check to update | 114 | | `annotations_limit` | Optional. Specify the limit for annotations. This will also interrupt parsing all test-suites if the limit is reached. Defaults to: `No Limit`. | 115 | | `skip_annotations` | Optional. Setting this flag will result in no annotations being added to the run. Defaults to `false`. | 116 | | `truncate_stack_traces` | Optional. Truncate stack traces from test output to 2 lines in annotations. Defaults to `true`. | 117 | | `resolve_ignore_classname` | Optional. Force ignore test case classname from the xml report (This can help fix issues with some tools/languages). Defaults to `false`. | 118 | | `skip_comment_without_tests` | Optional. Disable commenting if no tests are detected. Defaults to `false`. | 119 | | `pr_id` | Optional. PR number to comment on (useful for workflow_run contexts where the action runs outside the PR context). When provided, overrides the automatic PR detection. | 120 | 121 | ### Common Configurations 122 | 123 |
Common report_paths 124 |

125 | 126 | - Surefire: 127 | `**/target/surefire-reports/TEST-*.xml` 128 | - sbt: 129 | `**/target/test-reports/*.xml` 130 | 131 |

132 |
133 | 134 | If you observe out-of-memory errors, follow the below configuration suggestion. 135 | 136 | > [!TIP] 137 | > FATAL ERROR: Reached heap limit Allocation failed - JavaScript heap out of memory 138 | 139 |
Increase Node Heap Memory 140 |

141 | 142 | If you encounter an out-of-memory from Node, such as 143 | 144 | ``` 145 | FATAL ERROR: Ineffective mark-compacts near heap limit Allocation failed - JavaScript heap out of memory 146 | ``` 147 | 148 | you can increase the memory allocation by setting an environment variable 149 | 150 | ```yaml 151 | - name: Publish Test Report 152 | uses: mikepenz/action-junit-report@v5 153 | env: 154 | NODE_OPTIONS: "--max-old-space-size=4096" 155 | if: success() || failure() # always run even if the previous step fails 156 | with: 157 | report_paths: '**/build/test-results/test/TEST-*.xml' 158 | ``` 159 | 160 |

161 |
162 | 163 | ### Action outputs 164 | 165 | After action execution it will return the test counts as output. 166 | 167 | ```yml 168 | # ${{steps.{CHANGELOG_STEP_ID}.outputs.total}} 169 | ``` 170 | 171 | A full set list of possible output values for this action. 172 | 173 | | **Output** | **Description** | 174 | |----------------------------|---------------------------------------------------------------------------------------------------------------------| 175 | | `outputs.total` | The total number of test cases covered by this test-step. | 176 | | `outputs.passed` | The number of passed test cases. | 177 | | `outputs.skipped` | The number of skipped test cases. | 178 | | `outputs.retried` | The number of retried test cases. | 179 | | `outputs.failed` | The number of failed test cases. | 180 | | `outputs.summary` | The short summary of the junit report. In html format (as also constructed by GitHub for the summary). | 181 | | `outputs.detailed_summary` | The full table with all test results in a summary. In html format (as also constructed by GitHub for the summary). | 182 | | `outputs.flaky_summary` | The full table with all flaky results in a summary. In html format (as also constructed by GitHub for the summary). | 183 | | `outputs.report_url` | The URL(s) to the test report(s). If multiple reports are created, they are separated by newlines. | 184 | 185 | ### PR run permissions 186 | 187 | The action requires `write` permission on the checks. If the GA token is `read-only` (this is a repository 188 | configuration) please enable `write` permission via: 189 | 190 | ```yml 191 | permissions: 192 | checks: write 193 | pull-requests: write # only required if `comment: true` was enabled 194 | ``` 195 | 196 | Additionally for [security reasons], the github token used for `pull_request` workflows is [marked as read-only]. 
197 | If you want to post checks to a PR from an external repository, you will need to use a separate workflow 198 | which has a read/write token, or use a PAT with elevated permissions. 199 | 200 | [security reasons]: https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ 201 | 202 | [marked as read-only]: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token 203 | 204 |
Example 205 |

206 | 207 | ```yml 208 | name: build 209 | on: 210 | pull_request: 211 | 212 | jobs: 213 | build: 214 | name: Build and Run Tests 215 | runs-on: ubuntu-latest 216 | steps: 217 | - name: Checkout Code 218 | uses: actions/checkout@v3 219 | - name: Build and Run Tests 220 | run: # execute your tests generating test results 221 | - name: Upload Test Report 222 | uses: actions/upload-artifact@v3 223 | if: always() # always run even if the previous step fails 224 | with: 225 | name: junit-test-results 226 | path: '**/build/test-results/test/TEST-*.xml' 227 | retention-days: 1 228 | 229 | --- 230 | name: report 231 | on: 232 | workflow_run: 233 | workflows: [ build ] 234 | types: [ completed ] 235 | 236 | permissions: 237 | checks: write 238 | 239 | jobs: 240 | checks: 241 | runs-on: ubuntu-latest 242 | steps: 243 | - name: Download Test Report 244 | uses: dawidd6/action-download-artifact@v2 245 | with: 246 | name: junit-test-results 247 | workflow: ${{ github.event.workflow.id }} 248 | run_id: ${{ github.event.workflow_run.id }} 249 | - name: Publish Test Report 250 | uses: mikepenz/action-junit-report@v5 251 | with: 252 | commit: ${{github.event.workflow_run.head_sha}} 253 | report_paths: '**/build/test-results/test/TEST-*.xml' 254 | # Optional: if you want to add PR comments from workflow_run context 255 | # comment: true 256 | # pr_id: ${{ github.event.workflow_run.pull_requests[0].number }} 257 | ``` 258 | 259 | This will securely post the check results from the privileged workflow onto the PR's checks report. 260 | 261 | > [!TIP] 262 | > When running from `workflow_run` context, use the `pr_id` parameter to enable PR comments: `pr_id: ${{ github.event.workflow_run.pull_requests[0].number }}` 263 | 264 |

265 |
266 | 267 | In environments that do not allow `checks: write`, the action can be configured to leverage the annotate\_only option. 268 | 269 |
Example 270 |

271 | 272 | ```yml 273 | name: pr 274 | 275 | on: 276 | pull_request: 277 | 278 | jobs: 279 | unit_test: 280 | runs-on: ubuntu-latest 281 | steps: 282 | - name: Checkout Code 283 | uses: actions/checkout@v4 284 | 285 | - name: Build and Run Tests 286 | run: # execute your tests generating test results 287 | 288 | - name: Write out Unit Test report annotation for forked repo 289 | if: ${{ failure() && (github.event.pull_request.head.repo.full_name != github.repository) }} 290 | uses: mikepenz/action-junit-report@v5 291 | with: 292 | annotate_only: true # forked repo cannot write to checks so just do annotations 293 | ``` 294 | 295 | This will selectively use different methods for forked and unforked repos. 296 |

297 |
298 | 299 | ## Sample 🖥️ 300 | 301 |
302 | 303 |
304 | 305 |
306 | 307 |
308 | 309 | ## Contribute 🧬 310 | 311 | ```bash 312 | # Install the dependencies 313 | $ npm install 314 | 315 | # Verify lint is happy 316 | $ npm run lint -- --fix 317 | 318 | # Format 319 | $ npm run format 320 | 321 | # Build the typescript and package it for distribution 322 | $ npm run build && npm run package 323 | 324 | # Run the tests, use to debug, and test it out 325 | $ npm test 326 | ``` 327 | 328 | ### Credits 329 | 330 | Original idea and GitHub Actions by: https://github.com/ScaCap/action-surefire-report 331 | 332 | ## Other actions 333 | 334 | - [release-changelog-builder-action](https://github.com/mikepenz/release-changelog-builder-action) 335 | - [xray-action](https://github.com/mikepenz/xray-action/) 336 | - [jira-release-composite-action](https://github.com/mikepenz/jira-release-composite-action) 337 | 338 | ## License 339 | 340 | Copyright (C) 2025 Mike Penz 341 | 342 | Licensed under the Apache License, Version 2.0 (the "License"); 343 | you may not use this file except in compliance with the License. 344 | You may obtain a copy of the License at 345 | 346 | http://www.apache.org/licenses/LICENSE-2.0 347 | 348 | Unless required by applicable law or agreed to in writing, software 349 | distributed under the License is distributed on an "AS IS" BASIS, 350 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 351 | See the License for the specific language governing permissions and 352 | limitations under the License. 
353 | -------------------------------------------------------------------------------- /src/testParser.ts: -------------------------------------------------------------------------------- 1 | import * as core from '@actions/core' 2 | import * as glob from '@actions/glob' 3 | import * as fs from 'fs' 4 | import * as parser from 'xml-js' 5 | import * as pathHelper from 'path' 6 | import {applyTransformer, removePrefix} from './utils.js' 7 | 8 | export interface ActualTestResult { 9 | name: string 10 | totalCount: number 11 | skippedCount: number 12 | failedCount: number 13 | passedCount: number 14 | retriedCount: number 15 | time: number 16 | annotations: Annotation[] 17 | globalAnnotations: Annotation[] 18 | testResults: ActualTestResult[] 19 | } 20 | 21 | interface TestCasesResult { 22 | totalCount: number 23 | skippedCount: number 24 | failedCount: number 25 | passedCount: number 26 | retriedCount: number 27 | time: number 28 | annotations: Annotation[] 29 | } 30 | 31 | export interface TestResult { 32 | checkName: string 33 | summary: string 34 | totalCount: number 35 | skipped: number 36 | failed: number 37 | passed: number 38 | retried: number 39 | time: number 40 | foundFiles: number 41 | globalAnnotations: Annotation[] 42 | testResults: ActualTestResult[] 43 | } 44 | 45 | export interface Annotation { 46 | path: string 47 | start_line: number 48 | end_line: number 49 | start_column: number 50 | end_column: number 51 | retries: number 52 | annotation_level: 'failure' | 'notice' | 'warning' 53 | status: 'success' | 'failure' | 'skipped' 54 | title: string 55 | message: string 56 | raw_details: string 57 | time: number 58 | } 59 | 60 | export interface Position { 61 | fileName: string 62 | line: number 63 | } 64 | 65 | export interface Transformer { 66 | searchValue: string 67 | replaceValue: string 68 | regex?: RegExp 69 | } 70 | 71 | /** 72 | * Copyright 2020 ScaCap 73 | * https://github.com/ScaCap/action-surefire-report/blob/master/utils.js#L6 74 | * 75 | * 
Modification Copyright 2022 Mike Penz 76 | * https://github.com/mikepenz/action-junit-report/ 77 | */ 78 | export async function resolveFileAndLine( 79 | file: string | null, 80 | line: string | null, 81 | className: string, 82 | output: string 83 | ): Promise { 84 | let fileName = file ? file : className.split('.').slice(-1)[0] 85 | const lineNumber = safeParseInt(line) 86 | try { 87 | if (fileName && lineNumber) { 88 | return {fileName, line: lineNumber} 89 | } 90 | 91 | const escapedFileName = fileName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&').replace('::', '/') // Rust test output contains colons between package names - See: https://github.com/mikepenz/action-junit-report/pull/359 92 | 93 | const matches = output.match(new RegExp(` [^ ]*${escapedFileName}.*?:\\d+`, 'g')) 94 | if (!matches) return {fileName, line: lineNumber || 1} 95 | 96 | const [lastItem] = matches.slice(-1) 97 | const lineTokens = lastItem.split(':') 98 | line = lineTokens.pop() || '0' 99 | 100 | // check, if the error message is from a rust file -- this way we have the chance to find 101 | // out the involved test file 102 | // See: https://github.com/mikepenz/action-junit-report/pull/360 103 | { 104 | const lineNumberPrefix = lineTokens.pop() || '' 105 | if (lineNumberPrefix.endsWith('.rs')) { 106 | fileName = lineNumberPrefix.split(' ').pop() || '' 107 | } 108 | } 109 | 110 | core.debug(`Resolved file ${fileName} and line ${line}`) 111 | 112 | return {fileName, line: safeParseInt(line) || -1} 113 | } catch (error: unknown) { 114 | core.warning(`⚠️ Failed to resolve file (${file}) and/or line (${line}) for ${className} (${error})`) 115 | return {fileName, line: safeParseInt(line) || -1} 116 | } 117 | } 118 | 119 | /** 120 | * Parse the provided string line number, and return its value, or null if it is not available or NaN. 
121 | */ 122 | function safeParseInt(line: string | null): number | null { 123 | if (!line) return null 124 | const parsed = parseInt(line) 125 | if (isNaN(parsed)) return null 126 | return parsed 127 | } 128 | 129 | /** 130 | * Copyright 2020 ScaCap 131 | * https://github.com/ScaCap/action-surefire-report/blob/master/utils.js#L18 132 | * 133 | * Modification Copyright 2022 Mike Penz 134 | * https://github.com/mikepenz/action-junit-report/ 135 | */ 136 | const resolvePathCache: {[key: string]: string} = {} 137 | 138 | /** 139 | * Resolves the path of a given file, optionally following symbolic links. 140 | * 141 | * @param {string} workspace - The optional workspace directory. 142 | * @param {string} transformedFileName - The transformed file name to find. 143 | * @param {string[]} excludeSources - List of source paths to exclude. 144 | * @param {boolean} [followSymlink=false] - Whether to follow symbolic links. 145 | * @returns {Promise} - The resolved file path. 146 | */ 147 | export async function resolvePath( 148 | workspace: string, 149 | transformedFileName: string, 150 | excludeSources: string[], 151 | followSymlink = false 152 | ): Promise { 153 | const fileName: string = removePrefix(transformedFileName, workspace) 154 | if (resolvePathCache[fileName]) { 155 | return resolvePathCache[fileName] 156 | } 157 | 158 | let workspacePath: string 159 | if (workspace.length === 0 || workspace.endsWith('/')) { 160 | workspacePath = workspace 161 | } else { 162 | workspacePath = `${workspace}/` 163 | } 164 | 165 | core.debug(`Resolving path for ${fileName} in ${workspacePath}`) 166 | const normalizedFilename = fileName.replace(/^\.\//, '') // strip relative prefix (./) 167 | const globber = await glob.create(`${workspacePath}**/${normalizedFilename}.*`, { 168 | followSymbolicLinks: followSymlink 169 | }) 170 | const searchPath = globber.getSearchPaths() ? 
globber.getSearchPaths()[0] : '' 171 | for await (const result of globber.globGenerator()) { 172 | core.debug(`Matched file: ${result}`) 173 | 174 | const found = excludeSources.find(v => result.includes(v)) 175 | if (!found) { 176 | const path = result.slice(searchPath.length + 1) 177 | core.debug(`Resolved path: ${path}`) 178 | resolvePathCache[fileName] = path 179 | return path 180 | } 181 | } 182 | resolvePathCache[fileName] = normalizedFilename 183 | return normalizedFilename 184 | } 185 | 186 | /** 187 | * Copyright 2020 ScaCap 188 | * https://github.com/ScaCap/action-surefire-report/blob/master/utils.js#L43 189 | * 190 | * Modification Copyright 2022 Mike Penz 191 | * https://github.com/mikepenz/action-junit-report/ 192 | */ 193 | export async function parseFile( 194 | file: string, 195 | suiteRegex = '', // no-op 196 | includePassed = false, 197 | annotateNotice = false, 198 | checkRetries = false, 199 | excludeSources: string[] = ['/build/', '/__pycache__/'], 200 | checkTitleTemplate: string | undefined = undefined, 201 | breadCrumbDelimiter = '/', 202 | testFilesPrefix = '', 203 | transformer: Transformer[] = [], 204 | followSymlink = false, 205 | annotationsLimit = -1, 206 | truncateStackTraces = true, 207 | failOnParseError = false, 208 | globalAnnotations: Annotation[] = [], 209 | resolveIgnoreClassname = false 210 | ): Promise { 211 | core.debug(`Parsing file ${file}`) 212 | 213 | const data: string = fs.readFileSync(file, 'utf8') 214 | 215 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 216 | let report: any 217 | try { 218 | report = JSON.parse(parser.xml2json(data, {compact: true})) 219 | } catch (error) { 220 | core.error(`⚠️ Failed to parse file (${file}) with error ${error}`) 221 | if (failOnParseError) throw Error(`⚠️ Failed to parse file (${file}) with error ${error}`) 222 | return undefined 223 | } 224 | 225 | // parse child test suites 226 | const testsuite = report.testsuites ? 
report.testsuites : report.testsuite 227 | 228 | if (!testsuite) { 229 | core.error(`⚠️ Failed to retrieve root test suite from file (${file})`) 230 | return undefined 231 | } 232 | 233 | const testResult = await parseSuite( 234 | testsuite, 235 | suiteRegex, // no-op 236 | '', 237 | breadCrumbDelimiter, 238 | includePassed, 239 | annotateNotice, 240 | checkRetries, 241 | excludeSources, 242 | checkTitleTemplate, 243 | testFilesPrefix, 244 | transformer, 245 | followSymlink, 246 | annotationsLimit, 247 | truncateStackTraces, 248 | globalAnnotations, 249 | resolveIgnoreClassname 250 | ) 251 | 252 | if (testResult !== undefined && !testResult.name) { 253 | testResult.name = pathHelper.basename(file) 254 | } 255 | 256 | return testResult 257 | } 258 | 259 | function templateVar(varName: string): string { 260 | return `{{${varName}}}` 261 | } 262 | 263 | async function parseSuite( 264 | /* eslint-disable @typescript-eslint/no-explicit-any */ 265 | suite: any, 266 | suiteRegex: string, // no-op 267 | breadCrumb: string, 268 | breadCrumbDelimiter = '/', 269 | includePassed = false, 270 | annotateNotice = false, 271 | checkRetries = false, 272 | excludeSources: string[], 273 | checkTitleTemplate: string | undefined = undefined, 274 | testFilesPrefix = '', 275 | transformer: Transformer[], 276 | followSymlink: boolean, 277 | annotationsLimit: number, 278 | truncateStackTraces: boolean, 279 | globalAnnotations: Annotation[], 280 | resolveIgnoreClassname = false 281 | ): Promise { 282 | if (!suite) { 283 | // not a valid suite, return fast 284 | return undefined 285 | } 286 | 287 | let suiteName = '' 288 | if (suite._attributes && suite._attributes.name) { 289 | suiteName = suite._attributes.name 290 | } 291 | 292 | let totalCount = 0 293 | let skippedCount = 0 294 | let failedCount = 0 295 | let passedCount = 0 296 | let retriedCount = 0 297 | let time = 0 298 | const annotations: Annotation[] = [] 299 | 300 | // parse testCases 301 | if (suite.testcase) { 302 | const 
testcases = Array.isArray(suite.testcase) ? suite.testcase : suite.testcase ? [suite.testcase] : [] 303 | const suiteFile = suite._attributes !== undefined ? suite._attributes.file : null 304 | const suiteLine = suite._attributes !== undefined ? suite._attributes.line : null 305 | const limit = annotationsLimit >= 0 ? annotationsLimit - globalAnnotations.length : annotationsLimit 306 | const parsedTestCases = await parseTestCases( 307 | suiteName, 308 | suiteFile, 309 | suiteLine, 310 | breadCrumb, 311 | testcases, 312 | includePassed, 313 | annotateNotice, 314 | checkRetries, 315 | excludeSources, 316 | checkTitleTemplate, 317 | testFilesPrefix, 318 | transformer, 319 | followSymlink, 320 | truncateStackTraces, 321 | limit, 322 | resolveIgnoreClassname 323 | ) 324 | 325 | // expand global annotations array 326 | totalCount += parsedTestCases.totalCount 327 | skippedCount += parsedTestCases.skippedCount 328 | failedCount += parsedTestCases.failedCount 329 | passedCount += parsedTestCases.passedCount 330 | retriedCount += parsedTestCases.retriedCount 331 | time += parsedTestCases.time 332 | annotations.push(...parsedTestCases.annotations) 333 | globalAnnotations.push(...parsedTestCases.annotations) 334 | } 335 | // if we have a limit, and we are above the limit, return fast 336 | if (annotationsLimit > 0 && globalAnnotations.length >= annotationsLimit) { 337 | return { 338 | name: suiteName, 339 | totalCount, 340 | skippedCount, 341 | failedCount, 342 | passedCount, 343 | retriedCount, 344 | time, 345 | annotations, 346 | globalAnnotations, 347 | testResults: [] 348 | } 349 | } 350 | 351 | // parse child test suites 352 | const childTestSuites = suite.testsuite 353 | ? Array.isArray(suite.testsuite) 354 | ? suite.testsuite 355 | : [suite.testsuite] 356 | : Array.isArray(suite.testsuites) 357 | ? suite.testsuites 358 | : [suite.testsuites] 359 | 360 | const childSuiteResults: ActualTestResult[] = [] 361 | const childBreadCrumb = suiteName ? 
`${breadCrumb}${suiteName}${breadCrumbDelimiter}` : breadCrumb 362 | for (const childSuite of childTestSuites) { 363 | const childSuiteResult = await parseSuite( 364 | childSuite, 365 | suiteRegex, 366 | childBreadCrumb, 367 | breadCrumbDelimiter, 368 | includePassed, 369 | annotateNotice, 370 | checkRetries, 371 | excludeSources, 372 | checkTitleTemplate, 373 | testFilesPrefix, 374 | transformer, 375 | followSymlink, 376 | annotationsLimit, 377 | truncateStackTraces, 378 | globalAnnotations, 379 | resolveIgnoreClassname 380 | ) 381 | 382 | if (childSuiteResult) { 383 | childSuiteResults.push(childSuiteResult) 384 | totalCount += childSuiteResult.totalCount 385 | skippedCount += childSuiteResult.skippedCount 386 | failedCount += childSuiteResult.failedCount 387 | passedCount += childSuiteResult.passedCount 388 | retriedCount += childSuiteResult.retriedCount 389 | time += childSuiteResult.time 390 | } 391 | 392 | // skip out if we reached our annotations limit 393 | if (annotationsLimit > 0 && globalAnnotations.length >= annotationsLimit) { 394 | return { 395 | name: suiteName, 396 | totalCount, 397 | skippedCount, 398 | failedCount, 399 | passedCount, 400 | retriedCount, 401 | time, 402 | annotations, 403 | globalAnnotations, 404 | testResults: childSuiteResults 405 | } 406 | } 407 | } 408 | 409 | return { 410 | name: suiteName, 411 | totalCount, 412 | skippedCount, 413 | failedCount, 414 | passedCount, 415 | retriedCount, 416 | time, 417 | annotations, 418 | globalAnnotations, 419 | testResults: childSuiteResults 420 | } 421 | } 422 | 423 | /** 424 | * Helper function to create an annotation for a test case 425 | */ 426 | async function createTestCaseAnnotation( 427 | testcase: any, 428 | failure: any | null, 429 | failureIndex: number, 430 | totalFailures: number, 431 | suiteName: string, 432 | suiteFile: string | null, 433 | suiteLine: string | null, 434 | breadCrumb: string, 435 | testTime: number, 436 | skip: boolean, 437 | success: boolean, 438 | 
annotationLevel: 'failure' | 'notice' | 'warning', 439 | flakyFailuresCount: number, 440 | annotateNotice: boolean, 441 | failed: boolean, 442 | excludeSources: string[], 443 | checkTitleTemplate: string | undefined, 444 | testFilesPrefix: string, 445 | transformer: Transformer[], 446 | followSymlink: boolean, 447 | truncateStackTraces: boolean, 448 | resolveIgnoreClassname: boolean 449 | ): Promise { 450 | // Extract stack trace based on whether we have a failure or error 451 | const stackTrace: string = ( 452 | (failure && failure._cdata) || 453 | (failure && failure._text) || 454 | (testcase.error && testcase.error._cdata) || 455 | (testcase.error && testcase.error._text) || 456 | '' 457 | ) 458 | .toString() 459 | .trim() 460 | 461 | const stackTraceMessage = truncateStackTraces ? stackTrace.split('\n').slice(0, 2).join('\n') : stackTrace 462 | 463 | // Extract message based on failure or error 464 | const message: string = ( 465 | (failure && failure._attributes && failure._attributes.message) || 466 | (testcase.error && testcase.error._attributes && testcase.error._attributes.message) || 467 | stackTraceMessage || 468 | testcase._attributes.name 469 | ).trim() 470 | 471 | // Determine class name for resolution 472 | let resolveClassname = testcase._attributes.name 473 | if (!resolveIgnoreClassname && testcase._attributes.classname) { 474 | resolveClassname = testcase._attributes.classname 475 | } 476 | 477 | // Resolve file and line information 478 | const pos = await resolveFileAndLine( 479 | testcase._attributes.file || failure?._attributes?.file || suiteFile, 480 | testcase._attributes.line || failure?._attributes?.line || suiteLine, 481 | resolveClassname, 482 | stackTrace 483 | ) 484 | 485 | // Apply transformations to filename 486 | let transformedFileName = pos.fileName 487 | for (const r of transformer) { 488 | transformedFileName = applyTransformer(r, transformedFileName) 489 | } 490 | 491 | // Resolve the full path 492 | const githubWorkspacePath = 
process.env['GITHUB_WORKSPACE'] 493 | let resolvedPath: string = transformedFileName 494 | if (failed || (annotateNotice && success)) { 495 | if (fs.existsSync(transformedFileName)) { 496 | resolvedPath = transformedFileName 497 | } else if (githubWorkspacePath && fs.existsSync(`${githubWorkspacePath}${transformedFileName}`)) { 498 | resolvedPath = `${githubWorkspacePath}${transformedFileName}` 499 | } else { 500 | resolvedPath = await resolvePath(githubWorkspacePath || '', transformedFileName, excludeSources, followSymlink) 501 | } 502 | } 503 | 504 | core.debug(`Path prior to stripping: ${resolvedPath}`) 505 | if (githubWorkspacePath) { 506 | resolvedPath = resolvedPath.replace(`${githubWorkspacePath}/`, '') // strip workspace prefix, make the path relative 507 | } 508 | 509 | // Generate title 510 | let title = '' 511 | if (checkTitleTemplate) { 512 | // ensure to not duplicate the test_name if file_name is equal 513 | const fileName = pos.fileName !== testcase._attributes.name ? pos.fileName : '' 514 | const baseClassName = testcase._attributes.classname ? testcase._attributes.classname : testcase._attributes.name 515 | const className = baseClassName.split('.').slice(-1)[0] 516 | title = checkTitleTemplate 517 | .replace(templateVar('FILE_NAME'), fileName) 518 | .replace(templateVar('BREAD_CRUMB'), breadCrumb ?? '') 519 | .replace(templateVar('SUITE_NAME'), suiteName ?? 
'') 520 | .replace(templateVar('TEST_NAME'), testcase._attributes.name) 521 | .replace(templateVar('CLASS_NAME'), className) 522 | } else if (pos.fileName !== testcase._attributes.name) { 523 | // special handling to use class name only for title in case class name was ignored for `resolveClassname` 524 | if (resolveIgnoreClassname && testcase._attributes.classname) { 525 | title = `${testcase._attributes.classname}.${testcase._attributes.name}` 526 | } else { 527 | title = `${pos.fileName}.${testcase._attributes.name}` 528 | } 529 | } else { 530 | title = `${testcase._attributes.name}` 531 | } 532 | 533 | // Add failure index to title if multiple failures exist 534 | if (totalFailures > 1) { 535 | title = `${title} (failure ${failureIndex + 1}/${totalFailures})` 536 | } 537 | 538 | // optionally attach the prefix to the path 539 | resolvedPath = testFilesPrefix ? pathHelper.join(testFilesPrefix, resolvedPath) : resolvedPath 540 | 541 | const testTimeString = testTime > 0 ? `${testTime}s` : '' 542 | core.info(`${resolvedPath}:${pos.line} | ${message.split('\n', 1)[0]}${testTimeString}`) 543 | 544 | return { 545 | path: resolvedPath, 546 | start_line: pos.line, 547 | end_line: pos.line, 548 | start_column: 0, 549 | end_column: 0, 550 | retries: (testcase.retries || 0) + flakyFailuresCount, 551 | annotation_level: annotationLevel, 552 | status: skip ? 'skipped' : success ? 
'success' : 'failure', 553 | title: escapeEmoji(title), 554 | message: escapeEmoji(message), 555 | raw_details: escapeEmoji(stackTrace), 556 | time: testTime 557 | } 558 | } 559 | 560 | async function parseTestCases( 561 | suiteName: string, 562 | suiteFile: string | null, 563 | suiteLine: string | null, 564 | breadCrumb: string, 565 | testcases: any[], 566 | includePassed = false, 567 | annotateNotice = false, 568 | checkRetries = false, 569 | excludeSources: string[], 570 | checkTitleTemplate: string | undefined = undefined, 571 | testFilesPrefix = '', 572 | transformer: Transformer[], 573 | followSymlink: boolean, 574 | truncateStackTraces: boolean, 575 | limit = -1, 576 | resolveIgnoreClassname = false 577 | ): Promise { 578 | const annotations: Annotation[] = [] 579 | let totalCount = 0 580 | let skippedCount = 0 581 | let retriedCount = 0 582 | let time = 0 583 | if (checkRetries) { 584 | // identify duplicates in case of flaky tests, and remove them 585 | const testcaseMap = new Map() 586 | for (const testcase of testcases) { 587 | const key = testcase._attributes.name 588 | if (testcaseMap.get(key) !== undefined) { 589 | // testcase with matching name exists 590 | const failed = testcase.failure || testcase.error 591 | const previous = testcaseMap.get(key) 592 | const previousFailed = previous.failure || previous.error 593 | if (failed && !previousFailed) { 594 | // previous is a success, drop failure 595 | previous.retries = (previous.retries || 0) + 1 596 | retriedCount += 1 597 | core.debug(`Drop flaky test failure for (1): ${key}`) 598 | } else if (!failed && previousFailed) { 599 | // previous failed, new one not, replace 600 | testcase.retries = (previous.retries || 0) + 1 601 | testcaseMap.set(key, testcase) 602 | retriedCount += 1 603 | core.debug(`Drop flaky test failure for (2): ${JSON.stringify(testcase)}`) 604 | } 605 | } else { 606 | testcaseMap.set(key, testcase) 607 | } 608 | } 609 | testcases = Array.from(testcaseMap.values()) 610 | } 611 | 
612 | let testCaseFailedCount = 0 // Track number of test cases that failed 613 | 614 | for (const testcase of testcases) { 615 | totalCount++ 616 | 617 | // fish the time-taken out of the test case attributes, if present 618 | const testTime = testcase._attributes.time === undefined ? 0 : parseFloat(testcase._attributes.time) 619 | time += testTime 620 | 621 | const testFailure = testcase.failure || testcase.error // test failed 622 | const skip = 623 | testcase.skipped || testcase._attributes.status === 'disabled' || testcase._attributes.status === 'ignored' 624 | const failed = testFailure && !skip // test failure, but was skipped -> don't fail if a ignored test failed 625 | const success = !testFailure // not a failure -> thus a success 626 | const annotationLevel = success || skip ? 'notice' : 'failure' // a skipped test shall not fail the run 627 | 628 | if (skip) { 629 | skippedCount++ 630 | } 631 | 632 | // Count this test case as failed if it has any failures (regardless of how many) 633 | if (failed) { 634 | testCaseFailedCount++ 635 | } 636 | 637 | // If this isn't reported as a failure and processing all passed tests 638 | // isn't enabled, then skip the rest of the processing. 639 | if (annotationLevel !== 'failure' && !includePassed) { 640 | continue 641 | } 642 | 643 | // in some definitions `failure` may be an array 644 | const failures = testcase.failure ? (Array.isArray(testcase.failure) ? testcase.failure : [testcase.failure]) : [] 645 | 646 | // identify the number of flaky failures 647 | const flakyFailuresCount = testcase.flakyFailure 648 | ? Array.isArray(testcase.flakyFailure) 649 | ? testcase.flakyFailure.length 650 | : 1 651 | : 0 652 | 653 | // Handle multiple failures or single case (success/skip/error) 654 | const failuresToProcess = failures.length > 0 ? 
failures : [null] // Process at least once for non-failure cases 655 | 656 | for (let failureIndex = 0; failureIndex < failuresToProcess.length; failureIndex++) { 657 | const failure = failuresToProcess[failureIndex] 658 | 659 | const annotation = await createTestCaseAnnotation( 660 | testcase, 661 | failure, 662 | failureIndex, 663 | failures.length, 664 | suiteName, 665 | suiteFile, 666 | suiteLine, 667 | breadCrumb, 668 | testTime, 669 | skip, 670 | success, 671 | annotationLevel, 672 | flakyFailuresCount, 673 | annotateNotice, 674 | failed, 675 | excludeSources, 676 | checkTitleTemplate, 677 | testFilesPrefix, 678 | transformer, 679 | followSymlink, 680 | truncateStackTraces, 681 | resolveIgnoreClassname 682 | ) 683 | 684 | annotations.push(annotation) 685 | 686 | if (limit >= 0 && annotations.length >= limit) break 687 | } 688 | 689 | // Break from the outer testcase loop if we've reached the limit 690 | if (limit >= 0 && annotations.length >= limit) break 691 | } 692 | 693 | const failedCount = testCaseFailedCount // Use test case count, not annotation count 694 | const passedCount = totalCount - failedCount - skippedCount 695 | return { 696 | totalCount, 697 | skippedCount, 698 | failedCount, 699 | passedCount, 700 | retriedCount, 701 | time, 702 | annotations 703 | } 704 | } 705 | 706 | /** 707 | * Copyright 2020 ScaCap 708 | * https://github.com/ScaCap/action-surefire-report/blob/master/utils.js#L113 709 | * 710 | * Modification Copyright 2022 Mike Penz 711 | * https://github.com/mikepenz/action-junit-report/ 712 | */ 713 | export async function parseTestReports( 714 | checkName: string, 715 | summary: string, 716 | reportPaths: string, 717 | suiteRegex: string, // no-op 718 | includePassed = false, 719 | annotateNotice = false, 720 | checkRetries = false, 721 | excludeSources: string[], 722 | checkTitleTemplate: string | undefined = undefined, 723 | breadCrumbDelimiter: string, 724 | testFilesPrefix = '', 725 | transformer: Transformer[] = [], 726 | 
followSymlink = false, 727 | annotationsLimit = -1, 728 | truncateStackTraces = true, 729 | failOnParseError = false, 730 | resolveIgnoreClassname = false 731 | ): Promise { 732 | core.debug(`Process test report for: ${reportPaths} (${checkName})`) 733 | const globber = await glob.create(reportPaths, {followSymbolicLinks: followSymlink}) 734 | const globalAnnotations: Annotation[] = [] 735 | const testResults: ActualTestResult[] = [] 736 | let totalCount = 0 737 | let skipped = 0 738 | let failed = 0 739 | let passed = 0 740 | let retried = 0 741 | let time = 0 742 | let foundFiles = 0 743 | for await (const file of globber.globGenerator()) { 744 | foundFiles++ 745 | core.debug(`Parsing report file: ${file}`) 746 | 747 | const testResult = await parseFile( 748 | file, 749 | suiteRegex, 750 | includePassed, 751 | annotateNotice, 752 | checkRetries, 753 | excludeSources, 754 | checkTitleTemplate, 755 | breadCrumbDelimiter, 756 | testFilesPrefix, 757 | transformer, 758 | followSymlink, 759 | annotationsLimit, 760 | truncateStackTraces, 761 | failOnParseError, 762 | globalAnnotations, 763 | resolveIgnoreClassname 764 | ) 765 | 766 | if (!testResult) continue 767 | const {totalCount: c, skippedCount: s, failedCount: f, passedCount: p, retriedCount: r, time: t} = testResult 768 | totalCount += c 769 | skipped += s 770 | failed += f 771 | passed += p 772 | retried += r 773 | time += t 774 | testResults.push(testResult) 775 | 776 | if (annotationsLimit > 0 && globalAnnotations.length >= annotationsLimit) { 777 | break 778 | } 779 | } 780 | 781 | return { 782 | checkName, 783 | summary, 784 | totalCount, 785 | skipped, 786 | failed, 787 | passed, 788 | retried, 789 | time, 790 | foundFiles, 791 | globalAnnotations, 792 | testResults 793 | } 794 | } 795 | 796 | /** 797 | * Escape emoji sequences. 
798 | */ 799 | export function escapeEmoji(input: string): string { 800 | const regex = 801 | /[\u{1f300}-\u{1f5ff}\u{1f900}-\u{1f9ff}\u{1f600}-\u{1f64f}\u{1f680}-\u{1f6ff}\u{2600}-\u{26ff}\u{2700}-\u{27bf}\u{1f1e6}-\u{1f1ff}\u{1f191}-\u{1f251}\u{1f004}\u{1f0cf}\u{1f170}-\u{1f171}\u{1f17e}-\u{1f17f}\u{1f18e}\u{3030}\u{2b50}\u{2b55}\u{2934}-\u{2935}\u{2b05}-\u{2b07}\u{2b1b}-\u{2b1c}\u{3297}\u{3299}\u{303d}\u{00a9}\u{00ae}\u{2122}\u{23f3}\u{24c2}\u{23e9}-\u{23ef}\u{25b6}\u{23f8}-\u{23fa}]/gu 802 | return input.replace(regex, ``) // replace emoji with empty string (\\u${(match.codePointAt(0) || "").toString(16)}) 803 | } 804 | --------------------------------------------------------------------------------