├── .eslintignore
├── .eslintrc
├── .github
├── FUNDING.yml
└── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── enhancement.md
│ └── question.md
├── .gitignore
├── .pre-commit-config.yaml
├── .prettierignore
├── .prettierrc
├── .vscode
├── extensions.json
├── launch.json
└── tasks.json
├── .vscodeignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── LICENSE.txt
├── README.md
├── bundled_requirements.txt
├── cliff.toml
├── examples
└── simple
│ ├── .gitignore
│ ├── .vscode
│ ├── launch.json
│ └── settings.json
│ ├── features
│ ├── Bladerunner.feature
│ ├── Minimal.feature
│ ├── __init__.robot
│ ├── doc_strings.feature
│ ├── documentation.feature
│ ├── rule_sample.feature
│ ├── simple.feature.md
│ └── steps
│ │ ├── __init__.resource
│ │ ├── hooks.resource
│ │ ├── simple.resource
│ │ └── step_impls.py
│ ├── robot.toml
│ └── tests
│ └── first.robot
├── hatch.toml
├── icons
├── cucumber.png
├── cucumber.svg
├── gherkin.svg
├── icon.png
└── icon.svg
├── language-configuration.json
├── package-lock.json
├── package.json
├── packages
├── gurke
│ ├── LICENSE.txt
│ ├── README.md
│ ├── gherkin-python.razor
│ ├── gherkin.berp
│ ├── pyproject.toml
│ └── src
│ │ └── gurke
│ │ ├── __init__.py
│ │ ├── __main__.py
│ │ ├── __version__.py
│ │ ├── ast_builder.py
│ │ ├── ast_node.py
│ │ ├── count_symbols.py
│ │ ├── dialect.py
│ │ ├── errors.py
│ │ ├── gherkin-languages.json
│ │ ├── gherkin_line.py
│ │ ├── location.py
│ │ ├── parser.py
│ │ ├── pickles
│ │ ├── __init__.py
│ │ └── compiler.py
│ │ ├── py.typed
│ │ ├── stream
│ │ ├── __init__.py
│ │ ├── gherkin_events.py
│ │ ├── id_generator.py
│ │ └── source_events.py
│ │ ├── token.py
│ │ ├── token_formatter_builder.py
│ │ ├── token_matcher.py
│ │ ├── token_matcher_markdown.py
│ │ └── token_scanner.py
└── language_server_plugin
│ └── .comming_soon
├── pyproject.toml
├── scripts
├── deploy_docs.py
├── extract_release_notes.py
├── install_bundled_editable.py
├── install_packages.py
├── is_prerelease.py
├── package.py
├── publish.py
├── tools.py
├── update_changelog.py
├── update_doc_links.py
└── update_git_versions.py
├── src
└── GherkinParser
│ ├── Library.py
│ ├── __init__.py
│ ├── __version__.py
│ ├── gherkin_builder.py
│ ├── gherkin_parser.py
│ ├── glob_path.py
│ └── py.typed
├── syntaxes
└── gherkin-classic.tmLanguage
├── tests
├── __init__.py
└── gurke
│ ├── __init__.py
│ ├── test_gherkin.py
│ └── test_gherkin_in_markdown_token_matcher.py
├── tsconfig.json
├── vscode-client
├── extension.ts
├── formattingEditProvider.ts
├── parseGherkinDocument.ts
└── test
│ └── runTest.ts
└── webpack.config.js
/.eslintignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | dist/
3 | out/
4 | coverage/
5 |
6 | vscode.d.ts
7 | vscode.proposed.d.ts
8 |
9 | .mypy_cache/
10 | .pytest_cache/
11 |
12 | site/
--------------------------------------------------------------------------------
/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "node": true,
4 | "es6": true,
5 | "mocha": true
6 | },
7 | "parserOptions": {
8 | "project": "./tsconfig.json"
9 | },
10 | "parser": "@typescript-eslint/parser",
11 | "plugins": [
12 | "@typescript-eslint",
13 | "prettier"
14 | ],
15 | "extends": [
16 | "eslint:recommended",
17 | "plugin:@typescript-eslint/recommended",
18 | "plugin:@typescript-eslint/recommended-requiring-type-checking",
19 | "plugin:import/errors",
20 | "plugin:import/warnings",
21 | "plugin:import/typescript",
22 | "prettier"
23 | ],
24 | "rules": {
25 | // Overriding ESLint rules with Typescript-specific ones
26 | "@typescript-eslint/ban-ts-comment": [
27 | "error",
28 | {
29 | "ts-ignore": "allow-with-description"
30 | }
31 | ],
32 | "@typescript-eslint/explicit-module-boundary-types": "error",
33 | "no-bitwise": "off",
34 | "no-dupe-class-members": "off",
35 | "@typescript-eslint/no-dupe-class-members": "error",
36 | "no-empty-function": "off",
37 | "@typescript-eslint/no-empty-function": [
38 | "error"
39 | ],
40 | "@typescript-eslint/no-empty-interface": "off",
41 | "@typescript-eslint/no-explicit-any": "error",
42 | "@typescript-eslint/no-non-null-assertion": "off",
43 | "no-unused-vars": "off",
44 | "@typescript-eslint/no-unused-vars": [
45 | "error",
46 | {
47 | "args": "after-used",
48 | "argsIgnorePattern": "^_"
49 | }
50 | ],
51 | "no-use-before-define": "off",
52 | "@typescript-eslint/no-use-before-define": [
53 | "error",
54 | {
55 | "functions": false
56 | }
57 | ],
58 | "no-useless-constructor": "off",
59 | "@typescript-eslint/no-useless-constructor": "error",
60 | "@typescript-eslint/no-var-requires": "off",
61 | // Other rules
62 | "class-methods-use-this": [
63 | "error",
64 | {
65 | "exceptMethods": [
66 | "dispose"
67 | ]
68 | }
69 | ],
70 | "func-names": "off",
71 | "import/extensions": "off",
72 | "import/namespace": "off",
73 | "import/no-extraneous-dependencies": "off",
74 | "import/no-unresolved": [
75 | "error",
76 | {
77 | "ignore": [
78 | "monaco-editor",
79 | "vscode"
80 | ]
81 | }
82 | ],
83 | "import/prefer-default-export": "off",
84 | "linebreak-style": "off",
85 | "no-await-in-loop": "off",
86 | "no-console": "off",
87 | "no-control-regex": "off",
88 | "no-extend-native": "off",
89 | "no-multi-str": "off",
90 | "no-param-reassign": "off",
91 | "no-prototype-builtins": "off",
92 | "no-restricted-syntax": [
93 | "error",
94 | {
95 | "selector": "ForInStatement",
96 | "message": "for..in loops iterate over the entire prototype chain, which is virtually never what you want. Use Object.{keys,values,entries}, and iterate over the resulting array."
97 | },
98 | {
99 | "selector": "LabeledStatement",
100 | "message": "Labels are a form of GOTO; using them makes code confusing and hard to maintain and understand."
101 | },
102 | {
103 | "selector": "WithStatement",
104 | "message": "`with` is disallowed in strict mode because it makes code impossible to predict and optimize."
105 | }
106 | ],
107 | "no-template-curly-in-string": "off",
108 | "no-underscore-dangle": "off",
109 | "no-useless-escape": "off",
110 | "no-void": [
111 | "error",
112 | {
113 | "allowAsStatement": true
114 | }
115 | ],
116 | "operator-assignment": "off",
117 | "strict": "off",
118 | "prettier/prettier": [
119 | "error"
120 | ]
121 | }
122 | }
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [d-biehl] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4 | # patreon: # Replace with a single Patreon username
5 | # open_collective: # Replace with a single Open Collective username
6 | # ko_fi: # Replace with a single Ko-fi username
7 | # tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | # community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | # liberapay: # Replace with a single Liberapay username
10 | # issuehunt: # Replace with a single IssueHunt username
11 | # otechie: # Replace with a single Otechie username
12 | # lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
13 | custom: ['https://www.buymeacoffee.com/dbiehl', 'https://paypal.me/dbiehl74'] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
14 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
 4 | title: '[BUG]'
5 | labels: 'bug'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | If possible add some example source code like:
21 | ```robotframework
22 | ***Settings***
23 | Library Collections
24 |
25 | ***Test Cases***
26 | a simple test
 27 |     do something  # this should show something
28 | ```
29 |
30 | **Expected behavior**
31 | A clear and concise description of what you expected to happen.
32 |
 33 | **Screenshots / Videos**
34 | If applicable, add screenshots or videos to help explain your problem.
35 |
36 | **Logs**
37 | Copy the messages from VSCode "Output" view for RobotCode and RobotCode Language Server for the specific folder/workspace.
38 |
39 | **Desktop (please complete the following information):**
40 | - VS Code Version [e.g. 1.60]
41 | - RobotCode Version [e.g. 0.3.2]
42 | - OS: [e.g. Windows, Linux]
43 | - Python Version [e.g. 3.9.7]
44 | - RobotFramework Version [e.g. 4.0.0]
45 | - Additional tools like robocop, robotidy
46 |
47 | **Additional context**
48 | Add any other context about the problem here.
49 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/enhancement.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Enhancement
3 | about: Suggest an idea for this project
4 | title: '[ENHANCEMENT]'
5 | labels: 'enhancement'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your enhancement request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Question
 3 | about: If you have a question about how to set up something or get things running, or if you are unsure whether it's a bug.
4 | title: '[QUESTION]'
5 | labels: 'question'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Please describe.**
11 | A clear and concise description of the problem and the question at hand.
12 |
13 |
14 | **Desktop (please complete the following information):**
15 | - VS Code Version [e.g. 1.60]
16 | - RobotCode Version [e.g. 0.3.2]
17 | - OS: [e.g. Windows, Linux]
18 | - Python Version [e.g. 3.9.7]
19 | - RobotFramework Version [e.g. 4.0.0]
20 | - Additional tools like robocop, robotidy
21 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | sdist/
20 | var/
21 | wheels/
22 | share/python-wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .nox/
42 | .coverage
43 | .coverage.*
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | *.py,cover
49 | .hypothesis/
50 | .pytest_cache/
51 | cover/
52 |
53 | # profiler
54 | prof/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | .pybuilder/
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 |
87 | # pyenv
88 | # For a library or package, you might want to ignore these files since the code is
89 | # intended to run in multiple environments; otherwise, check them in:
90 | # .python-version
91 |
92 | # pipenv
93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
96 | # install all needed dependencies.
97 | #Pipfile.lock
98 |
99 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
100 | __pypackages__/
101 |
102 | # Celery stuff
103 | celerybeat-schedule
104 | celerybeat.pid
105 |
106 | # SageMath parsed files
107 | *.sage.py
108 |
109 | # Environments
110 | .env
111 | .venv
112 | env/
113 | venv/
114 | ENV/
115 | env.bak/
116 | venv.bak/
117 | .hatch/
118 |
119 | # Spyder project settings
120 | .spyderproject
121 | .spyproject
122 |
123 | # Rope project settings
124 | .ropeproject
125 |
126 | # mkdocs documentation
127 | /site
128 |
129 | # mypy
130 | .mypy_cache/
131 | .dmypy.json
132 | dmypy.json
133 |
134 | # Pyre type checker
135 | .pyre/
136 |
137 | # pytype static type analyzer
138 | .pytype/
139 |
140 | # Cython debug symbols
141 | cython_debug/
142 |
143 | .vscode/*
144 | #!.vscode/settings.json
145 | !.vscode/tasks.json
146 | !.vscode/launch.json
147 | !.vscode/extensions.json
148 | *.code-workspace
149 |
150 | # Local History for Visual Studio Code
151 | .history/
152 |
153 | # Logs
154 | logs
155 | *.log
156 | npm-debug.log*
157 | yarn-debug.log*
158 | yarn-error.log*
159 | lerna-debug.log*
160 |
161 | # Diagnostic reports (https://nodejs.org/api/report.html)
162 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
163 |
164 | # Runtime data
165 | pids
166 | *.pid
167 | *.seed
168 | *.pid.lock
169 |
170 | # Directory for instrumented libs generated by jscoverage/JSCover
171 | lib-cov
172 |
173 | # Coverage directory used by tools like istanbul
174 | coverage
175 | *.lcov
176 |
177 | # nyc test coverage
178 | .nyc_output
179 |
180 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
181 | .grunt
182 |
183 | # Bower dependency directory (https://bower.io/)
184 | bower_components
185 |
186 | # node-waf configuration
187 | .lock-wscript
188 |
189 | # Compiled binary addons (https://nodejs.org/api/addons.html)
190 | build/Release
191 |
192 | # Dependency directories
193 | node_modules/
194 | jspm_packages/
195 |
196 | # Snowpack dependency directory (https://snowpack.dev/)
197 | web_modules/
198 |
199 | # TypeScript cache
200 | *.tsbuildinfo
201 |
202 | # Optional npm cache directory
203 | .npm
204 |
205 | # Optional eslint cache
206 | .eslintcache
207 |
208 | # Microbundle cache
209 | .rpt2_cache/
210 | .rts2_cache_cjs/
211 | .rts2_cache_es/
212 | .rts2_cache_umd/
213 |
214 | # Optional REPL history
215 | .node_repl_history
216 |
217 | # Output of 'npm pack'
218 | *.tgz
219 |
220 | # Yarn Integrity file
221 | .yarn-integrity
222 |
223 | # dotenv environment variables file
224 | .env
225 | .env.test
226 |
227 | # parcel-bundler cache (https://parceljs.org/)
228 | .cache
229 | .parcel-cache
230 |
231 | # Next.js build output
232 | .next
233 | out
234 |
235 | # Nuxt.js build / generate output
236 | .nuxt
237 | dist
238 |
239 | # Gatsby files
240 | .cache/
241 | # Comment in the public line in if your project uses Gatsby and not Next.js
242 | # https://nextjs.org/blog/next-9-1#public-directory-support
243 | # public
244 |
245 | # vuepress build output
246 | .vuepress/dist
247 |
248 | # Serverless directories
249 | .serverless/
250 |
251 | # FuseBox cache
252 | .fusebox/
253 |
254 | # DynamoDB Local files
255 | .dynamodb/
256 |
257 | # TernJS port file
258 | .tern-port
259 |
260 | # Stores VSCode versions used for testing VSCode extensions
261 | .vscode-test
262 |
263 | vscode.d.ts
264 | vscode.proposed.d.ts
265 |
266 | # yarn v2
267 | .yarn/cache
268 | .yarn/unplugged
269 | .yarn/build-state.yml
270 | .yarn/install-state.gz
271 | .pnp.*
272 |
273 | *.vsix
274 |
275 | # playground files
276 | playground/
277 |
278 | # pycharm
279 | .idea/
280 |
281 | # test results
282 | test-results
283 | results.html
284 | report.html
285 |
286 | # pyenv
287 | .python-version
288 |
289 | # robotcode
290 | .robotcode_cache/
291 |
292 | # ruff
293 | .ruff_cache/
294 |
295 | bundled/libs
296 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | fail_fast: true
2 | default_language_version:
3 | python: python3.8
4 | default_stages: [commit, push]
5 | repos:
6 | - repo: local
7 | hooks:
8 | - id: lint_package_update
9 | name: Update package for lint environment
10 | entry: hatch run lint:install-packages
11 | pass_filenames: false
12 | language: system
13 | types:
14 | - "python"
15 | - id: python_style
16 | name: Check Python Style
17 | entry: hatch run lint:style
18 | pass_filenames: false
19 | language: system
20 | types:
21 | - "python"
22 | - id: python_typing
23 | name: Check Python Typing
24 | entry: hatch run lint:typing
25 | pass_filenames: false
26 | language: system
27 | types:
28 | - "python"
29 | - id: eslint
30 | name: Check JavaScript Style and Typing
31 | entry: npm run lint
32 | pass_filenames: false
33 | language: system
34 | types:
35 | - "javascript"
36 |
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | # don't ever lint node_modules
2 | node_modules/
3 | # don't lint build output (make sure it's set to your correct build folder name)
4 | dist/
5 | out/
6 | # don't lint nyc coverage output
7 | coverage/
8 |
9 | package-lock.json
10 | .vscode/
11 | vscode.d.ts
12 | vscode.proposed.d.ts
13 |
14 | .mypy_cache/
15 | .pytest_cache/
16 | robotcode/
17 |
18 | *.md
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "tabWidth": 2,
3 | "useTabs": false,
4 | "endOfLine": "auto",
5 | "quoteProps": "as-needed",
6 | "printWidth": 120
7 | }
8 |
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | // See http://go.microsoft.com/fwlink/?LinkId=827846
3 | // for the documentation about the extensions.json format
4 | "recommendations": [
5 | "ms-python.python",
6 | "esbenp.prettier-vscode",
7 | "dbaeumer.vscode-eslint",
8 | "tamasfe.even-better-toml"
9 | ]
10 | }
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | // A launch configuration that compiles the extension and then opens it inside a new window
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | {
6 | "version": "0.2.0",
7 | "configurations": [
8 | {
9 | "name": "Python: Debug in terminal",
10 | "type": "python",
11 | "request": "launch",
12 | "purpose": [
13 | "debug-in-terminal"
14 | ],
15 | "justMyCode": false
16 | },
17 | {
18 | "name": "Python: Attach using Process Id",
19 | "type": "python",
20 | "request": "attach",
21 | "processId": "${command:pickProcess}"
22 | },
23 | {
24 | "name": "Python: Current File",
25 | "type": "python",
26 | "request": "launch",
27 | "program": "${file}",
28 | "console": "integratedTerminal",
29 | "justMyCode": false,
30 | "cwd": "${workspaceFolder}",
31 | },
32 | {
33 | "name": "Python: Attach to 5678",
34 | "type": "python",
35 | "request": "attach",
36 | "connect": {
37 | "host": "localhost",
38 | "port": 5678
39 | },
40 | "justMyCode": false,
41 | "subProcess": true,
42 | "showReturnValue": true,
43 | },
44 | {
45 | "name": "Python: Attach Prompt",
46 | "type": "python",
47 | "request": "attach",
48 | "connect": {
49 | "host": "localhost",
50 | "port": "${input:portNumber}"
51 | }
52 | },
53 | {
54 | "name": "Python: Pytest All Test",
55 | "type": "python",
56 | "request": "launch",
57 | "module": "pytest",
58 | "args": [
59 | "."
60 | ],
61 | "console": "integratedTerminal",
62 | "justMyCode": false,
63 | "cwd": "${workspaceFolder}",
64 | },
65 | {
66 | "name": "Python: Pytest Some Test",
67 | "type": "python",
68 | "request": "launch",
69 | "module": "pytest",
70 | "args": [
71 | "--full-trace",
72 | "--tb=native",
73 | "tests"
74 | ],
75 | "console": "integratedTerminal",
76 | "justMyCode": false,
77 | "cwd": "${workspaceFolder}",
78 | },
79 | {
80 | "name": "Python: Pytest Current File",
81 | "type": "python",
82 | "request": "launch",
83 | "module": "pytest",
84 | "args": [
85 | "${file}"
86 | ],
87 | "console": "integratedTerminal",
88 | "justMyCode": false,
89 | "cwd": "${workspaceFolder}",
90 | },
91 | {
92 | "name": "Run Extension",
93 | "type": "extensionHost",
94 | "request": "launch",
95 | "args": [
96 | "--extensionDevelopmentPath=${workspaceFolder}/../robotcode",
97 | "--extensionDevelopmentPath=${workspaceFolder}",
98 | ],
99 | "outFiles": [
100 | "${workspaceFolder}/out/**/*.js"
101 | ],
102 | "preLaunchTask": "npm: compile"
103 | },
104 | {
105 | "name": "Extension Tests",
106 | "type": "extensionHost",
107 | "request": "launch",
108 | "runtimeExecutable": "${execPath}",
109 | "args": [
110 | "--extensionDevelopmentPath=${workspaceFolder}",
111 | "--extensionTestsPath=${workspaceFolder}/out/test/suite/index"
112 | ],
113 | "outFiles": [
114 | "${workspaceFolder}/out/test/**/*.js"
115 | ],
116 | "preLaunchTask": "npm: compile"
117 | },
118 | {
119 | "name": "Python: Debug Tests",
120 | "type": "python",
121 | "request": "launch",
122 | "program": "${file}",
123 | "purpose": [
124 | "debug-test"
125 | ],
126 | "console": "integratedTerminal",
127 | "justMyCode": false
128 | },
129 | {
130 | "name": "Python: Remote-Attach",
131 | "type": "python",
132 | "request": "attach",
133 | "connect": {
134 | "host": "localhost",
135 | "port": 5678
136 | },
137 | "pathMappings": [
138 | {
139 | "localRoot": "${workspaceFolder}",
140 | "remoteRoot": "."
141 | }
142 | ],
143 | "justMyCode": true
144 | }
145 | ],
146 | "inputs": [
147 | {
148 | "type": "promptString",
149 | "id": "portNumber",
150 | "description": "debugpy portnumber",
151 | "default": "5678",
152 | }
153 | ]
154 | }
155 |
--------------------------------------------------------------------------------
/.vscode/tasks.json:
--------------------------------------------------------------------------------
1 | // See https://go.microsoft.com/fwlink/?LinkId=733558
2 | // for the documentation about the tasks.json format
3 | {
4 | "version": "2.0.0",
5 | "tasks": [
6 | {
7 | "label": "npm: watch",
8 | "type": "npm",
9 | "script": "watch",
10 | "problemMatcher": "$tsc-watch",
11 | "isBackground": true,
12 | "presentation": {
13 | "reveal": "never"
14 | },
15 | "group": {
16 | "kind": "build",
17 | "isDefault": true
18 | }
19 | },
20 | {
21 | "label": "npm: compile",
22 | "type": "npm",
23 | "script": "compile",
24 | "problemMatcher": [],
25 | "dependsOn": ["robotcode npm: compile"]
26 | },
27 | {
28 | "label": "robotcode npm: compile",
29 | "type": "npm",
30 | "script": "compile",
31 | "path": "../robotcode",
32 | "problemMatcher": []
33 | },
34 | {
35 | "label": "Python Lint All",
36 | "type": "shell",
37 | "command": "hatch run lint:all",
38 | "problemMatcher": []
39 | }
40 | ]
41 | }
--------------------------------------------------------------------------------
/.vscodeignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .github
3 | .vscode
4 | .vscode-test
5 |
6 | **/.gitignore
7 |
8 | # node
9 | src/
10 | node_modules/
11 | webpack.config.js
12 | .yarnrc
13 | .eslintrc.json
14 | tsconfig.json
15 | vscode-client
16 | .eslintignore
17 | .prettier*
18 |
19 |
20 | # python
21 | build
22 | .ruff_cache
23 | .hatch
24 | .venv
25 | .flake8
26 | mypy.ini
27 | log.ini
28 | pyproject.toml
29 | *.lock
30 |
31 | tests
32 | dist
33 |
34 | **/__pycache__
35 | **/.mypy_cache
36 | **/.pytest_cache
37 | test-results
38 |
39 | playground
40 | examples
41 |
42 | # svg files
43 | **/*.svg
44 |
45 | # coverage
46 | .coverage
47 | coverage.xml
48 | .python-version
49 |
50 | # mkdocs
51 | docs
52 | overrides
53 | site
54 |
55 | # others
56 | scripts
57 |
58 | **/.robotcode_cache
59 |
60 | .pre-commit-config.yaml
61 | .devcontainer
62 |
63 | robotcode
64 | packages
65 | bundled/libs/bin
66 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file. See [conventional commits](https://www.conventionalcommits.org/) for commit guidelines.
4 |
5 | ## [0.3.2](https://github.com/d-biehl/robotframework-gherkin-parser/compare/v0.3.1..v0.3.2) - 2024-02-05
6 |
7 | ### Bug Fixes
8 |
9 | - **vscode:** Use png icons instead of svg ([b8256ec](https://github.com/d-biehl/robotframework-gherkin-parser/commit/b8256ecc4b5d5c12a12872a2774134d53f690c8c))
10 |
11 |
12 | ## [0.3.1](https://github.com/d-biehl/robotframework-gherkin-parser/compare/v0.3.0..v0.3.1) - 2024-02-05
13 |
14 | ### Bug Fixes
15 |
16 | - **build:** Correct dependency versions ([dfadf76](https://github.com/d-biehl/robotframework-gherkin-parser/commit/dfadf765df342d7234f6e5ed089c5e2bbbd6764b))
17 |
18 |
19 | ## [0.3.0](https://github.com/d-biehl/robotframework-gherkin-parser/compare/v0.2.1..v0.3.0) - 2024-02-05
20 |
21 | ### Bug Fixes
22 |
23 | - **GherkinParser:** Fix lineno in hooks ([07ad49c](https://github.com/d-biehl/robotframework-gherkin-parser/commit/07ad49c203f489e6c8a96e5ef96261de4b155418))
24 | - **GherkinParser:** Steps with arguments now works correctly ([0c6ee44](https://github.com/d-biehl/robotframework-gherkin-parser/commit/0c6ee44af3ed55b57f83d89dc6bccbf0dcdb1529))
25 |
26 |
27 | ### Features
28 |
29 | - **vscode:** Implement simple formatting provider ([cba43de](https://github.com/d-biehl/robotframework-gherkin-parser/commit/cba43debe49d8fdb48820a79fa53b08ae525871e))
30 |
31 |
32 | ## [0.2.1](https://github.com/d-biehl/robotframework-gherkin-parser/compare/v0.2.0..v0.2.1) - 2024-02-03
33 |
34 | ### Bug Fixes
35 |
36 | - Again correct version scripts ([5a95b82](https://github.com/d-biehl/robotframework-gherkin-parser/commit/5a95b82ceb6c0a2ba221f828d610a66fe39e71bf))
37 |
38 |
39 | ## [0.2.0](https://github.com/d-biehl/robotframework-gherkin-parser/compare/v0.1.0..v0.2.0) - 2024-02-03
40 |
41 | ### Features
42 |
43 | - Correct versions ([d72116c](https://github.com/d-biehl/robotframework-gherkin-parser/commit/d72116c7a22746fd0cce282d1902681ae115d16f))
44 |
45 |
46 | ## [0.1.0](https://github.com/d-biehl/robotframework-gherkin-parser/compare/v0.0.1..v0.1.0) - 2024-02-03
47 |
48 | ### Features
49 |
50 | - **build:** Update changelog and bump scripts ([3dda318](https://github.com/d-biehl/robotframework-gherkin-parser/commit/3dda318374ea7bbc00cc529894904bcd5bc2ef38))
51 |
52 |
53 | ## [0.0.1] - 2024-02-03
54 |
55 | ### Features
56 |
57 | - **GherkinParser:** First running version ([bef0e6f](https://github.com/d-biehl/robotframework-gherkin-parser/commit/bef0e6f5cf6a198927168c59497eb60263024edd))
58 | - **gurke:** Introduce a `gurke` as gherkin parser ([9e9d47e](https://github.com/d-biehl/robotframework-gherkin-parser/commit/9e9d47e6837eca0eb8b4e36291958688fc4a268d))
59 | - **vscode:** Implement contributions for robotcode ([a724aa7](https://github.com/d-biehl/robotframework-gherkin-parser/commit/a724aa7a8874b9d5d0baac64bad7e795a92bf05e))
 60 | - **vscode:** Add simple highlighting for feature files ([338a137](https://github.com/d-biehl/robotframework-gherkin-parser/commit/338a13770e1469dbee521c0906897fc2c1d6b98b))
61 |
62 |
63 |
64 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | dbiehl@live.de
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
10 |
11 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
12 |
13 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
14 |
15 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
16 |
17 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
18 |
19 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
20 |
21 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
22 |
23 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
24 |
25 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
26 |
27 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
28 |
29 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
30 |
31 | 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
32 |
33 | 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
34 |
35 | (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
36 |
37 | (b) You must cause any modified files to carry prominent notices stating that You changed the files; and
38 |
39 | (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
40 |
41 | (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
42 |
43 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
44 |
45 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
46 |
47 | 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
48 |
49 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
50 |
51 | 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
52 |
53 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
54 |
55 | END OF TERMS AND CONDITIONS
56 |
57 | APPENDIX: How to apply the Apache License to your work.
58 |
59 | To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
60 |
61 | Copyright [yyyy] [name of copyright owner]
62 |
63 | Licensed under the Apache License, Version 2.0 (the "License");
64 | you may not use this file except in compliance with the License.
65 | You may obtain a copy of the License at
66 |
67 | http://www.apache.org/licenses/LICENSE-2.0
68 |
69 | Unless required by applicable law or agreed to in writing, software
70 | distributed under the License is distributed on an "AS IS" BASIS,
71 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
72 | See the License for the specific language governing permissions and
73 | limitations under the License.
74 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # **Robot Framework Gherkin Parser**: Quick Overview
2 |
3 | The **Robot Framework Gherkin Parser** enables seamless integration of Gherkin feature files with the **Robot Framework**, facilitating behavior-driven development (BDD) with ease. This integration not only allows for the flexible execution of Gherkin feature files alongside **Robot Framework** test files but also highlights the complementary strengths of both approaches. Gherkin feature files, with their less technical and more scenario-focused syntax, emphasize the behavioral aspects of what is being tested, rather than the how. In contrast, **Robot Framework** test files tend to be more technical, focusing on the step-by-step implementation of test scenarios through keyword sequences.
4 |
5 | Utilizing a slightly modified version of the official [Cucumber Gherkin Parser](https://github.com/cucumber/gherkin), this custom parser implementation ensures the direct execution of Gherkin scenarios within the **Robot Framework** environment. This supports efficient transitions to and from BDD practices, catering to both technical and non-technical stakeholders by bridging the gap between business requirements and technical implementation.
6 |
7 | The **Robot Framework Gherkin Parser** simplifies test step implementation, allowing technical testers to implement test steps in the **Robot Framework**'s keyword-driven language. This is particularly beneficial when compared to the traditional BDD approach, which might require complex programming skills for step definitions in languages such as Java or C#. The parser thereby reduces the barrier to BDD test creation and maintenance, making it more accessible.
8 |
9 | ## Core Features
10 |
11 | - **Focus on Behavioral Testing**: Gherkin feature files allow for specifying test scenarios in a less technical, more narrative form, focusing on what needs to be tested rather than how it is to be tested. This complements the more technically oriented **Robot Framework** test files, providing a balanced approach to defining and executing tests.
12 | - **User-Friendly Test Implementation**: Technical testers can easily implement test steps in the **Robot Framework**'s intuitive language, avoiding the complexity of traditional programming languages for BDD step definitions.
13 | - **Efficient Execution and Porting**: Enables direct execution and easy porting of Gherkin feature files, bridging the gap between Gherkin's scenario-focused syntax and the **Robot Framework**'s technical implementation.
14 | - **Seamless Development Environment**: The inclusion of a plugin/extension for [RobotCode](https://robotcode.io) enhances the development and testing process within Visual Studio Code, offering integrated tools tailored for both BDD and automated testing.
15 |
16 | Designed for teams leveraging the **Robot Framework** and looking to integrate or enhance their BDD methodology, the **Robot Framework Gherkin Parser** facilitates a comprehensive testing strategy. It encourages a collaborative testing environment by simplifying the creation of BDD tests and improving testing efficiency and flexibility.
17 |
18 | Explore the subsequent sections for details on integrating this parser into your testing strategy, optimizing its usage, and contributing to its development.
19 |
20 | ## Requirements
21 |
22 | Only the Parser
23 |
24 | * Python 3.8 or above
25 | * Robot Framework 7.0 or above
26 |
27 | For Support in VSCode
28 |
29 | * VSCode version 1.82 and above
30 |
31 | ## Installation
32 |
33 | The **Robot Framework Gherkin Parser** can be installed using the following methods:
34 |
35 | - **Pip**: The parser can be installed using pip, the Python package manager. Run the following command to install the parser:
36 |
37 | ```bash
38 | pip install robotframework-gherkin-parser
39 | ```
40 |
41 | If you are using the [RobotCode](https://marketplace.visualstudio.com/items?itemName=d-biehl.robotcode) extension for VSCode as your IDE, you can install the [**RobotCode GherkinParser Support** extension](https://marketplace.visualstudio.com/items?itemName=d-biehl.robotcode-gherkin) from the VSCode Marketplace.
42 |
43 |
44 | ## Usage
45 |
46 | ## On command line
47 |
48 | To execute `.feature` files using the **Robot Framework Gherkin Parser** on the command line, you need to use the `robot` command line option `--parser` to specify the parser to be used. The following command demonstrates how to execute a `.feature` file using the **Robot Framework Gherkin Parser**:
49 |
50 | ```bash
51 | robot --parser GherkinParser path/to/your/feature/file.feature
52 | ```
53 |
54 | ## IDE
55 |
56 | ### Visual Studio Code with [RobotCode](https://marketplace.visualstudio.com/items?itemName=d-biehl.robotcode) extension
57 |
58 | If the plugin-extension for [**RobotCode GherkinParser Support** extension](https://marketplace.visualstudio.com/items?itemName=d-biehl.robotcode-gherkin) is installed in VSCode
59 |
60 | By creating a `robot.toml` file in your project root and adding the following configuration:
61 |
62 | ```toml
63 | [parsers]
64 | GherkinParser=[]
65 | ```
66 |
67 | NOT IMPLEMENTED YET: ~~You can enable the GherkinParser by the VSCode Setting: `robotcode.robot.parsers`~~
68 |
69 | ## Examples
70 |
71 | The following example demonstrates a simple Gherkin feature file that can be executed using the **Robot Framework Gherkin Parser**:
72 |
73 | Create a folder named `features` in your project root.
74 | Create a file named `calculator.feature` in the folder `features` with the following content:
75 |
76 | ```gherkin
77 | Feature: Calculator
78 | As a user
79 | I want to use a calculator
80 | So that I can perform basic arithmetic operations
81 |
82 | Scenario: Add two numbers
83 | Given I have entered 50 into the calculator
84 | And I have entered 70 into the calculator
85 | When I press add
86 | Then the result should be 120 on the screen
87 | ```
88 |
89 | To execute the `calculator.feature` file using the **Robot Framework Gherkin Parser** on the command line, run the following command:
90 |
91 | ```bash
92 | robot --parser GherkinParser features/calculator.feature
93 | ```
94 |
95 | If you are using VSCode + RobotCode + RobotCode GherkinParser Support, you can run the test by clicking on the play buttons in the feature file.
96 |
97 |
98 |
99 | ## Contributing
100 |
101 | TODO
102 |
--------------------------------------------------------------------------------
/bundled_requirements.txt:
--------------------------------------------------------------------------------
1 | typing-extensions>=4.4.0
2 |
--------------------------------------------------------------------------------
/cliff.toml:
--------------------------------------------------------------------------------
1 | # git-cliff ~ default configuration file
2 | # https://git-cliff.org/docs/configuration
3 | #
4 | # Lines starting with "#" are comments.
5 | # Configuration options are organized into tables and keys.
6 | # See documentation for more information on available options.
7 |
8 | [changelog]
9 | # changelog header
10 | header = """
11 | # Changelog\n
12 | All notable changes to this project will be documented in this file. See [conventional commits](https://www.conventionalcommits.org/) for commit guidelines.\n
13 | """
14 | # template for the changelog body
15 | # https://keats.github.io/tera/docs/#introduction
16 | body = """
17 | {% if version %}\
18 | {% if previous.version %}\
19 | ## [{{ version | trim_start_matches(pat="v") }}]($REPO/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }}
20 | {% else %}\
21 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
22 | {% endif %}\
23 | {% else %}\
24 | ## [unreleased]
25 | {% endif %}\
26 | {% for group, commits in commits | group_by(attribute="group") %}
27 | ### {{ group | upper_first }}
28 | {% for commit in commits
29 | | filter(attribute="scope")
30 | | sort(attribute="scope") %}
31 | - **{{commit.scope}}:** {{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end="") }}]($REPO/commit/{{ commit.id }}))
32 | {%- if commit.breaking %}
33 | {% raw %} {% endraw %}- **BREAKING**: {{commit.breaking_description}}
34 | {%- endif -%}
35 | {% if commit.body %}\n\n{{ commit.body | trim | indent(prefix=" ", first=true) }}\n{% endif -%}
36 | {%- endfor -%}
37 | {% raw %}\n{% endraw %}\
38 | {%- for commit in commits %}
39 | {%- if commit.scope -%}
40 | {% else -%}
41 | - {{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end="") }}]($REPO/commit/{{ commit.id }}))
42 | {% if commit.breaking -%}
43 | {% raw %} {% endraw %}- **BREAKING**: {{commit.breaking_description}}
44 | {% endif -%}
45 | {% if commit.body %}\n\n{{ commit.body | trim | indent(prefix=" ", first=true) }}\n{% endif -%}
46 | {% endif -%}
47 | {% endfor -%}
48 | {% raw %}\n{% endraw %}\
49 | {% endfor %}\n
50 | """
51 | # remove the leading and trailing whitespace from the template
52 | trim = true
53 | # changelog footer
54 | footer = """
55 |
56 | """
57 | # postprocessors
58 | postprocessors = [
59 | { pattern = '\$REPO', replace = "https://github.com/d-biehl/robotframework-gherkin-parser" }, # replace repository URL
60 | ]
61 | [git]
62 | # parse the commits based on https://www.conventionalcommits.org
63 | conventional_commits = true
64 | # filter out the commits that are not conventional
65 | filter_unconventional = true
66 | # process each line of a commit as an individual commit
67 | split_commits = false
68 | # regex for preprocessing the commit messages
69 | commit_preprocessors = [
70 | # { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, # replace issue numbers
71 | ]
72 | # regex for parsing and grouping commits
73 | commit_parsers = [
74 | { message = "^feat", group = "Features" },
75 | { message = "^Feat", group = "Features" },
76 | { message = "^Feature", group = "Features" },
77 | { message = "^fix", group = "Bug Fixes" },
78 | { message = "^doc", group = "Documentation" },
79 | { message = "^perf", group = "Performance" },
80 | { message = "^refactor", group = "Refactor" },
81 | { message = "^style", group = "Styling", skip = true },
82 | { message = "^test", group = "Testing" },
83 | { message = "^chore\\(release\\): prepare for", skip = true },
84 | { message = "^chore\\(deps\\)", skip = true },
85 | { message = "^chore\\(pr\\)", skip = true },
86 | { message = "^chore\\(pull\\)", skip = true },
87 | { message = "^chore\\(build\\)", skip = true },
88 | { message = "^chore|ci", group = "Miscellaneous Tasks", skip = true },
89 | { message = "^bump", skip = true },
90 | { body = ".*security", group = "Security" },
91 | { message = "^revert", group = "Revert" },
92 | ]
93 | # protect breaking changes from being skipped due to matching a skipping commit_parser
94 | protect_breaking_commits = false
95 | # filter out the commits that are not matched by commit parsers
96 | filter_commits = true
97 | # regex for matching git tags
98 | tag_pattern = "v[0-9]+\\.[0-9]+\\.[0-9]+-*"
99 |
100 | # regex for skipping tags
101 | skip_tags = "v[0-9]+\\.[0-9]+\\.[0-9]+-alpha\\.[0-9]+"
102 | # regex for ignoring tags
103 | ignore_tags = ""
104 | # sort the tags topologically
105 | topo_order = false
106 | # sort the commits inside sections by oldest/newest order
107 | sort_commits = "oldest"
108 | # limit the number of commits included in the changelog.
109 | # limit_commits = 42
110 |
--------------------------------------------------------------------------------
/examples/simple/.gitignore:
--------------------------------------------------------------------------------
1 | results/
2 | _*
3 | .*
--------------------------------------------------------------------------------
/examples/simple/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 | {
8 | "name": "RobotCode: Run Current",
9 | "type": "robotcode",
10 | "request": "launch",
11 | "cwd": "${workspaceFolder}",
12 | "target": "${file}"
13 | },
14 | {
15 | "name": "RobotCode: Run All",
16 | "type": "robotcode",
17 | "request": "launch",
18 | "cwd": "${workspaceFolder}",
19 | "target": "."
20 | },
21 | {
22 | "name": "RobotCode: Default",
23 | "type": "robotcode",
24 | "request": "launch",
25 | "purpose": "default",
26 | "presentation": {
27 | "hidden": true
28 | },
29 | "attachPython": true,
30 | "pythonConfiguration": "RobotCode: Python"
31 | },
32 | {
33 | "name": "RobotCode: Python",
34 | "type": "python",
35 | "request": "attach",
36 | "presentation": {
37 | "hidden": true
38 | },
39 | "justMyCode": false
40 | }
41 | ]
42 | }
--------------------------------------------------------------------------------
/examples/simple/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | // "robotcode.extraArgs": [
3 | // "-v"
4 | // ]
5 | "robotcode.debug.attachPython": true,
6 | "debugpy.debugJustMyCode": false,
7 | "python.analysis.typeCheckingMode": "off"
8 | }
--------------------------------------------------------------------------------
/examples/simple/features/Bladerunner.feature:
--------------------------------------------------------------------------------
1 | Feature: Android Detection System
2 | As a security officer
3 | I want to ensure that only humans gain access
4 |
5 | Scenario: A human attempts to gain access
6 | Given An individual reaches the security checkpoint
7 | And The individual is a human
8 | When The detection system is activated
9 | Then The individual should be granted access
10 |
11 | Scenario: An android attempts to gain access
12 | Given An individual reaches the security checkpoint
13 | And The individual is an android
14 | When The detection system is activated
15 | Then The individual should be denied access
16 |
--------------------------------------------------------------------------------
/examples/simple/features/Minimal.feature:
--------------------------------------------------------------------------------
1 | @browser
2 | Feature: Minimal
3 |
4 | @minimal
5 | Scenario: minimalistic
6 | Given the minimalism
7 |
8 | @maximal
9 | Scenario: another one
10 | Given do something in the maximal way
11 |
12 | @another @slow
13 | Scenario: another one1
14 | Given the minimalism
15 |
16 | @browser
17 | Scenario: the last one
18 | Given the minimalism
19 |
20 | Scenario Outline: Cucumber Data Table
21 | Given Table with example
22 | | FirstName | |
23 | | MiddleName | |
24 | | LastName | |
25 |
26 | Examples:
27 | | FirstName | MiddleName | LastName |
28 | | Daniel | D | Biehl |
29 | | Philip | K | Dick |
30 |
--------------------------------------------------------------------------------
/examples/simple/features/__init__.robot:
--------------------------------------------------------------------------------
1 | *** Settings ***
2 | Suite Setup Log Suite Setup
3 | Suite Teardown Log Suite Teardown
4 | Test Setup Log Test Setup
5 | Test Teardown Log Test Teardown
6 |
7 | Resource ./steps/hooks.resource
8 |
--------------------------------------------------------------------------------
/examples/simple/features/doc_strings.feature:
--------------------------------------------------------------------------------
1 | Feature: Parser Should Support DocStrings
2 |
3 | Scenario: A scenario with a docstring
4 | Given a blog post named "Random" with Markdown body
5 | """
6 | Some Title, Eh?
7 | ===============
8 | Here is the first paragraph of my blog post. Lorem ipsum dolor sit amet,
9 | consectetur adipiscing elit.
10 | """
11 |
12 | Scenario: A scenario with a docstring and multiple whitespace and vars
13 | Given a blog post named "Random" with Markdown body
14 | """
15 | 😂🚲🚓
16 | (❁´◡`❁)
17 | (*/ω\*)
18 | (^///^)
19 | this text contains spaces
20 | and ${TEST NAME}
21 | """
22 |
23 | Scenario: A scenario with a backtick in the docstring
24 | Given a blog post named "Random" with Markdown body
25 | ```python
26 | 😂🚲🚓
27 | (❁´◡`❁)
28 | (*/ω\*)
29 | (^///^)
30 | this text contains spaces
31 | and ${TEST NAME}
32 | ```
33 |
--------------------------------------------------------------------------------
/examples/simple/features/documentation.feature:
--------------------------------------------------------------------------------
1 | Feature: Scenario Outline with a docstring
2 | This is a documentation for the feature file.
3 |
4 | it can be more than one line long.
5 |
6 | Scenario Outline: Greetings come in many forms
7 | this is the documentation for the scenario
8 |
9 | it can be more than one line long.
10 |
11 | and it can contain placeholders that will be replaced by the examples.
12 |
13 | Given this file:
14 | """
15 | Greeting:
16 | """
17 |
18 | Examples:
19 | | type | content |
20 | | en | Hello |
21 | | fr | Bonjour |
22 |
--------------------------------------------------------------------------------
/examples/simple/features/rule_sample.feature:
--------------------------------------------------------------------------------
1 | Feature: Highlander
2 |
3 | Rule: There can be only One
4 |
5 | Example: Only One -- More than one alive
6 | Given there are 3 ninjas
7 | And there are more than one ninja alive
8 | When 2 ninjas meet, they will fight
9 | Then one ninja dies (but not me)
10 | And there is one ninja less alive
11 |
12 | Example: Only One -- One alive
13 | Given there is only 1 ninja alive
14 | Then he (or she) will live forever ;-)
15 |
16 | Rule: There can be Two (in some cases)
17 |
18 | Example: Two -- Dead and Reborn as Phoenix
19 |
--------------------------------------------------------------------------------
/examples/simple/features/simple.feature.md:
--------------------------------------------------------------------------------
1 | # Feature: blah
2 |
3 | @atag
4 | ## Scenario: whatever
5 |
6 | - Given something
7 | - When something else
8 |
--------------------------------------------------------------------------------
/examples/simple/features/steps/__init__.resource:
--------------------------------------------------------------------------------
1 | *** Settings ***
2 | Library step_impls.py
3 |
--------------------------------------------------------------------------------
/examples/simple/features/steps/hooks.resource:
--------------------------------------------------------------------------------
1 | *** Keywords ***
2 | before_test
3 | [Arguments] ${context}= ${test}=
4 | [Tags] hook:before-test
5 | Log before test
6 | # Fail har har test
7 |
8 | before_suite
9 | [Arguments] ${context}= ${suite}=
10 | [Tags] hook:before-suite
11 | Log hook before suite
12 |
13 | before_keyword
14 | [Arguments] ${context}= ${suite}=
15 | [Tags] hook:before-keyword
16 | Log before keyword
17 | Fail blah
18 | RETURN after_keyword
19 |
20 | after_keyword
21 | [Arguments] ${context}= ${suite}=
22 | [Tags] hook:after-step
23 | Log after step
24 |
25 | before_tag
26 | [Arguments] ${context}= ${suite}=
27 | [Tags] hook:before-tag
28 | Log before suite
29 |
30 |
31 | start_browser
32 | [Arguments] ${context}= ${suite}=
33 | [Tags] hook:before-tag:browser
34 | Log before suite
35 |
--------------------------------------------------------------------------------
/examples/simple/features/steps/simple.resource:
--------------------------------------------------------------------------------
1 | *** Settings ***
2 | Library step_impls.py
3 |
4 |
5 | *** Keywords ***
6 | the minimalism
7 | Log yeah
8 |
9 | Table with example
10 | [Tags] gherkin:step
11 | [Arguments] ${rows}
12 |
13 | FOR ${index} ${row} IN ENUMERATE @{rows}
14 | Log ${index} ${{", ".join((c["value"] for c in $row["cells"]))}}
15 | END
16 |
17 | FOR ${row} IN @{rows}
18 | FOR ${cell} IN @{row}[cells]
19 | Log ${cell["value"]}
20 | END
21 | END
22 |
23 | a blog post named "${name}" with Markdown body
24 | [Tags] gherkin:step
25 | [Arguments] ${body}
26 |
27 | Log ${name}
28 | Log ${body}
29 |
30 | this file:
31 | [Tags] gherkin:step
32 | [Arguments] ${content} ${mediaType}=
33 |
34 | Log ${content}
35 | Log ${mediaType}
36 |
--------------------------------------------------------------------------------
/examples/simple/features/steps/step_impls.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | from robot.api.deco import keyword
4 |
5 |
6 | @keyword(name="before_feature", tags=["hook:before-feature"])
7 | def before_feature(lang: Optional[str] = None):
8 | print("I'm doing something in lang")
9 | #raise Exception("I'm failing")
10 |
11 |
12 | @keyword(name="Do something in ${lang}")
13 | def do_something_in_python(lang: str):
14 | print(f"I'm doing something in {lang}")
15 |
--------------------------------------------------------------------------------
/examples/simple/robot.toml:
--------------------------------------------------------------------------------
1 | [parsers]
2 | GherkinParser = []
3 |
--------------------------------------------------------------------------------
/examples/simple/tests/first.robot:
--------------------------------------------------------------------------------
1 | *** Test Cases ***
2 | first
3 | Run Keyword My Keyword
4 |
5 |
6 | *** Keywords ***
7 | My Keyword
8 | # TODO: implement keyword "My Keyword".
9 | Fail Not Implemented
10 |
--------------------------------------------------------------------------------
/hatch.toml:
--------------------------------------------------------------------------------
1 | [version]
2 | path = "src/GherkinParser/__version__.py"
3 |
4 | [build]
5 | dev-mode-dirs = ["src"]
6 |
7 | [build.targets.wheel]
8 | only-include = ["src/GherkinParser"]
9 | sources = ["src"]
10 |
11 | [build.targets.sdist]
12 | only-include = ["src", "CHANGELOG.md"]
13 |
14 |
15 | [envs.default]
16 | dependencies = [
17 | "pytest",
18 | "pytest-html",
19 | "pytest_asyncio",
20 | "pytest-regtest>=1.5.0",
21 | "pytest-cov",
22 | "mypy",
23 | "ruff",
24 | "black",
25 | "debugpy",
26 | "GitPython",
27 | "semantic-version",
28 | ]
29 | pre-install-commands = ["install-packages"]
30 |
31 |
32 | [envs.default.scripts]
33 | cov = "pytest --cov-report=term-missing --cov-config=pyproject.toml --cov=GherkinParser --cov=tests {args}"
34 | no-cov = "cov --no-cov {args}"
35 | test = "pytest {args}"
36 | test-reset = "test --regtest-reset"
37 | install-bundled-editable = "python ./scripts/install_bundled_editable.py"
38 | install-packages = "python ./scripts/install_packages.py"
39 |
40 | [envs.rfmaster]
41 | python = "3.11"
42 | extra-dependencies = [
43 | "robotframework @ git+https://github.com/robotframework/robotframework.git",
44 | ]
45 |
46 | [envs.rfdevel]
47 | python = "3.11"
48 | post-install-commands = ["pip install -U -e {root:uri}/../robotframework"]
49 |
50 | [envs.py312_rfmaster]
51 | python = "3.12"
52 | extra-dependencies = [
53 | "robotframework @ git+https://github.com/robotframework/robotframework.git",
54 | ]
55 |
56 | [envs.devel]
57 | python = "3.8"
58 |
59 | [[envs.devel.matrix]]
60 | python = ["3.8", "3.9", "3.10", "3.11", "3.12"]
61 | rf = ["rf70"]
62 |
63 | [envs.devel.overrides]
64 | matrix.rf.dependencies = [
65 | { value = "robotframework>=7.0, <7.1", if = [
66 | "rf70",
67 | ] },
68 | ]
69 |
70 | [[envs.test.matrix]]
71 | rf = ["rf70", "master"]
72 |
73 | [envs.test.overrides]
74 | matrix.rf.dependencies = [
75 | { value = "robotframework>=7.0, <7.1", if = [
76 | "rf70",
77 | ] },
78 | ]
79 |
80 | [envs.lint]
81 | #skip-install = true
82 | #extra-dependencies = ["tomli>=2.0.0"]
83 |
84 |
85 | [envs.lint.scripts]
86 | typing = ["mypy --install-types --non-interactive {args:.}"]
87 | style = ["ruff .", "black --check --diff ."]
88 | fmt = ["black .", "ruff --fix .", "style"]
89 | all = ["style", "typing"]
90 |
91 | [envs.pages]
92 | #detached = true
93 | extra-dependencies = [
94 | "mkdocs>=1.4.2",
95 | "mkdocs-material",
96 | # Plugins
97 | "mkdocs-minify-plugin",
98 | "mkdocs-git-revision-date-localized-plugin",
99 | "mkdocstrings-python",
100 | "mkdocs-redirects",
101 | "mkdocs-glightbox",
102 |
103 | # Extensions
104 | "mkdocs-click~=0.8.0",
105 | "pymdown-extensions~=9.6.0",
106 | # Necessary for syntax highlighting in code blocks
107 | "pygments",
108 | "mike",
109 | ]
110 |
111 | [envs.pages.scripts]
112 | build = ["mkdocs build"]
113 | deploy = ["python scripts/deploy_docs.py"]
114 |
115 |
116 | [envs.build]
117 | skip-install = true
118 | detached = true
119 | python = "38"
120 | dependencies = ["GitPython", "semantic-version", "commitizen", "git-cliff"]
121 |
122 | [envs.build.scripts]
123 | update-changelog = ["python scripts/update_changelog.py"]
124 | update-git-versions = ["python scripts/update_git_versions.py"]
125 | update-doc-links = ["python scripts/update_doc_links.py"]
126 | package = ["python scripts/package.py"]
127 | publish = ["python scripts/publish.py"]
128 | extract-release-notes = ["python scripts/extract_release_notes.py"]
129 | is-prerelease = ["python scripts/is_prerelease.py"]
130 | bump = ["cz bump"]
131 |
--------------------------------------------------------------------------------
/icons/cucumber.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robotcodedev/robotframework-gherkin-parser/dc81a6845d19c447d006aa4b67219354d79fa9c1/icons/cucumber.png
--------------------------------------------------------------------------------
/icons/gherkin.svg:
--------------------------------------------------------------------------------
1 |
2 |
11 |
--------------------------------------------------------------------------------
/icons/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robotcodedev/robotframework-gherkin-parser/dc81a6845d19c447d006aa4b67219354d79fa9c1/icons/icon.png
--------------------------------------------------------------------------------
/language-configuration.json:
--------------------------------------------------------------------------------
1 | {
2 | "comments": {
3 | // symbol used for single line comment. Remove this entry if your language does not support line comments
4 | "lineComment": "#"
5 | },
6 | // symbols used as brackets
7 | "brackets": [
8 | ["${", "}"],
9 | ["@{", "}"],
10 | ["%{", "}"],
11 | ["&{", "}"],
12 | ["{", "}"],
13 | ["[", "]"],
14 | ["(", ")"]
15 | ],
16 | // symbols that are auto closed when typing
17 | "autoClosingPairs": [
18 | ["${", "}"],
19 | ["@{", "}"],
20 | ["%{", "}"],
21 | ["&{", "}"],
22 | ["{", "}"],
23 | ["[", "]"],
24 | ["(", ")"],
25 | ["\"", "\""],
26 | ["'", "'"]
27 | ],
28 | // symbols that can be used to surround a selection
29 | "surroundingPairs": [
30 | ["${", "}"],
31 | ["@{", "}"],
32 | ["%{", "}"],
33 | ["&{", "}"],
34 | ["{", "}"],
35 | ["[", "]"],
36 | ["(", ")"],
37 | ["\"", "\""],
38 | ["'", "'"],
39 | ["*", "*"],
40 | ["_", "_"]
41 | ],
42 | "onEnterRules": [
43 | {
44 | "beforeText": "^\\s*#.*",
45 | "afterText": ".+$",
46 | "action": {
47 | "indent": "none",
48 | "appendText": "# "
49 | }
50 | },
51 | {
52 | "beforeText": "^\\s*[^\\s].*",
53 | "afterText": "\\s*[^\\s].*$",
54 | "action": {
55 | "indent": "none",
56 | "appendText": "... "
57 | }
58 | }
59 | ]
60 | }
61 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "robotcode-gherkin",
3 | "displayName": "RobotCode GherkinParser Support",
4 | "description": "GherkinParser Support for RobotCode and Robot Framework",
5 | "icon": "icons/icon.png",
6 | "publisher": "d-biehl",
7 | "version": "0.3.2",
8 | "author": {
9 | "name": "Daniel Biehl",
10 | "url": "https://github.com/d-biehl/"
11 | },
12 | "homepage": "https://github.com/d-biehl/robotframework-gherkin-parser",
13 | "repository": {
14 | "type": "git",
15 | "url": "https://github.com/d-biehl/robotframework-gherkin-parser"
16 | },
17 | "bugs": {
18 | "url": "https://github.com/d-biehl/robotframework-gherkin-parser/issues"
19 | },
20 | "engines": {
21 | "vscode": "^1.82.0"
22 | },
23 | "categories": [
24 | "Programming Languages",
25 | "Testing",
26 | "Debuggers",
27 | "Formatters",
28 | "Linters"
29 | ],
30 | "keywords": [
31 | "Test",
32 | "Testing",
33 | "RobotFramework",
34 | "Robot Framework",
35 | "Robot",
36 | "Keyword Driven",
37 | "Data Driven",
38 | "Acceptance Testing",
39 | "Acceptance Test Driven Development",
40 | "Test Driven",
41 | "Behavior Driven Development",
42 | "BDD",
43 | "Behavior Driven Testing",
44 | "BDT",
45 | "Robotic Process Automation",
46 | "RPA"
47 | ],
48 | "featureFlags": {
49 | "usingNewInterpreterStorage": true
50 | },
51 | "capabilities": {
52 | "untrustedWorkspaces": {
53 | "supported": "limited",
54 | "description": "Only Partial IntelliSense is supported."
55 | },
56 | "virtualWorkspaces": {
57 | "supported": "limited",
58 | "description": "Only Partial IntelliSense supported."
59 | }
60 | },
61 | "activationEvents": [
62 | "workspaceContains:**/*.{feature,feature.md}"
63 | ],
64 | "galleryBanner": {
65 | "theme": "dark",
66 | "color": "#111111"
67 | },
68 | "sponsor": {
69 | "url": "https://github.com/sponsors/d-biehl"
70 | },
71 | "main": "./out/extension.js",
72 | "contributes": {
73 | "robotCode": {
74 | "fileExtensions": [
75 | "feature",
76 | "feature.md"
77 | ],
78 | "languageIds": [
79 | "gherkin",
80 | "markdown"
81 | ]
82 | },
83 | "activationEvents": [
84 | "workspaceContains:**/*.{feature,feature.md}"
85 | ],
86 | "languages": [
87 | {
88 | "id": "gherkin",
89 | "aliases": [
90 | "Gherkin",
91 | "Cucumber"
92 | ],
93 | "icon": {
94 | "light": "./icons/cucumber.png",
95 | "dark": "./icons/cucumber.png"
96 | },
97 | "extensions": [
98 | ".feature"
99 | ]
100 | },
101 | {
102 | "id": "markdown",
103 | "aliases": [
104 | "Markdown"
105 | ],
106 | "icon": {
107 | "light": "./icons/cucumber.png",
108 | "dark": "./icons/cucumber.png"
109 | },
110 | "extensions": [
111 | ".feature.md"
112 | ]
113 | }
114 | ],
115 | "grammars": [
116 | {
117 | "language": "gherkin",
118 | "scopeName": "text.gherkin.feature",
119 | "path": "./syntaxes/gherkin-classic.tmLanguage"
120 | }
121 | ],
122 | "breakpoints": [
123 | {
124 | "language": "gherkin"
125 | },
126 | {
127 | "language": "markdown",
128 | "when": "resourceFilename =~ /^.*\\.feature\\.md$/"
129 | }
130 | ]
131 | },
132 | "scripts": {
133 | "vscode:prepublish": "webpack --mode production",
134 | "webpack": "webpack --mode development",
135 | "webpack-dev": "webpack --mode development --watch",
136 | "test-compile": "tsc -p ./",
137 | "compile": "tsc -p ./",
138 | "watch": "tsc -watch -p ./",
139 | "pretest": "npm run compile && npm run lint",
140 | "lint": "eslint --ext .ts,.tsx,.js .",
141 | "lint-fix": "eslint --ext .ts,.tsx,.js --fix .",
142 | "test": "node ./out/test/runTest.js"
143 | },
144 | "extensionDependencies": [
145 | "d-biehl.robotcode"
146 | ],
147 | "dependencies": {
148 | "ansi-colors": "^4.1.3",
149 | "@cucumber/gherkin": "^27.0.0",
150 | "@cucumber/gherkin-utils": "^8.0.5"
151 | },
152 | "devDependencies": {
153 | "@types/node": "^18.17.1",
154 | "@types/vscode": "^1.82.0",
155 | "@typescript-eslint/eslint-plugin": "^6.20.0",
156 | "@typescript-eslint/parser": "^6.20.0",
157 | "@vscode/test-electron": "^2.3.9",
158 | "eslint": "^8.56.0",
159 | "eslint-config-prettier": "^9.1.0",
160 | "eslint-plugin-import": "^2.29.1",
161 | "eslint-plugin-jsx-a11y": "^6.8.0",
162 | "eslint-plugin-node": "^11.1.0",
163 | "eslint-plugin-prettier": "^5.1.3",
164 | "@vscode/vsce": "^2.23.0",
165 | "ovsx": "^0.8.3",
166 | "prettier": "^3.2.5",
167 | "ts-loader": "^9.5.1",
168 | "typescript": "^5.3.3",
169 | "webpack": "^5.90.1",
170 | "webpack-cli": "^5.1.4"
171 | }
172 | }
--------------------------------------------------------------------------------
/packages/gurke/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
10 |
11 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
12 |
13 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
14 |
15 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
16 |
17 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
18 |
19 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
20 |
21 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
22 |
23 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
24 |
25 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
26 |
27 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
28 |
29 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
30 |
31 | 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
32 |
33 | 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
34 |
35 | (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
36 |
37 | (b) You must cause any modified files to carry prominent notices stating that You changed the files; and
38 |
39 | (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
40 |
41 | (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
42 |
43 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
44 |
45 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
46 |
47 | 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
48 |
49 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
50 |
51 | 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
52 |
53 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
54 |
55 | END OF TERMS AND CONDITIONS
56 |
57 | APPENDIX: How to apply the Apache License to your work.
58 |
59 | To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
60 |
61 | Copyright [yyyy] [name of copyright owner]
62 |
63 | Licensed under the Apache License, Version 2.0 (the "License");
64 | you may not use this file except in compliance with the License.
65 | You may obtain a copy of the License at
66 |
67 | http://www.apache.org/licenses/LICENSE-2.0
68 |
69 | Unless required by applicable law or agreed to in writing, software
70 | distributed under the License is distributed on an "AS IS" BASIS,
71 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
72 | See the License for the specific language governing permissions and
73 | limitations under the License.
74 |
--------------------------------------------------------------------------------
/packages/gurke/README.md:
--------------------------------------------------------------------------------
1 | # gurke
2 |
3 | A Python3 only `gherkin` implementation with type hints.
4 |
5 | ## Introduction
6 |
7 | TODO: coming soon...
8 |
9 | ## Installation
10 |
11 | ```console
12 | pip install gurke
13 | ```
14 |
15 | ## License
16 |
17 | `gurke` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license.
18 |
19 | ## Third Party Notice
20 |
21 | `gurke` contains parts of the original [`gherkin`](https://github.com/cucumber/gherkin) implementation
22 |
--------------------------------------------------------------------------------
/packages/gurke/gherkin-python.razor:
--------------------------------------------------------------------------------
1 | @using Berp;
2 | @helper CallProduction(ProductionRule production)
3 | {
4 | switch(production.Type)
5 | {
6 | case ProductionRuleType.Start:
7 | @: self.start_rule(context, "@production.RuleName")
8 | break;
9 | case ProductionRuleType.End:
10 | @: self.end_rule(context, "@production.RuleName")
11 | break;
12 | case ProductionRuleType.Process:
13 | @: self.build(context, token)
14 | break;
15 | }
16 | }
17 | @helper HandleParserError(IEnumerable expectedTokens, State state)
18 | { token.detach()
19 | expected_tokens = [
20 | "@Raw(string.Join("\",\n \"", expectedTokens))",
21 | ]
22 | error = (
23 | UnexpectedEOFException(token, expected_tokens)
24 | if token.eof()
25 | else UnexpectedTokenException(token, expected_tokens)
26 | )
27 | if self.stop_at_first_error:
28 | raise error
29 | self.add_error(context, error)
30 | return @state.Id
31 | }
32 | @helper MatchToken(TokenType tokenType)
33 | {match_@(tokenType)(context, token)}
34 | # This file is generated. Do not edit! Edit gherkin-python.razor instead.
35 | from collections import deque
36 | from typing import Any, Callable, Deque, Dict, List, Optional, Union
37 |
38 | from .ast_builder import AstBuilder
39 | from .errors import CompositeParserException, ParserException, UnexpectedEOFException, UnexpectedTokenException
40 | from .token import Token
41 | from .token_matcher import TokenMatcher
42 | from .token_scanner import TokenScanner
43 |
44 | RULE_TYPE = [
45 | "None",
46 | @foreach(var rule in Model.RuleSet.Where(r => !r.TempRule))
47 | { "@rule.Name.Replace("#", "_")", # @rule.ToString(true)
48 | }
49 | ]
50 |
51 |
52 | class ParserContext:
53 | def __init__(
54 | self,
55 | token_scanner: TokenScanner,
56 | token_matcher: TokenMatcher,
57 | token_queue: Deque[Token],
58 | errors: List[BaseException],
59 | ) -> None:
60 | self.token_scanner = token_scanner
61 | self.token_matcher = token_matcher
62 | self.token_queue = token_queue
63 | self.errors = errors
64 |
65 |
66 | class @(Model.ParserClassName):
67 | def __init__(self, ast_builder: Optional[AstBuilder] = None) -> None:
68 | self.ast_builder = ast_builder if ast_builder is not None else AstBuilder()
69 | self.stop_at_first_error = False
70 |
71 | def parse(
72 | self, token_scanner_or_str: Union[TokenScanner, str], token_matcher: Optional[TokenMatcher] = None
73 | ) -> Any:
74 | token_scanner = (
75 | TokenScanner(token_scanner_or_str) if isinstance(token_scanner_or_str, str) else token_scanner_or_str
76 | )
77 | self.ast_builder.reset()
78 | if token_matcher is None:
79 | token_matcher = TokenMatcher()
80 | token_matcher.reset()
81 | context = ParserContext(token_scanner, token_matcher, deque(), [])
82 |
83 | self.start_rule(context, "@Model.RuleSet.StartRule.Name")
84 | state = 0
85 | token = None
86 | while True:
87 | token = self.read_token(context)
88 | state = self.match_token(state, token, context)
89 | if token.eof():
90 | break
91 |
92 | self.end_rule(context, "@Model.RuleSet.StartRule.Name")
93 |
94 | if context.errors:
95 | raise CompositeParserException(context.errors)
96 |
97 | return self.get_result()
98 |
99 | def build(self, context: ParserContext, token: Token) -> None:
100 | self.handle_ast_error(context, token, self.ast_builder.build)
101 |
102 | def add_error(self, context: ParserContext, error: BaseException) -> None:
103 | if str(error) not in (str(e) for e in context.errors):
104 | context.errors.append(error)
105 | if len(context.errors) > 10:
106 | raise CompositeParserException(context.errors)
107 |
108 | def start_rule(self, context: ParserContext, rule_type: str) -> None:
109 | self.handle_ast_error(context, rule_type, self.ast_builder.start_rule)
110 |
111 | def end_rule(self, context: ParserContext, rule_type: str) -> None:
112 | self.handle_ast_error(context, rule_type, self.ast_builder.end_rule)
113 |
114 | def get_result(self) -> Any:
115 | return self.ast_builder.get_result()
116 |
117 | def read_token(self, context: ParserContext) -> Token:
118 | if context.token_queue:
119 | return context.token_queue.popleft()
120 |
121 | return context.token_scanner.read()
122 | @foreach(var rule in Model.RuleSet.TokenRules)
123 | {
124 | def match_@(rule.Name.Replace("#", ""))(self, context: ParserContext, token: Token) -> Any: # noqa: N802
125 | @if (rule.Name != "#EOF")
126 | {
127 | @:if token.eof():
128 | @: return False
129 | }
130 | return self.handle_external_error(context, False, token, context.token_matcher.match_@(rule.Name.Replace("#", "")))
131 |
132 | }
133 |
134 | def match_token(self, state: int, token: Token, context: ParserContext) -> int:
135 | state_map: Dict[int, Callable[[Token, ParserContext], int]] = {
136 | @foreach(var state in Model.States.Values.Where(s => !s.IsEndState))
137 | {
138 | @: @state.Id: self.match_token_at_@(state.Id),
139 | }
140 | }
141 | if state in state_map:
142 | return state_map[state](token, context)
143 |
144 | raise RuntimeError("Unknown state: " + str(state))
145 | @foreach(var state in Model.States.Values.Where(s => !s.IsEndState))
146 | {
147 | def match_token_at_@(state.Id)(self, token: Token, context: ParserContext) -> int:
148 | @foreach(var transition in state.Transitions)
149 | {
150 |
151 | if (transition.LookAheadHint != null) {
152 | @:if self.@MatchToken(transition.TokenType) and self.lookahead_@(transition.LookAheadHint.Id)(context, token):
153 | foreach(var production in transition.Productions)
154 | {
155 | @CallProduction(production)
156 | }
157 | @:return @transition.TargetState
158 | } else {
159 | @:if self.@MatchToken(transition.TokenType):
160 | foreach(var production in transition.Productions)
161 | {
162 | @CallProduction(production)
163 | }
164 | @:return @transition.TargetState
165 | }
166 | }
167 |
168 | @HandleParserError(state.Transitions.Select(t => "#" + t.TokenType.ToString()).Distinct(), state)
169 |
170 | }
171 | @foreach(var lookAheadHint in Model.RuleSet.LookAheadHints)
172 | {
173 |
174 | def lookahead_@(lookAheadHint.Id)(self, context: ParserContext, current_token: Token) -> bool:
175 | current_token.detach()
176 | token = None
177 | queue = []
178 | match = False
179 | while True:
180 | token = self.read_token(context)
181 | token.detach()
182 | queue.append(token)
183 |
184 | if (@foreach(var tokenType in lookAheadHint.ExpectedTokens) {self.@MatchToken(tokenType) or }False):
185 | match = True
186 | break
187 |
188 | if not (@foreach(var tokenType in lookAheadHint.Skip) {self.@MatchToken(tokenType) or }False):
189 | break
190 |
191 | context.token_queue.extend(queue)
192 |
193 | return match
194 |
195 | }
196 |
197 | # private
198 |
199 | def handle_ast_error(self, context: ParserContext, argument: Any, action: Callable[[Any], Any]) -> Any:
200 | self.handle_external_error(context, True, argument, action)
201 |
202 | def handle_external_error(
203 | self, context: ParserContext, default_value: Any, argument: Any, action: Callable[[Any], Any]
204 | ) -> Any:
205 | if self.stop_at_first_error:
206 | return action(argument)
207 |
208 | try:
209 | return action(argument)
210 | except CompositeParserException as e:
211 | for error in e.errors:
212 | self.add_error(context, error)
213 | except ParserException as e:
214 | self.add_error(context, e)
215 | return default_value
216 |
--------------------------------------------------------------------------------
/packages/gurke/gherkin.berp:
--------------------------------------------------------------------------------
1 | [
2 | Tokens -> #Empty,#Comment,#TagLine,#FeatureLine,#RuleLine,#BackgroundLine,#ScenarioLine,#ExamplesLine,#StepLine,#DocStringSeparator,#TableRow,#Language
3 | IgnoredTokens -> #Comment,#Empty
4 | ClassName -> Parser
5 | Namespace -> Gherkin
6 | ]
7 |
8 | GherkinDocument! := Feature?
9 | Feature! := FeatureHeader Background? ScenarioDefinition* Rule*
10 | FeatureHeader! := #Language? Tags? #FeatureLine DescriptionHelper
11 |
12 | Rule! := RuleHeader Background? ScenarioDefinition*
13 | RuleHeader! := Tags? #RuleLine DescriptionHelper
14 |
15 | Background! := #BackgroundLine DescriptionHelper Step*
16 |
17 | // Interpreting a tag line is ambiguous (tag line of rule or of scenario)
18 | ScenarioDefinition! [#Empty|#Comment|#TagLine->#ScenarioLine]:= Tags? Scenario
19 |
20 | Scenario! := #ScenarioLine DescriptionHelper Step* ExamplesDefinition*
21 | // after the first "Data" block, interpreting a tag line is ambiguous (tagline of next examples or of next scenario)
22 | // because of this, we need a lookahead hint, that connects the tag line to the next examples, if there is an examples block ahead
23 | ExamplesDefinition! [#Empty|#Comment|#TagLine->#ExamplesLine]:= Tags? Examples
24 | Examples! := #ExamplesLine DescriptionHelper ExamplesTable?
25 | ExamplesTable! := #TableRow #TableRow*
26 |
27 | Step! := #StepLine StepArg?
28 | StepArg := (DataTable | DocString)
29 |
30 | DataTable! := #TableRow+
31 | DocString! := #DocStringSeparator #Other* #DocStringSeparator
32 |
33 | Tags! := #TagLine+
34 |
35 | // we need to explicitly mention comment, to avoid merging it into the description line's #Other token
36 | // we also eat the leading empty lines, the tailing lines are not removed by the parser to avoid lookahead, this has to be done by the AST builder
37 | DescriptionHelper := #Empty* Description? #Comment*
38 | Description! := #Other+
39 |
--------------------------------------------------------------------------------
/packages/gurke/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling"]
3 | build-backend = "hatchling.build"
4 |
5 | [project]
6 | name = "gurke"
7 | description = 'Pure Python3 Gherkin parser'
8 | readme = { "file" = "README.md", "content-type" = "text/markdown" }
9 | requires-python = ">=3.8"
10 | license = "Apache-2.0"
11 | keywords = []
12 | authors = [{ name = "Daniel Biehl", email = "dbiehl@live.de" }]
13 | classifiers = [
14 | "Development Status :: 5 - Production/Stable",
15 | "Programming Language :: Python",
16 | "Programming Language :: Python :: 3.8",
17 | "Programming Language :: Python :: 3.9",
18 | "Programming Language :: Python :: 3.10",
19 | "Programming Language :: Python :: 3.11",
20 | "Programming Language :: Python :: Implementation :: CPython",
21 | "Programming Language :: Python :: Implementation :: PyPy",
22 | "Operating System :: OS Independent",
23 | "Topic :: Utilities",
24 | "Typing :: Typed",
25 | ]
26 | dependencies = ["typing-extensions>=4.4.0"]
27 | dynamic = ["version"]
28 |
29 | [project.urls]
30 | Homepage = "https://github.com/d-biehl/robotframework-gherkin-parser"
31 | Donate = "https://github.com/sponsors/d-biehl"
32 | Documentation = "https://github.com/d-biehl/robotframework-gherkin-parser#readme"
33 | Changelog = "https://github.com/d-biehl/robotframework-gherkin-parser/blob/main/CHANGELOG.md"
34 | Issues = "https://github.com/d-biehl/robotframework-gherkin-parser/issues"
Source = "https://github.com/d-biehl/robotframework-gherkin-parser"
36 |
37 | [tool.hatch.version]
38 | path = "src/gurke/__version__.py"
39 |
40 | [tool.hatch.build]
41 | dev-mode-dirs = ["src"]
42 |
43 | [tool.hatch.build.targets.wheel]
44 | only-include = ["src/gurke"]
45 | sources = ["src"]
46 |
47 | [tool.hatch.build.targets.sdist]
48 | only-include = ["src"]
49 |
50 | [tool.hatch.envs.default.scripts]
51 | generate-parser = "berp -g gherkin.berp -t gherkin-python.razor --noBOM -o src/gurke/parser.py"
52 |
53 | [tool.hatch.envs.build]
54 | detached = true
55 | python = "38"
56 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robotcodedev/robotframework-gherkin-parser/dc81a6845d19c447d006aa4b67219354d79fa9c1/packages/gurke/src/gurke/__init__.py
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/__main__.py:
--------------------------------------------------------------------------------
"""Command-line entry point: parse Gherkin feature files and print events as JSON lines.

Each positional argument is a feature file path. For every file, source,
gherkinDocument and pickle events are emitted as one JSON object per line,
unless suppressed by the corresponding ``--no-*`` flag.
"""
import argparse
import json

from gurke.stream.gherkin_events import GherkinEvents
from gurke.stream.source_events import SourceEvents

# argparse replaces optparse, which has been deprecated since Python 3.2.
# The flag names and option attributes (print_source/print_ast/print_pickles)
# are kept identical so GherkinEvents(options) keeps working unchanged.
parser = argparse.ArgumentParser(description="Parse Gherkin documents and print Cucumber events as JSON lines.")
parser.add_argument(
    "--no-source", action="store_false", dest="print_source", default=True, help="don't print source events"
)
parser.add_argument("--no-ast", action="store_false", dest="print_ast", default=True, help="don't print ast events")
parser.add_argument(
    "--no-pickles", action="store_false", dest="print_pickles", default=True, help="don't print pickle events"
)
parser.add_argument("paths", nargs="*", help="feature files to parse")

options = parser.parse_args()

source_events = SourceEvents(options.paths)
gherkin_events = GherkinEvents(options)

for source_event in source_events.enum():
    for event in gherkin_events.enum(source_event):
        print(json.dumps(event))
24 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/__version__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.3.2"
2 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/ast_builder.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Optional
2 |
3 | from .ast_node import AstNode
4 | from .errors import AstBuilderException
5 | from .location import Location
6 | from .stream.id_generator import IdGenerator
7 | from .token import Token
8 |
9 |
class AstBuilder:
    """Assembles the Gherkin AST from parser callbacks.

    The parser drives this builder through start_rule/end_rule/build.  Rule
    nodes are kept on a stack while open; when a rule ends, transform_node
    converts it into a plain dict (or leaves it as an AstNode for helper
    rules) and attaches it to the parent node.  get_result finally returns
    the finished GherkinDocument dict.
    """

    # Stack of nodes for the grammar rules currently open; the bottom element
    # is a synthetic "None" root that ends up holding the GherkinDocument.
    stack: List[AstNode]
    # Comment tokens are collected separately and attached to the document.
    comments: List[Dict[str, Any]]

    def __init__(self, id_generator: Optional[IdGenerator] = None) -> None:
        if id_generator is None:
            self.id_generator = IdGenerator()
        else:
            self.id_generator = id_generator
        self.reset()

    def reset(self) -> None:
        """Clear all parsing state so the builder can be reused for a new document."""
        self.stack = [AstNode("None")]
        self.comments = []
        # NOTE(review): id_counter looks unused — ids come from id_generator; confirm.
        self.id_counter = 0

    def start_rule(self, rule_type: str) -> None:
        """Parser callback: a grammar rule of *rule_type* begins."""
        self.stack.append(AstNode(rule_type))

    def end_rule(self, rule_type: str) -> None:
        """Parser callback: the innermost rule ends; transform it and attach to its parent."""
        node = self.stack.pop()
        self.current_node.add(node.rule_type, self.transform_node(node))

    def build(self, token: Token) -> None:
        """Parser callback: attach a matched token to the current node.

        Comments are routed to the document-level comments list instead.
        """
        if token.matched_type == "Comment":
            self.comments.append({"location": self.get_location(token), "text": token.matched_text})
        else:
            self.current_node.add(token.matched_type, token)

    def get_result(self) -> Any:
        """Return the finished GherkinDocument dict (after parsing completed)."""
        return self.current_node.get_single("GherkinDocument")

    @property
    def current_node(self) -> AstNode:
        # Top of the rule stack — the rule currently being filled.
        return self.stack[-1]

    def get_location(self, token: Token, column: Optional[int] = None) -> Location:
        """Return the token's location, optionally overriding the column.

        Note: a falsy column (None or 0) keeps the token's own location;
        callers pass 1-based columns, so 0 is not expected here.
        """
        return token.location if not column else Location(token.location.line, column)

    def get_tags(self, node: AstNode) -> List[Dict[str, Any]]:
        """Flatten all TagLine tokens under *node*'s Tags child into tag dicts."""
        tags = []
        tags_node = node.get_single("Tags")
        if not tags_node:
            return tags

        for token in tags_node.get_tokens("TagLine"):
            tags += [
                {
                    "id": self.id_generator.get_next_id(),
                    "location": self.get_location(token, tag_item["column"]),
                    "name": tag_item["text"],
                }
                for tag_item in token.matched_items
            ]

        return tags

    def get_table_rows(self, node: AstNode) -> List[Dict[str, Any]]:
        """Convert all TableRow tokens under *node* into row dicts with cells."""
        rows = [
            {
                "id": self.id_generator.get_next_id(),
                "location": self.get_location(token),
                "cells": self.get_cells(token),
            }
            for token in node.get_tokens("TableRow")
        ]
        self.ensure_cell_count(rows)
        return rows

    def ensure_cell_count(self, rows: List[Dict[str, Any]]) -> None:
        """Raise AstBuilderException unless every row has the same number of cells."""
        if not rows:
            return

        cell_count = len(rows[0]["cells"])
        for row in rows:
            if len(row["cells"]) != cell_count:
                raise AstBuilderException("inconsistent cell count within the table", row["location"])

    def get_cells(self, table_row_token: Token) -> List[Dict[str, Any]]:
        """Convert a TableRow token's matched items into cell dicts (Nones dropped)."""
        return [
            self.reject_nones(
                {"location": self.get_location(table_row_token, cell_item["column"]), "value": cell_item["text"]}
            )
            for cell_item in table_row_token.matched_items
        ]

    def get_description(self, node: AstNode) -> str:
        # The Description rule was already transformed into a str by
        # transform_node; default to "" when the node has no description.
        return node.get_single("Description", "")

    def get_steps(self, node):
        # Step children were already transformed into step dicts.
        return node.get_items("Step")

    def transform_node(self, node: AstNode) -> Any:
        """Convert a finished rule node into its AST dict representation.

        Helper rules with no branch here (e.g. FeatureHeader, Tags) are
        returned unchanged as AstNodes and consumed by their parent's branch.
        """
        if node.rule_type == "Step":
            step_line = node.get_token("StepLine")
            # "dummy_type" with a None value is dropped by reject_nones when
            # the step has neither a data table nor a doc string argument.
            step_argument_type = "dummy_type"
            step_argument = None
            if node.get_single("DataTable"):
                step_argument_type = "dataTable"
                step_argument = node.get_single("DataTable")
            elif node.get_single("DocString"):
                step_argument_type = "docString"
                step_argument = node.get_single("DocString")

            return self.reject_nones(
                {
                    "id": self.id_generator.get_next_id(),
                    "location": self.get_location(step_line),
                    "keyword": step_line.matched_keyword,
                    "keywordType": step_line.matched_keyword_type,
                    "text": step_line.matched_text,
                    step_argument_type: step_argument,
                }
            )
        if node.rule_type == "DocString":
            separator_token = node.get_tokens("DocStringSeparator")[0]
            # Text after the opening separator (e.g. ```json) is the media type.
            media_type = separator_token.matched_text if len(separator_token.matched_text) > 0 else None
            line_tokens = node.get_tokens("Other")
            content = "\n".join([t.matched_text for t in line_tokens])

            return self.reject_nones(
                {
                    "location": self.get_location(separator_token),
                    "content": content,
                    "delimiter": separator_token.matched_keyword,
                    "mediaType": media_type,
                }
            )
        if node.rule_type == "DataTable":
            rows = self.get_table_rows(node)
            return self.reject_nones(
                {
                    "location": rows[0]["location"],
                    "rows": rows,
                }
            )
        if node.rule_type == "Background":
            background_line = node.get_token("BackgroundLine")
            description = self.get_description(node)
            steps = self.get_steps(node)

            return self.reject_nones(
                {
                    "id": self.id_generator.get_next_id(),
                    "location": self.get_location(background_line),
                    "keyword": background_line.matched_keyword,
                    "name": background_line.matched_text,
                    "description": description,
                    "steps": steps,
                }
            )
        if node.rule_type == "ScenarioDefinition":
            # Tags live on the definition wrapper, everything else on Scenario.
            tags = self.get_tags(node)
            scenario_node = node.get_single("Scenario")
            scenario_line = scenario_node.get_token("ScenarioLine")
            description = self.get_description(scenario_node)
            steps = self.get_steps(scenario_node)
            examples = scenario_node.get_items("ExamplesDefinition")

            return self.reject_nones(
                {
                    "id": self.id_generator.get_next_id(),
                    "tags": tags,
                    "location": self.get_location(scenario_line),
                    "keyword": scenario_line.matched_keyword,
                    "name": scenario_line.matched_text,
                    "description": description,
                    "steps": steps,
                    "examples": examples,
                }
            )
        if node.rule_type == "ExamplesDefinition":
            tags = self.get_tags(node)
            examples_node = node.get_single("Examples")
            examples_line = examples_node.get_token("ExamplesLine")
            description = self.get_description(examples_node)
            examples_table_rows = examples_node.get_single("ExamplesTable")
            # First row is the variable header, the remaining rows are values.
            table_header = examples_table_rows[0] if examples_table_rows else None
            table_body = examples_table_rows[1:] if examples_table_rows else []

            return self.reject_nones(
                {
                    "id": self.id_generator.get_next_id(),
                    "tags": tags,
                    "location": self.get_location(examples_line),
                    "keyword": examples_line.matched_keyword,
                    "name": examples_line.matched_text,
                    "description": description,
                    "tableHeader": table_header,
                    "tableBody": table_body,
                }
            )
        if node.rule_type == "ExamplesTable":
            return self.get_table_rows(node)
        if node.rule_type == "Description":
            line_tokens = node.get_tokens("Other")
            # Trim trailing empty lines
            # (assumes at least one line has text — Description := #Other+,
            # and a fully empty line would have matched #Empty instead).
            last_non_empty = next(i for i, j in reversed(list(enumerate(line_tokens))) if j.matched_text)
            return "\n".join([token.matched_text for token in line_tokens[: last_non_empty + 1]])

        if node.rule_type == "Rule":
            header = node.get_single("RuleHeader")
            if not header:
                return None

            tags = self.get_tags(header)
            rule_line = header.get_token("RuleLine")
            if not rule_line:
                return None

            # Background (if any) comes first, followed by the scenarios.
            children = []
            background = node.get_single("Background")
            if background:
                children.append({"background": background})
            children = children + [{"scenario": i} for i in node.get_items("ScenarioDefinition")]
            description = self.get_description(header)

            return self.reject_nones(
                {
                    "id": self.id_generator.get_next_id(),
                    "tags": tags,
                    "location": self.get_location(rule_line),
                    "keyword": rule_line.matched_keyword,
                    "name": rule_line.matched_text,
                    "description": description,
                    "children": children,
                }
            )
        if node.rule_type == "Feature":
            header = node.get_single("FeatureHeader")
            if not header:
                return None

            tags = self.get_tags(header)
            feature_line = header.get_token("FeatureLine")
            if not feature_line:
                return None

            # Background first, then scenarios, then rules.
            children = []
            background = node.get_single("Background")
            if background:
                children.append({"background": background})
            children = children + [{"scenario": i} for i in node.get_items("ScenarioDefinition")]
            children = children + [{"rule": i} for i in node.get_items("Rule")]
            description = self.get_description(header)
            language = feature_line.matched_gherkin_dialect

            return self.reject_nones(
                {
                    "tags": tags,
                    "location": self.get_location(feature_line),
                    "language": language,
                    "keyword": feature_line.matched_keyword,
                    "name": feature_line.matched_text,
                    "description": description,
                    "children": children,
                }
            )
        if node.rule_type == "GherkinDocument":
            feature = node.get_single("Feature")

            return self.reject_nones({"feature": feature, "comments": self.comments})

        # Helper rules fall through unchanged.
        return node

    def reject_nones(self, values: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of *values* with all None-valued entries removed."""
        return {k: v for k, v in values.items() if v is not None}
277 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/ast_node.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections import defaultdict
4 | from typing import Any, Dict, List, Optional, cast
5 |
6 | from gurke.token import Token
7 |
8 |
class AstNode:
    """A node of the parse tree under construction.

    Children (sub-nodes and tokens) are grouped by their rule or token type,
    so they can be retrieved either individually or as a list.
    """

    def __init__(self, rule_type: str) -> None:
        self.rule_type = rule_type
        self._sub_items: Dict[Optional[str], List[Any]] = defaultdict(list)

    def add(self, rule_type: Optional[str], obj: Any) -> None:
        """Append *obj* under the given rule/token type."""
        self._sub_items[rule_type].append(obj)

    def get_single(self, rule_type: str, default_value: Any = None) -> AstNode:
        """Return the first child stored under *rule_type*, or *default_value*."""
        children = self._sub_items[rule_type]
        if children:
            return cast(AstNode, children[0])
        return cast(AstNode, default_value)

    def get_items(self, rule_type: str) -> List[Any]:
        """Return every child stored under *rule_type* (possibly empty)."""
        return self._sub_items[rule_type]

    def get_token(self, token_type: str) -> Token:
        """Return the first child of *token_type*, viewed as a Token."""
        return cast(Token, self.get_single(token_type))

    def get_tokens(self, token_type: str) -> List[Token]:
        """Return only the Token instances stored under *token_type*."""
        result: List[Token] = []
        for item in self._sub_items[token_type]:
            if isinstance(item, Token):
                result.append(item)
        return result
28 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/count_symbols.py:
--------------------------------------------------------------------------------
def count_symbols(string: str) -> int:
    """Return the number of Unicode code points in *string*."""
    return len(string)
3 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/dialect.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import io
4 | import json
5 | import os
6 | from typing import Dict, List, Optional
7 |
# Path to the translation table bundled with the package.
DIALECT_FILE_PATH = os.path.join(os.path.dirname(__file__), "gherkin-languages.json")

# Loaded once at import time: maps a language code (e.g. "en") to its keyword spec.
with io.open(DIALECT_FILE_PATH, "r", encoding="utf-8") as file:
    DIALECTS = json.load(file)
12 |
13 |
class Dialect:
    """Read-only view over one language entry of gherkin-languages.json.

    Each property returns the list of localized keywords for one Gherkin
    concept (Feature, Rule, Scenario, step keywords, ...).  The explicit
    ``object`` base class was a Python 2 relic and has been dropped.
    """

    @classmethod
    def for_name(cls, name: str) -> Optional[Dialect]:
        """Return the dialect registered under *name*, or None when unknown."""
        return cls(DIALECTS[name]) if name in DIALECTS else None

    def __init__(self, spec: Dict[str, List[str]]) -> None:
        # *spec* is a single entry of the gherkin-languages.json mapping.
        self.spec = spec

    @property
    def feature_keywords(self) -> List[str]:
        return self.spec["feature"]

    @property
    def rule_keywords(self) -> List[str]:
        return self.spec["rule"]

    @property
    def scenario_keywords(self) -> List[str]:
        return self.spec["scenario"]

    @property
    def scenario_outline_keywords(self) -> List[str]:
        return self.spec["scenarioOutline"]

    @property
    def background_keywords(self) -> List[str]:
        return self.spec["background"]

    @property
    def examples_keywords(self) -> List[str]:
        return self.spec["examples"]

    @property
    def given_keywords(self) -> List[str]:
        return self.spec["given"]

    @property
    def when_keywords(self) -> List[str]:
        return self.spec["when"]

    @property
    def then_keywords(self) -> List[str]:
        return self.spec["then"]

    @property
    def and_keywords(self) -> List[str]:
        return self.spec["and"]

    @property
    def but_keywords(self) -> List[str]:
        return self.spec["but"]
65 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/errors.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Sequence
2 |
3 | from .location import Location
4 | from .token import Token
5 |
6 |
class ParserError(Exception):
    """Base class for all errors raised by the Gherkin parser."""

    pass
9 |
10 |
class ParserException(ParserError):  # noqa: N818
    """Parser error tied to a document location.

    The message is prefixed with "(line:column): " when a location is given;
    a missing column is rendered as 0.
    """

    def __init__(self, message: str, location: Optional[Location] = None) -> None:
        self.location = location
        # Zero-argument super() replaces the legacy Python 2 two-argument form.
        super().__init__(
            (f"({location.line}:{location.column or 0}): " if location is not None else "") + message
        )
17 |
18 |
class NoSuchLanguageException(ParserException):
    """Raised when a document requests a language with no registered dialect."""

    def __init__(self, language: str, location: Optional[Location] = None) -> None:
        super().__init__("Language not supported: " + language, location)
22 |
23 |
class AstBuilderException(ParserException):
    """Raised by the AST builder, e.g. for inconsistent table cell counts."""

    pass
26 |
27 |
class UnexpectedEOFException(ParserException):
    """Raised when the document ends while more token types were still expected."""

    def __init__(self, received_token: Token, expected_token_types: Sequence[str]) -> None:
        message = "unexpected end of file, expected: " + ", ".join(expected_token_types)
        super().__init__(message, received_token.location)
32 |
33 |
class UnexpectedTokenException(ParserException):
    """Raised when the next token matches none of the expected token types."""

    def __init__(self, received_token: Token, expected_token_types: Sequence[str]) -> None:
        got = received_token.token_value().strip()
        message = "expected: " + ", ".join(expected_token_types) + ", got '" + got + "'"
        # When the token carries no column, point at the first non-whitespace
        # character of the offending line instead.
        if received_token.location.column:
            location = received_token.location
        else:
            location = Location(received_token.location.line, received_token.line.indent + 1)
        super().__init__(message, location)
46 |
47 |
class CompositeParserException(ParserError):  # noqa: N818
    """Aggregates several parser errors collected during error recovery."""

    def __init__(self, errors: List[BaseException]) -> None:
        self.errors = errors
        super().__init__("Parser errors:\n" + "\n".join([error.args[0] for error in errors]))
52 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/gherkin_line.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Dict, Iterator, List, Tuple, TypedDict, Union
3 |
4 |
class Cell(TypedDict):
    """One table cell: 1-based column of its first character, trimmed text."""

    column: int
    text: str


class GherkinLine:
    """One physical line of a Gherkin document plus the helpers the token matchers use."""

    def __init__(self, line_text: str, line_number: int) -> None:
        self._line_text = line_text
        self._line_number = line_number
        self._trimmed_line_text = line_text.lstrip()
        # Number of leading whitespace characters; columns are 1-based, so
        # content starts at column ``indent + 1``.
        self.indent = len(line_text) - len(self._trimmed_line_text)

    def __bool__(self) -> bool:
        return bool(self._line_text)

    def get_rest_trimmed(self, length: int) -> str:
        """Return the trimmed line with its first *length* characters removed."""
        return self._trimmed_line_text[length:].strip()

    def get_line_text(self, indent_to_remove: int = -1) -> str:
        """Return the line with at most *indent_to_remove* leading columns removed.

        A negative value, or one larger than the actual indent, yields the
        fully left-trimmed text.
        """
        if indent_to_remove < 0 or indent_to_remove > self.indent:
            return self._trimmed_line_text

        return self._line_text[indent_to_remove:]

    def is_empty(self) -> bool:
        return not self._trimmed_line_text

    def startswith(self, prefix: str) -> bool:
        return self._trimmed_line_text.startswith(prefix)

    def startswith_title_keyword(self, keyword: str) -> bool:
        # Title keywords (Feature, Scenario, ...) are followed by a colon.
        return self._trimmed_line_text.startswith(keyword + ":")

    @property
    def table_cells(self) -> List[Cell]:
        """Parse this line as a table row and return its cells with 1-based columns."""
        cells: List[Cell] = []
        for cell, col in self.split_table_cells(self._trimmed_line_text.strip()):
            # Strip horizontal whitespace only ([^\S\n]) so escaped newlines
            # inside a cell survive; the amount removed on the left shifts the
            # reported column to the first visible character.
            lstripped_cell = re.sub(r"^[^\S\n]*", "", cell, flags=re.U)
            cell_indent = len(cell) - len(lstripped_cell)
            cells.append(
                {
                    "column": col + self.indent + cell_indent,
                    "text": re.sub(r"[^\S\n]*$", "", lstripped_cell, flags=re.U),
                }
            )
        return cells

    def split_table_cells(self, row_str: str) -> Iterator[Tuple[str, int]]:
        """
        An iterator returning all the table cells in a row with their positions,
        accounting for escaping (\\| -> |, \\\\ -> \\, \\n -> newline).
        """

        row = iter(row_str)
        col = 0
        start_col = col + 1
        cell = ""
        first_cell = True
        while True:
            char = next(row, None)
            col += 1
            if char == "|":
                if first_cell:
                    # First cell (content before the first |) is skipped
                    first_cell = False
                else:
                    yield (cell, start_col)
                cell = ""
                start_col = col + 1
            elif char == "\\":
                char = next(row)
                col += 1
                if char == "n":
                    cell += "\n"
                else:
                    # Unknown escapes keep their backslash verbatim.
                    if char not in ["|", "\\"]:
                        cell += "\\"
                    cell += char
            elif char:
                cell += char
            else:
                break
        # Last cell (content after the last |) is skipped

    @property
    def tags(self):
        """Return the @tags on this line as ``{"column", "text"}`` dicts.

        Raises ParserException when a tag contains embedded whitespace.
        """
        column = self.indent + 1
        # Everything from the first " #" onwards is a trailing comment.
        uncommented_line = re.split(r"\s#", self._trimmed_line_text.strip(), maxsplit=2)[0]
        items = uncommented_line.strip().split("@")
        tags = []
        for item in items[1:]:
            tag_value = "@" + item.strip()
            if re.search(r"\s", tag_value) is not None:
                from .errors import ParserException
                from .location import Location

                # Bug fix: previously a plain dict was passed as the location,
                # which crashed ParserException (it reads location.line).
                raise ParserException(
                    "A tag may not contain whitespace",
                    Location(self._line_number, column),
                )

            tags.append({"column": column, "text": tag_value})
            column += len(item) + 1
        return tags
106 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/location.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from typing import Optional
3 |
4 |
@dataclass
class Location:
    """A position within a Gherkin document."""

    # Line number within the document.
    line: int
    # Column within the line (computed 1-based elsewhere, e.g. indent + 1);
    # None when the position refers to the whole line.
    column: Optional[int] = None


# Sentinel used where no meaningful source position exists.
INVALID_LOCATION = Location(-1, -1)
12 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/pickles/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robotcodedev/robotframework-gherkin-parser/dc81a6845d19c447d006aa4b67219354d79fa9c1/packages/gurke/src/gurke/pickles/__init__.py
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/pickles/compiler.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from ..stream.id_generator import IdGenerator
4 |
5 |
class Compiler:
    """Compiles a GherkinDocument dict into pickles (executable test cases).

    One pickle is produced per plain scenario, and one per example row for
    scenario outlines; background steps from the feature and the enclosing
    rule are prepended to each pickle's steps.
    """

    def __init__(self, id_generator=None):
        self.id_generator = id_generator
        if self.id_generator is None:
            self.id_generator = IdGenerator()

    def compile(self, gherkin_document):
        """Return the list of pickles for *gherkin_document* (may be empty)."""
        pickles = []
        if "feature" not in gherkin_document:
            return pickles

        feature = gherkin_document["feature"]
        if not feature["children"]:
            return pickles

        uri = gherkin_document["uri"]
        feature_tags = feature["tags"]
        language = feature["language"]
        background_steps = []
        for scenario_definition in feature["children"]:
            if "background" in scenario_definition:
                # Feature-level background steps apply to every following scenario.
                if scenario_definition["background"]["steps"]:
                    background_steps += scenario_definition["background"]["steps"]
            elif "rule" in scenario_definition:
                self._compile_rule(uri, feature_tags, background_steps, scenario_definition["rule"], language, pickles)
            else:
                scenario = scenario_definition["scenario"]
                args = (uri, feature_tags, background_steps, scenario, language, pickles)
                if not scenario["examples"]:
                    self._compile_scenario(*args)
                else:
                    self._compile_scenario_outline(*args)
        return pickles

    def _compile_rule(self, uri, feature_tags, feature_background_steps, rule, language, pickles):
        """Compile all scenarios of a Rule, inheriting feature tags and backgrounds."""
        tags = list(feature_tags) + list(rule["tags"])
        background_steps = []
        background_steps += feature_background_steps
        for scenario_definition in rule["children"]:
            if "background" in scenario_definition:
                if scenario_definition["background"]["steps"]:
                    background_steps += scenario_definition["background"]["steps"]
            else:
                scenario = scenario_definition["scenario"]
                args = (uri, tags, background_steps, scenario, language, pickles)
                if not scenario["examples"]:
                    self._compile_scenario(*args)
                else:
                    self._compile_scenario_outline(*args)
        return pickles

    def _compile_scenario(self, uri, inherited_tags, background_steps, scenario, language, pickles):
        """Append one pickle for a plain (non-outline) scenario."""
        tags = list(inherited_tags) + list(scenario["tags"])
        last_keyword_type = "Unknown"
        steps = []
        if scenario["steps"]:
            for step in background_steps + scenario["steps"]:
                # "And"/"But" (Conjunction) inherit the previous step's keyword type.
                last_keyword_type = last_keyword_type if step["keywordType"] == "Conjunction" else step["keywordType"]
                steps.append(self._pickle_step(step, last_keyword_type))

        pickle = {
            "astNodeIds": [scenario["id"]],
            "id": self.id_generator.get_next_id(),
            "tags": self._pickle_tags(tags),
            "name": scenario["name"],
            "language": language,
            "steps": steps,
            "uri": uri,
        }
        pickles.append(pickle)

    def _compile_scenario_outline(self, uri, inherited_tags, background_steps, scenario, language, pickles):
        """Append one pickle per example-table row of a scenario outline."""
        for examples in (e for e in scenario["examples"] if "tableHeader" in e):
            variable_cells = examples["tableHeader"]["cells"]

            for values in examples["tableBody"]:
                value_cells = values["cells"]
                tags = list(inherited_tags) + list(scenario["tags"]) + list(examples["tags"])
                last_keyword_type = None
                steps = []
                if scenario["steps"]:
                    for step in background_steps:
                        last_keyword_type = (
                            last_keyword_type if step["keywordType"] == "Conjunction" else step["keywordType"]
                        )
                        steps.append(self._pickle_step(step, last_keyword_type))

                if scenario["steps"]:
                    for outline_step in scenario["steps"]:
                        last_keyword_type = (
                            last_keyword_type
                            if outline_step["keywordType"] == "Conjunction"
                            else outline_step["keywordType"]
                        )
                        step_text = self._interpolate(outline_step["text"], variable_cells, value_cells)
                        argument = self._create_pickle_arguments(outline_step, variable_cells, value_cells)
                        # Renamed from "_pickle_step" to avoid shadowing the method name.
                        outline_pickle_step = {
                            "astNodeIds": [outline_step["id"], values["id"]],
                            "id": self.id_generator.get_next_id(),
                            "type": last_keyword_type,
                            "text": step_text,
                        }
                        if argument is not None:
                            outline_pickle_step["argument"] = argument

                        steps.append(outline_pickle_step)

                pickle = {
                    "astNodeIds": [scenario["id"], values["id"]],
                    "id": self.id_generator.get_next_id(),
                    "name": self._interpolate(scenario["name"], variable_cells, value_cells),
                    "language": language,
                    "steps": steps,
                    "tags": self._pickle_tags(tags),
                    "uri": uri,
                }
                pickles.append(pickle)

    def _create_pickle_arguments(self, step, variables, values):
        """Return the interpolated dataTable/docString argument of *step*, or None."""
        if "dataTable" in step:
            table = {"rows": []}
            for row in step["dataTable"]["rows"]:
                cells = [{"value": self._interpolate(cell["value"], variables, values)} for cell in row["cells"]]
                table["rows"].append({"cells": cells})
            return {"dataTable": table}

        if "docString" in step:
            argument = step["docString"]
            docstring = {"content": self._interpolate(argument["content"], variables, values)}
            if "mediaType" in argument:
                docstring["mediaType"] = self._interpolate(argument["mediaType"], variables, values)
            return {"docString": docstring}

        return None

    def _interpolate(self, name, variable_cells, value_cells):
        """Replace every <header> placeholder in *name* with its example value."""
        if name is None:
            return name

        for n, variable_cell in enumerate(variable_cells):
            value_cell = value_cells[n]
            # Bug fix: the header text is data, not a regex — escape it so
            # headers containing metacharacters (e.g. "a+b") still interpolate.
            pattern = "<{}>".format(re.escape(variable_cell["value"]))
            # For the case of trailing backslash, re-escaping backslashes are
            # needed so re.sub does not treat them as replacement escapes.
            reescaped_value = re.sub(r"\\", r"\\\\", value_cell["value"])
            name = re.sub(pattern, reescaped_value, name)
        return name

    def _pickle_step(self, step, keyword_type):
        """Convert one AST step dict into a pickle step with the resolved keyword type."""
        pickle_step = {
            "astNodeIds": [step["id"]],
            "id": self.id_generator.get_next_id(),
            "type": keyword_type,
            "text": step["text"],
        }
        argument = self._create_pickle_arguments(step, [], [])
        if argument is not None:
            pickle_step["argument"] = argument
        return pickle_step

    def _pickle_tags(self, tags):
        return [self._pickle_tag(tag) for tag in tags]

    def _pickle_tag(self, tag):
        return {"astNodeId": tag["id"], "name": tag["name"]}
169 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/py.typed:
--------------------------------------------------------------------------------
1 | # Marker file for PEP 561.
2 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/stream/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robotcodedev/robotframework-gherkin-parser/dc81a6845d19c447d006aa4b67219354d79fa9c1/packages/gurke/src/gurke/stream/__init__.py
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/stream/gherkin_events.py:
--------------------------------------------------------------------------------
1 | from ..ast_builder import AstBuilder
2 | from ..errors import CompositeParserException, ParserError
3 | from ..parser import Parser
4 | from ..pickles.compiler import Compiler
5 | from ..stream.id_generator import IdGenerator
6 |
7 |
def create_errors(errors, uri):
    """Yield one cucumber-message ``parseError`` envelope per parser error."""
    for err in errors:
        source = {"uri": uri, "location": err.location}
        yield {"parseError": {"source": source, "message": str(err)}}
16 |
17 |
class GherkinEvents:
    """Turn source events into a stream of cucumber-message events.

    Depending on ``options`` (``print_source`` / ``print_ast`` /
    ``print_pickles``) the original source event, the parsed gherkin
    document, and/or the compiled pickles are emitted.  Parse failures are
    emitted as ``parseError`` events instead of being raised.
    """

    def __init__(self, options):
        self.options = options
        self.id_generator = IdGenerator()
        self.parser = Parser(ast_builder=AstBuilder(self.id_generator))
        self.compiler = Compiler(self.id_generator)

    def enum(self, source_event):
        source = source_event["source"]
        uri = source["uri"]
        data = source["data"]

        try:
            document = self.parser.parse(data)
            document["uri"] = uri

            if self.options.print_source:
                yield source_event
            if self.options.print_ast:
                yield {"gherkinDocument": document}
            if self.options.print_pickles:
                for pickle in self.compiler.compile(document):
                    yield {"pickle": pickle}
        except CompositeParserException as e:
            yield from create_errors(e.errors, uri)
        except ParserError as e:
            yield from create_errors([e], uri)
49 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/stream/id_generator.py:
--------------------------------------------------------------------------------
class IdGenerator:
    """Hands out sequential ids as strings: "0", "1", "2", ..."""

    def __init__(self) -> None:
        self._id_counter = 0

    def get_next_id(self) -> str:
        current = self._id_counter
        self._id_counter = current + 1
        return str(current)
9 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/stream/source_events.py:
--------------------------------------------------------------------------------
1 | import io
2 |
3 |
def source_event(path):
    """Read the feature file at ``path`` and wrap it in a cucumber-message
    ``source`` envelope.

    ``newline=""`` preserves the file's original line endings in ``data``.
    The file handle is now closed deterministically via a context manager
    (previously it was leaked until garbage collection).
    """
    with io.open(path, "r", encoding="utf8", newline="") as file:
        data = file.read()
    return {
        "source": {
            "uri": path,
            "data": data,
            "mediaType": "text/x.cucumber.gherkin+plain",
        }
    }
12 |
13 |
class SourceEvents:
    """Lazily produce one ``source`` event per configured path."""

    def __init__(self, paths):
        self.paths = paths

    def enum(self):
        # Lazy iterator, like the original map(); files are read on demand.
        return (source_event(path) for path in self.paths)
20 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/token.py:
--------------------------------------------------------------------------------
1 | from typing import Any, List, Optional
2 |
3 | from .gherkin_line import GherkinLine
4 | from .location import Location
5 |
6 |
class Token:
    """One scanned line together with the result of matching it.

    The ``matched_*`` attributes start out empty/None and are filled in by
    the token matcher (see ``_set_token_matched`` in token_matcher.py).
    """

    def __init__(self, gherkin_line: GherkinLine, location: Location):
        self.line = gherkin_line
        self.location = location

        # TODO: check types
        self.matched_text: str = ""
        self.matched_type: str = ""
        self.matched_items: List[Any] = []
        self.matched_keyword: Optional[str] = None
        self.matched_keyword_type: Optional[str] = None
        self.matched_indent: int = -1
        self.matched_gherkin_dialect: Optional[str] = None

    def eof(self) -> bool:
        """A token with no line marks the end of input."""
        return not self.line

    def detach(self) -> None:
        pass  # TODO: detach line - is this needed?

    def token_value(self) -> str:
        """Raw line text, or "EOF" for the end-of-input token."""
        if self.eof() or not self.line:
            return "EOF"
        return self.line.get_line_text()
29 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/token_formatter_builder.py:
--------------------------------------------------------------------------------
1 | from .ast_builder import AstBuilder
2 |
3 |
class TokenFormatterBuilder(AstBuilder):
    """An ``AstBuilder`` replacement that records tokens verbatim and
    renders each one as a single formatted line (useful for testing the
    scanner/matcher)."""

    def __init__(self):
        self.reset()

    def reset(self):
        self._tokens = []

    def build(self, token):
        self._tokens.append(token)

    def start_rule(self, rule_type):
        pass

    def end_rule(self, rule_type):
        pass

    def get_result(self):
        return "\n".join(self._format_token(recorded) for recorded in self._tokens)

    def _format_token(self, token):
        if token.eof():
            return "EOF"

        location = "({}:{})".format(token.location.line, token.location.column)
        if token.matched_keyword:
            keyword = "".join(
                ["(", token.matched_keyword_type if token.matched_keyword_type else "", ")", token.matched_keyword]
            )
        else:
            keyword = ""
        text = token.matched_text if token.matched_text else ""
        items = ",".join(str(item["column"]) + ":" + item["text"] for item in token.matched_items)
        return "{}{}:{}/{}/{}".format(location, token.matched_type, keyword, text, items)
47 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/token_matcher.py:
--------------------------------------------------------------------------------
1 | import re
2 | import textwrap
3 | from collections import defaultdict
4 | from typing import Any, List, Optional
5 |
6 | from .dialect import Dialect
7 | from .errors import NoSuchLanguageException
8 | from .location import Location
9 | from .token import Token
10 |
11 |
def indent(text: str, amount: int, ch: str = " ") -> str:
    """Prefix each line of ``text`` with ``amount`` copies of ``ch``
    (``textwrap.indent`` skips whitespace-only lines)."""
    prefix = ch * amount
    return textwrap.indent(text, prefix)
14 |
15 |
class TokenMatcher:
    """Matches scanned lines against Gherkin token types (classic ``.feature`` syntax).

    Stateful between calls: the active dialect can be switched by a
    ``# language: xx`` header, and doc-string matching tracks the currently
    open separator plus the indent to strip from content lines.  Call
    ``reset()`` before matching a new document.
    """

    # Recognizes a "# language: xx" header line.
    LANGUAGE_RE = re.compile(r"^\s*#\s*language\s*:\s*([a-zA-Z\-_]+)\s*$")

    # Separator of the currently open doc string, or None when outside one.
    _active_doc_string_separator: Optional[str]

    def __init__(self, dialect_name: str = "en") -> None:
        self._default_dialect_name = dialect_name
        self._change_dialect(dialect_name)
        self.reset()

    def reset(self) -> None:
        """Restore the default dialect and clear per-document doc-string state."""
        if self.dialect_name != self._default_dialect_name:
            self._change_dialect(self._default_dialect_name)
        self._indent_to_remove = 0
        self._active_doc_string_separator = None

    def match_FeatureLine(self, token: Token) -> bool:  # noqa: N802
        return self._match_title_line(token, "FeatureLine", self.dialect.feature_keywords)

    def match_RuleLine(self, token: Token) -> bool:  # noqa: N802
        return self._match_title_line(token, "RuleLine", self.dialect.rule_keywords)

    def match_ScenarioLine(self, token: Token) -> bool:  # noqa: N802
        # Scenario-outline keywords also produce a ScenarioLine token.
        return self._match_title_line(token, "ScenarioLine", self.dialect.scenario_keywords) or self._match_title_line(
            token, "ScenarioLine", self.dialect.scenario_outline_keywords
        )

    def match_BackgroundLine(self, token: Token) -> bool:  # noqa: N802
        return self._match_title_line(token, "BackgroundLine", self.dialect.background_keywords)

    def match_ExamplesLine(self, token: Token) -> bool:  # noqa: N802
        return self._match_title_line(token, "ExamplesLine", self.dialect.examples_keywords)

    def match_TableRow(self, token: Token) -> bool:  # noqa: N802
        # Any line starting with "|" is a table row; cells come from the line.
        if not token.line.startswith("|"):
            return False
        # TODO: indent
        self._set_token_matched(token, "TableRow", items=token.line.table_cells)
        return True

    def match_StepLine(self, token: Token) -> bool:  # noqa: N802
        """Match a step line (Given/When/Then/And/But keyword prefix).

        A keyword listed in more than one dialect table gets the keyword
        type "Unknown"; a unique keyword gets its table's type.
        """
        keywords = (
            self.dialect.given_keywords
            + self.dialect.when_keywords
            + self.dialect.then_keywords
            + self.dialect.and_keywords
            + self.dialect.but_keywords
        )
        for keyword in (k for k in keywords if token.line.startswith(k)):
            title = token.line.get_rest_trimmed(len(keyword))
            keyword_types = self.keyword_types[keyword]
            if len(keyword_types) == 1:
                keyword_type = keyword_types[0]
            else:
                keyword_type = "Unknown"
            self._set_token_matched(token, "StepLine", title, keyword, keyword_type=keyword_type)
            return True

        return False

    def match_Comment(self, token: Token) -> bool:  # noqa: N802
        if not token.line.startswith("#"):
            return False

        text = token.line._line_text  # take the entire line, including leading space
        self._set_token_matched(token, "Comment", text, indent=0)
        return True

    def match_Empty(self, token: Token) -> bool:  # noqa: N802
        if not token.line.is_empty():
            return False

        self._set_token_matched(token, "Empty", indent=0)
        return True

    def match_Language(self, token: Token) -> bool:  # noqa: N802
        """Match a "# language: xx" header and switch the active dialect."""
        match = self.LANGUAGE_RE.match(token.line.get_line_text())
        if not match:
            return False

        dialect_name = match.group(1)
        self._set_token_matched(token, "Language", dialect_name)
        self._change_dialect(dialect_name, token.location)
        return True

    def match_TagLine(self, token: Token) -> bool:  # noqa: N802
        if not token.line.startswith("@"):
            return False

        self._set_token_matched(token, "TagLine", items=token.line.tags)
        return True

    def match_DocStringSeparator(self, token: Token) -> bool:  # noqa: N802
        if not self._active_doc_string_separator:
            # open
            return self._match_DocStringSeparator(token, '"""', True) or self._match_DocStringSeparator(
                token, "```", True
            )

        # close: only the separator that opened the doc string may close it.
        return self._match_DocStringSeparator(token, self._active_doc_string_separator, False)

    def _match_DocStringSeparator(self, token: Token, separator: str, is_open: bool) -> bool:  # noqa: N802
        """Open or close a doc string; while open, content lines are
        de-indented by the opening line's indent (see match_Other)."""
        if not token.line.startswith(separator):
            return False

        content_type = None
        if is_open:
            # The rest of the opening line (if any) is the content media type.
            content_type = token.line.get_rest_trimmed(len(separator))
            self._active_doc_string_separator = separator
            self._indent_to_remove = token.line.indent
        else:
            self._active_doc_string_separator = None
            self._indent_to_remove = 0

        # TODO: Use the separator as keyword. That's needed for pretty printing.
        self._set_token_matched(token, "DocStringSeparator", content_type, separator)
        return True

    def match_Other(self, token: Token) -> bool:  # noqa: N802
        # take the entire line, except removing DocString indents
        text = token.line.get_line_text(self._indent_to_remove)
        self._set_token_matched(token, "Other", self._unescaped_docstring(text), indent=0)
        return True

    def match_EOF(self, token: Token) -> bool:  # noqa: N802
        if not token.eof():
            return False

        self._set_token_matched(token, "EOF")
        return True

    def _match_title_line(self, token: Token, token_type: str, keywords: List[str]) -> bool:
        """Match "<keyword>: <title>" lines (Feature/Rule/Scenario/...)."""
        for keyword in (k for k in keywords if token.line.startswith_title_keyword(k)):
            title = token.line.get_rest_trimmed(len(keyword) + len(":"))
            self._set_token_matched(token, token_type, title, keyword)
            return True

        return False

    def _set_token_matched(
        self,
        token: Token,
        matched_type: str,
        text: Optional[str] = None,
        keyword: Optional[str] = None,
        keyword_type: Optional[str] = None,
        indent: Optional[int] = None,
        items: Optional[List[Any]] = None,
    ) -> None:
        """Record the match result on ``token`` and set its (1-based) column."""
        if items is None:
            items = []
        token.matched_type = matched_type
        # text == '' should not result in None
        token.matched_text = text.rstrip("\r\n") if text is not None else None
        token.matched_keyword = keyword
        token.matched_keyword_type = keyword_type
        if indent is not None:
            token.matched_indent = indent
        else:
            token.matched_indent = token.line.indent if token.line else 0
        token.matched_items = items
        token.location.column = token.matched_indent + 1
        token.matched_gherkin_dialect = self.dialect_name

    def _change_dialect(self, dialect_name: str, location: Optional[Location] = None) -> None:
        """Switch dialect and rebuild the keyword -> step-type lookup table.

        Raises ``NoSuchLanguageException`` for unknown dialect names.
        """
        dialect = Dialect.for_name(dialect_name)
        if not dialect:
            raise NoSuchLanguageException(dialect_name, location or Location(0, 0))

        self.dialect_name = dialect_name
        self.dialect = dialect
        self.keyword_types = defaultdict(list)
        for keyword in self.dialect.given_keywords:
            self.keyword_types[keyword].append("Context")
        for keyword in self.dialect.when_keywords:
            self.keyword_types[keyword].append("Action")
        for keyword in self.dialect.then_keywords:
            self.keyword_types[keyword].append("Outcome")
        for keyword in self.dialect.and_keywords + self.dialect.but_keywords:
            self.keyword_types[keyword].append("Conjunction")

    def _unescaped_docstring(self, text: str) -> str:
        """Undo the escaping of the active separator inside doc-string content."""
        if self._active_doc_string_separator == '"""':
            return text.replace('\\"\\"\\"', '"""')

        if self._active_doc_string_separator == "```":
            return text.replace("\\`\\`\\`", "```")

        return text
206 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/token_matcher_markdown.py:
--------------------------------------------------------------------------------
1 | import re
2 | from collections import defaultdict
3 | from typing import Any
4 |
5 | from .dialect import Dialect
6 | from .errors import NoSuchLanguageException
7 | from .location import Location
8 | from .token import Token
9 |
10 | KEYWORD_PREFIX_BULLET = "^(\\s*[*+-]\\s*)"
11 | KEYWORD_PREFIX_HEADER = "^(#{1,6}\\s)"
12 |
13 |
class GherkinInMarkdownTokenMatcher:
    """Token matcher for Gherkin embedded in Markdown (``.feature.md``).

    Keywords are matched behind Markdown markup: headers (``#``..``######``)
    for Feature/Rule/Scenario/Background/Examples and list bullets
    (``*``/``+``/``-``) for steps.  Stateful like ``TokenMatcher``; call
    ``reset()`` between documents.
    """

    # NOTE(review): match_Language below never matches anything, so this
    # regex is currently unused within this class — confirm before removing.
    LANGUAGE_RE = re.compile(r"^\s*#\s*language\s*:\s*([a-zA-Z\-_]+)\s*$")

    def __init__(self, dialect_name: str = "en") -> None:
        self._default_dialect_name = dialect_name
        self._change_dialect(dialect_name)
        self.reset()

    def reset(self) -> None:
        """Restore the default dialect and clear per-document state."""
        if self.dialect_name != self._default_dialect_name:
            self._change_dialect(self._default_dialect_name)
        self._indent_to_remove = 0
        self._active_doc_string_separator = None
        # Only one line per document may become the FeatureLine.
        self.matched_feature_line = False

    def match_FeatureLine(self, token: Token) -> bool:  # noqa: N802
        if self.matched_feature_line:
            self._set_token_matched(token, None)
            return False

        # We first try to match "# Feature: blah"
        result = self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.feature_keywords, ":", token, "FeatureLine")
        # If we didn't match "# Feature: blah", we still match this line
        # as a FeatureLine.
        # The reason for this is that users may not want to be constrained by having this as their first line.

        if not result:
            self._set_token_matched(token, "FeatureLine", token.line.get_line_text())
            result = True
        self.matched_feature_line = result
        return result

    def match_RuleLine(self, token):  # noqa: N802
        return self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.rule_keywords, ":", token, "RuleLine")

    def match_ScenarioLine(self, token):  # noqa: N802
        # Scenario-outline keywords also produce a ScenarioLine token.
        return self._match_title_line(
            KEYWORD_PREFIX_HEADER, self.dialect.scenario_keywords, ":", token, "ScenarioLine"
        ) or self._match_title_line(
            KEYWORD_PREFIX_HEADER, self.dialect.scenario_outline_keywords, ":", token, "ScenarioLine"
        )

    def match_BackgroundLine(self, token):  # noqa: N802
        return self._match_title_line(
            KEYWORD_PREFIX_HEADER, self.dialect.background_keywords, ":", token, "BackgroundLine"
        )

    def match_ExamplesLine(self, token):  # noqa: N802
        return self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.examples_keywords, ":", token, "ExamplesLine")

    def match_TableRow(self, token: Token) -> bool:  # noqa: N802
        # Gherkin tables must be indented 2-5 spaces in order to be distinguished from non-Gherkin tables

        if re.match("^\\s?\\s?\\s?\\s?\\s?\\|", token.line.get_line_text(0)):
            table_cells = token.line.table_cells
            # GitHub-flavoured-Markdown separator rows (e.g. | --- | :-: |)
            # are layout, not data.
            if self._is_gfm_table_separator(table_cells):
                return False

            self._set_token_matched(token, "TableRow", keyword="|", items=token.line.table_cells)

            return True
        return False

    def _is_gfm_table_separator(self, table_cells):
        """True if any cell looks like a GFM alignment separator (---, :-:, ...)."""
        text_of_table_cells = map(lambda x: x["text"], table_cells)
        separator_values = list(filter(lambda x: re.match("^:?-+:?$", x), text_of_table_cells))
        return len(separator_values) > 0

    def match_StepLine(self, token):  # noqa: N802
        # Steps are Markdown bullet items: "* Given ...", "- When ..." etc.
        non_star_step_keywords = (
            self.dialect.given_keywords
            + self.dialect.when_keywords
            + self.dialect.then_keywords
            + self.dialect.and_keywords
            + self.dialect.but_keywords
        )
        return self._match_title_line(KEYWORD_PREFIX_BULLET, non_star_step_keywords, "", token, "StepLine")

    def match_Comment(self, token):  # noqa: N802
        # Only GFM table separator rows are treated as comments here.
        result = False
        if token.line.startswith("|"):
            table_cells = token.line.table_cells
            if self._is_gfm_table_separator(table_cells):
                result = True
        self._set_token_matched(token, "Comment")
        return result

    def match_Empty(self, token):  # noqa: N802
        """An empty line — or any line no other matcher claims — is Empty.

        NOTE(review): the fall-through probes below call the other match_*
        methods, which mutate token and matcher state as a side effect;
        confirm this is intended before refactoring.
        """
        result = False
        if token.line.is_empty():
            result = True
        if (
            not self.match_TagLine(token)
            and not self.match_FeatureLine(token)
            and not self.match_ScenarioLine(token)
            and not self.match_BackgroundLine(token)
            and not self.match_ExamplesLine(token)
            and not self.match_RuleLine(token)
            and not self.match_TableRow(token)
            and not self.match_Comment(token)
            and not self.match_Language(token)
            and not self.match_DocStringSeparator(token)
            and not self.match_EOF(token)
            and not self.match_StepLine(token)
        ):
            # neutered
            result = True

        if result:
            self._set_token_matched(token, "Empty", indent=0)
        return result
        return False  # NOTE(review): unreachable — dead code after the return above

    # We've made a deliberate choice not to support `# language: [ISO 639-1]` headers or similar
    # in Markdown. Users should specify a language globally.
    def match_Language(self, token):  # noqa: N802
        if not token:
            raise ValueError("no token")
        return False

    def match_TagLine(self, token):  # noqa: N802
        # Tags must be wrapped in backticks: `@tag1` `@tag2`.
        tags = []
        matching_tags = re.finditer("`(@[^`]+)`", token.line.get_line_text())
        idx = 0
        for match in matching_tags:
            tags.append({"column": token.line.indent + match.start(idx) + 2, "text": match.group(1)})

        if len(tags) == 0:
            return False

        self._set_token_matched(token, "TagLine", items=tags)
        return True

    def match_DocStringSeparator(self, token):  # noqa: N802
        if not self._active_doc_string_separator:
            # open
            # "````" is probed before "```" so the longer fence wins.
            return (
                self._match_DocStringSeparator(token, '"""', True)
                or self._match_DocStringSeparator(token, "````", True)
                or self._match_DocStringSeparator(token, "```", True)
            )

        # close: only the separator that opened the doc string may close it.
        return self._match_DocStringSeparator(token, self._active_doc_string_separator, False)

    def _match_DocStringSeparator(self, token, separator, is_open):  # noqa: N802
        """Open or close a doc string; while open, content lines are
        de-indented by the opening line's indent (see match_Other)."""
        if not token.line.startswith(separator):
            return False

        content_type = ""
        if is_open:
            # The rest of the opening line (if any) is the content media type.
            content_type = token.line.get_rest_trimmed(len(separator))
            self._active_doc_string_separator = separator
            self._indent_to_remove = token.line.indent
        else:
            self._active_doc_string_separator = None
            self._indent_to_remove = 0

        # TODO: Use the separator as keyword. That's needed for pretty printing.
        self._set_token_matched(token, "DocStringSeparator", content_type, separator)
        return True

    def match_Other(self, token):  # noqa: N802
        # take the entire line, except removing DocString indents
        text = token.line.get_line_text(self._indent_to_remove)
        self._set_token_matched(token, "Other", self._unescaped_docstring(text), indent=0)
        return True

    def match_EOF(self, token):  # noqa: N802
        if not token.eof():
            return False

        self._set_token_matched(token, "EOF")
        return True

    def _match_title_line(self, prefix: Any, keywords: Any, keyword_suffix: Any, token: Token, token_type: Any) -> bool:
        """Match "<markdown prefix><keyword><suffix><title>" within the line."""
        keywords_or_list = "|".join(map(lambda x: re.escape(x), keywords))
        match = re.search("{}({}){}(.*)".format(prefix, keywords_or_list, keyword_suffix), token.line.get_line_text())
        indent = token.line.indent
        matched_keyword_type = None
        if match:
            matched_keyword = match.group(2)
            # The Markdown prefix (header hashes / bullet) counts as indent.
            indent += len(match.group(1))

            # only set the keyword type if this is a step keyword
            if matched_keyword in self.keyword_types:
                matched_keyword_type = self.keyword_types[matched_keyword][0]

            self._set_token_matched(
                token,
                token_type,
                match.group(3).strip(),
                matched_keyword,
                keyword_type=matched_keyword_type,
                indent=indent,
            )
            return True
        return False

    # NOTE(review): this helper is not called anywhere in this class — it
    # looks like a leftover; confirm against callers before removing.
    def _set_token_matched2(
        self,
        token,
        matched,
        indent=None,
    ):
        token.matched_gherkin_dialect = self.dialect_name
        if indent is not None:
            token.matched_indent = indent
        else:
            token.matched_indent = token.line.indent if token.line else 0
        token.location.column = token.matched_indent + 1
        return matched

    def _set_token_matched(
        self,
        token: Token,
        matched_type: Any,
        text: Any = None,
        keyword: Any = None,
        keyword_type: Any = None,
        indent: Any = None,
        items: Any = None,
    ) -> None:
        """Record the match result on ``token`` and set its (1-based) column."""
        if items is None:
            items = []
        token.matched_type = matched_type
        # text == '' should not result in None
        token.matched_text = text.rstrip("\r\n") if text is not None else None
        token.matched_keyword = keyword
        token.matched_keyword_type = keyword_type
        if indent is not None:
            token.matched_indent = indent
        else:
            token.matched_indent = token.line.indent if token.line else 0
        token.matched_items = items
        token.location.column = token.matched_indent + 1
        token.matched_gherkin_dialect = self.dialect_name

    def _change_dialect(self, dialect_name: str, location: Location = None) -> None:
        """Switch dialect and rebuild the keyword -> step-type lookup table.

        NOTE(review): unlike token_matcher.py, ``location`` (possibly None)
        is passed straight to NoSuchLanguageException instead of falling
        back to Location(0, 0) — confirm whether this asymmetry is intended.
        """
        dialect = Dialect.for_name(dialect_name)
        if not dialect:
            raise NoSuchLanguageException(dialect_name, location)

        self.dialect_name = dialect_name
        self.dialect = dialect
        self.keyword_types = defaultdict(list)
        for keyword in self.dialect.given_keywords:
            self.keyword_types[keyword].append("Context")
        for keyword in self.dialect.when_keywords:
            self.keyword_types[keyword].append("Action")
        for keyword in self.dialect.then_keywords:
            self.keyword_types[keyword].append("Outcome")
        for keyword in self.dialect.and_keywords + self.dialect.but_keywords:
            self.keyword_types[keyword].append("Conjunction")

    def _unescaped_docstring(self, text):
        """Undo the escaping of the active separator inside doc-string content."""
        if self._active_doc_string_separator == '"""':
            return text.replace('\\"\\"\\"', '"""')
        if self._active_doc_string_separator == "```":
            return text.replace("\\`\\`\\`", "```")
        return text
275 |
--------------------------------------------------------------------------------
/packages/gurke/src/gurke/token_scanner.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | from typing import Union
4 |
5 | from .gherkin_line import GherkinLine
6 | from .location import Location
7 | from .token import Token
8 |
9 |
class TokenScanner(object):
    """Produces one ``Token`` per line, from a file path or a raw string."""

    def __init__(self, path_or_str: Union[os.PathLike[str], str]) -> None:
        # A PathLike opens the file; any other value is treated as the
        # gherkin source text itself.
        if isinstance(path_or_str, os.PathLike):
            self.io = io.open(path_or_str, "r", encoding="utf8")
        else:
            self.io = io.StringIO(path_or_str)
        self.line_number = 0

    def read(self) -> Token:
        self.line_number += 1
        raw_line = self.io.readline()
        line = GherkinLine(raw_line, self.line_number)
        return Token(line, Location(self.line_number, None))

    def __del__(self) -> None:
        # Best effort: __init__ may have failed before self.io was set.
        try:
            self.io.close()
        except AttributeError:
            pass
28 |
--------------------------------------------------------------------------------
/packages/language_server_plugin/.comming_soon:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robotcodedev/robotframework-gherkin-parser/dc81a6845d19c447d006aa4b67219354d79fa9c1/packages/language_server_plugin/.comming_soon
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling>=1.12.2"]
3 | build-backend = "hatchling.build"
4 |
5 |
6 | [project]
7 | name = "robotframework-gherkin-parser"
8 | description = "Gherkin parser for Robot Framework"
9 | authors = [{ name = "Daniel Biehl", email = "dbiehl@live.de" }]
10 | readme = { "file" = "README.md", "content-type" = "text/markdown" }
11 | license = { text = "Apache-2.0" }
12 | keywords = [
13 | "Test",
14 | "Testing",
15 | "RobotFramework",
16 | "Keyword Driven",
17 | "Data Driven",
18 | "Acceptance Testing",
19 | "Acceptance Test Driven Development",
20 | "BDD",
21 | "Behavior Driven Testing",
22 | "BDT",
23 | "Gherkin",
24 | ]
25 | classifiers = [
26 | "Development Status :: 5 - Production/Stable",
27 | "License :: OSI Approved :: Apache Software License",
28 | "Operating System :: OS Independent",
29 | "Programming Language :: Python :: 3.8",
30 | "Programming Language :: Python :: 3.9",
31 | "Programming Language :: Python :: 3.10",
32 | "Programming Language :: Python :: 3.11",
33 | "Programming Language :: Python :: 3.12",
34 | "Programming Language :: Python :: Implementation :: CPython",
35 | "Topic :: Software Development :: Testing",
36 | "Topic :: Software Development :: Testing :: Acceptance",
37 | "Topic :: Software Development :: Testing :: BDD",
38 | "Topic :: Software Development :: Quality Assurance",
39 | "Topic :: Utilities",
40 | "Typing :: Typed",
41 | "Topic :: Software Development :: Debuggers",
42 | "Topic :: Text Editors :: Integrated Development Environments (IDE)",
43 | "Intended Audience :: Developers",
44 | "Framework :: Robot Framework",
45 | "Framework :: Robot Framework :: Library",
46 | "Framework :: Robot Framework :: Tool",
47 | ]
48 | requires-python = ">=3.8"
49 | dependencies = ["robotframework>=7.0", "gurke==0.3.2"]
50 | dynamic = ["version"]
51 |
52 |
53 | [project.urls]
54 | Homepage = "https://github.com/d-biehl/robotframework-gherkin-parser"
55 | Donate = "https://github.com/sponsors/d-biehl"
56 | Documentation = "https://github.com/d-biehl/robotframework-gherkin-parser#readme"
57 | Changelog = "https://github.com/d-biehl/robotframework-gherkin-parser/blob/main/CHANGELOG.md"
58 | Issues = "https://github.com/d-biehl/robotframework-gherkin-parser/issues"
59 | Source = "https://github.com/d-biehl/robotframework-gherkin-parser"
60 |
61 |
62 | [tool.semantic_release]
63 | version_variable = [
64 | "src/GherkinParser/__version__.py:__version__",
65 | "packages/gurke/src/gurke/__version__.py:__version__",
66 | ]
67 | version_pattern = [
68 | 'package.json:"version": "{version}"',
69 | "packages/gurke/pyproject.toml:\"robotcode\\S*=={version}\"",
70 | "pyproject.toml:\"robotcode\\S*=={version}\"",
71 | "pyproject.toml:\"gurke\\S*=={version}\"",
72 | ]
73 | branch = "main"
74 | upload_to_release = false
75 | upload_to_repository = false
76 | build_command = "pip install hatch && hatch build"
77 |
78 |
79 | [tool.black]
80 | line-length = 120
81 | target-version = ['py38']
82 | extend-exclude = '''
83 | (
84 | /(
85 | | bundled/libs
86 | )/
87 | )
88 | '''
89 |
90 |
91 | [tool.pytest.ini_options]
92 | minversion = "6.0"
93 | addopts = "-ra -vv -rf --ignore=bundled --ignore=.hatch"
94 | filterwarnings = "ignore:.*Using or importing the ABCs from 'collections' instead of from 'collections.abc'.*:DeprecationWarning"
95 | testpaths = ["tests"]
96 | junit_suite_name = "GherkinParser Tests"
97 | # console_output_style = "classic"
98 | # log_cli = true
99 | # log_cli_level = 4
100 | # log_cli_format = "%(levelname)s %(name)s: %(message)s"
101 | asyncio_mode = "auto"
102 | faulthandler_timeout = 30
103 |
104 |
105 | [tool.coverage.run]
106 | omit = ["*tests*"]
107 | source = ["GherkinParser"]
108 |
109 |
110 | [tool.coverage.report]
111 | exclude_lines = [
112 | "pragma: no cover",
113 | "if __name__ == .__main__.:",
114 | "raise NotImplementedError",
115 | ]
116 | fail_under = 40
117 |
118 |
119 | [tool.ruff]
120 | line-length = 120
121 | target-version = "py38"
122 | extend-exclude = ["bundled/libs"]
123 | ignore = ["E741", "N805", "N999"]
124 | select = [
125 | "E",
126 | "F",
127 | "W",
128 | "I",
129 | "N",
130 | #"UP",
131 | "YTT",
132 | #"ANN",
133 | #"BLE",
134 | #"B",
135 | #"A"
136 | #"COM"
137 | # "C4", # TODO enable this
138 | "DTZ",
139 | "T10",
140 | # "EM",
141 | "ISC",
142 | "G",
143 | #"INP",
144 | "PIE",
145 | # "T20",
146 | "PT",
147 | "Q",
148 | "RET",
149 | # "SIM", # TODO enable this
150 | # "TID",
151 | # "TCH",
152 | # "ARG",
153 | # "PTH", # TODO enable this
154 | # "SLF", # TODO enable this
155 | # "ERA", # TODO enable this
156 | "RSE",
157 | # "PL",
158 | #"TRY",
159 | "RUF",
160 | # "TID"
161 | ]
162 |
163 |
164 | [tool.ruff.per-file-ignores]
165 | #"__init__.py" = ["F401"]
166 |
167 |
168 | [tool.mypy]
169 | python_version = "3.8"
170 | strict = true
171 | warn_redundant_casts = true
172 | warn_unused_ignores = true
173 | warn_return_any = true
174 | warn_unused_configs = true
175 | warn_unreachable = true
176 | implicit_optional = true
177 | # disallow_untyped_decorators = false
178 | disallow_subclassing_any = false
179 | exclude = [
180 | "\\.mypy_cache",
181 | "\\.venv",
182 | "\\.hatch",
183 | "build",
184 | "dist",
185 | "out",
186 | "playground",
187 | "scripts",
188 | "bundled/libs",
189 | ]
190 | mypy_path = ["typings", "src", "packages/gurke/src"]
191 | explicit_package_bases = true
192 | namespace_packages = true
193 |
194 |
195 | [[tool.mypy.overrides]]
196 | module = [
197 | "robot.*",
198 | "debugpy.*",
199 | "robotidy.*",
200 | "robocop.*",
201 | "pytest_regtest.*",
202 | "pluggy",
203 | ]
204 | ignore_missing_imports = true
205 |
206 | [tool.pyright]
207 | exclude = ["**/.hatch", "**/node_modules", "**/__pycache__", "bundled/libs"]
208 | typeCheckingMode = "off"
209 | pythonVersion = "3.8"
210 |
211 |
212 | [tool.commitizen]
213 | name = "cz_conventional_commits"
214 | bump_message = "chore(release): bump version $current_version → $new_version"
215 | tag_format = "v$version"
216 | version_scheme = "semver"
217 | version_provider = "scm"
218 | update_changelog_on_bump = false
219 | major_version_zero = true
220 | changelog_incremental = true
221 | changelog_merge_prerelease = true
222 | gpg_sign = true
223 | annotated_tag = true
224 | pre_bump_hooks = [
225 | "hatch env remove lint",
226 | "hatch run build:update-git-versions",
227 | "hatch run build:update-changelog",
228 | "git add .",
229 | ]
230 |
--------------------------------------------------------------------------------
/scripts/deploy_docs.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import sys
3 | from pathlib import Path
4 | from subprocess import run
5 |
# Bootstrap so the file works both as "python scripts/deploy_docs.py" and as a
# module: put the repo root on sys.path and adopt the "scripts" package name.
# NOTE(review): operator precedence makes this parse as
# (__name__ == "__main__" and __package__ is None) or __package__ == "" —
# confirm that triggering on an empty __package__ alone is intended.
if __name__ == "__main__" and __package__ is None or __package__ == "":
    file = Path(__file__).resolve()
    parent, top = file.parent, file.parents[1]

    # Make the repository root importable so "scripts.tools" resolves.
    if str(top) not in sys.path:
        sys.path.append(str(top))

    # Drop the scripts directory itself to avoid shadowing package imports.
    with contextlib.suppress(ValueError):
        sys.path.remove(str(parent))

    __package__ = "scripts"
17 |
18 |
19 | from scripts.tools import get_current_version_from_git
20 |
21 |
def main() -> None:
    """Deploy versioned documentation with mike.

    Releases publish under the "latest" alias; prereleases deploy the next
    minor version under the "dev" alias.  Raises CalledProcessError if the
    mike invocation fails.
    """
    version = get_current_version_from_git()
    alias = "latest"

    if version.prerelease:
        # Prereleases document the upcoming minor version instead.
        version = version.next_minor()
        alias = "dev"

    # (Removed a stray no-op expression statement: `version.major, version.minor`.)
    run(
        "mike deploy --push --update-aliases --rebase --force "
        f'--title "v{version.major}.{version.minor}.x ({alias})" {version.major}.{version.minor} {alias}',
        shell=True,
    ).check_returncode()
37 |
38 |
39 | if __name__ == "__main__":
40 | main()
41 |
--------------------------------------------------------------------------------
/scripts/extract_release_notes.py:
--------------------------------------------------------------------------------
import contextlib
import re
import sys
from pathlib import Path

# Script bootstrap: make the repository root importable when run directly.
# BUGFIX: "A and B or C" precedence made the guard fire on plain import when
# __package__ == ""; "not __package__" (as in update_changelog.py) is intended.
if __name__ == "__main__" and not __package__:
    file = Path(__file__).resolve()
    parent, top = file.parent, file.parents[1]

    if str(top) not in sys.path:
        sys.path.append(str(top))

    with contextlib.suppress(ValueError):
        sys.path.remove(str(parent))

    __package__ = "scripts"


from scripts.tools import get_version
20 |
21 |
def main() -> None:
    """Print the CHANGELOG.md section belonging to the current version."""
    version = get_version()

    changelog = Path("CHANGELOG.md").read_text()

    # Match the "## v<version>" heading and capture everything up to the next
    # "## " heading.  BUGFIX: the named group must be spelled "(?P<text>...)";
    # the original "(?P.*?)" is invalid syntax and raises re.error, and
    # match.group("text") below requires the group name.
    # NOTE(review): dots in {version} act as regex wildcards, and a section at
    # the very end of the file (no following "## ") is not matched -- confirm
    # whether that matters for your changelog layout.
    regex = re.compile(rf"^\#\#\s*v({version})[^\n]*?\n(?P<text>.*?)^\#\#\s+", re.MULTILINE | re.DOTALL)

    for match in regex.finditer(changelog):
        print(match.group("text").strip())


if __name__ == "__main__":
    main()
35 |
--------------------------------------------------------------------------------
/scripts/install_bundled_editable.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from pathlib import Path
3 | from subprocess import run
4 |
5 |
def main() -> None:
    """Install runtime requirements plus the local packages (editable) into ./bundled/libs."""
    dist_dir = Path("./dist")
    if not dist_dir.exists():
        dist_dir.mkdir()

    # Always start from a clean bundled libs folder.
    shutil.rmtree("./bundled/libs", ignore_errors=True)

    run(
        "pip --disable-pip-version-check install -U -t ./bundled/libs --no-cache-dir --implementation py "
        "--only-binary=:all: --no-binary=:none: -r ./bundled_requirements.txt",
        shell=True,
    ).check_returncode()

    editable_packages = [
        f"-e {package_dir}"
        for package_dir in Path("./packages").iterdir()
        if (package_dir / "pyproject.toml").exists()
    ]

    run(
        "pip --disable-pip-version-check "
        f"install -U -t ./bundled/libs --no-cache-dir --implementation py --no-deps {' '.join(editable_packages)} -e .",
        shell=True,
    ).check_returncode()


if __name__ == "__main__":
    main()
30 |
--------------------------------------------------------------------------------
/scripts/install_packages.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from subprocess import run
3 |
4 |
def main() -> None:
    """Install the runtime requirements and all local packages (editable)."""
    dist_dir = Path("./dist")
    if not dist_dir.exists():
        dist_dir.mkdir()

    run(
        "pip --disable-pip-version-check install -U -r ./bundled_requirements.txt",
        shell=True,
    ).check_returncode()

    editable_packages = [
        f"-e {package_dir}"
        for package_dir in Path("./packages").iterdir()
        if (package_dir / "pyproject.toml").exists()
    ]

    if not editable_packages:
        return

    run(
        f"pip --disable-pip-version-check install --no-deps -U {' '.join(editable_packages)}",
        shell=True,
    ).check_returncode()


if __name__ == "__main__":
    main()
28 |
--------------------------------------------------------------------------------
/scripts/is_prerelease.py:
--------------------------------------------------------------------------------
import contextlib
import sys
from pathlib import Path

# Script bootstrap: make the repository root importable when run directly.
# BUGFIX: "A and B or C" precedence made the guard fire on plain import when
# __package__ == ""; "not __package__" (as in update_changelog.py) is intended.
if __name__ == "__main__" and not __package__:
    file = Path(__file__).resolve()
    parent, top = file.parent, file.parents[1]

    if str(top) not in sys.path:
        sys.path.append(str(top))

    with contextlib.suppress(ValueError):
        sys.path.remove(str(parent))

    __package__ = "scripts"


from scripts.tools import get_version
19 |
20 |
def main() -> None:
    """Print "1" when the current version is a pre-release, else "0"."""
    is_pre = 1 if get_version().prerelease else 0

    print(is_pre)


if __name__ == "__main__":
    main()
30 |
--------------------------------------------------------------------------------
/scripts/package.py:
--------------------------------------------------------------------------------
import contextlib
import shutil
import sys
from pathlib import Path
from subprocess import run

# Script bootstrap: make the repository root importable when run directly.
# BUGFIX: "A and B or C" precedence made the guard fire on plain import when
# __package__ == ""; "not __package__" (as in update_changelog.py) is intended.
if __name__ == "__main__" and not __package__:
    file = Path(__file__).resolve()
    parent, top = file.parent, file.parents[1]

    if str(top) not in sys.path:
        sys.path.append(str(top))

    with contextlib.suppress(ValueError):
        sys.path.remove(str(parent))

    __package__ = "scripts"


from scripts.tools import get_version
21 |
# Force the VS Code extension to be packaged as a pre-release build.
PRE_RELEASE = True


def main() -> None:
    """Build all distributions, bundle the dependencies and package the extension."""
    dist_path = Path("./dist").absolute()
    if not dist_path.exists():
        dist_path.mkdir()

    local_packages = [f"{pkg}" for pkg in Path("./packages").iterdir() if (pkg / "pyproject.toml").exists()]

    # Build each sub-package, then the top-level project, into ./dist.
    for pkg in local_packages:
        run(f"hatch -e build build {dist_path}", shell=True, cwd=pkg).check_returncode()

    run(f"hatch -e build build {dist_path}", shell=True).check_returncode()

    # Re-create the bundled libs folder from scratch.
    shutil.rmtree("./bundled/libs", ignore_errors=True)

    run(
        "pip --disable-pip-version-check install -U -t ./bundled/libs --no-cache-dir --implementation py "
        "--only-binary=:all: --no-binary=:none: -r ./bundled_requirements.txt",
        shell=True,
    ).check_returncode()

    run(
        "pip --disable-pip-version-check "
        f"install -U -t ./bundled/libs --no-cache-dir --implementation py --no-deps {' '.join(local_packages)} .",
        shell=True,
    ).check_returncode()

    run(
        f"npx vsce package {'--pre-release' if PRE_RELEASE or get_version().prerelease else ''} -o ./dist", shell=True
    ).check_returncode()


if __name__ == "__main__":
    main()
56 |
--------------------------------------------------------------------------------
/scripts/publish.py:
--------------------------------------------------------------------------------
import contextlib
import os
import subprocess
import sys
from pathlib import Path
from typing import Any

# Script bootstrap: make the repository root importable when run directly.
# BUGFIX: "A and B or C" precedence made the guard fire on plain import when
# __package__ == ""; "not __package__" (as in update_changelog.py) is intended.
if __name__ == "__main__" and not __package__:
    file = Path(__file__).resolve()
    parent, top = file.parent, file.parents[1]

    if str(top) not in sys.path:
        sys.path.append(str(top))

    with contextlib.suppress(ValueError):
        sys.path.remove(str(parent))

    __package__ = "scripts"


from scripts.tools import get_version
22 |
23 |
def run(title: str, *args: Any, **kwargs: Any) -> None:
    """Run a subprocess described by ``title``.

    Failures are reported on stderr instead of being raised (best-effort
    publishing); user interruption and explicit exits still propagate.
    """
    try:
        print(f"running {title}")
        subprocess.run(*args, **kwargs)
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as error:
        print(f"{title} failed: {error}", file=sys.stderr)
32 |
33 |
def main() -> None:
    """Publish the built artifacts: the .vsix to the VS Code Marketplace and
    Open VSX, then the Python distributions via hatch."""
    dist_path = Path("./dist").absolute()

    if not dist_path.exists():
        raise FileNotFoundError(f"dist folder '{dist_path}' not exists")

    current_version = get_version()

    # The .vsix produced by scripts/package.py for the current version.
    vsix_path = Path(dist_path, f"robotcode-gherkin-{current_version}.vsix")

    # Each step is best-effort: run() logs failures instead of raising, so the
    # remaining registries are still attempted.
    run("npx vsce publish", f"npx vsce publish -i {vsix_path}", shell=True, timeout=600)
    run("npx ovsx publish", f"npx ovsx publish {vsix_path}", shell=True, timeout=600)

    pypi_args = ""
    if os.environ.get("PYPI_USERNAME") and os.environ.get("PYPI_PASSWORD"):
        # NOTE(review): the password ends up on the hatch command line (visible
        # in process listings); consider hatch's env-var credentials instead.
        pypi_args = f' -u "{os.environ["PYPI_USERNAME"]}" -a "{os.environ["PYPI_PASSWORD"]}"'

    run(
        "hatch publish",
        f"hatch -e build publish{pypi_args}",
        shell=True,
        timeout=600,
    )


if __name__ == "__main__":
    main()
61 |
--------------------------------------------------------------------------------
/scripts/tools.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from typing import NamedTuple, Optional
4 |
5 | from git.repo import Repo
6 | from semantic_version import Version
7 |
8 |
class GitDescribeVersion(NamedTuple):
    """Parsed ``git describe --long`` output: "<version>-<commits>-<hash>"."""

    # Tag name including the leading "v" (tags are matched with "v[0-9]*").
    version: str
    # Number of commits since the tag; "0" when exactly on the tag.
    commits: Optional[str] = None
    # Abbreviated commit hash part of the describe output.
    hash: Optional[str] = None
13 |
14 |
def get_current_version_from_git() -> Version:
    """Derive the current semantic version from ``git describe``.

    On a tagged commit the tag's version is returned unchanged; otherwise the
    next patch version with a ``dev.<commits>`` pre-release marker is returned.
    """
    repo = Repo(Path.cwd())

    # "--long" guarantees the "<tag>-<commits>-<hash>" format, so rsplit("-", 2)
    # yields exactly three fields even if the tag name itself contains dashes.
    git_version = GitDescribeVersion(
        *repo.git.describe("--tag", "--long", "--first-parent", "--match", "v[0-9]*").rsplit("-", 2)
    )

    # Strip the leading "v" from the tag name.
    version = Version(git_version.version[1:])
    if git_version.commits is not None and git_version.commits != "0":
        # Commits on top of the last tag -> dev pre-release of the next patch.
        version = version.next_patch()
        version.prerelease = ("dev", git_version.commits)

    return version
28 |
29 |
def get_version() -> Version:
    """Return the version to build/publish.

    Environment overrides (npm, then commitizen) win over the git-derived
    version.
    """
    for env_var in ("npm_package_version", "CZ_PRE_NEW_VERSION"):
        if env_var in os.environ:
            return Version(os.environ[env_var])

    return get_current_version_from_git()
37 |
--------------------------------------------------------------------------------
/scripts/update_changelog.py:
--------------------------------------------------------------------------------
import contextlib
import subprocess
import sys
from pathlib import Path
from typing import Any

# Script bootstrap: when executed directly (python scripts/update_changelog.py),
# put the repository root on sys.path and set __package__ so the absolute
# "scripts.*" import below resolves.
if __name__ == "__main__" and not __package__:
    file = Path(__file__).resolve()
    parent, top = file.parent, file.parents[1]

    if str(top) not in sys.path:
        sys.path.append(str(top))

    # Drop the scripts/ directory itself so sibling modules cannot also be
    # imported as top-level modules (avoids duplicate module instances).
    with contextlib.suppress(ValueError):
        sys.path.remove(str(parent))

    __package__ = "scripts"


from scripts.tools import get_version
21 |
22 |
def run(title: str, *args: Any, **kwargs: Any) -> None:
    """Run a subprocess described by ``title``; failures are logged to stderr
    instead of raised, while user interruption still propagates."""
    try:
        print(f"running {title}")
        subprocess.run(*args, **kwargs)
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as error:
        print(f"{title} failed: {error}", file=sys.stderr)
31 |
32 |
def main() -> None:
    """Regenerate CHANGELOG.md with git-cliff for the current version."""
    current_version = get_version()

    run(
        "create changelog",
        f"git-cliff --bump -t v{current_version} -o CHANGELOG.md",
        shell=True,
        timeout=600,
    )


if __name__ == "__main__":
    main()
46 |
--------------------------------------------------------------------------------
/scripts/update_doc_links.py:
--------------------------------------------------------------------------------
import contextlib
import re
import sys
from pathlib import Path

# Script bootstrap: make the repository root importable when run directly.
# BUGFIX: "A and B or C" precedence made the guard fire on plain import when
# __package__ == ""; "not __package__" (as in update_changelog.py) is intended.
if __name__ == "__main__" and not __package__:
    file = Path(__file__).resolve()
    parent, top = file.parent, file.parents[1]

    if str(top) not in sys.path:
        sys.path.append(str(top))

    with contextlib.suppress(ValueError):
        sys.path.remove(str(parent))

    __package__ = "scripts"

from scripts.tools import get_version
19 |
20 |
def replace_in_file(filename: Path, pattern: "re.Pattern[str]", to: str) -> None:
    """Apply ``pattern.sub(to, ...)`` to the file's content, rewriting it in place."""
    content = filename.read_text()

    filename.write_text(pattern.sub(to, content))
26 |
27 |
# Base URL used to turn relative image links into absolute raw.githubusercontent links.
# NOTE(review): the slug "d-biehl/robotframework-gherkin" does not match the
# "robotcodedev/robotframework-gherkin-parser" repository referenced elsewhere in
# this project -- verify before the next release.
REPOSITORY_BASE = "https://raw.githubusercontent.com/d-biehl/robotframework-gherkin"


def main() -> None:
    """Rewrite relative "![...](./path)" image links in README.md and
    CHANGELOG.md to absolute links for the current tag.

    Only done for pre-release versions; final releases keep relative links.
    """
    version = get_version()
    if version.prerelease:
        tag_base = f"{REPOSITORY_BASE}/v{version}"

        # Groups: 1 = "![...](", 2 = the leading ".", 3 = "/path", 4 = ")".
        replace_in_file(
            Path("README.md"),
            re.compile(r"(\!\[.*?\]\()(\.)(/[^\)]*?)(\))"),
            rf"""\g<1>{tag_base}\g<3>\g<4>""",
        )
        replace_in_file(
            Path("CHANGELOG.md"),
            re.compile(r"(\!\[.*?\]\()(\.)(/[^\)]*?)(\))"),
            rf"""\g<1>{tag_base}\g<3>\g<4>""",
        )


if __name__ == "__main__":
    main()
50 |
--------------------------------------------------------------------------------
/scripts/update_git_versions.py:
--------------------------------------------------------------------------------
import contextlib
import re
import sys
from pathlib import Path

# Script bootstrap: make the repository root importable when run directly.
# BUGFIX: "A and B or C" precedence made the guard fire on plain import when
# __package__ == ""; "not __package__" (as in update_changelog.py) is intended.
if __name__ == "__main__" and not __package__:
    file = Path(__file__).resolve()
    parent, top = file.parent, file.parents[1]

    if str(top) not in sys.path:
        sys.path.append(str(top))

    with contextlib.suppress(ValueError):
        sys.path.remove(str(parent))

    __package__ = "scripts"


from scripts.tools import get_version
20 |
21 |
def replace_in_file(filename: Path, pattern: "re.Pattern[str]", to: str) -> None:
    """Substitute ``pattern`` with ``to`` in the given file, rewriting it in place."""
    updated = pattern.sub(to, filename.read_text())
    filename.write_text(updated)
26 |
27 |
def main() -> None:
    """Write the current version into every version-carrying file:
    __version__.py modules, package.json and the gurke pin in pyproject.toml files."""
    version = get_version()
    version_files = list(Path("packages").rglob("__version__.py"))

    # Update every `__version__ = "..."`-style assignment.
    for f in [Path("src/GherkinParser/__version__.py"), *version_files]:
        replace_in_file(
            f,
            re.compile(r"""(^_*version_*\s*=\s*['"])([^'"]*)(['"])""", re.MULTILINE),
            rf"\g<1>{version or ''}\g<3>",
        )

    # Update the "version" field of the npm package manifest.
    replace_in_file(
        Path("package.json"),
        re.compile(r"""(\"version\"\s*:\s*['"])([0-9]+\.[0-9]+\.[0-9]+.*)(['"])""", re.MULTILINE),
        rf"\g<1>{version or ''}\g<3>",
    )

    pyproject_files = list(Path("packages").rglob("pyproject.toml"))

    # Pin the "gurke==..." dependency to the same version.
    for f in [Path("pyproject.toml"), *pyproject_files]:
        replace_in_file(
            f,
            re.compile(r'("gurke\S*==)([0-9]+\.[0-9]+\.[0-9]+.*)(")', re.MULTILINE),
            rf"\g<1>{version or ''}\g<3>",
        )


if __name__ == "__main__":
    main()
57 |
--------------------------------------------------------------------------------
/src/GherkinParser/Library.py:
--------------------------------------------------------------------------------
1 | from itertools import chain
2 | from typing import Any, Iterator, List, Tuple, Union
3 |
4 | from robot import result, running
5 | from robot.api.deco import library
6 | from robot.api.interfaces import ListenerV3
7 | from robot.libraries.BuiltIn import EXECUTION_CONTEXTS, BuiltIn
8 |
9 |
@library(
    scope="SUITE",
    version="1.0.0",
)
class Library(ListenerV3):
    """Listener library that weaves "hook" keywords into suites and tests.

    Any keyword in an imported resource or library tagged ``hook:<event>``
    (see ``prefix``) is injected for the matching event by rewriting the
    suite/test setup and teardown into ``BuiltIn.Run Keywords`` chains.
    """

    # Tag prefix that marks a keyword as an event hook, e.g. "hook:before-suite".
    prefix = "hook:"

    def __init__(self) -> None:
        # The library instance acts as its own listener.
        self.ROBOT_LIBRARY_LISTENER = self
        # Re-entrancy guard: hook dispatch triggered while hooks run is skipped.
        self._in_call_hooks = False

    def call_hooks(self, events: Union[str, Tuple[str, ...]], *args: Any, **kwargs: Any) -> None:
        """Run every keyword tagged for one of ``events``; re-entrant calls are no-ops."""
        if self._in_call_hooks:
            return

        errored = False
        # BUGFIX: this was assigned False, so the re-entrancy guard above could
        # never engage while hooks were executing.
        self._in_call_hooks = True
        try:
            if isinstance(events, str):
                events = (events,)
            ctx = EXECUTION_CONTEXTS.current
            for name, keyword in chain(
                *([(v.name, kw) for kw in v.keywords] for v in ctx.namespace._kw_store.resources.values()),
                *([(v.name, kw) for kw in v.keywords] for v in ctx.namespace._kw_store.libraries.values()),
            ):
                hook_tags = [tag for tag in keyword.tags if tag.startswith(self.prefix)]
                for tag in hook_tags:
                    if tag[len(self.prefix) :] in events:
                        runner = EXECUTION_CONTEXTS.current.get_runner(name + "." + keyword.name)
                        if runner.keyword is keyword:
                            try:
                                BuiltIn().run_keyword(name + "." + keyword.name, *args, **kwargs)
                            except Exception as e:
                                print(e)
                                errored = True
                                # NOTE(review): this raise is discarded by the
                                # "break" in the finally block below, so a hook
                                # failure stops dispatch without propagating --
                                # confirm that is the intended behavior.
                                raise e
                            finally:
                                break
                if errored:
                    break
        finally:
            self._in_call_hooks = False

    def yield_hooks(self, events: Union[str, Tuple[str, ...]], *args: Any, **kwargs: Any) -> Iterator[str]:
        """Yield the full names ("<owner>.<keyword>") of all hooks for ``events``."""
        if isinstance(events, str):
            events = (events,)
        ctx = EXECUTION_CONTEXTS.current
        for name, kw in chain(
            *([(v.name, k) for k in v.keywords] for v in ctx.namespace._kw_store.resources.values()),
            *([(v.name, k) for k in v.keywords] for v in ctx.namespace._kw_store.libraries.values()),
        ):
            hook_tags = [tag for tag in kw.tags if tag.startswith(self.prefix)]
            for tag in hook_tags:
                if tag[len(self.prefix) :] in events:
                    full_name = name + "." + kw.name
                    runner = EXECUTION_CONTEXTS.current.get_runner(full_name)
                    if runner.keyword is kw:
                        yield full_name

    def _create_setup_and_teardown(
        self, data: Union[running.TestSuite, running.TestCase], events: Union[str, Tuple[str, ...]]
    ) -> None:
        """Prepend the hook keywords for ``events`` to ``data``'s setup and teardown.

        Existing setups/teardowns are preserved by chaining everything through
        ``BuiltIn.Run Keywords ... AND ...``.
        """
        if isinstance(events, str):
            events = (events,)

        kws: List[str] = []

        for name in self.yield_hooks(events):
            if kws:
                kws.append("AND")
            kws.append(name)

        lineno = data.lineno if isinstance(data, running.TestCase) else 1

        if kws:
            if data.setup.name:
                # Keep the original setup, running the hooks first.
                data.setup.config(
                    name="BuiltIn.Run Keywords",
                    args=(*kws, "AND", data.setup.name, *data.setup.args),
                    lineno=lineno,
                )
            else:
                data.setup.config(
                    name="BuiltIn.Run Keywords",
                    args=(*kws,),
                    lineno=lineno,
                )

        # NOTE(review): the teardown uses the same ``events`` as the setup --
        # presumably matching "after-*" events were intended; confirm.
        kws = []

        for name in self.yield_hooks(events):
            if kws:
                kws.append("AND")
            kws.append(name)

        if kws:
            if data.teardown.name:
                # BUGFIX: this branch configured data.setup instead of
                # data.teardown, clobbering the setup and losing the original
                # teardown chain.
                data.teardown.config(
                    name="BuiltIn.Run Keywords",
                    args=(*kws, "AND", data.teardown.name, *data.teardown.args),
                    lineno=lineno,
                )
            else:
                data.teardown.config(
                    name="BuiltIn.Run Keywords",
                    args=(*kws,),
                    lineno=lineno,
                )

    def start_suite(self, data: running.TestSuite, result: result.TestSuite) -> None:
        """Inject before-suite/before-feature hooks when a suite starts."""
        self._create_setup_and_teardown(data, ("before-suite", "before-feature"))

    def start_test(self, data: running.TestCase, result: result.TestCase) -> None:
        """Inject before-test hooks when a test starts."""
        # NOTE(review): "before-test" is listed twice -- one entry was probably
        # meant to be "before-scenario"; confirm against the hook event names.
        self._create_setup_and_teardown(data, ("before-test", "before-test"))

    # def start_keyword(self, data: running.Keyword, result: result.Keyword) -> None:
    #     # self.call_hooks(("before-keyword", "before-step"))
    #     # if result.tags.match("gerkin:step:docstring"):
    #     #     data.args = (1,2)

    # def end_keyword(self, data: running.Keyword, result: result.Keyword) -> None:
    #     # self.call_hooks(("after-keyword", "after-step"))
    #     pass
137 |
--------------------------------------------------------------------------------
/src/GherkinParser/__init__.py:
--------------------------------------------------------------------------------
1 | from .gherkin_parser import GherkinParser
2 |
3 | __all__ = ["GherkinParser"]
4 |
--------------------------------------------------------------------------------
/src/GherkinParser/__version__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.3.2"
2 |
--------------------------------------------------------------------------------
/src/GherkinParser/gherkin_builder.py:
--------------------------------------------------------------------------------
1 | import ast
2 | import re
3 | from os import PathLike
4 | from pathlib import Path
5 | from typing import Any, Dict, List, Optional, Tuple
6 |
7 | import robot.running
8 | from gurke.errors import CompositeParserException
9 | from gurke.parser import Parser
10 | from gurke.pickles.compiler import Compiler
11 | from gurke.token_matcher import TokenMatcher
12 | from gurke.token_matcher_markdown import GherkinInMarkdownTokenMatcher
13 | from robot.api import SuiteVisitor
14 | from robot.parsing.lexer import Token
15 | from robot.parsing.model.blocks import CommentSection, File, SettingSection, TestCase, TestCaseSection
16 | from robot.parsing.model.statements import (
17 | Documentation,
18 | Error,
19 | KeywordCall,
20 | KeywordName,
21 | LibraryImport,
22 | Metadata,
23 | ResourceImport,
24 | SectionHeader,
25 | Tags,
26 | TestTags,
27 | )
28 | from robot.utils.filereader import FileReader
29 |
30 | from .glob_path import iter_files
31 |
# Words that carry control meaning in Robot Framework and must be escaped.
_CONTROL_WORDS = frozenset(("ELSE", "ELSE IF", "AND"))
# Character sequences that need a backslash escape in keyword arguments.
_SEQUENCES_TO_BE_ESCAPED = ("=",)


def escape_whitespace(match: re.Match) -> str:
    """Insert a backslash between the characters of a whitespace run."""
    return "\\".join(match.group(0))


def escape(item: str) -> str:
    """Escape ``item`` so it is safe to use as a Robot Framework argument value."""
    if item in _CONTROL_WORDS:
        return "\\" + item

    # repr() escapes non-printables; strip the surrounding quotes it adds.
    item = repr(item)[1:-1]

    for sequence in _SEQUENCES_TO_BE_ESCAPED:
        if sequence in item:
            item = item.replace(sequence, "\\" + sequence)

    return re.sub(r"\s+", escape_whitespace, item)
51 |
52 |
def find_ast_node_id(
    node: Any, id: str, parent: Any = None
) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]:
    """Depth-first search of a gherkin AST for the dict with the given "id".

    Returns ``(node, parent)`` where ``parent`` is the nearest enclosing dict,
    or ``(None, None)`` when no node matches.
    """
    if isinstance(node, dict):
        if "id" in node and node["id"] == id:
            return node, parent
        for child in node.values():
            found, found_parent = find_ast_node_id(child, id, node)
            if found is not None:
                return found, found_parent
    elif isinstance(node, list):
        for child in node:
            found, found_parent = find_ast_node_id(child, id, parent)
            if found is not None:
                return found, found_parent

    return None, None
72 |
73 |
def build_gherkin_model(source: PathLike[str], content: Optional[str] = None) -> Tuple[ast.AST, Optional[str]]:
    """Parse a Gherkin feature file into a Robot Framework parsing model.

    Returns ``(model, feature_name)``.  On parse errors a model containing only
    Error statements and ``None`` as the name is returned instead of raising.
    """
    try:
        path = Path(source).resolve()

        if content is None:
            with FileReader(path) as reader:
                content = reader.read()

        parser = Parser()

        # Markdown feature files (*.feature.md) need the markdown token matcher.
        gherkin_document = parser.parse(
            content,
            token_matcher=GherkinInMarkdownTokenMatcher() if path.suffix == ".md" else TokenMatcher(),
        )

        gherkin_document["uri"] = path
        # Pickles are the compiled, executable scenarios (examples expanded).
        pickles = Compiler().compile(gherkin_document)

        feature_tags = gherkin_document["feature"]["tags"]

        test_cases = []

        for pickle in pickles:
            testcase_body = []

            # The first AST node id points at the scenario definition.
            node, _parent = find_ast_node_id(gherkin_document, pickle["astNodeIds"][0])
            doc = node.get("description", None) if node else None
            if doc:
                testcase_body.append(Documentation.from_params(doc.strip()))

            # Scenario-level tags only; feature-level tags become test tags below.
            tags = [r["name"][1:] for r in pickle["tags"] if not any(f["id"] == r["astNodeId"] for f in feature_tags)]

            if tags:
                testcase_body.append(Tags.from_params(tags))

            for step in pickle["steps"]:
                node, _parent = find_ast_node_id(gherkin_document, step["astNodeIds"][0])
                if node is None:
                    continue

                args: Tuple[str, ...] = ()
                step_argument = step.get("argument", None)
                if step_argument is not None:
                    # Doc strings / data tables are passed as a single escaped
                    # "&{{...}}" argument to the step keyword.
                    doc_string = step_argument.get("docString", None)
                    if doc_string is not None:
                        args = (f"&{{{{{escape(repr(doc_string))}}}}}",)

                    datatable = step_argument.get("dataTable", None)
                    if datatable is not None:
                        #rows = [[v.get("value") for v in r.get("cells")] for r in datatable["rows"]]
                        args = (f"&{{{{{escape(repr(datatable))}}}}}",)

                keyword_call = KeywordCall.from_params(
                    f"{node['keyword'] if step['type']!='Unknown' else ''}{step['text']}", args=args
                )

                # Map the generated tokens back to the feature file's location
                # so reported errors/results point at the right line/column.
                if node is not None and "location" in node:
                    location = node["location"]

                    column = 0
                    for t in keyword_call.tokens:
                        t.lineno = location.line

                        if t.type == Token.KEYWORD:
                            t.col_offset = location.column - 1

                            column = t.end_col_offset
                        else:
                            t.col_offset = column

                testcase_body.append(keyword_call)

            node, _parent = find_ast_node_id(gherkin_document, pickle["astNodeIds"][0])

            test_case_name = pickle["name"]
            if len(pickle["astNodeIds"]) > 1:
                # Scenario outline: append "<header> = <value>" per example cell
                # so the generated test names are unique.
                e_node, e_parent = find_ast_node_id(gherkin_document, pickle["astNodeIds"][1])
                if e_node and "cells" in e_node and e_parent is not None and e_parent["keyword"] == "Examples":
                    for i, s in enumerate(e_node["cells"]):
                        test_case_name += f" {e_parent['tableHeader']['cells'][i]['value']} = {s['value']}"

            test_case = TestCase(KeywordName.from_params(test_case_name), body=testcase_body)

            if node is not None and "location" in node:
                location = node["location"]

                test_case.header.tokens[0].lineno = location.line
                test_case.header.tokens[0].col_offset = location.column - 1
            test_cases.append(test_case)

        # Auto-import all non-private *.resource files next to the feature file.
        resources = [f for f in iter_files(path.parent, "**/*.resource") if not f.stem.startswith(("_", "."))]

        doc = gherkin_document["feature"]["description"].strip()
        settings = [
            *([Documentation.from_params(doc)] if doc else []),
            LibraryImport.from_params("GherkinParser.Library"),
            *[
                ResourceImport.from_params(f)
                for f in sorted((str(r.relative_to(path.parent).as_posix()) for r in resources), key=str)
            ],
            *(
                # Feature-level tags become suite test tags (+ metadata entry).
                [
                    Metadata.from_params("Tags", ", ".join((r["name"][1:] for r in feature_tags))),
                    TestTags.from_params((r["name"][1:] for r in feature_tags)),
                ]
                if feature_tags
                else []
            ),
        ]

        file = File(
            [
                SettingSection(
                    SectionHeader.from_params(Token.SETTING_HEADER),
                    body=settings,
                ),
                TestCaseSection(
                    SectionHeader.from_params(Token.TESTCASE_HEADER),
                    body=test_cases,
                ),
            ],
            source=str(path),
        )

        # file.save(path.with_suffix(".robot").with_stem("_" + path.name))

        return file, gherkin_document["feature"]["name"]
    except (SystemExit, KeyboardInterrupt):
        raise
    except BaseException as ex:
        # Turn parser errors into Error statements so the file still loads.
        # NOTE(review): if Path(source).resolve() itself raised, "path" is
        # unbound here and File(..., source=str(path)) would raise NameError.
        errors = []

        if isinstance(ex, CompositeParserException):
            for e in ex.errors:
                token = Token(
                    Token.ERROR,
                    "",
                    e.location.line,
                    e.location.column - 1 if e.location is not None else 0,
                    str(e),
                )
                errors.append(Error.from_tokens([token]))
        else:
            token = Token(Token.ERROR, "", 1, 0, f"{type(ex).__qualname__}: {ex}")
            errors.append(Error.from_tokens([token]))

        return (
            File(
                [
                    CommentSection(
                        SectionHeader.from_params(Token.COMMENT_HEADER),
                        body=errors,
                    ),
                ],
                source=str(path),
            ),
            None,
        )
232 |
233 |
def collect_gherkin_suites(path: Path) -> List[robot.running.TestSuite]:
    """Build models for all *.feature / *.feature.md files below ``path``.

    NOTE(review): build_gherkin_model returns ``(model, name)`` tuples, not
    robot.running.TestSuite objects, so the declared return type (and the use
    in GherkinRunner.start_suite) looks wrong -- confirm before relying on it.
    """
    feature_files = [f for f in iter_files(path, "**/*.{feature,feature.md}")]

    return [build_gherkin_model(f) for f in feature_files]
238 |
239 |
class GherkinRunner(SuiteVisitor):
    """Suite visitor that adds Gherkin feature files found below a suite's source directory."""

    def __init__(self, *included: str) -> None:
        super().__init__()
        # NOTE(review): "included" is stored but never used for filtering here.
        self.included = included

    def start_suite(self, suite: robot.running.TestSuite) -> None:
        # NOTE(review): collect_gherkin_suites returns (model, name) tuples,
        # not TestSuite instances -- appending them to suite.suites looks
        # broken; confirm this code path is actually exercised.
        if suite.source and Path(suite.source).is_dir():
            suite.suites = [*suite.suites, *collect_gherkin_suites(Path(suite.source))]

    def end_suite(self, suite: robot.running.TestSuite) -> None:
        pass
251 |
--------------------------------------------------------------------------------
/src/GherkinParser/gherkin_parser.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import ClassVar, Sequence, Union
3 |
4 | from robot.api.interfaces import Parser
5 | from robot.running import TestDefaults, TestSuite
6 | from robot.running.builder.settings import FileSettings
7 | from robot.running.builder.transformers import SuiteBuilder
8 |
9 | from .gherkin_builder import build_gherkin_model
10 |
11 |
class GherkinParser(Parser):
    """Robot Framework custom parser that turns Gherkin feature files into test suites."""

    # File extensions this parser registers for.
    extension: ClassVar[Union[str, Sequence[str]]] = [
        ".feature",
        ".feature.md",
        # ".md",
    ]

    def parse(self, source: Path, defaults: TestDefaults) -> TestSuite:
        """Parse ``source`` into a TestSuite, applying the given defaults."""
        model, name = build_gherkin_model(source)
        suite = TestSuite(name=name or "", source=source)
        # Transform the parsing model into the executable suite in place.
        SuiteBuilder(suite, FileSettings(defaults)).build(model)

        return suite
25 |
--------------------------------------------------------------------------------
/src/GherkinParser/glob_path.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | import re
5 | from pathlib import Path
6 | from typing import Any, Generator, Iterable, Sequence, Union, cast
7 |
8 |
def _glob_pattern_to_re(pattern: str) -> str:
    """Translate a glob pattern (with "{a,b}" groups and "**" globstar) into a
    regex string, anchored with "^...$" and using the (?ms) flags."""
    result = "(?ms)^"

    # True while inside a "{...}" alternation group (commas become "|").
    in_group = False

    i = 0
    while i < len(pattern):
        c = pattern[i]

        if c in "\\/$^+.()=!|":
            # Escape regex metacharacters (the path separator stays literal).
            result += "\\" + c
        elif c == "?":
            # "?" matches exactly one character.
            result += "."
        elif c in "[]":
            # Character classes pass through unchanged.
            result += c
        elif c == "{":
            in_group = True
            result += "("
        elif c == "}":
            in_group = False
            result += ")"
        elif c == ",":
            # Inside a group a comma separates alternatives; outside it is literal.
            if in_group:
                result += "|"
            else:
                result += "\\" + c
        elif c == "*":
            prev_char = pattern[i - 1] if i > 0 else None
            star_count = 1

            # Consume the full run of consecutive stars.
            while (i + 1) < len(pattern) and pattern[i + 1] == "*":
                star_count += 1
                i += 1

            next_char = pattern[i + 1] if (i + 1) < len(pattern) else None

            # "**" bounded by path separators (or the pattern edges) is a globstar.
            is_globstar = (
                star_count > 1 and (prev_char is None or prev_char == "/") and (next_char is None or next_char == "/")
            )

            if is_globstar:
                # Match any number of whole path segments (consumes the "/" too).
                result += "((?:[^/]*(?:/|$))*)"
                i += 1
            else:
                # A plain "*" matches within a single path segment only.
                result += "([^/]*)"
        else:
            result += c

        i += 1

    result += "$"

    return result
62 |
63 |
class Pattern:
    """A compiled glob pattern that matches slash-normalized paths."""

    def __init__(self, pattern: str) -> None:
        self.pattern = pattern
        self._re_pattern = re.compile(_glob_pattern_to_re(pattern))

    def matches(self, path: Union[Path, str, os.PathLike[Any]]) -> bool:
        """Return True when ``path`` (with separators normalized to "/") matches."""
        if not isinstance(path, Path):
            path = Path(path)
        return self._re_pattern.fullmatch(str(path).replace(os.sep, "/")) is not None

    def __str__(self) -> str:
        return self.pattern

    def __repr__(self) -> str:
        # BUGFIX: the closing parenthesis was missing from the repr string.
        return f"{type(self).__qualname__}(pattern={repr(self.pattern)})"
79 |
80 |
def globmatches(pattern: str, path: Union[Path, str, os.PathLike[Any]]) -> bool:
    """Convenience wrapper: compile ``pattern`` and test ``path`` against it."""
    compiled = Pattern(pattern)
    return compiled.matches(path)
83 |
84 |
def iter_files(
    path: Union[Path, str, os.PathLike[str]],
    patterns: Union[Sequence[Union[Pattern, str]], Pattern, str, None] = None,
    ignore_patterns: Union[Sequence[Union[Pattern, str]], Pattern, str, None] = None,
    *,
    absolute: bool = False,
    _base_path: Union[Path, str, os.PathLike[str], None] = None,
) -> Generator[Path, None, None]:
    """Recursively yield files below ``path``.

    ``patterns``/``ignore_patterns`` are glob patterns (or Pattern objects)
    matched against each entry's path relative to ``_base_path`` (the starting
    directory). Entries matching an ignore pattern are pruned, directories
    included; unreadable directories are skipped silently.
    """
    if not isinstance(path, Path):
        path = Path(path or ".")

    if _base_path is None:
        _base_path = path
    elif not isinstance(_base_path, Path):
        # BUGFIX: this previously assigned to "path" instead of "_base_path",
        # clobbering the directory being scanned whenever a string base path
        # was supplied (leading to unbounded recursion).
        _base_path = Path(_base_path)

    # Normalize the pattern arguments to lists of Pattern objects.
    if patterns is not None and isinstance(patterns, (str, Pattern)):
        patterns = [patterns]
    if patterns is not None:
        patterns = [p if isinstance(p, Pattern) else Pattern(p) for p in patterns]

    if ignore_patterns is not None and isinstance(ignore_patterns, (str, Pattern)):
        ignore_patterns = [ignore_patterns]
    if ignore_patterns is not None:
        ignore_patterns = [p if isinstance(p, Pattern) else Pattern(p) for p in ignore_patterns]

    try:
        for f in path.iterdir():
            if ignore_patterns is None or not any(
                p.matches(f.relative_to(_base_path)) for p in cast(Iterable[Pattern], ignore_patterns)
            ):
                if f.is_dir():
                    yield from iter_files(f, patterns, ignore_patterns, absolute=absolute, _base_path=_base_path)
                elif patterns is None or any(
                    p.matches(str(f.relative_to(_base_path))) for p in cast(Iterable[Pattern], patterns)
                ):
                    yield f.absolute() if absolute else f
    except PermissionError:
        # Best effort: silently skip directories we may not read.
        pass
126 |
--------------------------------------------------------------------------------
/src/GherkinParser/py.typed:
--------------------------------------------------------------------------------
1 | # Marker file for PEP 561.
2 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robotcodedev/robotframework-gherkin-parser/dc81a6845d19c447d006aa4b67219354d79fa9c1/tests/__init__.py
--------------------------------------------------------------------------------
/tests/gurke/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robotcodedev/robotframework-gherkin-parser/dc81a6845d19c447d006aa4b67219354d79fa9c1/tests/gurke/__init__.py
--------------------------------------------------------------------------------
/tests/gurke/test_gherkin.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from gurke.errors import ParserError
3 | from gurke.location import Location
4 | from gurke.parser import Parser
5 | from gurke.token_matcher import TokenMatcher
6 | from gurke.token_scanner import TokenScanner
7 |
8 |
def test_parser() -> None:
    """Parsing a minimal feature yields the expected AST dictionary."""
    result = Parser().parse(TokenScanner("Feature: Foo"))
    expected_ast = {
        "comments": [],
        "feature": {
            "keyword": "Feature",
            "language": "en",
            "location": Location(line=1, column=1),
            "name": "Foo",
            "description": "",
            "children": [],
            "tags": [],
        },
    }
    assert result == expected_ast
26 |
27 |
def test_parse_multiple_features() -> None:
    """One Parser instance can be reused for several documents."""
    parser = Parser()
    first = parser.parse(TokenScanner("Feature: 1"))
    second = parser.parse(TokenScanner("Feature: 2"))
    assert first["feature"]["name"] == "1"
    assert second["feature"]["name"] == "2"
35 |
36 |
def test_parse_feature_after_parser_error() -> None:
    """A ParserError must not poison the parser for later valid input."""
    parser = Parser()

    broken_source = (
        "# a comment\n"
        "Feature: Foo\n"
        "  Scenario: Bar\n"
        "    Given x\n"
        "      ```\n"
        "      unclosed docstring\n"
    )
    with pytest.raises(ParserError):
        parser.parse(TokenScanner(broken_source))

    valid_source = (
        "Feature: Foo\n"
        "  Scenario: Bar\n"
        "    Given x\n"
        '      """\n'
        "      closed docstring\n"
        '      """\n'
    )
    feature_file = parser.parse(TokenScanner(valid_source))

    expected_step = {
        "id": "0",
        "text": "x",
        "location": Location(column=5, line=3),
        "keyword": "Given ",
        "keywordType": "Context",
        "docString": {
            "content": "closed docstring",
            "delimiter": '"""',
            "location": Location(column=7, line=4),
        },
    }
    expected_child = {
        "scenario": {
            "id": "1",
            "name": "Bar",
            "description": "",
            "keyword": "Scenario",
            "tags": [],
            "steps": [expected_step],
            "location": Location(column=3, line=2),
            "examples": [],
        }
    }
    assert feature_file["feature"]["children"] == [expected_child]
87 |
88 |
def test_change_the_default_language() -> None:
    """A TokenMatcher built for another dialect switches the language."""
    feature_file = Parser().parse(TokenScanner("Egenskap: i18n support - åæø"), TokenMatcher("no"))
    assert feature_file == {
        "comments": [],
        "feature": {
            "keyword": "Egenskap",
            "language": "no",
            "location": Location(column=1, line=1),
            "name": "i18n support - åæø",
            "description": "",
            "children": [],
            "tags": [],
        },
    }
107 |
--------------------------------------------------------------------------------
/tests/gurke/test_gherkin_in_markdown_token_matcher.py:
--------------------------------------------------------------------------------
1 | from gurke.gherkin_line import GherkinLine
2 | from gurke.location import Location
3 | from gurke.token import Token
4 | from gurke.token_matcher_markdown import GherkinInMarkdownTokenMatcher
5 |
# Default location (line 1, column 1) shared by every test in this module.
location = Location(line=1, column=1)
7 |
8 |
def test_it_matches_feature_line() -> None:
    """A '## Feature:' markdown header is matched as a FeatureLine."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("## Feature: hello", location.line), location=location)
    assert matcher.match_FeatureLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("FeatureLine", "Feature", "hello")
17 |
18 |
def test_it_matches_feature_line_in_french() -> None:
    """Feature headers are matched in non-English dialects too."""
    matcher = GherkinInMarkdownTokenMatcher("fr")
    tok = Token(gherkin_line=GherkinLine("## Fonctionnalité: hello", location.line), location=location)
    assert matcher.match_FeatureLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("FeatureLine", "Fonctionnalité", "hello")
27 |
28 |
def test_it_matches_bullet_step() -> None:
    """A '*' bullet introduces a step; the keyword column is reported."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("""  * Given I have 3 cukes""", location.line), location=location)
    assert matcher.match_StepLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("StepLine", "Given ", "I have 3 cukes")
    assert tok.location.column == 6
38 |
39 |
def test_it_matches_plus_step() -> None:
    """A '+' bullet works exactly like a '*' bullet for steps."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("""  + Given I have 3 cukes""", location.line), location=location)
    assert matcher.match_StepLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("StepLine", "Given ", "I have 3 cukes")
    assert tok.location.column == 6
49 |
50 |
def test_it_matches_hyphen_step() -> None:
    """A '-' bullet works exactly like a '*' bullet for steps."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("""  - Given I have 3 cukes""", location.line), location=location)
    assert matcher.match_StepLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("StepLine", "Given ", "I have 3 cukes")
    assert tok.location.column == 6
60 |
61 |
def test_it_matches_arbitrary_text_as_other() -> None:
    """Plain text that is no Gherkin construct matches as Other."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("Whatever", location.line), location=location)
    assert matcher.match_Other(tok)
    assert tok.matched_type == "Other"
68 |
69 |
def test_it_matches_a_non_keyword_line_as_other() -> None:
    """A bullet whose text does not start with a keyword is Other."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("* whatever Given", location.line), location=location)
    assert matcher.match_Other(tok)
    assert tok.matched_type == "Other"
76 |
77 |
def test_it_matches_a_non_keyword_header_line_as_other() -> None:
    """A markdown header without a Gherkin keyword is Other."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("## The world is wet", location.line), location=location)
    assert matcher.match_Other(tok)
    assert tok.matched_type == "Other"
84 |
85 |
def test_it_matches_3_ticks_doctring_separator() -> None:
    """Three backticks start a docstring; trailing text is the media type."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("""  ```somefink""", location.line), location=location)
    assert matcher.match_DocStringSeparator(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("DocStringSeparator", "```", "somefink")
94 |
95 |
def test_it_matches_4_ticks_doctring_separator() -> None:
    """Four backticks open a docstring; three ticks inside it are plain text.

    Fix: the final indent assertion checked ``t1`` again instead of ``t3``
    (copy-paste bug), so the closing separator's indent was never verified.
    """
    tm = GherkinInMarkdownTokenMatcher("en")
    line = GherkinLine("""  ````""", location.line)
    t1 = Token(gherkin_line=line, location=location)
    assert tm.match_DocStringSeparator(t1)
    assert t1.matched_type == "DocStringSeparator"
    assert t1.matched_keyword == "````"
    assert t1.matched_indent == 2
    assert t1.matched_text == ""

    # While the 4-tick docstring is open, a 3-tick line must not close it.
    t2 = Token(gherkin_line=GherkinLine("""  ```""", location.line), location=location)
    assert tm.match_Other(t2)
    assert t2.matched_type == "Other"
    assert t2.matched_keyword is None
    assert t2.matched_text == "```"

    # A second 4-tick line closes the docstring again.
    t3 = Token(gherkin_line=GherkinLine("""  ````""", location.line), location=location)
    assert tm.match_DocStringSeparator(t3)
    assert t3.matched_type == "DocStringSeparator"
    assert t3.matched_keyword == "````"
    assert t3.matched_indent == 2  # was: t1.matched_indent (copy-paste bug)
    assert t3.matched_text == ""
118 |
119 |
def test_it_matches_table_row_indented_2_spaces() -> None:
    """A pipe-delimited row indented two spaces is a TableRow."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(GherkinLine("""  |foo|bar|""", location.line), location)
    assert matcher.match_TableRow(tok)
    assert tok.matched_type == "TableRow"
    assert tok.matched_keyword == "|"
    assert tok.matched_items == [{"column": 4, "text": "foo"}, {"column": 8, "text": "bar"}]
129 |
130 |
def test_it_matches_table_row_indented_5_spaces() -> None:
    """Five spaces of indentation still counts as a table row."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(GherkinLine("""     |foo|bar|""", location.line), location)
    assert matcher.match_TableRow(tok)
    assert tok.matched_type == "TableRow"
    assert tok.matched_keyword == "|"
    assert tok.matched_items == [{"column": 7, "text": "foo"}, {"column": 11, "text": "bar"}]
140 |
141 |
142 | # TODO a table in a markdown file can start at column 0 but not in a feature file, why?
143 | # def test_it_does_not_match_table_row_indented_1_space() -> None:
144 | # tm = GherkinInMarkdownTokenMatcher("en")
145 | # gherkin_line = GherkinLine("""|foo|bar|""", location.line)
146 | # token = Token(gherkin_line, location)
147 | # assert not tm.match_TableRow(token)
148 |
149 |
def test_it_does_not_match_table_row_indented_6_space() -> None:
    """Six spaces of indentation is too deep to be a table row."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(GherkinLine("""      |foo|bar|""", location.line), location)
    assert not matcher.match_TableRow(tok)
155 |
156 |
def test_it_matches_table_separator_row_as_comment() -> None:
    """A markdown table separator row ('| --- |') is a comment, not data."""
    matcher = GherkinInMarkdownTokenMatcher("en")

    header = Token(GherkinLine("  | h1 | h2 |", location.line), location)
    assert matcher.match_TableRow(header)

    separator = Token(GherkinLine("  | --- | --- |", location.line), location)
    assert not matcher.match_TableRow(separator)
    assert matcher.match_Comment(separator)
168 |
169 |
def test_it_matches_indented_tags() -> None:
    """Backtick-quoted tags on an indented line match as a TagLine."""
    matcher = GherkinInMarkdownTokenMatcher("en")

    tok = Token(GherkinLine("  `@foo`   `@bar`  ", location.line), location)
    assert matcher.match_TagLine(tok)
    assert tok.matched_type == "TagLine"
    assert tok.matched_items == [{"column": 4, "text": "@foo"}, {"column": 11, "text": "@bar"}]
180 |
181 |
def test_it_matches_unindented_tags() -> None:
    """Backtick-quoted tags at column 1 match as a TagLine."""
    matcher = GherkinInMarkdownTokenMatcher("en")

    tok = Token(GherkinLine("`@foo`   `@bar`  ", location.line), location)
    assert matcher.match_TagLine(tok)
    assert tok.matched_type == "TagLine"
    assert tok.matched_items == [{"column": 2, "text": "@foo"}, {"column": 11, "text": "@bar"}]
192 |
193 |
def test_it_matches_RuleLine() -> None:
    """A '## Rule:' header matches as a RuleLine."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("## Rule: the world", location.line), location=location)
    assert matcher.match_RuleLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("RuleLine", "Rule", "the world")
202 |
203 |
def test_it_matches_ScenarioLine() -> None:
    """A '## Scenario:' header matches as a ScenarioLine."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("## Scenario: the one where", location.line), location=location)
    assert matcher.match_ScenarioLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("ScenarioLine", "Scenario", "the one where")
212 |
213 |
def test_it_matches_ScenarioLine_outline() -> None:
    """'## Scenario Outline:' also matches as a ScenarioLine."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("## Scenario Outline: the ones where", location.line), location=location)
    assert matcher.match_ScenarioLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("ScenarioLine", "Scenario Outline", "the ones where")
222 |
223 |
def test_it_matches_backgroundLine() -> None:
    """A '## Background:' header matches as a BackgroundLine."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("## Background: once upon a time", location.line), location=location)
    assert matcher.match_BackgroundLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("BackgroundLine", "Background", "once upon a time")
232 |
233 |
def test_it_matches_ExamplesLine() -> None:
    """A '## Examples:' header matches as an ExamplesLine with empty text."""
    matcher = GherkinInMarkdownTokenMatcher("en")
    tok = Token(gherkin_line=GherkinLine("## Examples: ", location.line), location=location)
    assert matcher.match_ExamplesLine(tok)
    assert (tok.matched_type, tok.matched_keyword, tok.matched_text) == ("ExamplesLine", "Examples", "")
242 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "baseUrl": ".",
4 | "module": "commonjs",
5 | "moduleResolution": "Node",
6 | "target": "ES2021",
7 | "outDir": "out",
8 | "lib": ["es6", "dom", "ES2018", "ES2019", "ES2020", "ES2021"],
9 | "sourceMap": true,
10 | "rootDir": "vscode-client",
11 | "strict": true,
12 | "experimentalDecorators": true,
13 | "emitDecoratorMetadata": true,
14 | //"esModuleInterop": true,
15 | //"allowSyntheticDefaultImports": true,
16 | "noImplicitAny": true,
17 | "noImplicitThis": true,
18 | "noUnusedLocals": true,
19 | //"noUnusedParameters": true,
20 | "noFallthroughCasesInSwitch": true,
21 | "resolveJsonModule": true,
22 | "removeComments": true,
23 | "noImplicitReturns": true
24 | },
25 | "exclude": ["node_modules", ".vscode-test", "out", "htmlcov"]
26 | }
27 |
--------------------------------------------------------------------------------
/vscode-client/extension.ts:
--------------------------------------------------------------------------------
1 | import * as vscode from "vscode";
2 | import { GherkinFormattingEditProvider } from "./formattingEditProvider";
3 | export async function activateAsync(context: vscode.ExtensionContext): Promise {
4 | const robotcode = vscode.extensions.getExtension("d-biehl.robotcode");
5 | if (!robotcode) {
6 | return;
7 | }
8 | await robotcode.activate();
9 | // const robotcodeExtensionApi = robotcode.exports;
10 | // if (!robotcodeExtensionApi) {
11 | // return;
12 | // }
13 |
14 | context.subscriptions.push(
15 | vscode.languages.registerDocumentFormattingEditProvider("gherkin", new GherkinFormattingEditProvider()),
16 | );
17 | }
18 |
19 | function displayProgress(promise: Promise): Thenable {
20 | const progressOptions: vscode.ProgressOptions = {
21 | location: vscode.ProgressLocation.Window,
22 | title: "RobotCode Gherkin extension loading ...",
23 | };
24 | return vscode.window.withProgress(progressOptions, () => promise);
25 | }
26 |
27 | export async function activate(context: vscode.ExtensionContext): Promise {
28 | return displayProgress(activateAsync(context));
29 | }
30 |
31 | export async function deactivate(): Promise {
32 | return Promise.resolve();
33 | }
34 |
--------------------------------------------------------------------------------
/vscode-client/formattingEditProvider.ts:
--------------------------------------------------------------------------------
1 | import * as vscode from "vscode";
2 | import { parseGherkinDocument } from "./parseGherkinDocument";
3 | import { pretty } from "@cucumber/gherkin-utils";
4 |
5 | export class GherkinFormattingEditProvider implements vscode.DocumentFormattingEditProvider {
6 | provideDocumentFormattingEdits(
7 | document: vscode.TextDocument,
8 | options: vscode.FormattingOptions,
9 | token: vscode.CancellationToken,
10 | ): vscode.ProviderResult {
11 | const gherkinSource = document.getText();
12 | const { gherkinDocument } = parseGherkinDocument(gherkinSource);
13 | if (gherkinDocument === undefined) return [];
14 | const newText = pretty(gherkinDocument);
15 | const lines = gherkinSource.split(/\r?\n/);
16 | const line = lines.length - 1;
17 | const character = lines[line].length;
18 | const textEdit: vscode.TextEdit = new vscode.TextEdit(new vscode.Range(0, 0, line, character), newText);
19 | return [textEdit];
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/vscode-client/parseGherkinDocument.ts:
--------------------------------------------------------------------------------
1 | import { AstBuilder, Errors, GherkinClassicTokenMatcher, Parser } from "@cucumber/gherkin";
2 | import { GherkinDocument, IdGenerator } from "@cucumber/messages";
3 |
// Id generator handed to each AstBuilder created in parseGherkinDocument.
const uuidFn = IdGenerator.uuid();
5 |
/**
 * Result of {@link parseGherkinDocument}: a clean parse sets only
 * `gherkinDocument`; a recovered parse sets both the partial document and
 * the error; an unrecoverable parse sets only `error`.
 */
export type ParseResult = {
  gherkinDocument?: GherkinDocument;
  error?: Errors.GherkinException;
};
10 |
11 | /**
12 | * Incrementally parses a Gherkin Document, allowing some syntax errors to occur.
13 | */
14 | export function parseGherkinDocument(gherkinSource: string): ParseResult {
15 | const builder = new AstBuilder(uuidFn);
16 | const matcher = new GherkinClassicTokenMatcher();
17 | const parser = new Parser(builder, matcher);
18 | try {
19 | return {
20 | gherkinDocument: parser.parse(gherkinSource),
21 | };
22 | } catch (error) {
23 | let gherkinDocument: GherkinDocument;
24 |
25 | for (let i = 0; i < 10; i++) {
26 | gherkinDocument = builder.getResult();
27 | if (gherkinDocument) {
28 | return {
29 | gherkinDocument: gherkinDocument,
30 | error: error as Errors.GherkinException,
31 | };
32 | }
33 |
34 | try {
35 | builder.endRule();
36 | } catch (ignore) {
37 | // no-op
38 | }
39 | }
40 |
41 | return {
42 | error: error as Errors.GherkinException,
43 | };
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/vscode-client/test/runTest.ts:
--------------------------------------------------------------------------------
1 | import * as path from "path";
2 |
3 | import { runTests } from "@vscode/test-electron";
4 |
5 | async function main() {
6 | try {
7 | // The folder containing the Extension Manifest package.json
8 | // Passed to `--extensionDevelopmentPath`
9 | const extensionDevelopmentPath: string = path.resolve(__dirname, "../../");
10 |
11 | // The path to the extension test script
12 | // Passed to --extensionTestsPath
13 | const extensionTestsPath: string = path.resolve(__dirname, "./suite/index");
14 |
15 | // Download VS Code, unzip it and run the integration test
16 | await runTests({ extensionDevelopmentPath, extensionTestsPath });
17 | } catch (err) {
18 | console.error("Failed to run tests");
19 | process.exit(1);
20 | }
21 | }
22 |
23 | void main();
24 |
--------------------------------------------------------------------------------
/webpack.config.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 |
3 | const path = require("path");
4 |
5 | /** @type {import('webpack').Configuration} */
6 | const config = {
7 | target: "node", // vscode extensions run in a Node.js-context 📖 -> https://webpack.js.org/configuration/node/
8 |
9 | entry: "./vscode-client/extension.ts", // the entry point of this extension, 📖 -> https://webpack.js.org/configuration/entry-context/
10 | output: {
11 | // the bundle is stored in the 'dist' folder (check package.json), 📖 -> https://webpack.js.org/configuration/output/
12 | path: path.resolve(__dirname, "out"),
13 | filename: "extension.js",
14 | libraryTarget: "commonjs2",
15 | devtoolModuleFilenameTemplate: "../[resource-path]",
16 | clean: true,
17 | },
18 | devtool: "source-map",
19 | externals: {
20 | vscode: "commonjs vscode", // the vscode-module is created on-the-fly and must be excluded. Add other modules that cannot be webpack'ed, 📖 -> https://webpack.js.org/configuration/externals/
21 | },
22 | resolve: {
23 | // support reading TypeScript and JavaScript files, 📖 -> https://github.com/TypeStrong/ts-loader
24 | extensions: [".ts", ".js"],
25 | },
26 | module: {
27 | rules: [
28 | {
29 | test: /\.ts$/,
30 | exclude: /node_modules/,
31 | use: [
32 | {
33 | loader: "ts-loader",
34 | },
35 | ],
36 | },
37 | ],
38 | },
39 | };
40 | module.exports = config;
41 |
--------------------------------------------------------------------------------