├── .czrc
├── .gitattributes
├── .github
└── workflows
│ ├── ci.yml
│ └── release.yml
├── .gitignore
├── .husky
├── commit-msg
└── pre-commit
├── .npmrc
├── .prettierignore
├── CONTRIBUTING.md
├── LICENSE.txt
├── NOTICE.txt
├── README.md
├── examples
├── .gitignore
├── grammars
│ ├── .gitignore
│ ├── README.md
│ ├── calculator
│ │ ├── calculator_embedded_actions.js
│ │ ├── calculator_pure_grammar.js
│ │ └── calculator_spec.js
│ ├── css
│ │ ├── css.js
│ │ └── css_spec.js
│ ├── csv
│ │ ├── csv.js
│ │ ├── csv_spec.js
│ │ └── sample.csv
│ ├── ecma5
│ │ ├── ecma5_api.js
│ │ ├── ecma5_lexer.js
│ │ ├── ecma5_parser.js
│ │ ├── ecma5_spec.js
│ │ └── ecma5_tokens.js
│ ├── graphql
│ │ ├── diagrams.html
│ │ ├── graphql.js
│ │ └── graphql_spec.js
│ ├── json
│ │ ├── json.js
│ │ ├── json_spec.js
│ │ ├── json_with_comments.js
│ │ └── json_with_comments_spec.js
│ ├── package.json
│ ├── tinyc
│ │ ├── tinyc.js
│ │ └── tinyc_spec.js
│ └── xml
│ │ ├── xml_api.js
│ │ ├── xml_lexer.js
│ │ ├── xml_parser.js
│ │ └── xml_spec.js
├── implementation_languages
│ ├── README.md
│ ├── impl_lang_spec.js
│ ├── modern_ecmascript
│ │ └── modern_ecmascript_json.mjs
│ ├── package.json
│ ├── tsconfig.json
│ └── typescript
│ │ ├── json_cst.d.ts
│ │ ├── scripts
│ │ └── gen_dts_signatures.js
│ │ └── typescript_json.ts
├── lexer
│ ├── LICENSE
│ ├── README.md
│ ├── custom_errors
│ │ ├── custom_errors.js
│ │ └── custom_errors_spec.js
│ ├── custom_patterns
│ │ ├── custom_patterns.js
│ │ ├── custom_patterns_payloads.js
│ │ ├── custom_patterns_payloads_spec.js
│ │ └── custom_patterns_spec.js
│ ├── keywords_vs_identifiers
│ │ ├── keywords_vs_identifiers.js
│ │ └── keywords_vs_identifiers_spec.js
│ ├── multi_mode_lexer
│ │ ├── multi_mode_lexer.js
│ │ └── multi_mode_lexer_spec.js
│ ├── package.json
│ ├── python_indentation
│ │ ├── python_indentation.js
│ │ └── python_indentation_spec.js
│ └── token_groups
│ │ ├── token_group_spec.js
│ │ └── token_groups.js
├── parser
│ ├── .gitignore
│ ├── README.md
│ ├── backtracking
│ │ ├── backtracking.js
│ │ └── backtracking_spec.js
│ ├── content_assist
│ │ ├── README.md
│ │ ├── content_assist_complex.js
│ │ ├── content_assist_complex_spec.js
│ │ ├── content_assist_simple.js
│ │ └── content_assist_simple_spec.js
│ ├── custom_errors
│ │ ├── custom_errors.js
│ │ └── custom_errors_spec.js
│ ├── diagrams
│ │ ├── README.md
│ │ ├── creating_html_file.js
│ │ ├── dynamically_rendering.html
│ │ └── grammar.js
│ ├── dynamic_tokens
│ │ ├── dynamic_delimiters.js
│ │ └── dynamic_delimiters_spec.js
│ ├── inheritance
│ │ ├── inheritance.js
│ │ └── inheritance_spec.js
│ ├── minification
│ │ └── README.md
│ ├── multi_start_rules
│ │ ├── multi_start_rules.js
│ │ └── multi_start_rules_spec.js
│ ├── package.json
│ ├── parametrized_rules
│ │ ├── parametrized.js
│ │ └── parametrized_spec.js
│ ├── predicate_lookahead
│ │ ├── predicate_lookahead.js
│ │ └── predicate_lookahead_spec.js
│ └── versioning
│ │ ├── versioning.js
│ │ └── versioning_spec.js
├── tutorial
│ ├── README.md
│ ├── package.json
│ ├── step1_lexing
│ │ ├── main.js
│ │ ├── step1_lexing.js
│ │ └── step1_lexing_spec.js
│ ├── step2_parsing
│ │ ├── main.js
│ │ ├── step2_parsing.js
│ │ └── step2_parsing_spec.js
│ ├── step3_actions
│ │ ├── main.js
│ │ ├── step3_actions_spec.js
│ │ ├── step3a_actions_visitor.js
│ │ └── step3b_actions_embedded.js
│ └── step4_error_recovery
│ │ ├── main.js
│ │ ├── step4_error_recovery.js
│ │ └── step4_error_recovery_spec.js
└── webpack
│ └── README.md
├── lerna.json
├── package.json
├── packages
├── chevrotain
│ ├── .c8rc.json
│ ├── .mocharc.cjs
│ ├── BREAKING_CHANGES.md
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── benchmark_web
│ │ ├── README.md
│ │ ├── index_latest.html
│ │ ├── index_next.html
│ │ ├── lib
│ │ │ ├── bench_logic.js
│ │ │ └── iframe_loader.js
│ │ ├── parsers
│ │ │ ├── api.js
│ │ │ ├── css
│ │ │ │ ├── 1K_css.js
│ │ │ │ ├── css.html
│ │ │ │ └── css_parser.js
│ │ │ ├── ecma5
│ │ │ │ ├── ecma5.html
│ │ │ │ ├── ecma5_lexer.js
│ │ │ │ ├── ecma5_parser.js
│ │ │ │ └── ecma5_tokens.js
│ │ │ ├── esm_wrappers
│ │ │ │ ├── chevrotain_latest.mjs
│ │ │ │ └── chevrotain_next.mjs
│ │ │ ├── json
│ │ │ │ ├── 1K_json.js
│ │ │ │ ├── json.html
│ │ │ │ └── json_parser.js
│ │ │ ├── options.js
│ │ │ ├── worker_api.js
│ │ │ └── worker_impel.js
│ │ └── style.css
│ ├── chevrotain.d.ts
│ ├── diagrams
│ │ ├── README.md
│ │ ├── diagrams.css
│ │ ├── src
│ │ │ ├── diagrams_behavior.js
│ │ │ ├── diagrams_builder.js
│ │ │ ├── diagrams_serializer.js
│ │ │ └── main.js
│ │ └── vendor
│ │ │ └── railroad-diagrams.js
│ ├── package.json
│ ├── scripts
│ │ ├── version-config.js
│ │ └── version-update.js
│ ├── src
│ │ ├── api.ts
│ │ ├── diagrams
│ │ │ └── render_public.ts
│ │ ├── lang
│ │ │ └── lang_extensions.ts
│ │ ├── parse
│ │ │ ├── constants.ts
│ │ │ ├── cst
│ │ │ │ ├── cst.ts
│ │ │ │ └── cst_visitor.ts
│ │ │ ├── errors_public.ts
│ │ │ ├── exceptions_public.ts
│ │ │ ├── grammar
│ │ │ │ ├── checks.ts
│ │ │ │ ├── first.ts
│ │ │ │ ├── follow.ts
│ │ │ │ ├── gast
│ │ │ │ │ └── gast_resolver_public.ts
│ │ │ │ ├── interpreter.ts
│ │ │ │ ├── keys.ts
│ │ │ │ ├── llk_lookahead.ts
│ │ │ │ ├── lookahead.ts
│ │ │ │ ├── resolver.ts
│ │ │ │ ├── rest.ts
│ │ │ │ └── types.ts
│ │ │ └── parser
│ │ │ │ ├── parser.ts
│ │ │ │ ├── traits
│ │ │ │ ├── README.md
│ │ │ │ ├── context_assist.ts
│ │ │ │ ├── error_handler.ts
│ │ │ │ ├── gast_recorder.ts
│ │ │ │ ├── lexer_adapter.ts
│ │ │ │ ├── looksahead.ts
│ │ │ │ ├── parser_traits.ts
│ │ │ │ ├── perf_tracer.ts
│ │ │ │ ├── recognizer_api.ts
│ │ │ │ ├── recognizer_engine.ts
│ │ │ │ ├── recoverable.ts
│ │ │ │ └── tree_builder.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils
│ │ │ │ └── apply_mixins.ts
│ │ ├── scan
│ │ │ ├── lexer.ts
│ │ │ ├── lexer_errors_public.ts
│ │ │ ├── lexer_public.ts
│ │ │ ├── reg_exp.ts
│ │ │ ├── reg_exp_parser.ts
│ │ │ ├── tokens.ts
│ │ │ ├── tokens_constants.ts
│ │ │ └── tokens_public.ts
│ │ ├── text
│ │ │ └── range.ts
│ │ └── version.ts
│ ├── test
│ │ ├── deprecation_spec.ts
│ │ ├── diagrams
│ │ │ └── render_spec.ts
│ │ ├── full_flow
│ │ │ ├── backtracking
│ │ │ │ ├── backtracking_parser.ts
│ │ │ │ └── backtracking_parser_spec.ts
│ │ │ ├── ecma_quirks
│ │ │ │ ├── ecma_quirks.ts
│ │ │ │ └── ecma_quirks_spec.ts
│ │ │ ├── error_recovery
│ │ │ │ ├── sql_statements
│ │ │ │ │ ├── sql_recovery_parser.ts
│ │ │ │ │ ├── sql_recovery_spec.ts
│ │ │ │ │ └── sql_recovery_tokens.ts
│ │ │ │ └── switch_case
│ │ │ │ │ ├── switchcase_recovery_parser.ts
│ │ │ │ │ ├── switchcase_recovery_spec.ts
│ │ │ │ │ └── switchcase_recovery_tokens.ts
│ │ │ └── parse_tree.ts
│ │ ├── parse
│ │ │ ├── cst_spec.ts
│ │ │ ├── cst_visitor_spec.ts
│ │ │ ├── exceptions_spec.ts
│ │ │ ├── grammar
│ │ │ │ ├── checks_spec.ts
│ │ │ │ ├── first_spec.ts
│ │ │ │ ├── follow_spec.ts
│ │ │ │ ├── interperter_spec.ts
│ │ │ │ ├── lookahead_spec.ts
│ │ │ │ └── resolver_spec.ts
│ │ │ ├── predicate_spec.ts
│ │ │ ├── recognizer
│ │ │ │ ├── infinite_loop_spec.ts
│ │ │ │ ├── recognizer_config_spec.ts
│ │ │ │ └── rules_override_spec.ts
│ │ │ ├── recognizer_lookahead_spec.ts
│ │ │ ├── recognizer_spec.ts
│ │ │ └── traits
│ │ │ │ └── perf_tracer_spec.ts
│ │ ├── scan
│ │ │ ├── custom_token_spec.ts
│ │ │ ├── first_char_spec.ts
│ │ │ ├── lexer_errors_public_spec.ts
│ │ │ ├── lexer_spec.ts
│ │ │ ├── perf_tracer_spec.ts
│ │ │ ├── regexp_spec.ts
│ │ │ ├── skip_validations_spec.ts
│ │ │ └── token_spec.ts
│ │ ├── test.config.mjs
│ │ ├── text
│ │ │ └── range_spec.ts
│ │ └── utils
│ │ │ ├── builders.ts
│ │ │ └── matchers.ts
│ └── tsconfig.json
├── cst-dts-gen-test
│ ├── .c8rc.json
│ ├── .mocharc.cjs
│ ├── package.json
│ ├── scripts
│ │ └── update-snapshots.js
│ ├── test
│ │ ├── options_spec.ts
│ │ ├── sample_test.ts
│ │ └── snapshots
│ │ │ ├── alternation
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── alternation_label
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── nonterminal
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── nonterminal_label
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── option
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── repetition
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── repetition_mandatory
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── repetition_mandatory_sep
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── repetition_sep
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ ├── terminal
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ │ │ └── terminal_label
│ │ │ ├── input.ts
│ │ │ ├── output.d.ts
│ │ │ └── sample_spec.ts
│ └── tsconfig.json
├── cst-dts-gen
│ ├── package.json
│ ├── src
│ │ ├── api.ts
│ │ ├── generate.ts
│ │ └── model.ts
│ └── tsconfig.json
├── gast
│ ├── .c8rc.json
│ ├── .mocharc.cjs
│ ├── package.json
│ ├── src
│ │ ├── api.ts
│ │ ├── helpers.ts
│ │ ├── model.ts
│ │ └── visitor.ts
│ ├── test
│ │ ├── helpers_spec.ts
│ │ ├── model_spec.ts
│ │ └── visitor_spec.ts
│ └── tsconfig.json
├── regexp-to-ast
│ ├── .c8rc.json
│ ├── .mocharc.cjs
│ ├── package.json
│ ├── src
│ │ ├── api.ts
│ │ ├── base-regexp-visitor.ts
│ │ ├── character-classes.ts
│ │ ├── regexp-parser.ts
│ │ └── utils.ts
│ ├── test
│ │ ├── parser.spec.ts
│ │ └── visitor.spec.ts
│ ├── tsconfig.json
│ └── types.d.ts
├── types
│ ├── api.d.ts
│ ├── package.json
│ ├── scripts
│ │ ├── api-site-upload.sh
│ │ └── update-api-docs.js
│ ├── tsconfig.json
│ └── typedoc.json
├── utils
│ ├── .c8rc.json
│ ├── .mocharc.cjs
│ ├── package.json
│ ├── src
│ │ ├── api.ts
│ │ ├── print.ts
│ │ ├── timer.ts
│ │ └── to-fast-properties.ts
│ ├── test
│ │ └── timer_spec.ts
│ └── tsconfig.json
└── website
│ ├── .gitignore
│ ├── docs
│ ├── .vuepress
│ │ └── config.js
│ ├── FAQ.md
│ ├── README.md
│ ├── changes
│ │ ├── BREAKING_CHANGES.md
│ │ └── CHANGELOG.md
│ ├── features
│ │ ├── backtracking.md
│ │ ├── blazing_fast.md
│ │ ├── custom_errors.md
│ │ ├── custom_token_patterns.md
│ │ ├── easy_debugging.md
│ │ ├── fault_tolerance.md
│ │ ├── gates.md
│ │ ├── grammar_inheritance.md
│ │ ├── images
│ │ │ └── benchmark_chrome67.png
│ │ ├── lexer_modes.md
│ │ ├── llk.md
│ │ ├── multiple_start_rules.md
│ │ ├── parameterized_rules.md
│ │ ├── position_tracking.md
│ │ ├── regexp.md
│ │ ├── separation.md
│ │ ├── syntactic_content_assist.md
│ │ ├── syntax_diagrams.md
│ │ ├── token_alternative_matches.md
│ │ ├── token_categories.md
│ │ ├── token_grouping.md
│ │ └── token_skipping.md
│ ├── guide
│ │ ├── concrete_syntax_tree.md
│ │ ├── custom_token_patterns.md
│ │ ├── generating_syntax_diagrams.md
│ │ ├── initialization_performance.md
│ │ ├── internals.md
│ │ ├── introduction.md
│ │ ├── performance.md
│ │ ├── resolving_grammar_errors.md
│ │ ├── resolving_lexer_errors.md
│ │ └── syntactic_content_assist.md
│ └── tutorial
│ │ ├── step0_introduction.md
│ │ ├── step1_lexing.md
│ │ ├── step2_parsing.md
│ │ ├── step3_adding_actions_root.md
│ │ ├── step3a_adding_actions_visitor.md
│ │ ├── step3b_adding_actions_embedded.md
│ │ └── step4_fault_tolerance.md
│ ├── package.json
│ └── scripts
│ ├── version-config.js
│ ├── version-update.js
│ └── website-upload.sh
├── pnpm-lock.yaml
├── pnpm-workspace.yaml
├── renovate.json5
├── tsconfig.base.json
└── tsconfig.json
/.czrc:
--------------------------------------------------------------------------------
1 | {
2 | "path": "cz-conventional-changelog"
3 | }
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | * text=auto eol=lf
2 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Continuous Integration
2 | on:
3 | push:
4 | branches:
5 | - master*
6 | pull_request:
7 | branches:
8 | - master*
9 | jobs:
10 | full_build:
11 | name: Full Build (node ${{ matrix.node_version }})
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | node_version:
16 | - 18.x
17 | - 20.x
18 | - 22.x
19 | steps:
20 | - uses: actions/checkout@v4
21 |
22 | - uses: actions/setup-node@v4
23 | with:
24 | node-version: ${{ matrix.node_version }}
25 |
26 |       # pnpm version is taken from package.json `packageManager`
27 | - uses: pnpm/action-setup@v4
28 |
29 | - name: Install dependencies
30 | run: pnpm install
31 |
32 | - name: Build
33 | run: pnpm run ci
34 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 | on:
3 | push:
4 | tags:
5 | - "v*"
6 | jobs:
7 | release:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/checkout@v4
11 | - uses: actions/setup-node@v4
12 | with:
13 | node-version: lts/*
14 | - uses: pnpm/action-setup@v4
15 | with:
16 | version: 9.12.2
17 | - name: Install dependencies
18 | run: pnpm install
19 | - name: Build
20 | run: pnpm run ci
21 | - name: npm auth setup
22 | run: |
23 | echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> .npmrc
24 | # avoids lerna validations failing the release due to changed tracked file.
25 | git update-index --assume-unchanged ".npmrc"
26 | pnpm whoami
27 | env:
28 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
29 | - name: Publish to NPM
30 | run: pnpm run release:publish
31 | env:
32 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
33 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea
2 | node_modules
3 | /.idea/dictionaries/
4 | /.idea/
5 | /.tscache
6 | .baseDir.ts
7 | /bower_components
8 | /npm-debug.log
9 | .pnpm-debug.log
10 | yarn-error.log
11 | .DS_Store
12 |
13 | # Chevrotain packages ignores
14 | # as prettier --ignore-path does not support nested folders ignores
15 | /packages/website/docs/.vuepress/dist/
16 | /packages/types/dev
17 | /packages/types/gh-pages
18 | /packages/**/lib
19 | /packages/**/coverage
20 | /packages/**/.nyc_output
21 | /packages/chevrotain/.nyc_output
22 | /packages/chevrotain/coverage
23 | /packages/chevrotain/scripts/gh-pages
24 | /packages/chevrotain/gh-pages
25 |
26 | # Examples ignore
27 | /examples/webpack/lib
28 | /examples/implementation_languages/typescript/*.js
29 | /examples/implementation_languages/coffeescript/*.js
30 | /examples/parser/minification/gen/
31 |
32 | # used to hold "current" master version for relative performance tests.
33 | /packages/chevrotain/benchmark_web/chevrotain.js
34 |
35 | # Coverage directory used by tools like istanbul
36 | coverage
37 |
38 | # nyc test coverage
39 | .nyc_output
40 | package-lock.json
41 | yarn.lock
42 | lerna-debug.log
--------------------------------------------------------------------------------
/.husky/commit-msg:
--------------------------------------------------------------------------------
1 |
2 | pnpm commitlint --edit $1
3 |
--------------------------------------------------------------------------------
/.husky/pre-commit:
--------------------------------------------------------------------------------
1 |
2 | pnpm lint-staged
3 |
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | # https://github.com/pnpm/pnpm/issues/1755#issuecomment-1173130356
2 | public-hoist-pattern[]=vue
 3 | # only one version for the whole project (oldest nodejs LTS)
4 | public-hoist-pattern[]=@types/node
5 |
6 | # because of vuepress deps mess
7 | strict-peer-dependencies=false
8 |
9 |
10 |
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | /.idea
2 | **/node_modules/
3 |
4 | # build artifacts
5 | /packages/website/docs/.vuepress/.temp
6 | /packages/website/docs/.vuepress/.cache
7 | /packages/website/docs/.vuepress/dist/
8 | /packages/types/dev
9 | /packages/**/lib
10 | /packages/**/coverage
11 | /packages/**/.nyc_output
12 | /packages/**/gh-pages
13 | /packages/chevrotain/temp
14 |
15 | /examples/webpack/lib
16 | /examples/implementation_languages/typescript/*.js
17 | /examples/implementation_languages/typescript/*.d.ts
18 | /examples/implementation_languages/coffeescript/*.js
19 | /examples/parser/minification/gen/
20 |
21 | # prettier ignores
22 | /packages/cst-dts-gen-test/test/snapshots/**/output.d.ts
23 |
24 | # used to hold "current" master version for relative performance tests.
25 | /packages/chevrotain/benchmark_web/chevrotain.js
26 |
27 | # Coverage directory used by tools like istanbul
28 | coverage
29 |
30 | # nyc test coverage
31 | .nyc_output
--------------------------------------------------------------------------------
/NOTICE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2021 the original author or authors from the Chevrotain project
2 | Copyright (c) 2015-2020 SAP SE or an SAP affiliate company.
3 |
--------------------------------------------------------------------------------
/examples/.gitignore:
--------------------------------------------------------------------------------
1 | # examples should always use latest chevrotain, we don't want to use lock files here.
2 | pnpm-lock.yaml
3 |
--------------------------------------------------------------------------------
/examples/grammars/.gitignore:
--------------------------------------------------------------------------------
1 | graphql/gen/graphql-bundled.min.mjs
--------------------------------------------------------------------------------
/examples/grammars/README.md:
--------------------------------------------------------------------------------
1 | # Grammars Examples
2 |
3 | To run the tests for all the grammars
4 |
5 | - `npm install` (only once)
6 | - `npm test`
7 |
--------------------------------------------------------------------------------
/examples/grammars/calculator/calculator_spec.js:
--------------------------------------------------------------------------------
1 | import assert from "assert";
2 | import { parseEmbedded } from "./calculator_embedded_actions.js";
3 | import { parsePure } from "./calculator_pure_grammar.js";
4 |
5 | describe("The Calculator Grammar", () => {
6 | context("Embedded Actions", () => {
7 | it("can calculate an expression", () => {
8 | assert.equal(parseEmbedded("1 + 2").value, 3);
9 | });
10 |
11 | it("can calculate an expression with operator precedence", () => {
12 | // if it was evaluated left to right without taking into account precedence the result would have been 9
13 | assert.equal(parseEmbedded("1 + 2 * 3").value, 7);
14 | });
15 |
16 | it("can calculate an expression with operator precedence #2", () => {
17 | assert.equal(parseEmbedded("(1 + 2) * 3").value, 9);
18 | });
19 |
20 | it("can calculate an expression with many parenthesis", () => {
21 | assert.equal(parseEmbedded("((((666))))").value, 666);
22 | });
23 |
24 | it("can calculate an expression with power function", () => {
25 | assert.equal(parseEmbedded("1 + power(2,2)").value, 5);
26 | });
27 | });
28 |
29 | context("Pure Grammar with Separated Semantics", () => {
30 | it("can calculate an expression", () => {
31 | assert.equal(parsePure("1 + 2").value, 3);
32 | });
33 |
34 | it("can calculate an expression with operator precedence", () => {
35 | // if it was evaluated left to right without taking into account precedence the result would have been 9
36 | assert.equal(parsePure("1 + 2 * 3").value, 7);
37 | });
38 |
39 | it("can calculate an expression with operator precedence #2", () => {
40 | assert.equal(parsePure("(1 + 2) * 3").value, 9);
41 | });
42 |
43 | it("can calculate an expression with many parenthesis", () => {
44 | assert.equal(parsePure("((((666))))").value, 666);
45 | });
46 |
47 | it("can calculate an expression with power function", () => {
48 | assert.equal(parsePure("1 + power(2,2)").value, 5);
49 | });
50 | });
51 | });
52 |
--------------------------------------------------------------------------------
/examples/grammars/csv/csv_spec.js:
--------------------------------------------------------------------------------
1 | import path from "path";
2 | import { fileURLToPath } from "url";
3 | import fs from "fs";
4 | import assert from "assert";
5 | import { parseCsv } from "./csv.js";
6 |
7 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
8 |
9 | describe("The CSV Grammar", () => {
10 | const samplePath = path.resolve(__dirname, "./sample.csv");
11 | const sampleCsvText = fs.readFileSync(samplePath, "utf8").toString();
12 |
13 | it("can parse a simple CSV without errors", () => {
14 | const lexAndParseResult = parseCsv(sampleCsvText);
15 |
16 | assert.equal(lexAndParseResult.lexResult.errors.length, 0);
17 | assert.equal(lexAndParseResult.parseErrors.length, 0);
18 | });
19 | });
20 |
--------------------------------------------------------------------------------
/examples/grammars/csv/sample.csv:
--------------------------------------------------------------------------------
1 | Year,Make,Model,Description,Price
2 | 1997,Ford,E350,"ac, abs, moon",3000.00
3 | 1999,Chevy,"Venture ""Extended Edition""","",4900.00
4 | 1999,Chevy,"Venture ""Extended Edition, Very Large""",,5000.00
5 | 1996,Jeep,Grand Cherokee,"MUST SELL!
6 | air, moon roof, loaded",4799.00
7 |
--------------------------------------------------------------------------------
/examples/grammars/ecma5/ecma5_api.js:
--------------------------------------------------------------------------------
1 | import { tokenize } from "./ecma5_lexer.js";
2 | import { ECMAScript5Parser } from "./ecma5_parser.js";
3 |
4 | const parserInstance = new ECMAScript5Parser();
5 |
6 | export function parse(str) {
7 | const tokens = tokenize(str);
8 | parserInstance.input = tokens;
9 | parserInstance.orgText = str;
10 | parserInstance.Program();
11 |
12 | if (parserInstance.errors.length > 0) {
13 | throw Error("Sad Sad Panda");
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/examples/grammars/graphql/diagrams.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | GraphQL Syntax Diagrams
7 |
8 |
9 |
10 |
11 |
30 |
31 |
32 |
51 |
52 |
--------------------------------------------------------------------------------
/examples/grammars/graphql/graphql_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parse } from "./graphql.js";
3 |
4 | describe("The GraphQL Grammar", () => {
5 | it("can parse a simple GraphQL without errors", () => {
6 | const input = `
7 | {
8 | hero {
9 | name
10 | # Queries can have comments!
11 | friends {
12 | name
13 | }
14 | }
15 | }
16 | `;
17 | const parseResult = parse(input);
18 | expect(parseResult.lexErrors).to.be.empty;
19 | expect(parseResult.parseErrors).to.be.empty;
20 | });
21 |
22 | it("can parse a simple GraphQL without errors #2", () => {
23 | const input = `
24 | {
25 | human(id: "1000") {
26 | name
27 | height(unit: FOOT)
28 | }
29 | }
30 | `;
31 | const parseResult = parse(input);
32 | expect(parseResult.lexErrors).to.be.empty;
33 | expect(parseResult.parseErrors).to.be.empty;
34 | });
35 |
36 | it("can parse a simple GraphQL without errors #3", () => {
37 | const input = `
38 | type Human implements Character {
39 | id: ID!
40 | name: String!
41 | friends: [Character]
42 | appearsIn: [Episode]!
43 | starships: [Starship]
44 | totalCredits: Int
45 | }
46 |
47 | type Droid implements Character {
48 | id: ID!
49 | name: String!
50 | friends: [Character]
51 | appearsIn: [Episode]!
52 | primaryFunction: String
53 | }
54 | `;
55 | const parseResult = parse(input);
56 | expect(parseResult.lexErrors).to.be.empty;
57 | expect(parseResult.parseErrors).to.be.empty;
58 | });
59 | });
60 |
--------------------------------------------------------------------------------
/examples/grammars/json/json_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parse } from "./json.js";
3 |
4 | describe("The JSON Grammar", () => {
5 | it("can parse a simple Json without errors", () => {
6 | const inputText = '{ "arr": [1,2,3], "obj": {"num":666}}';
7 | const parseResult = parse(inputText);
8 |
9 | expect(parseResult.lexErrors).to.be.empty;
10 | expect(parseResult.parseErrors).to.be.empty;
11 | });
12 | });
13 |
--------------------------------------------------------------------------------
/examples/grammars/json/json_with_comments.js:
--------------------------------------------------------------------------------
1 | import { tokenMatcher, Lexer, createToken } from "chevrotain";
2 | import { jsonTokens, JsonParser } from "./json.js";
3 |
4 | // Upgrade the lexer to support single line comments.
5 | const Comment = createToken({
6 | name: "Comment",
7 | pattern: /\/\/.*/,
8 | });
9 |
10 | const allTokens = jsonTokens.concat([Comment]);
11 |
12 | const JsonWithCommentsLexer = new Lexer(allTokens);
13 |
14 | // ----------------- parser -----------------
15 |
16 | /**
17 | * Our JsonWithComments Parser does not need any new parsing rules.
18 |  * It only overrides private methods to automatically collect comments.
19 | */
20 | class JsonParserWithComments extends JsonParser {
21 | constructor() {
22 | super();
23 | // We did not define any new rules so no need to call performSelfAnalysis
24 | }
25 |
26 | LA(howMuch) {
27 | // Skip Comments during regular parsing as we wish to auto-magically insert them
28 | // into our CST
29 | while (tokenMatcher(super.LA(howMuch), Comment)) {
30 | super.consumeToken();
31 | }
32 |
33 | return super.LA(howMuch);
34 | }
35 |
36 | cstPostTerminal(key, consumedToken) {
37 | super.cstPostTerminal(key, consumedToken);
38 |
39 | let lookBehindIdx = -1;
40 | let prevToken = super.LA(lookBehindIdx);
41 |
42 | // After every Token (terminal) is successfully consumed
43 |     // We will add all the comments that appeared before it to the CST (Parse Tree)
44 | while (tokenMatcher(prevToken, Comment)) {
45 | super.cstPostTerminal(Comment.name, prevToken);
46 | lookBehindIdx--;
47 | prevToken = super.LA(lookBehindIdx);
48 | }
49 | }
50 | }
51 |
52 | // ----------------- wrapping it all together -----------------
53 |
54 | // reuse the same parser instance.
55 | const parser = new JsonParserWithComments([]);
56 |
57 | export function parse(text) {
58 | const lexResult = JsonWithCommentsLexer.tokenize(text);
59 | // setting a new input will RESET the parser instance's state.
60 | parser.input = lexResult.tokens;
61 | // any top level rule may be used as an entry point
62 | const cst = parser.json();
63 |
64 | return {
65 | cst: cst,
66 | lexErrors: lexResult.errors,
67 | parseErrors: parser.errors,
68 | };
69 | }
70 |
--------------------------------------------------------------------------------
/examples/grammars/json/json_with_comments_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parse } from "./json_with_comments.js";
3 |
4 | describe("The JSON Grammar with comments", () => {
5 | it("can parse a simple Json without errors", () => {
6 | const inputText = `
 7 |     // Top Level Comment
8 | {
9 | // nested inner comment
10 | "arr": [
11 | 1,
12 | 2,
13 | 3
14 | ],
15 | "obj": {
16 | "num":666
17 | }
18 | }`;
19 | const parseResult = parse(inputText);
20 |
21 | expect(parseResult.lexErrors).to.be.empty;
22 | expect(parseResult.parseErrors).to.be.empty;
23 |
24 | const cst = parseResult.cst;
25 |
26 | // The top level comment was added to the top level Object CST.
27 | const topLevelComment = cst.children.object[0].children.Comment[0];
28 |     expect(topLevelComment.image).to.eql("// Top Level Comment");
29 |
30 | // The nested comment was added to the CST of the matching objectItem (key:value pair)
31 | const nestedComment =
32 | cst.children.object[0].children.objectItem[0].children.Comment[0];
33 | expect(nestedComment.image).to.eql("// nested inner comment");
34 | });
35 | });
36 |
--------------------------------------------------------------------------------
/examples/grammars/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chevrotain_examples_grammars",
3 | "version": "11.0.3",
4 | "type": "module",
5 | "scripts": {
6 | "ci": "pnpm run test && pnpm run bundle:graphql",
7 | "test": "mocha \"!(node_modules)/**/*spec.js\"",
8 | "bundle:graphql": "esbuild ./graphql/graphql.js --bundle --minify --format=esm --outfile=./graphql/gen/graphql-bundled.min.mjs"
9 | },
10 | "dependencies": {
11 | "acorn": "8.8.0",
12 | "chevrotain": "workspace:*",
13 | "xregexp": "5.1.1"
14 | },
15 | "devDependencies": {
16 | "esbuild": "0.18.11"
17 | },
18 | "private": true
19 | }
20 |
--------------------------------------------------------------------------------
/examples/grammars/tinyc/tinyc_spec.js:
--------------------------------------------------------------------------------
1 | import assert from "assert";
2 | import { parseTinyC } from "./tinyc.js";
3 |
4 | describe("The TinyC Grammar", () => {
5 | it("can parse a simple TinyC sample without errors", () => {
6 | const inputText =
7 | "{ " +
8 | "i=125;" +
9 | " j=100;" +
10 | " while (i-j)" +
 11 |         " if (i
--------------------------------------------------------------------------------
/examples/implementation_languages/impl_lang_spec.js:
--------------------------------------------------------------------------------
 6 |   it("works with ESM", () => {
7 | const inputText = '{ "arr": [1,2,3], "obj": {"num":666}}';
8 | const lexAndParseResult = parseJsonPureJs(inputText);
9 |
10 | assert.equal(lexAndParseResult.lexErrors.length, 0);
11 | assert.equal(lexAndParseResult.parseErrors.length, 0);
12 | });
13 |
14 | it("works with TypeScript generated output ", () => {
15 | const inputText = '{ "arr": [1,2,3], "obj": {"num":666}}';
16 | const lexAndParseResult = parseJsonGenTs(inputText);
17 |
18 | assert.equal(lexAndParseResult.lexErrors.length, 0);
19 | assert.equal(lexAndParseResult.parseErrors.length, 0);
20 | });
21 | });
22 |
--------------------------------------------------------------------------------
/examples/implementation_languages/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chevrotain_examples_implementation_languages",
3 | "version": "11.0.3",
4 | "type": "module",
5 | "scripts": {
6 | "ci": "pnpm run build test",
7 | "build": "npm-run-all build:ts",
8 | "build:ts": "tsc && node ./typescript/scripts/gen_dts_signatures.js",
9 | "test": "mocha \"*spec.js\""
10 | },
11 | "dependencies": {
12 | "chevrotain": "workspace:*"
13 | },
14 | "devDependencies": {
15 | "coffee-script": "^1.11.1",
16 | "mocha": "^9.0.0",
17 | "npm-run-all": "^4.1.5",
18 | "typescript": "5.6.3"
19 | },
20 | "private": true
21 | }
22 |
--------------------------------------------------------------------------------
/examples/implementation_languages/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compileOnSave": true,
3 | "compilerOptions": {
4 | "target": "ES2015",
5 | "module": "ES2020",
6 | "moduleResolution": "node",
7 | "removeComments": false,
8 | "sourceMap": false,
9 | "declaration": false,
10 | "lib": ["es2015", "dom"]
11 | },
12 | "include": ["./typescript/typescript_json.ts"]
13 | }
14 |
--------------------------------------------------------------------------------
/examples/implementation_languages/typescript/json_cst.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface JsonCstNode extends CstNode {
4 | name: "json";
5 | children: JsonCstChildren;
6 | }
7 |
8 | export type JsonCstChildren = {
9 | object?: ObjectCstNode[];
10 | array?: ArrayCstNode[];
11 | };
12 |
13 | export interface ObjectCstNode extends CstNode {
14 | name: "object";
15 | children: ObjectCstChildren;
16 | }
17 |
18 | export type ObjectCstChildren = {
19 | LCurly: IToken[];
20 | objectItem?: ObjectItemCstNode[];
21 | Comma?: IToken[];
22 | RCurly: IToken[];
23 | };
24 |
25 | export interface ObjectItemCstNode extends CstNode {
26 | name: "objectItem";
27 | children: ObjectItemCstChildren;
28 | }
29 |
30 | export type ObjectItemCstChildren = {
31 | StringLiteral: IToken[];
32 | Colon: IToken[];
33 | value: ValueCstNode[];
34 | };
35 |
36 | export interface ArrayCstNode extends CstNode {
37 | name: "array";
38 | children: ArrayCstChildren;
39 | }
40 |
41 | export type ArrayCstChildren = {
42 | LSquare: IToken[];
43 | value?: ValueCstNode[];
44 | Comma?: IToken[];
45 | RSquare: IToken[];
46 | };
47 |
48 | export interface ValueCstNode extends CstNode {
49 | name: "value";
50 | children: ValueCstChildren;
51 | }
52 |
53 | export type ValueCstChildren = {
54 | StringLiteral?: IToken[];
55 | NumberLiteral?: IToken[];
56 | object?: ObjectCstNode[];
57 | array?: ArrayCstNode[];
58 | True?: IToken[];
59 | False?: IToken[];
60 | Null?: IToken[];
61 | };
62 |
63 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
64 | json(children: JsonCstChildren, param?: IN): OUT;
65 | object(children: ObjectCstChildren, param?: IN): OUT;
66 | objectItem(children: ObjectItemCstChildren, param?: IN): OUT;
67 | array(children: ArrayCstChildren, param?: IN): OUT;
68 | value(children: ValueCstChildren, param?: IN): OUT;
69 | }
70 |
--------------------------------------------------------------------------------
/examples/implementation_languages/typescript/scripts/gen_dts_signatures.js:
--------------------------------------------------------------------------------
1 | /**
2 | * This is a minimal script that generates TypeScript definitions
3 | * from a Chevrotain parser.
4 | */
5 | import { writeFileSync } from "fs";
6 | import { resolve, dirname } from "path";
7 | import { generateCstDts } from "chevrotain";
8 | import { productions } from "../typescript_json.js";
9 | import { fileURLToPath } from "url";
10 |
11 | const __dirname = dirname(fileURLToPath(import.meta.url));
12 |
13 | const dtsString = generateCstDts(productions);
14 | const dtsPath = resolve(__dirname, "..", "json_cst.d.ts");
15 | writeFileSync(dtsPath, dtsString);
16 |
--------------------------------------------------------------------------------
/examples/lexer/README.md:
--------------------------------------------------------------------------------
1 | # Lexer Examples
2 |
3 | Some simple examples of using the Chevrotain Lexer to resolve some common lexing problems/scenarios.
4 |
5 | To run all the lexer examples' tests:
6 |
7 | - `npm install` (only once)
8 | - `npm test`
9 |
--------------------------------------------------------------------------------
/examples/lexer/custom_errors/custom_errors.js:
--------------------------------------------------------------------------------
1 | import { createToken, Lexer } from "chevrotain";
2 |
3 | const A = createToken({ name: "A", pattern: /A/ });
4 | const B = createToken({ name: "B", pattern: /B/ });
5 | const C = createToken({ name: "C", pattern: /C/ });
6 | const Whitespace = createToken({
7 | name: "Whitespace",
8 | pattern: /\s+/,
9 | group: Lexer.SKIPPED,
10 | });
11 |
12 | // A link to the detailed API for the ILexerErrorMessageProvider can be found here:
13 | // https://chevrotain.io/docs/features/custom_errors.html
14 | const OyVeyErrorMessageProvider = {
15 | buildUnexpectedCharactersMessage(
16 | fullText,
17 | startOffset,
18 | length,
19 | // eslint-disable-next-line no-unused-vars -- template
20 | line,
21 | // eslint-disable-next-line no-unused-vars -- template
22 | column,
23 | // eslint-disable-next-line no-unused-vars -- template
24 | mode,
25 | ) {
26 | return (
27 | `Oy Vey!!! unexpected character: ->${fullText.charAt(
28 | startOffset,
29 | )}<- at offset: ${startOffset},` + ` skipped ${length} characters.`
30 | );
31 | },
32 | };
33 |
34 | const CustomErrorsLexer = new Lexer([Whitespace, A, B, C], {
35 | errorMessageProvider: OyVeyErrorMessageProvider,
36 | });
37 |
38 | export function tokenize(text) {
39 | const lexResult = CustomErrorsLexer.tokenize(text);
40 | return lexResult;
41 | }
42 |
--------------------------------------------------------------------------------
/examples/lexer/custom_errors/custom_errors_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { tokenize } from "./custom_errors.js";
3 |
4 | describe("A Chevrotain Lexer ability to customize error messages.", () => {
5 | it("Can create an 'Oy Vey' error message", () => {
6 | // Only A-C are supported
7 | const text = `A B C D`;
8 | const lexResult = tokenize(text);
9 |
10 | expect(lexResult.errors).to.have.lengthOf(1);
11 | expect(lexResult.errors[0].message).to.include("Oy Vey!!!");
12 | });
13 | });
14 |
--------------------------------------------------------------------------------
/examples/lexer/custom_patterns/custom_patterns.js:
--------------------------------------------------------------------------------
1 | /**
 2 |  * This example demonstrates the usage of custom token patterns.
 3 |  * Custom token patterns allow implementing token matchers using arbitrary JavaScript code
4 | * instead of being limited to only using regular expressions.
5 | *
6 | * For additional details see the docs:
7 | * https://chevrotain.io/docs/guide/custom_token_patterns.html
8 | */
9 | import { createToken, Lexer } from "chevrotain";
10 |
11 | // First, let's define our custom pattern for matching an Integer Literal.
12 | // This function's signature matches the RegExp.prototype.exec function.
13 | // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp/exec
14 | function matchInteger(text, startOffset) {
15 | let endOffset = startOffset;
16 | let charCode = text.charCodeAt(endOffset);
17 | while (charCode >= 48 && charCode <= 57) {
18 | endOffset++;
19 | charCode = text.charCodeAt(endOffset);
20 | }
21 |
22 | // No match, must return null to conform with the RegExp.prototype.exec signature
23 | if (endOffset === startOffset) {
24 | return null;
25 | } else {
26 | const matchedString = text.substring(startOffset, endOffset);
27 | // according to the RegExp.prototype.exec API the first item in the returned array must be the whole matched string.
28 | return [matchedString];
29 | }
30 | }
31 |
32 | // Now we can simply replace the regExp pattern with our custom pattern.
33 | // Consult the Docs (linked above) for additional syntax variants.
34 | export const IntegerLiteral = createToken({
35 | name: "IntegerLiteral",
36 | pattern: matchInteger,
37 | // custom patterns should explicitly specify the line_breaks option.
38 | line_breaks: false,
39 | });
40 | export const Comma = createToken({ name: "Comma", pattern: /,/ });
41 | const Whitespace = createToken({
42 | name: "Whitespace",
43 | pattern: /\s+/,
44 | group: Lexer.SKIPPED,
45 | });
46 |
47 | const customPatternLexer = new Lexer([Whitespace, Comma, IntegerLiteral]);
48 |
49 | export function tokenize(text) {
50 | const lexResult = customPatternLexer.tokenize(text);
51 |
52 |   if (lexResult.errors.length > 0) {
53 | throw new Error("sad sad panda lexing errors detected");
54 | }
55 | return lexResult;
56 | }
57 |
--------------------------------------------------------------------------------
/examples/lexer/custom_patterns/custom_patterns_payloads_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { tokenMatcher } from "chevrotain";
3 | import {
4 | tokenize,
5 | StringLiteral,
6 | DateLiteral,
7 | } from "./custom_patterns_payloads.js";
8 |
9 | describe("The Chevrotain Lexer ability to use custom pattern implementations.", () => {
10 | context("Custom Payloads", () => {
11 | it("Can be used to save the text of a string literal **without the quotes**", () => {
12 | const text = `"hello-world"`;
13 | const lexResult = tokenize(text);
14 |
15 | expect(lexResult.errors).to.be.empty;
16 | expect(lexResult.tokens).to.have.lengthOf(1);
17 | const stringLiteralTok = lexResult.tokens[0];
18 | expect(tokenMatcher(stringLiteralTok, StringLiteral));
19 | // Base Token's "image" with quotes
20 | expect(stringLiteralTok.image).to.eql('"hello-world"');
21 | // stripped away quotes in the payload
22 | expect(stringLiteralTok.payload).to.eql("hello-world");
23 | });
24 |
25 | it("Can be used to save the integer values of a DateLiteral parts", () => {
26 | const text = `31-12-1999`;
27 | const lexResult = tokenize(text);
28 |
29 | expect(lexResult.errors).to.be.empty;
30 | expect(lexResult.tokens).to.have.lengthOf(1);
31 | const dateLiteralTok = lexResult.tokens[0];
32 | expect(tokenMatcher(dateLiteralTok, DateLiteral));
33 | // Base Token's image
34 | expect(dateLiteralTok.image).to.eql("31-12-1999");
35 | // The payload includes multiple computed values
36 | expect(dateLiteralTok.payload.day).to.eql(31);
37 | expect(dateLiteralTok.payload.month).to.eql(12);
38 | expect(dateLiteralTok.payload.year).to.eql(1999);
39 | });
40 | });
41 | });
42 |
--------------------------------------------------------------------------------
/examples/lexer/custom_patterns/custom_patterns_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { tokenMatcher } from "chevrotain";
3 | import { tokenize, Comma, IntegerLiteral } from "./custom_patterns.js";
4 |
5 | describe("The Chevrotain Lexer ability to use custom pattern implementations.", () => {
6 | it("Can Lex a simple input using a Custom Integer Literal RegExp", () => {
7 | const text = `1 , 2 , 3`;
8 | const lexResult = tokenize(text);
9 |
10 | expect(lexResult.errors).to.be.empty;
11 | expect(lexResult.tokens).to.have.lengthOf(5);
12 | expect(tokenMatcher(lexResult.tokens[0], IntegerLiteral)).to.be.true;
13 | expect(lexResult.tokens[0].image).to.equal("1");
14 | expect(tokenMatcher(lexResult.tokens[1], Comma)).to.be.true;
15 | expect(tokenMatcher(lexResult.tokens[2], IntegerLiteral)).to.be.true;
16 | expect(lexResult.tokens[2].image).to.equal("2");
17 | expect(tokenMatcher(lexResult.tokens[3], Comma)).to.be.true;
18 | expect(tokenMatcher(lexResult.tokens[4], IntegerLiteral)).to.be.true;
19 | expect(lexResult.tokens[4].image).to.equal("3");
20 | });
21 | });
22 |
--------------------------------------------------------------------------------
/examples/lexer/keywords_vs_identifiers/keywords_vs_identifiers_spec.js:
--------------------------------------------------------------------------------
1 | import assert from "assert";
2 | import { tokenize, Do, Identifier } from "./keywords_vs_identifiers.js";
3 | import { tokenMatcher } from "chevrotain";
4 |
5 | describe("The Chevrotain Lexer ability to distinguish keywords and identifiers", () => {
6 | it("will lex do as a keyword", () => {
7 | const text = "do";
8 | const lexResult = tokenize(text);
9 |
10 | assert.equal(lexResult.errors.length, 0);
11 | assert.equal(lexResult.tokens.length, 1);
12 | assert.equal(lexResult.tokens[0].image, "do");
13 | assert.equal(tokenMatcher(lexResult.tokens[0], Do), true);
14 | });
15 |
16 | it("will lex done as an Identifier", () => {
17 | const text = "done";
18 | const lexResult = tokenize(text);
19 |
20 | assert.equal(lexResult.errors.length, 0);
21 | assert.equal(lexResult.tokens.length, 1);
22 | assert.equal(lexResult.tokens[0].image, "done");
23 | assert.equal(tokenMatcher(lexResult.tokens[0], Identifier), true);
24 | });
25 | });
26 |
--------------------------------------------------------------------------------
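Note: the keywords_vs_identifiers.js implementation exercised by the spec above is not
reproduced in this snapshot. As a rough sketch only, assuming the standard Chevrotain
"longer_alt" mechanism (the feature documented in features/token_alternative_matches.md),
the token definitions might look roughly like this:

import { createToken, Lexer } from "chevrotain";

// Identifier is defined first so that keyword tokens can reference it.
export const Identifier = createToken({
  name: "Identifier",
  pattern: /[a-zA-Z]\w*/,
});

// "longer_alt" tells the lexer to prefer a longer Identifier match (e.g. "done")
// over the shorter keyword match ("do").
export const Do = createToken({
  name: "Do",
  pattern: /do/,
  longer_alt: Identifier,
});

const Whitespace = createToken({
  name: "Whitespace",
  pattern: /\s+/,
  group: Lexer.SKIPPED,
});

// Keyword tokens must appear before Identifier in the token list,
// otherwise Identifier would always win.
const KeywordsLexer = new Lexer([Whitespace, Do, Identifier]);

export function tokenize(text) {
  return KeywordsLexer.tokenize(text);
}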
/examples/lexer/multi_mode_lexer/multi_mode_lexer_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { multiModeLexer } from "./multi_mode_lexer.js";
3 |
 4 | describe("The Chevrotain Lexer ability to switch between Lexer modes", () => {
5 | it("Can Lex an input that requires multiple modes successfully", () => {
6 | const input = "1 LETTERS G A G SIGNS & EXIT_SIGNS B EXIT_LETTERS 3";
7 | const lexResult = multiModeLexer.tokenize(input);
8 | expect(lexResult.errors).to.be.empty;
9 |
10 | const images = lexResult.tokens.map((currTok) => currTok.image);
11 | expect(images).to.deep.equal([
12 | // By default, starting with the "first" mode "numbers_mode."
13 | // The ".tokenize" method can accept an optional initial mode argument as the second parameter.
14 | "1",
15 | "LETTERS", // entering "letters_mode"
16 | "G",
17 | "A",
18 | "G",
19 | "SIGNS", // entering "signs_mode".
20 | "&",
21 | "EXIT_SIGNS", // popping the last mode, we are now back in "letters_mode"
22 | "B",
23 | "EXIT_LETTERS", // popping the last mode, we are now back in "numbers_mode"
24 | "3",
25 | ]);
26 | });
27 |
28 |   it("Will create a Lexing error when a Token which is not supported in the current mode is encountered", () => {
29 | const input = "1 LETTERS 2"; // 2 is not allowed in letters mode!
30 | const lexResult = multiModeLexer.tokenize(input);
31 | expect(lexResult.errors).to.have.lengthOf(1);
32 | expect(lexResult.errors[0].message).to.contain("unexpected character");
33 | expect(lexResult.errors[0].message).to.contain("2");
34 | expect(lexResult.errors[0].message).to.contain("at offset: 10");
35 |
36 | const images = lexResult.tokens.map((currTok) => currTok.image);
37 | expect(images).to.deep.equal(["1", "LETTERS"]);
38 | });
39 | });
40 |
--------------------------------------------------------------------------------
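Note: multi_mode_lexer.js itself is not reproduced in this snapshot. As a rough sketch
only, assuming the documented Chevrotain multi-mode lexer definition ("push_mode" /
"pop_mode") and reusing the mode names mentioned in the spec's comments, such a lexer
could be structured roughly like this (token names are illustrative only):

import { createToken, Lexer } from "chevrotain";

const Whitespace = createToken({
  name: "Whitespace",
  pattern: /\s+/,
  group: Lexer.SKIPPED,
});
const Number = createToken({ name: "Number", pattern: /\d+/ });
const Letter = createToken({ name: "Letter", pattern: /[A-Z]/ });

// Matching this token pushes "letters_mode" onto the mode stack...
const EnterLetters = createToken({
  name: "EnterLetters",
  pattern: /LETTERS/,
  push_mode: "letters_mode",
});
// ...and matching this one pops back to the previous mode.
const ExitLetters = createToken({
  name: "ExitLetters",
  pattern: /EXIT_LETTERS/,
  pop_mode: true,
});

export const multiModeLexer = new Lexer({
  modes: {
    // Each mode lists only the token types that are legal while it is active.
    numbers_mode: [Whitespace, EnterLetters, Number],
    letters_mode: [Whitespace, ExitLetters, Letter],
  },
  defaultMode: "numbers_mode",
});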
/examples/lexer/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chevrotain_examples_lexer",
3 | "version": "11.0.3",
4 | "type": "module",
5 | "scripts": {
6 | "ci": "pnpm run test",
7 | "test": "mocha \"!(node_modules)/**/*spec.js\""
8 | },
9 | "dependencies": {
10 | "chevrotain": "workspace:*",
11 | "lodash": "4.17.21"
12 | },
13 | "private": true
14 | }
15 |
--------------------------------------------------------------------------------
/examples/lexer/python_indentation/python_indentation_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { tokenize } from "./python_indentation.js";
3 |
4 | describe("The Chevrotain Lexer ability to lex python like indentation.", () => {
5 | it("Can Lex a simple python style if-else ", () => {
6 | let input =
7 | "if 1\n" +
8 | " if 2\n" +
9 | " if 3\n" +
10 | " print 666\n" +
11 | " print 777\n" +
12 | " else\n" +
13 | " print 999\n";
14 |
15 | let lexResult = tokenize(input);
16 | const actualTokenTypes = lexResult.tokens.map((tok) => tok.tokenType.name);
17 |
18 | expect(actualTokenTypes).to.eql([
19 | "If",
20 | "IntegerLiteral",
21 | "Indent",
22 | "If",
23 | "IntegerLiteral",
24 | "Indent",
25 | "If",
26 | "IntegerLiteral",
27 | "Indent",
28 | "Print",
29 | "IntegerLiteral",
30 | "Print",
31 | "IntegerLiteral",
32 | "Outdent",
33 | "Outdent",
34 | "Else",
35 | "Indent",
36 | "Print",
37 | "IntegerLiteral",
38 | "Outdent",
39 | "Outdent",
40 | ]);
41 | });
42 |
43 | it("Can Lex another simple python style if-else ", () => {
44 | const input =
45 | "if 1\n" +
46 | " if 2\n" +
47 | " if 3\n" +
48 | "else\n" +
49 | " print 666666666666\n";
50 |
51 | const lexResult = tokenize(input);
52 | const actualTokenTypes = lexResult.tokens.map((tok) => tok.tokenType.name);
53 | expect(actualTokenTypes).to.eql([
54 | "If",
55 | "IntegerLiteral",
56 | "Indent",
57 | "If",
58 | "IntegerLiteral",
59 | "Indent",
60 | "If",
61 | "IntegerLiteral",
62 | "Outdent",
63 | "Outdent",
64 | "Else",
65 | "Indent",
66 | "Print",
67 | "IntegerLiteral",
68 | "Outdent",
69 | ]);
70 | });
71 | });
72 |
--------------------------------------------------------------------------------
/examples/lexer/token_groups/token_group_spec.js:
--------------------------------------------------------------------------------
1 | import assert from "assert";
2 | import { tokenize, Comment, Whitespace } from "./token_groups.js";
3 | import { tokenMatcher } from "chevrotain";
4 |
5 | describe("The Chevrotain Lexer ability to group the Tokens.", () => {
6 | it("will output the comments into a separate output object and will ignore whitespaces", () => {
7 | const text =
8 | "if (666) // some comment!\n" +
9 | " return 333\n" +
10 | "// some other comment!\n" +
11 | "else \n" +
12 | " return 667\n";
13 |
14 | const lexResult = tokenize(text);
15 |
16 | assert.equal(lexResult.errors.length, 0);
17 | assert.equal(lexResult.tokens.length, 9);
18 |
19 | lexResult.tokens.forEach(function (lexedToken) {
20 | // the whitespace has been completely skipped/ignored
21 | assert.notEqual(tokenMatcher(lexedToken, Whitespace), true);
22 | });
23 |
24 | const commentsGroup = lexResult.groups.singleLineComments;
25 | assert.equal(commentsGroup.length, 2);
26 | assert.equal(tokenMatcher(commentsGroup[0], Comment), true);
27 | assert.equal(commentsGroup[0].image, "// some comment!");
28 | assert.equal(tokenMatcher(commentsGroup[1], Comment), true);
29 | assert.equal(commentsGroup[1].image, "// some other comment!");
30 | });
31 | });
32 |
--------------------------------------------------------------------------------
/examples/lexer/token_groups/token_groups.js:
--------------------------------------------------------------------------------
1 | import { createToken, Lexer } from "chevrotain";
2 |
3 | const If = createToken({ name: "if", pattern: /if/ });
4 | const Else = createToken({ name: "else", pattern: /else/ });
5 | const Return = createToken({ name: "return", pattern: /return/ });
6 | const LParen = createToken({ name: "LParen", pattern: /\(/ });
7 | const RParen = createToken({ name: "RParen", pattern: /\)/ });
8 | const IntegerLiteral = createToken({ name: "IntegerLiteral", pattern: /\d+/ });
9 |
10 | export const Whitespace = createToken({
11 | name: "Whitespace",
12 | pattern: /\s+/,
13 | // the Lexer.SKIPPED group is a special group that will cause the lexer to "ignore"
14 |   // certain Tokens. These tokens are still consumed from the text, they just don't appear in the
15 |   // lexer's output. This is especially useful for ignoring whitespace and in some use cases comments too.
16 | group: Lexer.SKIPPED,
17 | });
18 |
19 | export const Comment = createToken({
20 | name: "Comment",
21 | pattern: /\/\/.+/,
22 | // a Token's group may be a 'free' String, in that case the lexer's result will contain
23 |   // an additional array of all the tokens matched for each group under the 'groups' object
24 | // for example in this case: lexResult.groups["singleLineComments"]
25 | group: "singleLineComments",
26 | });
27 |
28 | const TokenGroupsLexer = new Lexer([
29 | Whitespace, // Whitespace is very common in most languages so placing it first generally speeds up the lexing.
30 | If,
31 | Else,
32 | Return,
33 | LParen,
34 | RParen,
35 | IntegerLiteral,
36 | Comment,
37 | ]);
38 |
39 | export function tokenize(text) {
40 | const lexResult = TokenGroupsLexer.tokenize(text);
41 |
42 | if (lexResult.errors.length > 0) {
43 | throw new Error("sad sad panda lexing errors detected");
44 | }
45 | return lexResult;
46 | }
47 |
--------------------------------------------------------------------------------
/examples/parser/.gitignore:
--------------------------------------------------------------------------------
1 | diagrams/gen/grammar-bundled.min.mjs
--------------------------------------------------------------------------------
/examples/parser/README.md:
--------------------------------------------------------------------------------
1 | # Parser Examples
2 |
3 | Some simple examples of using the Chevrotain Parser to resolve some common parsing problems/scenarios:
4 |
5 | To run all the parser examples' tests:
6 |
7 | - `npm install` (only once)
8 | - `npm test`
9 |
--------------------------------------------------------------------------------
/examples/parser/backtracking/backtracking_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parse } from "./backtracking.js";
3 |
4 | describe("The Backtracking Example", () => {
5 | it("can parse a statement with Equals and a very long qualified name", () => {
6 | const input = "element age : a.b.c.d.e.f = 666;";
7 | const parseResult = parse(input);
8 | expect(parseResult.parseErrors).to.be.empty;
9 | expect(parseResult.cst.children.withEqualsStatement).to.have.lengthOf(1);
10 | expect(parseResult.cst.children.withDefaultStatement).to.be.undefined;
11 | });
12 |
13 | it("can parse a statement with Default and a very long qualified name", () => {
14 | const input = "element age : a.b.c.d.e.f default 666;";
15 | const parseResult = parse(input);
16 | expect(parseResult.parseErrors).to.be.empty;
17 | expect(parseResult.cst.children.withEqualsStatement).to.be.undefined;
18 | expect(parseResult.cst.children.withDefaultStatement).to.have.lengthOf(1);
19 | });
20 | });
21 |
--------------------------------------------------------------------------------
/examples/parser/content_assist/README.md:
--------------------------------------------------------------------------------
1 | # Content Assist Examples
2 |
3 | Runnable examples for the syntactic content assist feature.
4 |
5 | For further details, see: [in-depth documentation on Content Assist](http://chevrotain.io/docs/guide/syntactic_content_assist.html).
6 |
--------------------------------------------------------------------------------
/examples/parser/content_assist/content_assist_simple.js:
--------------------------------------------------------------------------------
1 | /*
 2 |  * Example of using Chevrotain's built-in syntactic content assist
 3 |  * to implement semantic content assist and content assist on partial inputs.
4 | *
5 | * Examples:
6 | * "Public static " --> ["function"]
7 | * "Public sta" --> ["static"]
8 | * "call f" --> ["foo"] // assuming foo is in the symbol table.
9 | */
10 | import { createToken, Lexer, CstParser } from "chevrotain";
11 |
12 | export const A = createToken({ name: "A", pattern: /A/ });
13 | export const B = createToken({ name: "B", pattern: /B/ });
14 | export const C = createToken({ name: "C", pattern: /C/ });
15 |
16 | const WhiteSpace = createToken({
17 | name: "WhiteSpace",
18 | pattern: /\s+/,
19 | group: Lexer.SKIPPED,
20 | });
21 |
22 | const allTokens = [WhiteSpace, A, B, C];
23 | const StatementsLexer = new Lexer(allTokens);
24 |
25 | // A completely normal Chevrotain Parser, no changes needed to use the content assist capabilities.
26 | class MyParser extends CstParser {
27 | constructor() {
28 | super(allTokens);
29 |
30 | let $ = this;
31 |
32 | $.RULE("myRule", () => {
33 | $.CONSUME(A);
34 |
35 | // prettier-ignore
36 | $.OR([
37 | { ALT: () => $.CONSUME(B) },
38 | { ALT: () => $.CONSUME(C) }
39 | ])
40 | });
41 |
42 | this.performSelfAnalysis();
43 | }
44 | }
45 |
46 | // No need for more than one instance.
47 | const parserInstance = new MyParser();
48 |
49 | export function getContentAssistSuggestions(text) {
50 | const lexResult = StatementsLexer.tokenize(text);
51 | if (lexResult.errors.length > 0) {
52 | throw new Error("sad sad panda, lexing errors detected");
53 | }
54 | const partialTokenVector = lexResult.tokens;
55 |
56 | const syntacticSuggestions = parserInstance.computeContentAssist(
57 | "myRule",
58 | partialTokenVector,
59 | );
60 |
61 | // The suggestions also include the context, we are only interested
62 | // in the TokenTypes in this example.
63 | const tokenTypesSuggestions = syntacticSuggestions.map(
64 | (suggestion) => suggestion.nextTokenType,
65 | );
66 |
67 | return tokenTypesSuggestions;
68 | }
69 |
--------------------------------------------------------------------------------
/examples/parser/content_assist/content_assist_simple_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { B, C, getContentAssistSuggestions } from "./content_assist_simple.js";
3 |
 4 | describe("The Official Content Assist Feature Example", () => {
5 | context("can perform content assist for inputs:", () => {
6 | it('Text: "A "', () => {
7 | const inputText = "A";
8 | const suggestions = getContentAssistSuggestions(inputText);
9 | expect(suggestions).to.have.members([B, C]);
10 | });
11 | });
12 | });
13 |
--------------------------------------------------------------------------------
/examples/parser/custom_errors/custom_errors_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import {
3 | parseEarlyExit,
4 | parseMismatch,
5 | parseMismatchOverride,
6 | parseNoViable,
7 | parseRedundant,
8 | } from "./custom_errors.js";
9 |
10 | describe("The Chevrotain support for custom error provider", () => {
11 | it("can customize a misMatchToken exception", () => {
12 | const errorsOverride = parseMismatch("A C");
13 | expect(errorsOverride).to.have.lengthOf(1);
14 | expect(errorsOverride[0].message).to.equal(
15 | "expecting Bravo at end of mis_match",
16 | );
17 |
18 | // we only modified the error for Bravo mismatches
19 | const errorsDefault = parseMismatch("C");
20 | expect(errorsDefault).to.have.lengthOf(1);
21 | expect(errorsDefault[0].message).to.equal(
22 | "Expecting token of type --> Alpha <-- but found --> 'C' <--",
23 | );
24 | });
25 |
26 | it("can customize a misMatchToken exception by overriding", () => {
27 | const errorsOverride = parseMismatchOverride("A C");
28 | expect(errorsOverride).to.have.lengthOf(1);
29 | expect(errorsOverride[0].message).to.equal("We want Bravo!!!");
30 | });
31 |
32 | it("can customize a NotAllInputParsed exception", () => {
33 | const errors = parseRedundant("A B C");
34 | expect(errors).to.have.lengthOf(1);
35 | expect(errors[0].message).to.equal(
36 | "very bad dog! you still have some input remaining at offset:4",
37 | );
38 | });
39 |
40 | it("can customize a NoViableAlt exception", () => {
41 | const errors = parseNoViable("C");
42 | expect(errors).to.have.lengthOf(1);
43 | expect(errors[0].message).to.equal(
44 | "Expecting: one of these possible Token sequences:\n 1. [Alpha]\n 2. [Bravo]\nbut found: 'C'",
45 | );
46 | });
47 |
48 | it("can customize a EarlyExit exception", () => {
49 | const errors = parseEarlyExit("A");
50 | expect(errors).to.have.lengthOf(1);
51 | expect(errors[0].message).to.equal(
52 | "Esperando por lo menos una iteración de: Bravo",
53 | );
54 | });
55 | });
56 |
--------------------------------------------------------------------------------
/examples/parser/diagrams/README.md:
--------------------------------------------------------------------------------
1 | ## Diagrams
2 |
3 | Examples of producing syntax diagrams for a grammar.
4 |
5 | - See: [additional documentation](https://chevrotain.io/docs/guide/generating_syntax_diagrams.html)
6 |
7 | The grammar is in [grammar.js](./grammar.js).
8 |
9 | ### Creating a new \*.html file
10 |
11 | Run the file below in `node` to generate a `generated_diagrams.html` in this directory
12 |
13 | - [creating_html_file.js](./creating_html_file.js)
14 |
15 | ### Dynamically Rendering the diagrams inside an existing html file
16 |
17 | Prerequisite:
18 |
19 | - Bundle the grammar by running the `bundle:diagrams` script in [parent package.json](../package.json)
20 |
21 | - [dynamically_rendering.html](./dynamically_rendering.html)
22 |
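23 | In short, whether written to a file or rendered dynamically, producing the diagrams boils
24 | down to serializing the grammar and turning it into HTML. A minimal sketch, mirroring
25 | [creating_html_file.js](./creating_html_file.js):
26 |
27 | ```js
28 | import { createSyntaxDiagramsCode } from "chevrotain";
29 | import { JsonParser } from "./grammar.js";
30 |
31 | // serialize the grammar and embed it in a self-contained HTML page
32 | const serializedGrammar = new JsonParser().getSerializedGastProductions();
33 | const htmlText = createSyntaxDiagramsCode(serializedGrammar);
34 | // write `htmlText` to "generated_diagrams.html" and open it in a browser
35 | ```
36 |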
--------------------------------------------------------------------------------
/examples/parser/diagrams/creating_html_file.js:
--------------------------------------------------------------------------------
1 | /**
2 | * A template for generating syntax diagrams html file.
3 | * See: https://github.com/chevrotain/chevrotain/tree/master/diagrams for more details
4 | *
5 | * usage:
6 | * - npm install in the parent directory (parser) to install dependencies
7 | * - Run this file in node.js (node creating_html_file.js)
8 | * - open the "generated_diagrams.html" that will be created in this folder using
9 | * your favorite browser.
10 | */
11 | import path, { dirname } from "path";
12 | import fs from "fs";
13 | import { fileURLToPath } from "url";
14 | import { createSyntaxDiagramsCode } from "chevrotain";
15 | import { JsonParser } from "./grammar.js";
16 |
17 | const __dirname = dirname(fileURLToPath(import.meta.url));
18 |
19 | // extract the serialized grammar.
20 | const parserInstance = new JsonParser();
21 | const serializedGrammar = parserInstance.getSerializedGastProductions();
22 |
23 | // create the HTML Text
24 | const htmlText = createSyntaxDiagramsCode(serializedGrammar);
25 |
26 | // Write the HTML file to disk
27 | const outPath = path.resolve(__dirname, "./");
28 | fs.writeFileSync(outPath + "/generated_diagrams.html", htmlText);
29 |
--------------------------------------------------------------------------------
/examples/parser/diagrams/dynamically_rendering.html:
--------------------------------------------------------------------------------
1 | <!-- page markup not captured in this dump; heading: "Syntax Diagrams generated dynamically in the browser" -->
--------------------------------------------------------------------------------
/examples/parser/dynamic_tokens/dynamic_delimiters_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parse } from "./dynamic_delimiters.js";
3 |
4 | describe("The Dynamic Delimiters Example", () => {
5 | it("Can Parse an array using built-in comma delimiter", () => {
6 | const actual = parse("[1, 2, 3, 4, 5]").value;
7 | expect(actual).to.equal("12345");
8 | });
9 |
10 | it("Can Parse an array using custom dynamic '_' delimiter", () => {
11 | const actual = parse("[1 _ 2 _ 4 _ 8]", /_/).value;
12 | expect(actual).to.equal("1248");
13 | });
14 |
15 | it("Can Parse an array using BOTH custom and built in delimiters", () => {
16 | const actual = parse("[3 _ 6, 9 _ 12]", /_/).value;
17 | expect(actual).to.equal("36912");
18 | });
19 | });
20 |
--------------------------------------------------------------------------------
/examples/parser/inheritance/inheritance_spec.js:
--------------------------------------------------------------------------------
1 | import assert from "assert";
2 | import { parseCommand } from "./inheritance.js";
3 |
4 | const ENGLISH = "english";
5 | const GERMAN = "german";
6 |
7 | describe("The Advanced Inheritance Parser Example", () => {
8 | it("can parse commands in english", () => {
9 | const inputText = "clean the room after cooking some sausages";
10 | const lexAndParseResult = parseCommand(inputText, ENGLISH);
11 |
12 | assert.equal(lexAndParseResult.lexErrors.length, 0);
13 | assert.equal(lexAndParseResult.parseErrors.length, 0);
14 | });
15 |
16 | it("can parse commands in german", () => {
17 | const inputText = "kochen wurstchen und raum den raum auf";
18 | const lexAndParseResult = parseCommand(inputText, GERMAN);
19 |
20 | assert.equal(lexAndParseResult.lexErrors.length, 0);
21 | assert.equal(lexAndParseResult.parseErrors.length, 0);
22 | });
23 | });
24 |
--------------------------------------------------------------------------------
/examples/parser/minification/README.md:
--------------------------------------------------------------------------------
1 | ### Minification of Chevrotain Grammars
2 |
3 | ~~Chevrotain relies on **Function.prototype.toString**
4 | to run. This means that minification of Chevrotain parsers must be done carefully, otherwise
5 | a minified parser may fail during initialization.~~
6 |
7 | The dependence on `Function.prototype.toString` was removed in
8 | [version 6.0.0](http://chevrotain.io/docs/changes/CHANGELOG.html#_6-0-0-8-20-2019) of Chevrotain.
9 | No special handling is needed during minification scenarios.
10 |
--------------------------------------------------------------------------------
/examples/parser/multi_start_rules/multi_start_rules_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parseFirst, parseSecond, parseThird } from "./multi_start_rules.js";
3 |
4 | describe("The Chevrotain support using any rule as a start/top rule", () => {
5 | it("can invoke the first rule successfully", () => {
6 | expect(() => {
7 | parseFirst("A B C");
8 | }).to.not.throw("sad sad panda");
9 | expect(() => {
10 | parseFirst("A");
11 | }).to.not.throw("sad sad panda");
12 | });
13 |
14 | it("can invoke the second rule successfully", () => {
15 | expect(() => {
16 | parseSecond("B C");
17 | }).to.not.throw("sad sad panda");
18 | expect(() => {
19 | parseSecond("B");
20 | }).to.not.throw("sad sad panda");
21 | });
22 |
23 | it("can invoke the third rule successfully", () => {
24 | expect(() => {
25 | parseThird("C");
26 | }).to.not.throw("sad sad panda");
27 | });
28 | });
29 |
--------------------------------------------------------------------------------
/examples/parser/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chevrotain_examples_parser",
3 | "version": "11.0.3",
4 | "type": "module",
5 | "scripts": {
6 | "ci": "pnpm run test",
7 | "test": "mocha \"!(node_modules)/**/*spec.js\"",
8 | "bundle:diagrams": "esbuild ./diagrams/grammar.js --bundle --minify --format=esm --outfile=./diagrams/gen/grammar-bundled.min.mjs"
9 | },
10 | "dependencies": {
11 | "chevrotain": "workspace:*",
12 | "lodash": "4.17.21"
13 | },
14 | "devDependencies": {
15 | "esbuild": "0.18.11"
16 | },
17 | "private": true
18 | }
19 |
--------------------------------------------------------------------------------
/examples/parser/parametrized_rules/parametrized_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { NoViableAltException } from "chevrotain";
3 |
4 | import { parseHello } from "./parametrized.js";
5 |
6 | const POSITIVE = "positive";
7 | const NEGATIVE = "negative";
8 |
9 | describe("The Grammar Parametrized Rules example - using ES6 syntax", () => {
10 | it("can parse a cheerful hello sentence in mode", () => {
11 | const inputText = "hello wonderful world";
12 | const result = parseHello(inputText, POSITIVE);
13 |
14 | expect(result.lexErrors).to.be.empty;
15 | expect(result.parseErrors).to.be.empty;
16 | });
17 |
18 | it("cannot parse a cheerful hello sentence in mode", () => {
19 | const inputText = "hello amazing world";
20 | const result = parseHello(inputText, NEGATIVE);
21 |
22 | expect(result.lexErrors).to.be.empty;
23 | expect(result.parseErrors).to.have.lengthOf(1);
24 | expect(result.parseErrors[0]).to.be.an.instanceof(NoViableAltException);
25 | });
26 |
27 | it("cannot parse a sad hello sentence in mode", () => {
28 | const inputText = "hello evil world";
29 | const result = parseHello(inputText, POSITIVE);
30 |
31 | expect(result.lexErrors).to.be.empty;
32 | expect(result.parseErrors).to.have.lengthOf(1);
33 | expect(result.parseErrors[0]).to.be.an.instanceof(NoViableAltException);
34 | });
35 |
36 | it("can parse a sad hello sentence in mode", () => {
37 | const inputText = "hello cruel world";
38 | const result = parseHello(inputText, NEGATIVE);
39 |
40 | expect(result.lexErrors).to.be.empty;
41 | expect(result.parseErrors).to.be.empty;
42 | });
43 | });
44 |
--------------------------------------------------------------------------------
/examples/parser/predicate_lookahead/predicate_lookahead_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parse, setMaxAllowed } from "./predicate_lookahead.js";
3 |
4 | describe("The Chevrotain support for custom lookahead predicates", () => {
5 | it("can limit the available alternatives in an OR by an some external input number", () => {
6 | setMaxAllowed(3);
7 | expect(parse("1").value).to.equal(1);
8 | expect(parse("2").value).to.equal(2);
9 | expect(parse("3").value).to.equal(3);
10 |
11 | setMaxAllowed(2);
12 | expect(parse("1").value).to.equal(1);
13 | expect(parse("2").value).to.equal(2);
14 | expect(parse("3").parseErrors).to.not.be.empty;
15 |
16 | setMaxAllowed(1);
17 | expect(parse("1").value).to.equal(1);
18 | expect(parse("2").parseErrors).to.not.be.empty;
19 | expect(parse("3").parseErrors).to.not.be.empty;
20 | });
21 | });
22 |
--------------------------------------------------------------------------------
/examples/parser/versioning/versioning_spec.js:
--------------------------------------------------------------------------------
1 | import assert from "assert";
2 | import { parseSelect } from "./versioning.js";
3 |
4 | const VERSION_1 = 1;
5 | const VERSION_2 = 2;
6 |
7 | describe("The Grammar Versioning example", () => {
8 | it("can parse a simple Select statement with Version >1< grammar", () => {
9 | const inputText = "SELECT name FROM employees";
10 | const lexAndParseResult = parseSelect(inputText, VERSION_1);
11 |
12 | assert.equal(lexAndParseResult.lexErrors.length, 0);
13 | assert.equal(lexAndParseResult.parseErrors.length, 0);
14 | });
15 |
16 | it("can parse a simple Select statement with Version >2< grammar", () => {
17 | const inputText = "SELECT name FROM employees , managers";
18 | const lexAndParseResult = parseSelect(inputText, VERSION_2);
19 |
20 | assert.equal(lexAndParseResult.lexErrors.length, 0);
21 | assert.equal(lexAndParseResult.parseErrors.length, 0);
22 | });
23 |
24 | it("can NOT parse Version2 input using Version1 grammar", () => {
25 | // this input is invalid for V1 because there are multiple table names in the 'FROM' clause.
26 | const inputText = "SELECT name FROM employees , managers";
27 | const lexAndParseResult = parseSelect(inputText, VERSION_1);
28 |
29 | assert.equal(lexAndParseResult.lexErrors.length, 0);
30 | assert.equal(lexAndParseResult.parseErrors.length, 1); // has errors
31 | });
32 | });
33 |
--------------------------------------------------------------------------------
/examples/tutorial/README.md:
--------------------------------------------------------------------------------
1 | # Tutorial runnable source code
2 |
3 | - [Step 1: Lexing.](https://github.com/chevrotain/chevrotain/blob/master/examples/tutorial/step1_lexing/)
4 |
5 | - [Step 2: Parsing.](https://github.com/chevrotain/chevrotain/blob/master/examples/tutorial/step2_parsing/)
6 |
7 | - [Step 3a: Adding actions with a CST **Visitor separately** from the grammar.](https://github.com/chevrotain/chevrotain/tree/master/examples/tutorial/step3_actions/)
8 |
9 | - [Step 3b: Adding **embedded** actions **inside** the grammar.](https://github.com/chevrotain/chevrotain/tree/master/examples/tutorial/step3_actions/)
10 |
11 | To run the tests for all the tutorial parts:
12 |
13 | - `npm install` (only once)
14 | - `npm test`
15 |
16 | Each step also contains a **main.js** which can be easily debugged
17 | using your favorite node.js debugger.
18 |
--------------------------------------------------------------------------------
/examples/tutorial/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chevrotain_examples_tutorial",
3 | "version": "11.0.3",
4 | "type": "module",
5 | "scripts": {
6 | "ci": "pnpm run test",
7 | "test": "mocha \"step*/*spec.js\""
8 | },
9 | "dependencies": {
10 | "chevrotain": "workspace:*",
11 | "lodash": "4.17.21"
12 | },
13 | "private": true
14 | }
15 |
--------------------------------------------------------------------------------
/examples/tutorial/step1_lexing/main.js:
--------------------------------------------------------------------------------
1 | import { lex } from "./step1_lexing.js";
2 |
3 | const inputText = "SELECT column1 FROM table2";
4 | const lexingResult = lex(inputText);
5 | console.log(JSON.stringify(lexingResult, null, "\t"));
6 |
--------------------------------------------------------------------------------
/examples/tutorial/step1_lexing/step1_lexing_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { tokenMatcher } from "chevrotain";
3 | import { From, lex, Select, Identifier } from "./step1_lexing.js";
4 |
5 | describe("Chevrotain Tutorial", () => {
6 | context("Step 1 - Lexing", () => {
7 | it("Can Lex a simple input", () => {
8 | const inputText = "SELECT column1 FROM table2";
9 | const lexingResult = lex(inputText);
10 |
11 | expect(lexingResult.errors).to.be.empty;
12 |
13 | const tokens = lexingResult.tokens;
14 | expect(tokens).to.have.lengthOf(4);
15 | expect(tokens[0].image).to.equal("SELECT");
16 | expect(tokens[1].image).to.equal("column1");
17 | expect(tokens[2].image).to.equal("FROM");
18 | expect(tokens[3].image).to.equal("table2");
19 |
20 | // tokenMatcher acts as an "instanceof" check for Tokens
21 | expect(tokenMatcher(tokens[0], Select)).to.be.true;
22 | expect(tokenMatcher(tokens[1], Identifier)).to.be.true;
23 | expect(tokenMatcher(tokens[2], From)).to.be.true;
24 | expect(tokenMatcher(tokens[3], Identifier)).to.be.true;
25 | });
26 | });
27 | });
28 |
--------------------------------------------------------------------------------
/examples/tutorial/step2_parsing/main.js:
--------------------------------------------------------------------------------
1 | import { parse } from "./step2_parsing.js";
2 |
3 | let inputText = "SELECT column1 FROM table2";
4 | // step into the parse function to debug the full flow
5 | parse(inputText);
6 |
7 | // no output here so nothing to show...
8 |
--------------------------------------------------------------------------------
/examples/tutorial/step2_parsing/step2_parsing_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parse } from "./step2_parsing.js";
3 |
4 | describe("Chevrotain Tutorial", () => {
5 | context("Step 2 - Parsing", () => {
6 | it("Can Parse a simple input", () => {
7 | let inputText = "SELECT column1 FROM table2";
8 | expect(() => parse(inputText)).to.not.throw();
9 | });
10 |
11 | it("Will throw an error for an invalid input", () => {
12 | // missing table name
13 | let inputText = "SELECT FROM table2";
14 | expect(() => parse(inputText)).to.throw(
15 | "expecting at least one iteration which starts with one of these possible Token sequences",
16 | );
17 | expect(() => parse(inputText)).to.throw(
18 | "<[Identifier]>\nbut found: 'FROM'",
19 | );
20 | });
21 | });
22 | });
23 |
--------------------------------------------------------------------------------
/examples/tutorial/step3_actions/main.js:
--------------------------------------------------------------------------------
1 | import assert from "assert";
2 | import { toAstVisitor } from "./step3a_actions_visitor.js";
3 | import { toAstEmbedded } from "./step3b_actions_embedded.js";
4 |
5 | let inputText = "SELECT column1, column2 FROM table2 WHERE column2 > 3";
6 |
7 | let astFromVisitor = toAstVisitor(inputText);
8 | let astFromEmbedded = toAstEmbedded(inputText);
9 |
10 | console.log(JSON.stringify(astFromVisitor, null, "\t"));
11 |
12 | assert.deepEqual(
13 | astFromVisitor,
14 | astFromEmbedded,
15 | "Both ASTs should be identical",
16 | );
17 |
--------------------------------------------------------------------------------
/examples/tutorial/step3_actions/step3_actions_spec.js:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { toAstVisitor } from "./step3a_actions_visitor.js";
3 | import { toAstEmbedded } from "./step3b_actions_embedded.js";
4 |
5 | describe("Chevrotain Tutorial", () => {
6 | context("Step 3a - Actions (semantics) using CST Visitor", () => {
7 | it("Can convert a simple input to an AST", () => {
8 | const inputText = "SELECT column1, column2 FROM table2 WHERE column2 > 3";
9 | const ast = toAstVisitor(inputText);
10 |
11 | expect(ast).to.deep.equal({
12 | type: "SELECT_STMT",
13 | selectClause: {
14 | type: "SELECT_CLAUSE",
15 | columns: ["column1", "column2"],
16 | },
17 | fromClause: {
18 | type: "FROM_CLAUSE",
19 | table: "table2",
20 | },
21 | whereClause: {
22 | condition: {
23 | lhs: "column2",
24 | operator: ">",
25 | rhs: "3",
26 | type: "EXPRESSION",
27 | },
28 | type: "WHERE_CLAUSE",
29 | },
30 | });
31 | });
32 | });
33 |
34 | context("Step 3a - Actions (semantics) using embedded actions", () => {
35 | it("Can convert a simple input to an AST", () => {
36 | const inputText = "SELECT column1, column2 FROM table2 WHERE column2 > 3";
37 | const ast = toAstEmbedded(inputText);
38 |
39 | expect(ast).to.deep.equal({
40 | type: "SELECT_STMT",
41 | selectClause: {
42 | type: "SELECT_CLAUSE",
43 | columns: ["column1", "column2"],
44 | },
45 | fromClause: {
46 | type: "FROM_CLAUSE",
47 | table: "table2",
48 | },
49 | whereClause: {
50 | condition: {
51 | lhs: "column2",
52 | operator: ">",
53 | rhs: "3",
54 | type: "EXPRESSION",
55 | },
56 | type: "WHERE_CLAUSE",
57 | },
58 | });
59 | });
60 | });
61 | });
62 |
--------------------------------------------------------------------------------
/examples/tutorial/step4_error_recovery/main.js:
--------------------------------------------------------------------------------
1 | import { parseJsonToCst } from "./step4_error_recovery.js";
2 |
3 | let invalidInput = '{ "key" 666}'; // missing comma
4 | let parsingResult = parseJsonToCst(invalidInput);
5 |
6 | // Even though we had a syntax error (missing comma), the whole input was parsed.
7 | // Inspect the parsing result to see both the syntax error and the output Parse Tree (CST),
8 | // which even includes the '666' and '}'.
9 | console.log(JSON.stringify(parsingResult, null, "\t"));
10 |
--------------------------------------------------------------------------------
/examples/webpack/README.md:
--------------------------------------------------------------------------------
1 | ### Webpacking of Chevrotain Grammars.
2 |
3 | ~~Chevrotain relies on **Function.prototype.toString**
4 | to run. This means that webpacking of Chevrotain parsers must be done carefully, otherwise
5 | a bundled parser may fail during initialization.~~
6 |
7 | The dependence on `Function.prototype.toString` was removed in
8 | [version 6.0.0](http://chevrotain.io/docs/changes/CHANGELOG.html#_6-0-0-8-20-2019) of Chevrotain.
9 | Special handling is no longer needed during webpack bundling scenarios.
10 |
--------------------------------------------------------------------------------
/lerna.json:
--------------------------------------------------------------------------------
1 | {
2 | "command": {
3 | "version": {
4 | "allowBranch": "master",
5 | "message": "chore(release): release %s",
6 | "exact": true
7 | }
8 | },
9 | "version": "11.0.3"
10 | }
11 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "root",
3 | "private": true,
4 | "workspaces": {
5 | "packages": [
6 | "packages/*",
7 | "examples/*"
8 | ]
9 | },
10 | "packageManager": "pnpm@10.9.0",
11 | "scripts": {
12 | "version": "pnpm install && git add pnpm-lock.yaml",
13 | "preinstall": "npx only-allow pnpm",
14 | "release:version": "pnpm run ci && lerna version --force-publish",
15 | "release:publish": "lerna publish from-git --yes --no-verify-access",
16 | "ci": "npm-run-all format:validate ci:subpackages",
17 | "compile": "pnpm -r run clean && tsc --build",
18 | "compile:watch": "pnpm -r run clean && tsc --build --watch",
19 | "ci:subpackages": "pnpm -r run ci",
20 | "format:fix": "prettier --write \"**/*.@(ts|js|json|md|yml)\"",
21 | "format:validate": "prettier --check \"**/*.@(ts|js|json|md|yml)\"",
22 | "prepare": "husky install"
23 | },
24 | "prettier": {
25 | "endOfLine": "lf"
26 | },
27 | "lint-staged": {
28 | "*.{ts,js,json,md,yml}": [
29 | "prettier --write"
30 | ]
31 | },
32 | "commitlint": {
33 | "extends": [
34 | "@commitlint/config-conventional"
35 | ]
36 | },
37 | "devDependencies": {
38 | "@types/chai": "5.2.1",
39 | "@types/mocha": "10.0.10",
40 | "@types/node": "18.19.87",
41 | "chai": "5.2.0",
42 | "typescript": "5.8.3",
43 | "fs-extra": "11.3.0",
44 | "husky": "9.1.7",
45 | "lerna": "8.2.2",
46 | "lint-staged": "15.5.1",
47 | "mocha": "11.1.0",
48 | "npm-run-all2": "7.0.2",
49 | "prettier": "3.5.3",
50 | "shx": "0.4.0",
51 | "cz-conventional-changelog": "3.3.0",
52 | "@commitlint/cli": "19.8.0",
53 | "@commitlint/config-conventional": "19.8.0",
54 | "source-map-support": "0.5.21",
55 | "c8": "10.1.3"
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/packages/chevrotain/.c8rc.json:
--------------------------------------------------------------------------------
1 | {
2 | "reporter": ["lcov", "text"],
3 | "all": true,
4 | "src": ["src"],
5 | "extension": [".js", ".ts"],
6 | "exclude": ["test/*.*"],
7 | "exclude-after-remap": true
8 | }
9 |
--------------------------------------------------------------------------------
/packages/chevrotain/.mocharc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | recursive: true,
3 | require: ["./test/test.config.mjs"],
4 | reporter: "spec",
5 | spec: "./lib/test/**/*spec.js"
6 | }
7 |
--------------------------------------------------------------------------------
/packages/chevrotain/BREAKING_CHANGES.md:
--------------------------------------------------------------------------------
1 | See: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html
2 |
--------------------------------------------------------------------------------
/packages/chevrotain/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | See: https://chevrotain.io/docs/changes/CHANGELOG.html
2 |
--------------------------------------------------------------------------------
/packages/chevrotain/README.md:
--------------------------------------------------------------------------------
1 | # Chevrotain
2 |
3 | For details see:
4 |
5 | - Chevrotain's [website](https://chevrotain.io/docs/).
6 | - Chevrotain's root [README](https://github.com/chevrotain/chevrotain).
7 |
8 | ## Install
9 |
10 | Using npm:
11 |
12 | ```sh
13 | npm install chevrotain
14 | ```
15 |
16 | or using yarn:
17 |
18 | ```sh
19 | yarn add chevrotain
20 | ```
21 |
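22 | A minimal usage sketch (the `Hello` / `World` tokens and the `greeting` rule below are only illustrative):
23 |
24 | ```js
25 | import { createToken, Lexer, CstParser } from "chevrotain";
26 |
27 | const Hello = createToken({ name: "Hello", pattern: /hello/ });
28 | const World = createToken({ name: "World", pattern: /world/ });
29 | const WhiteSpace = createToken({
30 |   name: "WhiteSpace",
31 |   pattern: /\s+/,
32 |   group: Lexer.SKIPPED,
33 | });
34 | const allTokens = [WhiteSpace, Hello, World];
35 |
36 | const HelloLexer = new Lexer(allTokens);
37 |
38 | class HelloParser extends CstParser {
39 |   constructor() {
40 |     super(allTokens);
41 |     this.RULE("greeting", () => {
42 |       this.CONSUME(Hello);
43 |       this.CONSUME(World);
44 |     });
45 |     this.performSelfAnalysis();
46 |   }
47 | }
48 |
49 | const parser = new HelloParser();
50 | const lexResult = HelloLexer.tokenize("hello world");
51 | parser.input = lexResult.tokens; // setting the input resets the parser's state
52 | parser.greeting(); // returns a CST; check parser.errors for syntax errors
53 | ```
54 |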
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/lib/iframe_loader.js:
--------------------------------------------------------------------------------
1 | var lexerOnly = false
2 | var parserOnly = false
3 |
4 | function includeTestIFrame(id, url, mode) {
5 | var iframe = document.createElement("iframe")
6 | iframe.src = url + `?mode=${mode}`
7 | iframe.id = id
8 | iframe.style = "visibility: hidden;"
9 | document.body.appendChild(iframe)
10 | return iframe.contentWindow
11 | }
12 |
13 | function addTest(suite, id, action) {
14 | var $el = $("." + id + " input")
15 | if ($el && $el.is(":checked")) {
16 | suite.add(id, {
17 | defer: true,
18 | fn: function (deferred) {
19 | action({ lexerOnly: lexerOnly, parserOnly: parserOnly }, deferred)
20 | }
21 | })
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/api.js:
--------------------------------------------------------------------------------
1 | // ----------------- wrapping it all together -----------------
2 | var parserInstance;
3 | var lexerInstance;
4 | var lexResult;
5 |
6 | self.parseBench = function (
7 | text,
8 | lexerDefinition,
9 | customLexer,
10 | parser,
11 | rootRule,
12 | options,
13 | parserConfig,
14 | ) {
15 | if (lexerInstance === undefined) {
16 | if (customLexer !== undefined) {
17 | lexerInstance = customLexer;
18 | } else {
19 | var start = new Date().getTime();
20 | lexerInstance = new chevrotain.Lexer(lexerDefinition, {
21 | // TODO: extract lexer options to global config
22 | positionTracking: "onlyOffset",
23 | });
24 | var end = new Date().getTime();
25 | console.log("Lexer init time: " + (end - start));
26 | }
27 | }
28 |
29 | if (lexResult === undefined || options.lexerOnly) {
30 | lexResult = lexerInstance.tokenize(text);
31 | if (lexResult.errors.length > 0) {
32 | throw Error("Lexing errors detected");
33 | }
34 | }
35 |
36 | // It is recommended to only initialize a Chevrotain Parser once
37 | // and reset its state instead of re-initializing it
38 | if (parserInstance === undefined) {
39 | var start = new Date().getTime();
40 | parserInstance = new parser(parserConfig);
41 | var end = new Date().getTime();
42 | console.log("Parser init time: " + (end - start));
43 | }
44 |
45 | if (options.lexerOnly) {
46 | return lexResult.tokens;
47 | } else {
48 | // setting a new input will RESET the parser instance's state.
49 | parserInstance.input = lexResult.tokens;
50 | var lexErrors = lexResult.errors;
51 |
52 | // only performing the lexing ONCE if we are only interested in the parsing speed
53 | if (!options.parserOnly) {
54 | lexResult = undefined;
55 | }
56 |
57 | // any top level rule may be used as an entry point
58 | var value = parserInstance[rootRule]();
59 |
60 | if (parserInstance.errors.length > 0) {
61 | throw Error("Parsing Errors detected");
62 | }
63 | return {
64 | value: value, // this is a pure grammar, the value will always be
65 | lexErrors: lexErrors,
66 | parseErrors: parserInstance.errors,
67 | };
68 | }
69 | };
70 |
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/css/css.html:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/ecma5/ecma5.html:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/esm_wrappers/chevrotain_latest.mjs:
--------------------------------------------------------------------------------
1 | import * as chevrotain from "https://unpkg.com/chevrotain/lib/chevrotain.mjs";
2 |
3 | // "import * from" returns a `Module` object which needs to be destructured first
4 | const spreadChevrotain = { ...chevrotain };
5 | // legacy code expects chevrotain on the webWorker "global"
6 | self.chevrotain = spreadChevrotain;
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/esm_wrappers/chevrotain_next.mjs:
--------------------------------------------------------------------------------
1 | import * as chevrotain from "../../../lib/chevrotain.mjs";
2 |
3 | // "import * from" returns a `Module` object which needs to be destructured first
4 | const spreadChevrotain = { ...chevrotain };
5 | // legacy code expects chevrotain on the webWorker "global"
6 | self.chevrotain = spreadChevrotain;
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/json/json.html:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/options.js:
--------------------------------------------------------------------------------
1 | const options = {
2 | // Tip: disable CST building to emphasize performance changes
3 | // due to parser engine changes, e.g. better implementation of `defineRule`
4 | // or better lookahead logic implementation
5 | next: {
6 | // this path seems to be relative to the `worker_impel.js` file
7 | // where this path will be imported using `WorkerGlobalScope.importScripts()`
8 | bundle: "./esm_wrappers/chevrotain_next.mjs",
9 | parserConfig: {
10 | maxLookahead: 2,
11 | outputCst: false,
12 | },
13 | },
14 | latest: {
15 | // bundle: "../chevrotain.js",
16 | bundle: "./esm_wrappers/chevrotain_latest.mjs",
17 | parserConfig: {
18 | maxLookahead: 2,
19 | outputCst: false,
20 | },
21 | },
22 | };
23 |
24 | // pick the correct options depending on mode
25 | const queryString = window.location.search;
26 | const urlParams = new URLSearchParams(queryString);
27 | const mode = urlParams.get("mode");
28 | self.globalOptions = options[mode];
29 |
30 | console.log(JSON.stringify(self.globalOptions, null, "\t"));
31 |
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/worker_api.js:
--------------------------------------------------------------------------------
1 | var parserWorker;
2 | var globalDeferred;
3 |
4 | function initWorker(options) {
5 | // relative to the nested iframe
6 | parserWorker = new Worker("../worker_impel.js", { type: "module" });
7 |
8 | parserWorker.postMessage(options);
9 | parserWorker.onmessage = function (errCode) {
10 | globalDeferred.resolve();
11 | };
12 | }
13 |
14 | function parse(options, deferred) {
15 | globalDeferred = deferred;
16 | parserWorker.postMessage([options]);
17 | }
18 |
--------------------------------------------------------------------------------
/packages/chevrotain/benchmark_web/parsers/worker_impel.js:
--------------------------------------------------------------------------------
1 | self.initialized = false;
2 |
3 | onmessage = async function (event) {
4 | if (!initialized) {
5 | self.initialized = true;
6 |
7 | if (event.data.parserConfig) {
8 | self.parserConfig = event.data.parserConfig;
9 | }
10 |
11 | for (const elem of event.data.importScripts) {
12 | await import(elem);
13 | }
14 |
15 | if (event.data.sampleUrl) {
16 | var xhrObj = new XMLHttpRequest();
17 | xhrObj.open("GET", event.data.sampleUrl, false);
18 | xhrObj.send("");
19 |
20 | self.sample = xhrObj.responseText;
21 | }
22 | self.startRule = event.data.startRule;
23 | } else {
24 | var options = event.data[0];
25 |
26 | try {
27 | self.parseBench(
28 | self.sample,
29 | self.lexerDefinition || undefined,
30 | self.customLexer || undefined,
31 | parser,
32 | startRule,
33 | options,
34 | parserConfig,
35 | );
36 | postMessage(0);
37 | } catch (e) {
38 | console.error(e.message);
39 | console.error(e.stack);
40 | postMessage(1);
41 | }
42 | }
43 | };
44 |
--------------------------------------------------------------------------------
/packages/chevrotain/chevrotain.d.ts:
--------------------------------------------------------------------------------
1 | export * from "@chevrotain/types";
2 | export as namespace chevrotain;
3 |
--------------------------------------------------------------------------------
/packages/chevrotain/diagrams/README.md:
--------------------------------------------------------------------------------
1 | See [online docs](https://chevrotain.io/docs/guide/generating_syntax_diagrams.html).
2 |
--------------------------------------------------------------------------------
/packages/chevrotain/diagrams/diagrams.css:
--------------------------------------------------------------------------------
1 | svg.railroad-diagram path {
2 | stroke-width: 3;
3 | stroke: black;
4 | fill: rgba(0, 0, 0, 0);
5 | }
6 |
7 | svg.railroad-diagram text {
8 | font: bold 14px monospace;
9 | text-anchor: middle;
10 | }
11 |
12 | svg.railroad-diagram text.label {
13 | text-anchor: start;
14 | }
15 |
16 | svg.railroad-diagram text.comment {
17 | font: italic 12px monospace;
18 | }
19 |
20 | svg.railroad-diagram g.non-terminal rect {
21 | fill: hsl(223, 100%, 83%);
22 | }
23 |
24 | svg.railroad-diagram rect {
25 | stroke-width: 3;
26 | stroke: black;
27 | fill: hsl(190, 100%, 83%);
28 | }
29 |
30 | .diagramHeader {
31 | display: inline-block;
32 | -webkit-touch-callout: default;
33 | -webkit-user-select: text;
34 | -khtml-user-select: text;
35 | -moz-user-select: text;
36 | -ms-user-select: text;
37 | user-select: text;
38 | font-weight: bold;
39 | font-family: monospace;
40 | font-size: 18px;
41 | margin-bottom: -8px;
42 | text-align: center;
43 | }
44 |
45 | .diagramHeaderDef {
46 | background-color: lightgreen;
47 | }
48 |
49 | svg.railroad-diagram text {
50 | -webkit-touch-callout: default;
51 | -webkit-user-select: text;
52 | -khtml-user-select: text;
53 | -moz-user-select: text;
54 | -ms-user-select: text;
55 | user-select: text;
56 | }
57 |
58 | svg.railroad-diagram g.non-terminal rect.diagramRectUsage {
59 | color: green;
60 | fill: yellow;
61 | stroke: 5;
62 | }
63 |
64 | svg.railroad-diagram g.terminal rect.diagramRectUsage {
65 | color: green;
66 | fill: yellow;
67 | stroke: 5;
68 | }
69 |
70 | div {
71 | -webkit-touch-callout: none;
72 | -webkit-user-select: none;
73 | -khtml-user-select: none;
74 | -moz-user-select: none;
75 | -ms-user-select: none;
76 | user-select: none;
77 | }
78 |
79 | svg {
80 | width: 100%;
81 | }
82 |
83 | svg.railroad-diagram g.non-terminal text {
84 | cursor: pointer;
85 | }
--------------------------------------------------------------------------------
/packages/chevrotain/diagrams/src/diagrams_serializer.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @param {string} targetFilePath - The path and file name to serialize to.
3 | * @param {string} varName - The name of the global variable that will expose the serialized contents.
4 | * @param {chevrotain.Parser} parserInstance - A parser instance whose grammar will be serialized.
5 | */
6 | function serializeGrammarToFile(targetFilePath, varName, parserInstance) {
7 | var fs = require("fs");
8 | var serializedGrammar = parserInstance.getSerializedGastProductions();
9 | var serializedGrammarText = JSON.stringify(serializedGrammar, null, "\t");
10 |
11 | // generates a JavaScript file which exposes the serialized grammar on the global scope (Window)
12 | fs.writeFileSync(
13 | targetFilePath,
14 | "var " + varName + " = " + serializedGrammarText,
15 | );
16 | }
17 |
18 | module.exports = {
19 | serializeGrammarToFile: serializeGrammarToFile,
20 | };
21 |
--------------------------------------------------------------------------------
/packages/chevrotain/diagrams/src/main.js:
--------------------------------------------------------------------------------
1 | (function (root, factory) {
2 | if (typeof define === "function" && define.amd) {
3 | // AMD. Register as an anonymous module.
4 | define(["./diagrams_builder", "./diagrams_behavior"], factory);
5 | } else if (typeof module === "object" && module.exports) {
6 | // Node. Does not work with strict CommonJS, but
7 | // only CommonJS-like environments that support module.exports,
8 | // like Node.
9 | module.exports = factory(
10 | require("./diagrams_builder"),
11 | require("./diagrams_behavior"),
12 | );
13 | } else {
14 | // Browser globals (root is window)
15 | root.main = factory(root.diagrams_builder, root.diagrams_behavior);
16 | }
17 | })(this, function (builder, behavior) {
18 | return {
19 | drawDiagramsFromParserInstance: function (parserInstanceToDraw, targetDiv) {
20 | var topRules = parserInstanceToDraw.getSerializedGastProductions();
21 | targetDiv.innerHTML = builder.buildSyntaxDiagramsText(topRules);
22 | behavior.initDiagramsBehavior();
23 | },
24 |
25 | drawDiagramsFromSerializedGrammar: function (serializedGrammar, targetDiv) {
26 | targetDiv.innerHTML = builder.buildSyntaxDiagramsText(serializedGrammar);
27 | behavior.initDiagramsBehavior();
28 | },
29 | };
30 | });
31 |
--------------------------------------------------------------------------------
/packages/chevrotain/scripts/version-config.js:
--------------------------------------------------------------------------------
1 | import fs from "fs";
2 | import jf from "jsonfile";
3 | import path, { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | export const versionPath = path.join(__dirname, "../src/version.ts");
9 | export const packagePath = path.join(__dirname, "../package.json");
10 | export const readmePath = path.join(__dirname, "../../../README.md");
11 | export const pkgJson = jf.readFileSync(packagePath);
12 | export const apiString = fs.readFileSync(versionPath, "utf8").toString();
13 | export const currVersion = pkgJson.version;
14 |
--------------------------------------------------------------------------------
/packages/chevrotain/scripts/version-update.js:
--------------------------------------------------------------------------------
1 | import git from "gitty";
2 | import fs from "fs";
3 | import {
4 | apiString,
5 | currVersion,
6 | readmePath,
7 | versionPath,
8 | } from "./version-config.js";
9 | import { VERSION as oldVersion } from "../lib/src/version.js";
10 |
11 | const myRepo = git("../../");
12 |
13 | const newVersion = currVersion;
14 | const oldVersionRegExpGlobal = new RegExp(
15 | oldVersion.replace(/\./g, "\\."),
16 | "g",
17 | );
18 |
19 | console.log("bumping version on <" + versionPath + ">");
20 |
21 | const bumpedVersionTsFileContents = apiString.replace(
22 | oldVersionRegExpGlobal,
23 | newVersion,
24 | );
25 | fs.writeFileSync(versionPath, bumpedVersionTsFileContents);
26 |
27 | console.log("bumping unpkg link in: <" + readmePath + ">");
28 | const readmeContents = fs.readFileSync(readmePath, "utf8").toString();
29 | const bumpedReadmeContents = readmeContents.replace(
30 | oldVersionRegExpGlobal,
31 | newVersion,
32 | );
33 | fs.writeFileSync(readmePath, bumpedReadmeContents);
34 |
35 | // Just adding to the current commit is sufficient as lerna does the commit + tag + push
36 | myRepo.addSync([versionPath, readmePath]);
37 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/diagrams/render_public.ts:
--------------------------------------------------------------------------------
1 | import { VERSION } from "../version.js";
2 | import { ISerializedGast } from "@chevrotain/types";
3 |
4 | export function createSyntaxDiagramsCode(
5 | grammar: ISerializedGast[],
6 | {
7 | resourceBase = `https://unpkg.com/chevrotain@${VERSION}/diagrams/`,
8 | css = `https://unpkg.com/chevrotain@${VERSION}/diagrams/diagrams.css`,
9 | }: {
10 | resourceBase?: string;
11 | css?: string;
12 | } = {},
13 | ) {
14 | const header = `
15 |
16 |
17 |
18 |
23 |
24 | `;
25 | const cssHtml = `
26 |
27 | `;
28 |
29 | const scripts = `
30 |
31 |
32 |
33 |
34 | `;
35 | const diagramsDiv = `
36 |
37 | `;
38 | const serializedGrammar = `
39 |
42 | `;
43 |
44 | const initLogic = `
45 |
49 | `;
50 | return (
51 | header + cssHtml + scripts + diagramsDiv + serializedGrammar + initLogic
52 | );
53 | }
54 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/lang/lang_extensions.ts:
--------------------------------------------------------------------------------
1 | const NAME = "name";
2 |
3 | export function defineNameProp(obj: {}, nameValue: string): void {
4 | Object.defineProperty(obj, NAME, {
5 | enumerable: false,
6 | configurable: true,
7 | writable: false,
8 | value: nameValue,
9 | });
10 | }
11 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/constants.ts:
--------------------------------------------------------------------------------
1 | // TODO: can this be removed? where is it used?
2 | export const IN = "_~IN~_";
3 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/grammar/follow.ts:
--------------------------------------------------------------------------------
1 | import { RestWalker } from "./rest.js";
2 | import { first } from "./first.js";
3 | import { assign, forEach } from "lodash-es";
4 | import { IN } from "../constants.js";
5 | import { Alternative, NonTerminal, Rule, Terminal } from "@chevrotain/gast";
6 | import { IProduction, TokenType } from "@chevrotain/types";
7 |
8 | // This ResyncFollowsWalker computes all of the follows required for RESYNC
9 | // (skipping reference production).
10 | export class ResyncFollowsWalker extends RestWalker {
11 | public follows: Record<string, TokenType[]> = {};
12 |
13 | constructor(private topProd: Rule) {
14 | super();
15 | }
16 |
17 | startWalking(): Record<string, TokenType[]> {
18 | this.walk(this.topProd);
19 | return this.follows;
20 | }
21 |
22 | walkTerminal(
23 | terminal: Terminal,
24 | currRest: IProduction[],
25 | prevRest: IProduction[],
26 | ): void {
27 | // do nothing! just like in the public sector after 13:00
28 | }
29 |
30 | walkProdRef(
31 | refProd: NonTerminal,
32 | currRest: IProduction[],
33 | prevRest: IProduction[],
34 | ): void {
35 | const followName =
36 | buildBetweenProdsFollowPrefix(refProd.referencedRule, refProd.idx) +
37 | this.topProd.name;
38 | const fullRest: IProduction[] = currRest.concat(prevRest);
39 | const restProd = new Alternative({ definition: fullRest });
40 | const t_in_topProd_follows = first(restProd);
41 | this.follows[followName] = t_in_topProd_follows;
42 | }
43 | }
44 |
45 | export function computeAllProdsFollows(
46 | topProductions: Rule[],
47 | ): Record<string, TokenType[]> {
48 | const reSyncFollows = {};
49 |
50 | forEach(topProductions, (topProd) => {
51 | const currRefsFollow = new ResyncFollowsWalker(topProd).startWalking();
52 | assign(reSyncFollows, currRefsFollow);
53 | });
54 | return reSyncFollows;
55 | }
56 |
57 | export function buildBetweenProdsFollowPrefix(
58 | inner: Rule,
59 | occurenceInParent: number,
60 | ): string {
61 | return inner.name + occurenceInParent + IN;
62 | }
63 |
64 | export function buildInProdFollowPrefix(terminal: Terminal): string {
65 | const terminalName = terminal.terminalType.name;
66 | return terminalName + terminal.idx + IN;
67 | }
68 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/grammar/gast/gast_resolver_public.ts:
--------------------------------------------------------------------------------
1 | import { Rule } from "@chevrotain/gast";
2 | import { defaults, forEach } from "lodash-es";
3 | import { resolveGrammar as orgResolveGrammar } from "../resolver.js";
4 | import { validateGrammar as orgValidateGrammar } from "../checks.js";
5 | import {
6 | defaultGrammarResolverErrorProvider,
7 | defaultGrammarValidatorErrorProvider,
8 | } from "../../errors_public.js";
9 | import { TokenType } from "@chevrotain/types";
10 | import {
11 | IGrammarResolverErrorMessageProvider,
12 | IGrammarValidatorErrorMessageProvider,
13 | IParserDefinitionError,
14 | } from "../types.js";
15 |
16 | type ResolveGrammarOpts = {
17 | rules: Rule[];
18 | errMsgProvider?: IGrammarResolverErrorMessageProvider;
19 | };
20 | export function resolveGrammar(
21 | options: ResolveGrammarOpts,
22 | ): IParserDefinitionError[] {
23 | const actualOptions: Required<ResolveGrammarOpts> = defaults(options, {
24 | errMsgProvider: defaultGrammarResolverErrorProvider,
25 | });
26 |
27 | const topRulesTable: { [ruleName: string]: Rule } = {};
28 | forEach(options.rules, (rule) => {
29 | topRulesTable[rule.name] = rule;
30 | });
31 | return orgResolveGrammar(topRulesTable, actualOptions.errMsgProvider);
32 | }
33 |
34 | export function validateGrammar(options: {
35 | rules: Rule[];
36 | tokenTypes: TokenType[];
37 | grammarName: string;
38 | errMsgProvider: IGrammarValidatorErrorMessageProvider;
39 | }): IParserDefinitionError[] {
40 | options = defaults(options, {
41 | errMsgProvider: defaultGrammarValidatorErrorProvider,
42 | });
43 |
44 | return orgValidateGrammar(
45 | options.rules,
46 | options.tokenTypes,
47 | options.errMsgProvider,
48 | options.grammarName,
49 | );
50 | }
51 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/grammar/keys.ts:
--------------------------------------------------------------------------------
1 | // Lookahead keys are 32-bit integers in the form
2 | // TTTTTTTT-ZZZZZZZZZZZZ-YYYY-XXXXXXXX
3 | // XXXXXXXX -> Occurrence Index bitmap.
4 | // YYYY -> DSL Method Type bitmap.
5 | // ZZZZZZZZZZZZ -> Rule short Index bitmap.
6 | // TTTTTTTT -> alternation alternative index bitmap
7 |
8 | export const BITS_FOR_METHOD_TYPE = 4;
9 | export const BITS_FOR_OCCURRENCE_IDX = 8;
10 | export const BITS_FOR_RULE_IDX = 12;
11 | // TODO: validation, this means that there may be at most 2^8 --> 256 alternatives for an alternation.
12 | export const BITS_FOR_ALT_IDX = 8;
13 |
14 | // short string used as part of mapping keys.
15 | // being short improves the performance when composing KEYS for maps out of these
16 | // Bits 9 - 12 (16 possible values) are reserved for the DSL method indices
17 | export const OR_IDX = 1 << BITS_FOR_OCCURRENCE_IDX;
18 | export const OPTION_IDX = 2 << BITS_FOR_OCCURRENCE_IDX;
19 | export const MANY_IDX = 3 << BITS_FOR_OCCURRENCE_IDX;
20 | export const AT_LEAST_ONE_IDX = 4 << BITS_FOR_OCCURRENCE_IDX;
21 | export const MANY_SEP_IDX = 5 << BITS_FOR_OCCURRENCE_IDX;
22 | export const AT_LEAST_ONE_SEP_IDX = 6 << BITS_FOR_OCCURRENCE_IDX;
23 |
24 | // this actually returns a number, but it is always used as a string (object prop key)
25 | export function getKeyForAutomaticLookahead(
26 | ruleIdx: number,
27 | dslMethodIdx: number,
28 | occurrence: number,
29 | ): number {
30 | return occurrence | dslMethodIdx | ruleIdx;
31 | }
32 |
33 | const BITS_START_FOR_ALT_IDX = 32 - BITS_FOR_ALT_IDX;
34 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/grammar/resolver.ts:
--------------------------------------------------------------------------------
1 | import {
2 | IParserUnresolvedRefDefinitionError,
3 | ParserDefinitionErrorType,
4 | } from "../parser/parser.js";
5 | import { forEach, values } from "lodash-es";
6 | import { GAstVisitor, NonTerminal, Rule } from "@chevrotain/gast";
7 | import {
8 | IGrammarResolverErrorMessageProvider,
9 | IParserDefinitionError,
10 | } from "./types.js";
11 |
12 | export function resolveGrammar(
13 | topLevels: Record,
14 | errMsgProvider: IGrammarResolverErrorMessageProvider,
15 | ): IParserDefinitionError[] {
16 | const refResolver = new GastRefResolverVisitor(topLevels, errMsgProvider);
17 | refResolver.resolveRefs();
18 | return refResolver.errors;
19 | }
20 |
21 | export class GastRefResolverVisitor extends GAstVisitor {
22 | public errors: IParserUnresolvedRefDefinitionError[] = [];
23 | private currTopLevel: Rule;
24 |
25 | constructor(
26 | private nameToTopRule: Record,
27 | private errMsgProvider: IGrammarResolverErrorMessageProvider,
28 | ) {
29 | super();
30 | }
31 |
32 | public resolveRefs(): void {
33 | forEach(values(this.nameToTopRule), (prod) => {
34 | this.currTopLevel = prod;
35 | prod.accept(this);
36 | });
37 | }
38 |
39 | public visitNonTerminal(node: NonTerminal): void {
40 | const ref = this.nameToTopRule[node.nonTerminalName];
41 |
42 | if (!ref) {
43 | const msg = this.errMsgProvider.buildRuleNotFoundError(
44 | this.currTopLevel,
45 | node,
46 | );
47 | this.errors.push({
48 | message: msg,
49 | type: ParserDefinitionErrorType.UNRESOLVED_SUBRULE_REF,
50 | ruleName: this.currTopLevel.name,
51 | unresolvedRefName: node.nonTerminalName,
52 | });
53 | } else {
54 | node.referencedRule = ref;
55 | }
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/parser/traits/context_assist.ts:
--------------------------------------------------------------------------------
1 | import {
2 | ISyntacticContentAssistPath,
3 | IToken,
4 | ITokenGrammarPath,
5 | TokenType,
6 | } from "@chevrotain/types";
7 | import {
8 | NextAfterTokenWalker,
9 | nextPossibleTokensAfter,
10 | } from "../../grammar/interpreter.js";
11 | import { first, isUndefined } from "lodash-es";
12 | import { MixedInParser } from "./parser_traits.js";
13 |
14 | export class ContentAssist {
15 | initContentAssist() {}
16 |
17 | public computeContentAssist(
18 | this: MixedInParser,
19 | startRuleName: string,
20 | precedingInput: IToken[],
21 | ): ISyntacticContentAssistPath[] {
22 | const startRuleGast = this.gastProductionsCache[startRuleName];
23 |
24 | if (isUndefined(startRuleGast)) {
25 | throw Error(`Rule ->${startRuleName}<- does not exist in this grammar.`);
26 | }
27 |
28 | return nextPossibleTokensAfter(
29 | [startRuleGast],
30 | precedingInput,
31 | this.tokenMatcher,
32 | this.maxLookahead,
33 | );
34 | }
35 |
36 | // TODO: should this be a member method or a utility? it does not have any state or usage of 'this'...
37 | // TODO: should this be more explicitly part of the public API?
38 | public getNextPossibleTokenTypes(
39 | this: MixedInParser,
40 | grammarPath: ITokenGrammarPath,
41 | ): TokenType[] {
42 | const topRuleName = first(grammarPath.ruleStack)!;
43 | const gastProductions = this.getGAstProductions();
44 | const topProduction = gastProductions[topRuleName];
45 | const nextPossibleTokenTypes = new NextAfterTokenWalker(
46 | topProduction,
47 | grammarPath,
48 | ).startWalking();
49 | return nextPossibleTokenTypes;
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/parser/traits/parser_traits.ts:
--------------------------------------------------------------------------------
1 | import { ErrorHandler } from "./error_handler.js";
2 | import { LexerAdapter } from "./lexer_adapter.js";
3 | import { LooksAhead } from "./looksahead.js";
4 | import { RecognizerApi } from "./recognizer_api.js";
5 | import { RecognizerEngine } from "./recognizer_engine.js";
6 | import { Recoverable } from "./recoverable.js";
7 | import { TreeBuilder } from "./tree_builder.js";
8 | import {
9 | CstParser as CstParserConstructorImpel,
10 | EmbeddedActionsParser as EmbeddedActionsParserConstructorImpl,
11 | Parser as ParserConstructorImpel,
12 | } from "../parser.js";
13 | import * as defs from "@chevrotain/types";
14 | import { ContentAssist } from "./context_assist.js";
15 | import { GastRecorder } from "./gast_recorder.js";
16 | import { PerformanceTracer } from "./perf_tracer.js";
17 |
18 | /**
19 | * This Type combines all the Parser traits.
20 | * It is used in all traits in the "this type assertion"
21 | * - https://github.com/Microsoft/TypeScript/wiki/What%27s-new-in-TypeScript#specifying-the-type-of-this-for-functions
22 | * This enables strong Type Checks inside trait methods that invoke methods from other traits.
23 | * This pattern is very similar to "self types" in Scala.
24 | * - https://docs.scala-lang.org/tour/self-types.html
25 | */
26 | export type MixedInParser = ParserConstructorImpel &
27 | ErrorHandler &
28 | LexerAdapter &
29 | LooksAhead &
30 | RecognizerApi &
31 | RecognizerEngine &
32 | Recoverable &
33 | TreeBuilder &
34 | ContentAssist &
35 | GastRecorder &
36 | PerformanceTracer;
37 |
38 | interface MixedInCstParserConstructor {
39 | new (
40 | tokenVocabulary: defs.TokenVocabulary,
41 | config?: defs.IParserConfig,
42 | ): defs.CstParser;
43 | }
44 |
45 | export const CstParser: MixedInCstParserConstructor = <any>(
46 | CstParserConstructorImpel
47 | );
48 |
49 | interface MixedInEmbeddedActionsParserConstructor {
50 | new (
51 | tokenVocabulary: defs.TokenVocabulary,
52 | config?: defs.IParserConfig,
53 | ): defs.EmbeddedActionsParser;
54 | }
55 |
56 | export const EmbeddedActionsParser: MixedInEmbeddedActionsParserConstructor = <
57 | any
58 | >EmbeddedActionsParserConstructorImpl;
59 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/parser/traits/perf_tracer.ts:
--------------------------------------------------------------------------------
1 | import { IParserConfig } from "@chevrotain/types";
2 | import { has } from "lodash-es";
3 | import { timer } from "@chevrotain/utils";
4 | import { MixedInParser } from "./parser_traits.js";
5 | import { DEFAULT_PARSER_CONFIG } from "../parser.js";
6 |
7 | /**
8 | * Trait responsible for tracing the parser's initialization performance.
9 | */
10 | export class PerformanceTracer {
11 | traceInitPerf: boolean | number;
12 | traceInitMaxIdent: number;
13 | traceInitIndent: number;
14 |
15 | initPerformanceTracer(config: IParserConfig) {
16 | if (has(config, "traceInitPerf")) {
17 | const userTraceInitPerf = config.traceInitPerf;
18 | const traceIsNumber = typeof userTraceInitPerf === "number";
19 | this.traceInitMaxIdent = traceIsNumber
20 | ? userTraceInitPerf
21 | : Infinity;
22 | this.traceInitPerf = traceIsNumber
23 | ? userTraceInitPerf > 0
24 | : (userTraceInitPerf as boolean); // assumes end user provides the correct config value/type
25 | } else {
26 | this.traceInitMaxIdent = 0;
27 | this.traceInitPerf = DEFAULT_PARSER_CONFIG.traceInitPerf;
28 | }
29 |
30 | this.traceInitIndent = -1;
31 | }
32 |
33 | TRACE_INIT(this: MixedInParser, phaseDesc: string, phaseImpl: () => T): T {
34 | // No need to optimize this using NOOP pattern because
35 | // It is not called in a hot spot...
36 | if (this.traceInitPerf === true) {
37 | this.traceInitIndent++;
38 | const indent = new Array(this.traceInitIndent + 1).join("\t");
39 | if (this.traceInitIndent < this.traceInitMaxIdent) {
40 | console.log(`${indent}--> <${phaseDesc}>`);
41 | }
42 | const { time, value } = timer(phaseImpl);
43 | /* istanbul ignore next - Difficult to reproduce specific performance behavior (>10ms) in tests */
44 | const traceMethod = time > 10 ? console.warn : console.log;
45 | if (this.traceInitIndent < this.traceInitMaxIdent) {
46 | traceMethod(`${indent}<-- <${phaseDesc}> time: ${time}ms`);
47 | }
48 | this.traceInitIndent--;
49 | return value;
50 | } else {
51 | return phaseImpl();
52 | }
53 | }
54 | }
55 |
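For context, `traceInitPerf` is a public `IParserConfig` option, so the trait above is what produces the "-->"/"<--" log lines during parser construction. A minimal usage sketch (token and parser names below are illustrative):

import { createToken, CstParser } from "chevrotain";

const Semi = createToken({ name: "Semi", pattern: /;/ });

class TracedParser extends CstParser {
  constructor() {
    // `true` traces every nested phase; a number limits how deep the printed nesting goes
    super([Semi], { traceInitPerf: true });
    this.performSelfAnalysis();
  }

  statement = this.RULE("statement", () => {
    this.CONSUME(Semi);
  });
}

// logs lines such as "--> <performSelfAnalysis>" and "<-- <performSelfAnalysis> time: <n>ms"
new TracedParser();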
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/parser/types.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Helper common type definitions
3 |  * Particularly useful when extending the public API
4 | * to include additional **internal** properties.
5 | */
6 | import { IParserConfig, ParserMethod } from "@chevrotain/types";
7 |
8 | export type ParserMethodInternal<ARGS extends unknown[], R> = ParserMethod<
9 | ARGS,
10 | R
11 | > & {
12 | ruleName: string;
13 | originalGrammarAction: Function;
14 | };
15 |
16 | export type IParserConfigInternal = IParserConfig & { outputCst: boolean };
17 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/parse/parser/utils/apply_mixins.ts:
--------------------------------------------------------------------------------
1 | export function applyMixins(derivedCtor: any, baseCtors: any[]) {
2 | baseCtors.forEach((baseCtor) => {
3 | const baseProto = baseCtor.prototype;
4 | Object.getOwnPropertyNames(baseProto).forEach((propName) => {
5 | if (propName === "constructor") {
6 | return;
7 | }
8 |
9 | const basePropDescriptor = Object.getOwnPropertyDescriptor(
10 | baseProto,
11 | propName,
12 | );
13 | // Handle Accessors
14 | if (
15 | basePropDescriptor &&
16 | (basePropDescriptor.get || basePropDescriptor.set)
17 | ) {
18 | Object.defineProperty(
19 | derivedCtor.prototype,
20 | propName,
21 | basePropDescriptor,
22 | );
23 | } else {
24 | derivedCtor.prototype[propName] = baseCtor.prototype[propName];
25 | }
26 | });
27 | });
28 | }
29 |
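A brief sketch of how a prototype-copying helper like `applyMixins` is used together with TypeScript declaration merging (the classes below are illustrative, not the actual parser traits):

import { applyMixins } from "./apply_mixins.js";

class CanWalk {
  walk(): string {
    return "walking";
  }
}

class CanSwim {
  swim(): string {
    return "swimming";
  }
}

// the interface declaration adds the mixed-in members at the type level,
// while applyMixins copies them onto the prototype at runtime
class Amphibian {}
interface Amphibian extends CanWalk, CanSwim {}
applyMixins(Amphibian, [CanWalk, CanSwim]);

const frog = new Amphibian();
console.log(frog.walk(), frog.swim()); // "walking swimming"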
--------------------------------------------------------------------------------
/packages/chevrotain/src/scan/lexer_errors_public.ts:
--------------------------------------------------------------------------------
1 | import { ILexerErrorMessageProvider, IToken } from "@chevrotain/types";
2 |
3 | export const defaultLexerErrorProvider: ILexerErrorMessageProvider = {
4 | buildUnableToPopLexerModeMessage(token: IToken): string {
5 | return `Unable to pop Lexer Mode after encountering Token ->${token.image}<- The Mode Stack is empty`;
6 | },
7 |
8 | buildUnexpectedCharactersMessage(
9 | fullText: string,
10 | startOffset: number,
11 | length: number,
12 | line?: number,
13 | column?: number,
14 | mode?: string,
15 | ): string {
16 | return (
17 | `unexpected character: ->${fullText.charAt(
18 | startOffset,
19 | )}<- at offset: ${startOffset},` + ` skipped ${length} characters.`
20 | );
21 | },
22 | };
23 |
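The default provider above can be swapped out via the Lexer's `errorMessageProvider` config option. A sketch, assuming the relevant types are re-exported from the public "chevrotain" entry point (token and message wording are illustrative):

import { createToken, Lexer, ILexerErrorMessageProvider } from "chevrotain";

const myErrorProvider: ILexerErrorMessageProvider = {
  buildUnableToPopLexerModeMessage(token) {
    return `cannot pop lexer mode at ->${token.image}<-`;
  },
  buildUnexpectedCharactersMessage(fullText, startOffset, length) {
    return `bad character ->${fullText.charAt(startOffset)}<- at offset ${startOffset} (skipped ${length})`;
  },
};

const Digits = createToken({ name: "Digits", pattern: /\d+/ });
const lexer = new Lexer([Digits], { errorMessageProvider: myErrorProvider });

// "x" cannot be matched, so the custom unexpected-characters message is produced
console.log(lexer.tokenize("12x34").errors[0].message);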
--------------------------------------------------------------------------------
/packages/chevrotain/src/scan/reg_exp_parser.ts:
--------------------------------------------------------------------------------
1 | import {
2 | Alternative,
3 | Assertion,
4 | Atom,
5 | Disjunction,
6 | RegExpParser,
7 | RegExpPattern,
8 | } from "@chevrotain/regexp-to-ast";
9 |
10 | let regExpAstCache: { [regex: string]: RegExpPattern } = {};
11 | const regExpParser = new RegExpParser();
12 |
13 | // this should be moved to regexp-to-ast
14 | export type ASTNode =
15 | | RegExpPattern
16 | | Disjunction
17 | | Alternative
18 | | Assertion
19 | | Atom;
20 |
21 | export function getRegExpAst(regExp: RegExp): RegExpPattern {
22 | const regExpStr = regExp.toString();
23 | if (regExpAstCache.hasOwnProperty(regExpStr)) {
24 | return regExpAstCache[regExpStr];
25 | } else {
26 | const regExpAst = regExpParser.pattern(regExpStr);
27 | regExpAstCache[regExpStr] = regExpAst;
28 | return regExpAst;
29 | }
30 | }
31 |
32 | export function clearRegExpParserCache() {
33 | regExpAstCache = {};
34 | }
35 |
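A short sketch of the caching behavior above: the AST is keyed by the RegExp's string form, so two structurally identical literals share one parse until the cache is cleared (assuming both functions are imported from this module):

import { getRegExpAst, clearRegExpParserCache } from "./reg_exp_parser.js";

const first = getRegExpAst(/a|b/);
const second = getRegExpAst(/a|b/); // cache hit: the same object is returned
console.log(first === second); // true

clearRegExpParserCache();
console.log(getRegExpAst(/a|b/) === first); // false - re-parsed after the cache was cleared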
--------------------------------------------------------------------------------
/packages/chevrotain/src/scan/tokens_constants.ts:
--------------------------------------------------------------------------------
1 | export const EOF_TOKEN_TYPE = 1;
2 |
--------------------------------------------------------------------------------
/packages/chevrotain/src/text/range.ts:
--------------------------------------------------------------------------------
1 | export interface IRange {
2 | start: number;
3 | end: number;
4 |
5 | contains(num: number): boolean;
6 |
7 | containsRange(other: IRange): boolean;
8 |
9 | isContainedInRange(other: IRange): boolean;
10 |
11 | strictlyContainsRange(other: IRange): boolean;
12 |
13 | isStrictlyContainedInRange(other: IRange): boolean;
14 | }
15 |
16 | export class Range implements IRange {
17 | constructor(
18 | public start: number,
19 | public end: number,
20 | ) {
21 | if (!isValidRange(start, end)) {
22 | throw new Error("INVALID RANGE");
23 | }
24 | }
25 |
26 | contains(num: number): boolean {
27 | return this.start <= num && this.end >= num;
28 | }
29 |
30 | containsRange(other: IRange): boolean {
31 | return this.start <= other.start && this.end >= other.end;
32 | }
33 |
34 | isContainedInRange(other: IRange): boolean {
35 | return other.containsRange(this);
36 | }
37 |
38 | strictlyContainsRange(other: IRange): boolean {
39 | return this.start < other.start && this.end > other.end;
40 | }
41 |
42 | isStrictlyContainedInRange(other: IRange): boolean {
43 | return other.strictlyContainsRange(this);
44 | }
45 | }
46 |
47 | export function isValidRange(start: number, end: number): boolean {
48 | return !(start < 0 || end < start);
49 | }
50 |
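A brief usage sketch of the Range semantics defined above (both ends are inclusive, and the "strict" variants require proper containment):

import { Range } from "./range.js";

const outer = new Range(0, 10);
const inner = new Range(2, 8);

console.log(outer.contains(10)); // true - end offsets are inclusive
console.log(outer.containsRange(inner)); // true
console.log(outer.strictlyContainsRange(inner)); // true
console.log(outer.strictlyContainsRange(new Range(0, 10))); // false - equal bounds are not strict
console.log(inner.isContainedInRange(outer)); // true

// new Range(5, 3) would throw "INVALID RANGE" because end < start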
--------------------------------------------------------------------------------
/packages/chevrotain/src/version.ts:
--------------------------------------------------------------------------------
1 | // needs a separate module as this is required inside chevrotain productive code
2 | // and also in the entry point for webpack(api.ts).
3 | // A separate file avoids cyclic dependencies and webpack errors.
4 | export const VERSION = "11.0.3";
5 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/deprecation_spec.ts:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { createToken, Parser } from "../src/api.js";
3 | import { CstParser } from "../src/parse/parser/traits/parser_traits.js";
4 |
5 | describe("Chevrotain's runtime deprecation checks", () => {
6 | it("Will throw an error if someone tries to use the deprecated Parser class", () => {
7 | expect(() => new Parser()).to.throw("The Parser class has been deprecated");
8 | expect(() => new Parser()).to.throw("CstParser or EmbeddedActionsParser");
9 | expect(() => new Parser()).to.throw(
10 | "https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_7-0-0",
11 | );
12 | });
13 |
14 | it("Will throw an error if someone tries to use the deprecated Parser class", () => {
15 | const tokA = createToken({ name: "foo", pattern: "bar" });
16 | class StaticSelfAnalysisParser extends CstParser {
17 | constructor() {
18 | super([tokA]);
19 | (CstParser as any).performSelfAnalysis();
20 | }
21 | }
22 |
23 | expect(() => new StaticSelfAnalysisParser()).to.throw(
24 | "The **static** `performSelfAnalysis` method has been deprecated",
25 | );
26 | });
27 | });
28 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/full_flow/ecma_quirks/ecma_quirks_spec.ts:
--------------------------------------------------------------------------------
1 | import { expect } from "chai";
2 | import { parse } from "./ecma_quirks.js";
3 |
4 | describe("ECMAScript Quirks Example (ScannerLess Mode)", () => {
5 | it("can parse a valid text successfully", () => {
6 | const result = parse("return ;");
7 | expect(result.errors).to.be.empty;
8 | });
9 |
10 | it("can parse a valid text successfully #2", () => {
11 | const result = parse("return 1;");
12 | expect(result.errors).to.be.empty;
13 | });
14 |
15 | it("can parse a valid text successfully #3 - Division", () => {
16 | const result = parse("return 8 / 2 ;");
17 | expect(result.errors).to.be.empty;
18 | });
19 |
20 | it("can parse a valid text successfully #3 - RegExp", () => {
21 | const result = parse("return /123/ ;");
22 | expect(result.errors).to.be.empty;
23 | });
24 |
25 | it("can parse a valid text successfully #3 - RegExp and Division", () => {
26 | const result = parse("return /123/ / 5 ;");
27 | expect(result.errors).to.be.empty;
28 | });
29 | });
30 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/full_flow/error_recovery/sql_statements/sql_recovery_tokens.ts:
--------------------------------------------------------------------------------
1 | export class VirtualToken {
2 | static PATTERN = /NA/;
3 | }
4 |
5 | export class IdentTok {
6 | static PATTERN = /NA/;
7 | }
8 |
9 | // DOCS: once again an example of Token type hierarchies
10 | export class LiteralTok {
11 | static PATTERN = /NA/;
12 | }
13 | export class StringTok extends LiteralTok {}
14 | export class IntTok extends LiteralTok {}
15 |
16 | export class BigIntTok extends IntTok {}
17 |
18 | export class Keyword {
19 | static PATTERN = /NA/;
20 | }
21 |
22 | export class CreateTok extends Keyword {}
23 |
24 | export class TableTok extends Keyword {}
25 |
26 | export class InsertTok extends Keyword {}
27 |
28 | export class IntoTok extends Keyword {}
29 |
30 | export class DeleteTok extends Keyword {}
31 |
32 | export class FromTok extends Keyword {}
33 |
34 | export class LParenTok {
35 | static PATTERN = /NA/;
36 | }
37 |
38 | export class RParenTok {
39 | static PATTERN = /NA/;
40 | }
41 |
42 | export class CommaTok {
43 | static PATTERN = /NA/;
44 | }
45 |
46 | export class SemiColonTok {
47 | static PATTERN = /NA/;
48 | }
49 |
50 | export class DotTok {
51 | static PATTERN = /NA/;
52 | }
53 |
54 | // virtual tokens for building the ParseTree; these just give a "type/specification/categorization" to a ParseTree
55 | export class STATEMENTS extends VirtualToken {}
56 | export class CREATE_STMT extends VirtualToken {}
57 | export class INSERT_STMT extends VirtualToken {}
58 | export class DELETE_STMT extends VirtualToken {}
59 | export class QUALIFIED_NAME extends VirtualToken {}
60 | export class DOTS extends VirtualToken {}
61 | export class COMMAS extends VirtualToken {}
62 |
63 | // some "INVALID" virtual tokens can be defined to output a more "precise" ParseTree in case of an re-sync error
64 | // defining them as subclasses of the "valid" virtual tokens can making handling of invalid input easier in whatever
65 | // component which consumes the output ParseTree in order to build some Ast or other data structure.
66 | export class INVALID_DDL extends VirtualToken {}
67 | export class INVALID_CREATE_STMT extends CREATE_STMT {}
68 | export class INVALID_INSERT_STMT extends INSERT_STMT {}
69 | export class INVALID_DELETE_STMT extends DELETE_STMT {}
70 | export class INVALID_QUALIFIED_NAME extends QUALIFIED_NAME {}
71 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/full_flow/error_recovery/switch_case/switchcase_recovery_tokens.ts:
--------------------------------------------------------------------------------
1 | export class IdentTok {
2 | static PATTERN = /NA/;
3 | }
4 | export class LiteralTok {
5 | static PATTERN = /NA/;
6 | }
7 | export class IntTok extends LiteralTok {}
8 | export class StringTok extends LiteralTok {}
9 | export class Keyword {
10 | static PATTERN = /NA/;
11 | }
12 |
13 | export class SwitchTok extends Keyword {}
14 |
15 | export class CaseTok extends Keyword {}
16 |
17 | export class ReturnTok extends Keyword {}
18 |
19 | export class LParenTok {
20 | static PATTERN = /NA/;
21 | }
22 |
23 | export class RParenTok {
24 | static PATTERN = /NA/;
25 | }
26 |
27 | export class LCurlyTok {
28 | static PATTERN = /NA/;
29 | }
30 |
31 | export class RCurlyTok {
32 | static PATTERN = /NA/;
33 | }
34 |
35 | export class ColonTok {
36 | static PATTERN = /NA/;
37 | }
38 |
39 | export class SemiColonTok {
40 | static PATTERN = /NA/;
41 | }
42 |
43 | // to force some branches for coverage
44 | export class DoubleSemiColonTok extends SemiColonTok {}
45 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/full_flow/parse_tree.ts:
--------------------------------------------------------------------------------
1 | import { compact, isFunction, isUndefined } from "lodash-es";
2 | import { IToken, TokenType } from "@chevrotain/types";
3 |
4 | export class ParseTree {
5 | getImage(): string {
6 | return this.payload.image;
7 | }
8 |
9 | getLine(): number | undefined {
10 | return this.payload.startLine;
11 | }
12 |
13 | getColumn(): number | undefined {
14 | return this.payload.startColumn;
15 | }
16 |
17 | constructor(
18 | public payload: IToken,
19 | public children: ParseTree[] = [],
20 | ) {}
21 | }
22 |
23 | /**
24 | * convenience factory for ParseTrees
25 | *
26 |  * @param {TokenType | IToken} tokenOrTokenClass The Token instance to be used as the root node, or a constructor Function
27 |  *        that will create the root node.
28 |  * @param {ParseTree[]} children The sub nodes of the ParseTree to be built
29 | * @returns {ParseTree}
30 | */
31 | export function PT(
32 | tokenOrTokenClass: TokenType | IToken,
33 | children: ParseTree[] = [],
34 | ): ParseTree | null {
35 | const childrenCompact = compact(children);
36 |
37 | if ((<IToken>tokenOrTokenClass).image !== undefined) {
38 | return new ParseTree(<IToken>tokenOrTokenClass, childrenCompact);
39 | } else if (isFunction(tokenOrTokenClass)) {
40 | return new ParseTree(new (<any>tokenOrTokenClass)(), childrenCompact);
41 | } else if (isUndefined(tokenOrTokenClass) || tokenOrTokenClass === null) {
42 | return null;
43 | } else {
44 | throw `Invalid parameter ${tokenOrTokenClass} to PT factory.`;
45 | }
46 | }
47 |
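A small sketch of the `PT` factory in use, assuming a sibling spec imports it from this module; token objects become the payload directly, while passing a token class would instantiate it as a virtual root node:

import { IToken } from "@chevrotain/types";
import { PT } from "./parse_tree.js";

const idTok = { image: "price", startLine: 1, startColumn: 1 } as IToken;
const intTok = { image: "42", startLine: 1, startColumn: 9 } as IToken;

// null/undefined children are compacted away by the factory
const root = PT(idTok, [PT(intTok)!]);

console.log(root!.getImage()); // "price"
console.log(root!.children[0].getImage()); // "42"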
--------------------------------------------------------------------------------
/packages/chevrotain/test/parse/grammar/resolver_spec.ts:
--------------------------------------------------------------------------------
1 | import { GastRefResolverVisitor } from "../../../src/parse/grammar/resolver.js";
2 | import { ParserDefinitionErrorType } from "../../../src/parse/parser/parser.js";
3 | import { NonTerminal, Rule } from "@chevrotain/gast";
4 | import { defaultGrammarResolverErrorProvider } from "../../../src/parse/errors_public.js";
5 | import { expect } from "chai";
6 |
7 | describe("The RefResolverVisitor", () => {
8 | it("will fail when trying to resolve a ref to a grammar rule that does not exist", () => {
9 | const ref = new NonTerminal({ nonTerminalName: "missingRule" });
10 | const topLevel = new Rule({ name: "TOP", definition: [ref] });
11 | const topLevelRules: { [ruleName: string]: Rule } = {};
12 | topLevelRules["TOP"] = topLevel;
13 | const resolver = new GastRefResolverVisitor(
14 | topLevelRules,
15 | defaultGrammarResolverErrorProvider,
16 | );
17 | resolver.resolveRefs();
18 | expect(resolver.errors).to.have.lengthOf(1);
19 | expect(resolver.errors[0].message).to.contain(
20 | "Invalid grammar, reference to a rule which is not defined: ->missingRule<-",
21 | );
22 | expect(resolver.errors[0].message).to.contain(
23 | "inside top level rule: ->TOP<-",
24 | );
25 | expect(resolver.errors[0].type).to.equal(
26 | ParserDefinitionErrorType.UNRESOLVED_SUBRULE_REF,
27 | );
28 | expect(resolver.errors[0].ruleName).to.equal("TOP");
29 | });
30 | });
31 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/parse/recognizer/recognizer_config_spec.ts:
--------------------------------------------------------------------------------
1 | import {
2 | CstParser,
3 | EmbeddedActionsParser,
4 | } from "../../../src/parse/parser/traits/parser_traits.js";
5 | import { createToken } from "../../../src/scan/tokens_public.js";
6 | import { expect } from "chai";
7 |
8 | describe("The Recognizer's Configuration", () => {
9 | it("default config values - empty config", () => {
10 | const A = createToken({ name: "A" });
11 |
12 | class InvalidNodeLocationTrackingOption extends CstParser {
13 | constructor() {
14 | super([A], { nodeLocationTracking: <any>"oops" });
15 | }
16 | }
17 |
18 | expect(() => new InvalidNodeLocationTrackingOption()).to.throw(
19 | 'Invalid config option: "oops"',
20 | );
21 | });
22 |
23 | it("default config values - empty config", () => {
24 | const A = createToken({ name: "A" });
25 |
26 | class EmptyConfigParser extends EmbeddedActionsParser {
27 | constructor() {
28 | super([A], {});
29 | }
30 | }
31 |
32 | const parser = new EmptyConfigParser();
33 | expect((<any>parser).recoveryEnabled).to.be.false;
34 | expect((<any>parser).maxLookahead).to.equal(3);
35 | expect((<any>parser).nodeLocationTracking).to.be.equal("none");
36 | });
37 |
38 | it("default config values - no config", () => {
39 | const A = createToken({ name: "A" });
40 |
41 | class NoConfigParser extends EmbeddedActionsParser {
42 | constructor() {
43 | super([A]);
44 | }
45 | }
46 |
47 | const parser = new NoConfigParser();
48 | expect((<any>parser).recoveryEnabled).to.be.false;
49 | expect((<any>parser).maxLookahead).to.equal(3);
50 | expect((<any>parser).nodeLocationTracking).to.be.equal("none");
51 | });
52 |
53 | it("default config values - no config", () => {
54 | const A = createToken({ name: "A" });
55 |
56 | const invalidConfig = { ignoredIssues: {} };
57 | class IgnoredIssuesParser extends EmbeddedActionsParser {
58 | constructor() {
59 | super([A], invalidConfig as any);
60 | }
61 | }
62 | expect(() => new IgnoredIssuesParser()).to.throw(
63 | "The IParserConfig property has been deprecated",
64 | );
65 | });
66 | });
67 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/parse/traits/perf_tracer_spec.ts:
--------------------------------------------------------------------------------
1 | import { createToken } from "../../../src/scan/tokens_public.js";
2 | import { EmbeddedActionsParser } from "../../../src/parse/parser/traits/parser_traits.js";
3 | import { expect } from "chai";
4 | import { SinonSpy, spy } from "sinon";
5 |
6 | describe("Chevrotain's Init Performance Tracing", () => {
7 | let consoleLogSpy: SinonSpy;
8 |
9 | beforeEach(() => {
10 | consoleLogSpy = spy(console, "log");
11 | });
12 |
13 | afterEach(() => {
14 | // @ts-ignore
15 | console.log.restore();
16 | });
17 |
18 | let TracerParserConstructor: any;
19 |
20 | before(() => {
21 | const PlusTok = createToken({ name: "PlusTok" });
22 |
23 | class TraceParser extends EmbeddedActionsParser {
24 | constructor(traceInitVal: boolean | number) {
25 | super([PlusTok], {
26 | traceInitPerf: traceInitVal,
27 | });
28 | this.performSelfAnalysis();
29 | }
30 |
31 | public topRule = this.RULE("topRule", () => {
32 | this.CONSUME(PlusTok);
33 | });
34 | }
35 |
36 | TracerParserConstructor = TraceParser;
37 | });
38 |
39 | it("Will not trace with traceInitPerf = false", () => {
40 | new TracerParserConstructor(false);
41 |
42 | expect(consoleLogSpy).to.have.not.been.called;
43 | });
44 |
45 | it("Will trace nested with traceInitPerf = true", () => {
46 | new TracerParserConstructor(true);
47 |
48 | expect(consoleLogSpy).to.have.been.called;
49 | expect(consoleLogSpy.args[0][0]).to.include("--> ");
50 | expect(consoleLogSpy.args[1][0]).to.include("\t--> ");
51 | });
52 |
53 | it("Will trace one level with traceInitPerf = 1", () => {
54 | new TracerParserConstructor(1);
55 |
56 | expect(consoleLogSpy).to.have.been.called;
57 | expect(consoleLogSpy.args[0][0]).to.include("--> ");
58 | expect(consoleLogSpy.args[1][0]).to.not.include("\t");
59 | });
60 |
61 | it("Will trace 2 levels with traceInitPerf = 2", () => {
62 | new TracerParserConstructor(2);
63 |
64 | expect(consoleLogSpy).to.have.been.called;
65 | expect(consoleLogSpy.args[0][0]).to.include("--> ");
66 | expect(consoleLogSpy.args[1][0]).to.include("\t");
67 | });
68 | });
69 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/scan/first_char_spec.ts:
--------------------------------------------------------------------------------
1 | import { getRegExpAst } from "../../src/scan/reg_exp_parser.js";
2 | import { firstCharOptimizedIndices } from "../../src/scan/reg_exp.js";
3 | import { expect } from "chai";
4 |
5 | describe("The Chevrotain Lexer First Char Optimization", () => {
6 | it("considers ignoreCase flag", () => {
7 | const ast = getRegExpAst(/a/i);
8 | const firstChars = firstCharOptimizedIndices(
9 | ast.value,
10 | {},
11 | ast.flags.ignoreCase,
12 | );
13 | expect(firstChars).to.deep.equal([65, 97]);
14 | });
15 |
16 | it("considers ignoreCase in range", () => {
17 | const ast = getRegExpAst(/[a-b]/i);
18 | const firstChars = firstCharOptimizedIndices(
19 | ast.value,
20 | {},
21 | ast.flags.ignoreCase,
22 | );
23 | expect(firstChars).to.deep.equal([65, 66, 97, 98]);
24 | });
25 |
26 | it("Handles Large CharCode ranges", () => {
27 | const ast = getRegExpAst(/[\u0100-\u04C4]/);
28 | const firstChars = firstCharOptimizedIndices(
29 | ast.value,
30 | {},
31 | ast.flags.ignoreCase,
32 | );
33 | expect(firstChars).to.deep.equal([256, 257, 258, 259]);
34 | });
35 |
36 | it("Handles Large CharCode ranges #2", () => {
37 | const ast = getRegExpAst(/[\u00ff-\u04C4]/);
38 | const firstChars = firstCharOptimizedIndices(
39 | ast.value,
40 | {},
41 | ast.flags.ignoreCase,
42 | );
43 | expect(firstChars).to.deep.equal([255, 256, 257, 258, 259]);
44 | });
45 | });
46 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/scan/lexer_errors_public_spec.ts:
--------------------------------------------------------------------------------
1 | import { defaultLexerErrorProvider } from "../../src/scan/lexer_errors_public.js";
2 | import { IToken } from "@chevrotain/types";
3 | import { expect } from "chai";
4 |
5 | describe("The Chevrotain default lexer error message provider", () => {
6 | it("Will build unexpected character message", () => {
7 | const input = "1 LETTERS EXIT_LETTERS +";
8 | const msg = defaultLexerErrorProvider.buildUnexpectedCharactersMessage(
9 | input,
10 | 23,
11 | 1,
12 | 0,
13 | 23,
14 | "example_mode",
15 | );
16 |
17 | expect(msg).to.equal(
18 | "unexpected character: ->+<- at offset: 23, skipped 1 characters.",
19 | );
20 | });
21 |
22 | it("Will build an unable to pop lexer mode error message ", () => {
23 | const popToken: IToken = {
24 | image: "EXIT_NUMBERS",
25 | startOffset: 3,
26 | } as IToken; // the token type is not relevant for this test
27 |
28 | const msg =
29 | defaultLexerErrorProvider.buildUnableToPopLexerModeMessage(popToken);
30 |
31 | expect(msg).to.equal(
32 | "Unable to pop Lexer Mode after encountering Token ->EXIT_NUMBERS<- The Mode Stack is empty",
33 | );
34 | });
35 | });
36 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/scan/perf_tracer_spec.ts:
--------------------------------------------------------------------------------
1 | import { Lexer } from "../../src/scan/lexer_public.js";
2 | import { expect } from "chai";
3 | import { SinonSpy, spy } from "sinon";
4 |
5 | describe("Chevrotain's Lexer Init Performance Tracing", () => {
6 | let consoleLogSpy: SinonSpy;
7 |
8 | beforeEach(() => {
9 | consoleLogSpy = spy(console, "log");
10 | });
11 |
12 | afterEach(() => {
13 | // @ts-ignore
14 | console.log.restore();
15 | });
16 |
17 | it("Will not trace with traceInitPerf = false", () => {
18 | new Lexer([], { traceInitPerf: false });
19 |
20 | expect(consoleLogSpy).to.have.not.been.called;
21 | });
22 |
23 | it("Will trace nested with traceInitPerf = true", () => {
24 | new Lexer([], { traceInitPerf: true });
25 |
26 | expect(consoleLogSpy).to.have.been.called;
27 | expect(consoleLogSpy.args[0][0]).to.include("--> ");
28 | expect(consoleLogSpy.args[1][0]).to.include(
29 | "\t--> ",
30 | );
31 | });
32 |
33 | it("Will trace one level with traceInitPerf = 1", () => {
34 | new Lexer([], { traceInitPerf: 1 });
35 |
36 | expect(consoleLogSpy).to.have.been.called;
37 | expect(consoleLogSpy.args[0][0]).to.include("--> ");
38 | expect(consoleLogSpy.args[1][0]).to.not.include("\t");
39 | });
40 |
41 | it("Will trace 2 levels with traceInitPerf = 2", () => {
42 | new Lexer([], { traceInitPerf: 2 });
43 |
44 | expect(consoleLogSpy).to.have.been.called;
45 | expect(consoleLogSpy.args[0][0]).to.include("--> ");
46 | expect(consoleLogSpy.args[1][0]).to.include("\t");
47 | });
48 | });
49 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/scan/skip_validations_spec.ts:
--------------------------------------------------------------------------------
1 | import { Lexer } from "../../src/scan/lexer_public.js";
2 | import { find, flatten } from "lodash-es";
3 | import { expect } from "chai";
4 | import { SinonSpy, spy } from "sinon";
5 |
6 | describe("Chevrotain's Lexer Init Performance Tracing", () => {
7 | let consoleLogSpy: SinonSpy;
8 |
9 | beforeEach(() => {
10 | consoleLogSpy = spy(console, "log");
11 | });
12 |
13 | afterEach(() => {
14 | // @ts-ignore
15 | console.log.restore();
16 | });
17 |
18 | it("Will not skipValidation by default", () => {
19 | new Lexer([], { traceInitPerf: true });
20 |
21 | expect(consoleLogSpy).to.have.been.called;
22 | const consoleArgs = flatten(consoleLogSpy.args);
23 |
24 | const runtimeChecksArg = find(consoleArgs, (item: string) =>
25 | /performRuntimeChecks/.test(item),
26 | );
27 | expect(runtimeChecksArg).to.not.be.undefined;
28 | const warningRuntimeChecksArg = find(consoleArgs, (item: string) =>
29 | /performWarningRuntimeChecks/.test(item),
30 | );
31 | expect(warningRuntimeChecksArg).to.not.be.undefined;
32 | const validateArg = find(consoleArgs, (item: string) =>
33 | /validatePatterns/.test(item),
34 | );
35 | expect(validateArg).to.not.be.undefined;
36 | });
37 |
38 | it("Will avoid running lexer validations when `skipValidations` is enabled", () => {
39 | new Lexer([], { traceInitPerf: true, skipValidations: true });
40 |
41 | expect(consoleLogSpy).to.have.been.called;
42 | const consoleArgs = flatten(consoleLogSpy.args);
43 |
44 | const runtimeChecksArg = find(consoleArgs, (item: string) =>
45 | /performRuntimeChecks/.test(item),
46 | );
47 | expect(runtimeChecksArg).to.be.undefined;
48 | const warningRuntimeChecksArg = find(consoleArgs, (item: string) =>
49 | /performWarningRuntimeChecks/.test(item),
50 | );
51 | expect(warningRuntimeChecksArg).to.be.undefined;
52 | const validateArg = find(consoleArgs, (item: string) =>
53 | /validatePatterns/.test(item),
54 | );
55 | expect(validateArg).to.be.undefined;
56 | });
57 | });
58 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/test.config.mjs:
--------------------------------------------------------------------------------
1 | /* eslint-disable -- config file */
2 | import * as chai from "chai"
3 | import sinonChai from "sinon-chai"
4 |
5 | chai.use(sinonChai)
6 | /* eslint-enable -- config file */
7 |
--------------------------------------------------------------------------------
/packages/chevrotain/test/utils/builders.ts:
--------------------------------------------------------------------------------
1 | import { ITokenConfig, TokenType } from "@chevrotain/types";
2 | import { createToken } from "../../src/scan/tokens_public.js";
3 |
4 | export function createDeferredTokenBuilder(
5 | config: ITokenConfig,
6 | ): () => TokenType {
7 | let tokenCache: TokenType;
8 | return function createTokenOnDemand(): TokenType {
9 | if (tokenCache === undefined) {
10 | tokenCache = createToken(config);
11 | }
12 | return tokenCache;
13 | };
14 | }
15 |
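A usage sketch of the deferred builder above: the TokenType is only created on the first call and memoized afterwards, which keeps token creation out of module-load time in the specs (the token config is illustrative):

import { createDeferredTokenBuilder } from "./builders.js";

const getPlusToken = createDeferredTokenBuilder({ name: "Plus", pattern: /\+/ });

const first = getPlusToken(); // createToken runs here, exactly once
const second = getPlusToken(); // the memoized TokenType is returned

console.log(first === second); // true
console.log(first.name); // "Plus"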
--------------------------------------------------------------------------------
/packages/chevrotain/test/utils/matchers.ts:
--------------------------------------------------------------------------------
1 | import { IToken, TokenType } from "@chevrotain/types";
2 | import { expect } from "chai";
3 |
4 | export function setEquality(actual: any[], expected: any[]): void {
5 | expect(actual).to.deep.include.members(expected);
6 | expect(expected).to.deep.include.members(actual);
7 | expect(expected).to.have.lengthOf(actual.length);
8 | }
9 |
10 | export function createRegularToken(
11 | tokType: TokenType,
12 | image = "",
13 | startOffset = 1,
14 | startLine?: number,
15 | startColumn?: number,
16 | endOffset?: number,
17 | endLine?: number,
18 | endColumn?: number,
19 | ): IToken {
20 | return {
21 | image: image,
22 | startOffset: startOffset,
23 | startLine: startLine,
24 | startColumn: startColumn,
25 | endOffset: endOffset,
26 | endLine: endLine,
27 | endColumn: endColumn,
28 | tokenTypeIdx: tokType.tokenTypeIdx!,
29 | tokenType: tokType,
30 | };
31 | }
32 |
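A short sketch of these spec helpers in use (the token type and import paths are illustrative):

import { createToken } from "../../src/scan/tokens_public.js";
import { createRegularToken, setEquality } from "./matchers.js";

const Comma = createToken({ name: "Comma", pattern: /,/ });

// build a synthetic token without running a lexer
const tok = createRegularToken(Comma, ",", 3);
console.log(tok.tokenType.name); // "Comma"

// order-insensitive deep-equality assertion over two collections
setEquality([{ x: 1 }, { x: 2 }], [{ x: 2 }, { x: 1 }]); // passes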
--------------------------------------------------------------------------------
/packages/chevrotain/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "compilerOptions": {
4 | "rootDir": ".",
5 | "outDir": "lib",
6 | "baseUrl": ".",
7 | // raises too many errors to fix at once.
8 | "strict": false,
9 | "strictNullChecks": true,
10 | "skipLibCheck": true,
11 | "noImplicitAny": true,
12 | "strictFunctionTypes": true
13 | },
14 | "include": ["src/**/*.ts", "test/**/*.ts", "api.d.ts"]
15 | }
16 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/.c8rc.json:
--------------------------------------------------------------------------------
1 | {
2 | "reporter": ["lcov", "text"],
3 | "all": true,
4 | "src": ["src"],
5 | "extension": [".js", ".ts"],
6 | "exclude": ["test/*.*"],
7 | "exclude-after-remap": true
8 | }
9 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/.mocharc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | recursive: true,
3 | require: ["source-map-support/register"],
4 | reporter: "spec",
5 | spec: "./lib/test/**/*spec.js"
6 | }
7 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@chevrotain/cst-dts-gen-test",
3 | "version": "11.0.3",
4 | "private": true,
5 | "description": "tests for @chevrotain/cst-dts-gen",
6 | "license": "Apache-2.0",
7 | "type": "module",
8 | "repository": {
9 | "type": "git",
10 | "url": "git://github.com/Chevrotain/chevrotain.git"
11 | },
12 | "scripts": {
13 | "---------- CI FLOWS --------": "",
14 | "ci": "pnpm run build test",
15 | "build": "npm-run-all clean compile",
16 | "test": "npm-run-all coverage",
17 | "---------- DEV FLOWS --------": "",
18 | "update-snapshots": "node ./scripts/update-snapshots.js",
19 | "---------- BUILD STEPS --------": "",
20 | "clean": "shx rm -rf lib coverage",
21 | "compile:watch": "tsc -w",
22 | "compile": "tsc",
23 | "coverage": "c8 mocha --enable-source-maps"
24 | },
25 | "dependencies": {
26 | "@chevrotain/cst-dts-gen": "workspace:*",
27 | "@chevrotain/gast": "workspace:*",
28 | "@chevrotain/types": "workspace:*",
29 | "chevrotain": "workspace:*",
30 | "lodash-es": "4.17.21"
31 | },
32 | "devDependencies": {
33 | "@types/lodash-es": "4.17.12",
34 | "glob": "11.0.2"
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/scripts/update-snapshots.js:
--------------------------------------------------------------------------------
1 | import fs from "fs-extra";
2 | import path, { dirname } from "path";
3 | import glob from "glob";
4 | import { generateCstDts } from "@chevrotain/cst-dts-gen";
5 | import { getOutputFileForSnapshot } from "../lib/test/sample_test.js";
6 | import { fileURLToPath } from "url";
7 |
8 | const __dirname = dirname(fileURLToPath(import.meta.url));
9 |
10 | const inputFiles = glob.sync("../lib/test/snapshots/**/input.js", {
11 | cwd: __dirname,
12 | absolute: true,
13 | });
14 |
15 | for (const inputFile of inputFiles) {
16 | const module = await import(inputFile);
17 | const parser = module.parser;
18 | const result = generateCstDts(parser.getGAstProductions());
19 |
20 | const libSnapshotDir = path.dirname(inputFile);
21 | const expectedOutputPath = getOutputFileForSnapshot(libSnapshotDir);
22 |
23 | fs.writeFileSync(expectedOutputPath, result);
24 | }
25 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/options_spec.ts:
--------------------------------------------------------------------------------
1 | import { GenerateDtsOptions } from "@chevrotain/types";
2 | import { generateCstDts } from "@chevrotain/cst-dts-gen";
3 | import { createToken, CstParser } from "chevrotain";
4 | import { expect } from "chai";
5 |
6 | describe("The DTS generator", () => {
7 | it("can generate only cst types", () => {
8 | const result = genDts({
9 | includeVisitorInterface: false,
10 | });
11 |
12 | expect(result).to.not.include("export interface ICstNodeVisitor");
13 | expect(result).to.include("export interface TestRuleCstNode");
14 | expect(result).to.include("export type TestRuleCstChildren");
15 | });
16 |
17 | it("can generate a cst visitor with specific name", () => {
18 | const result = genDts({
19 | includeVisitorInterface: true,
20 | visitorInterfaceName: "ITestCstVisitor",
21 | });
22 |
23 | expect(result).to.include("export interface ITestCstVisitor");
24 | expect(result).to.not.include("export interface ICstNodeVisitor");
25 | });
26 |
27 | function genDts(options: GenerateDtsOptions) {
28 | const parser = new TestParser();
29 | const productions = parser.getGAstProductions();
30 | return generateCstDts(productions, options);
31 | }
32 | });
33 |
34 | const TestToken = createToken({
35 | name: "TestToken",
36 | pattern: /TESTTOKEN/,
37 | });
38 |
39 | class TestParser extends CstParser {
40 | constructor() {
41 | super([TestToken]);
42 |
43 | this.performSelfAnalysis();
44 | }
45 |
46 | testRule = this.RULE("testRule", () => {
47 | this.CONSUME(TestToken);
48 | });
49 | }
50 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/sample_test.ts:
--------------------------------------------------------------------------------
1 | import { BaseParser } from "chevrotain";
2 | import { expect } from "chai";
3 | import { readFileSync } from "fs";
4 | import { basename, dirname, relative, resolve } from "path";
5 | import { generateCstDts } from "@chevrotain/cst-dts-gen";
6 | import { fileURLToPath } from "url";
7 |
8 | export function executeSampleTest(dirPath: string, parser: BaseParser): void {
9 | it("Can generate type definition", () => {
10 | const productions = parser.getGAstProductions();
11 | const result = generateCstDts(productions);
12 | const expectedOutputPath = getOutputFileForSnapshot(dirPath);
13 | const expectedOutput = readFileSync(expectedOutputPath).toString("utf8");
14 | const simpleNewLinesOutput = expectedOutput.replace(/\r\n/g, "\n");
15 | expect(result).to.equal(simpleNewLinesOutput);
16 | });
17 | }
18 |
19 | export function testNameFromDir(dirPath: string): string {
20 | return basename(dirPath);
21 | }
22 |
23 | export function getOutputFileForSnapshot(libSnapshotDir: string): string {
24 | const srcSnapshotDir = getSourceFilePath(libSnapshotDir);
25 | return resolve(srcSnapshotDir, "output.d.ts");
26 | }
27 |
28 | // paths are for compiled typescript
29 | const __dirname = dirname(fileURLToPath(import.meta.url));
30 | const packageDir = resolve(__dirname, "../..");
31 | const libDir = resolve(packageDir, "lib");
32 |
33 | function getSourceFilePath(libFilePath: string): string {
34 | const relativeDirPath = relative(libDir, libFilePath);
35 | return resolve(packageDir, relativeDirPath);
36 | }
37 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/alternation/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.OR([
22 | { ALT: () => this.CONSUME(Token1) },
23 | { ALT: () => this.CONSUME(Token2) },
24 | ]);
25 | });
26 | }
27 |
28 | export const parser = new TestParser();
29 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/alternation/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1?: IToken[];
10 | Token2?: IToken[];
11 | };
12 |
13 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
14 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
15 | }
16 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/alternation/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/alternation_label/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.OR([
22 | { ALT: () => this.SUBRULE(this.otherRule, { LABEL: "item" }) },
23 | { ALT: () => this.CONSUME(Token1, { LABEL: "item" }) },
24 | ]);
25 | });
26 |
27 | otherRule = this.RULE("otherRule1", () => {
28 | this.CONSUME(Token2);
29 | });
30 | }
31 |
32 | export const parser = new TestParser();
33 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/alternation_label/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | item?: (OtherRule1CstNode | IToken)[];
10 | };
11 |
12 | export interface OtherRule1CstNode extends CstNode {
13 | name: "otherRule1";
14 | children: OtherRule1CstChildren;
15 | }
16 |
17 | export type OtherRule1CstChildren = {
18 | Token2: IToken[];
19 | };
20 |
21 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
22 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
23 | otherRule1(children: OtherRule1CstChildren, param?: IN): OUT;
24 | }
25 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/alternation_label/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/nonterminal/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.CONSUME(Token1);
22 | this.SUBRULE(this.otherRule);
23 | });
24 |
25 | otherRule = this.RULE("otherRule", () => {
26 | this.CONSUME(Token1);
27 | });
28 | }
29 |
30 | export const parser = new TestParser();
31 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/nonterminal/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1: IToken[];
10 | otherRule: OtherRuleCstNode[];
11 | };
12 |
13 | export interface OtherRuleCstNode extends CstNode {
14 | name: "otherRule";
15 | children: OtherRuleCstChildren;
16 | }
17 |
18 | export type OtherRuleCstChildren = {
19 | Token1: IToken[];
20 | };
21 |
22 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
23 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
24 | otherRule(children: OtherRuleCstChildren, param?: IN): OUT;
25 | }
26 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/nonterminal/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/nonterminal_label/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.CONSUME(Token1);
22 | this.SUBRULE(this.otherRule);
23 | this.SUBRULE1(this.otherRule, { LABEL: "labeled" });
24 | });
25 |
26 | otherRule = this.RULE("otherRule", () => {
27 | this.CONSUME(Token1);
28 | });
29 | }
30 |
31 | export const parser = new TestParser();
32 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/nonterminal_label/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1: IToken[];
10 | otherRule: OtherRuleCstNode[];
11 | labeled: OtherRuleCstNode[];
12 | };
13 |
14 | export interface OtherRuleCstNode extends CstNode {
15 | name: "otherRule";
16 | children: OtherRuleCstChildren;
17 | }
18 |
19 | export type OtherRuleCstChildren = {
20 | Token1: IToken[];
21 | };
22 |
23 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
24 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
25 | otherRule(children: OtherRuleCstChildren, param?: IN): OUT;
26 | }
27 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/nonterminal_label/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/option/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.CONSUME(Token1);
22 | this.OPTION(() => this.CONSUME(Token2));
23 | });
24 | }
25 |
26 | export const parser = new TestParser();
27 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/option/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1: IToken[];
10 | Token2?: IToken[];
11 | };
12 |
13 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
14 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
15 | }
16 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/option/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.MANY(() => {
22 | this.CONSUME(Token1);
23 | this.CONSUME(Token2);
24 | });
25 | });
26 | }
27 |
28 | export const parser = new TestParser();
29 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1?: IToken[];
10 | Token2?: IToken[];
11 | };
12 |
13 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
14 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
15 | }
16 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_mandatory/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.AT_LEAST_ONE(() => {
22 | this.CONSUME(Token1);
23 | this.CONSUME(Token2);
24 | });
25 | });
26 | }
27 |
28 | export const parser = new TestParser();
29 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_mandatory/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1: IToken[];
10 | Token2: IToken[];
11 | };
12 |
13 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
14 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
15 | }
16 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_mandatory/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_mandatory_sep/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | const Comma = createToken({
14 | name: "Comma",
15 | pattern: /,/,
16 | });
17 |
18 | class TestParser extends CstParser {
19 | constructor() {
20 | super([Token1, Token2]);
21 |
22 | this.performSelfAnalysis();
23 | }
24 |
25 | testRule = this.RULE("testRule", () => {
26 | this.AT_LEAST_ONE_SEP({
27 | SEP: Comma,
28 | DEF: () => {
29 | this.CONSUME(Token1);
30 | this.CONSUME(Token2);
31 | },
32 | });
33 | });
34 | }
35 |
36 | export const parser = new TestParser();
37 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_mandatory_sep/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1: IToken[];
10 | Token2: IToken[];
11 | Comma?: IToken[];
12 | };
13 |
14 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
15 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
16 | }
17 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_mandatory_sep/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_sep/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | const Comma = createToken({
14 | name: "Comma",
15 | pattern: /,/,
16 | });
17 |
18 | class TestParser extends CstParser {
19 | constructor() {
20 | super([Token1, Token2]);
21 |
22 | this.performSelfAnalysis();
23 | }
24 |
25 | testRule = this.RULE("testRule", () => {
26 | this.MANY_SEP({
27 | SEP: Comma,
28 | DEF: () => {
29 | this.CONSUME(Token1);
30 | this.CONSUME(Token2);
31 | },
32 | });
33 | });
34 | }
35 |
36 | export const parser = new TestParser();
37 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_sep/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1?: IToken[];
10 | Token2?: IToken[];
11 | Comma?: IToken[];
12 | };
13 |
14 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
15 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
16 | }
17 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/repetition_sep/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/terminal/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.CONSUME(Token1);
22 | this.CONSUME(Token2);
23 | });
24 | }
25 |
26 | export const parser = new TestParser();
27 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/terminal/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1: IToken[];
10 | Token2: IToken[];
11 | };
12 |
13 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
14 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
15 | }
16 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/terminal/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/terminal_label/input.ts:
--------------------------------------------------------------------------------
1 | import { createToken, CstParser } from "chevrotain";
2 |
3 | const Token1 = createToken({
4 | name: "Token1",
5 | pattern: /TOKEN1/,
6 | });
7 |
8 | const Token2 = createToken({
9 | name: "Token2",
10 | pattern: /TOKEN2/,
11 | });
12 |
13 | class TestParser extends CstParser {
14 | constructor() {
15 | super([Token1, Token2]);
16 |
17 | this.performSelfAnalysis();
18 | }
19 |
20 | testRule = this.RULE("testRule", () => {
21 | this.CONSUME(Token1);
22 | this.CONSUME1(Token1, { LABEL: "labeled" });
23 | this.CONSUME(Token2);
24 | });
25 | }
26 |
27 | export const parser = new TestParser();
28 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/terminal_label/output.d.ts:
--------------------------------------------------------------------------------
1 | import type { CstNode, ICstVisitor, IToken } from "chevrotain";
2 |
3 | export interface TestRuleCstNode extends CstNode {
4 | name: "testRule";
5 | children: TestRuleCstChildren;
6 | }
7 |
8 | export type TestRuleCstChildren = {
9 | Token1: IToken[];
10 | labeled: IToken[];
11 | Token2: IToken[];
12 | };
13 |
14 | export interface ICstNodeVisitor<IN, OUT> extends ICstVisitor<IN, OUT> {
15 | testRule(children: TestRuleCstChildren, param?: IN): OUT;
16 | }
17 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/test/snapshots/terminal_label/sample_spec.ts:
--------------------------------------------------------------------------------
1 | import { executeSampleTest, testNameFromDir } from "../../sample_test.js";
2 | import { parser } from "./input.js";
3 | import { dirname } from "path";
4 | import { fileURLToPath } from "url";
5 |
6 | const __dirname = dirname(fileURLToPath(import.meta.url));
7 |
8 | describe(`${testNameFromDir(__dirname)}`, () => {
9 | executeSampleTest(__dirname, parser);
10 | });
11 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen-test/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "compilerOptions": {
4 | "rootDir": ".",
5 | "outDir": "lib",
6 | "baseUrl": "."
7 | },
8 | "include": ["./test/**/*"]
9 | }
10 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@chevrotain/cst-dts-gen",
3 | "version": "11.0.3",
4 | "description": "Generates type definitions for Chevrotain CST nodes",
5 | "keywords": [],
6 | "bugs": {
7 | "url": "https://github.com/Chevrotain/chevrotain/issues"
8 | },
9 | "license": "Apache-2.0",
10 | "type": "module",
11 | "types": "./lib/src/api.d.ts",
12 | "exports": {
13 | ".": {
14 | "import": "./lib/src/api.js",
15 | "types": "./lib/src/api.d.ts"
16 | }
17 | },
18 | "files": [
19 | "lib/src/**/*.js",
20 | "lib/src/**/*.map",
21 | "lib/src/**/*.d.ts",
22 | "src/**/*.ts"
23 | ],
24 | "repository": {
25 | "type": "git",
26 | "url": "git://github.com/Chevrotain/chevrotain.git"
27 | },
28 | "scripts": {
29 | "---------- CI FLOWS --------": "",
30 | "ci": "pnpm run build test",
31 | "build": "npm-run-all clean compile",
32 | "test": "echo \"tests are in a different package\"",
33 | "---------- BUILD STEPS --------": "",
34 | "clean": "shx rm -rf lib",
35 | "compile:watch": "tsc -w",
36 | "compile": "tsc"
37 | },
38 | "dependencies": {
39 | "@chevrotain/gast": "workspace:*",
40 | "@chevrotain/types": "workspace:*",
41 | "lodash-es": "4.17.21"
42 | },
43 | "devDependencies": {
44 | "@types/lodash-es": "4.17.12"
45 | },
46 | "publishConfig": {
47 | "access": "public"
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen/src/api.ts:
--------------------------------------------------------------------------------
1 | import { GenerateDtsOptions, Rule } from "@chevrotain/types";
2 | import { buildModel } from "./model.js";
3 | import { genDts } from "./generate.js";
4 |
5 | const defaultOptions: Required<GenerateDtsOptions> = {
6 | includeVisitorInterface: true,
7 | visitorInterfaceName: "ICstNodeVisitor",
8 | };
9 |
10 | export function generateCstDts(
11 | productions: Record<string, Rule>,
12 | options?: GenerateDtsOptions,
13 | ): string {
14 | const effectiveOptions = {
15 | ...defaultOptions,
16 | ...options,
17 | };
18 |
19 | const model = buildModel(productions);
20 |
21 | return genDts(model, effectiveOptions);
22 | }
23 |
--------------------------------------------------------------------------------
/packages/cst-dts-gen/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "compilerOptions": {
4 | "rootDir": ".",
5 | "outDir": "lib",
6 | "baseUrl": "."
7 | },
8 | "include": ["./src/**/*", "api.d.ts"]
9 | }
10 |
--------------------------------------------------------------------------------
/packages/gast/.c8rc.json:
--------------------------------------------------------------------------------
1 | {
2 | "reporter": ["lcov", "text"],
3 | "all": true,
4 | "src": ["src"],
5 | "extension": [".js", ".ts"],
6 | "exclude": ["test/*.*"],
7 | "exclude-after-remap": true
8 | }
9 |
--------------------------------------------------------------------------------
/packages/gast/.mocharc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | recursive: true,
3 | require: ["source-map-support/register"],
4 | reporter: "spec",
5 | spec: "./lib/test/**/*spec.js"
6 | }
7 |
--------------------------------------------------------------------------------
/packages/gast/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@chevrotain/gast",
3 | "version": "11.0.3",
4 | "description": "Grammar AST structure for Chevrotain Parsers",
5 | "keywords": [],
6 | "bugs": {
7 | "url": "https://github.com/Chevrotain/chevrotain/issues"
8 | },
9 | "license": "Apache-2.0",
10 | "type": "module",
11 | "types": "./lib/src/api.d.ts",
12 | "exports": {
13 | ".": {
14 | "import": "./lib/src/api.js",
15 | "types": "./lib/src/api.d.ts"
16 | }
17 | },
18 | "files": [
19 | "lib/src/**/*.js",
20 | "lib/src/**/*.map",
21 | "lib/src/**/*.d.ts",
22 | "src/**/*.ts"
23 | ],
24 | "repository": {
25 | "type": "git",
26 | "url": "git://github.com/Chevrotain/chevrotain.git"
27 | },
28 | "scripts": {
29 | "---------- CI FLOWS --------": "",
30 | "ci": "pnpm run build test",
31 | "build": "npm-run-all clean compile",
32 | "test": "npm-run-all coverage",
33 | "---------- DEV FLOWS --------": "",
34 | "update-snapshots": "node ./scripts/update-snapshots.js",
35 | "---------- BUILD STEPS --------": "",
36 | "clean": "shx rm -rf lib coverage",
37 | "compile:watch": "tsc -w",
38 | "compile": "tsc",
39 | "coverage": "c8 mocha --enable-source-maps"
40 | },
41 | "dependencies": {
42 | "@chevrotain/types": "workspace:*",
43 | "lodash-es": "4.17.21"
44 | },
45 | "devDependencies": {
46 | "@types/lodash-es": "4.17.12"
47 | },
48 | "publishConfig": {
49 | "access": "public"
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/packages/gast/src/api.ts:
--------------------------------------------------------------------------------
1 | export {
2 | Rule,
3 | Terminal,
4 | NonTerminal,
5 | Option,
6 | Repetition,
7 | RepetitionMandatory,
8 | RepetitionMandatoryWithSeparator,
9 | RepetitionWithSeparator,
10 | Alternation,
11 | Alternative,
12 | serializeGrammar,
13 | serializeProduction,
14 | } from "./model.js";
15 |
16 | export { GAstVisitor } from "./visitor.js";
17 |
18 | export {
19 | getProductionDslName,
20 | isOptionalProd,
21 | isBranchingProd,
22 | isSequenceProd,
23 | } from "./helpers.js";
24 |
--------------------------------------------------------------------------------
/packages/gast/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "compilerOptions": {
4 | "rootDir": ".",
5 | "outDir": "lib",
6 | "baseUrl": "."
7 | },
8 | "include": ["./src/**/*", "./test/**/*", "api.d.ts"]
9 | }
10 |
--------------------------------------------------------------------------------
/packages/regexp-to-ast/.c8rc.json:
--------------------------------------------------------------------------------
1 | {
2 | "reporter": ["lcov", "text"],
3 | "all": true,
4 | "src": ["src"],
5 | "extension": [".js", ".ts"],
6 | "exclude": ["test/*.*"],
7 | "exclude-after-remap": true
8 | }
9 |
--------------------------------------------------------------------------------
/packages/regexp-to-ast/.mocharc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | recursive: true,
3 | require: ["source-map-support/register"],
4 | reporter: "spec",
5 | spec: "./lib/test/**/*spec.js"
6 | }
7 |
--------------------------------------------------------------------------------
/packages/regexp-to-ast/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@chevrotain/regexp-to-ast",
3 | "version": "11.0.3",
4 | "description": "Parses a Regular Expression and outputs an AST",
5 | "keywords": [
6 | "regExp",
7 | "parser",
8 | "regular expression"
9 | ],
10 | "bugs": {
11 | "url": "https://github.com/Chevrotain/chevrotain/issues"
12 | },
13 | "license": "Apache-2.0",
14 | "type": "module",
15 | "types": "./types.d.ts",
16 | "exports": {
17 | ".": {
18 | "import": "./lib/src/api.js",
19 | "types": "./types.d.ts"
20 | }
21 | },
22 | "files": [
23 | "lib/src/**/*.js",
24 | "lib/src/**/*.map",
25 | "src/**/*.ts",
26 | "types.d.ts"
27 | ],
28 | "repository": {
29 | "type": "git",
30 | "url": "git://github.com/Chevrotain/chevrotain.git"
31 | },
32 | "scripts": {
33 | "---------- CI FLOWS --------": "",
34 | "ci": "pnpm run build test",
35 | "build": "npm-run-all clean compile",
36 | "test": "npm-run-all coverage",
37 | "---------- BUILD STEPS --------": "",
38 | "clean": "shx rm -rf lib coverage",
39 | "compile:watch": "tsc -w",
40 | "compile": "tsc",
41 | "coverage": "c8 mocha"
42 | },
43 | "publishConfig": {
44 | "access": "public"
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/packages/regexp-to-ast/src/api.ts:
--------------------------------------------------------------------------------
1 | export { RegExpParser } from "./regexp-parser.js";
2 | export { BaseRegExpVisitor } from "./base-regexp-visitor.js";
3 |
--------------------------------------------------------------------------------
/packages/regexp-to-ast/src/character-classes.ts:
--------------------------------------------------------------------------------
1 | import { cc } from "./utils.js";
2 |
3 | export const digitsCharCodes: number[] = [];
4 | for (let i = cc("0"); i <= cc("9"); i++) {
5 | digitsCharCodes.push(i);
6 | }
7 |
8 | export const wordCharCodes: number[] = [cc("_")].concat(digitsCharCodes);
9 | for (let i = cc("a"); i <= cc("z"); i++) {
10 | wordCharCodes.push(i);
11 | }
12 |
13 | for (let i = cc("A"); i <= cc("Z"); i++) {
14 | wordCharCodes.push(i);
15 | }
16 |
17 | // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp#character-classes
18 | export const whitespaceCodes: number[] = [
19 | cc(" "),
20 | cc("\f"),
21 | cc("\n"),
22 | cc("\r"),
23 | cc("\t"),
24 | cc("\v"),
25 | cc("\t"),
26 | cc("\u00a0"),
27 | cc("\u1680"),
28 | cc("\u2000"),
29 | cc("\u2001"),
30 | cc("\u2002"),
31 | cc("\u2003"),
32 | cc("\u2004"),
33 | cc("\u2005"),
34 | cc("\u2006"),
35 | cc("\u2007"),
36 | cc("\u2008"),
37 | cc("\u2009"),
38 | cc("\u200a"),
39 | cc("\u2028"),
40 | cc("\u2029"),
41 | cc("\u202f"),
42 | cc("\u205f"),
43 | cc("\u3000"),
44 | cc("\ufeff"),
45 | ];
46 |
--------------------------------------------------------------------------------
/packages/regexp-to-ast/src/utils.ts:
--------------------------------------------------------------------------------
1 | import type { Character, IRegExpAST, RegExpFlags } from "../types";
2 |
3 | export function cc(char: string): number {
4 | return char.charCodeAt(0);
5 | }
6 |
7 | export function insertToSet<T>(item: T | T[], set: T[]) {
8 | if (Array.isArray(item)) {
9 | item.forEach(function (subItem) {
10 | set.push(subItem);
11 | });
12 | } else {
13 | set.push(item);
14 | }
15 | }
16 |
17 | export function addFlag(
18 | flagObj: RegExpFlags,
19 | flagKey: keyof Omit<RegExpFlags, "type" | "loc">,
20 | ) {
21 | if (flagObj[flagKey] === true) {
22 | throw "duplicate flag " + flagKey;
23 | }
24 |
25 | const x: boolean = flagObj[flagKey];
26 | flagObj[flagKey] = true;
27 | }
28 |
29 | export function ASSERT_EXISTS<T>(obj: any): obj is T {
30 | // istanbul ignore next
31 | if (obj === undefined) {
32 | throw Error("Internal Error - Should never get here!");
33 | }
34 | return true;
35 | }
36 |
37 | // istanbul ignore next
38 | export function ASSERT_NEVER_REACH_HERE(): any {
39 | throw Error("Internal Error - Should never get here!");
40 | }
41 |
42 | export function isCharacter(obj: { type: string }): obj is Character {
43 | return obj["type"] === "Character";
44 | }
45 |
--------------------------------------------------------------------------------
/packages/regexp-to-ast/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "compilerOptions": {
4 | "rootDir": ".",
5 | "outDir": "lib"
6 | },
7 | "include": ["./src/**/*", "./test/**/*", "api.d.ts"]
8 | }
9 |
--------------------------------------------------------------------------------
/packages/types/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@chevrotain/types",
3 | "version": "11.0.3",
4 | "description": "Type Signatures and API Website for Chevrotain",
5 | "keywords": [],
6 | "bugs": {
7 | "url": "https://github.com/Chevrotain/chevrotain/issues"
8 | },
9 | "license": "Apache-2.0",
10 | "author": {
11 | "name": "Shahar Soel"
12 | },
13 | "files": [
14 | "api.d.ts",
15 | "README.md",
16 | "LICENSE.TXT"
17 | ],
18 | "type": "module",
19 | "types": "./api.d.ts",
20 | "exports": {
21 | ".": {
22 | "types": "./api.d.ts"
23 | }
24 | },
25 | "repository": {
26 | "type": "git",
27 | "url": "git://github.com/Chevrotain/chevrotain.git"
28 | },
29 | "homepage": "https://chevrotain.io/documentation/",
30 | "scripts": {
31 | "---------- CI FLOWS --------": "",
32 | "ci": "pnpm run build",
33 | "build": "npm-run-all clean compile api-site:build",
34 | "postversion": "npm-run-all api-site:build api-site:upload",
35 | "---------- BUILD STEPS --------": "",
36 | "clean": "shx rm -rf dev lib",
37 | "compile:watch": "tsc -w",
38 | "compile": "tsc",
39 | "api-site:build": "typedoc api.d.ts --out dev/docs --excludeExternals --excludePrivate",
40 | "api-site:upload": "./scripts/api-site-upload.sh"
41 | },
42 | "devDependencies": {
43 | "typedoc": "0.26.10",
44 | "typescript": "5.8.3"
45 | },
46 | "publishConfig": {
47 | "access": "public"
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/packages/types/scripts/api-site-upload.sh:
--------------------------------------------------------------------------------
1 | echo "uploading new api docs website"
2 | set -e
3 | rm -rf gh-pages
4 | mkdir gh-pages
5 | cd gh-pages
6 | git clone https://github.com/chevrotain/chevrotain.git .
7 |
8 | echo "checkout gh-pages"
9 | git checkout gh-pages
10 |
11 | node ../scripts/update-api-docs.js
12 |
13 | git add -A
14 | git commit -m "update api website"
15 | git push
16 |
17 | # cleanup
18 | cd ..
19 | rm -rf gh-pages
20 |
21 |
--------------------------------------------------------------------------------
/packages/types/scripts/update-api-docs.js:
--------------------------------------------------------------------------------
1 | import fs from "fs-extra";
2 | import { dirname, join } from "path";
3 | import { fileURLToPath } from "url";
4 |
5 | const __dirname = dirname(fileURLToPath(import.meta.url));
6 |
7 | const pkgPath = join(__dirname, "../package.json");
8 | const pkg = fs.readJsonSync(pkgPath);
9 |
10 | console.log("updating api docs re-direct");
11 |
12 | const version = pkg.version;
13 | const noDotsVersion = version.replace(/\./g, "_");
14 | const newVersionApiDocsDir = join(
15 | __dirname,
16 | "../gh-pages/documentation/" + noDotsVersion,
17 | );
18 |
19 | try {
20 | const stats = fs.lstatSync(newVersionApiDocsDir);
21 |
22 | if (stats.isDirectory()) {
23 | console.error("docs directory for " + noDotsVersion + " already exists");
24 | process.exit(-1);
25 | }
26 | } catch (e) {
27 | // no issues it does not exist
28 | }
29 |
30 | // Update redirect to latest docs
31 | const docsIndexHtmlPath = join(
32 | __dirname,
33 | "../gh-pages/documentation/index.html",
34 | );
35 | const docsIndexHtmlString = fs
36 | .readFileSync(docsIndexHtmlPath, "utf8")
37 | .toString();
38 | const bumpedDocsIndexHtmlString = docsIndexHtmlString.replace(
39 | /\d+_\d+_\d+/,
40 | noDotsVersion,
41 | );
42 | fs.writeFileSync(docsIndexHtmlPath, bumpedDocsIndexHtmlString);
43 |
44 | const orgDocsLocation = join(__dirname, "../dev/docs");
45 | fs.copySync(orgDocsLocation, newVersionApiDocsDir);
46 |
--------------------------------------------------------------------------------
/packages/types/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "compilerOptions": {
4 | "rootDir": ".",
5 | "outDir": "lib",
6 | "baseUrl": "."
7 | },
8 | "include": ["api.d.ts"]
9 | }
10 |
--------------------------------------------------------------------------------
/packages/types/typedoc.json:
--------------------------------------------------------------------------------
1 | {
2 | "visibilityFilters": {
3 | "protected": true,
4 | "private": false,
5 | "inherited": true,
6 | "external": false
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/packages/utils/.c8rc.json:
--------------------------------------------------------------------------------
1 | {
2 | "reporter": ["lcov", "text"],
3 | "all": true,
4 | "src": ["src"],
5 | "extension": [".js", ".ts"],
6 | "exclude": ["src/print.ts", "src/to-fast-properties.ts", "test/*.*"],
7 | "exclude-after-remap": true
8 | }
9 |
--------------------------------------------------------------------------------
/packages/utils/.mocharc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | recursive: true,
3 | require: ["source-map-support/register"],
4 | reporter: "spec",
5 | spec: "./lib/test/**/*spec.js"
6 | }
7 |
--------------------------------------------------------------------------------
/packages/utils/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@chevrotain/utils",
3 | "version": "11.0.3",
4 | "description": "common utilities",
5 | "keywords": [],
6 | "bugs": {
7 | "url": "https://github.com/Chevrotain/chevrotain/issues"
8 | },
9 | "license": "Apache-2.0",
10 | "author": {
11 | "name": "Shahar Soel"
12 | },
13 | "type": "module",
14 | "types": "./lib/src/api.d.ts",
15 | "exports": {
16 | ".": {
17 | "import": "./lib/src/api.js",
18 | "types": "./lib/src/api.d.ts"
19 | }
20 | },
21 | "files": [
22 | "lib/src/**/*.js",
23 | "lib/src/**/*.map",
24 | "lib/src/**/*.d.ts",
25 | "src/**/*.ts"
26 | ],
27 | "repository": {
28 | "type": "git",
29 | "url": "git://github.com/Chevrotain/chevrotain.git"
30 | },
31 | "scripts": {
32 | "---------- CI FLOWS --------": "",
33 | "ci": "npm-run-all build test",
34 | "build": "npm-run-all clean compile",
35 | "test": "npm-run-all coverage",
36 | "---------- BUILD STEPS --------": "",
37 | "clean": "shx rm -rf lib coverage",
38 | "compile:watch": "tsc -w",
39 | "compile": "tsc",
40 | "coverage": "c8 mocha --enable-source-maps"
41 | },
42 | "publishConfig": {
43 | "access": "public"
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/packages/utils/src/api.ts:
--------------------------------------------------------------------------------
1 | export { PRINT_WARNING, PRINT_ERROR } from "./print.js";
2 | export { timer } from "./timer.js";
3 | export { toFastProperties } from "./to-fast-properties.js";
4 |
--------------------------------------------------------------------------------
/packages/utils/src/print.ts:
--------------------------------------------------------------------------------
1 | export function PRINT_ERROR(msg: string) {
2 | /* istanbul ignore else - can't override global.console in node.js */
3 | if (console && console.error) {
4 | console.error(`Error: ${msg}`);
5 | }
6 | }
7 |
8 | export function PRINT_WARNING(msg: string) {
9 | /* istanbul ignore else - can't override global.console in node.js*/
10 | if (console && console.warn) {
11 | // TODO: modify docs accordingly
12 | console.warn(`Warning: ${msg}`);
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/packages/utils/src/timer.ts:
--------------------------------------------------------------------------------
1 | export function timer<T>(func: () => T): { time: number; value: T } {
2 | const start = new Date().getTime();
3 | const val = func();
4 | const end = new Date().getTime();
5 | const total = end - start;
6 | return { time: total, value: val };
7 | }
8 |
--------------------------------------------------------------------------------
/packages/utils/src/to-fast-properties.ts:
--------------------------------------------------------------------------------
1 | // based on: https://github.com/petkaantonov/bluebird/blob/b97c0d2d487e8c5076e8bd897e0dcd4622d31846/src/util.js#L201-L216
2 | export function toFastProperties(toBecomeFast: any) {
3 | function FakeConstructor() {}
4 |
5 | // If our object is used as a constructor, it would receive
6 | FakeConstructor.prototype = toBecomeFast;
7 | const fakeInstance = new (FakeConstructor as any)();
8 |
9 | function fakeAccess() {
10 | return typeof fakeInstance.bar;
11 | }
12 |
13 | // help V8 understand this is a "real" prototype by actually using
14 | // the fake instance.
15 | fakeAccess();
16 | fakeAccess();
17 |
18 | // Always true condition to suppress the Firefox warning of unreachable
19 | // code after a return statement.
20 | if (1) return toBecomeFast;
21 |
22 | // Eval prevents optimization of this method (even though this is dead code)
23 | // - https://esbuild.github.io/content-types/#direct-eval
24 | /* istanbul ignore next */
25 | // tslint:disable-next-line
26 | (0, eval)(toBecomeFast);
27 | }
28 |
--------------------------------------------------------------------------------
/packages/utils/test/timer_spec.ts:
--------------------------------------------------------------------------------
1 | import { timer } from "../src/api.js";
2 | import { expect } from "chai";
3 |
4 | describe("The timer helper", () => {
5 | it("will return the total execution time of a sync function", () => {
6 | const { time } = timer(() => {
7 | const sab = new SharedArrayBuffer(1024);
8 | const int32 = new Int32Array(sab);
9 | Atomics.wait(int32, 0, 0, 100);
10 | });
11 | expect(time).to.be.greaterThanOrEqual(100);
12 | });
13 |
14 | it("will return the value of the callback function", () => {
15 | const { value } = timer(() => {
16 | return 2 * 2;
17 | });
18 | expect(value).to.eql(4);
19 | });
20 | });
21 |
--------------------------------------------------------------------------------
/packages/utils/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../../tsconfig.base.json",
3 | "compilerOptions": {
4 | "rootDir": ".",
5 | "outDir": "lib",
6 | "baseUrl": "."
7 | },
8 | "include": ["./src/**/*", "./test/**/*", "api.d.ts"]
9 | }
10 |
--------------------------------------------------------------------------------
/packages/website/.gitignore:
--------------------------------------------------------------------------------
1 | /docs/.vuepress/.cache
2 | /docs/.vuepress/.temp
--------------------------------------------------------------------------------
/packages/website/docs/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | home: true
3 | actionText: Get Started →
4 | actionLink: /tutorial/step0_introduction
5 | features:
6 | - title: Blazing Fast
7 | details: Several times faster than other JavaScript parsing libraries. Can even compete with the performance of hand-crafted parsers.
8 | - title: Ease of Development
9 | details: Chevrotain is an internal JavaScript DSL, no code generation or new tools needed to develop, run & debug. Just use your favorite JavaScript IDE.
10 | - title: Maintainability
11 | details: Separation of syntax and semantics enables cleaner code and grammar reuse.
12 | - title: Fault Tolerant
13 | details: Implements error recovery heuristics to parse even partially invalid inputs.
14 | - title: Syntax Diagrams
15 | details: Easily generate syntax diagrams for documentation purposes.
16 |
17 | footer: Apache V2.0 Licensed | Copyright © 2018-present SAP SE or an SAP affiliate company
18 | ---
19 |
--------------------------------------------------------------------------------
/packages/website/docs/features/backtracking.md:
--------------------------------------------------------------------------------
1 | # Backtracking
2 |
3 | Chevrotain supports backtracking to resolve ambiguities.
4 | Backtracking means **fully** trying an alternative instead of using a fixed
5 | token lookahead; this is similar to a DFS versus a BFS.
6 |
7 | Backtracking is not automatic and must be **explicitly** invoked.
8 | This is because it is inefficient and is mutually exclusive with error recovery.
9 | It is strongly recommended to avoid using backtracking if possible.
10 |
11 | Backtracking is implemented by using [Gates](https://chevrotain.io/docs/features/gates.html).
12 |
13 | For example, consider the following grammar, which is not LL(K), as
14 | both of the alternatives in "statement" have a potentially infinitely long common prefix.
15 |
16 | ```antlr
17 | statement:
18 | longRule1 |
19 | longRule2
20 |
21 | longRule1:
22 | A+ B
23 |
24 | longRule2:
25 | A+ C
26 | ```
27 |
28 | We can resolve the ambiguity by using backtracking, effectively fully trying out
29 | the alternatives (in order) instead of trying to choose one using a limited token lookahead.
30 |
31 | ```javascript
32 | $.RULE("statement", () => {
33 | $.OR([
34 | {
35 | GATE: $.BACKTRACK($.longRule1),
36 | ALT: () => $.SUBRULE($.longRule1),
37 | },
38 | {
39 | GATE: $.BACKTRACK($.longRule2),
40 | ALT: () => $.SUBRULE($.longRule2),
41 | },
42 | ]);
43 | });
44 | ```
45 |
46 | See [executable example](https://github.com/chevrotain/chevrotain/tree/master/examples/parser/backtracking)
47 | for further details.
48 |
--------------------------------------------------------------------------------
/packages/website/docs/features/blazing_fast.md:
--------------------------------------------------------------------------------
1 | # Blazing Fast
2 |
3 | Chevrotain treats performance as a feature, not as a secondary concern.
4 | This means that a great deal of work has gone into profiling and optimization with a focus on the [V8 JavaScript engine](https://github.com/v8/v8/wiki).
5 | It also means that new features are considered in terms of their performance cost.
6 |
7 | The result of this work is that Chevrotain is **Blazing Fast**: it is **several times faster** than most other
8 | JavaScript parsing libraries and can even be one **order of magnitude faster** than some popular ones.
9 |
10 | See results of the [performance benchmark](https://chevrotain.io/performance/) on Chrome 67 (V8 6.7.288.46):
11 | 
12 |
13 | Or [run the benchmark](https://chevrotain.io/performance/) yourself.
14 |
--------------------------------------------------------------------------------
/packages/website/docs/features/custom_errors.md:
--------------------------------------------------------------------------------
1 | # Customizable Error Messages
2 |
3 | Chevrotain allows users to customize both the parser and lexer error messages.
4 | This can be accomplished by implementing the following interfaces:
5 |
6 | - [IParserErrorMessageProvider](https://chevrotain.io/documentation/11_0_3/interfaces/IParserErrorMessageProvider.html)
7 | - [ILexerErrorMessageProvider](https://chevrotain.io/documentation/11_0_3/interfaces/ILexerErrorMessageProvider.html)
8 |
9 | See executable examples:
10 |
11 | - [Custom Parser Errors](https://github.com/chevrotain/chevrotain/blob/master/examples/parser/custom_errors/custom_errors.js).
12 | - [Custom Lexer Errors](https://github.com/chevrotain/chevrotain/blob/master/examples/lexer/custom_errors/custom_errors.js).
13 |
14 | In addition, it is also possible to directly provide string values to be used in errors
15 | for specific parsing DSL methods, for example:
16 |
17 | ```javascript
18 | $.RULE("myStatement", () => {
19 | // ...
20 | $.CONSUME(SemiColon, {
21 | ERR_MSG: "expecting semiColon at end of myStatement",
22 | });
23 | });
24 | ```
25 |
26 | The **ERR_MSG** config property is available for the following DSL methods:
27 |
28 | - [CONSUME](https://chevrotain.io/documentation/11_0_3/classes/CstParser.html#CONSUME)
29 | - [OR](https://chevrotain.io/documentation/11_0_3/classes/CstParser.html#OR)
30 | - [AT_LEAST_ONE](https://chevrotain.io/documentation/11_0_3/classes/CstParser.html#AT_LEAST_ONE)
31 | - [AT_LEAST_ONE_SEP](https://chevrotain.io/documentation/11_0_3/classes/CstParser.html#AT_LEAST_ONE_SEP)
32 |
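33 | For cases where all error messages of a certain kind should change, an `errorMessageProvider`
34 | can be passed in the parser's configuration. A minimal sketch, assuming a token vocabulary
35 | `allTokens` exists and reusing the default provider for everything that is not overridden:
36 |
37 | ```javascript
38 | import { CstParser, defaultParserErrorProvider } from "chevrotain";
39 |
40 | const myErrorProvider = {
41 |   // keep the default messages...
42 |   ...defaultParserErrorProvider,
43 |   // ...and only customize the "mismatched token" message
44 |   buildMismatchTokenMessage({ expected, actual, previous, ruleName }) {
45 |     return `Expecting --> ${expected.name} <-- but found --> '${actual.image}' <--`;
46 |   },
47 | };
48 |
49 | class MyParser extends CstParser {
50 |   constructor() {
51 |     super(allTokens, { errorMessageProvider: myErrorProvider });
52 |     this.performSelfAnalysis();
53 |   }
54 |   // ... rule definitions ...
55 | }
56 | ```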
--------------------------------------------------------------------------------
/packages/website/docs/features/custom_token_patterns.md:
--------------------------------------------------------------------------------
1 | # Custom Token Patterns
2 |
3 | Chevrotain is not limited to only using JavaScript regular expressions to define Tokens.
4 | Tokens can also be defined using arbitrary JavaScript code, for example:
5 |
6 | ```javascript
7 | // our custom matcher
8 | function matchInteger(text, startOffset) {
9 | let endOffset = startOffset;
10 | let charCode = text.charCodeAt(endOffset);
11 | // 0-9 digits
12 | while (charCode >= 48 && charCode <= 57) {
13 | endOffset++;
14 | charCode = text.charCodeAt(endOffset);
15 | }
16 |
17 | // No match, must return null to conform with the RegExp.prototype.exec signature
18 | if (endOffset === startOffset) {
19 | return null;
20 | } else {
21 | let matchedString = text.substring(startOffset, endOffset);
22 | // according to the RegExp.prototype.exec API the first item in the returned array must be the whole matched string.
23 | return [matchedString];
24 | }
25 | }
26 |
27 | const IntegerToken = createToken({
28 | name: "IntegerToken",
29 | pattern: matchInteger,
30 | });
31 | ```
32 |
33 | This feature is often used to implement complex lexing logic, such as [python indentation](https://github.com/chevrotain/chevrotain/tree/master/examples/lexer/python_indentation).
34 |
35 | See [in depth guide](../guide/custom_token_patterns.md) for further details.
36 |
--------------------------------------------------------------------------------
/packages/website/docs/features/easy_debugging.md:
--------------------------------------------------------------------------------
1 | # Easy Debugging
2 |
3 | Chevrotain is an **internal** JavaScript DSL. This means that Chevrotain grammars
4 | are just plain JavaScript source code without any additional levels of abstraction
5 | as in parser generators (EBNF vs generated code).
6 |
7 | In practical terms this means that debugging a Chevrotain parser is the same as debugging any
8 | other JavaScript code: just set up breakpoints or debugger statements using your favorite IDE.
9 |
10 | For example:
11 |
12 | ```javascript
13 | $.RULE("objectItem", () => {
14 |   $.CONSUME(StringLiteral);
15 |   // a "debugger" statement will pause execution here,
16 |   // exactly like in any other JavaScript code
17 |   debugger;
18 |   $.CONSUME(Colon);
19 |   $.SUBRULE($.value);
20 | });
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/website/docs/features/fault_tolerance.md:
--------------------------------------------------------------------------------
1 | # Fault Tolerance
2 |
3 | Chevrotain provides automatic error recovery capabilities. This means
4 | that Chevrotain parsers are fault-tolerant, which is an important capability
5 | when creating editor and language services tooling.
6 |
7 | In practical terms this means that Chevrotain will be able to report multiple
8 | syntax errors instead of stopping on the first one and also provide
9 | a partial output structure for an invalid input.
10 |
11 | For more details on Fault Tolerance and error recovery see
12 | the [in-depth](https://chevrotain.io/docs/tutorial/step4_fault_tolerance.html) guide.
13 |
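14 | Error recovery is enabled via a single parser configuration flag. A minimal sketch
15 | (the token and rule definitions here are just illustrative placeholders):
16 |
17 | ```javascript
18 | import { createToken, Lexer, CstParser } from "chevrotain";
19 |
20 | const A = createToken({ name: "A", pattern: /a/ });
21 | const B = createToken({ name: "B", pattern: /b/ });
22 | const allTokens = [A, B];
23 |
24 | class FaultTolerantParser extends CstParser {
25 |   constructor() {
26 |     // "recoveryEnabled" turns on the automatic error recovery heuristics
27 |     super(allTokens, { recoveryEnabled: true });
28 |     this.performSelfAnalysis();
29 |   }
30 |
31 |   statement = this.RULE("statement", () => {
32 |     this.CONSUME(A);
33 |     this.CONSUME(B);
34 |   });
35 | }
36 |
37 | const parser = new FaultTolerantParser();
38 | parser.input = new Lexer(allTokens).tokenize("aab").tokens;
39 | const cst = parser.statement();
40 | // all syntax errors are collected (instead of stopping at the first one)
41 | // and a partial CST is still returned for the invalid input
42 | console.log(parser.errors);
43 | ```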
--------------------------------------------------------------------------------
/packages/website/docs/features/gates.md:
--------------------------------------------------------------------------------
1 | # Gates
2 |
3 | Chevrotain supports Gates on parsing DSL methods.
4 | Gates act as a type of **guard condition** that prevents an alternative
5 | from being taken. Gates are often used in combination with parametrized rules
6 | to represent multiple variants of the same parsing rule while avoiding code duplication.
7 |
8 | For example:
9 |
10 | ```javascript
11 | // isConst is a parameter passed from another rule.
12 | $.RULE("Value", (isConst) => {
13 | $.OR([
14 | // the Variable alternative is only possible when "isConst" is Falsey
15 | { GATE: () => !isConst, ALT: () => $.SUBRULE($.Variable) },
16 | { ALT: () => $.CONSUME(IntValue) },
17 | { ALT: () => $.CONSUME(FloatValue) },
18 | { ALT: () => $.CONSUME(StringValue) },
19 | ]);
20 | });
21 | ```
22 |
23 | The [Look Ahead](https://chevrotain.io/documentation/11_0_3/classes/CstParser.html#LA) method is often helpful in combination with Gates to determine whether a path should be followed, for example:
24 |
25 | ```javascript
26 | // SELECT LIMIT.ID FROM USER_LIMIT LIMIT
27 | // SELECT ID, NAME FROM USER_LIMIT LIMIT 1
28 | $.RULE("FromClause", () => {
29 | $.CONSUME(From);
30 | $.CONSUME(Identifier);
31 |
32 | $.OPTION({
33 | GATE: () => $.LA(2).tokenType !== UnsignedInteger,
34 | DEF: () => $.CONSUME1(Identifier, { LABEL: "alias" }),
35 | });
36 | });
37 | ```
38 |
39 | Since **LIMIT** may be either an identifier or a keyword depending on the surrounding tokens, looking ahead at subsequent tokens is required to know whether the token should be consumed as an identifier or left to be parsed by a subsequent rule.
40 |
41 | See [executable example](https://github.com/chevrotain/chevrotain/tree/master/examples/parser/predicate_lookahead)
42 | for further details.
43 |
--------------------------------------------------------------------------------
/packages/website/docs/features/grammar_inheritance.md:
--------------------------------------------------------------------------------
1 | # Grammar Inheritance
2 |
3 | Chevrotain supports Grammar Inheritance. This is useful to represent multiple variants of the same grammar,
4 | for example a grammar for ECMAScript 6 that **extends** an ECMAScript 5.1 grammar.
5 |
6 | Chevrotain Grammars are JavaScript classes, so Grammar inheritance is simply JavaScript inheritance
7 | with the replacement of the [**RULE**](https://chevrotain.io/documentation/11_0_3/classes/CstParser.html#RULE)
8 | DSL method with [**OVERRIDE_RULE**](https://chevrotain.io/documentation/11_0_3/classes/CstParser.html#OVERRIDE_RULE) method when needed.
9 |
10 | See [executable example](https://github.com/chevrotain/chevrotain/tree/master/examples/parser/inheritance)
11 | for further details.
12 |
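13 | In code this looks roughly as follows (a minimal sketch; the token names `Hello`,
14 | `World` and `Planet` are just illustrative placeholders):
15 |
16 | ```javascript
17 | import { createToken, CstParser } from "chevrotain";
18 |
19 | const Hello = createToken({ name: "Hello", pattern: /hello/ });
20 | const World = createToken({ name: "World", pattern: /world/ });
21 | const Planet = createToken({ name: "Planet", pattern: /planet/ });
22 | const allTokens = [Hello, World, Planet];
23 |
24 | class BaseParser extends CstParser {
25 |   constructor() {
26 |     super(allTokens);
27 |   }
28 |
29 |   greeting = this.RULE("greeting", () => {
30 |     this.CONSUME(Hello);
31 |     this.CONSUME(World);
32 |   });
33 | }
34 |
35 | class ExtendedParser extends BaseParser {
36 |   constructor() {
37 |     super();
38 |     this.performSelfAnalysis();
39 |   }
40 |
41 |   // replaces the inherited "greeting" rule, everything else is inherited as-is
42 |   greeting = this.OVERRIDE_RULE("greeting", () => {
43 |     this.CONSUME(Hello);
44 |     this.CONSUME(Planet);
45 |   });
46 | }
47 | ```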
--------------------------------------------------------------------------------
/packages/website/docs/features/images/benchmark_chrome67.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Chevrotain/chevrotain/5b878b87dc1388463caca94f7b6e21265b9d9bfb/packages/website/docs/features/images/benchmark_chrome67.png
--------------------------------------------------------------------------------
/packages/website/docs/features/lexer_modes.md:
--------------------------------------------------------------------------------
1 | # Lexer Modes
2 |
3 | Chevrotain supports different lexing rules depending on lexer context.
4 | In essence this means having multiple separate lexers that can be switched between.
5 |
6 | This capability is necessary to tokenize some languages such as HTML.
7 |
8 | See [executable example](https://github.com/chevrotain/chevrotain/tree/master/examples/lexer/multi_mode_lexer)
9 | for further details.
10 |
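11 | A minimal sketch of a two-mode lexer (the token names and patterns are illustrative only):
12 |
13 | ```javascript
14 | import { createToken, Lexer } from "chevrotain";
15 |
16 | // entering a string literal switches the lexer into "string_mode"
17 | const OpenQuote = createToken({
18 |   name: "OpenQuote",
19 |   pattern: /"/,
20 |   push_mode: "string_mode",
21 | });
22 | // leaving the string literal returns to the previous mode
23 | const CloseQuote = createToken({
24 |   name: "CloseQuote",
25 |   pattern: /"/,
26 |   pop_mode: true,
27 | });
28 | const StringChars = createToken({ name: "StringChars", pattern: /[^"]+/ });
29 | const Identifier = createToken({ name: "Identifier", pattern: /[a-zA-Z]\w*/ });
30 |
31 | const multiModeLexer = new Lexer({
32 |   modes: {
33 |     default_mode: [OpenQuote, Identifier],
34 |     string_mode: [CloseQuote, StringChars],
35 |   },
36 |   defaultMode: "default_mode",
37 | });
38 |
39 | const result = multiModeLexer.tokenize('abc"hello"');
40 | ```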
--------------------------------------------------------------------------------
/packages/website/docs/features/llk.md:
--------------------------------------------------------------------------------
1 | # LL(K) Grammars
2 |
3 | Chevrotain can be used to build parsers for [LL(K)](https://en.wikipedia.org/wiki/LL_grammar) Grammars.
4 | This means that the number of lookahead tokens needed to disambiguate two alternatives must
5 | be a fixed number and known in advance.
6 |
7 | For example given the grammar
8 |
9 | ```antlr
10 | statement:
11 | A B C |
12 | A B D |
13 | A B E
14 | ```
15 |
16 | Chevrotain will look **three** tokens ahead to decide between the three alternatives.
17 |
18 | But given the following grammar
19 |
20 | ```antlr
21 | statement:
22 | longRule B |
23 | longRule C |
24 | longRule D
25 |
26 | longRule:
27 | A+
28 | ```
29 |
30 | Chevrotain will throw an error during the parser initialization in this case.
31 | This is because there is no fixed number of tokens we can use to choose between the alternatives,
32 | due to the potentially **infinite** number of "A" tokens that can appear before the "B", "C" or "D" token.
33 |
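34 | For reference, the first (valid) grammar above could be written with Chevrotain's
35 | parsing DSL roughly as follows (a sketch, assuming Token Types named A, B, C, D and E
36 | have already been created):
37 |
38 | ```javascript
39 | $.RULE("statement", () => {
40 |   $.OR([
41 |     // the third lookahead token (C / D / E) is what disambiguates the alternatives
42 |     { ALT: () => { $.CONSUME(A); $.CONSUME(B); $.CONSUME(C); } },
43 |     { ALT: () => { $.CONSUME1(A); $.CONSUME1(B); $.CONSUME(D); } },
44 |     { ALT: () => { $.CONSUME2(A); $.CONSUME2(B); $.CONSUME(E); } },
45 |   ]);
46 | });
47 | ```
48 |
49 | The required lookahead depth is computed automatically during self-analysis and is capped by the
50 | [**maxLookahead**](https://chevrotain.io/documentation/11_0_3/interfaces/IParserConfig.html#maxLookahead) parser configuration option.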
--------------------------------------------------------------------------------
/packages/website/docs/features/multiple_start_rules.md:
--------------------------------------------------------------------------------
1 | # Multiple Start Rules
2 |
3 | Chevrotain supports using **any** of the grammar rules as a starting rule.
4 | This means that any subset of a language can be parsed without being wrapped in
5 | other constructs. For example, this can be used for:
6 |
7 | - Implementing "debugger watch expressions" and "evaluate expression" in an IDE.
8 | - Parsing only modified text in an IDE for performance.
9 | - Easy unit testing for small language snippets.
10 |
11 | See [executable example](https://github.com/chevrotain/chevrotain/tree/master/examples/parser/multi_start_rules)
12 | for further details.
13 |
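14 | Invoking an alternative start rule is just a method call on the parser instance.
15 | A minimal sketch, assuming a lexer and a parser with an `expression` rule already exist:
16 |
17 | ```javascript
18 | const lexResult = myLexer.tokenize("salary + bonus");
19 | parser.input = lexResult.tokens;
20 |
21 | // any grammar rule may be used as the start rule, not only the "top level" one
22 | const cst = parser.expression();
23 |
24 | if (parser.errors.length > 0) {
25 |   // handle syntax errors
26 | }
27 | ```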
--------------------------------------------------------------------------------
/packages/website/docs/features/parameterized_rules.md:
--------------------------------------------------------------------------------
1 | # Parameterized Rules
2 |
3 | Chevrotain supports passing parameters to rules.
4 | This means that grammar rules may accept arguments from the calling rule.
5 | This is often used in combination with [gates](./gates.md) to
6 | represent multiple variants of the same parsing rule while avoiding code duplication.
7 |
8 | For example:
9 |
10 | ```javascript
11 | $.RULE("ArgumentInConst", () => {
12 | $.CONSUME(Name);
13 | $.CONSUME(Colon);
14 | // passing the argument using the "ARGS" property
15 | $.SUBRULE($.Value, { ARGS: [true] });
16 | });
17 |
18 | // isConst is a parameter passed from another rule.
19 | $.RULE("Value", (isConst) => {
20 | $.OR([
21 | // the Variable alternative is only possible when "isConst" is Falsey
22 | { GATE: () => !isConst, ALT: () => $.SUBRULE($.Variable) },
23 | { ALT: () => $.CONSUME(IntValue) },
24 | { ALT: () => $.CONSUME(FloatValue) },
25 | { ALT: () => $.CONSUME(StringValue) },
26 | ]);
27 | });
28 | ```
29 |
30 | See [executable example](https://github.com/chevrotain/chevrotain/tree/master/examples/parser/parametrized_rules)
31 | for further details.
32 |
--------------------------------------------------------------------------------
/packages/website/docs/features/position_tracking.md:
--------------------------------------------------------------------------------
1 | # Position Tracking
2 |
3 | // TODO: we should document the location tracking for both Lexer and Parser in this section...
4 |
5 | Chevrotain lexers will track the full token position information by default.
6 | This means:
7 |
8 |
9 | - start and end offsets.
10 | - start and end lines.
11 | - start and end columns.
12 |
13 | The level of position information tracking can be reduced by using the [**positionTracking**](https://chevrotain.io/documentation/11_0_3/interfaces/ILexerConfig.html#positionTracking) lexer config option.
14 | For example:
15 |
16 | ```javascript
17 | import { Lexer } from "chevrotain";
18 | const allTokens = [];
19 | // createTokens...
20 | const myLexer = new Lexer(allTokens, { positionTracking: "onlyOffset" });
21 | ```
22 |
--------------------------------------------------------------------------------
/packages/website/docs/features/regexp.md:
--------------------------------------------------------------------------------
1 | # RegExp Based Lexers
2 |
3 | Chevrotain Lexers are defined using **standard** ECMAScript regular expressions.
4 | This means there is no need to learn a new syntax and/or semantics.
5 |
6 | In addition, existing JavaScript regExp libraries can easily be reused. For example,
7 | using the awesome [xRegExp library](https://github.com/slevithan/XRegExp) one can simplify the creation of complex patterns and avoid code duplication.
8 |
9 | ```javascript
10 | $.RULE("statement", () => {
11 | const fragments = {};
12 |
13 | // A utility to create re-usable fragments using xRegExp
14 | function FRAGMENT(name, def) {
15 | fragments[name] = XRegExp.build(def, fragments);
16 | }
17 |
18 | // a utility to create a pattern using previously defined fragments
19 | function MAKE_PATTERN(def, flags) {
20 | return XRegExp.build(def, fragments, flags);
21 | }
22 |
23 | // define fragments
24 | FRAGMENT("IntegerPart", "-?(0|[1-9][0-9]*)");
25 | FRAGMENT("FractionalPart", "\\.[0-9]+");
26 | FRAGMENT("ExponentPart", "[eE][+-]?[0-9]+");
27 |
28 | const IntValue = createToken({
29 | name: "IntValue",
30 | // Simple use case, not really needed in this case except for avoiding duplication.
31 | pattern: MAKE_PATTERN("{{IntegerPart}}"),
32 | });
33 |
34 | const FloatValue = createToken({
35 | name: "FloatValue",
36 | pattern: MAKE_PATTERN(
37 | // This regExp would be very hard to read without "named fragments"
38 | "{{IntegerPart}}{{FractionalPart}}({{ExponentPart}})?|{{IntegerPart}}{{ExponentPart}}",
39 | ),
40 | });
41 | });
42 | ```
43 |
44 | See [full executable example](https://github.com/chevrotain/chevrotain/blob/master/examples/grammars/graphql/graphql.js)
45 | as part of the graphQL example grammar.
46 |
--------------------------------------------------------------------------------
/packages/website/docs/features/separation.md:
--------------------------------------------------------------------------------
1 | # Separation of Grammar and Semantics
2 |
3 | Chevrotain is not limited like many other parsing libraries to only embedding actions inside the grammar,
4 | It can also **automatically** create a [**C**oncrete **S**yntax **T**ree](https://chevrotain.io/docs/guide/concrete_syntax_tree.html)
5 | Which can later be traversed using the [visitor pattern](https://en.wikipedia.org/wiki/Visitor_pattern).
6 |
7 | This implements the design principle of [Separation of Concerns](https://en.wikipedia.org/wiki/Separation_of_concerns)
8 | which enables **re-use** of the same **pure** grammar for multiple purposes.
9 |
10 | See example of two identical mathematical expression grammars:
11 |
12 | - Firstly using [embedded actions](https://github.com/chevrotain/chevrotain/blob/master/examples/grammars/calculator/calculator_embedded_actions.js) for semantics.
13 | - Secondly using [Separated semantics](https://github.com/chevrotain/chevrotain/blob/master/examples/grammars/calculator/calculator_pure_grammar.js) with a CST Visitor.
14 |
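15 | The separated-semantics flavor boils down to a CST visitor class. A minimal sketch, assuming a
16 | `parser` instance, a `cst` it produced, and an `expression` rule whose CST has `lhs`, `operator` and `rhs` children:
17 |
18 | ```javascript
19 | // a base visitor class is derived from the grammar itself
20 | const BaseVisitor = parser.getBaseCstVisitorConstructor();
21 |
22 | class ExpressionEvaluator extends BaseVisitor {
23 |   constructor() {
24 |     super();
25 |     // static check that a visit method exists for every grammar rule
26 |     this.validateVisitor();
27 |   }
28 |
29 |   // the semantics live here, completely outside the grammar definition
30 |   expression(ctx) {
31 |     const lhs = Number(ctx.lhs[0].image);
32 |     const rhs = Number(ctx.rhs[0].image);
33 |     return ctx.operator[0].tokenType.name === "Plus" ? lhs + rhs : lhs - rhs;
34 |   }
35 | }
36 |
37 | const result = new ExpressionEvaluator().visit(cst);
38 | ```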
--------------------------------------------------------------------------------
/packages/website/docs/features/syntactic_content_assist.md:
--------------------------------------------------------------------------------
1 | # Syntactic Content Assist
2 |
3 | Chevrotain can suggest the next possible Token after a partial input.
4 | This feature can be used as a building block for full **semantic** content assist.
5 |
6 | See the [full guide](../guide/syntactic_content_assist.md) for further details.
7 |
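8 | A minimal sketch, assuming a lexer and a parser with a `selectStatement` rule already exist:
9 |
10 | ```javascript
11 | // tokenize only the text *preceding* the cursor position
12 | const partial = myLexer.tokenize("SELECT name FROM ");
13 |
14 | // ask the parser which Token Types may legally appear next
15 | const suggestions = parser.computeContentAssist(
16 |   "selectStatement",
17 |   partial.tokens,
18 | );
19 |
20 | suggestions.forEach((s) => console.log(s.nextTokenType.name));
21 | ```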
--------------------------------------------------------------------------------
/packages/website/docs/features/syntax_diagrams.md:
--------------------------------------------------------------------------------
1 | # Syntax Diagrams
2 |
3 | Chevrotain exposes APIs to generate html syntax diagrams.
4 | These **visual** diagrams are a very useful development tool for grammars
5 | and can also be used as a kind of automatic self-documentation.
6 |
7 | For a quick preview see:
8 |
9 | - [JSON Syntax diagrams](https://chevrotain.io/diagrams_samples/json.html).
10 | - [CSS Syntax diagrams](https://chevrotain.io/diagrams_samples/css.html).
11 |
12 | And see the [full guide](../guide/generating_syntax_diagrams.md) for further details.
13 |
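14 | Generating the html takes only two API calls. A minimal sketch in a node.js script,
15 | assuming a `MyParser` grammar class exists:
16 |
17 | ```javascript
18 | import { writeFileSync } from "fs";
19 | import { createSyntaxDiagramsCode } from "chevrotain";
20 | import { MyParser } from "./my_parser.js"; // hypothetical grammar module
21 |
22 | const parser = new MyParser();
23 | // serialize the grammar and render it into a standalone html document
24 | const htmlText = createSyntaxDiagramsCode(parser.getSerializedGastProductions());
25 |
26 | writeFileSync("generated_diagrams.html", htmlText);
27 | ```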
--------------------------------------------------------------------------------
/packages/website/docs/features/token_alternative_matches.md:
--------------------------------------------------------------------------------
1 | # Token Alternative Matches
2 |
3 | Chevrotain supports attempting a secondary **longer** match after a token has (already) been matched.
4 | This capability is most often used to resolve the keywords vs. identifiers ambiguity.
5 |
6 | For example:
7 |
8 | ```javascript
9 | import { createToken } from "chevrotain";
10 |
11 | const Identifier = createToken({
12 | name: "Identifier",
13 | pattern: /[a-zA-Z]\w*/,
14 | });
15 |
16 | const ClassKeyword = createToken({
17 | name: "ClassKeyword",
18 | pattern: /class/,
19 | longer_alt: Identifier,
20 | });
21 | ```
22 |
23 | Note that the `longer_alt` capability **cannot be chained**: only a single level of longer_alt will be checked for a specific Token. A token may, however, define multiple longer alternatives using an array; as usual with the lexer, the first matching alternative in the array will be chosen.
24 |
25 | See [executable example](https://github.com/chevrotain/chevrotain/tree/master/examples/lexer/keywords_vs_identifiers)
26 | for further details.
27 |
--------------------------------------------------------------------------------
/packages/website/docs/features/token_categories.md:
--------------------------------------------------------------------------------
1 | # Token Categories
2 |
3 | When parsing it is sometimes useful to match a terminal against a **set of Token Types**.
4 | This can be accomplished by using Token Categories.
5 |
6 | For example:
7 |
8 | ```javascript
9 | // "KeywordOrIdentifier" is our Token category used to match any keyword or Identifier
10 | const KeywordOrIdentifier = createToken({
11 | name: "AnyWord",
12 | pattern: Lexer.NA,
13 | });
14 |
15 | // General Identifier
16 | export const Identifier = createToken({
17 | name: "Identifier",
18 | pattern: /[a-zA-Z]\w*/,
19 | categories: [KeywordOrIdentifier],
20 | });
21 |
22 | // a Keyword
23 | export const Class = createToken({
24 | name: "Class",
25 | pattern: /Class/,
26 | longer_alt: Identifier,
27 | categories: [KeywordOrIdentifier],
28 | });
29 | ```
30 |
31 | ```javascript
32 | $.RULE("SomeRule", () => {
33 | // This would match either an Identifier or a keyword, thus allowing for
34 | // "Non-Reserved Keywords"
35 | $.CONSUME(KeywordOrIdentifier);
36 | });
37 | ```
38 |
39 | Note that:
40 |
41 | - A Token category is simply another Token Type.
42 | - A Token Type may have **multiple** Token Categories.
43 |
--------------------------------------------------------------------------------
/packages/website/docs/features/token_grouping.md:
--------------------------------------------------------------------------------
1 | # Token Grouping
2 |
3 | Chevrotain lexers support grouping Token Types **separately** from the main token vector in the lexing result.
4 | This is often useful to **collect** a specific set of Token Types for later processing, for example to collect comment tokens.
5 |
6 | To group a Token Type simply specify the [**group**](https://chevrotain.io/documentation/11_0_3/interfaces/ITokenConfig.html#group) property in its configuration.
7 | For example:
8 |
9 | ```javascript
10 | const Comment = createToken({
11 | name: "Comment",
12 | pattern: /\/\/.+/,
13 | group: "comments",
14 | });
15 | ```
16 |
17 | See [executable example](https://github.com/chevrotain/chevrotain/tree/master/examples/lexer/token_groups)
18 | for further details.
19 |
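20 | Tokens placed in a group do not appear in the main token vector; they are collected under
21 | the `groups` property of the lexing result instead. A minimal sketch reusing the `Comment`
22 | token defined above (the other Token Types are illustrative placeholders):
23 |
24 | ```javascript
25 | import { createToken, Lexer } from "chevrotain";
26 |
27 | const Identifier = createToken({ name: "Identifier", pattern: /[a-zA-Z]\w*/ });
28 | const WhiteSpace = createToken({
29 |   name: "WhiteSpace",
30 |   pattern: /\s+/,
31 |   group: Lexer.SKIPPED,
32 | });
33 |
34 | const lexer = new Lexer([Comment, Identifier, WhiteSpace]);
35 | const result = lexer.tokenize("age // the user's age");
36 |
37 | // the comment is not part of the main token vector...
38 | console.log(result.tokens.length); // 1 (only the "age" Identifier)
39 | // ...it is collected under its group name instead
40 | console.log(result.groups.comments[0].image); // "// the user's age"
41 | ```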
--------------------------------------------------------------------------------
/packages/website/docs/features/token_skipping.md:
--------------------------------------------------------------------------------
1 | # Token Skipping
2 |
3 | Chevrotain supports ignoring specific Token Types.
4 | This means that these Token Types would be lexed but would not appear in the Token Vector the lexer produces.
5 | This capability is often used to ignore certain types of Tokens, most commonly whitespace.
6 |
7 | To skip a Token, define its group as the special **Lexer.SKIPPED** group.
8 | For example:
9 |
10 | ```javascript
11 | import { createToken, Lexer } from "chevrotain";
12 | const WhiteSpace = createToken({
13 | name: "WhiteSpace",
14 | pattern: /\s+/,
15 | group: Lexer.SKIPPED,
16 | });
17 | ```
18 |
--------------------------------------------------------------------------------
/packages/website/docs/guide/generating_syntax_diagrams.md:
--------------------------------------------------------------------------------
1 | # Syntax Diagrams
2 |
3 | It is often useful to visually inspect a grammar's syntax diagrams during development
4 | or for documentation purposes.
5 |
6 | This document contains instructions on how to generate Syntax railroad diagrams for a Chevrotain
7 | grammar using the [railroad-diagrams](https://github.com/tabatkins/railroad-diagrams)
8 | library by @tabatkins.
9 |
10 | ## Examples
11 |
12 | - [JSON Syntax diagrams](https://chevrotain.io/diagrams_samples/json.html).
13 | - [CSS Syntax diagrams](https://chevrotain.io/diagrams_samples/css.html).
14 |
15 | ## Features
16 |
17 | - Highlight usages and definitions on mouse hover.
18 | - Scroll to definition of non-terminal on mouse click.
19 |
20 | ## Instructions
21 |
22 | Chevrotain provides the [**createSyntaxDiagramsCode**](https://chevrotain.io/documentation/11_0_3/modules.html#createsyntaxdiagramscode) API to generate the **html source code**
23 | of syntax diagrams. This html source code can then be used by an end user in either node.js or a browser:
24 |
25 | 1. By writing it directly to the disk in a pure node.js runtime scenario.
26 | 2. By inserting it dynamically into an iframe in a browser scenario.
27 |
28 | **Examples:**
29 |
30 | - [Generating syntax diagrams to a file](https://github.com/chevrotain/chevrotain/blob/master/examples/parser/diagrams/creating_html_file.js)
31 |
32 | - Self contained, no need for Chevrotain or the grammar when rendering the html.
33 |
34 | - [Generating syntax diagrams dynamically into an iframe](https://github.com/chevrotain/chevrotain/blob/master/examples/parser/diagrams/diagrams_browser.html)
35 | - Requires loading **both** Chevrotain and the grammar (and dependencies!) when rendering the html.
36 |
37 | ## Customization
38 |
39 | The [logic for generating the HTML](https://github.com/Chevrotain/chevrotain/blob/master/packages/chevrotain/src/diagrams/render_public.ts)
40 | is quite trivial and the generated code itself is also very simple with a decent separation of concerns.
41 | These can be used as a basis for creating more advanced custom scenarios, for example:
42 |
43 | - Adding a module loader such as system.js/require.js
44 | - Dynamically rendering diagrams of a Grammar in an IDE.
45 | - Rendering diagrams of a pure EBNF grammar (Not a Chevrotain grammar) as the diagrams are rendered
46 | using a serialized format.
47 |
--------------------------------------------------------------------------------
/packages/website/docs/guide/introduction.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | These guides describe capabilities of Chevrotain in depth.
4 | For a full understanding it is recommended to also
5 | run and debug the provided code samples linked in each guide.
6 |
--------------------------------------------------------------------------------
/packages/website/docs/tutorial/step0_introduction.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | ## Installation
4 |
5 | ```shell
6 | yarn add chevrotain
7 | # OR
8 | npm install chevrotain
9 | ```
10 |
11 | ## Scenario
12 |
13 | In this tutorial we will implement a parser for a simplified SQL syntax which will contain only SELECT statements.
14 | The output of the parser will be an Abstract Syntax Tree (AST).
15 |
16 | ## Running & Debugging
17 |
18 | The code snippets in these tutorial steps are part of an
19 | executable and debuggable [example](https://github.com/chevrotain/chevrotain/tree/master/examples/tutorial).
20 | It is recommended to use these sources either as an initial template
21 | or as an executable example in order to gain a deeper understanding.
22 |
23 | ## Samples Syntax
24 |
25 | This tutorial uses ES2015+ syntax.
26 | See examples of how to use Chevrotain with other [implementation languages](https://github.com/chevrotain/chevrotain/tree/master/examples/implementation_languages).
27 |
--------------------------------------------------------------------------------
/packages/website/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@chevrotain/website",
3 | "version": "11.0.3",
4 | "type": "module",
5 | "private": true,
6 | "description": "source Code for https://chevrotain.io/docs/",
7 | "license": "Apache-2.0",
8 | "author": {
9 | "name": "Shahar Soel"
10 | },
11 | "homepage": "https://chevrotain.io/docs/",
12 | "scripts": {
13 | "ci": "pnpm run build",
14 | "build": "npm-run-all website:build",
15 | "version": "node ./scripts/version-update.js",
16 | "postversion": "npm-run-all website:build website:upload",
17 | "website:dev": "vuepress dev ./docs",
18 | "website:build": "vuepress build ./docs",
19 | "website:upload": "./scripts/website-upload.sh"
20 | },
21 | "devDependencies": {
22 | "@mdit-vue/shared": "2.1.3",
23 | "@vuepress/client": "2.0.0-beta.64",
24 | "@vuepress/plugin-docsearch": "2.0.0-beta.64",
25 | "@vuepress/theme-default": "2.0.0-beta.64",
26 | "gitty": "3.7.2",
27 | "jsonfile": "6.1.0",
28 | "lodash": "4.17.21",
29 | "vuepress": "2.0.0-beta.64"
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/packages/website/scripts/version-config.js:
--------------------------------------------------------------------------------
1 | import fs from "fs";
2 | import jf from "jsonfile";
3 | import path, { dirname } from "path";
4 | import _ from "lodash";
5 | import { fileURLToPath } from "url";
6 |
7 | const __dirname = dirname(fileURLToPath(import.meta.url));
8 |
9 | const packagePath = path.join(__dirname, "../package.json");
10 | export const changeLogPath = path.join(
11 | __dirname,
12 | "../docs/changes/CHANGELOG.md",
13 | );
14 |
15 | const docsDirPath = path.join(__dirname, "../docs");
16 | const docFiles = fs.readdirSync(docsDirPath);
17 |
18 | const allDocFilesPaths = _.map(docFiles, function (file) {
19 | return path.join(docsDirPath, file);
20 | });
21 |
22 | function notChangesDocs(path) {
23 | return !_.includes(path, "changes/");
24 | }
25 |
26 | export const markdownDocsFiles = _.reduce(
27 | allDocFilesPaths,
28 | (result, currPath) => {
29 | // Only scan 2 directories deep.
30 | if (fs.lstatSync(currPath).isDirectory()) {
31 | const nestedFiles = fs.readdirSync(currPath);
32 | const nestedPaths = _.map(nestedFiles, (currFile) =>
33 | path.join(currPath, currFile),
34 | );
35 | const newMarkdowns = _.filter(
36 | nestedPaths,
37 | (currPath) => _.endsWith(currPath, ".md") && notChangesDocs(currPath),
38 | );
39 |
40 | result = result.concat(newMarkdowns);
41 | } else if (
42 | fs.lstatSync(currPath).isFile() &&
43 | _.endsWith(currPath, ".md") &&
44 | notChangesDocs(currPath)
45 | ) {
46 | result.push(currPath);
47 | }
48 |
49 | return result;
50 | },
51 | [],
52 | );
53 |
54 | const pkgJson = jf.readFileSync(packagePath);
55 | export const currVersion = pkgJson.version;
56 |
57 | export const changeLogString = fs
58 | .readFileSync(changeLogPath, "utf8")
59 | .toString();
60 |
--------------------------------------------------------------------------------
/packages/website/scripts/version-update.js:
--------------------------------------------------------------------------------
1 | import fs from "fs";
2 | import git from "gitty";
3 | import _ from "lodash";
4 | import {
5 | changeLogPath,
6 | changeLogString,
7 | currVersion,
8 | markdownDocsFiles,
9 | } from "./version-config.js";
10 |
11 | const myRepo = git("");
12 |
13 | const newVersion = currVersion;
14 | const dateTemplateRegExp = /(## X\.Y\.Z )\(INSERT_DATE_HERE\)/;
15 | if (!dateTemplateRegExp.test(changeLogString)) {
16 | console.log("CHANGELOG.md must contain '## X.Y.Z (INSERT_DATE_HERE)'");
17 | process.exit(-1);
18 | }
19 |
20 | // updating CHANGELOG.md date
21 | const nowDate = new Date();
22 | const nowDateString = nowDate.toLocaleDateString("en-US").replace(/\//g, "-");
23 | const changeLogDate = changeLogString.replace(
24 | dateTemplateRegExp,
25 | "## " + newVersion + " " + "(" + nowDateString + ")",
26 | );
27 | fs.writeFileSync(changeLogPath, changeLogDate);
28 |
29 | _.forEach(markdownDocsFiles, function (currDocPath) {
30 | if (_.includes(currDocPath, "changes")) {
31 | console.log("SKIPPING bumping file: <" + currDocPath + ">");
32 | return;
33 | }
34 | console.log("bumping file: <" + currDocPath + ">");
35 | const currItemContents = fs.readFileSync(currDocPath, "utf8").toString();
36 | const bumpedItemContents = currItemContents.replace(
37 | /\d+_\d+_\d+/g,
38 | newVersion.replace(/\./g, "_"),
39 | );
40 | fs.writeFileSync(currDocPath, bumpedItemContents);
41 | });
42 |
43 | // Just adding to the current commit is sufficient as lerna does the commit + tag + push
44 | myRepo.addSync([changeLogPath].concat(markdownDocsFiles));
45 |
--------------------------------------------------------------------------------
/packages/website/scripts/website-upload.sh:
--------------------------------------------------------------------------------
1 | set -e
2 | rm -rf gh-pages
3 | mkdir gh-pages
4 | cd gh-pages
5 |
6 | git clone https://github.com/chevrotain/chevrotain.git .
7 | git checkout gh-pages
8 |
9 | # update contents
10 | rm -rf docs
11 | cp -r ../docs/.vuepress/dist/ docs
12 |
13 | git add -A
14 | git commit -m 'Update Website'
15 | git push
16 |
17 | # cleanup
18 | cd ..
19 | rm -rf gh-pages
--------------------------------------------------------------------------------
/pnpm-workspace.yaml:
--------------------------------------------------------------------------------
1 | packages:
2 | - 'packages/*'
3 | - 'examples/*'
--------------------------------------------------------------------------------
/renovate.json5:
--------------------------------------------------------------------------------
1 | {
2 | extends: ["config:base"],
3 | dependencyDashboard: true,
4 | labels: ["dependencies"],
5 | packageRules: [
6 | {
7 | groupName: "all non-major dev dependencies",
8 | groupSlug: "all-dev-minor-patch",
9 | matchPackagePatterns: ["*"],
10 | excludePackagePatterns: [
11 | // typedoc upgrades require manually validating the generated website
12 | "typedoc",
13 | // vuepress and related deps tend to break things often and require manual inspection
14 | "vue",
15 | "vuepress",
16 | ],
17 | matchDepTypes: ["devDependencies"],
18 | matchUpdateTypes: ["minor", "patch"],
19 | },
20 | ],
21 | }
22 |
--------------------------------------------------------------------------------
/tsconfig.base.json:
--------------------------------------------------------------------------------
1 | {
2 | "compileOnSave": true,
3 | "compilerOptions": {
4 | "target": "ES2015",
5 | "composite": true,
6 | "moduleResolution": "node",
7 | "module": "ES2020",
8 | "esModuleInterop": true,
9 | "removeComments": false,
10 | "sourceMap": true,
11 | "declaration": true,
12 | "lib": ["es2015", "dom"],
13 | "strict": true
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./tsconfig.base.json",
3 | "references": [
4 | {
5 | "path": "./packages/regexp-to-ast"
6 | },
7 | {
8 | "path": "./packages/utils"
9 | },
10 | {
11 | "path": "./packages/types"
12 | },
13 | {
14 | "path": "./packages/gast"
15 | },
16 | {
17 | "path": "./packages/cst-dts-gen"
18 | },
19 | {
20 | "path": "./packages/chevrotain"
21 | },
22 | {
23 | "path": "./packages/cst-dts-gen-test"
24 | }
25 | ],
26 | // needed to avoid trying to compile other .ts files in this repo
27 | // and/or try to compile files included in the above references, only with incorrect settings
28 | "files": [],
29 | "include": [],
30 | "exclude": []
31 | }
32 |
--------------------------------------------------------------------------------