├── .editorconfig
├── .github
└── workflows
│ ├── checks.yml
│ ├── labels.yml
│ ├── release.yml
│ └── stale.yml
├── .gitignore
├── .npmrc
├── .prettierignore
├── LICENSE.md
├── README.md
├── bin
└── test.ts
├── errors
├── E_CANNOT_SEEK_STATEMENT.md
├── E_UNCLOSED_CURLY_BRACE.md
├── E_UNCLOSED_PAREN.md
├── E_UNCLOSED_TAG.md
└── E_UNOPENED_PAREN.md
├── eslint.config.js
├── examples
└── index.js
├── fixtures
└── index.ts
├── index.ts
├── package.json
├── perf
├── index.js
└── tags.js
├── src
├── detector.ts
├── enums.ts
├── exceptions.ts
├── scanner.ts
├── tokenizer.ts
├── types.ts
└── utils.ts
├── tests
├── detector.spec.ts
├── fixtures.spec.ts
├── scanner.spec.ts
├── tokenizer_comment.spec.ts
├── tokenizer_mustache.spec.ts
├── tokenizer_tags.spec.ts
├── tokenizer_tags_generic_end.spec.ts
└── utils.spec.ts
└── tsconfig.json
/.editorconfig:
--------------------------------------------------------------------------------
1 | # http://editorconfig.org
2 | root = true
3 |
4 | [*]
5 | indent_style = space
6 | indent_size = 2
7 | end_of_line = lf
8 | charset = utf-8
9 | trim_trailing_whitespace = true
10 | insert_final_newline = true
11 |
12 | [*.json]
13 | insert_final_newline = ignore
14 |
15 | [**.min.js]
16 | indent_style = ignore
17 | insert_final_newline = ignore
18 |
19 | [Makefile]
20 | indent_style = tab
21 |
22 | [*.md]
23 | trim_trailing_whitespace = false
24 |
25 | [MakeFile]
26 | indent_style = space
27 |
--------------------------------------------------------------------------------
/.github/workflows/checks.yml:
--------------------------------------------------------------------------------
1 | name: checks
2 | on:
3 | - push
4 | - pull_request
5 | - workflow_call
6 |
7 | jobs:
8 | test:
9 | uses: edge-js/.github/.github/workflows/test.yml@main
10 |
11 | lint:
12 | uses: edge-js/.github/.github/workflows/lint.yml@main
13 |
14 | typecheck:
15 | uses: edge-js/.github/.github/workflows/typecheck.yml@main
16 |
--------------------------------------------------------------------------------
/.github/workflows/labels.yml:
--------------------------------------------------------------------------------
1 | name: Sync labels
2 | on:
3 | workflow_dispatch:
4 | permissions:
5 | issues: write
6 | jobs:
7 | labels:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/checkout@v4
11 | - uses: EndBug/label-sync@v2
12 | with:
13 | config-file: 'https://raw.githubusercontent.com/thetutlage/static/main/labels.yml'
14 | delete-other-labels: true
15 | token: ${{ secrets.GITHUB_TOKEN }}
16 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: release
2 | on: workflow_dispatch
3 | permissions:
4 | contents: write
5 | id-token: write
6 | jobs:
7 | checks:
8 | uses: ./.github/workflows/checks.yml
9 | release:
10 | needs: checks
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | with:
15 | fetch-depth: 0
16 | - uses: actions/setup-node@v4
17 | with:
18 | node-version: 20
19 | - name: git config
20 | run: |
21 | git config user.name "${GITHUB_ACTOR}"
22 | git config user.email "${GITHUB_ACTOR}@users.noreply.github.com"
23 | - name: Init npm config
24 | run: npm config set //registry.npmjs.org/:_authToken $NPM_TOKEN
25 | env:
26 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
27 | - run: npm install
28 | - run: npm run release -- --ci
29 | env:
30 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
31 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
32 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
33 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: 'Close stale issues and PRs'
2 | on:
3 | schedule:
4 | - cron: '30 0 * * *'
5 |
6 | jobs:
7 | stale:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/stale@v9
11 | with:
12 | stale-issue-message: 'This issue has been marked as stale because it has been inactive for more than 21 days. Please reopen if you still need help on this issue'
13 | stale-pr-message: 'This pull request has been marked as stale because it has been inactive for more than 21 days. Please reopen if you still intend to submit this pull request'
14 | close-issue-message: 'This issue has been automatically closed because it has been inactive for more than 4 weeks. Please reopen if you still need help on this issue'
15 | close-pr-message: 'This pull request has been automatically closed because it has been inactive for more than 4 weeks. Please reopen if you still intend to submit this pull request'
16 | days-before-stale: 21
17 | days-before-close: 5
18 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | coverage
2 | node_modules
3 | .DS_Store
4 | npm-debug.log
5 | .idea
6 | .nyc_output
7 | .DS_STORE
8 | .vscode/
9 | *.sublime-project
10 | *.sublime-workspace
11 | *.log
12 | yarn.lock
13 | build
14 | dist
15 | shrinkwrap.yaml
16 | test/__app
17 |
--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------
1 | message="chore(release): %s"
2 | package-lock=false
3 |
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | build
2 | docs
3 | config.json
4 | .eslintrc.json
5 | package.json
6 | *.html
7 | *.md
8 | *.txt
9 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | # The MIT License
2 |
3 | Copyright 2021 Harminder Virk, contributors
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # edge-lexer
2 | > Generate tokens by parsing a raw edge markup file
3 |
4 | [![gh-workflow-image]][gh-workflow-url] [![npm-image]][npm-url] ![][typescript-image] [![license-image]][license-url]
5 |
6 | Edge lexer produces a list of `tokens` by scanning for [Edge whitelisted syntax](https://github.com/edge-js/syntax).
7 |
8 | This module is a blend of a `lexer` and an `AST generator`, since Edge doesn't need a pure [lexer](https://en.wikipedia.org/wiki/Lexical_analysis) that scans for each character. Edge markup is written within other markup languages like **HTML** or **Markdown** and walking over each character is a waste of resources.
9 |
10 | Instead, this module starts by detecting for the [Edge whitelisted syntax](https://github.com/edge-js/syntax) and then starts the lexical analysis within the detected markup.
11 |
12 |
13 |
14 | ## Table of contents
15 |
16 | - [Table of contents](#table-of-contents)
17 | - [Highlights](#highlights)
18 | - [Performance](#performance)
19 | - [Usage](#usage)
20 | - [Pre-processing lines](#pre-processing-lines)
21 | - [Terms used](#terms-used)
22 | - [Tokens](#tokens)
23 | - [Tag Token](#tag-token)
24 | - [Escaped Tag Token](#escaped-tag-token)
25 | - [Raw Token](#raw-token)
26 | - [Comment Token](#comment-token)
27 | - [NewLine Token](#newline-token)
28 | - [Mustache Token](#mustache-token)
29 | - [Safe Mustache Token](#safe-mustache-token)
30 | - [Escaped Mustache Token](#escaped-mustache-token)
31 | - [Escaped Safe Mustache Token](#escaped-safe-mustache-token)
32 | - [Properties](#properties)
33 | - [BlockProp](#blockprop)
34 | - [Prop](#prop)
35 | - [Mustache expressions](#mustache-expressions)
36 | - [Errors](#errors)
37 | - [Example](#example)
38 | - [Raised exceptions](#raised-exceptions)
39 | - [Maintainers](#maintainers)
40 |
41 |
42 |
43 | ## Highlights
44 |
45 | - Just one dependency to standardize edge errors.
46 | - Uses only one regex statement. Tested against [safe-regex](https://github.com/substack/safe-regex) for ReDOS
47 | - Support for multiline expressions
48 | - Collects line and columns for accurate stack traces
49 | - Detects for unclosed tags
50 | - Detects for unwrapped expressions and raises appropriate errors
51 |
52 | ---
53 |
54 | ## Performance
55 |
56 | Following measures are taken to keep the analysis performant.
57 |
58 | 1. Only analyse markup that is detected as Edge whitelisted syntax.
59 | 2. Only analyse `tags` that are passed to the tokenizer. This means even if the syntax for tags is whitelisted, the tokeniser will only analyse them if they are used by your app.
60 | 3. Do not analyse Javascript expression and leave that for [edge-parser](https://github.com/edge-js/parser).
61 | 4. Only uses one Regular expression.
62 |
63 | ---
64 |
65 | ## Usage
66 |
67 | ```js
68 | import { Tokenizer } from 'edge-lexer'
69 |
70 | const template = `Hello {{ username }}`
71 | const tags = {
72 | if: {
73 | block: true,
74 | seekable: true,
75 | },
76 | }
77 |
78 | // Filename is required to add it to error messages
79 | const options = {
80 | filename: 'welcome.edge',
81 | }
82 |
83 | const tokenizer = new Tokenizer(template, tags, options)
84 |
85 | tokenizer.parse()
86 | console.log(tokenizer.tokens)
87 | ```
88 |
89 | ---
90 |
91 | ## Pre-processing lines
92 |
93 | You can also pre-process lines before the tokenizer tokenizes them.
94 |
95 | ```ts
96 | const options = {
97 | filename: 'welcome.edge',
98 | onLine: (line: string) => {
99 | // transform here and return new string value
100 | return line
101 | },
102 | }
103 |
104 | const tokenizer = new Tokenizer(template, {}, options)
105 | ```
106 |
107 | ---
108 |
109 | ## Terms used
110 |
111 | This guide makes use of the following terms to identify core pieces of the tokenizer.
112 |
113 | | Term | Token Type | Description |
114 | | --------------------- | -------------- | ----------------------------------------------------------------------------------------------------- |
115 | | Tag | tag | Tags are used to define logical blocks in the template engine. For example `if tag` or `include tag`. |
116 | | Escaped Tag | e\_\_tag | Escaped tag, Edge will not evaluate it at runtime. |
117 | | Mustache | mustache | Javascript expression wrapped in curly braces. `{{ }}` |
118 | | Safe Mustache | s\_\_mustache | Safe mustache, that doesn't escape the output `{{{ }}}` |
119 | | Escaped Mustache | e\_\_mustache | Mustache tag that is escaped |
120 | | Escaped Safe Mustache | es\_\_mustache | Safe Mustache tag that is escaped |
121 | | Raw | raw | A raw string, which has no meaning for the template engine |
122 | | NewLine | newline | Newline |
123 | | Comment               | comment        | Edge specific comment block. This will be stripped from the output.                                   |
124 |
125 | ---
126 |
127 | ## Tokens
128 |
129 | Following is the list of Nodes returned by the tokenizer.
130 |
131 | #### Tag Token
132 |
133 | ```js
134 | {
135 | type: 'tag'
136 | filename: 'eval.edge',
137 | loc: {
138 | start: {
139 | line: 1,
140 | col: 4
141 | },
142 | end: {
143 | line: 1,
144 | col: 13
145 | }
146 | },
147 | properties: BlockProp,
148 | children: []
149 | }
150 | ```
151 |
152 | #### Escaped Tag Token
153 |
154 | ```diff
155 | {
156 | - type: 'tag',
157 | + type: 'e__tag',
158 | filename: 'eval.edge',
159 | loc: {
160 | start: {
161 | line: 1,
162 | col: 4
163 | },
164 | end: {
165 | line: 1,
166 | col: 13
167 | }
168 | },
169 | properties: BlockProp,
170 | children: []
171 | }
172 | ```
173 |
174 | #### Raw Token
175 |
176 | ```js
177 | {
178 | type: 'raw',
179 | filename: 'eval.edge',
180 | line: number,
181 | value: string
182 | }
183 | ```
184 |
185 | #### Comment Token
186 |
187 | ```js
188 | {
189 | type: 'comment',
190 | filename: 'eval.edge',
191 | line: number,
192 | value: string
193 | }
194 | ```
195 |
196 | #### NewLine Token
197 |
198 | ```js
199 | {
200 | type: 'newline',
201 | line: number
202 | }
203 | ```
204 |
205 | #### Mustache Token
206 |
207 | ```js
208 | {
209 | type: 'mustache',
210 | filename: 'eval.edge',
211 | loc: {
212 | start: {
213 | line: 1,
214 | col: 4
215 | },
216 | end: {
217 | line: 1,
218 | col: 13
219 | }
220 | },
221 | properties: Prop
222 | }
223 | ```
224 |
225 | #### Safe Mustache Token
226 |
227 | ```diff
228 | {
229 | - type: 'mustache',
230 | + type: 's__mustache',
231 | filename: 'eval.edge',
232 | loc: {
233 | start: {
234 | line: 1,
235 | col: 4
236 | },
237 | end: {
238 | line: 1,
239 | col: 13
240 | }
241 | },
242 | properties: Prop
243 | }
244 | ```
245 |
246 | #### Escaped Mustache Token
247 |
248 | ```diff
249 | {
250 | - type: 'mustache',
251 | + type: 'e__mustache',
252 | filename: 'eval.edge',
253 | loc: {
254 | start: {
255 | line: 1,
256 | col: 4
257 | },
258 | end: {
259 | line: 1,
260 | col: 13
261 | }
262 | },
263 | properties: Prop
264 | }
265 | ```
266 |
267 | #### Escaped Safe Mustache Token
268 |
269 | ```diff
270 | {
271 | - type: 'mustache',
272 | + type: 'es__mustache',
273 | filename: 'eval.edge',
274 | loc: {
275 | start: {
276 | line: 1,
277 | col: 4
278 | },
279 | end: {
280 | line: 1,
281 | col: 13
282 | }
283 | },
284 | properties: Prop
285 | }
286 | ```
287 |
288 | | Key | Value | Description |
289 | | ---------- | ------ | ------------------------------------------------------------------------------- |
290 | | type | string | The type of node determines the behavior of node |
291 | | loc | object | `loc` is only present for tags and mustache tokens |
292 | | line | number | `line` is not present for tags and mustache tokens |
293 | | properties | Prop | Meta data for the node. See [Properties](#properties) to more info |
294 | | value | string | If token is a raw or comment token, then value is the string in the source file |
295 | | children | array | Array of recursive nodes. Only exists, when token is a tag |
296 |
297 | ---
298 |
299 | ## Properties
300 |
301 | The properties `Prop` is used to define meta data for a given Node. Nodes like `raw`, `comment` and `newline` don't need any metadata.
302 |
303 | #### BlockProp
304 |
305 | The block prop is used by the `Block` node. The only difference from the regular `Prop` is the addition of `selfclosed` attribute.
306 |
307 | ```js
308 | {
309 | name: string
310 | jsArg: string,
311 | selfclosed: boolean
312 | }
313 | ```
314 |
315 | #### Prop
316 |
317 | ```js
318 | {
319 | jsArg: string,
320 | }
321 | ```
322 |
323 | | Key | Description |
324 | | ---------- | ------------------------------------------------------------------------------------------------------------- |
325 | | jsArg | The `jsArg` is the Javascript expression to evaluate. Whitespaces and newlines are preserved inside the jsArg |
326 | | selfclosed | Whether or not the tag was `selfclosed` during usage. |
327 |
328 | ---
329 |
330 | ## Mustache expressions
331 |
332 | For mustache node props, the `name` is the type of the mustache expression. The lexer supports 4 mustache expressions.
333 |
334 | **mustache**
335 |
336 | ```
337 | {{ username }}
338 | ```
339 |
340 | **e\_\_mustache (Escaped mustache)**
341 |
342 | The following expression is ignored by edge. Helpful when you want this expression to be parsed by a frontend template engine
343 |
344 | ```
345 | @{{ username }}
346 | ```
347 |
348 | **s\_\_mustache (Safe mustache)**
349 |
350 | The following expression output is considered HTML safe.
351 |
352 | ```
353 | {{{ '
Hello world
' }}}
354 | ```
355 |
356 | **es\_\_mustache (Escaped safe mustache)**
357 |
358 | ```
359 | @{{{ ' Not touched
' }}}
360 | ```
361 |
362 | ---
363 |
364 | ## Errors
365 |
366 | Errors raised by the `lexer` are always an instance of [edge-error](https://github.com/edge-js/error) and will contain following properties.
367 |
368 | ```js
369 | error.message
370 | error.line
371 | error.col
372 | error.filename
373 | error.code
374 | ```
375 |
376 | ---
377 |
378 | ## Example
379 |
380 | ```html
381 | {{-- Show username when exists --}} @if(username) {{-- Wrap inside h2 --}}
382 | Hello {{ username }}
383 | @endif
384 | ```
385 |
386 | The output of the above text will be
387 |
388 | ```json
389 | [
390 | {
391 | "type": "comment",
392 | "filename": "eval.edge",
393 | "value": " Show username when exists ",
394 | "loc": {
395 | "start": {
396 | "line": 1,
397 | "col": 4
398 | },
399 | "end": {
400 | "line": 1,
401 | "col": 35
402 | }
403 | }
404 | },
405 | {
406 | "type": "tag",
407 | "filename": "eval.edge",
408 | "properties": {
409 | "name": "if",
410 | "jsArg": "username",
411 | "selfclosed": false
412 | },
413 | "loc": {
414 | "start": {
415 | "line": 2,
416 | "col": 4
417 | },
418 | "end": {
419 | "line": 2,
420 | "col": 13
421 | }
422 | },
423 | "children": [
424 | {
425 | "type": "newline",
426 | "filename": "eval.edge",
427 | "line": 2
428 | },
429 | {
430 | "type": "comment",
431 | "filename": "eval.edge",
432 | "value": " Wrap inside h2 ",
433 | "loc": {
434 | "start": {
435 | "line": 3,
436 | "col": 4
437 | },
438 | "end": {
439 | "line": 3,
440 | "col": 24
441 | }
442 | }
443 | },
444 | {
445 | "type": "newline",
446 | "filename": "eval.edge",
447 | "line": 3
448 | },
449 | {
450 | "type": "raw",
451 | "value": " Hello ",
452 | "filename": "eval.edge",
453 | "line": 4
454 | },
455 | {
456 | "type": "mustache",
457 | "filename": "eval.edge",
458 | "properties": {
459 | "jsArg": " username "
460 | },
461 | "loc": {
462 | "start": {
463 | "line": 4,
464 | "col": 13
465 | },
466 | "end": {
467 | "line": 4,
468 | "col": 25
469 | }
470 | }
471 | },
472 | {
473 | "type": "raw",
474 | "value": "
",
475 | "filename": "eval.edge",
476 | "line": 4
477 | }
478 | ]
479 | }
480 | ]
481 | ```
482 |
483 | ## Raised exceptions
484 |
485 | Following are the links to the documented error codes raised by the lexer.
486 |
487 | - [E_CANNOT_SEEK_STATEMENT](errors/E_CANNOT_SEEK_STATEMENT.md)
488 | - [E_UNCLOSED_CURLY_BRACE](errors/E_UNCLOSED_CURLY_BRACE.md)
489 | - [E_UNCLOSED_PAREN](errors/E_UNCLOSED_PAREN.md)
490 | - [E_UNCLOSED_TAG](errors/E_UNCLOSED_TAG.md)
491 | - [E_UNOPENED_PAREN](errors/E_UNOPENED_PAREN.md)
492 |
493 | ## Maintainers
494 |
495 | [Harminder virk](https://github.com/sponsors/thetutlage)
496 |
497 | [gh-workflow-image]: https://img.shields.io/github/actions/workflow/status/edge-js/lexer/checks.yml?style=for-the-badge
498 | [gh-workflow-url]: https://github.com/edge-js/lexer/actions/workflows/checks.yml "Github action"
499 |
500 | [npm-image]: https://img.shields.io/npm/v/edge-lexer.svg?style=for-the-badge&logo=npm
501 | [npm-url]: https://npmjs.org/package/edge-lexer 'npm'
502 |
503 | [typescript-image]: https://img.shields.io/badge/Typescript-294E80.svg?style=for-the-badge&logo=typescript
504 |
505 | [license-url]: LICENSE.md
506 | [license-image]: https://img.shields.io/github/license/edge-js/lexer?style=for-the-badge
507 |
--------------------------------------------------------------------------------
/bin/test.ts:
--------------------------------------------------------------------------------
1 | import { assert } from '@japa/assert'
2 | import { processCLIArgs, configure, run } from '@japa/runner'
3 |
4 | /*
5 | |--------------------------------------------------------------------------
6 | | Configure tests
7 | |--------------------------------------------------------------------------
8 | |
9 | | The configure method accepts the configuration to configure the Japa
10 | | tests runner.
11 | |
12 | | The first method call "processCLIArgs" processes the command line arguments
13 | | and turns them into a config object. Using this method is not mandatory.
14 | |
15 | | Please consult japa.dev/runner-config for the config docs.
16 | */
17 | processCLIArgs(process.argv.slice(2))
18 | configure({
19 | files: ['tests/**/*.spec.ts'],
20 | plugins: [assert()],
21 | })
22 |
23 | /*
24 | |--------------------------------------------------------------------------
25 | | Run tests
26 | |--------------------------------------------------------------------------
27 | |
28 | | The following "run" method is required to execute all the tests.
29 | |
30 | */
31 | run()
32 |
--------------------------------------------------------------------------------
/errors/E_CANNOT_SEEK_STATEMENT.md:
--------------------------------------------------------------------------------
1 | # E_CANNOT_SEEK_STATEMENT
2 |
3 | This exception is raised when you write raw text in the same line as the tag.
4 |
5 | #### Invalid
6 |
7 | ```edge
8 | @if(username === 'virk') Hello {{ username }} @endif
9 | ```
10 |
11 | #### Invalid
12 |
13 | ```edge
14 | @if(username === 'virk') Hello
15 | {{ username }}
16 | @endif
17 | ```
18 |
19 | Following is a valid expression
20 |
21 | #### Valid
22 |
23 | ```edge
24 | @if(username === 'virk')
25 | Hello {{ username }}
26 | @endif
27 | ```
28 |
--------------------------------------------------------------------------------
/errors/E_UNCLOSED_CURLY_BRACE.md:
--------------------------------------------------------------------------------
1 | # E_UNCLOSED_CURLY_BRACE
2 |
3 | This exception is raised when the number of `opened` and `closed` mustache braces does not match.
4 |
5 | #### Invalid
6 |
7 | ```edge
8 | {{ 2 + 2 }
9 | ```
10 |
11 | ```edge
12 | {{{ `${username}
` }}
13 | ```
14 |
15 | Following are the valid expressions
16 |
17 | #### Valid
18 |
19 | ```edge
20 | {{ 2 + 2 }}
21 | ```
22 |
23 | ```edge
24 | {{{ `${username}
` }}}
25 | ```
26 |
--------------------------------------------------------------------------------
/errors/E_UNCLOSED_PAREN.md:
--------------------------------------------------------------------------------
1 | # E_UNCLOSED_PAREN
2 |
3 | This exception is raised when the number of `opened` and `closed` parentheses _( )_ does not match
4 |
5 | #### Invalid
6 |
7 | ```edge
8 | @if((2 + 2) * (3)
9 | ```
10 |
11 | Following are the valid expressions
12 |
13 | #### Valid
14 |
15 | ```edge
16 | @if((2 + 2) * (3))
17 | ```
18 |
19 | or expand to multiple lines for clarity
20 |
21 | ```edge
22 | @if(
23 | (2 + 2) * (3)
24 | )
25 | ```
26 |
--------------------------------------------------------------------------------
/errors/E_UNCLOSED_TAG.md:
--------------------------------------------------------------------------------
1 | # E_UNCLOSED_TAG
2 |
3 | This exception is raised when a block level tag like `@if` or `@else` was opened but never closed.
4 |
5 | #### Invalid
6 |
7 | ```edge
8 | @each (user in users)
9 | {{ user.username }}
10 | ```
11 |
12 | Following is a valid expression
13 |
14 | #### Valid
15 |
16 | ```edge
17 | @each (user in users)
18 | {{ user.username }}
19 | @endeach
20 | ```
21 |
--------------------------------------------------------------------------------
/errors/E_UNOPENED_PAREN.md:
--------------------------------------------------------------------------------
1 | # E_UNOPENED_PAREN
2 |
3 | This exception is raised when the Javascript expression for tags is not wrapped inside opening and closing parentheses.
4 |
5 | #### Invalid
6 |
7 | ```edge
8 | @if auth.user
9 | @endif
10 | ```
11 |
12 | Following is the valid expression
13 |
14 | #### Valid
15 |
16 | ```edge
17 | @if(auth.user)
18 | @endif
19 | ```
20 |
--------------------------------------------------------------------------------
/eslint.config.js:
--------------------------------------------------------------------------------
1 | import { configPkg } from '@adonisjs/eslint-config'
2 | export default configPkg()
3 |
--------------------------------------------------------------------------------
/examples/index.js:
--------------------------------------------------------------------------------
1 | import dedent from 'dedent'
2 | import { Tokenizer } from '../build/index.js'
3 |
4 | const exampleCode = dedent`
5 | {{-- Show username when exists --}}
6 | @if(username)
7 | {{-- Wrap inside h2 --}}
8 | Hello {{ username }}
9 | @endif
10 | `
11 | const tagsDef = {
12 | if: {
13 | block: true,
14 | seekable: true,
15 | },
16 | }
17 |
18 | const tokenizer = new Tokenizer(exampleCode, tagsDef, { filename: 'eval.edge' })
19 | tokenizer.parse()
20 |
21 | console.log(JSON.stringify(tokenizer['tokens'], null, 2))
22 |
--------------------------------------------------------------------------------
/fixtures/index.ts:
--------------------------------------------------------------------------------
1 | import dedent from 'dedent'
2 |
3 | export const fixtures: { name: string; in: string; out: any }[] = [
4 | {
5 | name: 'block tag',
6 | in: dedent`@if(username)
7 | @endif`,
8 | out: [
9 | {
10 | type: 'tag',
11 | filename: 'eval.edge',
12 | properties: {
13 | name: 'if',
14 | jsArg: 'username',
15 | selfclosed: false,
16 | },
17 | loc: {
18 | start: {
19 | line: 1,
20 | col: 4,
21 | },
22 | end: {
23 | line: 1,
24 | col: 13,
25 | },
26 | },
27 | children: [],
28 | },
29 | ],
30 | },
31 | {
32 | name: 'multiline opening tag statement',
33 | in: dedent`@if(
34 | username
35 | )
36 | @endif`,
37 | out: [
38 | {
39 | type: 'tag',
40 | filename: 'eval.edge',
41 | properties: {
42 | name: 'if',
43 | jsArg: '\nusername\n',
44 | selfclosed: false,
45 | },
46 | loc: {
47 | start: {
48 | line: 1,
49 | col: 4,
50 | },
51 | end: {
52 | line: 3,
53 | col: 1,
54 | },
55 | },
56 | children: [],
57 | },
58 | ],
59 | },
60 | {
61 | name: 'inline tag',
62 | in: dedent`@include('header')`,
63 | out: [
64 | {
65 | type: 'tag',
66 | filename: 'eval.edge',
67 | properties: {
68 | name: 'include',
69 | jsArg: "'header'",
70 | selfclosed: false,
71 | },
72 | loc: {
73 | start: {
74 | line: 1,
75 | col: 9,
76 | },
77 | end: {
78 | line: 1,
79 | col: 18,
80 | },
81 | },
82 | children: [],
83 | },
84 | ],
85 | },
86 | {
87 | name: 'multiline inline tag',
88 | in: dedent`@include(
89 | 'header'
90 | )`,
91 | out: [
92 | {
93 | type: 'tag',
94 | filename: 'eval.edge',
95 | properties: {
96 | name: 'include',
97 | jsArg: "\n'header'\n",
98 | selfclosed: false,
99 | },
100 | loc: {
101 | start: {
102 | line: 1,
103 | col: 9,
104 | },
105 | end: {
106 | line: 3,
107 | col: 1,
108 | },
109 | },
110 | children: [],
111 | },
112 | ],
113 | },
114 | {
115 | name: 'selfclosed tag',
116 | in: dedent`@!if(username)`,
117 | out: [
118 | {
119 | type: 'tag',
120 | filename: 'eval.edge',
121 | properties: {
122 | name: 'if',
123 | jsArg: 'username',
124 | selfclosed: true,
125 | },
126 | loc: {
127 | start: {
128 | line: 1,
129 | col: 5,
130 | },
131 | end: {
132 | line: 1,
133 | col: 14,
134 | },
135 | },
136 | children: [],
137 | },
138 | ],
139 | },
140 | {
141 | name: 'escaped tag',
142 | in: dedent`@@if(username)
143 | @endif`,
144 | out: [
145 | {
146 | type: 'e__tag',
147 | filename: 'eval.edge',
148 | properties: {
149 | name: 'if',
150 | jsArg: 'username',
151 | selfclosed: false,
152 | },
153 | loc: {
154 | start: {
155 | line: 1,
156 | col: 5,
157 | },
158 | end: {
159 | line: 1,
160 | col: 14,
161 | },
162 | },
163 | children: [],
164 | },
165 | ],
166 | },
167 | {
168 | name: 'mustache',
169 | in: dedent`{{ username }}`,
170 | out: [
171 | {
172 | type: 'mustache',
173 | filename: 'eval.edge',
174 | properties: {
175 | jsArg: ' username ',
176 | },
177 | loc: {
178 | start: {
179 | line: 1,
180 | col: 2,
181 | },
182 | end: {
183 | line: 1,
184 | col: 14,
185 | },
186 | },
187 | },
188 | ],
189 | },
190 | {
191 | name: 'multiline mustache',
192 | in: dedent`{{
193 | username
194 | }}`,
195 | out: [
196 | {
197 | type: 'mustache',
198 | filename: 'eval.edge',
199 | properties: {
200 | jsArg: '\nusername\n',
201 | },
202 | loc: {
203 | start: {
204 | line: 1,
205 | col: 2,
206 | },
207 | end: {
208 | line: 3,
209 | col: 2,
210 | },
211 | },
212 | },
213 | ],
214 | },
215 | {
216 | name: 'complex multiline mustache',
217 | in: dedent`Your friends are {{
218 | users.map((user) => {
219 | return user.username
220 | }).join(',')
221 | }}`,
222 | out: [
223 | {
224 | type: 'raw',
225 | filename: 'eval.edge',
226 | value: 'Your friends are ',
227 | line: 1,
228 | },
229 | {
230 | type: 'mustache',
231 | filename: 'eval.edge',
232 | properties: {
233 | jsArg: "\nusers.map((user) => {\n return user.username\n}).join(',')\n",
234 | },
235 | loc: {
236 | start: {
237 | line: 1,
238 | col: 19,
239 | },
240 | end: {
241 | line: 5,
242 | col: 2,
243 | },
244 | },
245 | },
246 | ],
247 | },
248 | {
249 | name: 'escaped mustache',
250 | in: dedent`@{{ username }}`,
251 | out: [
252 | {
253 | type: 'e__mustache',
254 | filename: 'eval.edge',
255 | properties: {
256 | jsArg: ' username ',
257 | },
258 | loc: {
259 | start: {
260 | line: 1,
261 | col: 3,
262 | },
263 | end: {
264 | line: 1,
265 | col: 15,
266 | },
267 | },
268 | },
269 | ],
270 | },
271 | ]
272 |
--------------------------------------------------------------------------------
/index.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | export { Tokenizer } from './src/tokenizer.js'
11 | export { MustacheTypes, TagTypes } from './src/enums.js'
12 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "edge-lexer",
3 | "description": "Edge parser to convert text markup to lexer tokens",
4 | "version": "6.0.3",
5 | "engines": {
6 | "node": ">=18.16.0"
7 | },
8 | "main": "build/index.js",
9 | "type": "module",
10 | "files": [
11 | "build",
12 | "!build/bin",
13 | "!build/fixtures",
14 | "!build/tests"
15 | ],
16 | "exports": {
17 | ".": "./build/index.js",
18 | "./types": "./build/src/types.js",
19 | "./utils": "./build/src/utils.js"
20 | },
21 | "scripts": {
22 | "pretest": "npm run lint",
23 | "test": "c8 npm run quick:test",
24 | "lint": "eslint",
25 | "format": "prettier --write .",
26 | "typecheck": "tsc --noEmit",
27 | "precompile": "npm run lint",
28 | "compile": "tsup-node && tsc --emitDeclarationOnly --declaration",
29 | "build": "npm run compile",
30 | "version": "npm run build",
31 | "prepublishOnly": "npm run build",
32 | "release": "release-it",
33 | "quick:test": "node --enable-source-maps --import=ts-node-maintained/register/esm bin/test.ts"
34 | },
35 | "devDependencies": {
36 | "@adonisjs/eslint-config": "^2.0.0-beta.6",
37 | "@adonisjs/prettier-config": "^1.4.0",
38 | "@adonisjs/tsconfig": "^1.4.0",
39 | "@japa/assert": "^4.0.1",
40 | "@japa/runner": "^4.1.0",
41 | "@release-it/conventional-changelog": "^10.0.0",
42 | "@swc/core": "^1.10.7",
43 | "@types/dedent": "^0.7.2",
44 | "@types/node": "^22.10.7",
45 | "benchmark": "^2.1.4",
46 | "c8": "^10.1.3",
47 | "dedent": "^1.5.3",
48 | "eslint": "^9.18.0",
49 | "prettier": "^3.4.2",
50 | "release-it": "^18.1.1",
51 | "ts-node-maintained": "^10.9.5",
52 | "tsup": "^8.3.5",
53 | "typescript": "^5.7.3"
54 | },
55 | "dependencies": {
56 | "edge-error": "^4.0.2"
57 | },
58 | "homepage": "https://github.com/edge-js/lexer#readme",
59 | "repository": {
60 | "type": "git",
61 | "url": "git+https://github.com/edge-js/lexer.git"
62 | },
63 | "bugs": {
64 | "url": "https://github.com/edge-js/lexer/issues"
65 | },
66 | "keywords": [
67 | "edge",
68 | "template",
69 | "template-engine"
70 | ],
71 | "author": "Harminder Virk ",
72 | "license": "MIT",
73 | "publishConfig": {
74 | "provenance": true,
75 | "access": "public"
76 | },
77 | "tsup": {
78 | "entry": [
79 | "./index.ts",
80 | "./src/types.ts",
81 | "./src/utils.ts"
82 | ],
83 | "outDir": "./build",
84 | "clean": true,
85 | "format": "esm",
86 | "dts": false,
87 | "sourcemap": false,
88 | "target": "esnext"
89 | },
90 | "release-it": {
91 | "git": {
92 | "requireCleanWorkingDir": true,
93 | "requireUpstream": true,
94 | "commitMessage": "chore(release): ${version}",
95 | "tagAnnotation": "v${version}",
96 | "push": true,
97 | "tagName": "v${version}"
98 | },
99 | "github": {
100 | "release": true
101 | },
102 | "npm": {
103 | "publish": true,
104 | "skipChecks": true
105 | },
106 | "plugins": {
107 | "@release-it/conventional-changelog": {
108 | "preset": {
109 | "name": "angular"
110 | }
111 | }
112 | }
113 | },
114 | "c8": {
115 | "reporter": [
116 | "text",
117 | "html"
118 | ],
119 | "exclude": [
120 | "tests/**"
121 | ]
122 | },
123 | "prettier": "@adonisjs/prettier-config"
124 | }
125 |
--------------------------------------------------------------------------------
/perf/index.js:
--------------------------------------------------------------------------------
/**
 * Benchmarks the tokenizer against a template that mixes raw text,
 * single and multi line mustache statements, and nested block tags.
 * Run against the compiled output in `build/`.
 */
import { Tokenizer } from '../build/index.js'
import Benchmark from 'benchmark'

const { Suite } = Benchmark
const suite = new Suite()

const template = `
This is a dummy template string to run some performance checks against
the tokenizer and see if there is room for improvements or not?

Let's start with some {{ variables }} and some multiline operations {{
users.map((user) => {
    return user.username
  }).join(',')
}}

What if we have some tags too?

@if(users.length)
  @each(user in users)
    {{ user.username }}
  @endeach
@endif
`

suite
  .add('Tokenizer', function () {
    // A fresh tokenizer per iteration: Tokenizer instances are
    // single-use (parse mutates internal state)
    const tokenizer = new Tokenizer(
      template,
      {
        if: {
          seekable: true,
          selfclosed: false,
          block: true,
        },
        each: {
          seekable: true,
          selfclosed: true,
          block: true,
        },
      },
      { filename: 'welcome.edge' }
    )
    tokenizer.parse()
  })
  .on('cycle', function (event) {
    console.log(String(event.target))
  })
  .run()
50 |
--------------------------------------------------------------------------------
/perf/tags.js:
--------------------------------------------------------------------------------
/**
 * Benchmarks the tokenizer against a template composed almost entirely
 * of nested block tags, to isolate tag detection/parsing cost.
 * Run against the compiled output in `build/`.
 */
import { Tokenizer } from '../build/index.js'
import Benchmark from 'benchmark'

const { Suite } = Benchmark
const suite = new Suite()

const template = `
This is a dummy template string to run some performance checks against
the tokenizer and see if there is room for improvements or not?

Here we will focus on tags only

@if(username)
  @each(user in users)
    @if(user.age)
      {{ user.age }}
    @endif
  @endeach
@endif
`

suite
  .add('Tokenizer tags', function () {
    // A fresh tokenizer per iteration: Tokenizer instances are
    // single-use (parse mutates internal state)
    const tokenizer = new Tokenizer(
      template,
      {
        if: {
          seekable: true,
          selfclosed: false,
          block: true,
        },
        each: {
          seekable: true,
          selfclosed: true,
          block: true,
        },
      },
      { filename: 'welcome.edge' }
    )

    tokenizer.parse()
  })
  .on('cycle', function (event) {
    console.log(String(event.target))
  })
  .run()
47 |
--------------------------------------------------------------------------------
/src/detector.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import type {
11 | Tags,
12 | RuntimeTag,
13 | RuntimeComment,
14 | RuntimeMustache,
15 | LexerTagDefinitionContract,
16 | } from './types.js'
17 |
18 | /**
19 | * The only regex we need in the entire lexer. Also tested
20 | * with https://github.com/substack/safe-regex
21 | */
22 | const TAG_REGEX = /^(\s*)(@{1,2})(!)?([a-zA-Z._]+)(\s{0,2})/
23 |
24 | /**
25 | * Returns runtime tag node if tag is detected and is a registered tag
26 | */
27 | export function getTag(
28 | content: string,
29 | filename: string,
30 | line: number,
31 | col: number,
32 | tags: Tags,
33 | claimTag?: (name: string) => LexerTagDefinitionContract | null
34 | ): RuntimeTag | null {
35 | const match = TAG_REGEX.exec(content)
36 |
37 | /**
38 | * Return when their is no match
39 | */
40 | if (!match) {
41 | return null
42 | }
43 |
44 | const name = match[4]
45 | let tag: null | LexerTagDefinitionContract = tags[name]
46 |
47 | /**
48 | * See if the tag can be claimed
49 | */
50 | if (!tag && claimTag) {
51 | tag = claimTag(name)
52 | }
53 |
54 | /**
55 | * Return when not a registered tag
56 | */
57 | if (!tag) {
58 | return null
59 | }
60 |
61 | const escaped = match[2] === '@@'
62 | const selfclosed = !!match[3]
63 | const whitespaceLeft = match[1].length
64 | const whitespaceRight = match[5].length
65 | const seekable = tag.seekable
66 | const block = tag.block
67 | const noNewLine = !!tag.noNewLine
68 |
69 | /**
70 | * Advanced the col position
71 | */
72 | col += whitespaceLeft + match[2].length + name.length + whitespaceRight
73 | if (selfclosed) {
74 | col++
75 | }
76 |
77 | /**
78 | * Seekable tags without the brace in same line are invalid
79 | */
80 | const hasBrace = seekable && content[col] === '('
81 |
82 | return {
83 | name,
84 | filename,
85 | seekable,
86 | selfclosed,
87 | block,
88 | line,
89 | col,
90 | escaped,
91 | hasBrace,
92 | noNewLine,
93 | }
94 | }
95 |
96 | /**
97 | * Returns the runtime mustache node if mustache is detected. It will look for 3 types of
98 | * mustache statements.
99 | *
100 | * - Comments `{{-- --}}`
101 | * - Safe Mustache `{{{ }}}`
102 | * - Escaped Mustache `@{{}}`
103 | */
104 | export function getMustache(
105 | content: string,
106 | filename: string,
107 | line: number,
108 | col: number
109 | ): RuntimeMustache | RuntimeComment | null {
110 | const mustacheIndex = content.indexOf('{{')
111 |
112 | if (mustacheIndex === -1) {
113 | return null
114 | }
115 |
116 | const realCol = mustacheIndex
117 |
118 | /**
119 | * Mustache is a comment
120 | */
121 | const isComment = content[mustacheIndex + 2] === '-' && content[mustacheIndex + 3] === '-'
122 | if (isComment) {
123 | return {
124 | isComment,
125 | filename,
126 | line,
127 | col: col + realCol,
128 | realCol,
129 | }
130 | }
131 |
132 | /**
133 | * Mustache is for interpolation
134 | */
135 | const safe = content[mustacheIndex + 2] === '{'
136 | const escaped = content[mustacheIndex - 1] === '@'
137 |
138 | return {
139 | isComment,
140 | safe,
141 | filename,
142 | escaped,
143 | line,
144 | col: col + realCol,
145 | realCol,
146 | }
147 | }
148 |
--------------------------------------------------------------------------------
/src/enums.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
/**
 * Types for mustache statements. The detector maps the `{{{ }}}` syntax
 * to the "safe" variants and a leading `@` to the "escaped" variants.
 */
export enum MustacheTypes {
  /** Safe mustache: `{{{ }}}` */
  SMUSTACHE = 's__mustache',
  /** Escaped safe mustache: `@{{{ }}}` */
  ESMUSTACHE = 'es__mustache',
  /** Regular mustache: `{{ }}` */
  MUSTACHE = 'mustache',
  /** Escaped mustache: `@{{ }}` */
  EMUSTACHE = 'e__mustache',
}
19 |
/**
 * Types for tag statements. Each tag token
 * will have one of these types
 */
export enum TagTypes {
  /** Regular tag: `@if` */
  TAG = 'tag',
  /** Escaped tag: `@@if` */
  ETAG = 'e__tag',
}
28 |
--------------------------------------------------------------------------------
/src/exceptions.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import { EdgeError } from 'edge-error'
11 |
12 | /**
13 | * Raised when there is inline content next to a tag opening
14 | * block. For example:
15 | *
16 | * Incorrect
17 | * ```
18 | * @if(username) Hello {{ username }} @endif
19 | * ```
20 | *
21 | * Correct
22 | * ```
23 | * @if(username)
24 | * Hello {{ username }}
25 | * @endif
26 | * ```
27 | */
28 | export function cannotSeekStatement(
29 | chars: string,
30 | pos: { line: number; col: number },
31 | filename: string
32 | ): EdgeError {
33 | return new EdgeError(`Unexpected token "${chars}"`, 'E_CANNOT_SEEK_STATEMENT', {
34 | line: pos.line,
35 | col: pos.col,
36 | filename: filename,
37 | })
38 | }
39 |
40 | /**
41 | * Raised when a tag opening body doesn't have a closing brace. For example:
42 | *
43 | * Incorrect
44 | * ```
45 | * @if(username
46 | * ```
47 | *
48 | * Correct
49 | * ```
50 | * @if(username)
51 | * ```
52 | */
53 | export function unclosedParen(pos: { line: number; col: number }, filename: string): EdgeError {
54 | return new EdgeError('Missing token ")"', 'E_UNCLOSED_PAREN', {
55 | line: pos.line,
56 | col: pos.col,
57 | filename: filename,
58 | })
59 | }
60 |
61 | /**
62 | * Raised when a tag is used without an opening brace. For example:
63 | *
64 | * Incorrect
65 | * ```
66 | * @if username
67 | * ```
68 | *
69 | * Correct
70 | * ```
71 | * @if(username)
72 | * ```
73 | */
74 | export function unopenedParen(pos: { line: number; col: number }, filename: string): EdgeError {
75 | return new EdgeError('Missing token "("', 'E_UNOPENED_PAREN', {
76 | line: pos.line,
77 | col: pos.col,
78 | filename: filename,
79 | })
80 | }
81 |
82 | /**
83 | * Raised when the curly closing brace is missing from the mustache
84 | * statement. For example:
85 | *
86 | * Incorrect
87 | * ```
88 | * {{ username }
89 | * ```
90 | *
91 | * Correct
92 | *
93 | * ```
94 | * {{ username }}
95 | * ```
96 | */
97 | export function unclosedCurlyBrace(
98 | pos: { line: number; col: number },
99 | filename: string
100 | ): EdgeError {
101 | return new EdgeError('Missing token "}"', 'E_UNCLOSED_CURLY_BRACE', {
102 | line: pos.line,
103 | col: pos.col,
104 | filename: filename,
105 | })
106 | }
107 |
108 | /**
109 | * Raised when a block level tag is opened but never closed. For example:
110 | *
111 | * Incorrect
112 | * ```
113 | * @if(username)
114 | * ```
115 | *
116 | * Correct
117 | * ```
118 | * @if(username)
119 | * @endif
120 | * ```
121 | */
122 | export function unclosedTag(
123 | tag: string,
124 | pos: { line: number; col: number },
125 | filename: string
126 | ): EdgeError {
127 | return new EdgeError(`Unclosed tag ${tag}`, 'E_UNCLOSED_TAG', {
128 | line: pos.line,
129 | col: pos.col,
130 | filename: filename,
131 | })
132 | }
133 |
--------------------------------------------------------------------------------
/src/scanner.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | /**
11 | * Scan a string and seperate it into 2 pairs. The first pair will be series
12 | * of characters until the ending pattern is found and 2nd pair is the
13 | * left over.
14 | *
15 | * Their are some special behaviors over the regular `string.split` method.
16 | *
17 | * 1. Multiple lines can be passed by calling `scan` method for each line.
18 | * 2. Tolerates characters when they conflict with the ending pattern.
19 | *
20 | * ```js
21 | * const pattern = ')'
22 | * const tolerations = ['(', ')']
23 | * const scanner = new Scanner(pattern, tolerations)
24 | *
25 | * scanner.scan('2 + 2 * (3))')
26 | * if (scanner.closed) {
27 | * scanner.match // 2 + 2 * (3)
28 | * scanner.leftOver // ''
29 | * }
30 | * ```
31 | *
32 | * If we take the same string `2 + 2 * (3))` and split it using ')', then we
33 | * will get unexpected result, since the split method splits by finding the
34 | * first match.
35 | */
36 | export class Scanner {
37 | #pattern: string
38 |
39 | #tolaretionCounts: number = 0
40 | #tolerateLhs: string = ''
41 | #tolerateRhs: string = ''
42 | #patternLength: number = 0
43 |
44 | /**
45 | * Tracking if the scanner has been closed
46 | */
47 | closed: boolean = false
48 |
49 | /**
50 | * The matched content within the pattern
51 | */
52 | match: string = ''
53 |
54 | /**
55 | * The content in the same line but after the closing
56 | * of the pattern
57 | */
58 | leftOver: string = ''
59 |
60 | loc: { line: number; col: number }
61 |
62 | constructor(pattern: string, toleratePair: [string, string], line: number, col: number) {
63 | this.#tolerateLhs = toleratePair[0]
64 | this.#tolerateRhs = toleratePair[1]
65 | this.#patternLength = pattern.length
66 | this.#pattern = pattern
67 | this.loc = {
68 | line: line,
69 | col: col,
70 | }
71 | }
72 |
73 | /**
74 | * Returns a boolean telling if the pattern matches the current
75 | * char and the upcoming chars or not.
76 | *
77 | * This will be used to mark the scanner as closed and stop scanning
78 | * for more chars
79 | */
80 | #matchesPattern(chars: string, iterationCount: number) {
81 | for (let i = 0; i < this.#patternLength; i++) {
82 | if (this.#pattern[i] !== chars[iterationCount + i]) {
83 | return false
84 | }
85 | }
86 |
87 | return true
88 | }
89 |
90 | /**
91 | * Scan a string and look for the closing pattern. The string will
92 | * be seperated with the closing pattern and also tracks the
93 | * toleration patterns to make sure they are not making the
94 | * scanner to end due to pattern mis-match.
95 | */
96 | scan(chunk: string): void {
97 | if (chunk === '\n') {
98 | this.loc.line++
99 | this.loc.col = 0
100 | this.match += '\n'
101 | return
102 | }
103 |
104 | if (!chunk.trim()) {
105 | return
106 | }
107 |
108 | const chunkLength = chunk.length
109 | let iterations = 0
110 |
111 | while (iterations < chunkLength) {
112 | const char = chunk[iterations]
113 |
114 | /**
115 | * Toleration count is 0 and closing pattern matches the current
116 | * or series of upcoming characters
117 | */
118 | if (this.#tolaretionCounts === 0 && this.#matchesPattern(chunk, iterations)) {
119 | iterations += this.#patternLength
120 | this.closed = true
121 | break
122 | }
123 |
124 | /**
125 | * Increments the tolarate counts when char is the
126 | * tolerate lhs character
127 | */
128 | if (char === this.#tolerateLhs) {
129 | this.#tolaretionCounts++
130 | }
131 |
132 | /**
133 | * Decrements the tolare counts when char is the
134 | * tolerate rhs character
135 | */
136 | if (char === this.#tolerateRhs) {
137 | this.#tolaretionCounts--
138 | }
139 |
140 | /**
141 | * Append to the matched string and waiting for the
142 | * closing pattern
143 | */
144 | this.match += char
145 |
146 | iterations++
147 | }
148 |
149 | /**
150 | * If closed, then return the matched string and also the
151 | * left over string
152 | */
153 | if (this.closed) {
154 | this.loc.col += iterations
155 | this.leftOver = chunk.slice(iterations)
156 | }
157 | }
158 | }
159 |
--------------------------------------------------------------------------------
/src/tokenizer.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import { Scanner } from './scanner.js'
11 | import { getTag, getMustache } from './detector.js'
12 | import { TagTypes, MustacheTypes } from './enums.js'
13 |
14 | import {
15 | unclosedTag,
16 | unclosedParen,
17 | unopenedParen,
18 | unclosedCurlyBrace,
19 | cannotSeekStatement,
20 | } from './exceptions.js'
21 |
22 | import type {
23 | Tags,
24 | Token,
25 | LexerLoc,
26 | TagToken,
27 | RawToken,
28 | RuntimeTag,
29 | CommentToken,
30 | NewLineToken,
31 | MustacheToken,
32 | RuntimeComment,
33 | RuntimeMustache,
34 | TokenizerOptions,
35 | } from './types.js'
36 |
/**
 * Tokenizer converts a bunch of text into an array of tokens. Later
 * these tokens can be used to build the transformed text.
 *
 * Go through the README file to learn more about the syntax and
 * the tokens output.
 */
export class Tokenizer {
  /**
   * Current line number. Incremented before a line is processed,
   * so the first processed line is reported as line 1
   */
  #line: number = 0

  /**
   * Tracking if two back to back lines are tags or not. Need it for inserting
   * whitespace between them
   */
  #isLastLineATag: boolean = false

  /**
   * When true, the tokenizer will drop the next newline token
   */
  #dropNewLine: boolean = false

  /**
   * An array of opened block level tags
   */
  #openedTags: TagToken[] = []

  /**
   * Copy of options
   */
  #options: TokenizerOptions

  /**
   * Template to parse
   */
  #template: string

  /**
   * Tags known ahead of time
   */
  #tagsDef: Tags

  /**
   * Lexer tokens
   */
  tokens: Token[] = []

  /**
   * Holds the current tag statement, until it is closed
   */
  tagStatement: null | { scanner: Scanner; tag: RuntimeTag } = null

  /**
   * Holds the current mustache statement, until it is closed
   */
  mustacheStatement: null | {
    scanner: Scanner
    mustache: RuntimeMustache | RuntimeComment
  } = null

  constructor(template: string, tagsDef: Tags, options: TokenizerOptions) {
    this.#template = template
    this.#tagsDef = tagsDef
    this.#options = options
  }

  /**
   * Returns the raw token for a plain text line
   */
  #getRawNode(text: string): RawToken {
    return {
      type: 'raw',
      value: text,
      filename: this.#options.filename,
      line: this.#line,
    }
  }

  /**
   * Returns the new line token. The token points at the line that
   * just ended, hence the `- 1`
   */
  #getNewLineNode(line?: number): NewLineToken {
    return {
      type: 'newline',
      filename: this.#options.filename,
      line: (line || this.#line) - 1,
    }
  }

  /**
   * Returns the TagToken for a runtime tag. The `jsArg` and ending
   * loc is computed using the scanner and must be passed to this
   * method.
   */
  #getTagNode(tag: RuntimeTag, jsArg: string, closingLoc: LexerLoc['end']): TagToken {
    return {
      type: tag.escaped ? TagTypes.ETAG : TagTypes.TAG,
      filename: tag.filename,
      properties: {
        name: tag.name,
        jsArg: jsArg,
        selfclosed: tag.selfclosed,
      },
      loc: {
        start: {
          line: tag.line,
          col: tag.col,
        },
        end: closingLoc,
      },
      children: [],
    }
  }

  /**
   * Consume the runtime tag node.
   *
   * If tag is `block`, then we push it to the list of
   * opened tags and wait for the closing statement to
   * appear.
   *
   * Otherwise, we move it to the tokens array directly.
   */
  #consumeTag(tag: RuntimeTag, jsArg: string, loc: LexerLoc['end']) {
    if (tag.block && !tag.selfclosed) {
      this.#openedTags.push(this.#getTagNode(tag, jsArg, loc))
    } else {
      this.#consumeNode(this.#getTagNode(tag, jsArg, loc))
    }
  }

  /**
   * Handles the opening of the tag.
   */
  #handleTagOpening(line: string, tag: RuntimeTag) {
    if (tag.seekable && !tag.hasBrace) {
      throw unopenedParen({ line: tag.line, col: tag.col }, tag.filename)
    }

    /**
     * When tag is not seekable, then there is no need to create
     * a scanner instance, just consume it right away.
     */
    if (!tag.seekable) {
      this.#consumeTag(tag, '', { line: tag.line, col: tag.col })
      if (tag.noNewLine || line.endsWith('~')) {
        this.#dropNewLine = true
      }
      return
    }

    /**
     * Advance the `col`, since we do not want to start from the
     * starting brace `(`.
     */
    tag.col += 1

    /**
     * Create a new block statement with the scanner to find
     * the closing brace ')'
     */
    this.tagStatement = {
      tag: tag,
      scanner: new Scanner(')', ['(', ')'], this.#line, tag.col),
    }

    /**
     * Pass all remaining content to the scanner
     */
    this.#feedCharsToCurrentTag(line.slice(tag.col))
  }

  /**
   * Scans the string using the scanner and waits for the
   * closing brace ')' to appear
   */
  #feedCharsToCurrentTag(content: string) {
    const { tag, scanner } = this.tagStatement!

    scanner.scan(content)

    /**
     * If scanner is not closed, then we need to keep on
     * feeding more content
     */
    if (!scanner.closed) {
      return
    }

    /**
     * Consume the tag once we have found the closing brace and set
     * block statement to null
     */
    this.#consumeTag(tag, scanner.match, scanner.loc)

    /**
     * If tag ends with `~`, then instruct the tokenizer to drop the
     * next new line
     */
    if (scanner.leftOver.trim() === '~') {
      this.tagStatement = null
      this.#dropNewLine = true
      return
    }

    /**
     * Raise error, if there is inline content after the closing brace ')'
     * `@if(username) hello {{ username }}` is invalid
     */
    if (scanner.leftOver.trim()) {
      throw cannotSeekStatement(scanner.leftOver, scanner.loc, tag.filename)
    }

    /**
     * Do not add newline when tag instructs for it
     */
    if (tag.noNewLine) {
      this.#dropNewLine = true
    }

    this.tagStatement = null
  }

  /**
   * Returns the mustache type by checking for `safe` and `escaped`
   * properties.
   */
  #getMustacheType(mustache: RuntimeMustache): MustacheTypes {
    if (mustache.safe) {
      return mustache.escaped ? MustacheTypes.ESMUSTACHE : MustacheTypes.SMUSTACHE
    }

    return mustache.escaped ? MustacheTypes.EMUSTACHE : MustacheTypes.MUSTACHE
  }

  /**
   * Returns the mustache token using the runtime mustache node. The `jsArg` and
   * ending `loc` is fetched using the scanner.
   */
  #getMustacheNode(
    mustache: RuntimeMustache,
    jsArg: string,
    closingLoc: LexerLoc['end']
  ): MustacheToken {
    return {
      type: this.#getMustacheType(mustache),
      filename: mustache.filename,
      properties: {
        jsArg: jsArg,
      },
      loc: {
        start: {
          line: mustache.line,
          col: mustache.col,
        },
        end: closingLoc,
      },
    }
  }

  /**
   * Returns the comment token using the runtime comment node.
   */
  #getCommentNode(
    comment: RuntimeComment,
    value: string,
    closingLoc: LexerLoc['end']
  ): CommentToken {
    return {
      type: 'comment',
      filename: comment.filename,
      value: value,
      loc: {
        start: {
          line: comment.line,
          col: comment.col,
        },
        end: closingLoc,
      },
    }
  }

  /**
   * Handles the line which has mustache opening braces.
   */
  #handleMustacheOpening(line: string, mustache: RuntimeMustache | RuntimeComment) {
    const pattern = mustache.isComment ? '--}}' : mustache.safe ? '}}}' : '}}'
    const textLeftIndex =
      mustache.isComment || !mustache.escaped ? mustache.realCol : mustache.realCol - 1

    /**
     * Pull everything that is on the left of the mustache
     * statement and use it as a raw node
     */
    if (textLeftIndex > 0) {
      this.#consumeNode(this.#getRawNode(line.slice(0, textLeftIndex)))
    }

    /**
     * Skip the curly braces when reading the expression inside
     * them. We are actually skipping the opening curly braces
     * `{{`, however, their length will be the same as the
     * closing ones.
     */
    mustache.col += pattern.length
    mustache.realCol += pattern.length

    /**
     * Create a new mustache statement with a scanner to scan for
     * closing mustache braces. Note the closing `pattern` is
     * different for safe and normal mustache.
     */
    this.mustacheStatement = {
      mustache,
      scanner: new Scanner(pattern, ['{', '}'], mustache.line, mustache.col),
    }

    /**
     * Feed text to the mustache statement and wait for the closing braces
     */
    this.#feedCharsToCurrentMustache(line.slice(mustache.realCol))
  }

  /**
   * Feed chars to the mustache statement, which isn't closed yet.
   */
  #feedCharsToCurrentMustache(content: string): void {
    const { mustache, scanner } = this.mustacheStatement!
    scanner.scan(content)

    /**
     * If scanner is not closed, then return early, since there is
     * not much we can do here.
     */
    if (!scanner.closed) {
      return
    }

    /**
     * Consume the node as soon as we have found the closing brace
     */
    if (mustache.isComment) {
      this.#consumeNode(this.#getCommentNode(mustache, scanner.match, scanner.loc))
    } else {
      this.#consumeNode(this.#getMustacheNode(mustache, scanner.match, scanner.loc))
    }

    /**
     * If there is leftover text after the mustache closing brace, then re-scan
     * it for more mustache statements. Example:
     *
     * In the following statement, `, and {{ age }}` is the left over.
     * ```
     * {{ username }}, and {{ age }}
     * ```
     *
     * This block is the same as the generic new line handler method. However,
     * there is no need to check for tags and comments, so we ditch that method
     * and process it here by duplicating code (which is fine).
     */
    if (scanner.leftOver.trim()) {
      /**
       * Scan for another mustache in the same line
       */
      const anotherMustache = getMustache(
        scanner.leftOver,
        this.#options.filename,
        scanner.loc.line,
        scanner.loc.col
      )
      if (anotherMustache) {
        this.#handleMustacheOpening(scanner.leftOver, anotherMustache)
        return
      }

      this.#consumeNode(this.#getRawNode(scanner.leftOver))
    }

    /**
     * Set mustache statement to null
     */
    this.mustacheStatement = null
  }

  /**
   * Returns a boolean telling if the content of the line is the
   * closing tag for the most recently opened tag.
   *
   * The opening and closing has to be in order, otherwise the
   * compiler will get mad.
   */
  #isClosingTag(line: string): boolean {
    if (!this.#openedTags.length) {
      return false
    }

    line = line.trim()

    const recentTag = this.#openedTags[this.#openedTags.length - 1]
    const endStatement = `@end${recentTag.properties.name}`
    return (
      line === endStatement || line === `${endStatement}~` || line === '@end' || line === '@end~'
    )
  }

  /**
   * Consume any type of token by moving it to the correct list. If there are
   * opened tags, then the token becomes part of the tag children. Otherwise
   * moved as top level token.
   */
  #consumeNode(tag: Token): void {
    if (this.#openedTags.length) {
      this.#openedTags[this.#openedTags.length - 1].children.push(tag)
      return
    }

    this.tokens.push(tag)
  }

  /**
   * Pushes a new line to the list. This method avoids
   * new lines at position 0.
   */
  #pushNewLine(line?: number) {
    if ((line || this.#line) === 1) {
      return
    }

    /**
     * Ignore incoming new line when a previous tag or the `~`
     * syntax instructed us to drop it
     */
    if (this.#dropNewLine) {
      this.#dropNewLine = false
      return
    }

    this.#consumeNode(this.#getNewLineNode(line))
  }

  /**
   * Process the current line based upon what it is. What it is?
   * That's the job of this method to find out.
   */
  #processText(line: string): void {
    /**
     * Pre process line when the onLine listener is defined
     */
    if (typeof this.#options.onLine === 'function') {
      line = this.#options.onLine(line)
    }

    /**
     * There is an open block statement, so feed line to it
     */
    if (this.tagStatement) {
      this.#feedCharsToCurrentTag('\n')
      this.#feedCharsToCurrentTag(line)
      return
    }

    /**
     * There is an open mustache statement, so feed line to it
     */
    if (this.mustacheStatement) {
      this.#feedCharsToCurrentMustache('\n')
      this.#feedCharsToCurrentMustache(line)
      return
    }

    /**
     * The line is a closing statement for a previously opened
     * block level tag
     */
    if (this.#isClosingTag(line)) {
      this.#consumeNode(this.#openedTags.pop()!)

      /**
       * Do not add next newline when statement ends with `~`
       */
      if (line.endsWith('~')) {
        this.#dropNewLine = true
      }
      return
    }

    /**
     * Check if the current line is a tag or not. If yes, then handle
     * it appropriately
     */
    const tag = getTag(
      line,
      this.#options.filename,
      this.#line,
      0,
      this.#tagsDef,
      this.#options.claimTag
    )
    if (tag) {
      /**
       * When two back to back lines are tags, then we put a newline between them
       * and one can use the tilde `~` syntax to remove new lines (if required)
       */
      if (this.#isLastLineATag) {
        this.#pushNewLine()
      }

      this.#isLastLineATag = true
      this.#handleTagOpening(line, tag)
      return
    }

    this.#isLastLineATag = false

    /**
     * Check if the current line contains a mustache statement or not. If yes,
     * then handle it appropriately.
     */
    const mustache = getMustache(line, this.#options.filename, this.#line, 0)
    if (mustache) {
      this.#pushNewLine()
      this.#handleMustacheOpening(line, mustache)
      return
    }

    this.#pushNewLine()
    this.#consumeNode(this.#getRawNode(line))
  }

  /**
   * Checks for errors after the tokenizer completes its work, so that we
   * can find broken statements or unclosed tags.
   */
  #checkForErrors() {
    /**
     * We are done scanning the content and there is an open tagStatement
     * seeking for new content. Which means we are missing a closing
     * brace `)`.
     */
    if (this.tagStatement) {
      const { tag } = this.tagStatement
      throw unclosedParen({ line: tag.line, col: tag.col }, tag.filename)
    }

    /**
     * We are done scanning the content and there is an open mustache statement
     * seeking for new content. Which means we are missing closing braces `}}`.
     */
    if (this.mustacheStatement) {
      const { mustache, scanner } = this.mustacheStatement
      throw unclosedCurlyBrace(scanner.loc, mustache.filename)
    }

    /**
     * A tag was opened, but forgot to close it
     */
    if (this.#openedTags.length) {
      const openedTag = this.#openedTags[this.#openedTags.length - 1]
      throw unclosedTag(openedTag.properties.name, openedTag.loc.start, openedTag.filename)
    }
  }

  /**
   * Parse the template and generate an AST out of it
   */
  parse(): void {
    const lines = this.#template.split(/\r\n|\r|\n/g)
    const linesLength = lines.length

    while (this.#line < linesLength) {
      const line = lines[this.#line]
      this.#line++
      this.#processText(line)
    }

    this.#checkForErrors()
  }
}
616 |
--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import type { MustacheTypes, TagTypes } from './enums.js'
11 |
/**
 * Properties node for a tag. `jsArg` holds the raw (unparsed) text
 * collected from the tag statement.
 */
export type TagProps = {
  name: string
  jsArg: string
  selfclosed: boolean
}

/**
 * Properties for a mustache block
 */
export type MustacheProps = {
  jsArg: string
}

/**
 * Location node for tags and mustache braces. Both `start` and `end`
 * carry a line and column pair.
 */
export type LexerLoc = {
  start: {
    line: number
    col: number
  }
  end: {
    line: number
    col: number
  }
}

/**
 * The properties required by the lexer on a tag
 * definition
 */
export interface LexerTagDefinitionContract {
  block: boolean
  seekable: boolean
  noNewLine?: boolean
}

/**
 * Raw line token. Tracks only a line number (no column range).
 */
export type RawToken = {
  type: 'raw'
  value: string
  line: number
  filename: string
}

/**
 * New line token. Tracks only a line number (no column range).
 */
export type NewLineToken = {
  type: 'newline'
  line: number
  filename: string
}

/**
 * Comment token
 */
export type CommentToken = {
  type: 'comment'
  value: string
  loc: LexerLoc
  filename: string
}

/**
 * Mustache token
 */
export type MustacheToken = {
  type: MustacheTypes
  properties: MustacheProps
  loc: LexerLoc
  filename: string
}

/**
 * Tag token. Block level tags collect their nested tokens as `children`.
 */
export type TagToken = {
  type: TagTypes
  properties: TagProps
  loc: LexerLoc
  children: Token[]
  filename: string
}

export type Token = RawToken | NewLineToken | TagToken | MustacheToken | CommentToken

/**
 * The runtime tag node to know the shape of a tag
 */
export type RuntimeTag = LexerTagDefinitionContract & {
  name: string
  filename: string
  selfclosed: boolean
  col: number
  line: number
  escaped: boolean
  hasBrace: boolean
}

/**
 * Runtime mustache node to know the shape of the mustache.
 * `isComment: false` discriminates it from `RuntimeComment`.
 */
export type RuntimeMustache = {
  isComment: false
  escaped: boolean
  filename: string
  safe: boolean
  line: number
  col: number
  realCol: number
}

/**
 * Runtime comment node to know the shape of the comment.
 * `isComment: true` discriminates it from `RuntimeMustache`.
 */
export type RuntimeComment = {
  isComment: true
  filename: string
  line: number
  col: number
  realCol: number
}

/**
 * Tags accepted by the tokenizer
 */
export interface Tags {
  [name: string]: LexerTagDefinitionContract
}

/**
 * Options accepted by the tokenizer
 */
export type TokenizerOptions = {
  filename: string
  onLine?: (line: string) => string
  claimTag?: (name: string) => LexerTagDefinitionContract | null
}
156 |
--------------------------------------------------------------------------------
/src/utils.ts:
--------------------------------------------------------------------------------
1 | /*
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import { TagTypes, MustacheTypes } from './enums.js'
11 | import type { Token, TagToken, MustacheToken } from './types.js'
12 |
13 | /**
14 | * Returns true when token is a tag with a given name
15 | */
16 | export function isTag(token: Token, name?: string): token is TagToken {
17 | if (token.type === TagTypes.TAG || token.type === TagTypes.ETAG) {
18 | return name ? token.properties.name === name : true
19 | }
20 | return false
21 | }
22 |
23 | /**
24 | * Returns true when token is an escape tag with a given name
25 | */
26 | export function isEscapedTag(token: Token, name?: string): token is TagToken {
27 | if (token.type === TagTypes.ETAG) {
28 | return name ? token.properties.name === name : true
29 | }
30 | return false
31 | }
32 |
33 | /**
34 | * Returns true when token.type is a mustache type
35 | */
36 | export function isMustache(token: Token): token is MustacheToken {
37 | return (
38 | token.type === MustacheTypes.EMUSTACHE ||
39 | token.type === MustacheTypes.ESMUSTACHE ||
40 | token.type === MustacheTypes.MUSTACHE ||
41 | token.type === MustacheTypes.SMUSTACHE
42 | )
43 | }
44 |
45 | /**
46 | * Returns true when token.type is a safe mustache type
47 | */
48 | export function isSafeMustache(token: Token): token is MustacheToken {
49 | return token.type === MustacheTypes.ESMUSTACHE || token.type === MustacheTypes.SMUSTACHE
50 | }
51 |
52 | /**
53 | * Returns true when toke.type is an escaped mustache type
54 | */
55 | export function isEscapedMustache(token: Token): token is MustacheToken {
56 | return token.type === MustacheTypes.EMUSTACHE || token.type === MustacheTypes.ESMUSTACHE
57 | }
58 |
59 | /**
60 | * Returns line and column number for a given lexer token
61 | */
62 | export function getLineAndColumn(token: Token): [number, number] {
63 | if (token.type === 'newline' || token.type === 'raw') {
64 | return [token.line, 0]
65 | }
66 | return [token.loc.start.line, token.loc.start.col]
67 | }
68 |
--------------------------------------------------------------------------------
/tests/detector.spec.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import { test } from '@japa/runner'
11 | import { getTag, getMustache } from '../src/detector.js'
12 |
13 | test.group('Detector (tag)', () => {
14 | test("return null when statement isn't starting with @", ({ assert }) => {
15 | assert.isNull(getTag('hello world', 'eval.edge', 1, 0, {}))
16 | })
17 |
18 | test('return null when statement has @ but in between the text', ({ assert }) => {
19 | assert.isNull(getTag('hello @world', 'eval.edge', 1, 0, {}))
20 | })
21 |
22 | test('return tag node when does starts with @', ({ assert }) => {
23 | const tags = { hello: { seekable: true, block: false } }
24 |
25 | assert.deepEqual(getTag('@hello()', 'eval.edge', 1, 0, tags), {
26 | name: 'hello',
27 | filename: 'eval.edge',
28 | escaped: false,
29 | selfclosed: false,
30 | line: 1,
31 | col: 6,
32 | hasBrace: true,
33 | seekable: true,
34 | block: false,
35 | noNewLine: false,
36 | })
37 | })
38 |
39 | test('computed col for non seekable tags', ({ assert }) => {
40 | const tags = { hello: { seekable: false, block: false } }
41 |
42 | assert.deepEqual(getTag('@hello', 'eval.edge', 1, 0, tags), {
43 | name: 'hello',
44 | filename: 'eval.edge',
45 | escaped: false,
46 | selfclosed: false,
47 | line: 1,
48 | col: 6,
49 | hasBrace: false,
50 | seekable: false,
51 | block: false,
52 | noNewLine: false,
53 | })
54 | })
55 |
56 | test('ignore whitespace for non-seekable tags', ({ assert }) => {
57 | const tags = { hello: { seekable: false, block: false } }
58 |
59 | assert.deepEqual(getTag('@hello ', 'eval.edge', 1, 0, tags), {
60 | name: 'hello',
61 | filename: 'eval.edge',
62 | escaped: false,
63 | selfclosed: false,
64 | line: 1,
65 | col: 8,
66 | hasBrace: false,
67 | seekable: false,
68 | block: false,
69 | noNewLine: false,
70 | })
71 | })
72 |
73 | test('detect escaped tags', ({ assert }) => {
74 | const tags = { hello: { seekable: false, block: false } }
75 |
76 | assert.deepEqual(getTag('@@hello ', 'eval.edge', 1, 0, tags), {
77 | name: 'hello',
78 | filename: 'eval.edge',
79 | escaped: true,
80 | selfclosed: false,
81 | line: 1,
82 | col: 9,
83 | hasBrace: false,
84 | seekable: false,
85 | block: false,
86 | noNewLine: false,
87 | })
88 | })
89 |
90 | test('detect selfclosed non seekable tags', ({ assert }) => {
91 | const tags = { hello: { seekable: false, block: false } }
92 |
93 | assert.deepEqual(getTag('@!hello', 'eval.edge', 1, 0, tags), {
94 | name: 'hello',
95 | filename: 'eval.edge',
96 | escaped: false,
97 | selfclosed: true,
98 | line: 1,
99 | col: 7,
100 | hasBrace: false,
101 | seekable: false,
102 | block: false,
103 | noNewLine: false,
104 | })
105 | })
106 |
107 | test('do not include special chars in non seekable tag names', ({ assert }) => {
108 | const tags = { hel: { seekable: false, block: false } }
109 |
110 | assert.deepEqual(getTag('@hel-lo', 'eval.edge', 1, 0, tags), {
111 | name: 'hel',
112 | filename: 'eval.edge',
113 | escaped: false,
114 | selfclosed: false,
115 | line: 1,
116 | col: 4,
117 | hasBrace: false,
118 | seekable: false,
119 | block: false,
120 | noNewLine: false,
121 | })
122 | })
123 |
124 | test('detect name for seekable tags', ({ assert }) => {
125 | const tags = { if: { seekable: true, block: true } }
126 |
127 | assert.deepEqual(getTag('@if(username)', 'eval.edge', 1, 0, tags), {
128 | name: 'if',
129 | filename: 'eval.edge',
130 | escaped: false,
131 | selfclosed: false,
132 | line: 1,
133 | col: 3,
134 | hasBrace: true,
135 | seekable: true,
136 | block: true,
137 | noNewLine: false,
138 | })
139 | })
140 |
141 | test('set hasBrace to false when seekable tag is missing the brace', ({ assert }) => {
142 | const tags = { if: { seekable: true, block: true } }
143 |
144 | assert.deepEqual(getTag('@if', 'eval.edge', 1, 0, tags), {
145 | name: 'if',
146 | filename: 'eval.edge',
147 | escaped: false,
148 | selfclosed: false,
149 | line: 1,
150 | col: 3,
151 | hasBrace: false,
152 | seekable: true,
153 | block: true,
154 | noNewLine: false,
155 | })
156 | })
157 |
158 | test('collect whitespace in front of the tag', ({ assert }) => {
159 | const tags = { if: { seekable: true, block: true } }
160 |
161 | assert.deepEqual(getTag(' @if(username)', 'eval.edge', 1, 0, tags), {
162 | name: 'if',
163 | filename: 'eval.edge',
164 | escaped: false,
165 | selfclosed: false,
166 | line: 1,
167 | col: 5,
168 | hasBrace: true,
169 | seekable: true,
170 | block: true,
171 | noNewLine: false,
172 | })
173 | })
174 |
175 | test('collect max of 2 whitspaces before the opening brace', ({ assert }) => {
176 | const tags = { if: { seekable: true, block: true } }
177 |
178 | assert.deepEqual(getTag(' @if (username)', 'eval.edge', 1, 0, tags), {
179 | name: 'if',
180 | filename: 'eval.edge',
181 | escaped: false,
182 | selfclosed: false,
183 | line: 1,
184 | col: 7,
185 | hasBrace: true,
186 | seekable: true,
187 | block: true,
188 | noNewLine: false,
189 | })
190 | })
191 | })
192 |
193 | test.group('Detector (mustache)', () => {
194 | test("return null when statement doesn't have mustache blocks", ({ assert }) => {
195 | assert.isNull(getMustache('hello world', 'eval.edge', 1, 0))
196 | })
197 |
198 | test('return mustache details when has mustache braces', ({ assert }) => {
199 | assert.deepEqual(getMustache('Hello {{ username }}', 'eval.edge', 1, 0), {
200 | filename: 'eval.edge',
201 | col: 6,
202 | line: 1,
203 | realCol: 6,
204 | escaped: false,
205 | safe: false,
206 | isComment: false,
207 | })
208 | })
209 |
210 | test('return mustache details when mustache has 3 braces', ({ assert }) => {
211 | assert.deepEqual(getMustache('Hello {{{ username }}}', 'eval.edge', 1, 0), {
212 | filename: 'eval.edge',
213 | col: 6,
214 | line: 1,
215 | realCol: 6,
216 | escaped: false,
217 | safe: true,
218 | isComment: false,
219 | })
220 | })
221 |
222 | test('return mustache details when mustache is escaped', ({ assert }) => {
223 | assert.deepEqual(getMustache('Hello @{{{ username }}}', 'eval.edge', 1, 0), {
224 | filename: 'eval.edge',
225 | col: 7,
226 | line: 1,
227 | realCol: 7,
228 | escaped: true,
229 | safe: true,
230 | isComment: false,
231 | })
232 | })
233 |
234 | test('return correct col when mustache is in between the content', ({ assert }) => {
235 | assert.deepEqual(getMustache('Hello @{{{ username }}}', 'eval.edge', 1, 8), {
236 | filename: 'eval.edge',
237 | col: 15,
238 | line: 1,
239 | realCol: 7,
240 | escaped: true,
241 | safe: true,
242 | isComment: false,
243 | })
244 | })
245 | })
246 |
--------------------------------------------------------------------------------
/tests/fixtures.spec.ts:
--------------------------------------------------------------------------------
1 | /*
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import { test } from '@japa/runner'
11 | import { fixtures } from '../fixtures/index.js'
12 | import { Tokenizer } from '../src/tokenizer.js'
13 |
14 | const tags = {
15 | if: {
16 | seekable: true,
17 | block: true,
18 | },
19 | include: {
20 | seekable: true,
21 | block: false,
22 | },
23 | }
24 |
25 | test.group('fixtures', () => {
26 | fixtures.forEach((fixture) => {
27 | test(fixture.name, ({ assert }) => {
28 | const tokenizer = new Tokenizer(fixture.in, tags, { filename: 'eval.edge' })
29 | tokenizer.parse()
30 | assert.deepEqual(tokenizer.tokens, fixture.out)
31 | })
32 | })
33 | })
34 |
--------------------------------------------------------------------------------
/tests/scanner.spec.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import dedent from 'dedent'
11 | import { test } from '@japa/runner'
12 | import { Scanner } from '../src/scanner.js'
13 |
test.group('Scanner', () => {
  // Scanner(pattern, [open, close], line, col): consumes chunks until
  // `pattern` is found outside balanced open/close pairs.
  test('scan characters till end of a pattern', ({ assert }) => {
    const scanner = new Scanner(')', ['(', ')'], 1, 0)
    scanner.scan('username)')

    assert.isTrue(scanner.closed)
    assert.equal(scanner.match, 'username')
    assert.equal(scanner.leftOver, '')
  })

  // An inner `(...)` pair is tolerated; only the unbalanced `)` closes.
  test('scan characters till end of a pattern with tolerations', ({ assert }) => {
    const scanner = new Scanner(')', ['(', ')'], 1, 0)
    scanner.scan('(2 + 2) * 3)')

    assert.isTrue(scanner.closed)
    assert.equal(scanner.match, '(2 + 2) * 3')
    assert.equal(scanner.leftOver, '')
  })

  // Text after the closing pattern is exposed via `leftOver`.
  test('scan characters and return left over after the match', ({ assert }) => {
    const scanner = new Scanner(')', ['(', ')'], 1, 0)
    scanner.scan('(2 + 2) * 3) is 12')

    assert.isTrue(scanner.closed)
    assert.equal(scanner.match, '(2 + 2) * 3')
    assert.equal(scanner.leftOver, ' is 12')
  })

  // No unbalanced `)` in the chunk, so the scanner stays open.
  test('return null when unable to find closing pattern', ({ assert }) => {
    const scanner = new Scanner(')', ['(', ')'], 1, 0)
    scanner.scan('(2 + 2) * 3')
    assert.isFalse(scanner.closed)
  })

  // State accumulates across multiple scan() calls until closed.
  test('scan multiple times unless closing pattern matches', ({ assert }) => {
    const scanner = new Scanner(')', ['(', ')'], 1, 0)
    scanner.scan('(2 + 2)')
    scanner.scan(' * 3')

    scanner.scan(') is 12')

    assert.isTrue(scanner.closed)
    assert.equal(scanner.match, '(2 + 2) * 3')
    assert.equal(scanner.leftOver, ' is 12')
  })

  // Multi-character closing pattern (`}}`).
  test('scan for pair of ending patterns', ({ assert }) => {
    const scanner = new Scanner('}}', ['{', '}'], 1, 0)
    scanner.scan(' username }}')

    assert.isTrue(scanner.closed)
    assert.equal(scanner.match, ' username ')
    assert.equal(scanner.leftOver, '')
  })

  // A balanced `{username}` pair is tolerated before the real `}}`.
  test('tolerate when scaning for ending pairs', ({ assert }) => {
    const scanner = new Scanner('}}', ['{', '}'], 1, 0)
    scanner.scan(' {username} }}')

    assert.isTrue(scanner.closed)
    assert.equal(scanner.match, ' {username} ')
    assert.equal(scanner.leftOver, '')
  })

  // A `}}}` pattern never completes here, so the scanner stays open.
  test('return null when ending pairs are not matched', ({ assert }) => {
    const scanner = new Scanner('}}}', ['{', '}'], 1, 0)
    scanner.scan(' {username} }}')

    assert.isFalse(scanner.closed)
  })

  // Closing braces split across lines: feed each line separately and
  // the scanner still resolves the tolerated pairs.
  test('work fine when ending patterns are mixed in multiple lines', ({ assert }) => {
    const template = dedent`
    {{
      {username
    }}}`

    const scanner = new Scanner('}}', ['{', '}'], 1, 0)
    const lines = template.split('\n')

    scanner.scan(lines[1])
    scanner.scan(lines[2])

    assert.isTrue(scanner.closed)
    assert.equal(scanner.match, ' {username}')
    assert.equal(scanner.leftOver, '')
  })
})
102 |
--------------------------------------------------------------------------------
/tests/tokenizer_comment.spec.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import dedent from 'dedent'
11 | import { test } from '@japa/runner'
12 |
13 | import { Tokenizer } from '../src/tokenizer.js'
14 | import { MustacheTypes, TagTypes } from '../src/enums.js'
15 |
// Tag definitions for the comment-tokenizer tests. Each entry mirrors
// LexerTagDefinitionContract via static class fields: `block` marks a
// block level tag (the `@if` tests close it with `@endif`), `seekable`
// marks a tag that accepts an argument expression.
const tagsDef = {
  if: class If {
    static block = true
    static selfclosed = false
    static seekable = true
  },
  else: class Else {
    static block = false
    static selfclosed = false
    static seekable = false
  },
  include: class Include {
    static block = false
    static selfclosed = false
    static seekable = true
  },
}
33 |
34 | test.group('Tokenizer Comment', () => {
35 | test('process block level comments', ({ assert }) => {
36 | const template = dedent`
37 | {{-- This is a comment --}}
38 | `
39 |
40 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
41 | tokenizer.parse()
42 |
43 | assert.isNull(tokenizer.tagStatement)
44 | assert.isNull(tokenizer.mustacheStatement)
45 | assert.deepEqual(tokenizer.tokens, [
46 | {
47 | type: 'comment',
48 | filename: 'eval.edge',
49 | loc: {
50 | start: {
51 | line: 1,
52 | col: 4,
53 | },
54 | end: {
55 | line: 1,
56 | col: 27,
57 | },
58 | },
59 | value: ' This is a comment ',
60 | },
61 | ])
62 | })
63 |
64 | test('process block level comments spanned to multiple lines', ({ assert }) => {
65 | const template = dedent`
66 | {{--
67 | This is a comment
68 | --}}
69 | `
70 |
71 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
72 | tokenizer.parse()
73 |
74 | assert.isNull(tokenizer.tagStatement)
75 | assert.isNull(tokenizer.mustacheStatement)
76 | assert.deepEqual(tokenizer.tokens, [
77 | {
78 | type: 'comment',
79 | filename: 'eval.edge',
80 | loc: {
81 | start: {
82 | line: 1,
83 | col: 4,
84 | },
85 | end: {
86 | line: 3,
87 | col: 4,
88 | },
89 | },
90 | value: '\n This is a comment\n',
91 | },
92 | ])
93 | })
94 |
95 | test('process inline comments with text', ({ assert }) => {
96 | const template = dedent`
97 | Hello {{-- This is inline comment --}}
98 | `
99 |
100 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
101 | tokenizer.parse()
102 |
103 | assert.isNull(tokenizer.tagStatement)
104 | assert.isNull(tokenizer.mustacheStatement)
105 | assert.deepEqual(tokenizer.tokens, [
106 | {
107 | type: 'raw',
108 | filename: 'eval.edge',
109 | line: 1,
110 | value: 'Hello ',
111 | },
112 | {
113 | type: 'comment',
114 | filename: 'eval.edge',
115 | loc: {
116 | start: {
117 | line: 1,
118 | col: 10,
119 | },
120 | end: {
121 | line: 1,
122 | col: 38,
123 | },
124 | },
125 | value: ' This is inline comment ',
126 | },
127 | ])
128 | })
129 |
130 | test('process inline comments with mustache', ({ assert }) => {
131 | const template = dedent`
132 | {{ username }} {{-- This is an inline comment --}}
133 | `
134 |
135 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
136 | tokenizer.parse()
137 |
138 | assert.isNull(tokenizer.tagStatement)
139 | assert.isNull(tokenizer.mustacheStatement)
140 | assert.deepEqual(tokenizer.tokens, [
141 | {
142 | type: MustacheTypes.MUSTACHE,
143 | filename: 'eval.edge',
144 | loc: {
145 | start: {
146 | line: 1,
147 | col: 2,
148 | },
149 | end: {
150 | line: 1,
151 | col: 14,
152 | },
153 | },
154 | properties: {
155 | jsArg: ' username ',
156 | },
157 | },
158 | {
159 | type: 'raw',
160 | filename: 'eval.edge',
161 | value: ' ',
162 | line: 1,
163 | },
164 | {
165 | type: 'comment',
166 | filename: 'eval.edge',
167 | loc: {
168 | start: {
169 | line: 1,
170 | col: 19,
171 | },
172 | end: {
173 | line: 1,
174 | col: 50,
175 | },
176 | },
177 | value: ' This is an inline comment ',
178 | },
179 | ])
180 | })
181 |
182 | test('process inline comments spanning over multiple lines with mustache', ({ assert }) => {
183 | const template = dedent`
184 | {{ username }} {{--
185 | This is an inline comment
186 | --}}
187 | `
188 |
189 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
190 | tokenizer.parse()
191 |
192 | assert.isNull(tokenizer.tagStatement)
193 | assert.isNull(tokenizer.mustacheStatement)
194 | assert.deepEqual(tokenizer.tokens, [
195 | {
196 | type: MustacheTypes.MUSTACHE,
197 | filename: 'eval.edge',
198 | loc: {
199 | start: {
200 | line: 1,
201 | col: 2,
202 | },
203 | end: {
204 | line: 1,
205 | col: 14,
206 | },
207 | },
208 | properties: {
209 | jsArg: ' username ',
210 | },
211 | },
212 | {
213 | type: 'raw',
214 | filename: 'eval.edge',
215 | value: ' ',
216 | line: 1,
217 | },
218 | {
219 | type: 'comment',
220 | filename: 'eval.edge',
221 | loc: {
222 | start: {
223 | line: 1,
224 | col: 19,
225 | },
226 | end: {
227 | line: 3,
228 | col: 4,
229 | },
230 | },
231 | value: '\n This is an inline comment\n',
232 | },
233 | ])
234 | })
235 |
236 | test('process inline comments surrounded by interpolation braces', ({ assert }) => {
237 | const template = dedent`
238 | {{ username }} {{-- This is an inline comment --}} {{ age }}
239 | `
240 |
241 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
242 | tokenizer.parse()
243 |
244 | assert.isNull(tokenizer.tagStatement)
245 | assert.isNull(tokenizer.mustacheStatement)
246 | assert.deepEqual(tokenizer.tokens, [
247 | {
248 | type: MustacheTypes.MUSTACHE,
249 | filename: 'eval.edge',
250 | loc: {
251 | start: {
252 | line: 1,
253 | col: 2,
254 | },
255 | end: {
256 | line: 1,
257 | col: 14,
258 | },
259 | },
260 | properties: {
261 | jsArg: ' username ',
262 | },
263 | },
264 | {
265 | type: 'raw',
266 | filename: 'eval.edge',
267 | value: ' ',
268 | line: 1,
269 | },
270 | {
271 | type: 'comment',
272 | filename: 'eval.edge',
273 | loc: {
274 | start: {
275 | line: 1,
276 | col: 19,
277 | },
278 | end: {
279 | line: 1,
280 | col: 50,
281 | },
282 | },
283 | value: ' This is an inline comment ',
284 | },
285 | {
286 | type: 'raw',
287 | filename: 'eval.edge',
288 | value: ' ',
289 | line: 1,
290 | },
291 | {
292 | type: MustacheTypes.MUSTACHE,
293 | filename: 'eval.edge',
294 | loc: {
295 | start: {
296 | line: 1,
297 | col: 53,
298 | },
299 | end: {
300 | line: 1,
301 | col: 60,
302 | },
303 | },
304 | properties: {
305 | jsArg: ' age ',
306 | },
307 | },
308 | ])
309 | })
310 |
311 | test('process inline comments spanning over multiple lines surrounded by interpolation braces', ({
312 | assert,
313 | }) => {
314 | const template = dedent`
315 | {{ username }} {{--
316 | This is an inline comment
317 | --}} {{ age }}
318 | `
319 |
320 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
321 | tokenizer.parse()
322 |
323 | assert.isNull(tokenizer.tagStatement)
324 | assert.isNull(tokenizer.mustacheStatement)
325 | assert.deepEqual(tokenizer.tokens, [
326 | {
327 | type: MustacheTypes.MUSTACHE,
328 | filename: 'eval.edge',
329 | loc: {
330 | start: {
331 | line: 1,
332 | col: 2,
333 | },
334 | end: {
335 | line: 1,
336 | col: 14,
337 | },
338 | },
339 | properties: {
340 | jsArg: ' username ',
341 | },
342 | },
343 | {
344 | type: 'raw',
345 | filename: 'eval.edge',
346 | value: ' ',
347 | line: 1,
348 | },
349 | {
350 | type: 'comment',
351 | filename: 'eval.edge',
352 | loc: {
353 | start: {
354 | line: 1,
355 | col: 19,
356 | },
357 | end: {
358 | line: 3,
359 | col: 4,
360 | },
361 | },
362 | value: '\n This is an inline comment\n',
363 | },
364 | {
365 | type: 'raw',
366 | filename: 'eval.edge',
367 | value: ' ',
368 | line: 3,
369 | },
370 | {
371 | type: MustacheTypes.MUSTACHE,
372 | filename: 'eval.edge',
373 | loc: {
374 | start: {
375 | line: 3,
376 | col: 7,
377 | },
378 | end: {
379 | line: 3,
380 | col: 14,
381 | },
382 | },
383 | properties: {
384 | jsArg: ' age ',
385 | },
386 | },
387 | ])
388 | })
389 |
390 | test('process inline comments surrounded by text', ({ assert }) => {
391 | const template = dedent`
392 | Hello {{-- This is inline comment --}} world
393 | `
394 |
395 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
396 | tokenizer.parse()
397 |
398 | assert.isNull(tokenizer.tagStatement)
399 | assert.isNull(tokenizer.mustacheStatement)
400 | assert.deepEqual(tokenizer.tokens, [
401 | {
402 | type: 'raw',
403 | filename: 'eval.edge',
404 | line: 1,
405 | value: 'Hello ',
406 | },
407 | {
408 | type: 'comment',
409 | filename: 'eval.edge',
410 | loc: {
411 | start: {
412 | line: 1,
413 | col: 10,
414 | },
415 | end: {
416 | line: 1,
417 | col: 38,
418 | },
419 | },
420 | value: ' This is inline comment ',
421 | },
422 | {
423 | type: 'raw',
424 | filename: 'eval.edge',
425 | line: 1,
426 | value: ' world',
427 | },
428 | ])
429 | })
430 |
431 | test('process inline comments spanning over multiple lines surrounded by text', ({ assert }) => {
432 | const template = dedent`
433 | Hello {{--
434 | This is inline comment
435 | --}} world
436 | `
437 |
438 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
439 | tokenizer.parse()
440 |
441 | assert.isNull(tokenizer.tagStatement)
442 | assert.isNull(tokenizer.mustacheStatement)
443 | assert.deepEqual(tokenizer.tokens, [
444 | {
445 | type: 'raw',
446 | filename: 'eval.edge',
447 | line: 1,
448 | value: 'Hello ',
449 | },
450 | {
451 | type: 'comment',
452 | filename: 'eval.edge',
453 | loc: {
454 | start: {
455 | line: 1,
456 | col: 10,
457 | },
458 | end: {
459 | line: 3,
460 | col: 4,
461 | },
462 | },
463 | value: '\n This is inline comment\n',
464 | },
465 | {
466 | type: 'raw',
467 | filename: 'eval.edge',
468 | line: 3,
469 | value: ' world',
470 | },
471 | ])
472 | })
473 |
474 | test('disallow inline comments with tags', ({ assert }) => {
475 | const template = dedent`
476 | @if(username) {{-- This is a comment --}}
477 | @endif
478 | `
479 |
480 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
481 | const fn = () => tokenizer.parse()
482 | assert.throws(fn, 'Unexpected token " {{-- This is a comment --}}"')
483 | })
484 |
485 | test('shrink newlines after block level comments', ({ assert }) => {
486 | const template = dedent`
487 | {{-- This is a comment --}}
488 | Hello world
489 | `
490 |
491 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
492 | tokenizer.parse()
493 |
494 | assert.isNull(tokenizer.tagStatement)
495 | assert.isNull(tokenizer.mustacheStatement)
496 | assert.deepEqual(tokenizer.tokens, [
497 | {
498 | type: 'comment',
499 | filename: 'eval.edge',
500 | loc: {
501 | start: {
502 | line: 1,
503 | col: 4,
504 | },
505 | end: {
506 | line: 1,
507 | col: 27,
508 | },
509 | },
510 | value: ' This is a comment ',
511 | },
512 | {
513 | type: 'newline',
514 | filename: 'eval.edge',
515 | line: 1,
516 | },
517 | {
518 | type: 'raw',
519 | filename: 'eval.edge',
520 | line: 2,
521 | value: 'Hello world',
522 | },
523 | ])
524 | })
525 |
526 | test('do not shrink newlines for inline comments', ({ assert }) => {
527 | const template = dedent`
528 | Hey {{-- This is a comment --}}
529 | Hello world
530 | `
531 |
532 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
533 | tokenizer.parse()
534 |
535 | assert.isNull(tokenizer.tagStatement)
536 | assert.isNull(tokenizer.mustacheStatement)
537 | assert.deepEqual(tokenizer.tokens, [
538 | {
539 | type: 'raw',
540 | filename: 'eval.edge',
541 | line: 1,
542 | value: 'Hey ',
543 | },
544 | {
545 | type: 'comment',
546 | filename: 'eval.edge',
547 | loc: {
548 | start: {
549 | line: 1,
550 | col: 8,
551 | },
552 | end: {
553 | line: 1,
554 | col: 31,
555 | },
556 | },
557 | value: ' This is a comment ',
558 | },
559 | {
560 | type: 'newline',
561 | filename: 'eval.edge',
562 | line: 1,
563 | },
564 | {
565 | type: 'raw',
566 | filename: 'eval.edge',
567 | line: 2,
568 | value: 'Hello world',
569 | },
570 | ])
571 | })
572 |
573 | test('do not shrink newlines for inline comments when raw text is after the comment', ({
574 | assert,
575 | }) => {
576 | const template = dedent`
577 | {{-- This is a comment --}} Hey
578 | Hello world
579 | `
580 |
581 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
582 | tokenizer.parse()
583 |
584 | assert.isNull(tokenizer.tagStatement)
585 | assert.isNull(tokenizer.mustacheStatement)
586 | assert.deepEqual(tokenizer.tokens, [
587 | {
588 | type: 'comment',
589 | filename: 'eval.edge',
590 | loc: {
591 | start: {
592 | line: 1,
593 | col: 4,
594 | },
595 | end: {
596 | line: 1,
597 | col: 27,
598 | },
599 | },
600 | value: ' This is a comment ',
601 | },
602 | {
603 | type: 'raw',
604 | filename: 'eval.edge',
605 | line: 1,
606 | value: ' Hey',
607 | },
608 | {
609 | type: 'newline',
610 | filename: 'eval.edge',
611 | line: 1,
612 | },
613 | {
614 | type: 'raw',
615 | filename: 'eval.edge',
616 | line: 2,
617 | value: 'Hello world',
618 | },
619 | ])
620 | })
621 |
622 | test('do not shrink newlines when comment is next to an interpolation brace', ({ assert }) => {
623 | const template = dedent`
624 | {{-- This is a comment --}} {{ username }}
625 | Hello world
626 | `
627 |
628 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
629 | tokenizer.parse()
630 |
631 | assert.isNull(tokenizer.tagStatement)
632 | assert.isNull(tokenizer.mustacheStatement)
633 | assert.deepEqual(tokenizer.tokens, [
634 | {
635 | type: 'comment',
636 | filename: 'eval.edge',
637 | loc: {
638 | start: {
639 | line: 1,
640 | col: 4,
641 | },
642 | end: {
643 | line: 1,
644 | col: 27,
645 | },
646 | },
647 | value: ' This is a comment ',
648 | },
649 | {
650 | type: 'raw',
651 | filename: 'eval.edge',
652 | line: 1,
653 | value: ' ',
654 | },
655 | {
656 | type: MustacheTypes.MUSTACHE,
657 | filename: 'eval.edge',
658 | loc: {
659 | start: {
660 | line: 1,
661 | col: 30,
662 | },
663 | end: {
664 | line: 1,
665 | col: 42,
666 | },
667 | },
668 | properties: {
669 | jsArg: ' username ',
670 | },
671 | },
672 | {
673 | type: 'newline',
674 | filename: 'eval.edge',
675 | line: 1,
676 | },
677 | {
678 | type: 'raw',
679 | filename: 'eval.edge',
680 | line: 2,
681 | value: 'Hello world',
682 | },
683 | ])
684 | })
685 |
686 | test('do not shrink newlines when comment spanned over multiple lines, after interpolation brace', ({
687 | assert,
688 | }) => {
689 | const template = dedent`
690 | {{ username }} {{--
691 | This is an inline comment
692 | --}}
693 | {{ age }}
694 | `
695 |
696 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
697 | tokenizer.parse()
698 |
699 | assert.isNull(tokenizer.tagStatement)
700 | assert.isNull(tokenizer.mustacheStatement)
701 | // console.log(JSON.stringify(tokenizer.tokens, null, 2))
702 |
703 | assert.deepEqual(tokenizer.tokens, [
704 | {
705 | type: MustacheTypes.MUSTACHE,
706 | filename: 'eval.edge',
707 | loc: {
708 | start: {
709 | line: 1,
710 | col: 2,
711 | },
712 | end: {
713 | line: 1,
714 | col: 14,
715 | },
716 | },
717 | properties: {
718 | jsArg: ' username ',
719 | },
720 | },
721 | {
722 | type: 'raw',
723 | filename: 'eval.edge',
724 | value: ' ',
725 | line: 1,
726 | },
727 | {
728 | type: 'comment',
729 | filename: 'eval.edge',
730 | loc: {
731 | start: {
732 | line: 1,
733 | col: 19,
734 | },
735 | end: {
736 | line: 3,
737 | col: 4,
738 | },
739 | },
740 | value: '\n This is an inline comment\n',
741 | },
742 | {
743 | type: 'newline',
744 | filename: 'eval.edge',
745 | line: 3,
746 | },
747 | {
748 | type: MustacheTypes.MUSTACHE,
749 | filename: 'eval.edge',
750 | loc: {
751 | start: {
752 | line: 4,
753 | col: 2,
754 | },
755 | end: {
756 | line: 4,
757 | col: 9,
758 | },
759 | },
760 | properties: {
761 | jsArg: ' age ',
762 | },
763 | },
764 | ])
765 | })
766 |
767 | test('do shrink comment spanned over multiple lines', ({ assert }) => {
768 | const template = dedent`
769 | {{--
770 | This is an inline comment
771 | --}}
772 | {{ age }}
773 | `
774 |
775 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
776 | tokenizer.parse()
777 |
778 | assert.isNull(tokenizer.tagStatement)
779 | assert.isNull(tokenizer.mustacheStatement)
780 | assert.deepEqual(tokenizer.tokens, [
781 | {
782 | type: 'comment',
783 | filename: 'eval.edge',
784 | loc: {
785 | start: {
786 | line: 1,
787 | col: 4,
788 | },
789 | end: {
790 | line: 3,
791 | col: 4,
792 | },
793 | },
794 | value: '\n This is an inline comment\n',
795 | },
796 | {
797 | type: 'newline',
798 | filename: 'eval.edge',
799 | line: 3,
800 | },
801 | {
802 | type: MustacheTypes.MUSTACHE,
803 | filename: 'eval.edge',
804 | loc: {
805 | start: {
806 | line: 4,
807 | col: 2,
808 | },
809 | end: {
810 | line: 4,
811 | col: 9,
812 | },
813 | },
814 | properties: {
815 | jsArg: ' age ',
816 | },
817 | },
818 | ])
819 | })
820 |
821 | test('do not shrink newlines when comment spanned over multiple lines, after raw text', ({
822 | assert,
823 | }) => {
824 | const template = dedent`
825 | Hello {{--
826 | This is an inline comment
827 | --}}
828 | {{ age }}
829 | `
830 |
831 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
832 | tokenizer.parse()
833 |
834 | assert.isNull(tokenizer.tagStatement)
835 | assert.isNull(tokenizer.mustacheStatement)
836 | assert.deepEqual(tokenizer.tokens, [
837 | {
838 | type: 'raw',
839 | filename: 'eval.edge',
840 | value: 'Hello ',
841 | line: 1,
842 | },
843 | {
844 | type: 'comment',
845 | filename: 'eval.edge',
846 | loc: {
847 | start: {
848 | line: 1,
849 | col: 10,
850 | },
851 | end: {
852 | line: 3,
853 | col: 4,
854 | },
855 | },
856 | value: '\n This is an inline comment\n',
857 | },
858 | {
859 | type: 'newline',
860 | filename: 'eval.edge',
861 | line: 3,
862 | },
863 | {
864 | type: MustacheTypes.MUSTACHE,
865 | filename: 'eval.edge',
866 | loc: {
867 | start: {
868 | line: 4,
869 | col: 2,
870 | },
871 | end: {
872 | line: 4,
873 | col: 9,
874 | },
875 | },
876 | properties: {
877 | jsArg: ' age ',
878 | },
879 | },
880 | ])
881 |
882 | const templateWithRawTextAround = dedent`
883 | Hello {{--
884 | This is an inline comment
885 | --}}
886 | world
887 | `
888 |
889 | const tokenizer1 = new Tokenizer(templateWithRawTextAround, tagsDef, { filename: 'eval.edge' })
890 | tokenizer1.parse()
891 |
892 | assert.isNull(tokenizer1.tagStatement)
893 | assert.isNull(tokenizer1.mustacheStatement)
894 | assert.deepEqual(tokenizer1.tokens, [
895 | {
896 | type: 'raw',
897 | filename: 'eval.edge',
898 | value: 'Hello ',
899 | line: 1,
900 | },
901 | {
902 | type: 'comment',
903 | filename: 'eval.edge',
904 | loc: {
905 | start: {
906 | line: 1,
907 | col: 10,
908 | },
909 | end: {
910 | line: 3,
911 | col: 4,
912 | },
913 | },
914 | value: '\n This is an inline comment\n',
915 | },
916 | {
917 | type: 'newline',
918 | filename: 'eval.edge',
919 | line: 3,
920 | },
921 | {
922 | type: 'raw',
923 | filename: 'eval.edge',
924 | value: 'world',
925 | line: 4,
926 | },
927 | ])
928 | })
929 |
930 | test('do not emit newline when firstline is a comment', ({ assert }) => {
931 | const template = dedent`
932 | {{-- This is a comment --}}
933 | @if(username)
934 | @endif
935 | Hello world
936 | `
937 |
938 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
939 | tokenizer.parse()
940 |
941 | assert.isNull(tokenizer.tagStatement)
942 | assert.isNull(tokenizer.mustacheStatement)
943 | assert.deepEqual(tokenizer.tokens, [
944 | {
945 | type: 'comment',
946 | filename: 'eval.edge',
947 | loc: {
948 | start: {
949 | line: 1,
950 | col: 4,
951 | },
952 | end: {
953 | line: 1,
954 | col: 27,
955 | },
956 | },
957 | value: ' This is a comment ',
958 | },
959 | {
960 | filename: 'eval.edge',
961 | type: TagTypes.TAG,
962 | properties: {
963 | name: 'if',
964 | jsArg: 'username',
965 | selfclosed: false,
966 | },
967 | loc: {
968 | start: {
969 | line: 2,
970 | col: 4,
971 | },
972 | end: {
973 | line: 2,
974 | col: 13,
975 | },
976 | },
977 | children: [],
978 | },
979 | {
980 | type: 'newline',
981 | filename: 'eval.edge',
982 | line: 3,
983 | },
984 | {
985 | type: 'raw',
986 | filename: 'eval.edge',
987 | line: 4,
988 | value: 'Hello world',
989 | },
990 | ])
991 | })
992 | })
993 |
--------------------------------------------------------------------------------
/tests/tokenizer_mustache.spec.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import dedent from 'dedent'
11 | import { test } from '@japa/runner'
12 |
13 | import { Tokenizer } from '../src/tokenizer.js'
14 | import { MustacheTypes } from '../src/enums.js'
15 |
16 | const tagsDef = {
17 | if: class If {
18 | static block = true
19 | static selfclosed = false
20 | static seekable = true
21 | },
22 | else: class Else {
23 | static block = false
24 | static selfclosed = false
25 | static seekable = false
26 | },
27 | include: class Include {
28 | static block = false
29 | static selfclosed = false
30 | static seekable = true
31 | },
32 | }
33 |
34 | test.group('Tokenizer Mustache', () => {
35 | test('process mustache blocks', ({ assert }) => {
36 | const template = 'Hello {{ username }}'
37 |
38 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
39 | tokenizer.parse()
40 |
41 | assert.isNull(tokenizer.tagStatement)
42 | assert.isNull(tokenizer.mustacheStatement)
43 | assert.deepEqual(tokenizer.tokens, [
44 | {
45 | type: 'raw',
46 | filename: 'eval.edge',
47 | line: 1,
48 | value: 'Hello ',
49 | },
50 | {
51 | type: MustacheTypes.MUSTACHE,
52 | filename: 'eval.edge',
53 | loc: {
54 | start: {
55 | line: 1,
56 | col: 8,
57 | },
58 | end: {
59 | line: 1,
60 | col: 20,
61 | },
62 | },
63 | properties: {
64 | jsArg: ' username ',
65 | },
66 | },
67 | ])
68 | })
69 |
70 | test('process mustache blocks with text around it', ({ assert }) => {
71 | const template = 'Hello {{ username }}!'
72 |
73 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
74 | tokenizer.parse()
75 |
76 | assert.isNull(tokenizer.tagStatement)
77 | assert.isNull(tokenizer.mustacheStatement)
78 | assert.deepEqual(tokenizer.tokens, [
79 | {
80 | type: 'raw',
81 | filename: 'eval.edge',
82 | line: 1,
83 | value: 'Hello ',
84 | },
85 | {
86 | type: MustacheTypes.MUSTACHE,
87 | filename: 'eval.edge',
88 | loc: {
89 | start: {
90 | line: 1,
91 | col: 8,
92 | },
93 | end: {
94 | line: 1,
95 | col: 20,
96 | },
97 | },
98 | properties: {
99 | jsArg: ' username ',
100 | },
101 | },
102 | {
103 | type: 'raw',
104 | filename: 'eval.edge',
105 | line: 1,
106 | value: '!',
107 | },
108 | ])
109 | })
110 |
111 | test('parse multiline mustache', ({ assert }) => {
112 | const template = dedent`List of users are {{
113 | users.map((user) => {
114 | return user.username
115 | }).join(', ')
116 | }}.`
117 |
118 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
119 | tokenizer.parse()
120 |
121 | assert.isNull(tokenizer.tagStatement)
122 | assert.isNull(tokenizer.mustacheStatement)
123 | assert.deepEqual(tokenizer.tokens, [
124 | {
125 | type: 'raw',
126 | filename: 'eval.edge',
127 | line: 1,
128 | value: 'List of users are ',
129 | },
130 | {
131 | type: MustacheTypes.MUSTACHE,
132 | filename: 'eval.edge',
133 | loc: {
134 | start: {
135 | line: 1,
136 | col: 20,
137 | },
138 | end: {
139 | line: 5,
140 | col: 2,
141 | },
142 | },
143 | properties: {
144 | jsArg: "\n users.map((user) => {\n return user.username\n }).join(', ')\n",
145 | },
146 | },
147 | {
148 | type: 'raw',
149 | filename: 'eval.edge',
150 | line: 5,
151 | value: '.',
152 | },
153 | ])
154 | })
155 |
156 | test('Allow safe mustache', ({ assert }) => {
157 | const template = dedent`List of users are {{{
158 | users.map((user) => {
159 | return user.username
160 | }).join(', ')
161 | }}}.`
162 |
163 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
164 | tokenizer.parse()
165 |
166 | assert.isNull(tokenizer.tagStatement)
167 | assert.isNull(tokenizer.mustacheStatement)
168 | assert.deepEqual(tokenizer.tokens, [
169 | {
170 | type: 'raw',
171 | filename: 'eval.edge',
172 | line: 1,
173 | value: 'List of users are ',
174 | },
175 | {
176 | type: MustacheTypes.SMUSTACHE,
177 | filename: 'eval.edge',
178 | loc: {
179 | start: {
180 | line: 1,
181 | col: 21,
182 | },
183 | end: {
184 | line: 5,
185 | col: 3,
186 | },
187 | },
188 | properties: {
189 | jsArg: "\n users.map((user) => {\n return user.username\n }).join(', ')\n",
190 | },
191 | },
192 | {
193 | type: 'raw',
194 | filename: 'eval.edge',
195 | line: 5,
196 | value: '.',
197 | },
198 | ])
199 | })
200 |
201 | test('Allow safe escaped mustache', ({ assert }) => {
202 | const template = dedent`List of users are @{{{
203 | users.map((user) => {
204 | return user.username
205 | }).join(', ')
206 | }}}.`
207 |
208 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
209 | tokenizer.parse()
210 |
211 | assert.isNull(tokenizer.tagStatement)
212 | assert.isNull(tokenizer.mustacheStatement)
213 | assert.deepEqual(tokenizer.tokens, [
214 | {
215 | type: 'raw',
216 | filename: 'eval.edge',
217 | line: 1,
218 | value: 'List of users are ',
219 | },
220 | {
221 | type: MustacheTypes.ESMUSTACHE,
222 | filename: 'eval.edge',
223 | loc: {
224 | start: {
225 | line: 1,
226 | col: 22,
227 | },
228 | end: {
229 | line: 5,
230 | col: 3,
231 | },
232 | },
233 | properties: {
234 | jsArg: "\n users.map((user) => {\n return user.username\n }).join(', ')\n",
235 | },
236 | },
237 | {
238 | type: 'raw',
239 | filename: 'eval.edge',
240 | line: 5,
241 | value: '.',
242 | },
243 | ])
244 | })
245 |
246 | test('parse multiple mustache statements in a single line', ({ assert }) => {
247 | const template = dedent`Hello {{ username }}, your age is {{ age }}`
248 |
249 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
250 | tokenizer.parse()
251 |
252 | assert.isNull(tokenizer.tagStatement)
253 | assert.isNull(tokenizer.mustacheStatement)
254 |
255 | assert.deepEqual(tokenizer.tokens, [
256 | {
257 | type: 'raw',
258 | filename: 'eval.edge',
259 | line: 1,
260 | value: 'Hello ',
261 | },
262 | {
263 | type: MustacheTypes.MUSTACHE,
264 | filename: 'eval.edge',
265 | loc: {
266 | start: {
267 | line: 1,
268 | col: 8,
269 | },
270 | end: {
271 | line: 1,
272 | col: 20,
273 | },
274 | },
275 | properties: {
276 | jsArg: ' username ',
277 | },
278 | },
279 | {
280 | type: 'raw',
281 | filename: 'eval.edge',
282 | line: 1,
283 | value: ', your age is ',
284 | },
285 | {
286 | type: MustacheTypes.MUSTACHE,
287 | filename: 'eval.edge',
288 | loc: {
289 | start: {
290 | line: 1,
291 | col: 36,
292 | },
293 | end: {
294 | line: 1,
295 | col: 43,
296 | },
297 | },
298 | properties: {
299 | jsArg: ' age ',
300 | },
301 | },
302 | ])
303 | })
304 |
305 | test('parse multiple mustache statements in multiple lines', ({ assert }) => {
306 | const template = dedent`
307 | Hello {{ username }}, your friends are {{
308 | users.map((user) => {
309 | return user.username
310 | }).join(', ')
311 | }}!
312 | Bye
313 | `
314 |
315 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
316 | tokenizer.parse()
317 |
318 | assert.isNull(tokenizer.tagStatement)
319 | assert.isNull(tokenizer.mustacheStatement)
320 |
321 | assert.deepEqual(tokenizer.tokens, [
322 | {
323 | type: 'raw',
324 | filename: 'eval.edge',
325 | line: 1,
326 | value: 'Hello ',
327 | },
328 | {
329 | type: MustacheTypes.MUSTACHE,
330 | filename: 'eval.edge',
331 | loc: {
332 | start: {
333 | line: 1,
334 | col: 8,
335 | },
336 | end: {
337 | line: 1,
338 | col: 20,
339 | },
340 | },
341 | properties: {
342 | jsArg: ' username ',
343 | },
344 | },
345 | {
346 | type: 'raw',
347 | filename: 'eval.edge',
348 | line: 1,
349 | value: ', your friends are ',
350 | },
351 | {
352 | filename: 'eval.edge',
353 | type: MustacheTypes.MUSTACHE,
354 | loc: {
355 | start: {
356 | line: 1,
357 | col: 41,
358 | },
359 | end: {
360 | line: 5,
361 | col: 2,
362 | },
363 | },
364 | properties: {
365 | jsArg: "\n users.map((user) => {\n return user.username\n }).join(', ')\n",
366 | },
367 | },
368 | {
369 | type: 'raw',
370 | filename: 'eval.edge',
371 | line: 5,
372 | value: '!',
373 | },
374 | {
375 | type: 'newline',
376 | filename: 'eval.edge',
377 | line: 5,
378 | },
379 | {
380 | type: 'raw',
381 | filename: 'eval.edge',
382 | line: 6,
383 | value: 'Bye',
384 | },
385 | ])
386 | })
387 |
388 | test('raise error if incomplete mustache statements', ({ assert }) => {
389 | assert.plan(2)
390 | const template = 'Hello {{ username'
391 |
392 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
393 | try {
394 | tokenizer.parse()
395 | } catch ({ message, line }) {
396 | assert.equal(message, 'Missing token "}"')
397 | assert.equal(line, 1)
398 | }
399 | })
400 |
401 | test('parse 3 mustache statements in a single line', ({ assert }) => {
402 | const template = dedent`{{ username }}, {{ age }} and {{ state }}`
403 |
404 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
405 | tokenizer.parse()
406 |
407 | assert.isNull(tokenizer.tagStatement)
408 | assert.isNull(tokenizer.mustacheStatement)
409 |
410 | assert.deepEqual(tokenizer.tokens, [
411 | {
412 | filename: 'eval.edge',
413 | type: MustacheTypes.MUSTACHE,
414 | loc: {
415 | start: {
416 | line: 1,
417 | col: 2,
418 | },
419 | end: {
420 | line: 1,
421 | col: 14,
422 | },
423 | },
424 | properties: {
425 | jsArg: ' username ',
426 | },
427 | },
428 | {
429 | type: 'raw',
430 | filename: 'eval.edge',
431 | line: 1,
432 | value: ', ',
433 | },
434 | {
435 | filename: 'eval.edge',
436 | type: MustacheTypes.MUSTACHE,
437 | loc: {
438 | start: {
439 | line: 1,
440 | col: 18,
441 | },
442 | end: {
443 | line: 1,
444 | col: 25,
445 | },
446 | },
447 | properties: {
448 | jsArg: ' age ',
449 | },
450 | },
451 | {
452 | type: 'raw',
453 | filename: 'eval.edge',
454 | line: 1,
455 | value: ' and ',
456 | },
457 | {
458 | filename: 'eval.edge',
459 | type: MustacheTypes.MUSTACHE,
460 | loc: {
461 | start: {
462 | line: 1,
463 | col: 32,
464 | },
465 | end: {
466 | line: 1,
467 | col: 41,
468 | },
469 | },
470 | properties: {
471 | jsArg: ' state ',
472 | },
473 | },
474 | ])
475 | })
476 |
477 | test('work fine with escaped and regular mustache braces', ({ assert }) => {
478 | const template = dedent`{{ username }}, @{{ age }}`
479 |
480 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
481 | tokenizer.parse()
482 |
483 | assert.isNull(tokenizer.tagStatement)
484 | assert.isNull(tokenizer.mustacheStatement)
485 |
486 | assert.deepEqual(tokenizer.tokens, [
487 | {
488 | filename: 'eval.edge',
489 | type: MustacheTypes.MUSTACHE,
490 | loc: {
491 | start: {
492 | line: 1,
493 | col: 2,
494 | },
495 | end: {
496 | line: 1,
497 | col: 14,
498 | },
499 | },
500 | properties: {
501 | jsArg: ' username ',
502 | },
503 | },
504 | {
505 | type: 'raw',
506 | filename: 'eval.edge',
507 | line: 1,
508 | value: ', ',
509 | },
510 | {
511 | filename: 'eval.edge',
512 | type: MustacheTypes.EMUSTACHE,
513 | loc: {
514 | start: {
515 | line: 1,
516 | col: 19,
517 | },
518 | end: {
519 | line: 1,
520 | col: 26,
521 | },
522 | },
523 | properties: {
524 | jsArg: ' age ',
525 | },
526 | },
527 | ])
528 | })
529 |
530 | test('work fine with multiline escaped', ({ assert }) => {
531 | const template = dedent`{{ username }}, @{{
532 | users.map((user) => user.username)
533 | }}`
534 |
535 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
536 | tokenizer.parse()
537 |
538 | assert.isNull(tokenizer.tagStatement)
539 | assert.isNull(tokenizer.mustacheStatement)
540 |
541 | assert.deepEqual(tokenizer.tokens, [
542 | {
543 | filename: 'eval.edge',
544 | type: MustacheTypes.MUSTACHE,
545 | loc: {
546 | start: {
547 | line: 1,
548 | col: 2,
549 | },
550 | end: {
551 | line: 1,
552 | col: 14,
553 | },
554 | },
555 | properties: {
556 | jsArg: ' username ',
557 | },
558 | },
559 | {
560 | type: 'raw',
561 | filename: 'eval.edge',
562 | line: 1,
563 | value: ', ',
564 | },
565 | {
566 | filename: 'eval.edge',
567 | type: MustacheTypes.EMUSTACHE,
568 | loc: {
569 | start: {
570 | line: 1,
571 | col: 19,
572 | },
573 | end: {
574 | line: 3,
575 | col: 2,
576 | },
577 | },
578 | properties: {
579 | jsArg: '\n users.map((user) => user.username)\n',
580 | },
581 | },
582 | ])
583 | })
584 |
585 | test('parse multiple mustache statements when escaped and unescaped', ({ assert }) => {
586 | const template = dedent`Hello @{{ username }}, your age is {{ age }}`
587 |
588 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
589 | tokenizer.parse()
590 |
591 | assert.isNull(tokenizer.tagStatement)
592 | assert.isNull(tokenizer.mustacheStatement)
593 |
594 | assert.deepEqual(tokenizer.tokens, [
595 | {
596 | type: 'raw',
597 | filename: 'eval.edge',
598 | line: 1,
599 | value: 'Hello ',
600 | },
601 | {
602 | filename: 'eval.edge',
603 | type: MustacheTypes.EMUSTACHE,
604 | loc: {
605 | start: {
606 | line: 1,
607 | col: 9,
608 | },
609 | end: {
610 | line: 1,
611 | col: 21,
612 | },
613 | },
614 | properties: {
615 | jsArg: ' username ',
616 | },
617 | },
618 | {
619 | type: 'raw',
620 | filename: 'eval.edge',
621 | line: 1,
622 | value: ', your age is ',
623 | },
624 | {
625 | filename: 'eval.edge',
626 | type: MustacheTypes.MUSTACHE,
627 | loc: {
628 | start: {
629 | line: 1,
630 | col: 37,
631 | },
632 | end: {
633 | line: 1,
634 | col: 44,
635 | },
636 | },
637 | properties: {
638 | jsArg: ' age ',
639 | },
640 | },
641 | ])
642 | })
643 |
644 | test('raise error if mustache is not properly closed', ({ assert }) => {
645 | assert.plan(2)
646 |
647 | const template = dedent`Hello {{ username }.`
648 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
649 |
650 | try {
651 | tokenizer.parse()
652 | } catch ({ message, line }) {
653 | assert.equal(message, 'Missing token "}"')
654 | assert.equal(line, 1)
655 | }
656 | })
657 |
658 | test('raise error if multiple mustache is not properly closed', ({ assert }) => {
659 | assert.plan(2)
660 |
661 | const template = dedent`Hello {{
662 | users.map((user) => {
663 | return user.username
664 | }) }
665 | }`
666 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
667 |
668 | try {
669 | tokenizer.parse()
670 | } catch ({ message, line }) {
671 | assert.equal(message, 'Missing token "}"')
672 | assert.equal(line, 5)
673 | }
674 | })
675 | })
676 |
--------------------------------------------------------------------------------
/tests/tokenizer_tags.spec.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import dedent from 'dedent'
11 | import { test } from '@japa/runner'
12 |
13 | import { Tokenizer } from '../src/tokenizer.js'
14 | import { TagTypes, MustacheTypes } from '../src/enums.js'
15 |
16 | const tagsDef = {
17 | if: class If {
18 | static block = true
19 | static seekable = true
20 | },
21 | else: class Else {
22 | static block = false
23 | static seekable = false
24 | },
25 | include: class Include {
26 | static block = false
27 | static seekable = true
28 | },
29 | each: class Each {
30 | static block = true
31 | static seekable = true
32 | },
33 | }
34 |
35 | test.group('Tokenizer | Tags', () => {
36 | test('tokenize a template into tokens', ({ assert }) => {
37 | const template = dedent`
38 | Hello
39 |
40 | @if(username)
41 | @endif
42 | `
43 |
44 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
45 | tokenizer.parse()
46 |
47 | assert.isNull(tokenizer.tagStatement)
48 | assert.deepEqual(tokenizer.tokens, [
49 | {
50 | type: 'raw',
51 | filename: 'eval.edge',
52 | value: 'Hello',
53 | line: 1,
54 | },
55 | {
56 | type: 'newline',
57 | filename: 'eval.edge',
58 | line: 1,
59 | },
60 | {
61 | type: 'raw',
62 | filename: 'eval.edge',
63 | value: '',
64 | line: 2,
65 | },
66 | {
67 | filename: 'eval.edge',
68 | type: TagTypes.TAG,
69 | properties: {
70 | name: 'if',
71 | jsArg: 'username',
72 | selfclosed: false,
73 | },
74 | loc: {
75 | start: {
76 | line: 3,
77 | col: 4,
78 | },
79 | end: {
80 | line: 3,
81 | col: 13,
82 | },
83 | },
84 | children: [],
85 | },
86 | ])
87 | })
88 |
89 | test('add content inside tags as the tag children', ({ assert }) => {
90 | const template = dedent`
91 | Hello
92 |
93 | @if(username)
94 | Hello
95 | @endif
96 | `
97 |
98 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
99 | tokenizer.parse()
100 |
101 | assert.isNull(tokenizer.tagStatement)
102 | assert.deepEqual(tokenizer.tokens, [
103 | {
104 | type: 'raw',
105 | filename: 'eval.edge',
106 | value: 'Hello',
107 | line: 1,
108 | },
109 | {
110 | type: 'newline',
111 | filename: 'eval.edge',
112 | line: 1,
113 | },
114 | {
115 | type: 'raw',
116 | filename: 'eval.edge',
117 | value: '',
118 | line: 2,
119 | },
120 | {
121 | type: TagTypes.TAG,
122 | filename: 'eval.edge',
123 | loc: {
124 | start: {
125 | line: 3,
126 | col: 4,
127 | },
128 | end: {
129 | line: 3,
130 | col: 13,
131 | },
132 | },
133 | properties: {
134 | name: 'if',
135 | jsArg: 'username',
136 | selfclosed: false,
137 | },
138 | children: [
139 | {
140 | type: 'newline',
141 | filename: 'eval.edge',
142 | line: 3,
143 | },
144 | {
145 | type: 'raw',
146 | filename: 'eval.edge',
147 | value: ' Hello',
148 | line: 4,
149 | },
150 | ],
151 | },
152 | ])
153 | })
154 |
155 | test('allow nested tags', ({ assert }) => {
156 | const template = dedent`
157 | Hello
158 |
159 | @if(username)
160 | @if(username === 'virk')
161 | Hi
162 | @endif
163 | @endif
164 | `
165 |
166 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
167 | tokenizer.parse()
168 |
169 | assert.isNull(tokenizer.tagStatement)
170 | assert.deepEqual(tokenizer.tokens, [
171 | {
172 | type: 'raw',
173 | filename: 'eval.edge',
174 | value: 'Hello',
175 | line: 1,
176 | },
177 | {
178 | type: 'newline',
179 | filename: 'eval.edge',
180 | line: 1,
181 | },
182 | {
183 | type: 'raw',
184 | filename: 'eval.edge',
185 | value: '',
186 | line: 2,
187 | },
188 | {
189 | filename: 'eval.edge',
190 | type: TagTypes.TAG,
191 | loc: {
192 | start: {
193 | line: 3,
194 | col: 4,
195 | },
196 | end: {
197 | line: 3,
198 | col: 13,
199 | },
200 | },
201 | properties: {
202 | name: 'if',
203 | jsArg: 'username',
204 | selfclosed: false,
205 | },
206 | children: [
207 | {
208 | type: 'newline',
209 | filename: 'eval.edge',
210 | line: 3,
211 | },
212 | {
213 | type: TagTypes.TAG,
214 | filename: 'eval.edge',
215 | loc: {
216 | start: {
217 | line: 4,
218 | col: 6,
219 | },
220 | end: {
221 | line: 4,
222 | col: 26,
223 | },
224 | },
225 | properties: {
226 | name: 'if',
227 | jsArg: "username === 'virk'",
228 | selfclosed: false,
229 | },
230 | children: [
231 | {
232 | type: 'newline',
233 | filename: 'eval.edge',
234 | line: 4,
235 | },
236 | {
237 | type: 'raw',
238 | filename: 'eval.edge',
239 | value: ' Hi',
240 | line: 5,
241 | },
242 | ],
243 | },
244 | ],
245 | },
246 | ])
247 | })
248 |
249 | test('parse when statement is in multiple lines', ({ assert }) => {
250 | const template = dedent`
251 | Hello
252 |
253 | @if(
254 | username
255 | )
256 | Hello
257 | @endif
258 | `
259 |
260 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
261 | tokenizer.parse()
262 |
263 | assert.isNull(tokenizer.tagStatement)
264 | assert.deepEqual(tokenizer.tokens, [
265 | {
266 | type: 'raw',
267 | filename: 'eval.edge',
268 | value: 'Hello',
269 | line: 1,
270 | },
271 | {
272 | type: 'newline',
273 | filename: 'eval.edge',
274 | line: 1,
275 | },
276 | {
277 | type: 'raw',
278 | filename: 'eval.edge',
279 | value: '',
280 | line: 2,
281 | },
282 | {
283 | filename: 'eval.edge',
284 | type: TagTypes.TAG,
285 | loc: {
286 | start: {
287 | line: 3,
288 | col: 4,
289 | },
290 | end: {
291 | line: 5,
292 | col: 1,
293 | },
294 | },
295 | properties: {
296 | name: 'if',
297 | jsArg: '\n username\n',
298 | selfclosed: false,
299 | },
300 | children: [
301 | {
302 | type: 'newline',
303 | filename: 'eval.edge',
304 | line: 5,
305 | },
306 | {
307 | type: 'raw',
308 | filename: 'eval.edge',
309 | value: ' Hello',
310 | line: 6,
311 | },
312 | ],
313 | },
314 | ])
315 | })
316 |
317 | test('parse when statement is in multiple lines and has internal parens too', ({ assert }) => {
318 | const template = dedent`
319 | Hello
320 |
321 | @if((
322 | 2 + 2) * 3 === 12
323 | )
324 | Answer is 12
325 | @endif
326 | `
327 |
328 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
329 | tokenizer.parse()
330 |
331 | assert.isNull(tokenizer.tagStatement)
332 | assert.deepEqual(tokenizer.tokens, [
333 | {
334 | type: 'raw',
335 | filename: 'eval.edge',
336 | value: 'Hello',
337 | line: 1,
338 | },
339 | {
340 | type: 'newline',
341 | filename: 'eval.edge',
342 | line: 1,
343 | },
344 | {
345 | type: 'raw',
346 | filename: 'eval.edge',
347 | value: '',
348 | line: 2,
349 | },
350 | {
351 | filename: 'eval.edge',
352 | type: TagTypes.TAG,
353 | loc: {
354 | start: {
355 | line: 3,
356 | col: 4,
357 | },
358 | end: {
359 | line: 5,
360 | col: 1,
361 | },
362 | },
363 | properties: {
364 | name: 'if',
365 | jsArg: '(\n 2 + 2) * 3 === 12\n',
366 | selfclosed: false,
367 | },
368 | children: [
369 | {
370 | type: 'newline',
371 | filename: 'eval.edge',
372 | line: 5,
373 | },
374 | {
375 | type: 'raw',
376 | filename: 'eval.edge',
377 | value: ' Answer is 12',
378 | line: 6,
379 | },
380 | ],
381 | },
382 | ])
383 | })
384 |
385 | test('parse inline tags', ({ assert }) => {
386 | const template = dedent`@include('partials.user')`
387 |
388 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
389 | tokenizer.parse()
390 |
391 | assert.isNull(tokenizer.tagStatement)
392 | assert.deepEqual(tokenizer.tokens, [
393 | {
394 | filename: 'eval.edge',
395 | type: TagTypes.TAG,
396 | loc: {
397 | start: {
398 | line: 1,
399 | col: 9,
400 | },
401 | end: {
402 | line: 1,
403 | col: 25,
404 | },
405 | },
406 | properties: {
407 | name: 'include',
408 | jsArg: "'partials.user'",
409 | selfclosed: false,
410 | },
411 | children: [],
412 | },
413 | ])
414 | })
415 |
416 | test('parse inline tags which are not seekable', ({ assert }) => {
417 | const template = dedent`
418 | @if(username)
419 | Hello
420 | @else
421 | Hello guest
422 | @endif
423 | `
424 |
425 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
426 | tokenizer.parse()
427 |
428 | assert.isNull(tokenizer.tagStatement)
429 | assert.deepEqual(tokenizer.tokens, [
430 | {
431 | type: TagTypes.TAG,
432 | filename: 'eval.edge',
433 | loc: {
434 | start: {
435 | line: 1,
436 | col: 4,
437 | },
438 | end: {
439 | line: 1,
440 | col: 13,
441 | },
442 | },
443 | properties: {
444 | name: 'if',
445 | jsArg: 'username',
446 | selfclosed: false,
447 | },
448 | children: [
449 | {
450 | type: 'newline',
451 | filename: 'eval.edge',
452 | line: 1,
453 | },
454 | {
455 | type: 'raw',
456 | filename: 'eval.edge',
457 | value: ' Hello',
458 | line: 2,
459 | },
460 | {
461 | filename: 'eval.edge',
462 | type: TagTypes.TAG,
463 | loc: {
464 | start: {
465 | line: 3,
466 | col: 5,
467 | },
468 | end: {
469 | line: 3,
470 | col: 5,
471 | },
472 | },
473 | properties: {
474 | name: 'else',
475 | jsArg: '',
476 | selfclosed: false,
477 | },
478 | children: [],
479 | },
480 | {
481 | type: 'newline',
482 | filename: 'eval.edge',
483 | line: 3,
484 | },
485 | {
486 | type: 'raw',
487 | filename: 'eval.edge',
488 | value: ' Hello guest',
489 | line: 4,
490 | },
491 | ],
492 | },
493 | ])
494 | })
495 |
496 | test('ignore tag when not registered', ({ assert }) => {
497 | const template = dedent`
498 | @foo('hello world')
499 | `
500 |
501 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
502 | tokenizer.parse()
503 |
504 | assert.isNull(tokenizer.tagStatement)
505 | assert.deepEqual(tokenizer.tokens, [
506 | {
507 | type: 'raw',
508 | filename: 'eval.edge',
509 | value: "@foo('hello world')",
510 | line: 1,
511 | },
512 | ])
513 | })
514 |
515 | test('ignore tag when escaped', ({ assert }) => {
516 | const template = dedent`@@if(username)
517 | @endif
518 | `
519 |
520 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
521 | tokenizer.parse()
522 |
523 | assert.isNull(tokenizer.tagStatement)
524 | assert.deepEqual(tokenizer.tokens, [
525 | {
526 | filename: 'eval.edge',
527 | type: TagTypes.ETAG,
528 | properties: {
529 | name: 'if',
530 | jsArg: 'username',
531 | selfclosed: false,
532 | },
533 | loc: {
534 | start: {
535 | line: 1,
536 | col: 5,
537 | },
538 | end: {
539 | line: 1,
540 | col: 14,
541 | },
542 | },
543 | children: [],
544 | },
545 | ])
546 | })
547 |
548 | test('throw exception when tag is still seeking', ({ assert }) => {
549 | assert.plan(2)
550 |
551 | const template = dedent`@if((2 + 2)
552 | @endif`
553 |
554 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
555 | try {
556 | tokenizer.parse()
557 | } catch ({ message, line }) {
558 | assert.equal(message, 'Missing token ")"')
559 | assert.equal(line, 1)
560 | }
561 | })
562 |
563 | test('throw exception when there is inline content in the tag opening statement', ({
564 | assert,
565 | }) => {
566 | assert.plan(3)
567 | const template = dedent`@include('foo') hello world`
568 |
569 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
570 | try {
571 | tokenizer.parse()
572 | } catch ({ message, line, col }) {
573 | assert.equal(message, 'Unexpected token " hello world"')
574 | assert.equal(line, 1)
575 | assert.equal(col, 15)
576 | }
577 | })
578 |
579 | test('throw exception when opening brace is in a different line', ({ assert }) => {
580 | assert.plan(3)
581 | const template = dedent`
582 | @if
583 | (
584 | username
585 | )
586 | @endif
587 | `
588 |
589 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
590 | try {
591 | tokenizer.parse()
592 | } catch ({ message, line, col }) {
593 | assert.equal(message, 'Missing token "("')
594 | assert.equal(line, 1)
595 | assert.equal(col, 3)
596 | }
597 | })
598 |
599 | test('do not raise exception when tag is not seekable and has no parens', ({ assert }) => {
600 | const template = dedent`
601 | @else
602 | `
603 |
604 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
605 | tokenizer.parse()
606 | assert.deepEqual(tokenizer.tokens, [
607 | {
608 | filename: 'eval.edge',
609 | type: TagTypes.TAG,
610 | properties: {
611 | name: 'else',
612 | jsArg: '',
613 | selfclosed: false,
614 | },
615 | loc: {
616 | start: {
617 | line: 1,
618 | col: 5,
619 | },
620 | end: {
621 | line: 1,
622 | col: 5,
623 | },
624 | },
625 | children: [],
626 | },
627 | ])
628 | })
629 |
630 | test('consume one liner inline tag', ({ assert }) => {
631 | const template = "@include('header')"
632 |
633 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
634 | tokenizer.parse()
635 |
636 | assert.isNull(tokenizer.tagStatement)
637 | assert.deepEqual(tokenizer.tokens, [
638 | {
639 | filename: 'eval.edge',
640 | type: TagTypes.TAG,
641 | loc: {
642 | start: {
643 | line: 1,
644 | col: 9,
645 | },
646 | end: {
647 | line: 1,
648 | col: 18,
649 | },
650 | },
651 | properties: {
652 | name: 'include',
653 | jsArg: "'header'",
654 | selfclosed: false,
655 | },
656 | children: [],
657 | },
658 | ])
659 | })
660 |
661 | test('throw exception when there are unclosed tags', ({ assert }) => {
662 | const template = dedent`
663 | @if(username)
664 | Hello world
665 | `
666 |
667 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
668 | const fn = () => tokenizer.parse()
669 | assert.throws(fn, 'Unclosed tag if')
670 | })
671 |
672 | test('throw exception when there are unclosed nested tags', ({ assert }) => {
673 | const template = dedent`
674 | @if(username)
675 | @each(user in users)
676 | @endif
677 | `
678 |
679 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
680 | const fn = () => tokenizer.parse()
681 | assert.throws(fn, 'Unclosed tag each')
682 | })
683 |
684 | test('work fine if a tag is self closed', ({ assert }) => {
685 | const template = dedent`
686 | @!each(user in users, include = 'user')
687 | `
688 |
689 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
690 | tokenizer.parse()
691 |
692 | assert.isNull(tokenizer.tagStatement)
693 | assert.deepEqual(tokenizer.tokens, [
694 | {
695 | filename: 'eval.edge',
696 | type: TagTypes.TAG,
697 | loc: {
698 | start: {
699 | line: 1,
700 | col: 7,
701 | },
702 | end: {
703 | line: 1,
704 | col: 39,
705 | },
706 | },
707 | properties: {
708 | name: 'each',
709 | jsArg: "user in users, include = 'user'",
710 | selfclosed: true,
711 | },
712 | children: [],
713 | },
714 | ])
715 | })
716 |
717 | test('work fine when bang is defined in tag jsArg', ({ assert }) => {
718 | const template = dedent`
719 | @if(!user)
720 | @endif
721 | `
722 |
723 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
724 | tokenizer.parse()
725 |
726 | assert.isNull(tokenizer.tagStatement)
727 | assert.deepEqual(tokenizer.tokens, [
728 | {
729 | filename: 'eval.edge',
730 | type: TagTypes.TAG,
731 | loc: {
732 | start: {
733 | line: 1,
734 | col: 4,
735 | },
736 | end: {
737 | line: 1,
738 | col: 10,
739 | },
740 | },
741 | properties: {
742 | name: 'if',
743 | jsArg: '!user',
744 | selfclosed: false,
745 | },
746 | children: [],
747 | },
748 | ])
749 | })
750 |
751 | test('remove newline after the tag', ({ assert }) => {
752 | const template = dedent`
753 | Hello
754 |
755 | @if(username)~
756 | Hello
757 | @endif
758 | `
759 |
760 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
761 | tokenizer.parse()
762 |
763 | assert.isNull(tokenizer.tagStatement)
764 | assert.deepEqual(tokenizer.tokens, [
765 | {
766 | type: 'raw',
767 | filename: 'eval.edge',
768 | value: 'Hello',
769 | line: 1,
770 | },
771 | {
772 | type: 'newline',
773 | filename: 'eval.edge',
774 | line: 1,
775 | },
776 | {
777 | type: 'raw',
778 | filename: 'eval.edge',
779 | value: '',
780 | line: 2,
781 | },
782 | {
783 | type: TagTypes.TAG,
784 | filename: 'eval.edge',
785 | loc: {
786 | start: {
787 | line: 3,
788 | col: 4,
789 | },
790 | end: {
791 | line: 3,
792 | col: 13,
793 | },
794 | },
795 | properties: {
796 | name: 'if',
797 | jsArg: 'username',
798 | selfclosed: false,
799 | },
800 | children: [
801 | {
802 | type: 'raw',
803 | filename: 'eval.edge',
804 | value: ' Hello',
805 | line: 4,
806 | },
807 | ],
808 | },
809 | ])
810 | })
811 |
812 | test('remove newline after the tag spanned over multiple lines', ({ assert }) => {
813 | const template = dedent`
814 | Hello
815 |
816 | @if(
817 | username
818 | )~
819 | Hello
820 | @endif
821 | `
822 |
823 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
824 | tokenizer.parse()
825 |
826 | assert.isNull(tokenizer.tagStatement)
827 | assert.deepEqual(tokenizer.tokens, [
828 | {
829 | type: 'raw',
830 | filename: 'eval.edge',
831 | value: 'Hello',
832 | line: 1,
833 | },
834 | {
835 | type: 'newline',
836 | filename: 'eval.edge',
837 | line: 1,
838 | },
839 | {
840 | type: 'raw',
841 | filename: 'eval.edge',
842 | value: '',
843 | line: 2,
844 | },
845 | {
846 | type: TagTypes.TAG,
847 | filename: 'eval.edge',
848 | loc: {
849 | start: {
850 | line: 3,
851 | col: 4,
852 | },
853 | end: {
854 | line: 5,
855 | col: 1,
856 | },
857 | },
858 | properties: {
859 | name: 'if',
860 | jsArg: '\n username\n',
861 | selfclosed: false,
862 | },
863 | children: [
864 | {
865 | type: 'raw',
866 | filename: 'eval.edge',
867 | value: ' Hello',
868 | line: 6,
869 | },
870 | ],
871 | },
872 | ])
873 | })
874 |
875 | test('remove newline between two tags', ({ assert }) => {
876 | const template = dedent`
877 | Hello
878 |
879 | @if(username)~
880 | @if(age)
881 | Hello
882 | @endif
883 | @endif
884 | `
885 |
886 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
887 | tokenizer.parse()
888 |
889 | assert.isNull(tokenizer.tagStatement)
890 | assert.deepEqual(tokenizer.tokens, [
891 | {
892 | type: 'raw',
893 | filename: 'eval.edge',
894 | value: 'Hello',
895 | line: 1,
896 | },
897 | {
898 | type: 'newline',
899 | filename: 'eval.edge',
900 | line: 1,
901 | },
902 | {
903 | type: 'raw',
904 | filename: 'eval.edge',
905 | value: '',
906 | line: 2,
907 | },
908 | {
909 | type: TagTypes.TAG,
910 | filename: 'eval.edge',
911 | loc: {
912 | start: {
913 | line: 3,
914 | col: 4,
915 | },
916 | end: {
917 | line: 3,
918 | col: 13,
919 | },
920 | },
921 | properties: {
922 | name: 'if',
923 | jsArg: 'username',
924 | selfclosed: false,
925 | },
926 | children: [
927 | {
928 | type: TagTypes.TAG,
929 | filename: 'eval.edge',
930 | loc: {
931 | start: {
932 | line: 4,
933 | col: 6,
934 | },
935 | end: {
936 | line: 4,
937 | col: 10,
938 | },
939 | },
940 | properties: {
941 | name: 'if',
942 | jsArg: 'age',
943 | selfclosed: false,
944 | },
945 | children: [
946 | {
947 | type: 'newline',
948 | filename: 'eval.edge',
949 | line: 4,
950 | },
951 | {
952 | type: 'raw',
953 | filename: 'eval.edge',
954 | value: ' Hello',
955 | line: 5,
956 | },
957 | ],
958 | },
959 | ],
960 | },
961 | ])
962 | })
963 |
964 | test('remove newline between two tags when spanned over multiple lines', ({ assert }) => {
965 | const template = dedent`
966 | Hello
967 |
968 | @if(
969 | username
970 | )~
971 | @if(age)
972 | Hello
973 | @endif
974 | @endif
975 | `
976 |
977 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
978 | tokenizer.parse()
979 |
980 | assert.isNull(tokenizer.tagStatement)
981 | assert.deepEqual(tokenizer.tokens, [
982 | {
983 | type: 'raw',
984 | filename: 'eval.edge',
985 | value: 'Hello',
986 | line: 1,
987 | },
988 | {
989 | type: 'newline',
990 | filename: 'eval.edge',
991 | line: 1,
992 | },
993 | {
994 | type: 'raw',
995 | filename: 'eval.edge',
996 | value: '',
997 | line: 2,
998 | },
999 | {
1000 | type: TagTypes.TAG,
1001 | filename: 'eval.edge',
1002 | loc: {
1003 | start: {
1004 | line: 3,
1005 | col: 4,
1006 | },
1007 | end: {
1008 | line: 5,
1009 | col: 1,
1010 | },
1011 | },
1012 | properties: {
1013 | name: 'if',
1014 | jsArg: '\n username\n',
1015 | selfclosed: false,
1016 | },
1017 | children: [
1018 | {
1019 | type: TagTypes.TAG,
1020 | filename: 'eval.edge',
1021 | loc: {
1022 | start: {
1023 | line: 6,
1024 | col: 6,
1025 | },
1026 | end: {
1027 | line: 6,
1028 | col: 10,
1029 | },
1030 | },
1031 | properties: {
1032 | name: 'if',
1033 | jsArg: 'age',
1034 | selfclosed: false,
1035 | },
1036 | children: [
1037 | {
1038 | type: 'newline',
1039 | filename: 'eval.edge',
1040 | line: 6,
1041 | },
1042 | {
1043 | type: 'raw',
1044 | filename: 'eval.edge',
1045 | value: ' Hello',
1046 | line: 7,
1047 | },
1048 | ],
1049 | },
1050 | ],
1051 | },
1052 | ])
1053 | })
1054 |
1055 | test('remove newline after the tag when tag has noNewLine property', ({ assert }) => {
1056 | const template = dedent`
1057 | Hello
1058 |
1059 | @if(username)
1060 | Hello
1061 | @endif
1062 | `
1063 |
1064 | const tags = {
1065 | if: class If {
1066 | static block = true
1067 | static seekable = true
1068 | static noNewLine = true
1069 | },
1070 | }
1071 |
1072 | const tokenizer = new Tokenizer(template, tags, { filename: 'eval.edge' })
1073 | tokenizer.parse()
1074 |
1075 | assert.isNull(tokenizer.tagStatement)
1076 | assert.deepEqual(tokenizer.tokens, [
1077 | {
1078 | type: 'raw',
1079 | filename: 'eval.edge',
1080 | value: 'Hello',
1081 | line: 1,
1082 | },
1083 | {
1084 | type: 'newline',
1085 | filename: 'eval.edge',
1086 | line: 1,
1087 | },
1088 | {
1089 | type: 'raw',
1090 | filename: 'eval.edge',
1091 | value: '',
1092 | line: 2,
1093 | },
1094 | {
1095 | type: TagTypes.TAG,
1096 | filename: 'eval.edge',
1097 | loc: {
1098 | start: {
1099 | line: 3,
1100 | col: 4,
1101 | },
1102 | end: {
1103 | line: 3,
1104 | col: 13,
1105 | },
1106 | },
1107 | properties: {
1108 | name: 'if',
1109 | jsArg: 'username',
1110 | selfclosed: false,
1111 | },
1112 | children: [
1113 | {
1114 | type: 'raw',
1115 | filename: 'eval.edge',
1116 | value: ' Hello',
1117 | line: 4,
1118 | },
1119 | ],
1120 | },
1121 | ])
1122 | })
1123 |
1124 | test('remove newline between two tags when tag has noNewLine property', ({ assert }) => {
1125 | const template = dedent`
1126 | Hello
1127 |
1128 | @if(
1129 | username
1130 | )
1131 | @if(age)
1132 | Hello
1133 | @endif
1134 | @endif
1135 | `
1136 |
1137 | const tags = {
1138 | if: class If {
1139 | static block = true
1140 | static seekable = true
1141 | static noNewLine = true
1142 | },
1143 | }
1144 |
1145 | const tokenizer = new Tokenizer(template, tags, { filename: 'eval.edge' })
1146 | tokenizer.parse()
1147 |
1148 | assert.isNull(tokenizer.tagStatement)
1149 | assert.deepEqual(tokenizer.tokens, [
1150 | {
1151 | type: 'raw',
1152 | filename: 'eval.edge',
1153 | value: 'Hello',
1154 | line: 1,
1155 | },
1156 | {
1157 | type: 'newline',
1158 | filename: 'eval.edge',
1159 | line: 1,
1160 | },
1161 | {
1162 | type: 'raw',
1163 | filename: 'eval.edge',
1164 | value: '',
1165 | line: 2,
1166 | },
1167 | {
1168 | type: TagTypes.TAG,
1169 | filename: 'eval.edge',
1170 | loc: {
1171 | start: {
1172 | line: 3,
1173 | col: 4,
1174 | },
1175 | end: {
1176 | line: 5,
1177 | col: 1,
1178 | },
1179 | },
1180 | properties: {
1181 | name: 'if',
1182 | jsArg: '\n username\n',
1183 | selfclosed: false,
1184 | },
1185 | children: [
1186 | {
1187 | type: TagTypes.TAG,
1188 | filename: 'eval.edge',
1189 | loc: {
1190 | start: {
1191 | line: 6,
1192 | col: 6,
1193 | },
1194 | end: {
1195 | line: 6,
1196 | col: 10,
1197 | },
1198 | },
1199 | properties: {
1200 | name: 'if',
1201 | jsArg: 'age',
1202 | selfclosed: false,
1203 | },
1204 | children: [
1205 | {
1206 | type: 'raw',
1207 | filename: 'eval.edge',
1208 | value: ' Hello',
1209 | line: 7,
1210 | },
1211 | ],
1212 | },
1213 | ],
1214 | },
1215 | ])
1216 | })
1217 |
1218 | test('remove newline after the endblock', ({ assert }) => {
1219 | const template = dedent`
1220 | @if(username)~
1221 | Hello
1222 | @endif~
1223 | world
1224 | `
1225 |
1226 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1227 | tokenizer.parse()
1228 |
1229 | assert.isNull(tokenizer.tagStatement)
1230 | assert.deepEqual(tokenizer.tokens, [
1231 | {
1232 | type: TagTypes.TAG,
1233 | filename: 'eval.edge',
1234 | loc: {
1235 | start: {
1236 | line: 1,
1237 | col: 4,
1238 | },
1239 | end: {
1240 | line: 1,
1241 | col: 13,
1242 | },
1243 | },
1244 | properties: {
1245 | name: 'if',
1246 | jsArg: 'username',
1247 | selfclosed: false,
1248 | },
1249 | children: [
1250 | {
1251 | type: 'raw',
1252 | filename: 'eval.edge',
1253 | value: ' Hello',
1254 | line: 2,
1255 | },
1256 | ],
1257 | },
1258 | {
1259 | type: 'raw',
1260 | filename: 'eval.edge',
1261 | value: 'world',
1262 | line: 4,
1263 | },
1264 | ])
1265 | })
1266 |
1267 | test('remove newline with non-seekable tag', ({ assert }) => {
1268 | const template = dedent`
1269 | Hello
1270 | @include~
1271 | Hi
1272 | `
1273 |
1274 | const tags = {
1275 | include: class Include {
1276 | static block = false
1277 | static seekable = false
1278 | static noNewLine = false
1279 | },
1280 | }
1281 |
1282 | const tokenizer = new Tokenizer(template, tags, { filename: 'eval.edge' })
1283 | tokenizer.parse()
1284 |
1285 | assert.isNull(tokenizer.tagStatement)
1286 | assert.deepEqual(tokenizer.tokens, [
1287 | {
1288 | type: 'raw',
1289 | filename: 'eval.edge',
1290 | value: 'Hello',
1291 | line: 1,
1292 | },
1293 | {
1294 | type: TagTypes.TAG,
1295 | filename: 'eval.edge',
1296 | children: [],
1297 | loc: {
1298 | start: {
1299 | line: 2,
1300 | col: 8,
1301 | },
1302 | end: {
1303 | line: 2,
1304 | col: 8,
1305 | },
1306 | },
1307 | properties: {
1308 | name: 'include',
1309 | jsArg: '',
1310 | selfclosed: false,
1311 | },
1312 | },
1313 | {
1314 | type: 'raw',
1315 | filename: 'eval.edge',
1316 | value: 'Hi',
1317 | line: 3,
1318 | },
1319 | ])
1320 | })
1321 |
1322 | test('transform lines using onLine method', ({ assert }) => {
1323 | const template = dedent`
1324 | @ui.form()
1325 | Hello
1326 | @end
1327 | world
1328 | `
1329 |
1330 | const tokenizer = new Tokenizer(
1331 | template,
1332 | {
1333 | component: { block: true, seekable: true },
1334 | },
1335 | {
1336 | filename: 'eval.edge',
1337 | onLine(line: string) {
1338 | return line.trim() === '@ui.form()' ? `@component('ui.form')` : line
1339 | },
1340 | }
1341 | )
1342 | tokenizer.parse()
1343 |
1344 | assert.isNull(tokenizer.tagStatement)
1345 | assert.deepEqual(tokenizer.tokens, [
1346 | {
1347 | type: TagTypes.TAG,
1348 | filename: 'eval.edge',
1349 | loc: {
1350 | start: {
1351 | line: 1,
1352 | col: 11,
1353 | },
1354 | end: {
1355 | line: 1,
1356 | col: 21,
1357 | },
1358 | },
1359 | properties: {
1360 | name: 'component',
1361 | jsArg: `'ui.form'`,
1362 | selfclosed: false,
1363 | },
1364 | children: [
1365 | {
1366 | type: 'newline',
1367 | filename: 'eval.edge',
1368 | line: 1,
1369 | },
1370 | {
1371 | type: 'raw',
1372 | filename: 'eval.edge',
1373 | value: ' Hello',
1374 | line: 2,
1375 | },
1376 | ],
1377 | },
1378 | {
1379 | type: 'newline',
1380 | filename: 'eval.edge',
1381 | line: 3,
1382 | },
1383 | {
1384 | type: 'raw',
1385 | filename: 'eval.edge',
1386 | value: 'world',
1387 | line: 4,
1388 | },
1389 | ])
1390 | })
1391 |
1392 | test('allow claiming tags', ({ assert }) => {
1393 | const template = dedent`
1394 | Hello
1395 |
1396 | @hl.modal(username)
1397 | @end
1398 | `
1399 |
1400 | const tokenizer = new Tokenizer(
1401 | template,
1402 | {},
1403 | {
1404 | filename: 'eval.edge',
1405 | claimTag: (name) => {
1406 | if (name === 'hl.modal') {
1407 | return { seekable: true, block: true }
1408 | }
1409 | return null
1410 | },
1411 | }
1412 | )
1413 | tokenizer.parse()
1414 |
1415 | assert.isNull(tokenizer.tagStatement)
1416 | assert.deepEqual(tokenizer.tokens, [
1417 | {
1418 | type: 'raw',
1419 | filename: 'eval.edge',
1420 | value: 'Hello',
1421 | line: 1,
1422 | },
1423 | {
1424 | type: 'newline',
1425 | filename: 'eval.edge',
1426 | line: 1,
1427 | },
1428 | {
1429 | type: 'raw',
1430 | filename: 'eval.edge',
1431 | value: '',
1432 | line: 2,
1433 | },
1434 | {
1435 | filename: 'eval.edge',
1436 | type: TagTypes.TAG,
1437 | properties: {
1438 | name: 'hl.modal',
1439 | jsArg: 'username',
1440 | selfclosed: false,
1441 | },
1442 | loc: {
1443 | start: {
1444 | line: 3,
1445 | col: 10,
1446 | },
1447 | end: {
1448 | line: 3,
1449 | col: 19,
1450 | },
1451 | },
1452 | children: [],
1453 | },
1454 | ])
1455 | })
1456 | })
1457 |
1458 | test.group('Tokenizer columns', () => {
1459 | test('track whitespaces before the opening parenthesis', ({ assert }) => {
1460 | const template = dedent`
1461 | Hello
1462 |
1463 | @if (username)
1464 | @endif
1465 | `
1466 |
1467 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1468 | tokenizer.parse()
1469 |
1470 | assert.isNull(tokenizer.tagStatement)
1471 | assert.deepEqual(tokenizer.tokens, [
1472 | {
1473 | type: 'raw',
1474 | filename: 'eval.edge',
1475 | value: 'Hello',
1476 | line: 1,
1477 | },
1478 | {
1479 | type: 'newline',
1480 | filename: 'eval.edge',
1481 | line: 1,
1482 | },
1483 | {
1484 | type: 'raw',
1485 | filename: 'eval.edge',
1486 | value: '',
1487 | line: 2,
1488 | },
1489 | {
1490 | filename: 'eval.edge',
1491 | type: TagTypes.TAG,
1492 | properties: {
1493 | name: 'if',
1494 | jsArg: 'username',
1495 | selfclosed: false,
1496 | },
1497 | loc: {
1498 | start: {
1499 | line: 3,
1500 | col: 6,
1501 | },
1502 | end: {
1503 | line: 3,
1504 | col: 15,
1505 | },
1506 | },
1507 | children: [],
1508 | },
1509 | ])
1510 | })
1511 |
1512 | test('do not track whitespaces before the closing parenthesis', ({ assert }) => {
1513 | const template = dedent`
1514 | Hello
1515 |
1516 | @if(username )
1517 | @endif
1518 | `
1519 |
1520 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1521 | tokenizer.parse()
1522 |
1523 | assert.isNull(tokenizer.tagStatement)
1524 | assert.deepEqual(tokenizer.tokens, [
1525 | {
1526 | type: 'raw',
1527 | filename: 'eval.edge',
1528 | value: 'Hello',
1529 | line: 1,
1530 | },
1531 | {
1532 | type: 'newline',
1533 | filename: 'eval.edge',
1534 | line: 1,
1535 | },
1536 | {
1537 | type: 'raw',
1538 | filename: 'eval.edge',
1539 | value: '',
1540 | line: 2,
1541 | },
1542 | {
1543 | filename: 'eval.edge',
1544 | type: TagTypes.TAG,
1545 | properties: {
1546 | name: 'if',
1547 | jsArg: 'username ',
1548 | selfclosed: false,
1549 | },
1550 | loc: {
1551 | start: {
1552 | line: 3,
1553 | col: 4,
1554 | },
1555 | end: {
1556 | line: 3,
1557 | col: 15,
1558 | },
1559 | },
1560 | children: [],
1561 | },
1562 | ])
1563 | })
1564 |
1565 | test('track whitespaces before the starting of tag', ({ assert }) => {
1566 | const template = dedent`
1567 | Hello
1568 |
1569 | @if(username)
1570 | @endif
1571 | `
1572 |
1573 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1574 | tokenizer.parse()
1575 |
1576 | assert.isNull(tokenizer.tagStatement)
1577 | assert.deepEqual(tokenizer.tokens, [
1578 | {
1579 | type: 'raw',
1580 | filename: 'eval.edge',
1581 | value: 'Hello',
1582 | line: 1,
1583 | },
1584 | {
1585 | type: 'newline',
1586 | filename: 'eval.edge',
1587 | line: 1,
1588 | },
1589 | {
1590 | type: 'raw',
1591 | filename: 'eval.edge',
1592 | value: '',
1593 | line: 2,
1594 | },
1595 | {
1596 | filename: 'eval.edge',
1597 | type: TagTypes.TAG,
1598 | properties: {
1599 | name: 'if',
1600 | jsArg: 'username',
1601 | selfclosed: false,
1602 | },
1603 | loc: {
1604 | start: {
1605 | line: 3,
1606 | col: 6,
1607 | },
1608 | end: {
1609 | line: 3,
1610 | col: 15,
1611 | },
1612 | },
1613 | children: [],
1614 | },
1615 | ])
1616 | })
1617 |
1618 | test('track columns for multiline expression', ({ assert }) => {
1619 | const template = dedent`
1620 | Hello
1621 |
1622 | @if(
1623 | username && age
1624 | )
1625 | @endif
1626 | `
1627 |
1628 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1629 | tokenizer.parse()
1630 |
1631 | assert.isNull(tokenizer.tagStatement)
1632 | assert.deepEqual(tokenizer.tokens, [
1633 | {
1634 | type: 'raw',
1635 | filename: 'eval.edge',
1636 | value: 'Hello',
1637 | line: 1,
1638 | },
1639 | {
1640 | type: 'newline',
1641 | filename: 'eval.edge',
1642 | line: 1,
1643 | },
1644 | {
1645 | type: 'raw',
1646 | filename: 'eval.edge',
1647 | value: '',
1648 | line: 2,
1649 | },
1650 | {
1651 | filename: 'eval.edge',
1652 | type: TagTypes.TAG,
1653 | properties: {
1654 | name: 'if',
1655 | jsArg: '\n username && age\n',
1656 | selfclosed: false,
1657 | },
1658 | loc: {
1659 | start: {
1660 | line: 3,
1661 | col: 4,
1662 | },
1663 | end: {
1664 | line: 5,
1665 | col: 1,
1666 | },
1667 | },
1668 | children: [],
1669 | },
1670 | ])
1671 | })
1672 |
1673 | test('track columns for mustache statement', ({ assert }) => {
1674 | const template = dedent`
1675 | Hello {{ username }}
1676 | `
1677 |
1678 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1679 | tokenizer.parse()
1680 |
1681 | assert.isNull(tokenizer.tagStatement)
1682 | assert.deepEqual(tokenizer.tokens, [
1683 | {
1684 | type: 'raw',
1685 | filename: 'eval.edge',
1686 | value: 'Hello ',
1687 | line: 1,
1688 | },
1689 | {
1690 | filename: 'eval.edge',
1691 | type: MustacheTypes.MUSTACHE,
1692 | properties: {
1693 | jsArg: ' username ',
1694 | },
1695 | loc: {
1696 | start: {
1697 | line: 1,
1698 | col: 8,
1699 | },
1700 | end: {
1701 | line: 1,
1702 | col: 20,
1703 | },
1704 | },
1705 | },
1706 | ])
1707 | })
1708 |
1709 | test('track columns for multiple mustache statements', ({ assert }) => {
1710 | const template = dedent`
1711 | Hello {{ username }}, your age is {{ age }}
1712 | `
1713 |
1714 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1715 | tokenizer.parse()
1716 |
1717 | assert.isNull(tokenizer.mustacheStatement)
1718 | assert.deepEqual(tokenizer.tokens, [
1719 | {
1720 | type: 'raw',
1721 | filename: 'eval.edge',
1722 | value: 'Hello ',
1723 | line: 1,
1724 | },
1725 | {
1726 | filename: 'eval.edge',
1727 | type: MustacheTypes.MUSTACHE,
1728 | properties: {
1729 | jsArg: ' username ',
1730 | },
1731 | loc: {
1732 | start: {
1733 | line: 1,
1734 | col: 8,
1735 | },
1736 | end: {
1737 | line: 1,
1738 | col: 20,
1739 | },
1740 | },
1741 | },
1742 | {
1743 | type: 'raw',
1744 | filename: 'eval.edge',
1745 | value: ', your age is ',
1746 | line: 1,
1747 | },
1748 | {
1749 | filename: 'eval.edge',
1750 | type: MustacheTypes.MUSTACHE,
1751 | properties: {
1752 | jsArg: ' age ',
1753 | },
1754 | loc: {
1755 | start: {
1756 | line: 1,
1757 | col: 36,
1758 | },
1759 | end: {
1760 | line: 1,
1761 | col: 43,
1762 | },
1763 | },
1764 | },
1765 | ])
1766 | })
1767 |
1768 | test('track columns for multiline mustache statements', ({ assert }) => {
1769 | const template = dedent`
1770 | Hello {{
1771 | username
1772 | }}, your age is {{ age }}
1773 | `
1774 |
1775 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1776 | tokenizer.parse()
1777 |
1778 | assert.isNull(tokenizer.tagStatement)
1779 | assert.deepEqual(tokenizer.tokens, [
1780 | {
1781 | type: 'raw',
1782 | filename: 'eval.edge',
1783 | value: 'Hello ',
1784 | line: 1,
1785 | },
1786 | {
1787 | filename: 'eval.edge',
1788 | type: MustacheTypes.MUSTACHE,
1789 | properties: {
1790 | jsArg: '\n username\n',
1791 | },
1792 | loc: {
1793 | start: {
1794 | line: 1,
1795 | col: 8,
1796 | },
1797 | end: {
1798 | line: 3,
1799 | col: 2,
1800 | },
1801 | },
1802 | },
1803 | {
1804 | type: 'raw',
1805 | filename: 'eval.edge',
1806 | value: ', your age is ',
1807 | line: 3,
1808 | },
1809 | {
1810 | filename: 'eval.edge',
1811 | type: MustacheTypes.MUSTACHE,
1812 | properties: {
1813 | jsArg: ' age ',
1814 | },
1815 | loc: {
1816 | start: {
1817 | line: 3,
1818 | col: 18,
1819 | },
1820 | end: {
1821 | line: 3,
1822 | col: 25,
1823 | },
1824 | },
1825 | },
1826 | ])
1827 | })
1828 |
1829 | test('track columns for multiline safe mustache statements', ({ assert }) => {
1830 | const template = dedent`
1831 | Hello {{{
1832 | username
1833 | }}}, your age is {{ age }}
1834 | `
1835 |
1836 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1837 | tokenizer.parse()
1838 |
1839 | assert.isNull(tokenizer.tagStatement)
1840 | assert.deepEqual(tokenizer.tokens, [
1841 | {
1842 | type: 'raw',
1843 | filename: 'eval.edge',
1844 | value: 'Hello ',
1845 | line: 1,
1846 | },
1847 | {
1848 | filename: 'eval.edge',
1849 | type: MustacheTypes.SMUSTACHE,
1850 | properties: {
1851 | jsArg: '\n username\n',
1852 | },
1853 | loc: {
1854 | start: {
1855 | line: 1,
1856 | col: 9,
1857 | },
1858 | end: {
1859 | line: 3,
1860 | col: 3,
1861 | },
1862 | },
1863 | },
1864 | {
1865 | type: 'raw',
1866 | filename: 'eval.edge',
1867 | value: ', your age is ',
1868 | line: 3,
1869 | },
1870 | {
1871 | filename: 'eval.edge',
1872 | type: MustacheTypes.MUSTACHE,
1873 | properties: {
1874 | jsArg: ' age ',
1875 | },
1876 | loc: {
1877 | start: {
1878 | line: 3,
1879 | col: 19,
1880 | },
1881 | end: {
1882 | line: 3,
1883 | col: 26,
1884 | },
1885 | },
1886 | },
1887 | ])
1888 | })
1889 | })
1890 |
--------------------------------------------------------------------------------
/tests/tokenizer_tags_generic_end.spec.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import dedent from 'dedent'
11 | import { test } from '@japa/runner'
12 |
13 | import { Tokenizer } from '../src/tokenizer.js'
14 | import { TagTypes, MustacheTypes } from '../src/enums.js'
15 |
16 | const tagsDef = {
17 | if: class If {
18 | static block = true
19 | static seekable = true
20 | },
21 | else: class Else {
22 | static block = false
23 | static seekable = false
24 | },
25 | include: class Include {
26 | static block = false
27 | static seekable = true
28 | },
29 | each: class Each {
30 | static block = true
31 | static seekable = true
32 | },
33 | }
34 |
35 | test.group('Tokenizer Tags | generic end', () => {
36 | test('tokenize a template into tokens', ({ assert }) => {
37 | const template = dedent`
38 | Hello
39 |
40 | @if(username)
41 | @end
42 | `
43 |
44 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
45 | tokenizer.parse()
46 |
47 | assert.isNull(tokenizer.tagStatement)
48 | assert.deepEqual(tokenizer.tokens, [
49 | {
50 | type: 'raw',
51 | filename: 'eval.edge',
52 | value: 'Hello',
53 | line: 1,
54 | },
55 | {
56 | type: 'newline',
57 | filename: 'eval.edge',
58 | line: 1,
59 | },
60 | {
61 | type: 'raw',
62 | filename: 'eval.edge',
63 | value: '',
64 | line: 2,
65 | },
66 | {
67 | filename: 'eval.edge',
68 | type: TagTypes.TAG,
69 | properties: {
70 | name: 'if',
71 | jsArg: 'username',
72 | selfclosed: false,
73 | },
74 | loc: {
75 | start: {
76 | line: 3,
77 | col: 4,
78 | },
79 | end: {
80 | line: 3,
81 | col: 13,
82 | },
83 | },
84 | children: [],
85 | },
86 | ])
87 | })
88 |
89 | test('add content inside tags as the tag children', ({ assert }) => {
90 | const template = dedent`
91 | Hello
92 |
93 | @if(username)
94 | Hello
95 | @end
96 | `
97 |
98 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
99 | tokenizer.parse()
100 |
101 | assert.isNull(tokenizer.tagStatement)
102 | assert.deepEqual(tokenizer.tokens, [
103 | {
104 | type: 'raw',
105 | filename: 'eval.edge',
106 | value: 'Hello',
107 | line: 1,
108 | },
109 | {
110 | type: 'newline',
111 | filename: 'eval.edge',
112 | line: 1,
113 | },
114 | {
115 | type: 'raw',
116 | filename: 'eval.edge',
117 | value: '',
118 | line: 2,
119 | },
120 | {
121 | type: TagTypes.TAG,
122 | filename: 'eval.edge',
123 | loc: {
124 | start: {
125 | line: 3,
126 | col: 4,
127 | },
128 | end: {
129 | line: 3,
130 | col: 13,
131 | },
132 | },
133 | properties: {
134 | name: 'if',
135 | jsArg: 'username',
136 | selfclosed: false,
137 | },
138 | children: [
139 | {
140 | type: 'newline',
141 | filename: 'eval.edge',
142 | line: 3,
143 | },
144 | {
145 | type: 'raw',
146 | filename: 'eval.edge',
147 | value: ' Hello',
148 | line: 4,
149 | },
150 | ],
151 | },
152 | ])
153 | })
154 |
155 | test('allow nested tags', ({ assert }) => {
156 | const template = dedent`
157 | Hello
158 |
159 | @if(username)
160 | @if(username === 'virk')
161 | Hi
162 | @end
163 | @end
164 | `
165 |
166 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
167 | tokenizer.parse()
168 |
169 | assert.isNull(tokenizer.tagStatement)
170 | assert.deepEqual(tokenizer.tokens, [
171 | {
172 | type: 'raw',
173 | filename: 'eval.edge',
174 | value: 'Hello',
175 | line: 1,
176 | },
177 | {
178 | type: 'newline',
179 | filename: 'eval.edge',
180 | line: 1,
181 | },
182 | {
183 | type: 'raw',
184 | filename: 'eval.edge',
185 | value: '',
186 | line: 2,
187 | },
188 | {
189 | filename: 'eval.edge',
190 | type: TagTypes.TAG,
191 | loc: {
192 | start: {
193 | line: 3,
194 | col: 4,
195 | },
196 | end: {
197 | line: 3,
198 | col: 13,
199 | },
200 | },
201 | properties: {
202 | name: 'if',
203 | jsArg: 'username',
204 | selfclosed: false,
205 | },
206 | children: [
207 | {
208 | type: 'newline',
209 | filename: 'eval.edge',
210 | line: 3,
211 | },
212 | {
213 | type: TagTypes.TAG,
214 | filename: 'eval.edge',
215 | loc: {
216 | start: {
217 | line: 4,
218 | col: 6,
219 | },
220 | end: {
221 | line: 4,
222 | col: 26,
223 | },
224 | },
225 | properties: {
226 | name: 'if',
227 | jsArg: "username === 'virk'",
228 | selfclosed: false,
229 | },
230 | children: [
231 | {
232 | type: 'newline',
233 | filename: 'eval.edge',
234 | line: 4,
235 | },
236 | {
237 | type: 'raw',
238 | filename: 'eval.edge',
239 | value: ' Hi',
240 | line: 5,
241 | },
242 | ],
243 | },
244 | ],
245 | },
246 | ])
247 | })
248 |
249 | test('parse when statement is in multiple lines', ({ assert }) => {
250 | const template = dedent`
251 | Hello
252 |
253 | @if(
254 | username
255 | )
256 | Hello
257 | @end
258 | `
259 |
260 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
261 | tokenizer.parse()
262 |
263 | assert.isNull(tokenizer.tagStatement)
264 | assert.deepEqual(tokenizer.tokens, [
265 | {
266 | type: 'raw',
267 | filename: 'eval.edge',
268 | value: 'Hello',
269 | line: 1,
270 | },
271 | {
272 | type: 'newline',
273 | filename: 'eval.edge',
274 | line: 1,
275 | },
276 | {
277 | type: 'raw',
278 | filename: 'eval.edge',
279 | value: '',
280 | line: 2,
281 | },
282 | {
283 | filename: 'eval.edge',
284 | type: TagTypes.TAG,
285 | loc: {
286 | start: {
287 | line: 3,
288 | col: 4,
289 | },
290 | end: {
291 | line: 5,
292 | col: 1,
293 | },
294 | },
295 | properties: {
296 | name: 'if',
297 | jsArg: '\n username\n',
298 | selfclosed: false,
299 | },
300 | children: [
301 | {
302 | type: 'newline',
303 | filename: 'eval.edge',
304 | line: 5,
305 | },
306 | {
307 | type: 'raw',
308 | filename: 'eval.edge',
309 | value: ' Hello',
310 | line: 6,
311 | },
312 | ],
313 | },
314 | ])
315 | })
316 |
317 | test('parse when statement is in multiple lines and has internal parens too', ({ assert }) => {
318 | const template = dedent`
319 | Hello
320 |
321 | @if((
322 | 2 + 2) * 3 === 12
323 | )
324 | Answer is 12
325 | @end
326 | `
327 |
328 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
329 | tokenizer.parse()
330 |
331 | assert.isNull(tokenizer.tagStatement)
332 | assert.deepEqual(tokenizer.tokens, [
333 | {
334 | type: 'raw',
335 | filename: 'eval.edge',
336 | value: 'Hello',
337 | line: 1,
338 | },
339 | {
340 | type: 'newline',
341 | filename: 'eval.edge',
342 | line: 1,
343 | },
344 | {
345 | type: 'raw',
346 | filename: 'eval.edge',
347 | value: '',
348 | line: 2,
349 | },
350 | {
351 | filename: 'eval.edge',
352 | type: TagTypes.TAG,
353 | loc: {
354 | start: {
355 | line: 3,
356 | col: 4,
357 | },
358 | end: {
359 | line: 5,
360 | col: 1,
361 | },
362 | },
363 | properties: {
364 | name: 'if',
365 | jsArg: '(\n 2 + 2) * 3 === 12\n',
366 | selfclosed: false,
367 | },
368 | children: [
369 | {
370 | type: 'newline',
371 | filename: 'eval.edge',
372 | line: 5,
373 | },
374 | {
375 | type: 'raw',
376 | filename: 'eval.edge',
377 | value: ' Answer is 12',
378 | line: 6,
379 | },
380 | ],
381 | },
382 | ])
383 | })
384 |
  // An inline (non-block) tag such as @include yields a single TAG token with
  // an empty children array. loc.start.col points just past the opening paren
  // and loc.end.col to the closing paren, as asserted below.
  test('parse inline tags', ({ assert }) => {
    const template = dedent`@include('partials.user')`

    const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
    tokenizer.parse()

    // No pending multi-line tag statement once parsing completes.
    assert.isNull(tokenizer.tagStatement)
    assert.deepEqual(tokenizer.tokens, [
      {
        filename: 'eval.edge',
        type: TagTypes.TAG,
        loc: {
          start: {
            line: 1,
            col: 9,
          },
          end: {
            line: 1,
            col: 25,
          },
        },
        properties: {
          name: 'include',
          jsArg: "'partials.user'",
          selfclosed: false,
        },
        children: [],
      },
    ])
  })
415 |
416 | test('parse inline tags which are not seekable', ({ assert }) => {
417 | const template = dedent`
418 | @if(username)
419 | Hello
420 | @else
421 | Hello guest
422 | @end
423 | `
424 |
425 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
426 | tokenizer.parse()
427 |
428 | assert.isNull(tokenizer.tagStatement)
429 | assert.deepEqual(tokenizer.tokens, [
430 | {
431 | type: TagTypes.TAG,
432 | filename: 'eval.edge',
433 | loc: {
434 | start: {
435 | line: 1,
436 | col: 4,
437 | },
438 | end: {
439 | line: 1,
440 | col: 13,
441 | },
442 | },
443 | properties: {
444 | name: 'if',
445 | jsArg: 'username',
446 | selfclosed: false,
447 | },
448 | children: [
449 | {
450 | type: 'newline',
451 | filename: 'eval.edge',
452 | line: 1,
453 | },
454 | {
455 | type: 'raw',
456 | filename: 'eval.edge',
457 | value: ' Hello',
458 | line: 2,
459 | },
460 | {
461 | filename: 'eval.edge',
462 | type: TagTypes.TAG,
463 | loc: {
464 | start: {
465 | line: 3,
466 | col: 5,
467 | },
468 | end: {
469 | line: 3,
470 | col: 5,
471 | },
472 | },
473 | properties: {
474 | name: 'else',
475 | jsArg: '',
476 | selfclosed: false,
477 | },
478 | children: [],
479 | },
480 | {
481 | type: 'newline',
482 | filename: 'eval.edge',
483 | line: 3,
484 | },
485 | {
486 | type: 'raw',
487 | filename: 'eval.edge',
488 | value: ' Hello guest',
489 | line: 4,
490 | },
491 | ],
492 | },
493 | ])
494 | })
495 |
  // A tag name missing from the tags definition map is not tokenized; the
  // whole line passes through untouched as a raw token.
  test('ignore tag when not registered', ({ assert }) => {
    const template = dedent`
    @foo('hello world')
    `

    const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
    tokenizer.parse()

    assert.isNull(tokenizer.tagStatement)
    assert.deepEqual(tokenizer.tokens, [
      {
        type: 'raw',
        filename: 'eval.edge',
        value: "@foo('hello world')",
        line: 1,
      },
    ])
  })
514 |
515 | test('ignore tag when escaped', ({ assert }) => {
516 | const template = dedent`@@if(username)
517 | @end
518 | `
519 |
520 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
521 | tokenizer.parse()
522 |
523 | assert.isNull(tokenizer.tagStatement)
524 | assert.deepEqual(tokenizer.tokens, [
525 | {
526 | filename: 'eval.edge',
527 | type: TagTypes.ETAG,
528 | properties: {
529 | name: 'if',
530 | jsArg: 'username',
531 | selfclosed: false,
532 | },
533 | loc: {
534 | start: {
535 | line: 1,
536 | col: 5,
537 | },
538 | end: {
539 | line: 1,
540 | col: 14,
541 | },
542 | },
543 | children: [],
544 | },
545 | ])
546 | })
547 |
  // An unbalanced "(" in the jsArg leaves the tokenizer seeking; parse() must
  // throw. assert.plan(2) guarantees the catch block actually ran.
  test('throw exception when tag is still seeking', ({ assert }) => {
    assert.plan(2)

    const template = dedent`@if((2 + 2)
    @end`

    const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
    try {
      tokenizer.parse()
    } catch ({ message, line }) {
      // NOTE(review): destructuring the catch binding needs
      // useUnknownInCatchVariables disabled — confirm tsconfig allows this.
      assert.equal(message, 'Missing token ")"')
      assert.equal(line, 1)
    }
  })
562 |
  // Trailing content on the same line as a completed tag statement is a
  // syntax error; the reported column points at the first unexpected char.
  test('throw exception when there is inline content in the tag opening statement', ({
    assert,
  }) => {
    assert.plan(3)
    const template = dedent`@include('foo') hello world`

    const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
    try {
      tokenizer.parse()
    } catch ({ message, line, col }) {
      assert.equal(message, 'Unexpected token " hello world"')
      assert.equal(line, 1)
      assert.equal(col, 15)
    }
  })
578 |
  // A seekable tag must have its opening "(" on the same line as the tag
  // name; otherwise parse() throws at the position right after the name.
  test('throw exception when opening brace is in a different line', ({ assert }) => {
    assert.plan(3)
    const template = dedent`
    @if
    (
      username
    )
    @end
    `

    const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
    try {
      tokenizer.parse()
    } catch ({ message, line, col }) {
      assert.equal(message, 'Missing token "("')
      assert.equal(line, 1)
      assert.equal(col, 3)
    }
  })
598 |
599 | test('do not raise exception when tag is not seekable and has no parens', ({ assert }) => {
600 | const template = dedent`
601 | @else
602 | `
603 |
604 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
605 | tokenizer.parse()
606 | assert.deepEqual(tokenizer.tokens, [
607 | {
608 | filename: 'eval.edge',
609 | type: TagTypes.TAG,
610 | properties: {
611 | name: 'else',
612 | jsArg: '',
613 | selfclosed: false,
614 | },
615 | loc: {
616 | start: {
617 | line: 1,
618 | col: 5,
619 | },
620 | end: {
621 | line: 1,
622 | col: 5,
623 | },
624 | },
625 | children: [],
626 | },
627 | ])
628 | })
629 |
630 | test('consume one liner inline tag', ({ assert }) => {
631 | const template = "@include('header')"
632 |
633 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
634 | tokenizer.parse()
635 |
636 | assert.isNull(tokenizer.tagStatement)
637 | assert.deepEqual(tokenizer.tokens, [
638 | {
639 | filename: 'eval.edge',
640 | type: TagTypes.TAG,
641 | loc: {
642 | start: {
643 | line: 1,
644 | col: 9,
645 | },
646 | end: {
647 | line: 1,
648 | col: 18,
649 | },
650 | },
651 | properties: {
652 | name: 'include',
653 | jsArg: "'header'",
654 | selfclosed: false,
655 | },
656 | children: [],
657 | },
658 | ])
659 | })
660 |
661 | test('throw exception when there are unclosed tags', ({ assert }) => {
662 | const template = dedent`
663 | @if(username)
664 | Hello world
665 | `
666 |
667 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
668 | const fn = () => tokenizer.parse()
669 | assert.throws(fn, 'Unclosed tag if')
670 | })
671 |
672 | test('throw exception when there are unclosed nested tags', ({ assert }) => {
673 | const template = dedent`
674 | @if(username)
675 | @each(user in users)
676 | @end
677 | `
678 |
679 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
680 | const fn = () => tokenizer.parse()
681 | assert.throws(fn, 'Unclosed tag if')
682 | })
683 |
684 | test('handle when nested components are closed using generic end', ({ assert }) => {
685 | const template = dedent`
686 | @if(username)
687 | @each(user in users)
688 | @end
689 | @end
690 | `
691 |
692 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
693 | tokenizer.parse()
694 | assert.isNull(tokenizer.tagStatement)
695 |
696 | assert.deepEqual(tokenizer.tokens, [
697 | {
698 | type: TagTypes.TAG,
699 | filename: 'eval.edge',
700 | loc: {
701 | start: {
702 | line: 1,
703 | col: 4,
704 | },
705 | end: {
706 | line: 1,
707 | col: 13,
708 | },
709 | },
710 | properties: {
711 | name: 'if',
712 | jsArg: 'username',
713 | selfclosed: false,
714 | },
715 | children: [
716 | {
717 | type: 'newline',
718 | filename: 'eval.edge',
719 | line: 1,
720 | },
721 | {
722 | type: TagTypes.TAG,
723 | filename: 'eval.edge',
724 | loc: {
725 | start: {
726 | line: 2,
727 | col: 6,
728 | },
729 | end: {
730 | line: 2,
731 | col: 20,
732 | },
733 | },
734 | properties: {
735 | name: 'each',
736 | jsArg: 'user in users',
737 | selfclosed: false,
738 | },
739 | children: [],
740 | },
741 | ],
742 | },
743 | ])
744 | })
745 |
746 | test('work fine if a tag is self closed', ({ assert }) => {
747 | const template = dedent`
748 | @!each(user in users, include = 'user')
749 | `
750 |
751 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
752 | tokenizer.parse()
753 |
754 | assert.isNull(tokenizer.tagStatement)
755 | assert.deepEqual(tokenizer.tokens, [
756 | {
757 | filename: 'eval.edge',
758 | type: TagTypes.TAG,
759 | loc: {
760 | start: {
761 | line: 1,
762 | col: 7,
763 | },
764 | end: {
765 | line: 1,
766 | col: 39,
767 | },
768 | },
769 | properties: {
770 | name: 'each',
771 | jsArg: "user in users, include = 'user'",
772 | selfclosed: true,
773 | },
774 | children: [],
775 | },
776 | ])
777 | })
778 |
779 | test('work fine when bang is defined in tag jsArg', ({ assert }) => {
780 | const template = dedent`
781 | @if(!user)
782 | @end
783 | `
784 |
785 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
786 | tokenizer.parse()
787 |
788 | assert.isNull(tokenizer.tagStatement)
789 | assert.deepEqual(tokenizer.tokens, [
790 | {
791 | filename: 'eval.edge',
792 | type: TagTypes.TAG,
793 | loc: {
794 | start: {
795 | line: 1,
796 | col: 4,
797 | },
798 | end: {
799 | line: 1,
800 | col: 10,
801 | },
802 | },
803 | properties: {
804 | name: 'if',
805 | jsArg: '!user',
806 | selfclosed: false,
807 | },
808 | children: [],
809 | },
810 | ])
811 | })
812 |
813 | test('remove newline after the tag', ({ assert }) => {
814 | const template = dedent`
815 | Hello
816 |
817 | @if(username)~
818 | Hello
819 | @end
820 | `
821 |
822 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
823 | tokenizer.parse()
824 |
825 | assert.isNull(tokenizer.tagStatement)
826 | assert.deepEqual(tokenizer.tokens, [
827 | {
828 | type: 'raw',
829 | filename: 'eval.edge',
830 | value: 'Hello',
831 | line: 1,
832 | },
833 | {
834 | type: 'newline',
835 | filename: 'eval.edge',
836 | line: 1,
837 | },
838 | {
839 | type: 'raw',
840 | filename: 'eval.edge',
841 | value: '',
842 | line: 2,
843 | },
844 | {
845 | type: TagTypes.TAG,
846 | filename: 'eval.edge',
847 | loc: {
848 | start: {
849 | line: 3,
850 | col: 4,
851 | },
852 | end: {
853 | line: 3,
854 | col: 13,
855 | },
856 | },
857 | properties: {
858 | name: 'if',
859 | jsArg: 'username',
860 | selfclosed: false,
861 | },
862 | children: [
863 | {
864 | type: 'raw',
865 | filename: 'eval.edge',
866 | value: ' Hello',
867 | line: 4,
868 | },
869 | ],
870 | },
871 | ])
872 | })
873 |
874 | test('remove newline after the tag spanned over multiple lines', ({ assert }) => {
875 | const template = dedent`
876 | Hello
877 |
878 | @if(
879 | username
880 | )~
881 | Hello
882 | @end
883 | `
884 |
885 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
886 | tokenizer.parse()
887 |
888 | assert.isNull(tokenizer.tagStatement)
889 | assert.deepEqual(tokenizer.tokens, [
890 | {
891 | type: 'raw',
892 | filename: 'eval.edge',
893 | value: 'Hello',
894 | line: 1,
895 | },
896 | {
897 | type: 'newline',
898 | filename: 'eval.edge',
899 | line: 1,
900 | },
901 | {
902 | type: 'raw',
903 | filename: 'eval.edge',
904 | value: '',
905 | line: 2,
906 | },
907 | {
908 | type: TagTypes.TAG,
909 | filename: 'eval.edge',
910 | loc: {
911 | start: {
912 | line: 3,
913 | col: 4,
914 | },
915 | end: {
916 | line: 5,
917 | col: 1,
918 | },
919 | },
920 | properties: {
921 | name: 'if',
922 | jsArg: '\n username\n',
923 | selfclosed: false,
924 | },
925 | children: [
926 | {
927 | type: 'raw',
928 | filename: 'eval.edge',
929 | value: ' Hello',
930 | line: 6,
931 | },
932 | ],
933 | },
934 | ])
935 | })
936 |
937 | test('remove newline between two tags', ({ assert }) => {
938 | const template = dedent`
939 | Hello
940 |
941 | @if(username)~
942 | @if(age)
943 | Hello
944 | @end
945 | @end
946 | `
947 |
948 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
949 | tokenizer.parse()
950 |
951 | assert.isNull(tokenizer.tagStatement)
952 | assert.deepEqual(tokenizer.tokens, [
953 | {
954 | type: 'raw',
955 | filename: 'eval.edge',
956 | value: 'Hello',
957 | line: 1,
958 | },
959 | {
960 | type: 'newline',
961 | filename: 'eval.edge',
962 | line: 1,
963 | },
964 | {
965 | type: 'raw',
966 | filename: 'eval.edge',
967 | value: '',
968 | line: 2,
969 | },
970 | {
971 | type: TagTypes.TAG,
972 | filename: 'eval.edge',
973 | loc: {
974 | start: {
975 | line: 3,
976 | col: 4,
977 | },
978 | end: {
979 | line: 3,
980 | col: 13,
981 | },
982 | },
983 | properties: {
984 | name: 'if',
985 | jsArg: 'username',
986 | selfclosed: false,
987 | },
988 | children: [
989 | {
990 | type: TagTypes.TAG,
991 | filename: 'eval.edge',
992 | loc: {
993 | start: {
994 | line: 4,
995 | col: 6,
996 | },
997 | end: {
998 | line: 4,
999 | col: 10,
1000 | },
1001 | },
1002 | properties: {
1003 | name: 'if',
1004 | jsArg: 'age',
1005 | selfclosed: false,
1006 | },
1007 | children: [
1008 | {
1009 | type: 'newline',
1010 | filename: 'eval.edge',
1011 | line: 4,
1012 | },
1013 | {
1014 | type: 'raw',
1015 | filename: 'eval.edge',
1016 | value: ' Hello',
1017 | line: 5,
1018 | },
1019 | ],
1020 | },
1021 | ],
1022 | },
1023 | ])
1024 | })
1025 |
1026 | test('remove newline between two tags when spanned over multiple lines', ({ assert }) => {
1027 | const template = dedent`
1028 | Hello
1029 |
1030 | @if(
1031 | username
1032 | )~
1033 | @if(age)
1034 | Hello
1035 | @end
1036 | @end
1037 | `
1038 |
1039 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1040 | tokenizer.parse()
1041 |
1042 | assert.isNull(tokenizer.tagStatement)
1043 | assert.deepEqual(tokenizer.tokens, [
1044 | {
1045 | type: 'raw',
1046 | filename: 'eval.edge',
1047 | value: 'Hello',
1048 | line: 1,
1049 | },
1050 | {
1051 | type: 'newline',
1052 | filename: 'eval.edge',
1053 | line: 1,
1054 | },
1055 | {
1056 | type: 'raw',
1057 | filename: 'eval.edge',
1058 | value: '',
1059 | line: 2,
1060 | },
1061 | {
1062 | type: TagTypes.TAG,
1063 | filename: 'eval.edge',
1064 | loc: {
1065 | start: {
1066 | line: 3,
1067 | col: 4,
1068 | },
1069 | end: {
1070 | line: 5,
1071 | col: 1,
1072 | },
1073 | },
1074 | properties: {
1075 | name: 'if',
1076 | jsArg: '\n username\n',
1077 | selfclosed: false,
1078 | },
1079 | children: [
1080 | {
1081 | type: TagTypes.TAG,
1082 | filename: 'eval.edge',
1083 | loc: {
1084 | start: {
1085 | line: 6,
1086 | col: 6,
1087 | },
1088 | end: {
1089 | line: 6,
1090 | col: 10,
1091 | },
1092 | },
1093 | properties: {
1094 | name: 'if',
1095 | jsArg: 'age',
1096 | selfclosed: false,
1097 | },
1098 | children: [
1099 | {
1100 | type: 'newline',
1101 | filename: 'eval.edge',
1102 | line: 6,
1103 | },
1104 | {
1105 | type: 'raw',
1106 | filename: 'eval.edge',
1107 | value: ' Hello',
1108 | line: 7,
1109 | },
1110 | ],
1111 | },
1112 | ],
1113 | },
1114 | ])
1115 | })
1116 |
1117 | test('remove newline after the tag when tag has noNewLine property', ({ assert }) => {
1118 | const template = dedent`
1119 | Hello
1120 |
1121 | @if(username)
1122 | Hello
1123 | @end
1124 | `
1125 |
1126 | const tags = {
1127 | if: class If {
1128 | static block = true
1129 | static seekable = true
1130 | static noNewLine = true
1131 | },
1132 | }
1133 |
1134 | const tokenizer = new Tokenizer(template, tags, { filename: 'eval.edge' })
1135 | tokenizer.parse()
1136 |
1137 | assert.isNull(tokenizer.tagStatement)
1138 | assert.deepEqual(tokenizer.tokens, [
1139 | {
1140 | type: 'raw',
1141 | filename: 'eval.edge',
1142 | value: 'Hello',
1143 | line: 1,
1144 | },
1145 | {
1146 | type: 'newline',
1147 | filename: 'eval.edge',
1148 | line: 1,
1149 | },
1150 | {
1151 | type: 'raw',
1152 | filename: 'eval.edge',
1153 | value: '',
1154 | line: 2,
1155 | },
1156 | {
1157 | type: TagTypes.TAG,
1158 | filename: 'eval.edge',
1159 | loc: {
1160 | start: {
1161 | line: 3,
1162 | col: 4,
1163 | },
1164 | end: {
1165 | line: 3,
1166 | col: 13,
1167 | },
1168 | },
1169 | properties: {
1170 | name: 'if',
1171 | jsArg: 'username',
1172 | selfclosed: false,
1173 | },
1174 | children: [
1175 | {
1176 | type: 'raw',
1177 | filename: 'eval.edge',
1178 | value: ' Hello',
1179 | line: 4,
1180 | },
1181 | ],
1182 | },
1183 | ])
1184 | })
1185 |
1186 | test('remove newline between two tags when tag has noNewLine property', ({ assert }) => {
1187 | const template = dedent`
1188 | Hello
1189 |
1190 | @if(
1191 | username
1192 | )
1193 | @if(age)
1194 | Hello
1195 | @end
1196 | @end
1197 | `
1198 |
1199 | const tags = {
1200 | if: class If {
1201 | static block = true
1202 | static seekable = true
1203 | static noNewLine = true
1204 | },
1205 | }
1206 |
1207 | const tokenizer = new Tokenizer(template, tags, { filename: 'eval.edge' })
1208 | tokenizer.parse()
1209 |
1210 | assert.isNull(tokenizer.tagStatement)
1211 | assert.deepEqual(tokenizer.tokens, [
1212 | {
1213 | type: 'raw',
1214 | filename: 'eval.edge',
1215 | value: 'Hello',
1216 | line: 1,
1217 | },
1218 | {
1219 | type: 'newline',
1220 | filename: 'eval.edge',
1221 | line: 1,
1222 | },
1223 | {
1224 | type: 'raw',
1225 | filename: 'eval.edge',
1226 | value: '',
1227 | line: 2,
1228 | },
1229 | {
1230 | type: TagTypes.TAG,
1231 | filename: 'eval.edge',
1232 | loc: {
1233 | start: {
1234 | line: 3,
1235 | col: 4,
1236 | },
1237 | end: {
1238 | line: 5,
1239 | col: 1,
1240 | },
1241 | },
1242 | properties: {
1243 | name: 'if',
1244 | jsArg: '\n username\n',
1245 | selfclosed: false,
1246 | },
1247 | children: [
1248 | {
1249 | type: TagTypes.TAG,
1250 | filename: 'eval.edge',
1251 | loc: {
1252 | start: {
1253 | line: 6,
1254 | col: 6,
1255 | },
1256 | end: {
1257 | line: 6,
1258 | col: 10,
1259 | },
1260 | },
1261 | properties: {
1262 | name: 'if',
1263 | jsArg: 'age',
1264 | selfclosed: false,
1265 | },
1266 | children: [
1267 | {
1268 | type: 'raw',
1269 | filename: 'eval.edge',
1270 | value: ' Hello',
1271 | line: 7,
1272 | },
1273 | ],
1274 | },
1275 | ],
1276 | },
1277 | ])
1278 | })
1279 |
1280 | test('remove newline after the endblock', ({ assert }) => {
1281 | const template = dedent`
1282 | @if(username)~
1283 | Hello
1284 | @end~
1285 | world
1286 | `
1287 |
1288 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1289 | tokenizer.parse()
1290 |
1291 | assert.isNull(tokenizer.tagStatement)
1292 | assert.deepEqual(tokenizer.tokens, [
1293 | {
1294 | type: TagTypes.TAG,
1295 | filename: 'eval.edge',
1296 | loc: {
1297 | start: {
1298 | line: 1,
1299 | col: 4,
1300 | },
1301 | end: {
1302 | line: 1,
1303 | col: 13,
1304 | },
1305 | },
1306 | properties: {
1307 | name: 'if',
1308 | jsArg: 'username',
1309 | selfclosed: false,
1310 | },
1311 | children: [
1312 | {
1313 | type: 'raw',
1314 | filename: 'eval.edge',
1315 | value: ' Hello',
1316 | line: 2,
1317 | },
1318 | ],
1319 | },
1320 | {
1321 | type: 'raw',
1322 | filename: 'eval.edge',
1323 | value: 'world',
1324 | line: 4,
1325 | },
1326 | ])
1327 | })
1328 | })
1329 |
1330 | test.group('Tokenizer columns', () => {
1331 | test('track whitespaces before the opening parenthesis', ({ assert }) => {
1332 | const template = dedent`
1333 | Hello
1334 |
1335 | @if (username)
1336 | @end
1337 | `
1338 |
1339 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1340 | tokenizer.parse()
1341 |
1342 | assert.isNull(tokenizer.tagStatement)
1343 | assert.deepEqual(tokenizer.tokens, [
1344 | {
1345 | type: 'raw',
1346 | filename: 'eval.edge',
1347 | value: 'Hello',
1348 | line: 1,
1349 | },
1350 | {
1351 | type: 'newline',
1352 | filename: 'eval.edge',
1353 | line: 1,
1354 | },
1355 | {
1356 | type: 'raw',
1357 | filename: 'eval.edge',
1358 | value: '',
1359 | line: 2,
1360 | },
1361 | {
1362 | filename: 'eval.edge',
1363 | type: TagTypes.TAG,
1364 | properties: {
1365 | name: 'if',
1366 | jsArg: 'username',
1367 | selfclosed: false,
1368 | },
1369 | loc: {
1370 | start: {
1371 | line: 3,
1372 | col: 6,
1373 | },
1374 | end: {
1375 | line: 3,
1376 | col: 15,
1377 | },
1378 | },
1379 | children: [],
1380 | },
1381 | ])
1382 | })
1383 |
1384 | test('do not track whitespaces before the closing parenthesis', ({ assert }) => {
1385 | const template = dedent`
1386 | Hello
1387 |
1388 | @if(username )
1389 | @end
1390 | `
1391 |
1392 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1393 | tokenizer.parse()
1394 |
1395 | assert.isNull(tokenizer.tagStatement)
1396 | assert.deepEqual(tokenizer.tokens, [
1397 | {
1398 | type: 'raw',
1399 | filename: 'eval.edge',
1400 | value: 'Hello',
1401 | line: 1,
1402 | },
1403 | {
1404 | type: 'newline',
1405 | filename: 'eval.edge',
1406 | line: 1,
1407 | },
1408 | {
1409 | type: 'raw',
1410 | filename: 'eval.edge',
1411 | value: '',
1412 | line: 2,
1413 | },
1414 | {
1415 | filename: 'eval.edge',
1416 | type: TagTypes.TAG,
1417 | properties: {
1418 | name: 'if',
1419 | jsArg: 'username ',
1420 | selfclosed: false,
1421 | },
1422 | loc: {
1423 | start: {
1424 | line: 3,
1425 | col: 4,
1426 | },
1427 | end: {
1428 | line: 3,
1429 | col: 15,
1430 | },
1431 | },
1432 | children: [],
1433 | },
1434 | ])
1435 | })
1436 |
1437 | test('track whitespaces before the starting of tag', ({ assert }) => {
1438 | const template = dedent`
1439 | Hello
1440 |
1441 | @if(username)
1442 | @end
1443 | `
1444 |
1445 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1446 | tokenizer.parse()
1447 |
1448 | assert.isNull(tokenizer.tagStatement)
1449 | assert.deepEqual(tokenizer.tokens, [
1450 | {
1451 | type: 'raw',
1452 | filename: 'eval.edge',
1453 | value: 'Hello',
1454 | line: 1,
1455 | },
1456 | {
1457 | type: 'newline',
1458 | filename: 'eval.edge',
1459 | line: 1,
1460 | },
1461 | {
1462 | type: 'raw',
1463 | filename: 'eval.edge',
1464 | value: '',
1465 | line: 2,
1466 | },
1467 | {
1468 | filename: 'eval.edge',
1469 | type: TagTypes.TAG,
1470 | properties: {
1471 | name: 'if',
1472 | jsArg: 'username',
1473 | selfclosed: false,
1474 | },
1475 | loc: {
1476 | start: {
1477 | line: 3,
1478 | col: 6,
1479 | },
1480 | end: {
1481 | line: 3,
1482 | col: 15,
1483 | },
1484 | },
1485 | children: [],
1486 | },
1487 | ])
1488 | })
1489 |
1490 | test('track columns for multiline expression', ({ assert }) => {
1491 | const template = dedent`
1492 | Hello
1493 |
1494 | @if(
1495 | username && age
1496 | )
1497 | @end
1498 | `
1499 |
1500 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1501 | tokenizer.parse()
1502 |
1503 | assert.isNull(tokenizer.tagStatement)
1504 | assert.deepEqual(tokenizer.tokens, [
1505 | {
1506 | type: 'raw',
1507 | filename: 'eval.edge',
1508 | value: 'Hello',
1509 | line: 1,
1510 | },
1511 | {
1512 | type: 'newline',
1513 | filename: 'eval.edge',
1514 | line: 1,
1515 | },
1516 | {
1517 | type: 'raw',
1518 | filename: 'eval.edge',
1519 | value: '',
1520 | line: 2,
1521 | },
1522 | {
1523 | filename: 'eval.edge',
1524 | type: TagTypes.TAG,
1525 | properties: {
1526 | name: 'if',
1527 | jsArg: '\n username && age\n',
1528 | selfclosed: false,
1529 | },
1530 | loc: {
1531 | start: {
1532 | line: 3,
1533 | col: 4,
1534 | },
1535 | end: {
1536 | line: 5,
1537 | col: 1,
1538 | },
1539 | },
1540 | children: [],
1541 | },
1542 | ])
1543 | })
1544 |
1545 | test('track columns for mustache statement', ({ assert }) => {
1546 | const template = dedent`
1547 | Hello {{ username }}
1548 | `
1549 |
1550 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1551 | tokenizer.parse()
1552 |
1553 | assert.isNull(tokenizer.tagStatement)
1554 | assert.deepEqual(tokenizer.tokens, [
1555 | {
1556 | type: 'raw',
1557 | filename: 'eval.edge',
1558 | value: 'Hello ',
1559 | line: 1,
1560 | },
1561 | {
1562 | filename: 'eval.edge',
1563 | type: MustacheTypes.MUSTACHE,
1564 | properties: {
1565 | jsArg: ' username ',
1566 | },
1567 | loc: {
1568 | start: {
1569 | line: 1,
1570 | col: 8,
1571 | },
1572 | end: {
1573 | line: 1,
1574 | col: 20,
1575 | },
1576 | },
1577 | },
1578 | ])
1579 | })
1580 |
1581 | test('track columns for multiple mustache statements', ({ assert }) => {
1582 | const template = dedent`
1583 | Hello {{ username }}, your age is {{ age }}
1584 | `
1585 |
1586 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1587 | tokenizer.parse()
1588 |
1589 | assert.isNull(tokenizer.mustacheStatement)
1590 | assert.deepEqual(tokenizer.tokens, [
1591 | {
1592 | type: 'raw',
1593 | filename: 'eval.edge',
1594 | value: 'Hello ',
1595 | line: 1,
1596 | },
1597 | {
1598 | filename: 'eval.edge',
1599 | type: MustacheTypes.MUSTACHE,
1600 | properties: {
1601 | jsArg: ' username ',
1602 | },
1603 | loc: {
1604 | start: {
1605 | line: 1,
1606 | col: 8,
1607 | },
1608 | end: {
1609 | line: 1,
1610 | col: 20,
1611 | },
1612 | },
1613 | },
1614 | {
1615 | type: 'raw',
1616 | filename: 'eval.edge',
1617 | value: ', your age is ',
1618 | line: 1,
1619 | },
1620 | {
1621 | filename: 'eval.edge',
1622 | type: MustacheTypes.MUSTACHE,
1623 | properties: {
1624 | jsArg: ' age ',
1625 | },
1626 | loc: {
1627 | start: {
1628 | line: 1,
1629 | col: 36,
1630 | },
1631 | end: {
1632 | line: 1,
1633 | col: 43,
1634 | },
1635 | },
1636 | },
1637 | ])
1638 | })
1639 |
1640 | test('track columns for multiline mustache statements', ({ assert }) => {
1641 | const template = dedent`
1642 | Hello {{
1643 | username
1644 | }}, your age is {{ age }}
1645 | `
1646 |
1647 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1648 | tokenizer.parse()
1649 |
1650 | assert.isNull(tokenizer.tagStatement)
1651 | assert.deepEqual(tokenizer.tokens, [
1652 | {
1653 | type: 'raw',
1654 | filename: 'eval.edge',
1655 | value: 'Hello ',
1656 | line: 1,
1657 | },
1658 | {
1659 | filename: 'eval.edge',
1660 | type: MustacheTypes.MUSTACHE,
1661 | properties: {
1662 | jsArg: '\n username\n',
1663 | },
1664 | loc: {
1665 | start: {
1666 | line: 1,
1667 | col: 8,
1668 | },
1669 | end: {
1670 | line: 3,
1671 | col: 2,
1672 | },
1673 | },
1674 | },
1675 | {
1676 | type: 'raw',
1677 | filename: 'eval.edge',
1678 | value: ', your age is ',
1679 | line: 3,
1680 | },
1681 | {
1682 | filename: 'eval.edge',
1683 | type: MustacheTypes.MUSTACHE,
1684 | properties: {
1685 | jsArg: ' age ',
1686 | },
1687 | loc: {
1688 | start: {
1689 | line: 3,
1690 | col: 18,
1691 | },
1692 | end: {
1693 | line: 3,
1694 | col: 25,
1695 | },
1696 | },
1697 | },
1698 | ])
1699 | })
1700 |
1701 | test('track columns for multiline saf emustache statements', ({ assert }) => {
1702 | const template = dedent`
1703 | Hello {{{
1704 | username
1705 | }}}, your age is {{ age }}
1706 | `
1707 |
1708 | const tokenizer = new Tokenizer(template, tagsDef, { filename: 'eval.edge' })
1709 | tokenizer.parse()
1710 |
1711 | assert.isNull(tokenizer.tagStatement)
1712 | assert.deepEqual(tokenizer.tokens, [
1713 | {
1714 | type: 'raw',
1715 | filename: 'eval.edge',
1716 | value: 'Hello ',
1717 | line: 1,
1718 | },
1719 | {
1720 | filename: 'eval.edge',
1721 | type: MustacheTypes.SMUSTACHE,
1722 | properties: {
1723 | jsArg: '\n username\n',
1724 | },
1725 | loc: {
1726 | start: {
1727 | line: 1,
1728 | col: 9,
1729 | },
1730 | end: {
1731 | line: 3,
1732 | col: 3,
1733 | },
1734 | },
1735 | },
1736 | {
1737 | type: 'raw',
1738 | filename: 'eval.edge',
1739 | value: ', your age is ',
1740 | line: 3,
1741 | },
1742 | {
1743 | filename: 'eval.edge',
1744 | type: MustacheTypes.MUSTACHE,
1745 | properties: {
1746 | jsArg: ' age ',
1747 | },
1748 | loc: {
1749 | start: {
1750 | line: 3,
1751 | col: 19,
1752 | },
1753 | end: {
1754 | line: 3,
1755 | col: 26,
1756 | },
1757 | },
1758 | },
1759 | ])
1760 | })
1761 | })
1762 |
--------------------------------------------------------------------------------
/tests/utils.spec.ts:
--------------------------------------------------------------------------------
1 | /*
2 | * edge-lexer
3 | *
4 | * (c) Edge
5 | *
6 | * For the full copyright and license information, please view the LICENSE
7 | * file that was distributed with this source code.
8 | */
9 |
10 | import { test } from '@japa/runner'
11 |
12 | import * as utils from '../src/utils.js'
13 | import { TagTypes, MustacheTypes } from '../src/enums.js'
14 |
15 | test.group('Utils | isTag', () => {
16 | test('return true when token type is a tag with a given name', ({ assert }) => {
17 | assert.isTrue(
18 | utils.isTag(
19 | {
20 | type: TagTypes.TAG,
21 | properties: {
22 | name: 'include',
23 | jsArg: '',
24 | selfclosed: true,
25 | },
26 | filename: 'eval.edge',
27 | loc: {
28 | start: { line: 1, col: 0 },
29 | end: { line: 1, col: 20 },
30 | },
31 | children: [],
32 | },
33 | 'include'
34 | )
35 | )
36 | })
37 |
38 | test('return false when token type is a tag with different name', ({ assert }) => {
39 | assert.isFalse(
40 | utils.isTag(
41 | {
42 | type: TagTypes.TAG,
43 | properties: {
44 | name: 'include',
45 | jsArg: '',
46 | selfclosed: true,
47 | },
48 | filename: 'eval.edge',
49 | loc: {
50 | start: { line: 1, col: 0 },
51 | end: { line: 1, col: 20 },
52 | },
53 | children: [],
54 | },
55 | 'layout'
56 | )
57 | )
58 | })
59 |
60 | test('return false when token type is not a tag', ({ assert }) => {
61 | assert.isFalse(
62 | utils.isTag(
63 | {
64 | type: 'raw',
65 | value: '',
66 | filename: 'eval.edge',
67 | line: 1,
68 | },
69 | 'layout'
70 | )
71 | )
72 | })
73 |
74 | test('return true when token type is an escaped tag', ({ assert }) => {
75 | assert.isTrue(
76 | utils.isEscapedTag({
77 | type: TagTypes.ETAG,
78 | properties: {
79 | name: 'include',
80 | jsArg: '',
81 | selfclosed: true,
82 | },
83 | filename: 'eval.edge',
84 | loc: {
85 | start: { line: 1, col: 0 },
86 | end: { line: 1, col: 20 },
87 | },
88 | children: [],
89 | })
90 | )
91 | })
92 |
93 | test('return true when token type is not an escaped tag', ({ assert }) => {
94 | assert.isFalse(
95 | utils.isEscapedTag({
96 | type: TagTypes.TAG,
97 | properties: {
98 | name: 'include',
99 | jsArg: '',
100 | selfclosed: true,
101 | },
102 | filename: 'eval.edge',
103 | loc: {
104 | start: { line: 1, col: 0 },
105 | end: { line: 1, col: 20 },
106 | },
107 | children: [],
108 | })
109 | )
110 | })
111 | })
112 |
113 | test.group('Utils | isMustache', () => {
114 | test('return true when token type is a mustache tag', ({ assert }) => {
115 | assert.isTrue(
116 | utils.isMustache({
117 | type: MustacheTypes.EMUSTACHE,
118 | properties: {
119 | jsArg: '',
120 | },
121 | filename: 'eval.edge',
122 | loc: {
123 | start: { line: 1, col: 0 },
124 | end: { line: 1, col: 20 },
125 | },
126 | })
127 | )
128 | })
129 |
130 | test('return false when token type is not a mustache tag', ({ assert }) => {
131 | assert.isFalse(
132 | utils.isMustache({
133 | type: 'raw',
134 | value: '',
135 | filename: 'eval.edge',
136 | line: 1,
137 | })
138 | )
139 | })
140 |
141 | test('return true when token type is a safe mustache tag', ({ assert }) => {
142 | assert.isTrue(
143 | utils.isSafeMustache({
144 | type: MustacheTypes.SMUSTACHE,
145 | properties: {
146 | jsArg: '',
147 | },
148 | filename: 'eval.edge',
149 | loc: {
150 | start: { line: 1, col: 0 },
151 | end: { line: 1, col: 20 },
152 | },
153 | })
154 | )
155 | })
156 |
157 | test('return false when token type is not a safe mustache tag', ({ assert }) => {
158 | assert.isFalse(
159 | utils.isSafeMustache({
160 | type: MustacheTypes.MUSTACHE,
161 | properties: {
162 | jsArg: '',
163 | },
164 | filename: 'eval.edge',
165 | loc: {
166 | start: { line: 1, col: 0 },
167 | end: { line: 1, col: 20 },
168 | },
169 | })
170 | )
171 | })
172 |
173 | test('return true when token type is an escaped mustache tag', ({ assert }) => {
174 | assert.isTrue(
175 | utils.isEscapedMustache({
176 | type: MustacheTypes.EMUSTACHE,
177 | properties: {
178 | jsArg: '',
179 | },
180 | filename: 'eval.edge',
181 | loc: {
182 | start: { line: 1, col: 0 },
183 | end: { line: 1, col: 20 },
184 | },
185 | })
186 | )
187 | })
188 |
189 | test('return false when token type is not an escaped mustache tag', ({ assert }) => {
190 | assert.isFalse(
191 | utils.isEscapedMustache({
192 | type: MustacheTypes.MUSTACHE,
193 | properties: {
194 | jsArg: '',
195 | },
196 | filename: 'eval.edge',
197 | loc: {
198 | start: { line: 1, col: 0 },
199 | end: { line: 1, col: 20 },
200 | },
201 | })
202 | )
203 | })
204 | })
205 |
206 | test.group('Utils | getLineAndColumn', () => {
207 | test('return line and column for a tag token', ({ assert }) => {
208 | assert.deepEqual(
209 | utils.getLineAndColumn({
210 | type: TagTypes.TAG,
211 | properties: {
212 | name: 'include',
213 | jsArg: '',
214 | selfclosed: true,
215 | },
216 | filename: 'eval.edge',
217 | loc: {
218 | start: { line: 1, col: 0 },
219 | end: { line: 1, col: 20 },
220 | },
221 | children: [],
222 | }),
223 | [1, 0]
224 | )
225 | })
226 |
227 | test('return line and column for a mustache token', ({ assert }) => {
228 | assert.deepEqual(
229 | utils.getLineAndColumn({
230 | type: MustacheTypes.EMUSTACHE,
231 | properties: {
232 | jsArg: '',
233 | },
234 | filename: 'eval.edge',
235 | loc: {
236 | start: { line: 1, col: 5 },
237 | end: { line: 1, col: 20 },
238 | },
239 | }),
240 | [1, 5]
241 | )
242 | })
243 |
244 | test('return line and column for a raw token', ({ assert }) => {
245 | assert.deepEqual(
246 | utils.getLineAndColumn({
247 | type: 'raw',
248 | value: '',
249 | filename: 'eval.edge',
250 | line: 1,
251 | }),
252 | [1, 0]
253 | )
254 | })
255 |
256 | test('return line and column for a newline token', ({ assert }) => {
257 | assert.deepEqual(
258 | utils.getLineAndColumn({
259 | type: 'newline',
260 | filename: 'eval.edge',
261 | line: 1,
262 | }),
263 | [1, 0]
264 | )
265 | })
266 | })
267 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@adonisjs/tsconfig/tsconfig.package.json",
3 | "compilerOptions": {
4 | "rootDir": "./",
5 | "outDir": "./build"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------