├── .github └── workflows │ └── dispatch.yml ├── .gitignore ├── .npmignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── mode ├── README.md ├── apl.d.ts ├── apl.js ├── asciiarmor.d.ts ├── asciiarmor.js ├── asn1.d.ts ├── asn1.js ├── asterisk.d.ts ├── asterisk.js ├── brainfuck.d.ts ├── brainfuck.js ├── clike.d.ts ├── clike.js ├── clojure.d.ts ├── clojure.js ├── cmake.d.ts ├── cmake.js ├── cobol.d.ts ├── cobol.js ├── coffeescript.d.ts ├── coffeescript.js ├── commonlisp.d.ts ├── commonlisp.js ├── crystal.d.ts ├── crystal.js ├── css.d.ts ├── css.js ├── cypher.d.ts ├── cypher.js ├── d.d.ts ├── d.js ├── diff.d.ts ├── diff.js ├── dockerfile.d.ts ├── dockerfile.js ├── dtd.d.ts ├── dtd.js ├── dylan.d.ts ├── dylan.js ├── ebnf.d.ts ├── ebnf.js ├── ecl.d.ts ├── ecl.js ├── eiffel.d.ts ├── eiffel.js ├── elm.d.ts ├── elm.js ├── erlang.d.ts ├── erlang.js ├── factor.d.ts ├── factor.js ├── fcl.d.ts ├── fcl.js ├── forth.d.ts ├── forth.js ├── fortran.d.ts ├── fortran.js ├── gas.d.ts ├── gas.js ├── gherkin.d.ts ├── gherkin.js ├── go.d.ts ├── go.js ├── groovy.d.ts ├── groovy.js ├── haskell.d.ts ├── haskell.js ├── haxe.d.ts ├── haxe.js ├── http.d.ts ├── http.js ├── idl.d.ts ├── idl.js ├── javascript.d.ts ├── javascript.js ├── jinja2.d.ts ├── jinja2.js ├── julia.d.ts ├── julia.js ├── livescript.d.ts ├── livescript.js ├── lua.d.ts ├── lua.js ├── mathematica.d.ts ├── mathematica.js ├── mbox.d.ts ├── mbox.js ├── mirc.d.ts ├── mirc.js ├── mllike.d.ts ├── mllike.js ├── modelica.d.ts ├── modelica.js ├── mscgen.d.ts ├── mscgen.js ├── mumps.d.ts ├── mumps.js ├── nginx.d.ts ├── nginx.js ├── nsis.d.ts ├── nsis.js ├── ntriples.d.ts ├── ntriples.js ├── octave.d.ts ├── octave.js ├── oz.d.ts ├── oz.js ├── pascal.d.ts ├── pascal.js ├── pegjs.d.ts ├── pegjs.js ├── perl.d.ts ├── perl.js ├── pig.d.ts ├── pig.js ├── powershell.d.ts ├── powershell.js ├── properties.d.ts ├── properties.js ├── protobuf.d.ts ├── protobuf.js ├── pug.d.ts ├── pug.js ├── puppet.d.ts ├── puppet.js ├── python.d.ts ├── python.js ├── q.d.ts ├── q.js ├── r.d.ts ├── r.js ├── rpm.d.ts ├── rpm.js ├── ruby.d.ts ├── ruby.js ├── rust.d.ts ├── rust.js ├── sas.d.ts ├── sas.js ├── sass.d.ts ├── sass.js ├── scheme.d.ts ├── scheme.js ├── shell.d.ts ├── shell.js ├── sieve.d.ts ├── sieve.js ├── simple-mode.d.ts ├── simple-mode.js ├── smalltalk.d.ts ├── smalltalk.js ├── solr.d.ts ├── solr.js ├── sparql.d.ts ├── sparql.js ├── spreadsheet.d.ts ├── spreadsheet.js ├── sql.d.ts ├── sql.js ├── stex.d.ts ├── stex.js ├── stylus.d.ts ├── stylus.js ├── swift.d.ts ├── swift.js ├── tcl.d.ts ├── tcl.js ├── textile.d.ts ├── textile.js ├── tiddlywiki.d.ts ├── tiddlywiki.js ├── tiki.d.ts ├── tiki.js ├── toml.d.ts ├── toml.js ├── troff.d.ts ├── troff.js ├── ttcn-cfg.d.ts ├── ttcn-cfg.js ├── ttcn.d.ts ├── ttcn.js ├── turtle.d.ts ├── turtle.js ├── vb.d.ts ├── vb.js ├── vbscript.d.ts ├── vbscript.js ├── velocity.d.ts ├── velocity.js ├── verilog.d.ts ├── verilog.js ├── vhdl.d.ts ├── vhdl.js ├── wast.d.ts ├── wast.js ├── webidl.d.ts ├── webidl.js ├── xml.d.ts ├── xml.js ├── xquery.d.ts ├── xquery.js ├── yacas.d.ts ├── yacas.js ├── yaml.d.ts ├── yaml.js ├── z80.d.ts └── z80.js ├── package.json └── rollup.config.js /.github/workflows/dispatch.yml: -------------------------------------------------------------------------------- 1 | name: Trigger CI 2 | on: push 3 | 4 | jobs: 5 | build: 6 | name: Dispatch to main repo 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Emit repository_dispatch 10 | uses: mvasigh/dispatch-action@main 11 | with: 12 | # You should create a personal access token and store 
it in your repository 13 | token: ${{ secrets.DISPATCH_AUTH }} 14 | repo: dev 15 | owner: codemirror 16 | event_type: push 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /node_modules 2 | package-lock.json 3 | /mode/*.cjs 4 | .tern-* -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | /src 2 | /test 3 | /node_modules 4 | .tern-* -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 6.5.1 (2025-04-22) 2 | 3 | ### Bug fixes 4 | 5 | Fix a crash in the Stylus mode's indentation logic. 6 | 7 | ## 6.5.0 (2025-03-13) 8 | 9 | ### New features 10 | 11 | Simple modes can now include a `mergeTokens` option in their language data to disable token merging. 12 | 13 | ## 6.4.3 (2025-02-12) 14 | 15 | ### Bug fixes 16 | 17 | Make Pascal keyword matching case-insensitive. 18 | 19 | Fix a crash in simple-mode.js when using the `dontIndentStates` feature. 20 | 21 | Fix a bug in tokenizing multiline strings in XQuery. 22 | 23 | ## 6.4.2 (2024-11-05) 24 | 25 | ### Bug fixes 26 | 27 | Fix an issue in the Groovy mode where interpolated variable style would continue after whitespace. 28 | 29 | Add support for underscore separators in numbers in the Dart mode. 30 | 31 | ## 6.4.1 (2024-08-15) 32 | 33 | ### Bug fixes 34 | 35 | Stop treating closing brackets as brackets in the Common Lisp mode. 36 | 37 | Fix a bug where the Stylus mode would crash when queried for indentation. 38 | 39 | ## 6.4.0 (2024-04-05) 40 | 41 | ### Bug fixes 42 | 43 | Only match Solr operator words when they are upper-case. 44 | 45 | Fix an infinite loop when tokenizing heredoc strings in the Crystal mode. 46 | 47 | ### New features 48 | 49 | Add the old Pug mode. 50 | 51 | ## 6.3.3 (2023-07-20) 52 | 53 | ### Bug fixes 54 | 55 | In Shell mode, don't allow spaces in heredoc tokens. 56 | 57 | ## 6.3.2 (2023-03-20) 58 | 59 | ### Bug fixes 60 | 61 | Fix tokenizing of character literals in the Scala mode. 62 | 63 | ## 6.3.1 (2022-11-24) 64 | 65 | ### Bug fixes 66 | 67 | In JavaScript, properly parse keywords like `static` when in front of a private property. 68 | 69 | ## 6.3.0 (2022-11-18) 70 | 71 | ### New features 72 | 73 | Add the old PegJS mode. 74 | 75 | ## 6.2.0 (2022-10-24) 76 | 77 | ### Bug fixes 78 | 79 | Include type declarations for mode/simple-mode.js. 80 | 81 | ### New features 82 | 83 | Include a name for each mode in the stream parser objects. 84 | 85 | ## 6.1.0 (2022-06-17) 86 | 87 | ### Bug fixes 88 | 89 | Add structured concurrency keywords to the Swift mode. Update the readme to follow interface changes. 90 | 91 | ### New features 92 | 93 | Add the Sass mode from CodeMirror 5. 94 | 95 | ## 6.0.0 (2022-06-08) 96 | 97 | ### Bug fixes 98 | 99 | Add line comment syntax metadata to the Toml mode. 100 | 101 | ## 0.20.0 (2022-04-20) 102 | 103 | ### Breaking changes 104 | 105 | Update dependencies to 0.20.0 106 | 107 | ## 0.19.1 (2022-03-03) 108 | 109 | ### Bug fixes 110 | 111 | Fix an issue causing the Lua mode to indent everything one unit too far. 112 | 113 | Fix a bug in the Swift mode when indenting an empty block after exploding with `insertNewlineAndIndent`.
114 | 115 | ## 0.19.0 (2021-08-11) 116 | 117 | ### Breaking changes 118 | 119 | Update dependencies to 0.19.0 120 | 121 | ## 0.18.1 (2021-06-24) 122 | 123 | ### Bug fixes 124 | 125 | Fix internal imports to include the extension in the path. 126 | 127 | ## 0.18.0 (2021-03-03) 128 | 129 | ### Breaking changes 130 | 131 | Update dependencies to 0.18. 132 | 133 | ## 0.17.1 (2021-01-06) 134 | 135 | ### New features 136 | 137 | The package now also exports a CommonJS module. 138 | 139 | ## 0.17.0 (2020-12-29) 140 | 141 | ### Breaking changes 142 | 143 | First numbered release. 144 | 145 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (C) 2018-2021 by Marijn Haverbeke and others 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /mode/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # @codemirror/legacy-modes [![NPM version](https://img.shields.io/npm/v/@codemirror/legacy-modes.svg)](https://www.npmjs.org/package/@codemirror/legacy-modes) 4 | 5 | [ [**WEBSITE**](https://codemirror.net/) | [**ISSUES**](https://github.com/codemirror/dev/issues) | [**FORUM**](https://discuss.codemirror.net/c/next/) | [**CHANGELOG**](https://github.com/codemirror/legacy-modes/blob/main/CHANGELOG.md) ] 6 | 7 | This package implements a collection of ported [stream 8 | language](https://codemirror.net/docs/ref#language.StreamParser) modes for 9 | the [CodeMirror](https://codemirror.net/) code editor. Each mode is 10 | available as a separate script file, under 11 | `"@codemirror/legacy-modes/mode/[name]"`, and exports the values 12 | listed below. 13 | 14 | The [project page](https://codemirror.net/) has more information, a 15 | number of [examples](https://codemirror.net/examples/) and the 16 | [documentation](https://codemirror.net/docs/). 17 | 18 | This code is released under an 19 | [MIT license](https://github.com/codemirror/legacy-modes/tree/main/LICENSE). 20 | 21 | We aim to be an inclusive, welcoming community. To make that explicit, 22 | we have a [code of 23 | conduct](http://contributor-covenant.org/version/1/1/0/) that applies 24 | to communication around the project.
25 | 26 | ## Usage 27 | 28 | Using modes from this package works like this: 29 | 30 | - Install this package and the 31 | [`@codemirror/language`](https://codemirror.net/docs/ref/#language) 32 | package. 33 | 34 | - Find the `StreamParser` instance you need in the reference below. 35 | 36 | - Add `StreamLanguage.define(theParser)` to your editor's 37 | configuration. 38 | 39 | For example, to load the Lua mode, you'd do something like... 40 | 41 | ```javascript 42 | import {StreamLanguage} from "@codemirror/language" 43 | import {lua} from "@codemirror/legacy-modes/mode/lua" 44 | 45 | import {EditorView, basicSetup} from "codemirror" 46 | 47 | let view = new EditorView({ 48 | extensions: [basicSetup, StreamLanguage.define(lua)] 49 | }) 50 | ``` 51 | 52 | ## API Reference 53 | -------------------------------------------------------------------------------- /mode/apl.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const apl: StreamParser 3 | -------------------------------------------------------------------------------- /mode/apl.js: -------------------------------------------------------------------------------- 1 | var builtInFuncs = { 2 | "+": ["conjugate", "add"], 3 | "−": ["negate", "subtract"], 4 | "×": ["signOf", "multiply"], 5 | "÷": ["reciprocal", "divide"], 6 | "⌈": ["ceiling", "greaterOf"], 7 | "⌊": ["floor", "lesserOf"], 8 | "∣": ["absolute", "residue"], 9 | "⍳": ["indexGenerate", "indexOf"], 10 | "?": ["roll", "deal"], 11 | "⋆": ["exponentiate", "toThePowerOf"], 12 | "⍟": ["naturalLog", "logToTheBase"], 13 | "○": ["piTimes", "circularFuncs"], 14 | "!": ["factorial", "binomial"], 15 | "⌹": ["matrixInverse", "matrixDivide"], 16 | "<": [null, "lessThan"], 17 | "≤": [null, "lessThanOrEqual"], 18 | "=": [null, "equals"], 19 | ">": [null, "greaterThan"], 20 | "≥": [null, "greaterThanOrEqual"], 21 | "≠": [null, "notEqual"], 22 | "≡": ["depth", "match"], 23 | "≢": [null, "notMatch"], 24 | "∈": ["enlist", "membership"], 25 | "⍷": [null, "find"], 26 | "∪": ["unique", "union"], 27 | "∩": [null, "intersection"], 28 | "∼": ["not", "without"], 29 | "∨": [null, "or"], 30 | "∧": [null, "and"], 31 | "⍱": [null, "nor"], 32 | "⍲": [null, "nand"], 33 | "⍴": ["shapeOf", "reshape"], 34 | ",": ["ravel", "catenate"], 35 | "⍪": [null, "firstAxisCatenate"], 36 | "⌽": ["reverse", "rotate"], 37 | "⊖": ["axis1Reverse", "axis1Rotate"], 38 | "⍉": ["transpose", null], 39 | "↑": ["first", "take"], 40 | "↓": [null, "drop"], 41 | "⊂": ["enclose", "partitionWithAxis"], 42 | "⊃": ["diclose", "pick"], 43 | "⌷": [null, "index"], 44 | "⍋": ["gradeUp", null], 45 | "⍒": ["gradeDown", null], 46 | "⊤": ["encode", null], 47 | "⊥": ["decode", null], 48 | "⍕": ["format", "formatByExample"], 49 | "⍎": ["execute", null], 50 | "⊣": ["stop", "left"], 51 | "⊢": ["pass", "right"] 52 | }; 53 | 54 | var isOperator = /[\.\/⌿⍀¨⍣]/; 55 | var isNiladic = /⍬/; 56 | var isFunction = /[\+−×÷⌈⌊∣⍳\?⋆⍟○!⌹<≤=>≥≠≡≢∈⍷∪∩∼∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢]/; 57 | var isArrow = /←/; 58 | var isComment = /[⍝#].*$/; 59 | 60 | var stringEater = function(type) { 61 | var prev; 62 | prev = false; 63 | return function(c) { 64 | prev = c; 65 | if (c === type) { 66 | return prev === "\\"; 67 | } 68 | return true; 69 | }; 70 | }; 71 | 72 | export const apl = { 73 | name: "apl", 74 | startState: function() { 75 | return { 76 | prev: false, 77 | func: false, 78 | op: false, 79 | string: false, 80 | escape: false 81 | }; 82 | }, 83 | token: function(stream, 
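/* stream: the current line being tokenized; state: the object returned by startState above */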
state) { 84 | var ch; 85 | if (stream.eatSpace()) { 86 | return null; 87 | } 88 | ch = stream.next(); 89 | if (ch === '"' || ch === "'") { 90 | stream.eatWhile(stringEater(ch)); 91 | stream.next(); 92 | state.prev = true; 93 | return "string"; 94 | } 95 | if (/[\[{\(]/.test(ch)) { 96 | state.prev = false; 97 | return null; 98 | } 99 | if (/[\]}\)]/.test(ch)) { 100 | state.prev = true; 101 | return null; 102 | } 103 | if (isNiladic.test(ch)) { 104 | state.prev = false; 105 | return "atom"; 106 | } 107 | if (/[¯\d]/.test(ch)) { 108 | if (state.func) { 109 | state.func = false; 110 | state.prev = false; 111 | } else { 112 | state.prev = true; 113 | } 114 | stream.eatWhile(/[\w\.]/); 115 | return "number"; 116 | } 117 | if (isOperator.test(ch)) { 118 | return "operator" 119 | } 120 | if (isArrow.test(ch)) { 121 | return "operator"; 122 | } 123 | if (isFunction.test(ch)) { 124 | state.func = true; 125 | state.prev = false; 126 | return builtInFuncs[ch] ? "variableName.function.standard" : "variableName.function" 127 | } 128 | if (isComment.test(ch)) { 129 | stream.skipToEnd(); 130 | return "comment"; 131 | } 132 | if (ch === "∘" && stream.peek() === ".") { 133 | stream.next(); 134 | return "variableName.function"; 135 | } 136 | stream.eatWhile(/[\w\$_]/); 137 | state.prev = true; 138 | return "keyword"; 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /mode/asciiarmor.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const asciiArmor: StreamParser 3 | -------------------------------------------------------------------------------- /mode/asciiarmor.js: -------------------------------------------------------------------------------- 1 | function errorIfNotEmpty(stream) { 2 | var nonWS = stream.match(/^\s*\S/); 3 | stream.skipToEnd(); 4 | return nonWS ? 
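/* any non-whitespace content here is highlighted as an error */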
"error" : null; 5 | } 6 | 7 | export const asciiArmor = { 8 | name: "asciiarmor", 9 | token: function(stream, state) { 10 | var m; 11 | if (state.state == "top") { 12 | if (stream.sol() && (m = stream.match(/^-----BEGIN (.*)?-----\s*$/))) { 13 | state.state = "headers"; 14 | state.type = m[1]; 15 | return "tag"; 16 | } 17 | return errorIfNotEmpty(stream); 18 | } else if (state.state == "headers") { 19 | if (stream.sol() && stream.match(/^\w+:/)) { 20 | state.state = "header"; 21 | return "atom"; 22 | } else { 23 | var result = errorIfNotEmpty(stream); 24 | if (result) state.state = "body"; 25 | return result; 26 | } 27 | } else if (state.state == "header") { 28 | stream.skipToEnd(); 29 | state.state = "headers"; 30 | return "string"; 31 | } else if (state.state == "body") { 32 | if (stream.sol() && (m = stream.match(/^-----END (.*)?-----\s*$/))) { 33 | if (m[1] != state.type) return "error"; 34 | state.state = "end"; 35 | return "tag"; 36 | } else { 37 | if (stream.eatWhile(/[A-Za-z0-9+\/=]/)) { 38 | return null; 39 | } else { 40 | stream.next(); 41 | return "error"; 42 | } 43 | } 44 | } else if (state.state == "end") { 45 | return errorIfNotEmpty(stream); 46 | } 47 | }, 48 | blankLine: function(state) { 49 | if (state.state == "headers") state.state = "body"; 50 | }, 51 | startState: function() { 52 | return {state: "top", type: null}; 53 | } 54 | }; 55 | -------------------------------------------------------------------------------- /mode/asn1.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare function asn1(conf: { 3 | keywords?: {[word: string]: any}, 4 | cmipVerbs?: {[word: string]: any}, 5 | compareTypes?: {[word: string]: any}, 6 | status?: {[word: string]: any}, 7 | tags?: {[word: string]: any}, 8 | storage?: {[word: string]: any}, 9 | modifier?: {[word: string]: any}, 10 | accessTypes?: {[word: string]: any}, 11 | multiLineStrings?: boolean 12 | }): StreamParser 13 | -------------------------------------------------------------------------------- /mode/asterisk.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const asterisk: StreamParser 3 | -------------------------------------------------------------------------------- /mode/brainfuck.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const brainfuck: StreamParser 3 | -------------------------------------------------------------------------------- /mode/brainfuck.js: -------------------------------------------------------------------------------- 1 | var reserve = "><+-.,[]".split(""); 2 | /* 3 | comments can be either: 4 | placed behind lines 5 | 6 | +++ this is a comment 7 | 8 | where reserved characters cannot be used 9 | or in a loop 10 | [ 11 | this is ok to use [ ] and stuff 12 | ] 13 | or preceded by # 14 | */ 15 | export const brainfuck = { 16 | name: "brainfuck", 17 | startState: function() { 18 | return { 19 | commentLine: false, 20 | left: 0, 21 | right: 0, 22 | commentLoop: false 23 | } 24 | }, 25 | token: function(stream, state) { 26 | if (stream.eatSpace()) return null 27 | if(stream.sol()){ 28 | state.commentLine = false; 29 | } 30 | var ch = stream.next().toString(); 31 | if(reserve.indexOf(ch) !== -1){ 32 | if(state.commentLine === true){ 33 | if(stream.eol()){ 34 | state.commentLine = 
false; 35 | } 36 | return "comment"; 37 | } 38 | if(ch === "]" || ch === "["){ 39 | if(ch === "["){ 40 | state.left++; 41 | } 42 | else{ 43 | state.right++; 44 | } 45 | return "bracket"; 46 | } 47 | else if(ch === "+" || ch === "-"){ 48 | return "keyword"; 49 | } 50 | else if(ch === "<" || ch === ">"){ 51 | return "atom"; 52 | } 53 | else if(ch === "." || ch === ","){ 54 | return "def"; 55 | } 56 | } 57 | else{ 58 | state.commentLine = true; 59 | if(stream.eol()){ 60 | state.commentLine = false; 61 | } 62 | return "comment"; 63 | } 64 | if(stream.eol()){ 65 | state.commentLine = false; 66 | } 67 | } 68 | }; 69 | -------------------------------------------------------------------------------- /mode/clike.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare function clike(conf: { 3 | name: string, 4 | statementIndentUnit?: number, 5 | dontAlignCalls?: boolean, 6 | keywords?: {[word: string]: any}, 7 | types?: {[word: string]: any}, 8 | builtin?: {[word: string]: any}, 9 | blockKeywords?: {[word: string]: any}, 10 | atoms?: {[word: string]: any}, 11 | hooks?: {[hook: string]: any}, 12 | multiLineStrings?: boolean, 13 | indentStatements?: boolean, 14 | indentSwitch?: boolean, 15 | namespaceSeparator?: string, 16 | isPunctuationChar?: RegExp, 17 | numberStart?: RegExp, 18 | number?: RegExp, 19 | isOperatorChar?: RegExp, 20 | isIdentifierChar?: RegExp, 21 | isReservedIdentifier?: (id: string) => boolean 22 | }): StreamParser 23 | export declare const c: StreamParser 24 | export declare const cpp: StreamParser 25 | export declare const java: StreamParser 26 | export declare const csharp: StreamParser 27 | export declare const scala: StreamParser 28 | export declare const kotlin: StreamParser 29 | export declare const shader: StreamParser 30 | export declare const nesC: StreamParser 31 | export declare const objectiveC: StreamParser 32 | export declare const objectiveCpp: StreamParser 33 | export declare const squirrel: StreamParser 34 | export declare const ceylon: StreamParser 35 | export declare const dart: StreamParser 36 | -------------------------------------------------------------------------------- /mode/clojure.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const clojure: StreamParser 3 | -------------------------------------------------------------------------------- /mode/cmake.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const cmake: StreamParser 3 | -------------------------------------------------------------------------------- /mode/cmake.js: -------------------------------------------------------------------------------- 1 | var variable_regex = /({)?[a-zA-Z0-9_]+(})?/; 2 | 3 | function tokenString(stream, state) { 4 | var current, prev, found_var = false; 5 | while (!stream.eol() && (current = stream.next()) != state.pending) { 6 | if (current === '$' && prev != '\\' && state.pending == '"') { 7 | found_var = true; 8 | break; 9 | } 10 | prev = current; 11 | } 12 | if (found_var) { 13 | stream.backUp(1); 14 | } 15 | if (current == state.pending) { 16 | state.continueString = false; 17 | } else { 18 | state.continueString = true; 19 | } 20 | return "string"; 21 | } 22 | 23 | function tokenize(stream, state) { 24 | var ch = stream.next(); 25 | 26 | // 
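Dispatch on the character just read.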
Have we found a variable? 27 | if (ch === '$') { 28 | if (stream.match(variable_regex)) { 29 | return 'variableName.special'; 30 | } 31 | return 'variable'; 32 | } 33 | // Should we still be looking for the end of a string? 34 | if (state.continueString) { 35 | // If so, go through the loop again 36 | stream.backUp(1); 37 | return tokenString(stream, state); 38 | } 39 | // Do we just have a function on our hands? 40 | // In 'cmake_minimum_required (VERSION 2.8.8)', 'cmake_minimum_required' is matched 41 | if (stream.match(/(\s+)?\w+\(/) || stream.match(/(\s+)?\w+\ \(/)) { 42 | stream.backUp(1); 43 | return 'def'; 44 | } 45 | if (ch == "#") { 46 | stream.skipToEnd(); 47 | return "comment"; 48 | } 49 | // Have we found a string? 50 | if (ch == "'" || ch == '"') { 51 | // Store the type (single or double) 52 | state.pending = ch; 53 | // Perform the looping function to find the end 54 | return tokenString(stream, state); 55 | } 56 | if (ch == '(' || ch == ')') { 57 | return 'bracket'; 58 | } 59 | if (ch.match(/[0-9]/)) { 60 | return 'number'; 61 | } 62 | stream.eatWhile(/[\w-]/); 63 | return null; 64 | } 65 | export const cmake = { 66 | name: "cmake", 67 | startState: function () { 68 | var state = {}; 69 | state.inDefinition = false; 70 | state.inInclude = false; 71 | state.continueString = false; 72 | state.pending = false; 73 | return state; 74 | }, 75 | token: function (stream, state) { 76 | if (stream.eatSpace()) return null; 77 | return tokenize(stream, state); 78 | } 79 | }; 80 | 81 | -------------------------------------------------------------------------------- /mode/cobol.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const cobol: StreamParser 3 | -------------------------------------------------------------------------------- /mode/coffeescript.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const coffeeScript: StreamParser 3 | -------------------------------------------------------------------------------- /mode/commonlisp.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const commonLisp: StreamParser 3 | -------------------------------------------------------------------------------- /mode/commonlisp.js: -------------------------------------------------------------------------------- 1 | var specialForm = /^(block|let*|return-from|catch|load-time-value|setq|eval-when|locally|symbol-macrolet|flet|macrolet|tagbody|function|multiple-value-call|the|go|multiple-value-prog1|throw|if|progn|unwind-protect|labels|progv|let|quote)$/; 2 | var assumeBody = /^with|^def|^do|^prog|case$|^cond$|bind$|when$|unless$/; 3 | var numLiteral = /^(?:[+\-]?(?:\d+|\d*\.\d+)(?:[efd][+\-]?\d+)?|[+\-]?\d+(?:\/[+\-]?\d+)?|#b[+\-]?[01]+|#o[+\-]?[0-7]+|#x[+\-]?[\da-f]+)/; 4 | var symbol = /[^\s'`,@()\[\]";]/; 5 | var type; 6 | 7 | function readSym(stream) { 8 | var ch; 9 | while (ch = stream.next()) { 10 | if (ch == "\\") stream.next(); 11 | else if (!symbol.test(ch)) { stream.backUp(1); break; } 12 | } 13 | return stream.current(); 14 | } 15 | 16 | function base(stream, state) { 17 | if (stream.eatSpace()) {type = "ws"; return null;} 18 | if (stream.match(numLiteral)) return "number"; 19 | var ch = stream.next(); 20 | if (ch == "\\") ch = stream.next(); 21 | 22 | if (ch == '"') 
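/* hand off to the string tokenizer until the closing quote is found */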
return (state.tokenize = inString)(stream, state); 23 | else if (ch == "(") { type = "open"; return "bracket"; } 24 | else if (ch == ")") { type = "close"; return "bracket"; } 25 | else if (ch == ";") { stream.skipToEnd(); type = "ws"; return "comment"; } 26 | else if (/['`,@]/.test(ch)) return null; 27 | else if (ch == "|") { 28 | if (stream.skipTo("|")) { stream.next(); return "variableName"; } 29 | else { stream.skipToEnd(); return "error"; } 30 | } else if (ch == "#") { 31 | var ch = stream.next(); 32 | if (ch == "(") { type = "open"; return "bracket"; } 33 | else if (/[+\-=\.']/.test(ch)) return null; 34 | else if (/\d/.test(ch) && stream.match(/^\d*#/)) return null; 35 | else if (ch == "|") return (state.tokenize = inComment)(stream, state); 36 | else if (ch == ":") { readSym(stream); return "meta"; } 37 | else if (ch == "\\") { stream.next(); readSym(stream); return "string.special" } 38 | else return "error"; 39 | } else { 40 | var name = readSym(stream); 41 | if (name == ".") return null; 42 | type = "symbol"; 43 | if (name == "nil" || name == "t" || name.charAt(0) == ":") return "atom"; 44 | if (state.lastType == "open" && (specialForm.test(name) || assumeBody.test(name))) return "keyword"; 45 | if (name.charAt(0) == "&") return "variableName.special"; 46 | return "variableName"; 47 | } 48 | } 49 | 50 | function inString(stream, state) { 51 | var escaped = false, next; 52 | while (next = stream.next()) { 53 | if (next == '"' && !escaped) { state.tokenize = base; break; } 54 | escaped = !escaped && next == "\\"; 55 | } 56 | return "string"; 57 | } 58 | 59 | function inComment(stream, state) { 60 | var next, last; 61 | while (next = stream.next()) { 62 | if (next == "#" && last == "|") { state.tokenize = base; break; } 63 | last = next; 64 | } 65 | type = "ws"; 66 | return "comment"; 67 | } 68 | 69 | export const commonLisp = { 70 | name: "commonlisp", 71 | startState: function () { 72 | return {ctx: {prev: null, start: 0, indentTo: 0}, lastType: null, tokenize: base}; 73 | }, 74 | 75 | token: function (stream, state) { 76 | if (stream.sol() && typeof state.ctx.indentTo != "number") 77 | state.ctx.indentTo = state.ctx.start + 1; 78 | 79 | type = null; 80 | var style = state.tokenize(stream, state); 81 | if (type != "ws") { 82 | if (state.ctx.indentTo == null) { 83 | if (type == "symbol" && assumeBody.test(stream.current())) 84 | state.ctx.indentTo = state.ctx.start + stream.indentUnit; 85 | else 86 | state.ctx.indentTo = "next"; 87 | } else if (state.ctx.indentTo == "next") { 88 | state.ctx.indentTo = stream.column(); 89 | } 90 | state.lastType = type; 91 | } 92 | if (type == "open") state.ctx = {prev: state.ctx, start: stream.column(), indentTo: null}; 93 | else if (type == "close") state.ctx = state.ctx.prev || state.ctx; 94 | return style; 95 | }, 96 | 97 | indent: function (state) { 98 | var i = state.ctx.indentTo; 99 | return typeof i == "number" ? 
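/* indent to the recorded column, or just past the opening bracket */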
i : state.ctx.start + 1; 100 | }, 101 | 102 | languageData: { 103 | commentTokens: {line: ";;", block: {open: "#|", close: "|#"}}, 104 | closeBrackets: {brackets: ["(", "[", "{", '"']} 105 | } 106 | }; 107 | 108 | -------------------------------------------------------------------------------- /mode/crystal.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const crystal: StreamParser 3 | -------------------------------------------------------------------------------- /mode/css.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const css: StreamParser 3 | export declare const sCSS: StreamParser 4 | export declare const less: StreamParser 5 | export declare const gss: StreamParser 6 | -------------------------------------------------------------------------------- /mode/cypher.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const cypher: StreamParser 3 | -------------------------------------------------------------------------------- /mode/cypher.js: -------------------------------------------------------------------------------- 1 | var wordRegexp = function(words) { 2 | return new RegExp("^(?:" + words.join("|") + ")$", "i"); 3 | }; 4 | 5 | var tokenBase = function(stream/*, state*/) { 6 | curPunc = null; 7 | var ch = stream.next(); 8 | if (ch ==='"') { 9 | stream.match(/^.*?"/); 10 | return "string"; 11 | } 12 | if (ch === "'") { 13 | stream.match(/^.*?'/); 14 | return "string"; 15 | } 16 | if (/[{}\(\),\.;\[\]]/.test(ch)) { 17 | curPunc = ch; 18 | return "punctuation"; 19 | } else if (ch === "/" && stream.eat("/")) { 20 | stream.skipToEnd(); 21 | return "comment"; 22 | } else if (operatorChars.test(ch)) { 23 | stream.eatWhile(operatorChars); 24 | return null; 25 | } else { 26 | stream.eatWhile(/[_\w\d]/); 27 | if (stream.eat(":")) { 28 | stream.eatWhile(/[\w\d_\-]/); 29 | return "atom"; 30 | } 31 | var word = stream.current(); 32 | if (funcs.test(word)) return "builtin"; 33 | if (preds.test(word)) return "def"; 34 | if (keywords.test(word) || systemKeywords.test(word)) return "keyword"; 35 | return "variable"; 36 | } 37 | }; 38 | var pushContext = function(state, type, col) { 39 | return state.context = { 40 | prev: state.context, 41 | indent: state.indent, 42 | col: col, 43 | type: type 44 | }; 45 | }; 46 | var popContext = function(state) { 47 | state.indent = state.context.indent; 48 | return state.context = state.context.prev; 49 | }; 50 | var curPunc; 51 | var funcs = wordRegexp(["abs", "acos", "allShortestPaths", "asin", "atan", "atan2", "avg", "ceil", "coalesce", "collect", "cos", "cot", "count", "degrees", "e", "endnode", "exp", "extract", "filter", "floor", "haversin", "head", "id", "keys", "labels", "last", "left", "length", "log", "log10", "lower", "ltrim", "max", "min", "node", "nodes", "percentileCont", "percentileDisc", "pi", "radians", "rand", "range", "reduce", "rel", "relationship", "relationships", "replace", "reverse", "right", "round", "rtrim", "shortestPath", "sign", "sin", "size", "split", "sqrt", "startnode", "stdev", "stdevp", "str", "substring", "sum", "tail", "tan", "timestamp", "toFloat", "toInt", "toString", "trim", "type", "upper"]); 52 | var preds = wordRegexp(["all", "and", "any", "contains", "exists", "has", "in", "none", "not", 
"or", "single", "xor"]); 53 | var keywords = wordRegexp(["as", "asc", "ascending", "assert", "by", "case", "commit", "constraint", "create", "csv", "cypher", "delete", "desc", "descending", "detach", "distinct", "drop", "else", "end", "ends", "explain", "false", "fieldterminator", "foreach", "from", "headers", "in", "index", "is", "join", "limit", "load", "match", "merge", "null", "on", "optional", "order", "periodic", "profile", "remove", "return", "scan", "set", "skip", "start", "starts", "then", "true", "union", "unique", "unwind", "using", "when", "where", "with", "call", "yield"]); 54 | var systemKeywords = wordRegexp(["access", "active", "assign", "all", "alter", "as", "catalog", "change", "copy", "create", "constraint", "constraints", "current", "database", "databases", "dbms", "default", "deny", "drop", "element", "elements", "exists", "from", "grant", "graph", "graphs", "if", "index", "indexes", "label", "labels", "management", "match", "name", "names", "new", "node", "nodes", "not", "of", "on", "or", "password", "populated", "privileges", "property", "read", "relationship", "relationships", "remove", "replace", "required", "revoke", "role", "roles", "set", "show", "start", "status", "stop", "suspended", "to", "traverse", "type", "types", "user", "users", "with", "write"]); 55 | var operatorChars = /[*+\-<>=&|~%^]/; 56 | 57 | export const cypher = { 58 | name: "cypher", 59 | startState: function() { 60 | return { 61 | tokenize: tokenBase, 62 | context: null, 63 | indent: 0, 64 | col: 0 65 | }; 66 | }, 67 | token: function(stream, state) { 68 | if (stream.sol()) { 69 | if (state.context && (state.context.align == null)) { 70 | state.context.align = false; 71 | } 72 | state.indent = stream.indentation(); 73 | } 74 | if (stream.eatSpace()) { 75 | return null; 76 | } 77 | var style = state.tokenize(stream, state); 78 | if (style !== "comment" && state.context && (state.context.align == null) && state.context.type !== "pattern") { 79 | state.context.align = true; 80 | } 81 | if (curPunc === "(") { 82 | pushContext(state, ")", stream.column()); 83 | } else if (curPunc === "[") { 84 | pushContext(state, "]", stream.column()); 85 | } else if (curPunc === "{") { 86 | pushContext(state, "}", stream.column()); 87 | } else if (/[\]\}\)]/.test(curPunc)) { 88 | while (state.context && state.context.type === "pattern") { 89 | popContext(state); 90 | } 91 | if (state.context && curPunc === state.context.type) { 92 | popContext(state); 93 | } 94 | } else if (curPunc === "." && state.context && state.context.type === "pattern") { 95 | popContext(state); 96 | } else if (/atom|string|variable/.test(style) && state.context) { 97 | if (/[\}\]]/.test(state.context.type)) { 98 | pushContext(state, "pattern", stream.column()); 99 | } else if (state.context.type === "pattern" && !state.context.align) { 100 | state.context.align = true; 101 | state.context.col = stream.column(); 102 | } 103 | } 104 | return style; 105 | }, 106 | indent: function(state, textAfter, cx) { 107 | var firstChar = textAfter && textAfter.charAt(0); 108 | var context = state.context; 109 | if (/[\]\}]/.test(firstChar)) { 110 | while (context && context.type === "pattern") { 111 | context = context.prev; 112 | } 113 | } 114 | var closing = context && firstChar === context.type; 115 | if (!context) return 0; 116 | if (context.type === "keywords") return null 117 | if (context.align) return context.col + (closing ? 0 : 1); 118 | return context.indent + (closing ? 
0 : cx.unit); 119 | } 120 | }; 121 | -------------------------------------------------------------------------------- /mode/d.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const d: StreamParser 3 | -------------------------------------------------------------------------------- /mode/diff.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const diff: StreamParser 3 | -------------------------------------------------------------------------------- /mode/diff.js: -------------------------------------------------------------------------------- 1 | var TOKEN_NAMES = { 2 | '+': 'inserted', 3 | '-': 'deleted', 4 | '@': 'meta' 5 | }; 6 | 7 | export const diff = { 8 | name: "diff", 9 | token: function(stream) { 10 | var tw_pos = stream.string.search(/[\t ]+?$/); 11 | 12 | if (!stream.sol() || tw_pos === 0) { 13 | stream.skipToEnd(); 14 | return ("error " + ( 15 | TOKEN_NAMES[stream.string.charAt(0)] || '')).replace(/ $/, ''); 16 | } 17 | 18 | var token_name = TOKEN_NAMES[stream.peek()] || stream.skipToEnd(); 19 | 20 | if (tw_pos === -1) { 21 | stream.skipToEnd(); 22 | } else { 23 | stream.pos = tw_pos; 24 | } 25 | 26 | return token_name; 27 | } 28 | }; 29 | 30 | -------------------------------------------------------------------------------- /mode/dockerfile.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const dockerFile: StreamParser 3 | -------------------------------------------------------------------------------- /mode/dockerfile.js: -------------------------------------------------------------------------------- 1 | import {simpleMode} from "./simple-mode.js" 2 | 3 | var from = "from"; 4 | var fromRegex = new RegExp("^(\\s*)\\b(" + from + ")\\b", "i"); 5 | 6 | var shells = ["run", "cmd", "entrypoint", "shell"]; 7 | var shellsAsArrayRegex = new RegExp("^(\\s*)(" + shells.join('|') + ")(\\s+\\[)", "i"); 8 | 9 | var expose = "expose"; 10 | var exposeRegex = new RegExp("^(\\s*)(" + expose + ")(\\s+)", "i"); 11 | 12 | var others = [ 13 | "arg", "from", "maintainer", "label", "env", 14 | "add", "copy", "volume", "user", 15 | "workdir", "onbuild", "stopsignal", "healthcheck", "shell" 16 | ]; 17 | 18 | // Collect all Dockerfile directives 19 | var instructions = [from, expose].concat(shells).concat(others), 20 | instructionRegex = "(" + instructions.join('|') + ")", 21 | instructionOnlyLine = new RegExp("^(\\s*)" + instructionRegex + "(\\s*)(#.*)?$", "i"), 22 | instructionWithArguments = new RegExp("^(\\s*)" + instructionRegex + "(\\s+)", "i"); 23 | 24 | export const dockerFile = simpleMode({ 25 | start: [ 26 | // Block comment: This is a line starting with a comment 27 | { 28 | regex: /^\s*#.*$/, 29 | sol: true, 30 | token: "comment" 31 | }, 32 | { 33 | regex: fromRegex, 34 | token: [null, "keyword"], 35 | sol: true, 36 | next: "from" 37 | }, 38 | // Highlight an instruction without any arguments (for convenience) 39 | { 40 | regex: instructionOnlyLine, 41 | token: [null, "keyword", null, "error"], 42 | sol: true 43 | }, 44 | { 45 | regex: shellsAsArrayRegex, 46 | token: [null, "keyword", null], 47 | sol: true, 48 | next: "array" 49 | }, 50 | { 51 | regex: exposeRegex, 52 | token: [null, "keyword", null], 53 | sol: true, 54 | next: "expose" 55 | }, 56 | // Highlight an 
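(otherwise unmatched)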
instruction followed by arguments 57 | { 58 | regex: instructionWithArguments, 59 | token: [null, "keyword", null], 60 | sol: true, 61 | next: "arguments" 62 | }, 63 | { 64 | regex: /./, 65 | token: null 66 | } 67 | ], 68 | from: [ 69 | { 70 | regex: /\s*$/, 71 | token: null, 72 | next: "start" 73 | }, 74 | { 75 | // Line comment without instruction arguments is an error 76 | regex: /(\s*)(#.*)$/, 77 | token: [null, "error"], 78 | next: "start" 79 | }, 80 | { 81 | regex: /(\s*\S+\s+)(as)/i, 82 | token: [null, "keyword"], 83 | next: "start" 84 | }, 85 | // Fail safe return to start 86 | { 87 | token: null, 88 | next: "start" 89 | } 90 | ], 91 | single: [ 92 | { 93 | regex: /(?:[^\\']|\\.)/, 94 | token: "string" 95 | }, 96 | { 97 | regex: /'/, 98 | token: "string", 99 | pop: true 100 | } 101 | ], 102 | double: [ 103 | { 104 | regex: /(?:[^\\"]|\\.)/, 105 | token: "string" 106 | }, 107 | { 108 | regex: /"/, 109 | token: "string", 110 | pop: true 111 | } 112 | ], 113 | array: [ 114 | { 115 | regex: /\]/, 116 | token: null, 117 | next: "start" 118 | }, 119 | { 120 | regex: /"(?:[^\\"]|\\.)*"?/, 121 | token: "string" 122 | } 123 | ], 124 | expose: [ 125 | { 126 | regex: /\d+$/, 127 | token: "number", 128 | next: "start" 129 | }, 130 | { 131 | regex: /[^\d]+$/, 132 | token: null, 133 | next: "start" 134 | }, 135 | { 136 | regex: /\d+/, 137 | token: "number" 138 | }, 139 | { 140 | regex: /[^\d]+/, 141 | token: null 142 | }, 143 | // Fail safe return to start 144 | { 145 | token: null, 146 | next: "start" 147 | } 148 | ], 149 | arguments: [ 150 | { 151 | regex: /^\s*#.*$/, 152 | sol: true, 153 | token: "comment" 154 | }, 155 | { 156 | regex: /"(?:[^\\"]|\\.)*"?$/, 157 | token: "string", 158 | next: "start" 159 | }, 160 | { 161 | regex: /"/, 162 | token: "string", 163 | push: "double" 164 | }, 165 | { 166 | regex: /'(?:[^\\']|\\.)*'?$/, 167 | token: "string", 168 | next: "start" 169 | }, 170 | { 171 | regex: /'/, 172 | token: "string", 173 | push: "single" 174 | }, 175 | { 176 | regex: /[^#"']+[\\`]$/, 177 | token: null 178 | }, 179 | { 180 | regex: /[^#"']+$/, 181 | token: null, 182 | next: "start" 183 | }, 184 | { 185 | regex: /[^#"']+/, 186 | token: null 187 | }, 188 | // Fail safe return to start 189 | { 190 | token: null, 191 | next: "start" 192 | } 193 | ], 194 | languageData: { 195 | commentTokens: {line: "#"} 196 | } 197 | }); 198 | 199 | -------------------------------------------------------------------------------- /mode/dtd.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const dtd: StreamParser 3 | -------------------------------------------------------------------------------- /mode/dtd.js: -------------------------------------------------------------------------------- 1 | var type; 2 | function ret(style, tp) {type = tp; return style;} 3 | 4 | function tokenBase(stream, state) { 5 | var ch = stream.next(); 6 | 7 | if (ch == "<" && stream.eat("!") ) { 8 | if (stream.eatWhile(/[\-]/)) { 9 | state.tokenize = tokenSGMLComment; 10 | return tokenSGMLComment(stream, state); 11 | } else if (stream.eatWhile(/[\w]/)) return ret("keyword", "doindent"); 12 | } else if (ch == "<" && stream.eat("?")) { //xml declaration 13 | state.tokenize = inBlock("meta", "?>"); 14 | return ret("meta", ch); 15 | } else if (ch == "#" && stream.eatWhile(/[\w]/)) return ret("atom", "tag"); 16 | else if (ch == "|") return ret("keyword", "separator"); 17 | else if (ch.match(/[\(\)\[\]\-\.,\+\?>]/)) return 
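/* punctuation: no style, but the character itself is recorded as the token type */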
ret(null, ch);//if(ch === ">") return ret(null, "endtag"); else 18 | else if (ch.match(/[\[\]]/)) return ret("rule", ch); 19 | else if (ch == "\"" || ch == "'") { 20 | state.tokenize = tokenString(ch); 21 | return state.tokenize(stream, state); 22 | } else if (stream.eatWhile(/[a-zA-Z\?\+\d]/)) { 23 | var sc = stream.current(); 24 | if( sc.substr(sc.length-1,sc.length).match(/\?|\+/) !== null )stream.backUp(1); 25 | return ret("tag", "tag"); 26 | } else if (ch == "%" || ch == "*" ) return ret("number", "number"); 27 | else { 28 | stream.eatWhile(/[\w\\\-_%.{,]/); 29 | return ret(null, null); 30 | } 31 | } 32 | 33 | function tokenSGMLComment(stream, state) { 34 | var dashes = 0, ch; 35 | while ((ch = stream.next()) != null) { 36 | if (dashes >= 2 && ch == ">") { 37 | state.tokenize = tokenBase; 38 | break; 39 | } 40 | dashes = (ch == "-") ? dashes + 1 : 0; 41 | } 42 | return ret("comment", "comment"); 43 | } 44 | 45 | function tokenString(quote) { 46 | return function(stream, state) { 47 | var escaped = false, ch; 48 | while ((ch = stream.next()) != null) { 49 | if (ch == quote && !escaped) { 50 | state.tokenize = tokenBase; 51 | break; 52 | } 53 | escaped = !escaped && ch == "\\"; 54 | } 55 | return ret("string", "tag"); 56 | }; 57 | } 58 | 59 | function inBlock(style, terminator) { 60 | return function(stream, state) { 61 | while (!stream.eol()) { 62 | if (stream.match(terminator)) { 63 | state.tokenize = tokenBase; 64 | break; 65 | } 66 | stream.next(); 67 | } 68 | return style; 69 | }; 70 | } 71 | 72 | export const dtd = { 73 | name: "dtd", 74 | startState: function() { 75 | return {tokenize: tokenBase, 76 | baseIndent: 0, 77 | stack: []}; 78 | }, 79 | 80 | token: function(stream, state) { 81 | if (stream.eatSpace()) return null; 82 | var style = state.tokenize(stream, state); 83 | 84 | var context = state.stack[state.stack.length-1]; 85 | if (stream.current() == "[" || type === "doindent" || type == "[") state.stack.push("rule"); 86 | else if (type === "endtag") state.stack[state.stack.length-1] = "endtag"; 87 | else if (stream.current() == "]" || type == "]" || (type == ">" && context == "rule")) state.stack.pop(); 88 | else if (type == "[") state.stack.push("["); 89 | return style; 90 | }, 91 | 92 | indent: function(state, textAfter, cx) { 93 | var n = state.stack.length; 94 | 95 | if( textAfter.charAt(0) === ']' )n--; 96 | else if(textAfter.substr(textAfter.length-1, textAfter.length) === ">"){ 97 | if(textAfter.substr(0,1) === "<") {} 98 | else if( type == "doindent" && textAfter.length > 1 ) {} 99 | else if( type == "doindent")n--; 100 | else if( type == ">" && textAfter.length > 1) {} 101 | else if( type == "tag" && textAfter !== ">") {} 102 | else if( type == "tag" && state.stack[state.stack.length-1] == "rule")n--; 103 | else if( type == "tag")n++; 104 | else if( textAfter === ">" && state.stack[state.stack.length-1] == "rule" && type === ">")n--; 105 | else if( textAfter === ">" && state.stack[state.stack.length-1] == "rule") {} 106 | else if( textAfter.substr(0,1) !== "<" && textAfter.substr(0,1) === ">" )n=n-1; 107 | else if( textAfter === ">") {} 108 | else n=n-1; 109 | //over rule them all 110 | if(type == null || type == "]")n--; 111 | } 112 | 113 | return state.baseIndent + n * cx.unit; 114 | }, 115 | 116 | languageData: { 117 | indentOnInput: /^\s*[\]>]$/ 118 | } 119 | }; 120 | 121 | -------------------------------------------------------------------------------- /mode/dylan.d.ts: -------------------------------------------------------------------------------- 1 | 
import {StreamParser} from "@codemirror/language" 2 | export declare const dylan: StreamParser 3 | -------------------------------------------------------------------------------- /mode/ebnf.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const ebnf: StreamParser 3 | -------------------------------------------------------------------------------- /mode/ebnf.js: -------------------------------------------------------------------------------- 1 | var commentType = {slash: 0, parenthesis: 1}; 2 | var stateType = {comment: 0, _string: 1, characterClass: 2}; 3 | 4 | export const ebnf = { 5 | name: "ebnf", 6 | startState: function () { 7 | return { 8 | stringType: null, 9 | commentType: null, 10 | braced: 0, 11 | lhs: true, 12 | localState: null, 13 | stack: [], 14 | inDefinition: false 15 | }; 16 | }, 17 | token: function (stream, state) { 18 | if (!stream) return; 19 | 20 | //check for state changes 21 | if (state.stack.length === 0) { 22 | //strings 23 | if ((stream.peek() == '"') || (stream.peek() == "'")) { 24 | state.stringType = stream.peek(); 25 | stream.next(); // Skip quote 26 | state.stack.unshift(stateType._string); 27 | } else if (stream.match('/*')) { //comments starting with /* 28 | state.stack.unshift(stateType.comment); 29 | state.commentType = commentType.slash; 30 | } else if (stream.match('(*')) { //comments starting with (* 31 | state.stack.unshift(stateType.comment); 32 | state.commentType = commentType.parenthesis; 33 | } 34 | } 35 | 36 | //return state 37 | //stack has 38 | switch (state.stack[0]) { 39 | case stateType._string: 40 | while (state.stack[0] === stateType._string && !stream.eol()) { 41 | if (stream.peek() === state.stringType) { 42 | stream.next(); // Skip quote 43 | state.stack.shift(); // Clear flag 44 | } else if (stream.peek() === "\\") { 45 | stream.next(); 46 | stream.next(); 47 | } else { 48 | stream.match(/^.[^\\\"\']*/); 49 | } 50 | } 51 | return state.lhs ? 
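/* "property" while on a rule's left-hand side, plain "string" otherwise */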
"property" : "string"; // Token style 52 | 53 | case stateType.comment: 54 | while (state.stack[0] === stateType.comment && !stream.eol()) { 55 | if (state.commentType === commentType.slash && stream.match('*/')) { 56 | state.stack.shift(); // Clear flag 57 | state.commentType = null; 58 | } else if (state.commentType === commentType.parenthesis && stream.match('*)')) { 59 | state.stack.shift(); // Clear flag 60 | state.commentType = null; 61 | } else { 62 | stream.match(/^.[^\*]*/); 63 | } 64 | } 65 | return "comment"; 66 | 67 | case stateType.characterClass: 68 | while (state.stack[0] === stateType.characterClass && !stream.eol()) { 69 | if (!(stream.match(/^[^\]\\]+/) || stream.match('.'))) { 70 | state.stack.shift(); 71 | } 72 | } 73 | return "operator"; 74 | } 75 | 76 | var peek = stream.peek(); 77 | 78 | //no stack 79 | switch (peek) { 80 | case "[": 81 | stream.next(); 82 | state.stack.unshift(stateType.characterClass); 83 | return "bracket"; 84 | case ":": 85 | case "|": 86 | case ";": 87 | stream.next(); 88 | return "operator"; 89 | case "%": 90 | if (stream.match("%%")) { 91 | return "header"; 92 | } else if (stream.match(/[%][A-Za-z]+/)) { 93 | return "keyword"; 94 | } else if (stream.match(/[%][}]/)) { 95 | return "bracket"; 96 | } 97 | break; 98 | case "/": 99 | if (stream.match(/[\/][A-Za-z]+/)) { 100 | return "keyword"; 101 | } 102 | case "\\": 103 | if (stream.match(/[\][a-z]+/)) { 104 | return "string.special"; 105 | } 106 | case ".": 107 | if (stream.match(".")) { 108 | return "atom"; 109 | } 110 | case "*": 111 | case "-": 112 | case "+": 113 | case "^": 114 | if (stream.match(peek)) { 115 | return "atom"; 116 | } 117 | case "$": 118 | if (stream.match("$$")) { 119 | return "builtin"; 120 | } else if (stream.match(/[$][0-9]+/)) { 121 | return "variableName.special"; 122 | } 123 | case "<": 124 | if (stream.match(/<<[a-zA-Z_]+>>/)) { 125 | return "builtin"; 126 | } 127 | } 128 | 129 | if (stream.match('//')) { 130 | stream.skipToEnd(); 131 | return "comment"; 132 | } else if (stream.match('return')) { 133 | return "operator"; 134 | } else if (stream.match(/^[a-zA-Z_][a-zA-Z0-9_]*/)) { 135 | if (stream.match(/(?=[\(.])/)) { 136 | return "variable"; 137 | } else if (stream.match(/(?=[\s\n]*[:=])/)) { 138 | return "def"; 139 | } 140 | return "variableName.special"; 141 | } else if (["[", "]", "(", ")"].indexOf(stream.peek()) != -1) { 142 | stream.next(); 143 | return "bracket"; 144 | } else if (!stream.eatSpace()) { 145 | stream.next(); 146 | } 147 | return null; 148 | } 149 | }; 150 | -------------------------------------------------------------------------------- /mode/ecl.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const ecl: StreamParser 3 | -------------------------------------------------------------------------------- /mode/eiffel.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const eiffel: StreamParser 3 | -------------------------------------------------------------------------------- /mode/eiffel.js: -------------------------------------------------------------------------------- 1 | function wordObj(words) { 2 | var o = {}; 3 | for (var i = 0, e = words.length; i < e; ++i) o[words[i]] = true; 4 | return o; 5 | } 6 | var keywords = wordObj([ 7 | 'note', 8 | 'across', 9 | 'when', 10 | 'variant', 11 | 'until', 12 | 'unique', 13 | 'undefine', 
14 | 'then', 15 | 'strip', 16 | 'select', 17 | 'retry', 18 | 'rescue', 19 | 'require', 20 | 'rename', 21 | 'reference', 22 | 'redefine', 23 | 'prefix', 24 | 'once', 25 | 'old', 26 | 'obsolete', 27 | 'loop', 28 | 'local', 29 | 'like', 30 | 'is', 31 | 'inspect', 32 | 'infix', 33 | 'include', 34 | 'if', 35 | 'frozen', 36 | 'from', 37 | 'external', 38 | 'export', 39 | 'ensure', 40 | 'end', 41 | 'elseif', 42 | 'else', 43 | 'do', 44 | 'creation', 45 | 'create', 46 | 'check', 47 | 'alias', 48 | 'agent', 49 | 'separate', 50 | 'invariant', 51 | 'inherit', 52 | 'indexing', 53 | 'feature', 54 | 'expanded', 55 | 'deferred', 56 | 'class', 57 | 'Void', 58 | 'True', 59 | 'Result', 60 | 'Precursor', 61 | 'False', 62 | 'Current', 63 | 'create', 64 | 'attached', 65 | 'detachable', 66 | 'as', 67 | 'and', 68 | 'implies', 69 | 'not', 70 | 'or' 71 | ]); 72 | var operators = wordObj([":=", "and then","and", "or","<<",">>"]); 73 | 74 | function chain(newtok, stream, state) { 75 | state.tokenize.push(newtok); 76 | return newtok(stream, state); 77 | } 78 | 79 | function tokenBase(stream, state) { 80 | if (stream.eatSpace()) return null; 81 | var ch = stream.next(); 82 | if (ch == '"'||ch == "'") { 83 | return chain(readQuoted(ch, "string"), stream, state); 84 | } else if (ch == "-"&&stream.eat("-")) { 85 | stream.skipToEnd(); 86 | return "comment"; 87 | } else if (ch == ":"&&stream.eat("=")) { 88 | return "operator"; 89 | } else if (/[0-9]/.test(ch)) { 90 | stream.eatWhile(/[xXbBCc0-9\.]/); 91 | stream.eat(/[\?\!]/); 92 | return "variable"; 93 | } else if (/[a-zA-Z_0-9]/.test(ch)) { 94 | stream.eatWhile(/[a-zA-Z_0-9]/); 95 | stream.eat(/[\?\!]/); 96 | return "variable"; 97 | } else if (/[=+\-\/*^%<>~]/.test(ch)) { 98 | stream.eatWhile(/[=+\-\/*^%<>~]/); 99 | return "operator"; 100 | } else { 101 | return null; 102 | } 103 | } 104 | 105 | function readQuoted(quote, style, unescaped) { 106 | return function(stream, state) { 107 | var escaped = false, ch; 108 | while ((ch = stream.next()) != null) { 109 | if (ch == quote && (unescaped || !escaped)) { 110 | state.tokenize.pop(); 111 | break; 112 | } 113 | escaped = !escaped && ch == "%"; 114 | } 115 | return style; 116 | }; 117 | } 118 | 119 | export const eiffel = { 120 | name: "eiffel", 121 | startState: function() { 122 | return {tokenize: [tokenBase]}; 123 | }, 124 | 125 | token: function(stream, state) { 126 | var style = state.tokenize[state.tokenize.length-1](stream, state); 127 | if (style == "variable") { 128 | var word = stream.current(); 129 | style = keywords.propertyIsEnumerable(stream.current()) ? "keyword" 130 | : operators.propertyIsEnumerable(stream.current()) ? "operator" 131 | : /^[A-Z][A-Z_0-9]*$/g.test(word) ? "tag" 132 | : /^0[bB][0-1]+$/g.test(word) ? "number" 133 | : /^0[cC][0-7]+$/g.test(word) ? "number" 134 | : /^0[xX][a-fA-F0-9]+$/g.test(word) ? "number" 135 | : /^([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)$/g.test(word) ? "number" 136 | : /^[0-9]+$/g.test(word) ? 
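/* plain decimal integers */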
"number" 137 | : "variable"; 138 | } 139 | return style; 140 | }, 141 | languageData: { 142 | commentTokens: {line: "--"} 143 | } 144 | }; 145 | 146 | -------------------------------------------------------------------------------- /mode/elm.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const elm: StreamParser 3 | -------------------------------------------------------------------------------- /mode/elm.js: -------------------------------------------------------------------------------- 1 | function switchState(source, setState, f) 2 | { 3 | setState(f); 4 | return f(source, setState); 5 | } 6 | 7 | var lowerRE = /[a-z]/; 8 | var upperRE = /[A-Z]/; 9 | var innerRE = /[a-zA-Z0-9_]/; 10 | 11 | var digitRE = /[0-9]/; 12 | var hexRE = /[0-9A-Fa-f]/; 13 | var symbolRE = /[-&*+.\\/<>=?^|:]/; 14 | var specialRE = /[(),[\]{}]/; 15 | var spacesRE = /[ \v\f]/; // newlines are handled in tokenizer 16 | 17 | function normal() 18 | { 19 | return function(source, setState) 20 | { 21 | if (source.eatWhile(spacesRE)) 22 | { 23 | return null; 24 | } 25 | 26 | var char = source.next(); 27 | 28 | if (specialRE.test(char)) 29 | { 30 | return (char === '{' && source.eat('-')) 31 | ? switchState(source, setState, chompMultiComment(1)) 32 | : (char === '[' && source.match('glsl|')) 33 | ? switchState(source, setState, chompGlsl) 34 | : 'builtin'; 35 | } 36 | 37 | if (char === '\'') 38 | { 39 | return switchState(source, setState, chompChar); 40 | } 41 | 42 | if (char === '"') 43 | { 44 | return source.eat('"') 45 | ? source.eat('"') 46 | ? switchState(source, setState, chompMultiString) 47 | : 'string' 48 | : switchState(source, setState, chompSingleString); 49 | } 50 | 51 | if (upperRE.test(char)) 52 | { 53 | source.eatWhile(innerRE); 54 | return 'type'; 55 | } 56 | 57 | if (lowerRE.test(char)) 58 | { 59 | var isDef = source.pos === 1; 60 | source.eatWhile(innerRE); 61 | return isDef ? 
"def" : "variable"; 62 | } 63 | 64 | if (digitRE.test(char)) 65 | { 66 | if (char === '0') 67 | { 68 | if (source.eat(/[xX]/)) 69 | { 70 | source.eatWhile(hexRE); // should require at least 1 71 | return "number"; 72 | } 73 | } 74 | else 75 | { 76 | source.eatWhile(digitRE); 77 | } 78 | if (source.eat('.')) 79 | { 80 | source.eatWhile(digitRE); // should require at least 1 81 | } 82 | if (source.eat(/[eE]/)) 83 | { 84 | source.eat(/[-+]/); 85 | source.eatWhile(digitRE); // should require at least 1 86 | } 87 | return "number"; 88 | } 89 | 90 | if (symbolRE.test(char)) 91 | { 92 | if (char === '-' && source.eat('-')) 93 | { 94 | source.skipToEnd(); 95 | return "comment"; 96 | } 97 | source.eatWhile(symbolRE); 98 | return "keyword"; 99 | } 100 | 101 | if (char === '_') 102 | { 103 | return "keyword"; 104 | } 105 | 106 | return "error"; 107 | } 108 | } 109 | 110 | function chompMultiComment(nest) 111 | { 112 | if (nest == 0) 113 | { 114 | return normal(); 115 | } 116 | return function(source, setState) 117 | { 118 | while (!source.eol()) 119 | { 120 | var char = source.next(); 121 | if (char == '{' && source.eat('-')) 122 | { 123 | ++nest; 124 | } 125 | else if (char == '-' && source.eat('}')) 126 | { 127 | --nest; 128 | if (nest === 0) 129 | { 130 | setState(normal()); 131 | return 'comment'; 132 | } 133 | } 134 | } 135 | setState(chompMultiComment(nest)); 136 | return 'comment'; 137 | } 138 | } 139 | 140 | function chompMultiString(source, setState) 141 | { 142 | while (!source.eol()) 143 | { 144 | var char = source.next(); 145 | if (char === '"' && source.eat('"') && source.eat('"')) 146 | { 147 | setState(normal()); 148 | return 'string'; 149 | } 150 | } 151 | return 'string'; 152 | } 153 | 154 | function chompSingleString(source, setState) 155 | { 156 | while (source.skipTo('\\"')) { source.next(); source.next(); } 157 | if (source.skipTo('"')) 158 | { 159 | source.next(); 160 | setState(normal()); 161 | return 'string'; 162 | } 163 | source.skipToEnd(); 164 | setState(normal()); 165 | return 'error'; 166 | } 167 | 168 | function chompChar(source, setState) 169 | { 170 | while (source.skipTo("\\'")) { source.next(); source.next(); } 171 | if (source.skipTo("'")) 172 | { 173 | source.next(); 174 | setState(normal()); 175 | return 'string'; 176 | } 177 | source.skipToEnd(); 178 | setState(normal()); 179 | return 'error'; 180 | } 181 | 182 | function chompGlsl(source, setState) 183 | { 184 | while (!source.eol()) 185 | { 186 | var char = source.next(); 187 | if (char === '|' && source.eat(']')) 188 | { 189 | setState(normal()); 190 | return 'string'; 191 | } 192 | } 193 | return 'string'; 194 | } 195 | 196 | var wellKnownWords = { 197 | case: 1, 198 | of: 1, 199 | as: 1, 200 | if: 1, 201 | then: 1, 202 | else: 1, 203 | let: 1, 204 | in: 1, 205 | type: 1, 206 | alias: 1, 207 | module: 1, 208 | where: 1, 209 | import: 1, 210 | exposing: 1, 211 | port: 1 212 | }; 213 | 214 | export const elm = { 215 | name: "elm", 216 | startState: function () { return { f: normal() }; }, 217 | copyState: function (s) { return { f: s.f }; }, 218 | 219 | token: function(stream, state) { 220 | var type = state.f(stream, function(s) { state.f = s; }); 221 | var word = stream.current(); 222 | return (wellKnownWords.hasOwnProperty(word)) ? 
'keyword' : type; 223 | }, 224 | 225 | languageData: { 226 | commentTokens: {line: "--"} 227 | } 228 | }; 229 | -------------------------------------------------------------------------------- /mode/erlang.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const erlang: StreamParser 3 | -------------------------------------------------------------------------------- /mode/factor.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const factor: StreamParser 3 | -------------------------------------------------------------------------------- /mode/factor.js: -------------------------------------------------------------------------------- 1 | import {simpleMode} from "./simple-mode.js" 2 | 3 | export const factor = simpleMode({ 4 | start: [ 5 | // comments 6 | {regex: /#?!.*/, token: "comment"}, 7 | // strings """, multiline --> state 8 | {regex: /"""/, token: "string", next: "string3"}, 9 | {regex: /(STRING:)(\s)/, token: ["keyword", null], next: "string2"}, 10 | {regex: /\S*?"/, token: "string", next: "string"}, 11 | // numbers: dec, hex, unicode, bin, fractional, complex 12 | {regex: /(?:0x[\d,a-f]+)|(?:0o[0-7]+)|(?:0b[0,1]+)|(?:\-?\d+.?\d*)(?=\s)/, token: "number"}, 13 | //{regex: /[+-]?/} //fractional 14 | // definition: defining word, defined word, etc 15 | {regex: /((?:GENERIC)|\:?\:)(\s+)(\S+)(\s+)(\()/, token: ["keyword", null, "def", null, "bracket"], next: "stack"}, 16 | // method definition: defining word, type, defined word, etc 17 | {regex: /(M\:)(\s+)(\S+)(\s+)(\S+)/, token: ["keyword", null, "def", null, "tag"]}, 18 | // vocabulary using --> state 19 | {regex: /USING\:/, token: "keyword", next: "vocabulary"}, 20 | // vocabulary definition/use 21 | {regex: /(USE\:|IN\:)(\s+)(\S+)(?=\s|$)/, token: ["keyword", null, "tag"]}, 22 | // definition: a defining word, defined word 23 | {regex: /(\S+\:)(\s+)(\S+)(?=\s|$)/, token: ["keyword", null, "def"]}, 24 | // "keywords", incl. ; t f . [ ] { } defining words 25 | {regex: /(?:;|\\|t|f|if|loop|while|until|do|PRIVATE>| and the like 27 | {regex: /\S+[\)>\.\*\?]+(?=\s|$)/, token: "builtin"}, 28 | {regex: /[\)><]+\S+(?=\s|$)/, token: "builtin"}, 29 | // operators 30 | {regex: /(?:[\+\-\=\/\*<>])(?=\s|$)/, token: "keyword"}, 31 | // any id (?) 
32 | {regex: /\S+/, token: "variable"}, 33 | {regex: /\s+|./, token: null} 34 | ], 35 | vocabulary: [ 36 | {regex: /;/, token: "keyword", next: "start"}, 37 | {regex: /\S+/, token: "tag"}, 38 | {regex: /\s+|./, token: null} 39 | ], 40 | string: [ 41 | {regex: /(?:[^\\]|\\.)*?"/, token: "string", next: "start"}, 42 | {regex: /.*/, token: "string"} 43 | ], 44 | string2: [ 45 | {regex: /^;/, token: "keyword", next: "start"}, 46 | {regex: /.*/, token: "string"} 47 | ], 48 | string3: [ 49 | {regex: /(?:[^\\]|\\.)*?"""/, token: "string", next: "start"}, 50 | {regex: /.*/, token: "string"} 51 | ], 52 | stack: [ 53 | {regex: /\)/, token: "bracket", next: "start"}, 54 | {regex: /--/, token: "bracket"}, 55 | {regex: /\S+/, token: "meta"}, 56 | {regex: /\s+|./, token: null} 57 | ], 58 | languageData: { 59 | name: "factor", 60 | dontIndentStates: ["start", "vocabulary", "string", "string3", "stack"], 61 | commentTokens: {line: "!"} 62 | } 63 | }); 64 | -------------------------------------------------------------------------------- /mode/fcl.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const fcl: StreamParser 3 | -------------------------------------------------------------------------------- /mode/fcl.js: -------------------------------------------------------------------------------- 1 | var keywords = { 2 | "term": true, 3 | "method": true, "accu": true, 4 | "rule": true, "then": true, "is": true, "and": true, "or": true, 5 | "if": true, "default": true 6 | }; 7 | 8 | var start_blocks = { 9 | "var_input": true, 10 | "var_output": true, 11 | "fuzzify": true, 12 | "defuzzify": true, 13 | "function_block": true, 14 | "ruleblock": true 15 | }; 16 | 17 | var end_blocks = { 18 | "end_ruleblock": true, 19 | "end_defuzzify": true, 20 | "end_function_block": true, 21 | "end_fuzzify": true, 22 | "end_var": true 23 | }; 24 | 25 | var atoms = { 26 | "true": true, "false": true, "nan": true, 27 | "real": true, "min": true, "max": true, "cog": true, "cogs": true 28 | }; 29 | 30 | var isOperatorChar = /[+\-*&^%:=<>!|\/]/; 31 | 32 | function tokenBase(stream, state) { 33 | var ch = stream.next(); 34 | 35 | if (/[\d\.]/.test(ch)) { 36 | if (ch == ".") { 37 | stream.match(/^[0-9]+([eE][\-+]?[0-9]+)?/); 38 | } else if (ch == "0") { 39 | stream.match(/^[xX][0-9a-fA-F]+/) || stream.match(/^0[0-7]+/); 40 | } else { 41 | stream.match(/^[0-9]*\.?[0-9]*([eE][\-+]?[0-9]+)?/); 42 | } 43 | return "number"; 44 | } 45 | 46 | if (ch == "/" || ch == "(") { 47 | if (stream.eat("*")) { 48 | state.tokenize = tokenComment; 49 | return tokenComment(stream, state); 50 | } 51 | if (stream.eat("/")) { 52 | stream.skipToEnd(); 53 | return "comment"; 54 | } 55 | } 56 | if (isOperatorChar.test(ch)) { 57 | stream.eatWhile(isOperatorChar); 58 | return "operator"; 59 | } 60 | stream.eatWhile(/[\w\$_\xa1-\uffff]/); 61 | 62 | var cur = stream.current().toLowerCase(); 63 | if (keywords.propertyIsEnumerable(cur) || 64 | start_blocks.propertyIsEnumerable(cur) || 65 | end_blocks.propertyIsEnumerable(cur)) { 66 | return "keyword"; 67 | } 68 | if (atoms.propertyIsEnumerable(cur)) return "atom"; 69 | return "variable"; 70 | } 71 | 72 | 73 | function tokenComment(stream, state) { 74 | var maybeEnd = false, ch; 75 | while (ch = stream.next()) { 76 | if ((ch == "/" || ch == ")") && maybeEnd) { 77 | state.tokenize = tokenBase; 78 | break; 79 | } 80 | maybeEnd = (ch == "*"); 81 | } 82 | return "comment"; 83 | } 84 | 85 | function 
Context(indented, column, type, align, prev) { 86 | this.indented = indented; 87 | this.column = column; 88 | this.type = type; 89 | this.align = align; 90 | this.prev = prev; 91 | } 92 | 93 | function pushContext(state, col, type) { 94 | return state.context = new Context(state.indented, col, type, null, state.context); 95 | } 96 | 97 | function popContext(state) { 98 | if (!state.context.prev) return; 99 | var t = state.context.type; 100 | if (t == "end_block") 101 | state.indented = state.context.indented; 102 | return state.context = state.context.prev; 103 | } 104 | 105 | // Interface 106 | 107 | export const fcl = { 108 | name: "fcl", 109 | startState: function(indentUnit) { 110 | return { 111 | tokenize: null, 112 | context: new Context(-indentUnit, 0, "top", false), 113 | indented: 0, 114 | startOfLine: true 115 | }; 116 | }, 117 | 118 | token: function(stream, state) { 119 | var ctx = state.context; 120 | if (stream.sol()) { 121 | if (ctx.align == null) ctx.align = false; 122 | state.indented = stream.indentation(); 123 | state.startOfLine = true; 124 | } 125 | if (stream.eatSpace()) return null; 126 | 127 | var style = (state.tokenize || tokenBase)(stream, state); 128 | if (style == "comment") return style; 129 | if (ctx.align == null) ctx.align = true; 130 | 131 | var cur = stream.current().toLowerCase(); 132 | 133 | if (start_blocks.propertyIsEnumerable(cur)) pushContext(state, stream.column(), "end_block"); 134 | else if (end_blocks.propertyIsEnumerable(cur)) popContext(state); 135 | 136 | state.startOfLine = false; 137 | return style; 138 | }, 139 | 140 | indent: function(state, textAfter, cx) { 141 | if (state.tokenize != tokenBase && state.tokenize != null) return 0; 142 | var ctx = state.context; 143 | 144 | var closing = end_blocks.propertyIsEnumerable(textAfter); 145 | if (ctx.align) return ctx.column + (closing ? 0 : 1); 146 | else return ctx.indented + (closing ? 0 : cx.unit); 147 | }, 148 | 149 | languageData: { 150 | commentTokens: {line: "//", block: {open: "(*", close: "*)"}} 151 | } 152 | }; 153 | 154 | -------------------------------------------------------------------------------- /mode/forth.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const forth: StreamParser 3 | -------------------------------------------------------------------------------- /mode/forth.js: -------------------------------------------------------------------------------- 1 | function toWordList(words) { 2 | var ret = []; 3 | words.split(' ').forEach(function(e){ 4 | ret.push({name: e}); 5 | }); 6 | return ret; 7 | } 8 | 9 | var coreWordList = toWordList( 10 | 'INVERT AND OR XOR\ 11 | 2* 2/ LSHIFT RSHIFT\ 12 | 0= = 0< < > U< MIN MAX\ 13 | 2DROP 2DUP 2OVER 2SWAP ?DUP DEPTH DROP DUP OVER ROT SWAP\ 14 | >R R> R@\ 15 | + - 1+ 1- ABS NEGATE\ 16 | S>D * M* UM*\ 17 | FM/MOD SM/REM UM/MOD */ */MOD / /MOD MOD\ 18 | HERE , @ ! CELL+ CELLS C, C@ C! CHARS 2@ 2!\ 19 | ALIGN ALIGNED +! ALLOT\ 20 | CHAR [CHAR] [ ] BL\ 21 | FIND EXECUTE IMMEDIATE COUNT LITERAL STATE\ 22 | ; DOES> >BODY\ 23 | EVALUATE\ 24 | SOURCE >IN\ 25 | <# # #S #> HOLD SIGN BASE >NUMBER HEX DECIMAL\ 26 | FILL MOVE\ 27 | . CR EMIT SPACE SPACES TYPE U. 
.R U.R\ 28 | ACCEPT\ 29 | TRUE FALSE\ 30 | <> U> 0<> 0>\ 31 | NIP TUCK ROLL PICK\ 32 | 2>R 2R@ 2R>\ 33 | WITHIN UNUSED MARKER\ 34 | I J\ 35 | TO\ 36 | COMPILE, [COMPILE]\ 37 | SAVE-INPUT RESTORE-INPUT\ 38 | PAD ERASE\ 39 | 2LITERAL DNEGATE\ 40 | D- D+ D0< D0= D2* D2/ D< D= DMAX DMIN D>S DABS\ 41 | M+ M*/ D. D.R 2ROT DU<\ 42 | CATCH THROW\ 43 | FREE RESIZE ALLOCATE\ 44 | CS-PICK CS-ROLL\ 45 | GET-CURRENT SET-CURRENT FORTH-WORDLIST GET-ORDER SET-ORDER\ 46 | PREVIOUS SEARCH-WORDLIST WORDLIST FIND ALSO ONLY FORTH DEFINITIONS ORDER\ 47 | -TRAILING /STRING SEARCH COMPARE CMOVE CMOVE> BLANK SLITERAL'); 48 | 49 | var immediateWordList = toWordList('IF ELSE THEN BEGIN WHILE REPEAT UNTIL RECURSE [IF] [ELSE] [THEN] ?DO DO LOOP +LOOP UNLOOP LEAVE EXIT AGAIN CASE OF ENDOF ENDCASE'); 50 | 51 | function searchWordList (wordList, word) { 52 | var i; 53 | for (i = wordList.length - 1; i >= 0; i--) { 54 | if (wordList[i].name === word.toUpperCase()) { 55 | return wordList[i]; 56 | } 57 | } 58 | return undefined; 59 | } 60 | export const forth = { 61 | name: "forth", 62 | startState: function() { 63 | return { 64 | state: '', 65 | base: 10, 66 | coreWordList: coreWordList, 67 | immediateWordList: immediateWordList, 68 | wordList: [] 69 | }; 70 | }, 71 | token: function (stream, stt) { 72 | var mat; 73 | if (stream.eatSpace()) { 74 | return null; 75 | } 76 | if (stt.state === '') { // interpretation 77 | if (stream.match(/^(\]|:NONAME)(\s|$)/i)) { 78 | stt.state = ' compilation'; 79 | return 'builtin'; 80 | } 81 | mat = stream.match(/^(\:)\s+(\S+)(\s|$)+/); 82 | if (mat) { 83 | stt.wordList.push({name: mat[2].toUpperCase()}); 84 | stt.state = ' compilation'; 85 | return 'def'; 86 | } 87 | mat = stream.match(/^(VARIABLE|2VARIABLE|CONSTANT|2CONSTANT|CREATE|POSTPONE|VALUE|WORD)\s+(\S+)(\s|$)+/i); 88 | if (mat) { 89 | stt.wordList.push({name: mat[2].toUpperCase()}); 90 | return 'def'; 91 | } 92 | mat = stream.match(/^(\'|\[\'\])\s+(\S+)(\s|$)+/); 93 | if (mat) { 94 | return 'builtin' 95 | } 96 | } else { // compilation 97 | // ; [ 98 | if (stream.match(/^(\;|\[)(\s)/)) { 99 | stt.state = ''; 100 | stream.backUp(1); 101 | return 'builtin'; 102 | } 103 | if (stream.match(/^(\;|\[)($)/)) { 104 | stt.state = ''; 105 | return 'builtin'; 106 | } 107 | if (stream.match(/^(POSTPONE)\s+\S+(\s|$)+/)) { 108 | return 'builtin'; 109 | } 110 | } 111 | 112 | // dynamic wordlist 113 | mat = stream.match(/^(\S+)(\s+|$)/); 114 | if (mat) { 115 | if (searchWordList(stt.wordList, mat[1]) !== undefined) { 116 | return 'variable'; 117 | } 118 | 119 | // comments 120 | if (mat[1] === '\\') { 121 | stream.skipToEnd(); 122 | return 'comment'; 123 | } 124 | 125 | // core words 126 | if (searchWordList(stt.coreWordList, mat[1]) !== undefined) { 127 | return 'builtin'; 128 | } 129 | if (searchWordList(stt.immediateWordList, mat[1]) !== undefined) { 130 | return 'keyword'; 131 | } 132 | 133 | if (mat[1] === '(') { 134 | stream.eatWhile(function (s) { return s !== ')'; }); 135 | stream.eat(')'); 136 | return 'comment'; 137 | } 138 | 139 | // // strings 140 | if (mat[1] === '.(') { 141 | stream.eatWhile(function (s) { return s !== ')'; }); 142 | stream.eat(')'); 143 | return 'string'; 144 | } 145 | if (mat[1] === 'S"' || mat[1] === '."' || mat[1] === 'C"') { 146 | stream.eatWhile(function (s) { return s !== '"'; }); 147 | stream.eat('"'); 148 | return 'string'; 149 | } 150 | 151 | // numbers 152 | if (mat[1] - 0xfffffffff) { 153 | return 'number'; 154 | } 155 | // if (mat[1].match(/^[-+]?[0-9]+\.[0-9]*/)) { 156 | // return 'number'; 157 | // } 
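    // Note on the check above: `mat[1] - 0xfffffffff` yields NaN (which is
    // falsy) for non-numeric words, so only words that parse as numbers reach
    // the "number" branch; everything else falls through to 'atom' below.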
158 | 159 | return 'atom'; 160 | } 161 | } 162 | }; 163 | -------------------------------------------------------------------------------- /mode/fortran.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const fortran: StreamParser 3 | -------------------------------------------------------------------------------- /mode/gas.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const gas: StreamParser 3 | export declare const gasArm: StreamParser 4 | -------------------------------------------------------------------------------- /mode/gherkin.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const gherkin: StreamParser 3 | -------------------------------------------------------------------------------- /mode/go.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const go: StreamParser 3 | -------------------------------------------------------------------------------- /mode/go.js: -------------------------------------------------------------------------------- 1 | var keywords = { 2 | "break":true, "case":true, "chan":true, "const":true, "continue":true, 3 | "default":true, "defer":true, "else":true, "fallthrough":true, "for":true, 4 | "func":true, "go":true, "goto":true, "if":true, "import":true, 5 | "interface":true, "map":true, "package":true, "range":true, "return":true, 6 | "select":true, "struct":true, "switch":true, "type":true, "var":true, 7 | "bool":true, "byte":true, "complex64":true, "complex128":true, 8 | "float32":true, "float64":true, "int8":true, "int16":true, "int32":true, 9 | "int64":true, "string":true, "uint8":true, "uint16":true, "uint32":true, 10 | "uint64":true, "int":true, "uint":true, "uintptr":true, "error": true, 11 | "rune":true, "any":true, "comparable":true 12 | }; 13 | 14 | var atoms = { 15 | "true":true, "false":true, "iota":true, "nil":true, "append":true, 16 | "cap":true, "close":true, "complex":true, "copy":true, "delete":true, "imag":true, 17 | "len":true, "make":true, "new":true, "panic":true, "print":true, 18 | "println":true, "real":true, "recover":true 19 | }; 20 | 21 | var isOperatorChar = /[+\-*&^%:=<>!|\/]/; 22 | 23 | var curPunc; 24 | 25 | function tokenBase(stream, state) { 26 | var ch = stream.next(); 27 | if (ch == '"' || ch == "'" || ch == "`") { 28 | state.tokenize = tokenString(ch); 29 | return state.tokenize(stream, state); 30 | } 31 | if (/[\d\.]/.test(ch)) { 32 | if (ch == ".") { 33 | stream.match(/^[0-9]+([eE][\-+]?[0-9]+)?/); 34 | } else if (ch == "0") { 35 | stream.match(/^[xX][0-9a-fA-F]+/) || stream.match(/^0[0-7]+/); 36 | } else { 37 | stream.match(/^[0-9]*\.?[0-9]*([eE][\-+]?[0-9]+)?/); 38 | } 39 | return "number"; 40 | } 41 | if (/[\[\]{}\(\),;\:\.]/.test(ch)) { 42 | curPunc = ch; 43 | return null; 44 | } 45 | if (ch == "/") { 46 | if (stream.eat("*")) { 47 | state.tokenize = tokenComment; 48 | return tokenComment(stream, state); 49 | } 50 | if (stream.eat("/")) { 51 | stream.skipToEnd(); 52 | return "comment"; 53 | } 54 | } 55 | if (isOperatorChar.test(ch)) { 56 | stream.eatWhile(isOperatorChar); 57 | return "operator"; 58 | } 59 | stream.eatWhile(/[\w\$_\xa1-\uffff]/); 60 | var cur = stream.current(); 
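  // Classify the word just consumed: keywords win ("case" and "default" also
  // set curPunc so the context/indentation logic can align switch labels),
  // predeclared identifiers in `atoms` come next, anything else is a variable.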
61 | if (keywords.propertyIsEnumerable(cur)) { 62 | if (cur == "case" || cur == "default") curPunc = "case"; 63 | return "keyword"; 64 | } 65 | if (atoms.propertyIsEnumerable(cur)) return "atom"; 66 | return "variable"; 67 | } 68 | 69 | function tokenString(quote) { 70 | return function(stream, state) { 71 | var escaped = false, next, end = false; 72 | while ((next = stream.next()) != null) { 73 | if (next == quote && !escaped) {end = true; break;} 74 | escaped = !escaped && quote != "`" && next == "\\"; 75 | } 76 | if (end || !(escaped || quote == "`")) 77 | state.tokenize = tokenBase; 78 | return "string"; 79 | }; 80 | } 81 | 82 | function tokenComment(stream, state) { 83 | var maybeEnd = false, ch; 84 | while (ch = stream.next()) { 85 | if (ch == "/" && maybeEnd) { 86 | state.tokenize = tokenBase; 87 | break; 88 | } 89 | maybeEnd = (ch == "*"); 90 | } 91 | return "comment"; 92 | } 93 | 94 | function Context(indented, column, type, align, prev) { 95 | this.indented = indented; 96 | this.column = column; 97 | this.type = type; 98 | this.align = align; 99 | this.prev = prev; 100 | } 101 | function pushContext(state, col, type) { 102 | return state.context = new Context(state.indented, col, type, null, state.context); 103 | } 104 | function popContext(state) { 105 | if (!state.context.prev) return; 106 | var t = state.context.type; 107 | if (t == ")" || t == "]" || t == "}") 108 | state.indented = state.context.indented; 109 | return state.context = state.context.prev; 110 | } 111 | 112 | // Interface 113 | 114 | export const go = { 115 | name: "go", 116 | startState: function(indentUnit) { 117 | return { 118 | tokenize: null, 119 | context: new Context(-indentUnit, 0, "top", false), 120 | indented: 0, 121 | startOfLine: true 122 | }; 123 | }, 124 | 125 | token: function(stream, state) { 126 | var ctx = state.context; 127 | if (stream.sol()) { 128 | if (ctx.align == null) ctx.align = false; 129 | state.indented = stream.indentation(); 130 | state.startOfLine = true; 131 | if (ctx.type == "case") ctx.type = "}"; 132 | } 133 | if (stream.eatSpace()) return null; 134 | curPunc = null; 135 | var style = (state.tokenize || tokenBase)(stream, state); 136 | if (style == "comment") return style; 137 | if (ctx.align == null) ctx.align = true; 138 | 139 | if (curPunc == "{") pushContext(state, stream.column(), "}"); 140 | else if (curPunc == "[") pushContext(state, stream.column(), "]"); 141 | else if (curPunc == "(") pushContext(state, stream.column(), ")"); 142 | else if (curPunc == "case") ctx.type = "case"; 143 | else if (curPunc == "}" && ctx.type == "}") popContext(state); 144 | else if (curPunc == ctx.type) popContext(state); 145 | state.startOfLine = false; 146 | return style; 147 | }, 148 | 149 | indent: function(state, textAfter, cx) { 150 | if (state.tokenize != tokenBase && state.tokenize != null) return null; 151 | var ctx = state.context, firstChar = textAfter && textAfter.charAt(0); 152 | if (ctx.type == "case" && /^(?:case|default)\b/.test(textAfter)) return ctx.indented; 153 | var closing = firstChar == ctx.type; 154 | if (ctx.align) return ctx.column + (closing ? 0 : 1); 155 | else return ctx.indented + (closing ? 
0 : cx.unit); 156 | }, 157 | 158 | languageData: { 159 | indentOnInput: /^\s([{}]|case |default\s*:)$/, 160 | commentTokens: {line: "//", block: {open: "/*", close: "*/"}} 161 | } 162 | }; 163 | 164 | -------------------------------------------------------------------------------- /mode/groovy.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const groovy: StreamParser 3 | -------------------------------------------------------------------------------- /mode/haskell.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const haskell: StreamParser 3 | -------------------------------------------------------------------------------- /mode/haxe.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const haxe: StreamParser 3 | export declare const hxml: StreamParser 4 | -------------------------------------------------------------------------------- /mode/http.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const http: StreamParser 3 | -------------------------------------------------------------------------------- /mode/http.js: -------------------------------------------------------------------------------- 1 | function failFirstLine(stream, state) { 2 | stream.skipToEnd(); 3 | state.cur = header; 4 | return "error"; 5 | } 6 | 7 | function start(stream, state) { 8 | if (stream.match(/^HTTP\/\d\.\d/)) { 9 | state.cur = responseStatusCode; 10 | return "keyword"; 11 | } else if (stream.match(/^[A-Z]+/) && /[ \t]/.test(stream.peek())) { 12 | state.cur = requestPath; 13 | return "keyword"; 14 | } else { 15 | return failFirstLine(stream, state); 16 | } 17 | } 18 | 19 | function responseStatusCode(stream, state) { 20 | var code = stream.match(/^\d+/); 21 | if (!code) return failFirstLine(stream, state); 22 | 23 | state.cur = responseStatusText; 24 | var status = Number(code[0]); 25 | if (status >= 100 && status < 400) { 26 | return "atom"; 27 | } else { 28 | return "error"; 29 | } 30 | } 31 | 32 | function responseStatusText(stream, state) { 33 | stream.skipToEnd(); 34 | state.cur = header; 35 | return null; 36 | } 37 | 38 | function requestPath(stream, state) { 39 | stream.eatWhile(/\S/); 40 | state.cur = requestProtocol; 41 | return "string.special"; 42 | } 43 | 44 | function requestProtocol(stream, state) { 45 | if (stream.match(/^HTTP\/\d\.\d$/)) { 46 | state.cur = header; 47 | return "keyword"; 48 | } else { 49 | return failFirstLine(stream, state); 50 | } 51 | } 52 | 53 | function header(stream) { 54 | if (stream.sol() && !stream.eat(/[ \t]/)) { 55 | if (stream.match(/^.*?:/)) { 56 | return "atom"; 57 | } else { 58 | stream.skipToEnd(); 59 | return "error"; 60 | } 61 | } else { 62 | stream.skipToEnd(); 63 | return "string"; 64 | } 65 | } 66 | 67 | function body(stream) { 68 | stream.skipToEnd(); 69 | return null; 70 | } 71 | 72 | export const http = { 73 | name: "http", 74 | token: function(stream, state) { 75 | var cur = state.cur; 76 | if (cur != header && cur != body && stream.eatSpace()) return null; 77 | return cur(stream, state); 78 | }, 79 | 80 | blankLine: function(state) { 81 | state.cur = body; 82 | }, 83 | 84 | startState: function() { 85 | return {cur: 
start}; 86 | } 87 | }; 88 | -------------------------------------------------------------------------------- /mode/idl.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const idl: StreamParser 3 | -------------------------------------------------------------------------------- /mode/javascript.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const javascript: StreamParser 3 | export declare const json: StreamParser 4 | export declare const jsonld: StreamParser 5 | export declare const typescript: StreamParser 6 | -------------------------------------------------------------------------------- /mode/jinja2.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const jinja2: StreamParser 3 | -------------------------------------------------------------------------------- /mode/jinja2.js: -------------------------------------------------------------------------------- 1 | var keywords = ["and", "as", "block", "endblock", "by", "cycle", "debug", "else", "elif", 2 | "extends", "filter", "endfilter", "firstof", "do", "for", 3 | "endfor", "if", "endif", "ifchanged", "endifchanged", 4 | "ifequal", "endifequal", "ifnotequal", "set", "raw", "endraw", 5 | "endifnotequal", "in", "include", "load", "not", "now", "or", 6 | "parsed", "regroup", "reversed", "spaceless", "call", "endcall", "macro", 7 | "endmacro", "endspaceless", "ssi", "templatetag", "openblock", 8 | "closeblock", "openvariable", "closevariable", "without", "context", 9 | "openbrace", "closebrace", "opencomment", 10 | "closecomment", "widthratio", "url", "with", "endwith", 11 | "get_current_language", "trans", "endtrans", "noop", "blocktrans", 12 | "endblocktrans", "get_available_languages", 13 | "get_current_language_bidi", "pluralize", "autoescape", "endautoescape"], 14 | operator = /^[+\-*&%=<>!?|~^]/, 15 | sign = /^[:\[\(\{]/, 16 | atom = ["true", "false"], 17 | number = /^(\d[+\-\*\/])?\d+(\.\d+)?/; 18 | 19 | keywords = new RegExp("((" + keywords.join(")|(") + "))\\b"); 20 | atom = new RegExp("((" + atom.join(")|(") + "))\\b"); 21 | 22 | function tokenBase (stream, state) { 23 | var ch = stream.peek(); 24 | 25 | //Comment 26 | if (state.incomment) { 27 | if(!stream.skipTo("#}")) { 28 | stream.skipToEnd(); 29 | } else { 30 | stream.eatWhile(/\#|}/); 31 | state.incomment = false; 32 | } 33 | return "comment"; 34 | //Tag 35 | } else if (state.intag) { 36 | //After operator 37 | if(state.operator) { 38 | state.operator = false; 39 | if(stream.match(atom)) { 40 | return "atom"; 41 | } 42 | if(stream.match(number)) { 43 | return "number"; 44 | } 45 | } 46 | //After sign 47 | if(state.sign) { 48 | state.sign = false; 49 | if(stream.match(atom)) { 50 | return "atom"; 51 | } 52 | if(stream.match(number)) { 53 | return "number"; 54 | } 55 | } 56 | 57 | if(state.instring) { 58 | if(ch == state.instring) { 59 | state.instring = false; 60 | } 61 | stream.next(); 62 | return "string"; 63 | } else if(ch == "'" || ch == '"') { 64 | state.instring = ch; 65 | stream.next(); 66 | return "string"; 67 | } else if (state.inbraces > 0 && ch ==")") { 68 | stream.next() 69 | state.inbraces--; 70 | } 71 | else if (ch == "(") { 72 | stream.next() 73 | state.inbraces++; 74 | } 75 | else if (state.inbrackets > 0 && ch =="]") { 76 | 
stream.next() 77 | state.inbrackets--; 78 | } 79 | else if (ch == "[") { 80 | stream.next() 81 | state.inbrackets++; 82 | } else if (!state.lineTag && (stream.match(state.intag + "}") || stream.eat("-") && stream.match(state.intag + "}"))) { 83 | state.intag = false; 84 | return "tag"; 85 | } else if(stream.match(operator)) { 86 | state.operator = true; 87 | return "operator"; 88 | } else if(stream.match(sign)) { 89 | state.sign = true; 90 | } else { 91 | if (stream.column() == 1 && state.lineTag && stream.match(keywords)) { 92 | //allow nospace after tag before the keyword 93 | return "keyword"; 94 | } 95 | if(stream.eat(" ") || stream.sol()) { 96 | if(stream.match(keywords)) { 97 | return "keyword"; 98 | } 99 | if(stream.match(atom)) { 100 | return "atom"; 101 | } 102 | if(stream.match(number)) { 103 | return "number"; 104 | } 105 | if(stream.sol()) { 106 | stream.next(); 107 | } 108 | } else { 109 | stream.next(); 110 | } 111 | 112 | } 113 | return "variable"; 114 | } else if (stream.eat("{")) { 115 | if (stream.eat("#")) { 116 | state.incomment = true; 117 | if(!stream.skipTo("#}")) { 118 | stream.skipToEnd(); 119 | } else { 120 | stream.eatWhile(/\#|}/); 121 | state.incomment = false; 122 | } 123 | return "comment"; 124 | //Open tag 125 | } else if (ch = stream.eat(/\{|%/)) { 126 | //Cache close tag 127 | state.intag = ch; 128 | state.inbraces = 0; 129 | state.inbrackets = 0; 130 | if(ch == "{") { 131 | state.intag = "}"; 132 | } 133 | stream.eat("-"); 134 | return "tag"; 135 | } 136 | //Line statements 137 | } else if (stream.eat('#')) { 138 | if (stream.peek() == '#') { 139 | stream.skipToEnd(); 140 | return "comment" 141 | } 142 | else if (!stream.eol()) { 143 | state.intag = true; 144 | state.lineTag = true; 145 | state.inbraces = 0; 146 | state.inbrackets = 0; 147 | return "tag"; 148 | } 149 | } 150 | stream.next(); 151 | }; 152 | 153 | export const jinja2 = { 154 | name: "jinja2", 155 | startState: function () { 156 | return {tokenize: tokenBase, inbrackets: 0, inbraces: 0}; 157 | }, 158 | token: function(stream, state) { 159 | var style = state.tokenize(stream, state); 160 | if (stream.eol() && state.lineTag && !state.instring && state.inbraces == 0 && state.inbrackets == 0) { 161 | //Close line statement at the EOL 162 | state.intag = false 163 | state.lineTag = false 164 | } 165 | return style; 166 | }, 167 | languageData: { 168 | commentTokens: {block: {open: "{#", close: "#}", line: "##"}} 169 | } 170 | }; 171 | -------------------------------------------------------------------------------- /mode/julia.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const julia: StreamParser 3 | -------------------------------------------------------------------------------- /mode/livescript.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const liveScript: StreamParser 3 | -------------------------------------------------------------------------------- /mode/lua.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const lua: StreamParser 3 | -------------------------------------------------------------------------------- /mode/lua.js: -------------------------------------------------------------------------------- 1 | function prefixRE(words) { 2 | return new 
RegExp("^(?:" + words.join("|") + ")", "i"); 3 | } 4 | function wordRE(words) { 5 | return new RegExp("^(?:" + words.join("|") + ")$", "i"); 6 | } 7 | 8 | // long list of standard functions from lua manual 9 | var builtins = wordRE([ 10 | "_G","_VERSION","assert","collectgarbage","dofile","error","getfenv","getmetatable","ipairs","load", 11 | "loadfile","loadstring","module","next","pairs","pcall","print","rawequal","rawget","rawset","require", 12 | "select","setfenv","setmetatable","tonumber","tostring","type","unpack","xpcall", 13 | 14 | "coroutine.create","coroutine.resume","coroutine.running","coroutine.status","coroutine.wrap","coroutine.yield", 15 | 16 | "debug.debug","debug.getfenv","debug.gethook","debug.getinfo","debug.getlocal","debug.getmetatable", 17 | "debug.getregistry","debug.getupvalue","debug.setfenv","debug.sethook","debug.setlocal","debug.setmetatable", 18 | "debug.setupvalue","debug.traceback", 19 | 20 | "close","flush","lines","read","seek","setvbuf","write", 21 | 22 | "io.close","io.flush","io.input","io.lines","io.open","io.output","io.popen","io.read","io.stderr","io.stdin", 23 | "io.stdout","io.tmpfile","io.type","io.write", 24 | 25 | "math.abs","math.acos","math.asin","math.atan","math.atan2","math.ceil","math.cos","math.cosh","math.deg", 26 | "math.exp","math.floor","math.fmod","math.frexp","math.huge","math.ldexp","math.log","math.log10","math.max", 27 | "math.min","math.modf","math.pi","math.pow","math.rad","math.random","math.randomseed","math.sin","math.sinh", 28 | "math.sqrt","math.tan","math.tanh", 29 | 30 | "os.clock","os.date","os.difftime","os.execute","os.exit","os.getenv","os.remove","os.rename","os.setlocale", 31 | "os.time","os.tmpname", 32 | 33 | "package.cpath","package.loaded","package.loaders","package.loadlib","package.path","package.preload", 34 | "package.seeall", 35 | 36 | "string.byte","string.char","string.dump","string.find","string.format","string.gmatch","string.gsub", 37 | "string.len","string.lower","string.match","string.rep","string.reverse","string.sub","string.upper", 38 | 39 | "table.concat","table.insert","table.maxn","table.remove","table.sort" 40 | ]); 41 | var keywords = wordRE(["and","break","elseif","false","nil","not","or","return", 42 | "true","function", "end", "if", "then", "else", "do", 43 | "while", "repeat", "until", "for", "in", "local" ]); 44 | 45 | var indentTokens = wordRE(["function", "if","repeat","do", "\\(", "{"]); 46 | var dedentTokens = wordRE(["end", "until", "\\)", "}"]); 47 | var dedentPartial = prefixRE(["end", "until", "\\)", "}", "else", "elseif"]); 48 | 49 | function readBracket(stream) { 50 | var level = 0; 51 | while (stream.eat("=")) ++level; 52 | stream.eat("["); 53 | return level; 54 | } 55 | 56 | function normal(stream, state) { 57 | var ch = stream.next(); 58 | if (ch == "-" && stream.eat("-")) { 59 | if (stream.eat("[") && stream.eat("[")) 60 | return (state.cur = bracketed(readBracket(stream), "comment"))(stream, state); 61 | stream.skipToEnd(); 62 | return "comment"; 63 | } 64 | if (ch == "\"" || ch == "'") 65 | return (state.cur = string(ch))(stream, state); 66 | if (ch == "[" && /[\[=]/.test(stream.peek())) 67 | return (state.cur = bracketed(readBracket(stream), "string"))(stream, state); 68 | if (/\d/.test(ch)) { 69 | stream.eatWhile(/[\w.%]/); 70 | return "number"; 71 | } 72 | if (/[\w_]/.test(ch)) { 73 | stream.eatWhile(/[\w\\\-_.]/); 74 | return "variable"; 75 | } 76 | return null; 77 | } 78 | 79 | function bracketed(level, style) { 80 | return function(stream, state) { 81 | var curlev 
= null, ch; 82 | while ((ch = stream.next()) != null) { 83 | if (curlev == null) {if (ch == "]") curlev = 0;} 84 | else if (ch == "=") ++curlev; 85 | else if (ch == "]" && curlev == level) { state.cur = normal; break; } 86 | else curlev = null; 87 | } 88 | return style; 89 | }; 90 | } 91 | 92 | function string(quote) { 93 | return function(stream, state) { 94 | var escaped = false, ch; 95 | while ((ch = stream.next()) != null) { 96 | if (ch == quote && !escaped) break; 97 | escaped = !escaped && ch == "\\"; 98 | } 99 | if (!escaped) state.cur = normal; 100 | return "string"; 101 | }; 102 | } 103 | 104 | export const lua = { 105 | name: "lua", 106 | 107 | startState: function() { 108 | return {basecol: 0, indentDepth: 0, cur: normal}; 109 | }, 110 | 111 | token: function(stream, state) { 112 | if (stream.eatSpace()) return null; 113 | var style = state.cur(stream, state); 114 | var word = stream.current(); 115 | if (style == "variable") { 116 | if (keywords.test(word)) style = "keyword"; 117 | else if (builtins.test(word)) style = "builtin"; 118 | } 119 | if ((style != "comment") && (style != "string")){ 120 | if (indentTokens.test(word)) ++state.indentDepth; 121 | else if (dedentTokens.test(word)) --state.indentDepth; 122 | } 123 | return style; 124 | }, 125 | 126 | indent: function(state, textAfter, cx) { 127 | var closing = dedentPartial.test(textAfter); 128 | return state.basecol + cx.unit * (state.indentDepth - (closing ? 1 : 0)); 129 | }, 130 | 131 | languageData: { 132 | indentOnInput: /^\s*(?:end|until|else|\)|\})$/, 133 | commentTokens: {line: "--", block: {open: "--[[", close: "]]--"}} 134 | } 135 | }; 136 | -------------------------------------------------------------------------------- /mode/mathematica.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const mathematica: StreamParser 3 | -------------------------------------------------------------------------------- /mode/mathematica.js: -------------------------------------------------------------------------------- 1 | // used pattern building blocks 2 | var Identifier = '[a-zA-Z\\$][a-zA-Z0-9\\$]*'; 3 | var pBase = "(?:\\d+)"; 4 | var pFloat = "(?:\\.\\d+|\\d+\\.\\d*|\\d+)"; 5 | var pFloatBase = "(?:\\.\\w+|\\w+\\.\\w*|\\w+)"; 6 | var pPrecision = "(?:`(?:`?"+pFloat+")?)"; 7 | 8 | // regular expressions 9 | var reBaseForm = new RegExp('(?:'+pBase+'(?:\\^\\^'+pFloatBase+pPrecision+'?(?:\\*\\^[+-]?\\d+)?))'); 10 | var reFloatForm = new RegExp('(?:' + pFloat + pPrecision + '?(?:\\*\\^[+-]?\\d+)?)'); 11 | var reIdInContext = new RegExp('(?:`?)(?:' + Identifier + ')(?:`(?:' + Identifier + '))*(?:`?)'); 12 | 13 | function tokenBase(stream, state) { 14 | var ch; 15 | 16 | // get next character 17 | ch = stream.next(); 18 | 19 | // string 20 | if (ch === '"') { 21 | state.tokenize = tokenString; 22 | return state.tokenize(stream, state); 23 | } 24 | 25 | // comment 26 | if (ch === '(') { 27 | if (stream.eat('*')) { 28 | state.commentLevel++; 29 | state.tokenize = tokenComment; 30 | return state.tokenize(stream, state); 31 | } 32 | } 33 | 34 | // go back one character 35 | stream.backUp(1); 36 | 37 | // look for numbers 38 | // Numbers in a baseform 39 | if (stream.match(reBaseForm, true, false)) { 40 | return 'number'; 41 | } 42 | 43 | // Mathematica numbers. Floats (1.2, .2, 1.) can have optionally a precision (`float) or an accuracy definition 44 | // (``float). Note: while 1.2` is possible 1.2`` is not. 
At the end an exponent (float*^+12) can follow. 45 | if (stream.match(reFloatForm, true, false)) { 46 | return 'number'; 47 | } 48 | 49 | /* In[23] and Out[34] */ 50 | if (stream.match(/(?:In|Out)\[[0-9]*\]/, true, false)) { 51 | return 'atom'; 52 | } 53 | 54 | // usage 55 | if (stream.match(/([a-zA-Z\$][a-zA-Z0-9\$]*(?:`[a-zA-Z0-9\$]+)*::usage)/, true, false)) { 56 | return 'meta'; 57 | } 58 | 59 | // message 60 | if (stream.match(/([a-zA-Z\$][a-zA-Z0-9\$]*(?:`[a-zA-Z0-9\$]+)*::[a-zA-Z\$][a-zA-Z0-9\$]*):?/, true, false)) { 61 | return 'string.special'; 62 | } 63 | 64 | // this makes a look-ahead match for something like variable:{_Integer} 65 | // the match is then forwarded to the mma-patterns tokenizer. 66 | if (stream.match(/([a-zA-Z\$][a-zA-Z0-9\$]*\s*:)(?:(?:[a-zA-Z\$][a-zA-Z0-9\$]*)|(?:[^:=>~@\^\&\*\)\[\]'\?,\|])).*/, true, false)) { 67 | return 'variableName.special'; 68 | } 69 | 70 | // catch variables which are used together with Blank (_), BlankSequence (__) or BlankNullSequence (___) 71 | // Cannot start with a number, but can have numbers at any other position. Examples 72 | // blub__Integer, a1_, b34_Integer32 73 | if (stream.match(/[a-zA-Z\$][a-zA-Z0-9\$]*_+[a-zA-Z\$][a-zA-Z0-9\$]*/, true, false)) { 74 | return 'variableName.special'; 75 | } 76 | if (stream.match(/[a-zA-Z\$][a-zA-Z0-9\$]*_+/, true, false)) { 77 | return 'variableName.special'; 78 | } 79 | if (stream.match(/_+[a-zA-Z\$][a-zA-Z0-9\$]*/, true, false)) { 80 | return 'variableName.special'; 81 | } 82 | 83 | // Named characters in Mathematica, like \[Gamma]. 84 | if (stream.match(/\\\[[a-zA-Z\$][a-zA-Z0-9\$]*\]/, true, false)) { 85 | return 'character'; 86 | } 87 | 88 | // Match all braces separately 89 | if (stream.match(/(?:\[|\]|{|}|\(|\))/, true, false)) { 90 | return 'bracket'; 91 | } 92 | 93 | // Catch Slots (#, ##, #3, ##9 and the V10 named slots #name). I have never seen someone using more than one digit after #, so we match 94 | // only one. 95 | if (stream.match(/(?:#[a-zA-Z\$][a-zA-Z0-9\$]*|#+[0-9]?)/, true, false)) { 96 | return 'variableName.constant'; 97 | } 98 | 99 | // Literals like variables, keywords, functions 100 | if (stream.match(reIdInContext, true, false)) { 101 | return 'keyword'; 102 | } 103 | 104 | // operators. Note that operators like @@ or /; are matched separately for each symbol. 105 | if (stream.match(/(?:\\|\+|\-|\*|\/|,|;|\.|:|@|~|=|>|<|&|\||_|`|'|\^|\?|!|%)/, true, false)) { 106 | return 'operator'; 107 | } 108 | 109 | // everything else is an error 110 | stream.next(); // advance the stream. 
111 | return 'error'; 112 | } 113 | 114 | function tokenString(stream, state) { 115 | var next, end = false, escaped = false; 116 | while ((next = stream.next()) != null) { 117 | if (next === '"' && !escaped) { 118 | end = true; 119 | break; 120 | } 121 | escaped = !escaped && next === '\\'; 122 | } 123 | if (end && !escaped) { 124 | state.tokenize = tokenBase; 125 | } 126 | return 'string'; 127 | }; 128 | 129 | function tokenComment(stream, state) { 130 | var prev, next; 131 | while(state.commentLevel > 0 && (next = stream.next()) != null) { 132 | if (prev === '(' && next === '*') state.commentLevel++; 133 | if (prev === '*' && next === ')') state.commentLevel--; 134 | prev = next; 135 | } 136 | if (state.commentLevel <= 0) { 137 | state.tokenize = tokenBase; 138 | } 139 | return 'comment'; 140 | } 141 | 142 | export const mathematica = { 143 | name: "mathematica", 144 | startState: function() {return {tokenize: tokenBase, commentLevel: 0};}, 145 | token: function(stream, state) { 146 | if (stream.eatSpace()) return null; 147 | return state.tokenize(stream, state); 148 | }, 149 | languageData: { 150 | commentTokens: {block: {open: "(*", close: "*)"}} 151 | } 152 | } 153 | 154 | -------------------------------------------------------------------------------- /mode/mbox.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const mbox: StreamParser 3 | -------------------------------------------------------------------------------- /mode/mbox.js: -------------------------------------------------------------------------------- 1 | var rfc2822 = [ 2 | "From", "Sender", "Reply-To", "To", "Cc", "Bcc", "Message-ID", 3 | "In-Reply-To", "References", "Resent-From", "Resent-Sender", "Resent-To", 4 | "Resent-Cc", "Resent-Bcc", "Resent-Message-ID", "Return-Path", "Received" 5 | ]; 6 | var rfc2822NoEmail = [ 7 | "Date", "Subject", "Comments", "Keywords", "Resent-Date" 8 | ]; 9 | 10 | var whitespace = /^[ \t]/; 11 | var separator = /^From /; // See RFC 4155 12 | var rfc2822Header = new RegExp("^(" + rfc2822.join("|") + "): "); 13 | var rfc2822HeaderNoEmail = new RegExp("^(" + rfc2822NoEmail.join("|") + "): "); 14 | var header = /^[^:]+:/; // Optional fields defined in RFC 2822 15 | var email = /^[^ ]+@[^ ]+/; 16 | var untilEmail = /^.*?(?=[^ ]+?@[^ ]+)/; 17 | var bracketedEmail = /^<.*?>/; 18 | var untilBracketedEmail = /^.*?(?=<.*>)/; 19 | 20 | function styleForHeader(header) { 21 | if (header === "Subject") return "header"; 22 | return "string"; 23 | } 24 | 25 | function readToken(stream, state) { 26 | if (stream.sol()) { 27 | // From last line 28 | state.inSeparator = false; 29 | if (state.inHeader && stream.match(whitespace)) { 30 | // Header folding 31 | return null; 32 | } else { 33 | state.inHeader = false; 34 | state.header = null; 35 | } 36 | 37 | if (stream.match(separator)) { 38 | state.inHeaders = true; 39 | state.inSeparator = true; 40 | return "atom"; 41 | } 42 | 43 | var match; 44 | var emailPermitted = false; 45 | if ((match = stream.match(rfc2822HeaderNoEmail)) || 46 | (emailPermitted = true) && (match = stream.match(rfc2822Header))) { 47 | state.inHeaders = true; 48 | state.inHeader = true; 49 | state.emailPermitted = emailPermitted; 50 | state.header = match[1]; 51 | return "atom"; 52 | } 53 | 54 | // Use vim's heuristics: recognize custom headers only if the line is in a 55 | // block of legitimate headers. 
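    // For example, a custom header like "X-Mailer: Foo 1.0" is only styled as
    // a header while state.inHeaders is still true.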
56 | if (state.inHeaders && (match = stream.match(header))) { 57 | state.inHeader = true; 58 | state.emailPermitted = true; 59 | state.header = match[1]; 60 | return "atom"; 61 | } 62 | 63 | state.inHeaders = false; 64 | stream.skipToEnd(); 65 | return null; 66 | } 67 | 68 | if (state.inSeparator) { 69 | if (stream.match(email)) return "link"; 70 | if (stream.match(untilEmail)) return "atom"; 71 | stream.skipToEnd(); 72 | return "atom"; 73 | } 74 | 75 | if (state.inHeader) { 76 | var style = styleForHeader(state.header); 77 | 78 | if (state.emailPermitted) { 79 | if (stream.match(bracketedEmail)) return style + " link"; 80 | if (stream.match(untilBracketedEmail)) return style; 81 | } 82 | stream.skipToEnd(); 83 | return style; 84 | } 85 | 86 | stream.skipToEnd(); 87 | return null; 88 | }; 89 | 90 | export const mbox = { 91 | name: "mbox", 92 | startState: function() { 93 | return { 94 | // Is in a mbox separator 95 | inSeparator: false, 96 | // Is in a mail header 97 | inHeader: false, 98 | // If bracketed email is permitted. Only applicable when inHeader 99 | emailPermitted: false, 100 | // Name of current header 101 | header: null, 102 | // Is in a region of mail headers 103 | inHeaders: false 104 | }; 105 | }, 106 | token: readToken, 107 | blankLine: function(state) { 108 | state.inHeaders = state.inSeparator = state.inHeader = false; 109 | }, 110 | languageData: { 111 | autocomplete: rfc2822.concat(rfc2822NoEmail) 112 | } 113 | } 114 | 115 | -------------------------------------------------------------------------------- /mode/mirc.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const mirc: StreamParser 3 | -------------------------------------------------------------------------------- /mode/mllike.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const oCaml: StreamParser 3 | export declare const fSharp: StreamParser 4 | export declare const sml: StreamParser 5 | -------------------------------------------------------------------------------- /mode/modelica.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const modelica: StreamParser 3 | -------------------------------------------------------------------------------- /mode/modelica.js: -------------------------------------------------------------------------------- 1 | function words(str) { 2 | var obj = {}, words = str.split(" "); 3 | for (var i=0; i+\-\/^\[\]]/; 15 | var isDoubleOperatorChar = /(:=|<=|>=|==|<>|\.\+|\.\-|\.\*|\.\/|\.\^)/; 16 | var isDigit = /[0-9]/; 17 | var isNonDigit = /[_a-zA-Z]/; 18 | 19 | function tokenLineComment(stream, state) { 20 | stream.skipToEnd(); 21 | state.tokenize = null; 22 | return "comment"; 23 | } 24 | 25 | function tokenBlockComment(stream, state) { 26 | var maybeEnd = false, ch; 27 | while (ch = stream.next()) { 28 | if (maybeEnd && ch == "/") { 29 | state.tokenize = null; 30 | break; 31 | } 32 | maybeEnd = (ch == "*"); 33 | } 34 | return "comment"; 35 | } 36 | 37 | function tokenString(stream, state) { 38 | var escaped = false, ch; 39 | while ((ch = stream.next()) != null) { 40 | if (ch == '"' && !escaped) { 41 | state.tokenize = null; 42 | state.sol = false; 43 | break; 44 | } 45 | escaped = !escaped && ch == "\\"; 46 | } 47 | 48 | return "string"; 49 | } 50 | 51 | 
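// Identifiers and keywords. Besides classifying the word, this tracks block
// nesting: "package", "model", "when" or "connector" at the start of a line
// raises state.level and a leading "end" lowers it, which indent() below uses
// to compute the indentation depth.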
function tokenIdent(stream, state) { 52 | stream.eatWhile(isDigit); 53 | while (stream.eat(isDigit) || stream.eat(isNonDigit)) { } 54 | 55 | 56 | var cur = stream.current(); 57 | 58 | if(state.sol && (cur == "package" || cur == "model" || cur == "when" || cur == "connector")) state.level++; 59 | else if(state.sol && cur == "end" && state.level > 0) state.level--; 60 | 61 | state.tokenize = null; 62 | state.sol = false; 63 | 64 | if (keywords.propertyIsEnumerable(cur)) return "keyword"; 65 | else if (builtin.propertyIsEnumerable(cur)) return "builtin"; 66 | else if (atoms.propertyIsEnumerable(cur)) return "atom"; 67 | else return "variable"; 68 | } 69 | 70 | function tokenQIdent(stream, state) { 71 | while (stream.eat(/[^']/)) { } 72 | 73 | state.tokenize = null; 74 | state.sol = false; 75 | 76 | if(stream.eat("'")) 77 | return "variable"; 78 | else 79 | return "error"; 80 | } 81 | 82 | function tokenUnsignedNumber(stream, state) { 83 | stream.eatWhile(isDigit); 84 | if (stream.eat('.')) { 85 | stream.eatWhile(isDigit); 86 | } 87 | if (stream.eat('e') || stream.eat('E')) { 88 | if (!stream.eat('-')) 89 | stream.eat('+'); 90 | stream.eatWhile(isDigit); 91 | } 92 | 93 | state.tokenize = null; 94 | state.sol = false; 95 | return "number"; 96 | } 97 | 98 | // Interface 99 | export const modelica = { 100 | name: "modelica", 101 | startState: function() { 102 | return { 103 | tokenize: null, 104 | level: 0, 105 | sol: true 106 | }; 107 | }, 108 | 109 | token: function(stream, state) { 110 | if(state.tokenize != null) { 111 | return state.tokenize(stream, state); 112 | } 113 | 114 | if(stream.sol()) { 115 | state.sol = true; 116 | } 117 | 118 | // WHITESPACE 119 | if(stream.eatSpace()) { 120 | state.tokenize = null; 121 | return null; 122 | } 123 | 124 | var ch = stream.next(); 125 | 126 | // LINECOMMENT 127 | if(ch == '/' && stream.eat('/')) { 128 | state.tokenize = tokenLineComment; 129 | } 130 | // BLOCKCOMMENT 131 | else if(ch == '/' && stream.eat('*')) { 132 | state.tokenize = tokenBlockComment; 133 | } 134 | // TWO SYMBOL TOKENS 135 | else if(isDoubleOperatorChar.test(ch+stream.peek())) { 136 | stream.next(); 137 | state.tokenize = null; 138 | return "operator"; 139 | } 140 | // SINGLE SYMBOL TOKENS 141 | else if(isSingleOperatorChar.test(ch)) { 142 | state.tokenize = null; 143 | return "operator"; 144 | } 145 | // IDENT 146 | else if(isNonDigit.test(ch)) { 147 | state.tokenize = tokenIdent; 148 | } 149 | // Q-IDENT 150 | else if(ch == "'" && stream.peek() && stream.peek() != "'") { 151 | state.tokenize = tokenQIdent; 152 | } 153 | // STRING 154 | else if(ch == '"') { 155 | state.tokenize = tokenString; 156 | } 157 | // UNSIGNED_NUMBER 158 | else if(isDigit.test(ch)) { 159 | state.tokenize = tokenUnsignedNumber; 160 | } 161 | // ERROR 162 | else { 163 | state.tokenize = null; 164 | return "error"; 165 | } 166 | 167 | return state.tokenize(stream, state); 168 | }, 169 | 170 | indent: function(state, textAfter, cx) { 171 | if (state.tokenize != null) return null; 172 | 173 | var level = state.level; 174 | if(/(algorithm)/.test(textAfter)) level--; 175 | if(/(equation)/.test(textAfter)) level--; 176 | if(/(initial algorithm)/.test(textAfter)) level--; 177 | if(/(initial equation)/.test(textAfter)) level--; 178 | if(/(end)/.test(textAfter)) level--; 179 | 180 | if(level > 0) 181 | return cx.unit*level; 182 | else 183 | return 0; 184 | }, 185 | 186 | languageData: { 187 | commentTokens: {line: "//", block: {open: "/*", close: "*/"}}, 188 | autocomplete: completions 189 | } 190 | }; 191 | 
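// Usage sketch (not part of the mode itself; the import paths below are an
// assumption about the published package layout): each file in mode/ exports
// a plain StreamParser spec like `modelica` above, which becomes a CodeMirror 6
// language extension via StreamLanguage.define:
//
//   import {EditorView, basicSetup} from "codemirror"
//   import {StreamLanguage} from "@codemirror/language"
//   import {modelica} from "@codemirror/legacy-modes/mode/modelica"
//
//   new EditorView({
//     doc: "model M\n  Real x;\nend M;\n",
//     extensions: [basicSetup, StreamLanguage.define(modelica)],
//     parent: document.body
//   })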
-------------------------------------------------------------------------------- /mode/mscgen.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const mscgen: StreamParser 3 | export declare const msgenny: StreamParser 4 | export declare const xu: StreamParser 5 | -------------------------------------------------------------------------------- /mode/mumps.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const mumps: StreamParser 3 | -------------------------------------------------------------------------------- /mode/mumps.js: -------------------------------------------------------------------------------- 1 | function wordRegexp(words) { 2 | return new RegExp("^((" + words.join(")|(") + "))\\b", "i"); 3 | } 4 | 5 | var singleOperators = new RegExp("^[\\+\\-\\*/&#!_?\\\\<>=\\'\\[\\]]"); 6 | var doubleOperators = new RegExp("^(('=)|(<=)|(>=)|('>)|('<)|([[)|(]])|(^$))"); 7 | var singleDelimiters = new RegExp("^[\\.,:]"); 8 | var brackets = new RegExp("[()]"); 9 | var identifiers = new RegExp("^[%A-Za-z][A-Za-z0-9]*"); 10 | var commandKeywords = ["break","close","do","else","for","goto", "halt", "hang", "if", "job","kill","lock","merge","new","open", "quit", "read", "set", "tcommit", "trollback", "tstart", "use", "view", "write", "xecute", "b","c","d","e","f","g", "h", "i", "j","k","l","m","n","o", "q", "r", "s", "tc", "tro", "ts", "u", "v", "w", "x"]; 11 | // The following list includes intrinsic functions _and_ special variables 12 | var intrinsicFuncsWords = ["\\$ascii", "\\$char", "\\$data", "\\$ecode", "\\$estack", "\\$etrap", "\\$extract", "\\$find", "\\$fnumber", "\\$get", "\\$horolog", "\\$io", "\\$increment", "\\$job", "\\$justify", "\\$length", "\\$name", "\\$next", "\\$order", "\\$piece", "\\$qlength", "\\$qsubscript", "\\$query", "\\$quit", "\\$random", "\\$reverse", "\\$select", "\\$stack", "\\$test", "\\$text", "\\$translate", "\\$view", "\\$x", "\\$y", "\\$a", "\\$c", "\\$d", "\\$e", "\\$ec", "\\$es", "\\$et", "\\$f", "\\$fn", "\\$g", "\\$h", "\\$i", "\\$j", "\\$l", "\\$n", "\\$na", "\\$o", "\\$p", "\\$q", "\\$ql", "\\$qs", "\\$r", "\\$re", "\\$s", "\\$st", "\\$t", "\\$tr", "\\$v", "\\$z"]; 13 | var intrinsicFuncs = wordRegexp(intrinsicFuncsWords); 14 | var command = wordRegexp(commandKeywords); 15 | 16 | function tokenBase(stream, state) { 17 | if (stream.sol()) { 18 | state.label = true; 19 | state.commandMode = 0; 20 | } 21 | 22 | // The character has meaning in MUMPS. Ignoring consecutive 23 | // spaces would interfere with interpreting whether the next non-space 24 | // character belongs to the command or argument context. 
25 | 26 | // Examine each character and update a mode variable whose interpretation is: 27 | // >0 => command 0 => argument <0 => command post-conditional 28 | var ch = stream.peek(); 29 | 30 | if (ch == " " || ch == "\t") { // Pre-process 31 | state.label = false; 32 | if (state.commandMode == 0) 33 | state.commandMode = 1; 34 | else if ((state.commandMode < 0) || (state.commandMode == 2)) 35 | state.commandMode = 0; 36 | } else if ((ch != ".") && (state.commandMode > 0)) { 37 | if (ch == ":") 38 | state.commandMode = -1; // SIS - Command post-conditional 39 | else 40 | state.commandMode = 2; 41 | } 42 | 43 | // Do not color parameter list as line tag 44 | if ((ch === "(") || (ch === "\u0009")) 45 | state.label = false; 46 | 47 | // MUMPS comment starts with ";" 48 | if (ch === ";") { 49 | stream.skipToEnd(); 50 | return "comment"; 51 | } 52 | 53 | // Number Literals // SIS/RLM - MUMPS permits canonic number followed by concatenate operator 54 | if (stream.match(/^[-+]?\d+(\.\d+)?([eE][-+]?\d+)?/)) 55 | return "number"; 56 | 57 | // Handle Strings 58 | if (ch == '"') { 59 | if (stream.skipTo('"')) { 60 | stream.next(); 61 | return "string"; 62 | } else { 63 | stream.skipToEnd(); 64 | return "error"; 65 | } 66 | } 67 | 68 | // Handle operators and Delimiters 69 | if (stream.match(doubleOperators) || stream.match(singleOperators)) 70 | return "operator"; 71 | 72 | // Prevents leading "." in DO block from falling through to error 73 | if (stream.match(singleDelimiters)) 74 | return null; 75 | 76 | if (brackets.test(ch)) { 77 | stream.next(); 78 | return "bracket"; 79 | } 80 | 81 | if (state.commandMode > 0 && stream.match(command)) 82 | return "controlKeyword"; 83 | 84 | if (stream.match(intrinsicFuncs)) 85 | return "builtin"; 86 | 87 | if (stream.match(identifiers)) 88 | return "variable"; 89 | 90 | // Detect dollar-sign when not a documented intrinsic function 91 | // "^" may introduce a GVN or SSVN - Color same as function 92 | if (ch === "$" || ch === "^") { 93 | stream.next(); 94 | return "builtin"; 95 | } 96 | 97 | // MUMPS Indirection 98 | if (ch === "@") { 99 | stream.next(); 100 | return "string.special"; 101 | } 102 | 103 | if (/[\w%]/.test(ch)) { 104 | stream.eatWhile(/[\w%]/); 105 | return "variable"; 106 | } 107 | 108 | // Handle non-detected items 109 | stream.next(); 110 | return "error"; 111 | } 112 | 113 | export const mumps = { 114 | name: "mumps", 115 | startState: function() { 116 | return { 117 | label: false, 118 | commandMode: 0 119 | }; 120 | }, 121 | 122 | token: function(stream, state) { 123 | var style = tokenBase(stream, state); 124 | if (state.label) return "tag"; 125 | return style; 126 | } 127 | }; 128 | -------------------------------------------------------------------------------- /mode/nginx.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const nginx: StreamParser 3 | -------------------------------------------------------------------------------- /mode/nsis.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const nsis: StreamParser 3 | -------------------------------------------------------------------------------- /mode/ntriples.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const ntriples: StreamParser 3 | 
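// Illustrative note (not part of the declaration): the tokenizer in
// ntriples.js below walks a triple such as
//   <http://example.org/s> <http://example.org/p> "lit"@en .
// through the Location states PRE_SUBJECT -> WRITING_SUB_URI -> PRE_PRED ->
// WRITING_PRED_URI -> PRE_OBJ -> WRITING_OBJ_LITERAL -> WRITING_LIT_LANG ->
// POST_OBJ, returning to PRE_SUBJECT at the terminating ".".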
-------------------------------------------------------------------------------- /mode/ntriples.js: -------------------------------------------------------------------------------- 1 | var Location = { 2 | PRE_SUBJECT : 0, 3 | WRITING_SUB_URI : 1, 4 | WRITING_BNODE_URI : 2, 5 | PRE_PRED : 3, 6 | WRITING_PRED_URI : 4, 7 | PRE_OBJ : 5, 8 | WRITING_OBJ_URI : 6, 9 | WRITING_OBJ_BNODE : 7, 10 | WRITING_OBJ_LITERAL : 8, 11 | WRITING_LIT_LANG : 9, 12 | WRITING_LIT_TYPE : 10, 13 | POST_OBJ : 11, 14 | ERROR : 12 15 | }; 16 | function transitState(currState, c) { 17 | var currLocation = currState.location; 18 | var ret; 19 | 20 | // Opening. 21 | if (currLocation == Location.PRE_SUBJECT && c == '<') ret = Location.WRITING_SUB_URI; 22 | else if(currLocation == Location.PRE_SUBJECT && c == '_') ret = Location.WRITING_BNODE_URI; 23 | else if(currLocation == Location.PRE_PRED && c == '<') ret = Location.WRITING_PRED_URI; 24 | else if(currLocation == Location.PRE_OBJ && c == '<') ret = Location.WRITING_OBJ_URI; 25 | else if(currLocation == Location.PRE_OBJ && c == '_') ret = Location.WRITING_OBJ_BNODE; 26 | else if(currLocation == Location.PRE_OBJ && c == '"') ret = Location.WRITING_OBJ_LITERAL; 27 | 28 | // Closing. 29 | else if(currLocation == Location.WRITING_SUB_URI && c == '>') ret = Location.PRE_PRED; 30 | else if(currLocation == Location.WRITING_BNODE_URI && c == ' ') ret = Location.PRE_PRED; 31 | else if(currLocation == Location.WRITING_PRED_URI && c == '>') ret = Location.PRE_OBJ; 32 | else if(currLocation == Location.WRITING_OBJ_URI && c == '>') ret = Location.POST_OBJ; 33 | else if(currLocation == Location.WRITING_OBJ_BNODE && c == ' ') ret = Location.POST_OBJ; 34 | else if(currLocation == Location.WRITING_OBJ_LITERAL && c == '"') ret = Location.POST_OBJ; 35 | else if(currLocation == Location.WRITING_LIT_LANG && c == ' ') ret = Location.POST_OBJ; 36 | else if(currLocation == Location.WRITING_LIT_TYPE && c == '>') ret = Location.POST_OBJ; 37 | 38 | // Closing typed and language literal. 39 | else if(currLocation == Location.WRITING_OBJ_LITERAL && c == '@') ret = Location.WRITING_LIT_LANG; 40 | else if(currLocation == Location.WRITING_OBJ_LITERAL && c == '^') ret = Location.WRITING_LIT_TYPE; 41 | 42 | // Spaces. 43 | else if( c == ' ' && 44 | ( 45 | currLocation == Location.PRE_SUBJECT || 46 | currLocation == Location.PRE_PRED || 47 | currLocation == Location.PRE_OBJ || 48 | currLocation == Location.POST_OBJ 49 | ) 50 | ) ret = currLocation; 51 | 52 | // Reset. 
53 | else if(currLocation == Location.POST_OBJ && c == '.') ret = Location.PRE_SUBJECT; 54 | 55 | // Error 56 | else ret = Location.ERROR; 57 | 58 | currState.location=ret; 59 | } 60 | 61 | export const ntriples = { 62 | name: "ntriples", 63 | startState: function() { 64 | return { 65 | location : Location.PRE_SUBJECT, 66 | uris : [], 67 | anchors : [], 68 | bnodes : [], 69 | langs : [], 70 | types : [] 71 | }; 72 | }, 73 | token: function(stream, state) { 74 | var ch = stream.next(); 75 | if(ch == '<') { 76 | transitState(state, ch); 77 | var parsedURI = ''; 78 | stream.eatWhile( function(c) { if( c != '#' && c != '>' ) { parsedURI += c; return true; } return false;} ); 79 | state.uris.push(parsedURI); 80 | if( stream.match('#', false) ) return 'variable'; 81 | stream.next(); 82 | transitState(state, '>'); 83 | return 'variable'; 84 | } 85 | if(ch == '#') { 86 | var parsedAnchor = ''; 87 | stream.eatWhile(function(c) { if(c != '>' && c != ' ') { parsedAnchor+= c; return true; } return false;}); 88 | state.anchors.push(parsedAnchor); 89 | return 'url'; 90 | } 91 | if(ch == '>') { 92 | transitState(state, '>'); 93 | return 'variable'; 94 | } 95 | if(ch == '_') { 96 | transitState(state, ch); 97 | var parsedBNode = ''; 98 | stream.eatWhile(function(c) { if( c != ' ' ) { parsedBNode += c; return true; } return false;}); 99 | state.bnodes.push(parsedBNode); 100 | stream.next(); 101 | transitState(state, ' '); 102 | return 'builtin'; 103 | } 104 | if(ch == '"') { 105 | transitState(state, ch); 106 | stream.eatWhile( function(c) { return c != '"'; } ); 107 | stream.next(); 108 | if( stream.peek() != '@' && stream.peek() != '^' ) { 109 | transitState(state, '"'); 110 | } 111 | return 'string'; 112 | } 113 | if( ch == '@' ) { 114 | transitState(state, '@'); 115 | var parsedLang = ''; 116 | stream.eatWhile(function(c) { if( c != ' ' ) { parsedLang += c; return true; } return false;}); 117 | state.langs.push(parsedLang); 118 | stream.next(); 119 | transitState(state, ' '); 120 | return 'string.special'; 121 | } 122 | if( ch == '^' ) { 123 | stream.next(); 124 | transitState(state, '^'); 125 | var parsedType = ''; 126 | stream.eatWhile(function(c) { if( c != '>' ) { parsedType += c; return true; } return false;} ); 127 | state.types.push(parsedType); 128 | stream.next(); 129 | transitState(state, '>'); 130 | return 'variable'; 131 | } 132 | if( ch == ' ' ) { 133 | transitState(state, ch); 134 | } 135 | if( ch == '.' 
) { 136 | transitState(state, ch); 137 | } 138 | } 139 | }; 140 | -------------------------------------------------------------------------------- /mode/octave.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const octave: StreamParser 3 | -------------------------------------------------------------------------------- /mode/octave.js: -------------------------------------------------------------------------------- 1 | function wordRegexp(words) { 2 | return new RegExp("^((" + words.join(")|(") + "))\\b"); 3 | } 4 | 5 | var singleOperators = new RegExp("^[\\+\\-\\*/&|\\^~<>!@'\\\\]"); 6 | var singleDelimiters = new RegExp('^[\\(\\[\\{\\},:=;\\.]'); 7 | var doubleOperators = new RegExp("^((==)|(~=)|(<=)|(>=)|(<<)|(>>)|(\\.[\\+\\-\\*/\\^\\\\]))"); 8 | var doubleDelimiters = new RegExp("^((!=)|(\\+=)|(\\-=)|(\\*=)|(/=)|(&=)|(\\|=)|(\\^=))"); 9 | var tripleDelimiters = new RegExp("^((>>=)|(<<=))"); 10 | var expressionEnd = new RegExp("^[\\]\\)]"); 11 | var identifiers = new RegExp("^[_A-Za-z\xa1-\uffff][_A-Za-z0-9\xa1-\uffff]*"); 12 | 13 | var builtins = wordRegexp([ 14 | 'error', 'eval', 'function', 'abs', 'acos', 'atan', 'asin', 'cos', 15 | 'cosh', 'exp', 'log', 'prod', 'sum', 'log10', 'max', 'min', 'sign', 'sin', 'sinh', 16 | 'sqrt', 'tan', 'reshape', 'break', 'zeros', 'default', 'margin', 'round', 'ones', 17 | 'rand', 'syn', 'ceil', 'floor', 'size', 'clear', 'zeros', 'eye', 'mean', 'std', 'cov', 18 | 'det', 'eig', 'inv', 'norm', 'rank', 'trace', 'expm', 'logm', 'sqrtm', 'linspace', 'plot', 19 | 'title', 'xlabel', 'ylabel', 'legend', 'text', 'grid', 'meshgrid', 'mesh', 'num2str', 20 | 'fft', 'ifft', 'arrayfun', 'cellfun', 'input', 'fliplr', 'flipud', 'ismember' 21 | ]); 22 | 23 | var keywords = wordRegexp([ 24 | 'return', 'case', 'switch', 'else', 'elseif', 'end', 'endif', 'endfunction', 25 | 'if', 'otherwise', 'do', 'for', 'while', 'try', 'catch', 'classdef', 'properties', 'events', 26 | 'methods', 'global', 'persistent', 'endfor', 'endwhile', 'printf', 'sprintf', 'disp', 'until', 27 | 'continue', 'pkg' 28 | ]); 29 | 30 | 31 | // tokenizers 32 | function tokenTranspose(stream, state) { 33 | if (!stream.sol() && stream.peek() === '\'') { 34 | stream.next(); 35 | state.tokenize = tokenBase; 36 | return 'operator'; 37 | } 38 | state.tokenize = tokenBase; 39 | return tokenBase(stream, state); 40 | } 41 | 42 | 43 | function tokenComment(stream, state) { 44 | if (stream.match(/^.*%}/)) { 45 | state.tokenize = tokenBase; 46 | return 'comment'; 47 | }; 48 | stream.skipToEnd(); 49 | return 'comment'; 50 | } 51 | 52 | function tokenBase(stream, state) { 53 | // whitespaces 54 | if (stream.eatSpace()) return null; 55 | 56 | // Handle one line Comments 57 | if (stream.match('%{')){ 58 | state.tokenize = tokenComment; 59 | stream.skipToEnd(); 60 | return 'comment'; 61 | } 62 | 63 | if (stream.match(/^[%#]/)){ 64 | stream.skipToEnd(); 65 | return 'comment'; 66 | } 67 | 68 | // Handle Number Literals 69 | if (stream.match(/^[0-9\.+-]/, false)) { 70 | if (stream.match(/^[+-]?0x[0-9a-fA-F]+[ij]?/)) { 71 | stream.tokenize = tokenBase; 72 | return 'number'; }; 73 | if (stream.match(/^[+-]?\d*\.\d+([EeDd][+-]?\d+)?[ij]?/)) { return 'number'; }; 74 | if (stream.match(/^[+-]?\d+([EeDd][+-]?\d+)?[ij]?/)) { return 'number'; }; 75 | } 76 | if (stream.match(wordRegexp(['nan','NaN','inf','Inf']))) { return 'number'; }; 77 | 78 | // Handle Strings 79 | var m = stream.match(/^"(?:[^"]|"")*("|$)/) || 
stream.match(/^'(?:[^']|'')*('|$)/) 80 | if (m) { return m[1] ? 'string' : "error"; } 81 | 82 | // Handle words 83 | if (stream.match(keywords)) { return 'keyword'; } ; 84 | if (stream.match(builtins)) { return 'builtin'; } ; 85 | if (stream.match(identifiers)) { return 'variable'; } ; 86 | 87 | if (stream.match(singleOperators) || stream.match(doubleOperators)) { return 'operator'; }; 88 | if (stream.match(singleDelimiters) || stream.match(doubleDelimiters) || stream.match(tripleDelimiters)) { return null; }; 89 | 90 | if (stream.match(expressionEnd)) { 91 | state.tokenize = tokenTranspose; 92 | return null; 93 | }; 94 | 95 | 96 | // Handle non-detected items 97 | stream.next(); 98 | return 'error'; 99 | }; 100 | 101 | 102 | export const octave = { 103 | name: "octave", 104 | 105 | startState: function() { 106 | return { 107 | tokenize: tokenBase 108 | }; 109 | }, 110 | 111 | token: function(stream, state) { 112 | var style = state.tokenize(stream, state); 113 | if (style === 'number' || style === 'variable'){ 114 | state.tokenize = tokenTranspose; 115 | } 116 | return style; 117 | }, 118 | 119 | languageData: { 120 | commentTokens: {line: "%"} 121 | } 122 | }; 123 | 124 | -------------------------------------------------------------------------------- /mode/oz.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const oz: StreamParser 3 | -------------------------------------------------------------------------------- /mode/pascal.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const pascal: StreamParser 3 | -------------------------------------------------------------------------------- /mode/pascal.js: -------------------------------------------------------------------------------- 1 | function words(str) { 2 | var obj = {}, words = str.split(" "); 3 | for (var i = 0; i < words.length; ++i) obj[words[i]] = true; 4 | return obj; 5 | } 6 | var keywords = words( 7 | "absolute and array asm begin case const constructor destructor div do " + 8 | "downto else end file for function goto if implementation in inherited " + 9 | "inline interface label mod nil not object of operator or packed procedure " + 10 | "program record reintroduce repeat self set shl shr string then to type " + 11 | "unit until uses var while with xor as class dispinterface except exports " + 12 | "finalization finally initialization inline is library on out packed " + 13 | "property raise resourcestring threadvar try absolute abstract alias " + 14 | "assembler bitpacked break cdecl continue cppdecl cvar default deprecated " + 15 | "dynamic enumerator experimental export external far far16 forward generic " + 16 | "helper implements index interrupt iocheck local message name near " + 17 | "nodefault noreturn nostackframe oldfpccall otherwise overload override " + 18 | "pascal platform private protected public published read register " + 19 | "reintroduce result safecall saveregisters softfloat specialize static " + 20 | "stdcall stored strict unaligned unimplemented varargs virtual write"); 21 | var atoms = {"null": true}; 22 | 23 | var isOperatorChar = /[+\-*&%=<>!?|\/]/; 24 | 25 | function tokenBase(stream, state) { 26 | var ch = stream.next(); 27 | if (ch == "#" && state.startOfLine) { 28 | stream.skipToEnd(); 29 | return "meta"; 30 | } 31 | if (ch == '"' || ch == "'") { 32 | state.tokenize = 
tokenString(ch); 33 | return state.tokenize(stream, state); 34 | } 35 | if (ch == "(" && stream.eat("*")) { 36 | state.tokenize = tokenComment; 37 | return tokenComment(stream, state); 38 | } 39 | if (ch == "{") { 40 | state.tokenize = tokenCommentBraces; 41 | return tokenCommentBraces(stream, state); 42 | } 43 | if (/[\[\]\(\),;\:\.]/.test(ch)) { 44 | return null; 45 | } 46 | if (/\d/.test(ch)) { 47 | stream.eatWhile(/[\w\.]/); 48 | return "number"; 49 | } 50 | if (ch == "/") { 51 | if (stream.eat("/")) { 52 | stream.skipToEnd(); 53 | return "comment"; 54 | } 55 | } 56 | if (isOperatorChar.test(ch)) { 57 | stream.eatWhile(isOperatorChar); 58 | return "operator"; 59 | } 60 | stream.eatWhile(/[\w\$_]/); 61 | var cur = stream.current().toLowerCase(); 62 | if (keywords.propertyIsEnumerable(cur)) return "keyword"; 63 | if (atoms.propertyIsEnumerable(cur)) return "atom"; 64 | return "variable"; 65 | } 66 | 67 | function tokenString(quote) { 68 | return function(stream, state) { 69 | var escaped = false, next, end = false; 70 | while ((next = stream.next()) != null) { 71 | if (next == quote && !escaped) {end = true; break;} 72 | escaped = !escaped && next == "\\"; 73 | } 74 | if (end || !escaped) state.tokenize = null; 75 | return "string"; 76 | }; 77 | } 78 | 79 | function tokenComment(stream, state) { 80 | var maybeEnd = false, ch; 81 | while (ch = stream.next()) { 82 | if (ch == ")" && maybeEnd) { 83 | state.tokenize = null; 84 | break; 85 | } 86 | maybeEnd = (ch == "*"); 87 | } 88 | return "comment"; 89 | } 90 | 91 | function tokenCommentBraces(stream, state) { 92 | var ch; 93 | while (ch = stream.next()) { 94 | if (ch == "}") { 95 | state.tokenize = null; 96 | break; 97 | } 98 | } 99 | return "comment"; 100 | } 101 | 102 | // Interface 103 | 104 | export const pascal = { 105 | name: "pascal", 106 | 107 | startState: function() { 108 | return {tokenize: null}; 109 | }, 110 | 111 | token: function(stream, state) { 112 | if (stream.eatSpace()) return null; 113 | var style = (state.tokenize || tokenBase)(stream, state); 114 | if (style == "comment" || style == "meta") return style; 115 | return style; 116 | }, 117 | 118 | languageData: { 119 | indentOnInput: /^\s*[{}]$/, 120 | commentTokens: {block: {open: "(*", close: "*)"}} 121 | } 122 | }; 123 | -------------------------------------------------------------------------------- /mode/pegjs.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const pegjs: StreamParser 3 | -------------------------------------------------------------------------------- /mode/pegjs.js: -------------------------------------------------------------------------------- 1 | import {javascript} from "./javascript.js" 2 | 3 | function identifier(stream) { 4 | return stream.match(/^[a-zA-Z_][a-zA-Z0-9_]*/) 5 | } 6 | 7 | export const pegjs = { 8 | name: "pegjs", 9 | startState: function () { 10 | return { 11 | inString: false, 12 | stringType: null, 13 | inComment: false, 14 | inCharacterClass: false, 15 | braced: 0, 16 | lhs: true, 17 | localState: null 18 | } 19 | }, 20 | token: function (stream, state) { 21 | if (!state.inString && 22 | !state.inComment && 23 | (stream.peek() === '"' || stream.peek() === "'")) { 24 | //check for state changes 25 | state.stringType = stream.peek() 26 | stream.next() // Skip quote 27 | state.inString = true // Update state 28 | } 29 | if (!state.inString && !state.inComment && stream.match("/*")) 30 | state.inComment = true 31 | 32 | 
if (state.inString) { 33 | while (state.inString && !stream.eol()) { 34 | if (stream.peek() === state.stringType) { 35 | stream.next() // Skip quote 36 | state.inString = false // Clear flag 37 | } else if (stream.peek() === "\\") { 38 | stream.next() 39 | stream.next() 40 | } else { 41 | stream.match(/^.[^\\"']*/) 42 | } 43 | } 44 | return state.lhs ? "property string" : "string" // Token style 45 | } else if (state.inComment) { 46 | while (state.inComment && !stream.eol()) { 47 | if (stream.match("*/")) state.inComment = false // Clear flag 48 | else stream.match(/^.[^*]*/) 49 | } 50 | return "comment" 51 | } else if (state.inCharacterClass) { 52 | while (state.inCharacterClass && !stream.eol()) { 53 | if (!(stream.match(/^[^\]\\]+/) || stream.match(/^\\./))) 54 | state.inCharacterClass = false 55 | } 56 | } else if (stream.peek() === "[") { 57 | stream.next() 58 | state.inCharacterClass = true 59 | return "bracket" 60 | } else if (stream.match("//")) { 61 | stream.skipToEnd() 62 | return "comment" 63 | } else if (state.braced || stream.peek() === "{") { 64 | if (state.localState === null) state.localState = javascript.startState() 65 | var token = javascript.token(stream, state.localState) 66 | var text = stream.current() 67 | if (!token) { 68 | for (var i = 0; i < text.length; i++) { 69 | if (text[i] === "{") state.braced++ 70 | else if (text[i] === "}") state.braced-- 71 | } 72 | } 73 | return token 74 | } else if (identifier(stream)) { 75 | if (stream.peek() === ":") return "variable" 76 | return "variable-2" 77 | } else if (["[", "]", "(", ")"].indexOf(stream.peek() || "") !== -1) { 78 | stream.next() 79 | return "bracket" 80 | } else if (!stream.eatSpace()) { 81 | stream.next() 82 | } 83 | return null 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /mode/perl.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const perl: StreamParser 3 | -------------------------------------------------------------------------------- /mode/pig.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const pig: StreamParser 3 | -------------------------------------------------------------------------------- /mode/pig.js: -------------------------------------------------------------------------------- 1 | function words(str) { 2 | var obj = {}, words = str.split(" "); 3 | for (var i = 0; i < words.length; ++i) obj[words[i]] = true; 4 | return obj; 5 | } 6 | 7 | // builtin funcs taken from trunk revision 1303237 8 | var pBuiltins = "ABS ACOS ARITY ASIN ATAN AVG BAGSIZE BINSTORAGE BLOOM BUILDBLOOM CBRT CEIL " 9 | + "CONCAT COR COS COSH COUNT COUNT_STAR COV CONSTANTSIZE CUBEDIMENSIONS DIFF DISTINCT DOUBLEABS " 10 | + "DOUBLEAVG DOUBLEBASE DOUBLEMAX DOUBLEMIN DOUBLEROUND DOUBLESUM EXP FLOOR FLOATABS FLOATAVG " 11 | + "FLOATMAX FLOATMIN FLOATROUND FLOATSUM GENERICINVOKER INDEXOF INTABS INTAVG INTMAX INTMIN " 12 | + "INTSUM INVOKEFORDOUBLE INVOKEFORFLOAT INVOKEFORINT INVOKEFORLONG INVOKEFORSTRING INVOKER " 13 | + "ISEMPTY JSONLOADER JSONMETADATA JSONSTORAGE LAST_INDEX_OF LCFIRST LOG LOG10 LOWER LONGABS " 14 | + "LONGAVG LONGMAX LONGMIN LONGSUM MAX MIN MAPSIZE MONITOREDUDF NONDETERMINISTIC OUTPUTSCHEMA " 15 | + "PIGSTORAGE PIGSTREAMING RANDOM REGEX_EXTRACT REGEX_EXTRACT_ALL REPLACE ROUND SIN SINH SIZE " 16 | + "SQRT STRSPLIT SUBSTRING SUM 
STRINGCONCAT STRINGMAX STRINGMIN STRINGSIZE TAN TANH TOBAG " 17 | + "TOKENIZE TOMAP TOP TOTUPLE TRIM TEXTLOADER TUPLESIZE UCFIRST UPPER UTF8STORAGECONVERTER "; 18 | 19 | // taken from QueryLexer.g 20 | var pKeywords = "VOID IMPORT RETURNS DEFINE LOAD FILTER FOREACH ORDER CUBE DISTINCT COGROUP " 21 | + "JOIN CROSS UNION SPLIT INTO IF OTHERWISE ALL AS BY USING INNER OUTER ONSCHEMA PARALLEL " 22 | + "PARTITION GROUP AND OR NOT GENERATE FLATTEN ASC DESC IS STREAM THROUGH STORE MAPREDUCE " 23 | + "SHIP CACHE INPUT OUTPUT STDERROR STDIN STDOUT LIMIT SAMPLE LEFT RIGHT FULL EQ GT LT GTE LTE " 24 | + "NEQ MATCHES TRUE FALSE DUMP"; 25 | 26 | // data types 27 | var pTypes = "BOOLEAN INT LONG FLOAT DOUBLE CHARARRAY BYTEARRAY BAG TUPLE MAP "; 28 | 29 | var builtins = words(pBuiltins), keywords = words(pKeywords), types = words(pTypes) 30 | 31 | var isOperatorChar = /[*+\-%<>=&?:\/!|]/; 32 | 33 | function chain(stream, state, f) { 34 | state.tokenize = f; 35 | return f(stream, state); 36 | } 37 | 38 | function tokenComment(stream, state) { 39 | var isEnd = false; 40 | var ch; 41 | while(ch = stream.next()) { 42 | if(ch == "/" && isEnd) { 43 | state.tokenize = tokenBase; 44 | break; 45 | } 46 | isEnd = (ch == "*"); 47 | } 48 | return "comment"; 49 | } 50 | 51 | function tokenString(quote) { 52 | return function(stream, state) { 53 | var escaped = false, next, end = false; 54 | while((next = stream.next()) != null) { 55 | if (next == quote && !escaped) { 56 | end = true; break; 57 | } 58 | escaped = !escaped && next == "\\"; 59 | } 60 | if (end || !escaped) 61 | state.tokenize = tokenBase; 62 | return "error"; 63 | }; 64 | } 65 | 66 | 67 | function tokenBase(stream, state) { 68 | var ch = stream.next(); 69 | 70 | // is a start of string? 71 | if (ch == '"' || ch == "'") 72 | return chain(stream, state, tokenString(ch)); 73 | // is it one of the special chars 74 | else if(/[\[\]{}\(\),;\.]/.test(ch)) 75 | return null; 76 | // is it a number? 77 | else if(/\d/.test(ch)) { 78 | stream.eatWhile(/[\w\.]/); 79 | return "number"; 80 | } 81 | // multi line comment or operator 82 | else if (ch == "/") { 83 | if (stream.eat("*")) { 84 | return chain(stream, state, tokenComment); 85 | } 86 | else { 87 | stream.eatWhile(isOperatorChar); 88 | return "operator"; 89 | } 90 | } 91 | // single line comment or operator 92 | else if (ch=="-") { 93 | if(stream.eat("-")){ 94 | stream.skipToEnd(); 95 | return "comment"; 96 | } 97 | else { 98 | stream.eatWhile(isOperatorChar); 99 | return "operator"; 100 | } 101 | } 102 | // is it an operator 103 | else if (isOperatorChar.test(ch)) { 104 | stream.eatWhile(isOperatorChar); 105 | return "operator"; 106 | } 107 | else { 108 | // get the while word 109 | stream.eatWhile(/[\w\$_]/); 110 | // is it one of the listed keywords? 111 | if (keywords && keywords.propertyIsEnumerable(stream.current().toUpperCase())) { 112 | //keywords can be used as variables like flatten(group), group.$0 etc.. 113 | if (!stream.eat(")") && !stream.eat(".")) 114 | return "keyword"; 115 | } 116 | // is it one of the builtin functions? 117 | if (builtins && builtins.propertyIsEnumerable(stream.current().toUpperCase())) 118 | return "builtin"; 119 | // is it one of the listed types? 
120 | if (types && types.propertyIsEnumerable(stream.current().toUpperCase())) 121 | return "type"; 122 | // default is a 'variable' 123 | return "variable"; 124 | } 125 | } 126 | 127 | // Interface 128 | export const pig = { 129 | name: "pig", 130 | 131 | startState: function() { 132 | return { 133 | tokenize: tokenBase, 134 | startOfLine: true 135 | }; 136 | }, 137 | 138 | token: function(stream, state) { 139 | if(stream.eatSpace()) return null; 140 | var style = state.tokenize(stream, state); 141 | return style; 142 | }, 143 | 144 | languageData: { 145 | autocomplete: (pBuiltins + pTypes + pKeywords).split(" ") 146 | } 147 | }; 148 | -------------------------------------------------------------------------------- /mode/powershell.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const powerShell: StreamParser 3 | -------------------------------------------------------------------------------- /mode/properties.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const properties: StreamParser 3 | -------------------------------------------------------------------------------- /mode/properties.js: -------------------------------------------------------------------------------- 1 | export const properties = { 2 | name: "properties", 3 | 4 | token: function(stream, state) { 5 | var sol = stream.sol() || state.afterSection; 6 | var eol = stream.eol(); 7 | 8 | state.afterSection = false; 9 | 10 | if (sol) { 11 | if (state.nextMultiline) { 12 | state.inMultiline = true; 13 | state.nextMultiline = false; 14 | } else { 15 | state.position = "def"; 16 | } 17 | } 18 | 19 | if (eol && ! state.nextMultiline) { 20 | state.inMultiline = false; 21 | state.position = "def"; 22 | } 23 | 24 | if (sol) { 25 | while(stream.eatSpace()) {} 26 | } 27 | 28 | var ch = stream.next(); 29 | 30 | if (sol && (ch === "#" || ch === "!" || ch === ";")) { 31 | state.position = "comment"; 32 | stream.skipToEnd(); 33 | return "comment"; 34 | } else if (sol && ch === "[") { 35 | state.afterSection = true; 36 | stream.skipTo("]"); stream.eat("]"); 37 | return "header"; 38 | } else if (ch === "=" || ch === ":") { 39 | state.position = "quote"; 40 | return null; 41 | } else if (ch === "\\" && state.position === "quote") { 42 | if (stream.eol()) { // end of line? 
43 | // Multiline value 44 | state.nextMultiline = true; 45 | } 46 | } 47 | 48 | return state.position; 49 | }, 50 | 51 | startState: function() { 52 | return { 53 | position : "def", // Current position, "def", "quote" or "comment" 54 | nextMultiline : false, // Is the next line multiline value 55 | inMultiline : false, // Is the current line a multiline value 56 | afterSection : false // Did we just open a section 57 | }; 58 | } 59 | 60 | }; 61 | -------------------------------------------------------------------------------- /mode/protobuf.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const protobuf: StreamParser 3 | -------------------------------------------------------------------------------- /mode/protobuf.js: -------------------------------------------------------------------------------- 1 | function wordRegexp(words) { 2 | return new RegExp("^((" + words.join(")|(") + "))\\b", "i"); 3 | }; 4 | 5 | var keywordArray = [ 6 | "package", "message", "import", "syntax", 7 | "required", "optional", "repeated", "reserved", "default", "extensions", "packed", 8 | "bool", "bytes", "double", "enum", "float", "string", 9 | "int32", "int64", "uint32", "uint64", "sint32", "sint64", "fixed32", "fixed64", "sfixed32", "sfixed64", 10 | "option", "service", "rpc", "returns" 11 | ]; 12 | var keywords = wordRegexp(keywordArray); 13 | 14 | var identifiers = new RegExp("^[_A-Za-z\xa1-\uffff][_A-Za-z0-9\xa1-\uffff]*"); 15 | 16 | function tokenBase(stream) { 17 | // whitespaces 18 | if (stream.eatSpace()) return null; 19 | 20 | // Handle one line Comments 21 | if (stream.match("//")) { 22 | stream.skipToEnd(); 23 | return "comment"; 24 | } 25 | 26 | // Handle Number Literals 27 | if (stream.match(/^[0-9\.+-]/, false)) { 28 | if (stream.match(/^[+-]?0x[0-9a-fA-F]+/)) 29 | return "number"; 30 | if (stream.match(/^[+-]?\d*\.\d+([EeDd][+-]?\d+)?/)) 31 | return "number"; 32 | if (stream.match(/^[+-]?\d+([EeDd][+-]?\d+)?/)) 33 | return "number"; 34 | } 35 | 36 | // Handle Strings 37 | if (stream.match(/^"([^"]|(""))*"/)) { return "string"; } 38 | if (stream.match(/^'([^']|(''))*'/)) { return "string"; } 39 | 40 | // Handle words 41 | if (stream.match(keywords)) { return "keyword"; } 42 | if (stream.match(identifiers)) { return "variable"; } ; 43 | 44 | // Handle non-detected items 45 | stream.next(); 46 | return null; 47 | }; 48 | 49 | export const protobuf = { 50 | name: "protobuf", 51 | token: tokenBase, 52 | languageData: { 53 | autocomplete: keywordArray 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /mode/pug.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const pug: StreamParser 3 | -------------------------------------------------------------------------------- /mode/puppet.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const puppet: StreamParser 3 | -------------------------------------------------------------------------------- /mode/python.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const python: StreamParser 3 | export declare const cython: StreamParser 4 | 
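Usage sketch (again, assumed consumer code rather than a repository file): the languageData blocks several of these modes declare, such as the autocomplete word list in protobuf.js above or the commentTokens entries elsewhere, are meant to be read back through CodeMirror's language-data mechanism once the parser is wrapped in StreamLanguage. Assuming that wrapping registers the parser's languageData as language data for the resulting language:

import {EditorState} from "@codemirror/state"
import {StreamLanguage} from "@codemirror/language"
import {protobuf} from "./mode/protobuf.js"

const state = EditorState.create({
  doc: 'syntax = "proto3";',
  extensions: [StreamLanguage.define(protobuf)]
})

// languageDataAt collects the values registered for a key at a position;
// here it should include the keyword list from languageData.autocomplete in protobuf.js.
console.log(state.languageDataAt("autocomplete", 0))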
-------------------------------------------------------------------------------- /mode/q.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const q: StreamParser 3 | -------------------------------------------------------------------------------- /mode/r.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const r: StreamParser 3 | -------------------------------------------------------------------------------- /mode/rpm.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const rpmChanges: StreamParser 3 | export declare const rpmSpec: StreamParser 4 | -------------------------------------------------------------------------------- /mode/rpm.js: -------------------------------------------------------------------------------- 1 | var headerSeparator = /^-+$/; 2 | var headerLine = /^(Mon|Tue|Wed|Thu|Fri|Sat|Sun) (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ?\d{1,2} \d{2}:\d{2}(:\d{2})? [A-Z]{3,4} \d{4} - /; 3 | var simpleEmail = /^[\w+.-]+@[\w.-]+/; 4 | 5 | export const rpmChanges = { 6 | name: "rpmchanges", 7 | token: function(stream) { 8 | if (stream.sol()) { 9 | if (stream.match(headerSeparator)) { return 'tag'; } 10 | if (stream.match(headerLine)) { return 'tag'; } 11 | } 12 | if (stream.match(simpleEmail)) { return 'string'; } 13 | stream.next(); 14 | return null; 15 | } 16 | } 17 | 18 | // Quick and dirty spec file highlighting 19 | 20 | var arch = /^(i386|i586|i686|x86_64|ppc64le|ppc64|ppc|ia64|s390x|s390|sparc64|sparcv9|sparc|noarch|alphaev6|alpha|hppa|mipsel)/; 21 | 22 | var preamble = /^[a-zA-Z0-9()]+:/; 23 | var section = /^%(debug_package|package|description|prep|build|install|files|clean|changelog|preinstall|preun|postinstall|postun|pretrans|posttrans|pre|post|triggerin|triggerun|verifyscript|check|triggerpostun|triggerprein|trigger)/; 24 | var control_flow_complex = /^%(ifnarch|ifarch|if)/; // rpm control flow macros 25 | var control_flow_simple = /^%(else|endif)/; // rpm control flow macros 26 | var operators = /^(\!|\?|\<\=|\<|\>\=|\>|\=\=|\&\&|\|\|)/; // operators in control flow macros 27 | 28 | export const rpmSpec = { 29 | name: "rpmspec", 30 | startState: function () { 31 | return { 32 | controlFlow: false, 33 | macroParameters: false, 34 | section: false 35 | }; 36 | }, 37 | token: function (stream, state) { 38 | var ch = stream.peek(); 39 | if (ch == "#") { stream.skipToEnd(); return "comment"; } 40 | 41 | if (stream.sol()) { 42 | if (stream.match(preamble)) { return "header"; } 43 | if (stream.match(section)) { return "atom"; } 44 | } 45 | 46 | if (stream.match(/^\$\w+/)) { return "def"; } // Variables like '$RPM_BUILD_ROOT' 47 | if (stream.match(/^\$\{\w+\}/)) { return "def"; } // Variables like '${RPM_BUILD_ROOT}' 48 | 49 | if (stream.match(control_flow_simple)) { return "keyword"; } 50 | if (stream.match(control_flow_complex)) { 51 | state.controlFlow = true; 52 | return "keyword"; 53 | } 54 | if (state.controlFlow) { 55 | if (stream.match(operators)) { return "operator"; } 56 | if (stream.match(/^(\d+)/)) { return "number"; } 57 | if (stream.eol()) { state.controlFlow = false; } 58 | } 59 | 60 | if (stream.match(arch)) { 61 | if (stream.eol()) { state.controlFlow = false; } 62 | return "number"; 63 | } 64 | 65 | // Macros like 
'%make_install' or '%attr(0775,root,root)' 66 | if (stream.match(/^%[\w]+/)) { 67 | if (stream.match('(')) { state.macroParameters = true; } 68 | return "keyword"; 69 | } 70 | if (state.macroParameters) { 71 | if (stream.match(/^\d+/)) { return "number";} 72 | if (stream.match(')')) { 73 | state.macroParameters = false; 74 | return "keyword"; 75 | } 76 | } 77 | 78 | // Macros like '%{defined fedora}' 79 | if (stream.match(/^%\{\??[\w \-\:\!]+\}/)) { 80 | if (stream.eol()) { state.controlFlow = false; } 81 | return "def"; 82 | } 83 | 84 | stream.next(); 85 | return null; 86 | } 87 | }; 88 | 89 | -------------------------------------------------------------------------------- /mode/ruby.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const ruby: StreamParser 3 | -------------------------------------------------------------------------------- /mode/rust.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const rust: StreamParser 3 | -------------------------------------------------------------------------------- /mode/rust.js: -------------------------------------------------------------------------------- 1 | import {simpleMode} from "./simple-mode.js" 2 | 3 | export const rust = simpleMode({ 4 | start: [ 5 | // string and byte string 6 | {regex: /b?"/, token: "string", next: "string"}, 7 | // raw string and raw byte string 8 | {regex: /b?r"/, token: "string", next: "string_raw"}, 9 | {regex: /b?r#+"/, token: "string", next: "string_raw_hash"}, 10 | // character 11 | {regex: /'(?:[^'\\]|\\(?:[nrt0'"]|x[\da-fA-F]{2}|u\{[\da-fA-F]{6}\}))'/, token: "string.special"}, 12 | // byte 13 | {regex: /b'(?:[^']|\\(?:['\\nrt0]|x[\da-fA-F]{2}))'/, token: "string.special"}, 14 | 15 | {regex: /(?:(?:[0-9][0-9_]*)(?:(?:[Ee][+-]?[0-9_]+)|\.[0-9_]+(?:[Ee][+-]?[0-9_]+)?)(?:f32|f64)?)|(?:0(?:b[01_]+|(?:o[0-7_]+)|(?:x[0-9a-fA-F_]+))|(?:[0-9][0-9_]*))(?:u8|u16|u32|u64|i8|i16|i32|i64|isize|usize)?/, 16 | token: "number"}, 17 | {regex: /(let(?:\s+mut)?|fn|enum|mod|struct|type|union)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)/, token: ["keyword", null, "def"]}, 18 | {regex: /(?:abstract|alignof|as|async|await|box|break|continue|const|crate|do|dyn|else|enum|extern|fn|for|final|if|impl|in|loop|macro|match|mod|move|offsetof|override|priv|proc|pub|pure|ref|return|self|sizeof|static|struct|super|trait|type|typeof|union|unsafe|unsized|use|virtual|where|while|yield)\b/, token: "keyword"}, 19 | {regex: /\b(?:Self|isize|usize|char|bool|u8|u16|u32|u64|f16|f32|f64|i8|i16|i32|i64|str|Option)\b/, token: "atom"}, 20 | {regex: /\b(?:true|false|Some|None|Ok|Err)\b/, token: "builtin"}, 21 | {regex: /\b(fn)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)/, 22 | token: ["keyword", null ,"def"]}, 23 | {regex: /#!?\[.*\]/, token: "meta"}, 24 | {regex: /\/\/.*/, token: "comment"}, 25 | {regex: /\/\*/, token: "comment", next: "comment"}, 26 | {regex: /[-+\/*=<>!]+/, token: "operator"}, 27 | {regex: /[a-zA-Z_]\w*!/,token: "macroName"}, 28 | {regex: /[a-zA-Z_]\w*/, token: "variable"}, 29 | {regex: /[\{\[\(]/, indent: true}, 30 | {regex: /[\}\]\)]/, dedent: true} 31 | ], 32 | string: [ 33 | {regex: /"/, token: "string", next: "start"}, 34 | {regex: /(?:[^\\"]|\\(?:.|$))*/, token: "string"} 35 | ], 36 | string_raw: [ 37 | {regex: /"/, token: "string", next: "start"}, 38 | {regex: /[^"]*/, token: "string"} 39 | ], 40 | string_raw_hash: [ 41 | {regex: /"#+/, 
token: "string", next: "start"}, 42 | {regex: /(?:[^"]|"(?!#))*/, token: "string"} 43 | ], 44 | comment: [ 45 | {regex: /.*?\*\//, token: "comment", next: "start"}, 46 | {regex: /.*/, token: "comment"} 47 | ], 48 | languageData: { 49 | name: "rust", 50 | dontIndentStates: ["comment"], 51 | indentOnInput: /^\s*\}$/, 52 | commentTokens: {line: "//", block: {open: "/*", close: "*/"}} 53 | } 54 | }); 55 | -------------------------------------------------------------------------------- /mode/sas.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const sas: StreamParser 3 | -------------------------------------------------------------------------------- /mode/sass.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const sass: StreamParser 3 | -------------------------------------------------------------------------------- /mode/scheme.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const scheme: StreamParser 3 | -------------------------------------------------------------------------------- /mode/shell.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const shell: StreamParser 3 | -------------------------------------------------------------------------------- /mode/shell.js: -------------------------------------------------------------------------------- 1 | var words = {}; 2 | function define(style, dict) { 3 | for(var i = 0; i < dict.length; i++) { 4 | words[dict[i]] = style; 5 | } 6 | }; 7 | 8 | var commonAtoms = ["true", "false"]; 9 | var commonKeywords = ["if", "then", "do", "else", "elif", "while", "until", "for", "in", "esac", "fi", 10 | "fin", "fil", "done", "exit", "set", "unset", "export", "function"]; 11 | var commonCommands = ["ab", "awk", "bash", "beep", "cat", "cc", "cd", "chown", "chmod", "chroot", "clear", 12 | "cp", "curl", "cut", "diff", "echo", "find", "gawk", "gcc", "get", "git", "grep", "hg", "kill", "killall", 13 | "ln", "ls", "make", "mkdir", "openssl", "mv", "nc", "nl", "node", "npm", "ping", "ps", "restart", "rm", 14 | "rmdir", "sed", "service", "sh", "shopt", "shred", "source", "sort", "sleep", "ssh", "start", "stop", 15 | "su", "sudo", "svn", "tee", "telnet", "top", "touch", "vi", "vim", "wall", "wc", "wget", "who", "write", 16 | "yes", "zsh"]; 17 | 18 | define('atom', commonAtoms); 19 | define('keyword', commonKeywords); 20 | define('builtin', commonCommands); 21 | 22 | function tokenBase(stream, state) { 23 | if (stream.eatSpace()) return null; 24 | 25 | var sol = stream.sol(); 26 | var ch = stream.next(); 27 | 28 | if (ch === '\\') { 29 | stream.next(); 30 | return null; 31 | } 32 | if (ch === '\'' || ch === '"' || ch === '`') { 33 | state.tokens.unshift(tokenString(ch, ch === "`" ? "quote" : "string")); 34 | return tokenize(stream, state); 35 | } 36 | if (ch === '#') { 37 | if (sol && stream.eat('!')) { 38 | stream.skipToEnd(); 39 | return 'meta'; // 'comment'? 
40 | } 41 | stream.skipToEnd(); 42 | return 'comment'; 43 | } 44 | if (ch === '$') { 45 | state.tokens.unshift(tokenDollar); 46 | return tokenize(stream, state); 47 | } 48 | if (ch === '+' || ch === '=') { 49 | return 'operator'; 50 | } 51 | if (ch === '-') { 52 | stream.eat('-'); 53 | stream.eatWhile(/\w/); 54 | return 'attribute'; 55 | } 56 | if (ch == "<") { 57 | if (stream.match("<<")) return "operator" 58 | var heredoc = stream.match(/^<-?\s*(?:['"]([^'"]*)['"]|([^'"\s]*))/) 59 | if (heredoc) { 60 | state.tokens.unshift(tokenHeredoc(heredoc[1] || heredoc[2])) 61 | return 'string.special' 62 | } 63 | } 64 | if (/\d/.test(ch)) { 65 | stream.eatWhile(/\d/); 66 | if(stream.eol() || !/\w/.test(stream.peek())) { 67 | return 'number'; 68 | } 69 | } 70 | stream.eatWhile(/[\w-]/); 71 | var cur = stream.current(); 72 | if (stream.peek() === '=' && /\w+/.test(cur)) return 'def'; 73 | return words.hasOwnProperty(cur) ? words[cur] : null; 74 | } 75 | 76 | function tokenString(quote, style) { 77 | var close = quote == "(" ? ")" : quote == "{" ? "}" : quote 78 | return function(stream, state) { 79 | var next, escaped = false; 80 | while ((next = stream.next()) != null) { 81 | if (next === close && !escaped) { 82 | state.tokens.shift(); 83 | break; 84 | } else if (next === '$' && !escaped && quote !== "'" && stream.peek() != close) { 85 | escaped = true; 86 | stream.backUp(1); 87 | state.tokens.unshift(tokenDollar); 88 | break; 89 | } else if (!escaped && quote !== close && next === quote) { 90 | state.tokens.unshift(tokenString(quote, style)) 91 | return tokenize(stream, state) 92 | } else if (!escaped && /['"]/.test(next) && !/['"]/.test(quote)) { 93 | state.tokens.unshift(tokenStringStart(next, "string")); 94 | stream.backUp(1); 95 | break; 96 | } 97 | escaped = !escaped && next === '\\'; 98 | } 99 | return style; 100 | }; 101 | }; 102 | 103 | function tokenStringStart(quote, style) { 104 | return function(stream, state) { 105 | state.tokens[0] = tokenString(quote, style) 106 | stream.next() 107 | return tokenize(stream, state) 108 | } 109 | } 110 | 111 | var tokenDollar = function(stream, state) { 112 | if (state.tokens.length > 1) stream.eat('$'); 113 | var ch = stream.next() 114 | if (/['"({]/.test(ch)) { 115 | state.tokens[0] = tokenString(ch, ch == "(" ? "quote" : ch == "{" ? 
"def" : "string"); 116 | return tokenize(stream, state); 117 | } 118 | if (!/\d/.test(ch)) stream.eatWhile(/\w/); 119 | state.tokens.shift(); 120 | return 'def'; 121 | }; 122 | 123 | function tokenHeredoc(delim) { 124 | return function(stream, state) { 125 | if (stream.sol() && stream.string == delim) state.tokens.shift() 126 | stream.skipToEnd() 127 | return "string.special" 128 | } 129 | } 130 | 131 | function tokenize(stream, state) { 132 | return (state.tokens[0] || tokenBase) (stream, state); 133 | }; 134 | 135 | export const shell = { 136 | name: "shell", 137 | startState: function() {return {tokens:[]};}, 138 | token: function(stream, state) { 139 | return tokenize(stream, state); 140 | }, 141 | languageData: { 142 | autocomplete: commonAtoms.concat(commonKeywords, commonCommands), 143 | closeBrackets: {brackets: ["(", "[", "{", "'", '"', "`"]}, 144 | commentTokens: {line: "#"} 145 | } 146 | }; 147 | -------------------------------------------------------------------------------- /mode/sieve.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const sieve: StreamParser 3 | -------------------------------------------------------------------------------- /mode/sieve.js: -------------------------------------------------------------------------------- 1 | function words(str) { 2 | var obj = {}, words = str.split(" "); 3 | for (var i = 0; i < words.length; ++i) obj[words[i]] = true; 4 | return obj; 5 | } 6 | 7 | var keywords = words("if elsif else stop require"); 8 | var atoms = words("true false not"); 9 | 10 | function tokenBase(stream, state) { 11 | 12 | var ch = stream.next(); 13 | if (ch == "/" && stream.eat("*")) { 14 | state.tokenize = tokenCComment; 15 | return tokenCComment(stream, state); 16 | } 17 | 18 | if (ch === '#') { 19 | stream.skipToEnd(); 20 | return "comment"; 21 | } 22 | 23 | if (ch == "\"") { 24 | state.tokenize = tokenString(ch); 25 | return state.tokenize(stream, state); 26 | } 27 | 28 | if (ch == "(") { 29 | state._indent.push("("); 30 | // add virtual angel wings so that editor behaves... 31 | // ...more sane incase of broken brackets 32 | state._indent.push("{"); 33 | return null; 34 | } 35 | 36 | if (ch === "{") { 37 | state._indent.push("{"); 38 | return null; 39 | } 40 | 41 | if (ch == ")") { 42 | state._indent.pop(); 43 | state._indent.pop(); 44 | } 45 | 46 | if (ch === "}") { 47 | state._indent.pop(); 48 | return null; 49 | } 50 | 51 | if (ch == ",") 52 | return null; 53 | 54 | if (ch == ";") 55 | return null; 56 | 57 | 58 | if (/[{}\(\),;]/.test(ch)) 59 | return null; 60 | 61 | // 1*DIGIT "K" / "M" / "G" 62 | if (/\d/.test(ch)) { 63 | stream.eatWhile(/[\d]/); 64 | stream.eat(/[KkMmGg]/); 65 | return "number"; 66 | } 67 | 68 | // ":" (ALPHA / "_") *(ALPHA / DIGIT / "_") 69 | if (ch == ":") { 70 | stream.eatWhile(/[a-zA-Z_]/); 71 | stream.eatWhile(/[a-zA-Z0-9_]/); 72 | 73 | return "operator"; 74 | } 75 | 76 | stream.eatWhile(/\w/); 77 | var cur = stream.current(); 78 | 79 | // "text:" *(SP / HTAB) (hash-comment / CRLF) 80 | // *(multiline-literal / multiline-dotstart) 81 | // "." 
CRLF 82 | if ((cur == "text") && stream.eat(":")) 83 | { 84 | state.tokenize = tokenMultiLineString; 85 | return "string"; 86 | } 87 | 88 | if (keywords.propertyIsEnumerable(cur)) 89 | return "keyword"; 90 | 91 | if (atoms.propertyIsEnumerable(cur)) 92 | return "atom"; 93 | 94 | return null; 95 | } 96 | 97 | function tokenMultiLineString(stream, state) 98 | { 99 | state._multiLineString = true; 100 | // the first line is special it may contain a comment 101 | if (!stream.sol()) { 102 | stream.eatSpace(); 103 | 104 | if (stream.peek() == "#") { 105 | stream.skipToEnd(); 106 | return "comment"; 107 | } 108 | 109 | stream.skipToEnd(); 110 | return "string"; 111 | } 112 | 113 | if ((stream.next() == ".") && (stream.eol())) 114 | { 115 | state._multiLineString = false; 116 | state.tokenize = tokenBase; 117 | } 118 | 119 | return "string"; 120 | } 121 | 122 | function tokenCComment(stream, state) { 123 | var maybeEnd = false, ch; 124 | while ((ch = stream.next()) != null) { 125 | if (maybeEnd && ch == "/") { 126 | state.tokenize = tokenBase; 127 | break; 128 | } 129 | maybeEnd = (ch == "*"); 130 | } 131 | return "comment"; 132 | } 133 | 134 | function tokenString(quote) { 135 | return function(stream, state) { 136 | var escaped = false, ch; 137 | while ((ch = stream.next()) != null) { 138 | if (ch == quote && !escaped) 139 | break; 140 | escaped = !escaped && ch == "\\"; 141 | } 142 | if (!escaped) state.tokenize = tokenBase; 143 | return "string"; 144 | }; 145 | } 146 | 147 | export const sieve = { 148 | name: "sieve", 149 | startState: function(base) { 150 | return {tokenize: tokenBase, 151 | baseIndent: base || 0, 152 | _indent: []}; 153 | }, 154 | 155 | token: function(stream, state) { 156 | if (stream.eatSpace()) 157 | return null; 158 | 159 | return (state.tokenize || tokenBase)(stream, state); 160 | }, 161 | 162 | indent: function(state, _textAfter, cx) { 163 | var length = state._indent.length; 164 | if (_textAfter && (_textAfter[0] == "}")) 165 | length--; 166 | 167 | if (length <0) 168 | length = 0; 169 | 170 | return length * cx.unit; 171 | }, 172 | 173 | languageData: { 174 | indentOnInput: /^\s*\}$/ 175 | } 176 | }; 177 | -------------------------------------------------------------------------------- /mode/simple-mode.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export interface Rule { 3 | regex?: string | RegExp | undefined; 4 | token?: string | string[] | null | undefined | ((matches: RegExpMatchArray) => string | string[] | null); 5 | sol?: boolean | undefined; 6 | next?: string | undefined; 7 | push?: string | undefined; 8 | pop?: boolean | undefined; 9 | indent?: boolean | undefined; 10 | dedent?: boolean | undefined; 11 | dedentIfLineStart?: boolean | undefined; 12 | } 13 | export declare function simpleMode( 14 | states: { [P in K]: P extends "languageData" ? 
{[name: string]: any} : Rule[] } & { start: Rule[] }): StreamParser 15 | -------------------------------------------------------------------------------- /mode/simple-mode.js: -------------------------------------------------------------------------------- 1 | export function simpleMode(states) { 2 | ensureState(states, "start"); 3 | var states_ = {}, meta = states.languageData || {}, hasIndentation = false; 4 | for (var state in states) if (state != meta && states.hasOwnProperty(state)) { 5 | var list = states_[state] = [], orig = states[state]; 6 | for (var i = 0; i < orig.length; i++) { 7 | var data = orig[i]; 8 | list.push(new Rule(data, states)); 9 | if (data.indent || data.dedent) hasIndentation = true; 10 | } 11 | } 12 | return { 13 | name: meta.name, 14 | startState: function() { 15 | return {state: "start", pending: null, indent: hasIndentation ? [] : null}; 16 | }, 17 | copyState: function(state) { 18 | var s = {state: state.state, pending: state.pending, indent: state.indent && state.indent.slice(0)}; 19 | if (state.stack) 20 | s.stack = state.stack.slice(0); 21 | return s; 22 | }, 23 | token: tokenFunction(states_), 24 | indent: indentFunction(states_, meta), 25 | mergeTokens: meta.mergeTokens, 26 | languageData: meta 27 | } 28 | }; 29 | 30 | function ensureState(states, name) { 31 | if (!states.hasOwnProperty(name)) 32 | throw new Error("Undefined state " + name + " in simple mode"); 33 | } 34 | 35 | function toRegex(val, caret) { 36 | if (!val) return /(?:)/; 37 | var flags = ""; 38 | if (val instanceof RegExp) { 39 | if (val.ignoreCase) flags = "i"; 40 | val = val.source; 41 | } else { 42 | val = String(val); 43 | } 44 | return new RegExp((caret === false ? "" : "^") + "(?:" + val + ")", flags); 45 | } 46 | 47 | function asToken(val) { 48 | if (!val) return null; 49 | if (val.apply) return val 50 | if (typeof val == "string") return val.replace(/\./g, " "); 51 | var result = []; 52 | for (var i = 0; i < val.length; i++) 53 | result.push(val[i] && val[i].replace(/\./g, " ")); 54 | return result; 55 | } 56 | 57 | function Rule(data, states) { 58 | if (data.next || data.push) ensureState(states, data.next || data.push); 59 | this.regex = toRegex(data.regex); 60 | this.token = asToken(data.token); 61 | this.data = data; 62 | } 63 | 64 | function tokenFunction(states) { 65 | return function(stream, state) { 66 | if (state.pending) { 67 | var pend = state.pending.shift(); 68 | if (state.pending.length == 0) state.pending = null; 69 | stream.pos += pend.text.length; 70 | return pend.token; 71 | } 72 | 73 | var curState = states[state.state]; 74 | for (var i = 0; i < curState.length; i++) { 75 | var rule = curState[i]; 76 | var matches = (!rule.data.sol || stream.sol()) && stream.match(rule.regex); 77 | if (matches) { 78 | if (rule.data.next) { 79 | state.state = rule.data.next; 80 | } else if (rule.data.push) { 81 | (state.stack || (state.stack = [])).push(state.state); 82 | state.state = rule.data.push; 83 | } else if (rule.data.pop && state.stack && state.stack.length) { 84 | state.state = state.stack.pop(); 85 | } 86 | 87 | if (rule.data.indent) 88 | state.indent.push(stream.indentation() + stream.indentUnit); 89 | if (rule.data.dedent) 90 | state.indent.pop(); 91 | var token = rule.token 92 | if (token && token.apply) token = token(matches) 93 | if (matches.length > 2 && rule.token && typeof rule.token != "string") { 94 | state.pending = []; 95 | for (var j = 2; j < matches.length; j++) 96 | if (matches[j]) 97 | state.pending.push({text: matches[j], token: rule.token[j - 1]}); 
98 | stream.backUp(matches[0].length - (matches[1] ? matches[1].length : 0)); 99 | return token[0]; 100 | } else if (token && token.join) { 101 | return token[0]; 102 | } else { 103 | return token; 104 | } 105 | } 106 | } 107 | stream.next(); 108 | return null; 109 | }; 110 | } 111 | 112 | function indentFunction(states, meta) { 113 | return function(state, textAfter) { 114 | if (state.indent == null || meta.dontIndentStates && meta.dontIndentStates.indexOf(state.state) > -1) 115 | return null 116 | 117 | var pos = state.indent.length - 1, rules = states[state.state]; 118 | scan: for (;;) { 119 | for (var i = 0; i < rules.length; i++) { 120 | var rule = rules[i]; 121 | if (rule.data.dedent && rule.data.dedentIfLineStart !== false) { 122 | var m = rule.regex.exec(textAfter); 123 | if (m && m[0]) { 124 | pos--; 125 | if (rule.next || rule.push) rules = states[rule.next || rule.push]; 126 | textAfter = textAfter.slice(m[0].length); 127 | continue scan; 128 | } 129 | } 130 | } 131 | break; 132 | } 133 | return pos < 0 ? 0 : state.indent[pos]; 134 | }; 135 | } 136 | -------------------------------------------------------------------------------- /mode/smalltalk.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const smalltalk: StreamParser 3 | -------------------------------------------------------------------------------- /mode/smalltalk.js: -------------------------------------------------------------------------------- 1 | var specialChars = /[+\-\/\\*~<>=@%|&?!.,:;^]/; 2 | var keywords = /true|false|nil|self|super|thisContext/; 3 | 4 | var Context = function(tokenizer, parent) { 5 | this.next = tokenizer; 6 | this.parent = parent; 7 | }; 8 | 9 | var Token = function(name, context, eos) { 10 | this.name = name; 11 | this.context = context; 12 | this.eos = eos; 13 | }; 14 | 15 | var State = function() { 16 | this.context = new Context(next, null); 17 | this.expectVariable = true; 18 | this.indentation = 0; 19 | this.userIndentationDelta = 0; 20 | }; 21 | 22 | State.prototype.userIndent = function(indentation, indentUnit) { 23 | this.userIndentationDelta = indentation > 0 ? 
(indentation / indentUnit - this.indentation) : 0; 24 | }; 25 | 26 | var next = function(stream, context, state) { 27 | var token = new Token(null, context, false); 28 | var aChar = stream.next(); 29 | 30 | if (aChar === '"') { 31 | token = nextComment(stream, new Context(nextComment, context)); 32 | 33 | } else if (aChar === '\'') { 34 | token = nextString(stream, new Context(nextString, context)); 35 | 36 | } else if (aChar === '#') { 37 | if (stream.peek() === '\'') { 38 | stream.next(); 39 | token = nextSymbol(stream, new Context(nextSymbol, context)); 40 | } else { 41 | if (stream.eatWhile(/[^\s.{}\[\]()]/)) 42 | token.name = 'string.special'; 43 | else 44 | token.name = 'meta'; 45 | } 46 | 47 | } else if (aChar === '$') { 48 | if (stream.next() === '<') { 49 | stream.eatWhile(/[^\s>]/); 50 | stream.next(); 51 | } 52 | token.name = 'string.special'; 53 | 54 | } else if (aChar === '|' && state.expectVariable) { 55 | token.context = new Context(nextTemporaries, context); 56 | 57 | } else if (/[\[\]{}()]/.test(aChar)) { 58 | token.name = 'bracket'; 59 | token.eos = /[\[{(]/.test(aChar); 60 | 61 | if (aChar === '[') { 62 | state.indentation++; 63 | } else if (aChar === ']') { 64 | state.indentation = Math.max(0, state.indentation - 1); 65 | } 66 | 67 | } else if (specialChars.test(aChar)) { 68 | stream.eatWhile(specialChars); 69 | token.name = 'operator'; 70 | token.eos = aChar !== ';'; // ; cascaded message expression 71 | 72 | } else if (/\d/.test(aChar)) { 73 | stream.eatWhile(/[\w\d]/); 74 | token.name = 'number'; 75 | 76 | } else if (/[\w_]/.test(aChar)) { 77 | stream.eatWhile(/[\w\d_]/); 78 | token.name = state.expectVariable ? (keywords.test(stream.current()) ? 'keyword' : 'variable') : null; 79 | 80 | } else { 81 | token.eos = state.expectVariable; 82 | } 83 | 84 | return token; 85 | }; 86 | 87 | var nextComment = function(stream, context) { 88 | stream.eatWhile(/[^"]/); 89 | return new Token('comment', stream.eat('"') ? context.parent : context, true); 90 | }; 91 | 92 | var nextString = function(stream, context) { 93 | stream.eatWhile(/[^']/); 94 | return new Token('string', stream.eat('\'') ? context.parent : context, false); 95 | }; 96 | 97 | var nextSymbol = function(stream, context) { 98 | stream.eatWhile(/[^']/); 99 | return new Token('string.special', stream.eat('\'') ? context.parent : context, false); 100 | }; 101 | 102 | var nextTemporaries = function(stream, context) { 103 | var token = new Token(null, context, false); 104 | var aChar = stream.next(); 105 | 106 | if (aChar === '|') { 107 | token.context = context.parent; 108 | token.eos = true; 109 | 110 | } else { 111 | stream.eatWhile(/[^|]/); 112 | token.name = 'variable'; 113 | } 114 | 115 | return token; 116 | }; 117 | 118 | export const smalltalk = { 119 | name: "smalltalk", 120 | 121 | startState: function() { 122 | return new State; 123 | }, 124 | 125 | token: function(stream, state) { 126 | state.userIndent(stream.indentation(), stream.indentUnit); 127 | 128 | if (stream.eatSpace()) { 129 | return null; 130 | } 131 | 132 | var token = state.context.next(stream, state.context, state); 133 | state.context = token.context; 134 | state.expectVariable = token.eos; 135 | 136 | return token.name; 137 | }, 138 | 139 | blankLine: function(state, indentUnit) { 140 | state.userIndent(0, indentUnit); 141 | }, 142 | 143 | indent: function(state, textAfter, cx) { 144 | var i = state.context.next === next && textAfter && textAfter.charAt(0) === ']' ? 
-1 : state.userIndentationDelta; 145 | return (state.indentation + i) * cx.unit; 146 | }, 147 | 148 | languageData: { 149 | indentOnInput: /^\s*\]$/ 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /mode/solr.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const solr: StreamParser 3 | -------------------------------------------------------------------------------- /mode/solr.js: -------------------------------------------------------------------------------- 1 | var isStringChar = /[^\s\|\!\+\-\*\?\~\^\&\:\(\)\[\]\{\}\"\\]/; 2 | var isOperatorChar = /[\|\!\+\-\*\?\~\^\&]/; 3 | var isOperatorString = /^(OR|AND|NOT|TO)$/; 4 | 5 | function isNumber(word) { 6 | return parseFloat(word).toString() === word; 7 | } 8 | 9 | function tokenString(quote) { 10 | return function(stream, state) { 11 | var escaped = false, next; 12 | while ((next = stream.next()) != null) { 13 | if (next == quote && !escaped) break; 14 | escaped = !escaped && next == "\\"; 15 | } 16 | 17 | if (!escaped) state.tokenize = tokenBase; 18 | return "string"; 19 | }; 20 | } 21 | 22 | function tokenOperator(operator) { 23 | return function(stream, state) { 24 | if (operator == "|") 25 | stream.eat(/\|/); 26 | else if (operator == "&") 27 | stream.eat(/\&/); 28 | 29 | state.tokenize = tokenBase; 30 | return "operator"; 31 | }; 32 | } 33 | 34 | function tokenWord(ch) { 35 | return function(stream, state) { 36 | var word = ch; 37 | while ((ch = stream.peek()) && ch.match(isStringChar) != null) { 38 | word += stream.next(); 39 | } 40 | 41 | state.tokenize = tokenBase; 42 | if (isOperatorString.test(word)) 43 | return "operator"; 44 | else if (isNumber(word)) 45 | return "number"; 46 | else if (stream.peek() == ":") 47 | return "propertyName"; 48 | else 49 | return "string"; 50 | }; 51 | } 52 | 53 | function tokenBase(stream, state) { 54 | var ch = stream.next(); 55 | if (ch == '"') 56 | state.tokenize = tokenString(ch); 57 | else if (isOperatorChar.test(ch)) 58 | state.tokenize = tokenOperator(ch); 59 | else if (isStringChar.test(ch)) 60 | state.tokenize = tokenWord(ch); 61 | 62 | return (state.tokenize != tokenBase) ? 
state.tokenize(stream, state) : null; 63 | } 64 | 65 | export const solr = { 66 | name: "solr", 67 | 68 | startState: function() { 69 | return { 70 | tokenize: tokenBase 71 | }; 72 | }, 73 | 74 | token: function(stream, state) { 75 | if (stream.eatSpace()) return null; 76 | return state.tokenize(stream, state); 77 | } 78 | }; 79 | -------------------------------------------------------------------------------- /mode/sparql.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const sparql: StreamParser 3 | -------------------------------------------------------------------------------- /mode/spreadsheet.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const spreadsheet: StreamParser 3 | -------------------------------------------------------------------------------- /mode/spreadsheet.js: -------------------------------------------------------------------------------- 1 | export const spreadsheet = { 2 | name: "spreadsheet", 3 | 4 | startState: function () { 5 | return { 6 | stringType: null, 7 | stack: [] 8 | }; 9 | }, 10 | token: function (stream, state) { 11 | if (!stream) return; 12 | 13 | //check for state changes 14 | if (state.stack.length === 0) { 15 | //strings 16 | if ((stream.peek() == '"') || (stream.peek() == "'")) { 17 | state.stringType = stream.peek(); 18 | stream.next(); // Skip quote 19 | state.stack.unshift("string"); 20 | } 21 | } 22 | 23 | //return state 24 | //stack has 25 | switch (state.stack[0]) { 26 | case "string": 27 | while (state.stack[0] === "string" && !stream.eol()) { 28 | if (stream.peek() === state.stringType) { 29 | stream.next(); // Skip quote 30 | state.stack.shift(); // Clear flag 31 | } else if (stream.peek() === "\\") { 32 | stream.next(); 33 | stream.next(); 34 | } else { 35 | stream.match(/^.[^\\\"\']*/); 36 | } 37 | } 38 | return "string"; 39 | 40 | case "characterClass": 41 | while (state.stack[0] === "characterClass" && !stream.eol()) { 42 | if (!(stream.match(/^[^\]\\]+/) || stream.match(/^\\./))) 43 | state.stack.shift(); 44 | } 45 | return "operator"; 46 | } 47 | 48 | var peek = stream.peek(); 49 | 50 | //no stack 51 | switch (peek) { 52 | case "[": 53 | stream.next(); 54 | state.stack.unshift("characterClass"); 55 | return "bracket"; 56 | case ":": 57 | stream.next(); 58 | return "operator"; 59 | case "\\": 60 | if (stream.match(/\\[a-z]+/)) return "string.special"; 61 | else { 62 | stream.next(); 63 | return "atom"; 64 | } 65 | case ".": 66 | case ",": 67 | case ";": 68 | case "*": 69 | case "-": 70 | case "+": 71 | case "^": 72 | case "<": 73 | case "/": 74 | case "=": 75 | stream.next(); 76 | return "atom"; 77 | case "$": 78 | stream.next(); 79 | return "builtin"; 80 | } 81 | 82 | if (stream.match(/\d+/)) { 83 | if (stream.match(/^\w+/)) return "error"; 84 | return "number"; 85 | } else if (stream.match(/^[a-zA-Z_]\w*/)) { 86 | if (stream.match(/(?=[\(.])/, false)) return "keyword"; 87 | return "variable"; 88 | } else if (["[", "]", "(", ")", "{", "}"].indexOf(peek) != -1) { 89 | stream.next(); 90 | return "bracket"; 91 | } else if (!stream.eatSpace()) { 92 | stream.next(); 93 | } 94 | return null; 95 | } 96 | }; 97 | -------------------------------------------------------------------------------- /mode/sql.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from 
"@codemirror/language" 2 | export declare function sql(conf: { 3 | client?: {[word: string]: any}, 4 | atoms?: {[word: string]: any}, 5 | builtin?: {[word: string]: any}, 6 | keywords?: {[word: string]: any}, 7 | operatorChars?: RegExp, 8 | support?: {[word: string]: any}, 9 | hooks?: {[hook: string]: any}, 10 | dateSQL?: {[word: string]: any}, 11 | backslashStringEscapes?: boolean, 12 | brackets?: RegExp, 13 | punctuation?: RegExp 14 | }): StreamParser 15 | export declare const standardSQL: StreamParser 16 | export declare const msSQL: StreamParser 17 | export declare const mySQL: StreamParser 18 | export declare const mariaDB: StreamParser 19 | export declare const sqlite: StreamParser 20 | export declare const cassandra: StreamParser 21 | export declare const plSQL: StreamParser 22 | export declare const hive: StreamParser 23 | export declare const pgSQL: StreamParser 24 | export declare const gql: StreamParser 25 | export declare const gpSQL: StreamParser 26 | export declare const sparkSQL: StreamParser 27 | export declare const esper: StreamParser 28 | -------------------------------------------------------------------------------- /mode/stex.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const stex: StreamParser 3 | export declare const stexMath: StreamParser 4 | -------------------------------------------------------------------------------- /mode/stylus.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const stylus: StreamParser 3 | -------------------------------------------------------------------------------- /mode/swift.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const swift: StreamParser 3 | -------------------------------------------------------------------------------- /mode/tcl.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const tcl: StreamParser 3 | -------------------------------------------------------------------------------- /mode/tcl.js: -------------------------------------------------------------------------------- 1 | function parseWords(str) { 2 | var obj = {}, words = str.split(" "); 3 | for (var i = 0; i < words.length; ++i) obj[words[i]] = true; 4 | return obj; 5 | } 6 | var keywords = parseWords("Tcl safe after append array auto_execok auto_import auto_load " + 7 | "auto_mkindex auto_mkindex_old auto_qualify auto_reset bgerror " + 8 | "binary break catch cd close concat continue dde eof encoding error " + 9 | "eval exec exit expr fblocked fconfigure fcopy file fileevent filename " + 10 | "filename flush for foreach format gets glob global history http if " + 11 | "incr info interp join lappend lindex linsert list llength load lrange " + 12 | "lreplace lsearch lset lsort memory msgcat namespace open package parray " + 13 | "pid pkg::create pkg_mkIndex proc puts pwd re_syntax read regex regexp " + 14 | "registry regsub rename resource return scan seek set socket source split " + 15 | "string subst switch tcl_endOfWord tcl_findLibrary tcl_startOfNextWord " + 16 | "tcl_wordBreakAfter tcl_startOfPreviousWord tcl_wordBreakBefore tcltest " + 17 | "tclvars tell time trace unknown unset update uplevel upvar variable " + 18 | 
"vwait"); 19 | var functions = parseWords("if elseif else and not or eq ne in ni for foreach while switch"); 20 | var isOperatorChar = /[+\-*&%=<>!?^\/\|]/; 21 | function chain(stream, state, f) { 22 | state.tokenize = f; 23 | return f(stream, state); 24 | } 25 | function tokenBase(stream, state) { 26 | var beforeParams = state.beforeParams; 27 | state.beforeParams = false; 28 | var ch = stream.next(); 29 | if ((ch == '"' || ch == "'") && state.inParams) { 30 | return chain(stream, state, tokenString(ch)); 31 | } else if (/[\[\]{}\(\),;\.]/.test(ch)) { 32 | if (ch == "(" && beforeParams) state.inParams = true; 33 | else if (ch == ")") state.inParams = false; 34 | return null; 35 | } else if (/\d/.test(ch)) { 36 | stream.eatWhile(/[\w\.]/); 37 | return "number"; 38 | } else if (ch == "#") { 39 | if (stream.eat("*")) 40 | return chain(stream, state, tokenComment); 41 | if (ch == "#" && stream.match(/ *\[ *\[/)) 42 | return chain(stream, state, tokenUnparsed); 43 | stream.skipToEnd(); 44 | return "comment"; 45 | } else if (ch == '"') { 46 | stream.skipTo(/"/); 47 | return "comment"; 48 | } else if (ch == "$") { 49 | stream.eatWhile(/[$_a-z0-9A-Z\.{:]/); 50 | stream.eatWhile(/}/); 51 | state.beforeParams = true; 52 | return "builtin"; 53 | } else if (isOperatorChar.test(ch)) { 54 | stream.eatWhile(isOperatorChar); 55 | return "comment"; 56 | } else { 57 | stream.eatWhile(/[\w\$_{}\xa1-\uffff]/); 58 | var word = stream.current().toLowerCase(); 59 | if (keywords && keywords.propertyIsEnumerable(word)) 60 | return "keyword"; 61 | if (functions && functions.propertyIsEnumerable(word)) { 62 | state.beforeParams = true; 63 | return "keyword"; 64 | } 65 | return null; 66 | } 67 | } 68 | function tokenString(quote) { 69 | return function(stream, state) { 70 | var escaped = false, next, end = false; 71 | while ((next = stream.next()) != null) { 72 | if (next == quote && !escaped) { 73 | end = true; 74 | break; 75 | } 76 | escaped = !escaped && next == "\\"; 77 | } 78 | if (end) state.tokenize = tokenBase; 79 | return "string"; 80 | }; 81 | } 82 | function tokenComment(stream, state) { 83 | var maybeEnd = false, ch; 84 | while (ch = stream.next()) { 85 | if (ch == "#" && maybeEnd) { 86 | state.tokenize = tokenBase; 87 | break; 88 | } 89 | maybeEnd = (ch == "*"); 90 | } 91 | return "comment"; 92 | } 93 | function tokenUnparsed(stream, state) { 94 | var maybeEnd = 0, ch; 95 | while (ch = stream.next()) { 96 | if (ch == "#" && maybeEnd == 2) { 97 | state.tokenize = tokenBase; 98 | break; 99 | } 100 | if (ch == "]") 101 | maybeEnd++; 102 | else if (ch != " ") 103 | maybeEnd = 0; 104 | } 105 | return "meta"; 106 | } 107 | export const tcl = { 108 | name: "tcl", 109 | startState: function() { 110 | return { 111 | tokenize: tokenBase, 112 | beforeParams: false, 113 | inParams: false 114 | }; 115 | }, 116 | token: function(stream, state) { 117 | if (stream.eatSpace()) return null; 118 | return state.tokenize(stream, state); 119 | }, 120 | languageData: { 121 | commentTokens: {line: "#"} 122 | } 123 | }; 124 | -------------------------------------------------------------------------------- /mode/textile.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const textile: StreamParser 3 | -------------------------------------------------------------------------------- /mode/tiddlywiki.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from 
"@codemirror/language" 2 | export declare const tiddlyWiki: StreamParser 3 | -------------------------------------------------------------------------------- /mode/tiki.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const tiki: StreamParser 3 | -------------------------------------------------------------------------------- /mode/toml.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const toml: StreamParser 3 | -------------------------------------------------------------------------------- /mode/toml.js: -------------------------------------------------------------------------------- 1 | export const toml = { 2 | name: "toml", 3 | startState: function () { 4 | return { 5 | inString: false, 6 | stringType: "", 7 | lhs: true, 8 | inArray: 0 9 | }; 10 | }, 11 | token: function (stream, state) { 12 | //check for state changes 13 | if (!state.inString && ((stream.peek() == '"') || (stream.peek() == "'"))) { 14 | state.stringType = stream.peek(); 15 | stream.next(); // Skip quote 16 | state.inString = true; // Update state 17 | } 18 | if (stream.sol() && state.inArray === 0) { 19 | state.lhs = true; 20 | } 21 | //return state 22 | if (state.inString) { 23 | while (state.inString && !stream.eol()) { 24 | if (stream.peek() === state.stringType) { 25 | stream.next(); // Skip quote 26 | state.inString = false; // Clear flag 27 | } else if (stream.peek() === '\\') { 28 | stream.next(); 29 | stream.next(); 30 | } else { 31 | stream.match(/^.[^\\\"\']*/); 32 | } 33 | } 34 | return state.lhs ? "property" : "string"; // Token style 35 | } else if (state.inArray && stream.peek() === ']') { 36 | stream.next(); 37 | state.inArray--; 38 | return 'bracket'; 39 | } else if (state.lhs && stream.peek() === '[' && stream.skipTo(']')) { 40 | stream.next();//skip closing ] 41 | // array of objects has an extra open & close [] 42 | if (stream.peek() === ']') stream.next(); 43 | return "atom"; 44 | } else if (stream.peek() === "#") { 45 | stream.skipToEnd(); 46 | return "comment"; 47 | } else if (stream.eatSpace()) { 48 | return null; 49 | } else if (state.lhs && stream.eatWhile(function (c) { return c != '=' && c != ' '; })) { 50 | return "property"; 51 | } else if (state.lhs && stream.peek() === "=") { 52 | stream.next(); 53 | state.lhs = false; 54 | return null; 55 | } else if (!state.lhs && stream.match(/^\d\d\d\d[\d\-\:\.T]*Z/)) { 56 | return 'atom'; //date 57 | } else if (!state.lhs && (stream.match('true') || stream.match('false'))) { 58 | return 'atom'; 59 | } else if (!state.lhs && stream.peek() === '[') { 60 | state.inArray++; 61 | stream.next(); 62 | return 'bracket'; 63 | } else if (!state.lhs && stream.match(/^\-?\d+(?:\.\d+)?/)) { 64 | return 'number'; 65 | } else if (!stream.eatSpace()) { 66 | stream.next(); 67 | } 68 | return null; 69 | }, 70 | languageData: { 71 | commentTokens: { line: '#' }, 72 | }, 73 | }; 74 | -------------------------------------------------------------------------------- /mode/troff.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const troff: StreamParser 3 | -------------------------------------------------------------------------------- /mode/troff.js: -------------------------------------------------------------------------------- 1 | var words 
= {}; 2 | 3 | function tokenBase(stream) { 4 | if (stream.eatSpace()) return null; 5 | 6 | var sol = stream.sol(); 7 | var ch = stream.next(); 8 | 9 | if (ch === '\\') { 10 | if (stream.match('fB') || stream.match('fR') || stream.match('fI') || 11 | stream.match('u') || stream.match('d') || 12 | stream.match('%') || stream.match('&')) { 13 | return 'string'; 14 | } 15 | if (stream.match('m[')) { 16 | stream.skipTo(']'); 17 | stream.next(); 18 | return 'string'; 19 | } 20 | if (stream.match('s+') || stream.match('s-')) { 21 | stream.eatWhile(/[\d-]/); 22 | return 'string'; 23 | } 24 | if (stream.match('\(') || stream.match('*\(')) { 25 | stream.eatWhile(/[\w-]/); 26 | return 'string'; 27 | } 28 | return 'string'; 29 | } 30 | if (sol && (ch === '.' || ch === '\'')) { 31 | if (stream.eat('\\') && stream.eat('\"')) { 32 | stream.skipToEnd(); 33 | return 'comment'; 34 | } 35 | } 36 | if (sol && ch === '.') { 37 | if (stream.match('B ') || stream.match('I ') || stream.match('R ')) { 38 | return 'attribute'; 39 | } 40 | if (stream.match('TH ') || stream.match('SH ') || stream.match('SS ') || stream.match('HP ')) { 41 | stream.skipToEnd(); 42 | return 'quote'; 43 | } 44 | if ((stream.match(/[A-Z]/) && stream.match(/[A-Z]/)) || (stream.match(/[a-z]/) && stream.match(/[a-z]/))) { 45 | return 'attribute'; 46 | } 47 | } 48 | stream.eatWhile(/[\w-]/); 49 | var cur = stream.current(); 50 | return words.hasOwnProperty(cur) ? words[cur] : null; 51 | } 52 | 53 | function tokenize(stream, state) { 54 | return (state.tokens[0] || tokenBase) (stream, state); 55 | }; 56 | 57 | export const troff = { 58 | name: "troff", 59 | startState: function() {return {tokens:[]};}, 60 | token: function(stream, state) { 61 | return tokenize(stream, state); 62 | } 63 | }; 64 | -------------------------------------------------------------------------------- /mode/ttcn-cfg.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const ttcnCfg: StreamParser 3 | -------------------------------------------------------------------------------- /mode/ttcn.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const ttcn: StreamParser 3 | -------------------------------------------------------------------------------- /mode/turtle.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const turtle: StreamParser 3 | -------------------------------------------------------------------------------- /mode/turtle.js: -------------------------------------------------------------------------------- 1 | var curPunc; 2 | 3 | function wordRegexp(words) { 4 | return new RegExp("^(?:" + words.join("|") + ")$", "i"); 5 | } 6 | var ops = wordRegexp([]); 7 | var keywords = wordRegexp(["@prefix", "@base", "a"]); 8 | var operatorChars = /[*+\-<>=&|]/; 9 | 10 | function tokenBase(stream, state) { 11 | var ch = stream.next(); 12 | curPunc = null; 13 | if (ch == "<" && !stream.match(/^[\s\u00a0=]/, false)) { 14 | stream.match(/^[^\s\u00a0>]*>?/); 15 | return "atom"; 16 | } 17 | else if (ch == "\"" || ch == "'") { 18 | state.tokenize = tokenLiteral(ch); 19 | return state.tokenize(stream, state); 20 | } 21 | else if (/[{}\(\),\.;\[\]]/.test(ch)) { 22 | curPunc = ch; 23 | return null; 24 | } 25 | else if (ch == "#") { 26 | stream.skipToEnd(); 27 | 
return "comment"; 28 | } 29 | else if (operatorChars.test(ch)) { 30 | stream.eatWhile(operatorChars); 31 | return null; 32 | } 33 | else if (ch == ":") { 34 | return "operator"; 35 | } else { 36 | stream.eatWhile(/[_\w\d]/); 37 | if(stream.peek() == ":") { 38 | return "variableName.special"; 39 | } else { 40 | var word = stream.current(); 41 | 42 | if(keywords.test(word)) { 43 | return "meta"; 44 | } 45 | 46 | if(ch >= "A" && ch <= "Z") { 47 | return "comment"; 48 | } else { 49 | return "keyword"; 50 | } 51 | } 52 | var word = stream.current(); 53 | if (ops.test(word)) 54 | return null; 55 | else if (keywords.test(word)) 56 | return "meta"; 57 | else 58 | return "variable"; 59 | } 60 | } 61 | 62 | function tokenLiteral(quote) { 63 | return function(stream, state) { 64 | var escaped = false, ch; 65 | while ((ch = stream.next()) != null) { 66 | if (ch == quote && !escaped) { 67 | state.tokenize = tokenBase; 68 | break; 69 | } 70 | escaped = !escaped && ch == "\\"; 71 | } 72 | return "string"; 73 | }; 74 | } 75 | 76 | function pushContext(state, type, col) { 77 | state.context = {prev: state.context, indent: state.indent, col: col, type: type}; 78 | } 79 | function popContext(state) { 80 | state.indent = state.context.indent; 81 | state.context = state.context.prev; 82 | } 83 | 84 | export const turtle = { 85 | name: "turtle", 86 | startState: function() { 87 | return {tokenize: tokenBase, 88 | context: null, 89 | indent: 0, 90 | col: 0}; 91 | }, 92 | 93 | token: function(stream, state) { 94 | if (stream.sol()) { 95 | if (state.context && state.context.align == null) state.context.align = false; 96 | state.indent = stream.indentation(); 97 | } 98 | if (stream.eatSpace()) return null; 99 | var style = state.tokenize(stream, state); 100 | 101 | if (style != "comment" && state.context && state.context.align == null && state.context.type != "pattern") { 102 | state.context.align = true; 103 | } 104 | 105 | if (curPunc == "(") pushContext(state, ")", stream.column()); 106 | else if (curPunc == "[") pushContext(state, "]", stream.column()); 107 | else if (curPunc == "{") pushContext(state, "}", stream.column()); 108 | else if (/[\]\}\)]/.test(curPunc)) { 109 | while (state.context && state.context.type == "pattern") popContext(state); 110 | if (state.context && curPunc == state.context.type) popContext(state); 111 | } 112 | else if (curPunc == "." && state.context && state.context.type == "pattern") popContext(state); 113 | else if (/atom|string|variable/.test(style) && state.context) { 114 | if (/[\}\]]/.test(state.context.type)) 115 | pushContext(state, "pattern", stream.column()); 116 | else if (state.context.type == "pattern" && !state.context.align) { 117 | state.context.align = true; 118 | state.context.col = stream.column(); 119 | } 120 | } 121 | 122 | return style; 123 | }, 124 | 125 | indent: function(state, textAfter, cx) { 126 | var firstChar = textAfter && textAfter.charAt(0); 127 | var context = state.context; 128 | if (/[\]\}]/.test(firstChar)) 129 | while (context && context.type == "pattern") context = context.prev; 130 | 131 | var closing = context && firstChar == context.type; 132 | if (!context) 133 | return 0; 134 | else if (context.type == "pattern") 135 | return context.col; 136 | else if (context.align) 137 | return context.col + (closing ? 0 : 1); 138 | else 139 | return context.indent + (closing ? 
0 : cx.unit); 140 | }, 141 | 142 | languageData: { 143 | commentTokens: {line: "#"} 144 | } 145 | }; 146 | -------------------------------------------------------------------------------- /mode/vb.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const vb: StreamParser 3 | -------------------------------------------------------------------------------- /mode/vbscript.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const vbScript: StreamParser 3 | export declare const vbScriptASP: StreamParser 4 | -------------------------------------------------------------------------------- /mode/velocity.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const velocity: StreamParser 3 | -------------------------------------------------------------------------------- /mode/verilog.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const verilog: StreamParser 3 | export declare const tlv: StreamParser 4 | -------------------------------------------------------------------------------- /mode/vhdl.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const vhdl: StreamParser 3 | -------------------------------------------------------------------------------- /mode/wast.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const wast: StreamParser 3 | -------------------------------------------------------------------------------- /mode/wast.js: -------------------------------------------------------------------------------- 1 | import {simpleMode} from "./simple-mode.js" 2 | 3 | var kKeywords = [ 4 | "align", 5 | "block", 6 | "br(_if|_table|_on_(cast|data|func|i31|null))?", 7 | "call(_indirect|_ref)?", 8 | "current_memory", 9 | "\\bdata\\b", 10 | "catch(_all)?", 11 | "delegate", 12 | "drop", 13 | "elem", 14 | "else", 15 | "end", 16 | "export", 17 | "\\bextern\\b", 18 | "\\bfunc\\b", 19 | "global(\\.(get|set))?", 20 | "if", 21 | "import", 22 | "local(\\.(get|set|tee))?", 23 | "loop", 24 | "module", 25 | "mut", 26 | "nop", 27 | "offset", 28 | "param", 29 | "result", 30 | "rethrow", 31 | "return(_call(_indirect|_ref)?)?", 32 | "select", 33 | "start", 34 | "table(\\.(size|get|set|size|grow|fill|init|copy))?", 35 | "then", 36 | "throw", 37 | "try", 38 | "type", 39 | "unreachable", 40 | "unwind", 41 | 42 | // Numeric opcodes. 
43 | "i(32|64)\\.(store(8|16)|(load(8|16)_[su]))", 44 | "i64\\.(load32_[su]|store32)", 45 | "[fi](32|64)\\.(const|load|store)", 46 | "f(32|64)\\.(abs|add|ceil|copysign|div|eq|floor|[gl][et]|max|min|mul|nearest|neg?|sqrt|sub|trunc)", 47 | "i(32|64)\\.(a[dn]d|c[lt]z|(div|rem)_[su]|eqz?|[gl][te]_[su]|mul|ne|popcnt|rot[lr]|sh(l|r_[su])|sub|x?or)", 48 | "i64\\.extend_[su]_i32", 49 | "i32\\.wrap_i64", 50 | "i(32|64)\\.trunc_f(32|64)_[su]", 51 | "f(32|64)\\.convert_i(32|64)_[su]", 52 | "f64\\.promote_f32", 53 | "f32\\.demote_f64", 54 | "f32\\.reinterpret_i32", 55 | "i32\\.reinterpret_f32", 56 | "f64\\.reinterpret_i64", 57 | "i64\\.reinterpret_f64", 58 | // Atomics. 59 | "memory(\\.((atomic\\.(notify|wait(32|64)))|grow|size))?", 60 | "i64\.atomic\\.(load32_u|store32|rmw32\\.(a[dn]d|sub|x?or|(cmp)?xchg)_u)", 61 | "i(32|64)\\.atomic\\.(load((8|16)_u)?|store(8|16)?|rmw(\\.(a[dn]d|sub|x?or|(cmp)?xchg)|(8|16)\\.(a[dn]d|sub|x?or|(cmp)?xchg)_u))", 62 | // SIMD. 63 | "v128\\.load(8x8|16x4|32x2)_[su]", 64 | "v128\\.load(8|16|32|64)_splat", 65 | "v128\\.(load|store)(8|16|32|64)_lane", 66 | "v128\\.load(32|64)_zero", 67 | "v128\.(load|store|const|not|andnot|and|or|xor|bitselect|any_true)", 68 | "i(8x16|16x8)\\.(extract_lane_[su]|(add|sub)_sat_[su]|avgr_u)", 69 | "i(8x16|16x8|32x4|64x2)\\.(neg|add|sub|abs|shl|shr_[su]|all_true|bitmask|eq|ne|[lg][te]_s)", 70 | "(i(8x16|16x8|32x4|64x2)|f(32x4|64x2))\.(splat|replace_lane)", 71 | "i(8x16|16x8|32x4)\\.(([lg][te]_u)|((min|max)_[su]))", 72 | "f(32x4|64x2)\\.(neg|add|sub|abs|nearest|eq|ne|[lg][te]|sqrt|mul|div|min|max|ceil|floor|trunc)", 73 | "[fi](32x4|64x2)\\.extract_lane", 74 | "i8x16\\.(shuffle|swizzle|popcnt|narrow_i16x8_[su])", 75 | "i16x8\\.(narrow_i32x4_[su]|mul|extadd_pairwise_i8x16_[su]|q15mulr_sat_s)", 76 | "i16x8\\.(extend|extmul)_(low|high)_i8x16_[su]", 77 | "i32x4\\.(mul|dot_i16x8_s|trunc_sat_f64x2_[su]_zero)", 78 | "i32x4\\.((extend|extmul)_(low|high)_i16x8_|trunc_sat_f32x4_|extadd_pairwise_i16x8_)[su]", 79 | "i64x2\\.(mul|(extend|extmul)_(low|high)_i32x4_[su])", 80 | "f32x4\\.(convert_i32x4_[su]|demote_f64x2_zero)", 81 | "f64x2\\.(promote_low_f32x4|convert_low_i32x4_[su])", 82 | // Reference types, function references, and GC. 
83 | "\\bany\\b", 84 | "array\\.len", 85 | "(array|struct)(\\.(new_(default_)?with_rtt|get(_[su])?|set))?", 86 | "\\beq\\b", 87 | "field", 88 | "i31\\.(new|get_[su])", 89 | "\\bnull\\b", 90 | "ref(\\.(([ai]s_(data|func|i31))|cast|eq|func|(is_|as_non_)?null|test))?", 91 | "rtt(\\.(canon|sub))?", 92 | ]; 93 | 94 | export const wast = simpleMode({ 95 | start: [ 96 | {regex: new RegExp(kKeywords.join('|')), token: "keyword"}, 97 | {regex: /\b((any|data|eq|extern|i31|func)ref|[fi](32|64)|i(8|16))\b/, token: "atom"}, 98 | {regex: /\b(funcref|externref|[fi](32|64))\b/, token: "atom"}, 99 | {regex: /\$([a-zA-Z0-9_`\+\-\*\/\\\^~=<>!\?@#$%&|:\.]+)/, token: "variable"}, 100 | {regex: /"(?:[^"\\\x00-\x1f\x7f]|\\[nt\\'"]|\\[0-9a-fA-F][0-9a-fA-F])*"/, token: "string"}, 101 | {regex: /\(;.*?/, token: "comment", next: "comment"}, 102 | {regex: /;;.*$/, token: "comment"}, 103 | {regex: /\(/, indent: true}, 104 | {regex: /\)/, dedent: true}, 105 | ], 106 | 107 | comment: [ 108 | {regex: /.*?;\)/, token: "comment", next: "start"}, 109 | {regex: /.*/, token: "comment"}, 110 | ], 111 | 112 | languageData: { 113 | name: "wast", 114 | dontIndentStates: ['comment'], 115 | }, 116 | }); 117 | 118 | // https://github.com/WebAssembly/design/issues/981 mentions text/webassembly, 119 | // which seems like a reasonable choice, although it's not standard right now. 120 | -------------------------------------------------------------------------------- /mode/webidl.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const webIDL: StreamParser 3 | -------------------------------------------------------------------------------- /mode/webidl.js: -------------------------------------------------------------------------------- 1 | function wordRegexp(words) { 2 | return new RegExp("^((" + words.join(")|(") + "))\\b"); 3 | }; 4 | 5 | var builtinArray = [ 6 | "Clamp", 7 | "Constructor", 8 | "EnforceRange", 9 | "Exposed", 10 | "ImplicitThis", 11 | "Global", "PrimaryGlobal", 12 | "LegacyArrayClass", 13 | "LegacyUnenumerableNamedProperties", 14 | "LenientThis", 15 | "NamedConstructor", 16 | "NewObject", 17 | "NoInterfaceObject", 18 | "OverrideBuiltins", 19 | "PutForwards", 20 | "Replaceable", 21 | "SameObject", 22 | "TreatNonObjectAsNull", 23 | "TreatNullAs", 24 | "EmptyString", 25 | "Unforgeable", 26 | "Unscopeable" 27 | ]; 28 | var builtins = wordRegexp(builtinArray); 29 | 30 | var typeArray = [ 31 | "unsigned", "short", "long", // UnsignedIntegerType 32 | "unrestricted", "float", "double", // UnrestrictedFloatType 33 | "boolean", "byte", "octet", // Rest of PrimitiveType 34 | "Promise", // PromiseType 35 | "ArrayBuffer", "DataView", "Int8Array", "Int16Array", "Int32Array", 36 | "Uint8Array", "Uint16Array", "Uint32Array", "Uint8ClampedArray", 37 | "Float32Array", "Float64Array", // BufferRelatedType 38 | "ByteString", "DOMString", "USVString", "sequence", "object", "RegExp", 39 | "Error", "DOMException", "FrozenArray", // Rest of NonAnyType 40 | "any", // Rest of SingleType 41 | "void" // Rest of ReturnType 42 | ]; 43 | var types = wordRegexp(typeArray); 44 | 45 | var keywordArray = [ 46 | "attribute", "callback", "const", "deleter", "dictionary", "enum", "getter", 47 | "implements", "inherit", "interface", "iterable", "legacycaller", "maplike", 48 | "partial", "required", "serializer", "setlike", "setter", "static", 49 | "stringifier", "typedef", // ArgumentNameKeyword except 50 | // "unrestricted" 51 | "optional", 
"readonly", "or" 52 | ]; 53 | var keywords = wordRegexp(keywordArray); 54 | 55 | var atomArray = [ 56 | "true", "false", // BooleanLiteral 57 | "Infinity", "NaN", // FloatLiteral 58 | "null" // Rest of ConstValue 59 | ]; 60 | var atoms = wordRegexp(atomArray); 61 | 62 | var startDefArray = ["callback", "dictionary", "enum", "interface"]; 63 | var startDefs = wordRegexp(startDefArray); 64 | 65 | var endDefArray = ["typedef"]; 66 | var endDefs = wordRegexp(endDefArray); 67 | 68 | var singleOperators = /^[:<=>?]/; 69 | var integers = /^-?([1-9][0-9]*|0[Xx][0-9A-Fa-f]+|0[0-7]*)/; 70 | var floats = /^-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+)/; 71 | var identifiers = /^_?[A-Za-z][0-9A-Z_a-z-]*/; 72 | var identifiersEnd = /^_?[A-Za-z][0-9A-Z_a-z-]*(?=\s*;)/; 73 | var strings = /^"[^"]*"/; 74 | var multilineComments = /^\/\*.*?\*\//; 75 | var multilineCommentsStart = /^\/\*.*/; 76 | var multilineCommentsEnd = /^.*?\*\//; 77 | 78 | function readToken(stream, state) { 79 | // whitespace 80 | if (stream.eatSpace()) return null; 81 | 82 | // comment 83 | if (state.inComment) { 84 | if (stream.match(multilineCommentsEnd)) { 85 | state.inComment = false; 86 | return "comment"; 87 | } 88 | stream.skipToEnd(); 89 | return "comment"; 90 | } 91 | if (stream.match("//")) { 92 | stream.skipToEnd(); 93 | return "comment"; 94 | } 95 | if (stream.match(multilineComments)) return "comment"; 96 | if (stream.match(multilineCommentsStart)) { 97 | state.inComment = true; 98 | return "comment"; 99 | } 100 | 101 | // integer and float 102 | if (stream.match(/^-?[0-9\.]/, false)) { 103 | if (stream.match(integers) || stream.match(floats)) return "number"; 104 | } 105 | 106 | // string 107 | if (stream.match(strings)) return "string"; 108 | 109 | // identifier 110 | if (state.startDef && stream.match(identifiers)) return "def"; 111 | 112 | if (state.endDef && stream.match(identifiersEnd)) { 113 | state.endDef = false; 114 | return "def"; 115 | } 116 | 117 | if (stream.match(keywords)) return "keyword"; 118 | 119 | if (stream.match(types)) { 120 | var lastToken = state.lastToken; 121 | var nextToken = (stream.match(/^\s*(.+?)\b/, false) || [])[1]; 122 | 123 | if (lastToken === ":" || lastToken === "implements" || 124 | nextToken === "implements" || nextToken === "=") { 125 | // Used as identifier 126 | return "builtin"; 127 | } else { 128 | // Used as type 129 | return "type"; 130 | } 131 | } 132 | 133 | if (stream.match(builtins)) return "builtin"; 134 | if (stream.match(atoms)) return "atom"; 135 | if (stream.match(identifiers)) return "variable"; 136 | 137 | // other 138 | if (stream.match(singleOperators)) return "operator"; 139 | 140 | // unrecognized 141 | stream.next(); 142 | return null; 143 | }; 144 | 145 | export const webIDL = { 146 | name: "webidl", 147 | startState: function() { 148 | return { 149 | // Is in multiline comment 150 | inComment: false, 151 | // Last non-whitespace, matched token 152 | lastToken: "", 153 | // Next token is a definition 154 | startDef: false, 155 | // Last token of the statement is a definition 156 | endDef: false 157 | }; 158 | }, 159 | token: function(stream, state) { 160 | var style = readToken(stream, state); 161 | 162 | if (style) { 163 | var cur = stream.current(); 164 | state.lastToken = cur; 165 | if (style === "keyword") { 166 | state.startDef = startDefs.test(cur); 167 | state.endDef = state.endDef || endDefs.test(cur); 168 | } else { 169 | state.startDef = false; 170 | } 171 | } 172 | 173 | return style; 174 | }, 175 | 176 | 
languageData: { 177 | autocomplete: builtinArray.concat(typeArray).concat(keywordArray).concat(atomArray) 178 | } 179 | }; 180 | -------------------------------------------------------------------------------- /mode/xml.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const xml: StreamParser 3 | export declare const html: StreamParser 4 | -------------------------------------------------------------------------------- /mode/xquery.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const xQuery: StreamParser 3 | -------------------------------------------------------------------------------- /mode/yacas.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const yacas: StreamParser 3 | -------------------------------------------------------------------------------- /mode/yacas.js: -------------------------------------------------------------------------------- 1 | function words(str) { 2 | var obj = {}, words = str.split(" "); 3 | for (var i = 0; i < words.length; ++i) obj[words[i]] = true; 4 | return obj; 5 | } 6 | 7 | var bodiedOps = words("Assert BackQuote D Defun Deriv For ForEach FromFile " + 8 | "FromString Function Integrate InverseTaylor Limit " + 9 | "LocalSymbols Macro MacroRule MacroRulePattern " + 10 | "NIntegrate Rule RulePattern Subst TD TExplicitSum " + 11 | "TSum Taylor Taylor1 Taylor2 Taylor3 ToFile " + 12 | "ToStdout ToString TraceRule Until While"); 13 | 14 | // patterns 15 | var pFloatForm = "(?:(?:\\.\\d+|\\d+\\.\\d*|\\d+)(?:[eE][+-]?\\d+)?)"; 16 | var pIdentifier = "(?:[a-zA-Z\\$'][a-zA-Z0-9\\$']*)"; 17 | 18 | // regular expressions 19 | var reFloatForm = new RegExp(pFloatForm); 20 | var reIdentifier = new RegExp(pIdentifier); 21 | var rePattern = new RegExp(pIdentifier + "?_" + pIdentifier); 22 | var reFunctionLike = new RegExp(pIdentifier + "\\s*\\("); 23 | 24 | function tokenBase(stream, state) { 25 | var ch; 26 | 27 | // get next character 28 | ch = stream.next(); 29 | 30 | // string 31 | if (ch === '"') { 32 | state.tokenize = tokenString; 33 | return state.tokenize(stream, state); 34 | } 35 | 36 | // comment 37 | if (ch === '/') { 38 | if (stream.eat('*')) { 39 | state.tokenize = tokenComment; 40 | return state.tokenize(stream, state); 41 | } 42 | if (stream.eat("/")) { 43 | stream.skipToEnd(); 44 | return "comment"; 45 | } 46 | } 47 | 48 | // go back one character 49 | stream.backUp(1); 50 | 51 | // update scope info 52 | var m = stream.match(/^(\w+)\s*\(/, false); 53 | if (m !== null && bodiedOps.hasOwnProperty(m[1])) 54 | state.scopes.push('bodied'); 55 | 56 | var scope = currentScope(state); 57 | 58 | if (scope === 'bodied' && ch === '[') 59 | state.scopes.pop(); 60 | 61 | if (ch === '[' || ch === '{' || ch === '(') 62 | state.scopes.push(ch); 63 | 64 | scope = currentScope(state); 65 | 66 | if (scope === '[' && ch === ']' || 67 | scope === '{' && ch === '}' || 68 | scope === '(' && ch === ')') 69 | state.scopes.pop(); 70 | 71 | if (ch === ';') { 72 | while (scope === 'bodied') { 73 | state.scopes.pop(); 74 | scope = currentScope(state); 75 | } 76 | } 77 | 78 | // look for ordered rules 79 | if (stream.match(/\d+ *#/, true, false)) { 80 | return 'qualifier'; 81 | } 82 | 83 | // look for numbers 84 | if (stream.match(reFloatForm, true, 
false)) { 85 | return 'number'; 86 | } 87 | 88 | // look for placeholders 89 | if (stream.match(rePattern, true, false)) { 90 | return 'variableName.special'; 91 | } 92 | 93 | // match all braces separately 94 | if (stream.match(/(?:\[|\]|{|}|\(|\))/, true, false)) { 95 | return 'bracket'; 96 | } 97 | 98 | // literals looking like function calls 99 | if (stream.match(reFunctionLike, true, false)) { 100 | stream.backUp(1); 101 | return 'variableName.function'; 102 | } 103 | 104 | // all other identifiers 105 | if (stream.match(reIdentifier, true, false)) { 106 | return 'variable'; 107 | } 108 | 109 | // operators; note that operators like @@ or /; are matched separately for each symbol. 110 | if (stream.match(/(?:\\|\+|\-|\*|\/|,|;|\.|:|@|~|=|>|<|&|\||_|`|'|\^|\?|!|%|#)/, true, false)) { 111 | return 'operator'; 112 | } 113 | 114 | // everything else is an error 115 | return 'error'; 116 | } 117 | 118 | function tokenString(stream, state) { 119 | var next, end = false, escaped = false; 120 | while ((next = stream.next()) != null) { 121 | if (next === '"' && !escaped) { 122 | end = true; 123 | break; 124 | } 125 | escaped = !escaped && next === '\\'; 126 | } 127 | if (end && !escaped) { 128 | state.tokenize = tokenBase; 129 | } 130 | return 'string'; 131 | }; 132 | 133 | function tokenComment(stream, state) { 134 | var prev, next; 135 | while((next = stream.next()) != null) { 136 | if (prev === '*' && next === '/') { 137 | state.tokenize = tokenBase; 138 | break; 139 | } 140 | prev = next; 141 | } 142 | return 'comment'; 143 | } 144 | 145 | function currentScope(state) { 146 | var scope = null; 147 | if (state.scopes.length > 0) 148 | scope = state.scopes[state.scopes.length - 1]; 149 | return scope; 150 | } 151 | 152 | export const yacas = { 153 | name: "yacas", 154 | startState: function() { 155 | return { 156 | tokenize: tokenBase, 157 | scopes: [] 158 | }; 159 | }, 160 | token: function(stream, state) { 161 | if (stream.eatSpace()) return null; 162 | return state.tokenize(stream, state); 163 | }, 164 | indent: function(state, textAfter, cx) { 165 | if (state.tokenize !== tokenBase && state.tokenize !== null) 166 | return null; 167 | 168 | var delta = 0; 169 | if (textAfter === ']' || textAfter === '];' || 170 | textAfter === '}' || textAfter === '};' || 171 | textAfter === ');') 172 | delta = -1; 173 | 174 | return (state.scopes.length + delta) * cx.unit; 175 | }, 176 | 177 | languageData: { 178 | electricInput: /[{}\[\]()\;]/, 179 | commentTokens: {line: "//", block: {open: "/*", close: "*/"}} 180 | } 181 | }; 182 | -------------------------------------------------------------------------------- /mode/yaml.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const yaml: StreamParser 3 | -------------------------------------------------------------------------------- /mode/yaml.js: -------------------------------------------------------------------------------- 1 | var cons = ['true', 'false', 'on', 'off', 'yes', 'no']; 2 | var keywordRegex = new RegExp("\\b(("+cons.join(")|(")+"))$", 'i'); 3 | 4 | export const yaml = { 5 | name: "yaml", 6 | token: function(stream, state) { 7 | var ch = stream.peek(); 8 | var esc = state.escaped; 9 | state.escaped = false; 10 | /* comments */ 11 | if (ch == "#" && (stream.pos == 0 || /\s/.test(stream.string.charAt(stream.pos - 1)))) { 12 | stream.skipToEnd(); 13 | return "comment"; 14 | } 15 | 16 | if 
(stream.match(/^('([^']|\\.)*'?|"([^"]|\\.)*"?)/)) 17 | return "string"; 18 | 19 | if (state.literal && stream.indentation() > state.keyCol) { 20 | stream.skipToEnd(); return "string"; 21 | } else if (state.literal) { state.literal = false; } 22 | if (stream.sol()) { 23 | state.keyCol = 0; 24 | state.pair = false; 25 | state.pairStart = false; 26 | /* document start */ 27 | if(stream.match('---')) { return "def"; } 28 | /* document end */ 29 | if (stream.match('...')) { return "def"; } 30 | /* array list item */ 31 | if (stream.match(/^\s*-\s+/)) { return 'meta'; } 32 | } 33 | /* inline pairs/lists */ 34 | if (stream.match(/^(\{|\}|\[|\])/)) { 35 | if (ch == '{') 36 | state.inlinePairs++; 37 | else if (ch == '}') 38 | state.inlinePairs--; 39 | else if (ch == '[') 40 | state.inlineList++; 41 | else 42 | state.inlineList--; 43 | return 'meta'; 44 | } 45 | 46 | /* list separator */ 47 | if (state.inlineList > 0 && !esc && ch == ',') { 48 | stream.next(); 49 | return 'meta'; 50 | } 51 | /* pairs separator */ 52 | if (state.inlinePairs > 0 && !esc && ch == ',') { 53 | state.keyCol = 0; 54 | state.pair = false; 55 | state.pairStart = false; 56 | stream.next(); 57 | return 'meta'; 58 | } 59 | 60 | /* start of value of a pair */ 61 | if (state.pairStart) { 62 | /* block literals */ 63 | if (stream.match(/^\s*(\||\>)\s*/)) { state.literal = true; return 'meta'; }; 64 | /* references */ 65 | if (stream.match(/^\s*(\&|\*)[a-z0-9\._-]+\b/i)) { return 'variable'; } 66 | /* numbers */ 67 | if (state.inlinePairs == 0 && stream.match(/^\s*-?[0-9\.\,]+\s?$/)) { return 'number'; } 68 | if (state.inlinePairs > 0 && stream.match(/^\s*-?[0-9\.\,]+\s?(?=(,|}))/)) { return 'number'; } 69 | /* keywords */ 70 | if (stream.match(keywordRegex)) { return 'keyword'; } 71 | } 72 | 73 | /* pairs (associative arrays) -> key */ 74 | if (!state.pair && stream.match(/^\s*(?:[,\[\]{}&*!|>'"%@`][^\s'":]|[^,\[\]{}#&*!|>'"%@`])[^#]*?(?=\s*:($|\s))/)) { 75 | state.pair = true; 76 | state.keyCol = stream.indentation(); 77 | return "atom"; 78 | } 79 | if (state.pair && stream.match(/^:\s*/)) { state.pairStart = true; return 'meta'; } 80 | 81 | /* nothing found, continue */ 82 | state.pairStart = false; 83 | state.escaped = (ch == '\\'); 84 | stream.next(); 85 | return null; 86 | }, 87 | startState: function() { 88 | return { 89 | pair: false, 90 | pairStart: false, 91 | keyCol: 0, 92 | inlinePairs: 0, 93 | inlineList: 0, 94 | literal: false, 95 | escaped: false 96 | }; 97 | }, 98 | languageData: { 99 | commentTokens: {line: "#"} 100 | } 101 | }; 102 | -------------------------------------------------------------------------------- /mode/z80.d.ts: -------------------------------------------------------------------------------- 1 | import {StreamParser} from "@codemirror/language" 2 | export declare const z80: StreamParser 3 | export declare const ez80: StreamParser 4 | -------------------------------------------------------------------------------- /mode/z80.js: -------------------------------------------------------------------------------- 1 | function mkZ80(ez80) { 2 | var keywords1, keywords2; 3 | if (ez80) { 4 | keywords1 = /^(exx?|(ld|cp)([di]r?)?|[lp]ea|pop|push|ad[cd]|cpl|daa|dec|inc|neg|sbc|sub|and|bit|[cs]cf|x?or|res|set|r[lr]c?a?|r[lr]d|s[lr]a|srl|djnz|nop|[de]i|halt|im|in([di]mr?|ir?|irx|2r?)|ot(dmr?|[id]rx|imr?)|out(0?|[di]r?|[di]2r?)|tst(io)?|slp)(\.([sl]?i)?[sl])?\b/i; 5 | keywords2 = /^(((call|j[pr]|rst|ret[in]?)(\.([sl]?i)?[sl])?)|(rs|st)mix)\b/i; 6 | } else { 7 | keywords1 = 
/^(exx?|(ld|cp|in)([di]r?)?|pop|push|ad[cd]|cpl|daa|dec|inc|neg|sbc|sub|and|bit|[cs]cf|x?or|res|set|r[lr]c?a?|r[lr]d|s[lr]a|srl|djnz|nop|rst|[de]i|halt|im|ot[di]r|out[di]?)\b/i; 8 | keywords2 = /^(call|j[pr]|ret[in]?|b_?(call|jump))\b/i; 9 | } 10 | 11 | var variables1 = /^(af?|bc?|c|de?|e|hl?|l|i[xy]?|r|sp)\b/i; 12 | var variables2 = /^(n?[zc]|p[oe]?|m)\b/i; 13 | var errors = /^([hl][xy]|i[xy][hl]|slia|sll)\b/i; 14 | var numbers = /^([\da-f]+h|[0-7]+o|[01]+b|\d+d?)\b/i; 15 | 16 | return { 17 | name: "z80", 18 | startState: function() { 19 | return { 20 | context: 0 21 | }; 22 | }, 23 | token: function(stream, state) { 24 | if (!stream.column()) 25 | state.context = 0; 26 | 27 | if (stream.eatSpace()) 28 | return null; 29 | 30 | var w; 31 | 32 | if (stream.eatWhile(/\w/)) { 33 | if (ez80 && stream.eat('.')) { 34 | stream.eatWhile(/\w/); 35 | } 36 | w = stream.current(); 37 | 38 | if (stream.indentation()) { 39 | if ((state.context == 1 || state.context == 4) && variables1.test(w)) { 40 | state.context = 4; 41 | return 'variable'; 42 | } 43 | 44 | if (state.context == 2 && variables2.test(w)) { 45 | state.context = 4; 46 | return 'variableName.special'; 47 | } 48 | 49 | if (keywords1.test(w)) { 50 | state.context = 1; 51 | return 'keyword'; 52 | } else if (keywords2.test(w)) { 53 | state.context = 2; 54 | return 'keyword'; 55 | } else if (state.context == 4 && numbers.test(w)) { 56 | return 'number'; 57 | } 58 | 59 | if (errors.test(w)) 60 | return 'error'; 61 | } else if (stream.match(numbers)) { 62 | return 'number'; 63 | } else { 64 | return null; 65 | } 66 | } else if (stream.eat(';')) { 67 | stream.skipToEnd(); 68 | return 'comment'; 69 | } else if (stream.eat('"')) { 70 | while (w = stream.next()) { 71 | if (w == '"') 72 | break; 73 | 74 | if (w == '\\') 75 | stream.next(); 76 | } 77 | return 'string'; 78 | } else if (stream.eat('\'')) { 79 | if (stream.match(/\\?.'/)) 80 | return 'number'; 81 | } else if (stream.eat('.') || stream.sol() && stream.eat('#')) { 82 | state.context = 5; 83 | 84 | if (stream.eatWhile(/\w/)) 85 | return 'def'; 86 | } else if (stream.eat('$')) { 87 | if (stream.eatWhile(/[\da-f]/i)) 88 | return 'number'; 89 | } else if (stream.eat('%')) { 90 | if (stream.eatWhile(/[01]/)) 91 | return 'number'; 92 | } else { 93 | stream.next(); 94 | } 95 | return null; 96 | } 97 | }; 98 | }; 99 | 100 | export const z80 = mkZ80(false) 101 | export const ez80 = mkZ80(true) 102 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@codemirror/legacy-modes", 3 | "version": "6.5.1", 4 | "description": "Collection of ported legacy language modes for the CodeMirror code editor", 5 | "scripts": { 6 | "test": "cm-runtests", 7 | "prepare": "rollup -c" 8 | }, 9 | "keywords": [ 10 | "editor", 11 | "code" 12 | ], 13 | "author": { 14 | "name": "Marijn Haverbeke", 15 | "email": "marijn@haverbeke.berlin", 16 | "url": "http://marijnhaverbeke.nl" 17 | }, 18 | "type": "module", 19 | "types": "dist/index.d.ts", 20 | "module": "dist/index.js", 21 | "exports": { 22 | "./mode/*": { 23 | "import": "./mode/*.js", 24 | "require": "./mode/*.cjs" 25 | }, 26 | "./package.json": "./package.json" 27 | }, 28 | "sideEffects": false, 29 | "license": "MIT", 30 | "dependencies": { 31 | "@codemirror/language": "^6.0.0" 32 | }, 33 | "devDependencies": { 34 | "rollup": "^2.35.1", 35 | "typescript": "^4.1.3" 36 | }, 37 | "repository": { 38 | "type": "git", 39 | "url": 
"https://github.com/codemirror/legacy-modes.git" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /rollup.config.js: -------------------------------------------------------------------------------- 1 | const {readdirSync} = require("fs"), {join} = require("path") 2 | 3 | let mode = join(__dirname, "mode") 4 | 5 | module.exports = readdirSync(mode).filter(f => /\.js$/.test(f)).map(f => ({ 6 | input: join(mode, f), 7 | output: { 8 | file: join(mode, f.replace(/\.js$/, ".cjs")), 9 | format: "cjs" 10 | }, 11 | external: id => !/^(\.?\/|\w:)/.test(id) 12 | })) 13 | --------------------------------------------------------------------------------