├── .gitignore
├── AUTHORS.pygments
├── LICENSE
├── LICENSE.bottle
├── LICENSE.pygments
├── README.md
├── _config.yml
├── assets
│   ├── jquery-3.4.1.min.js
│   ├── logo.png
│   ├── main.css
│   ├── main.js
│   ├── minus.gif
│   ├── plus.gif
│   └── reset.css
├── bottle.py
├── main.py
├── pygments
│   ├── __init__.py
│   ├── __main__.py
│   ├── cmdline.py
│   ├── console.py
│   ├── filter.py
│   ├── filters
│   │   └── __init__.py
│   ├── formatter.py
│   ├── formatters
│   │   ├── __init__.py
│   │   ├── _mapping.py
│   │   ├── bbcode.py
│   │   ├── html.py
│   │   ├── img.py
│   │   ├── irc.py
│   │   ├── latex.py
│   │   ├── other.py
│   │   ├── rtf.py
│   │   ├── svg.py
│   │   ├── terminal.py
│   │   └── terminal256.py
│   ├── lexer.py
│   ├── lexers
│   │   ├── __init__.py
│   │   ├── _asy_builtins.py
│   │   ├── _cl_builtins.py
│   │   ├── _cocoa_builtins.py
│   │   ├── _csound_builtins.py
│   │   ├── _lasso_builtins.py
│   │   ├── _lua_builtins.py
│   │   ├── _mapping.py
│   │   ├── _mql_builtins.py
│   │   ├── _openedge_builtins.py
│   │   ├── _php_builtins.py
│   │   ├── _postgres_builtins.py
│   │   ├── _scilab_builtins.py
│   │   ├── _sourcemod_builtins.py
│   │   ├── _stan_builtins.py
│   │   ├── _stata_builtins.py
│   │   ├── _tsql_builtins.py
│   │   ├── _vbscript_builtins.py
│   │   ├── _vim_builtins.py
│   │   ├── actionscript.py
│   │   ├── agile.py
│   │   ├── algebra.py
│   │   ├── ambient.py
│   │   ├── ampl.py
│   │   ├── apl.py
│   │   ├── archetype.py
│   │   ├── asm.py
│   │   ├── automation.py
│   │   ├── basic.py
│   │   ├── bibtex.py
│   │   ├── boa.py
│   │   ├── business.py
│   │   ├── c_cpp.py
│   │   ├── c_like.py
│   │   ├── capnproto.py
│   │   ├── chapel.py
│   │   ├── clean.py
│   │   ├── compiled.py
│   │   ├── configs.py
│   │   ├── console.py
│   │   ├── crystal.py
│   │   ├── csound.py
│   │   ├── css.py
│   │   ├── d.py
│   │   ├── dalvik.py
│   │   ├── data.py
│   │   ├── diff.py
│   │   ├── dotnet.py
│   │   ├── dsls.py
│   │   ├── dylan.py
│   │   ├── ecl.py
│   │   ├── eiffel.py
│   │   ├── elm.py
│   │   ├── email.py
│   │   ├── erlang.py
│   │   ├── esoteric.py
│   │   ├── ezhil.py
│   │   ├── factor.py
│   │   ├── fantom.py
│   │   ├── felix.py
│   │   ├── floscript.py
│   │   ├── forth.py
│   │   ├── fortran.py
│   │   ├── foxpro.py
│   │   ├── freefem.py
│   │   ├── functional.py
│   │   ├── go.py
│   │   ├── grammar_notation.py
│   │   ├── graph.py
│   │   ├── graphics.py
│   │   ├── haskell.py
│   │   ├── haxe.py
│   │   ├── hdl.py
│   │   ├── hexdump.py
│   │   ├── html.py
│   │   ├── idl.py
│   │   ├── igor.py
│   │   ├── inferno.py
│   │   ├── installers.py
│   │   ├── int_fiction.py
│   │   ├── iolang.py
│   │   ├── j.py
│   │   ├── javascript.py
│   │   ├── julia.py
│   │   ├── jvm.py
│   │   ├── lisp.py
│   │   ├── make.py
│   │   ├── markup.py
│   │   ├── math.py
│   │   ├── matlab.py
│   │   ├── mime.py
│   │   ├── ml.py
│   │   ├── modeling.py
│   │   ├── modula2.py
│   │   ├── monte.py
│   │   ├── ncl.py
│   │   ├── nimrod.py
│   │   ├── nit.py
│   │   ├── nix.py
│   │   ├── oberon.py
│   │   ├── objective.py
│   │   ├── ooc.py
│   │   ├── other.py
│   │   ├── parasail.py
│   │   ├── parsers.py
│   │   ├── pascal.py
│   │   ├── pawn.py
│   │   ├── perl.py
│   │   ├── php.py
│   │   ├── pony.py
│   │   ├── praat.py
│   │   ├── prolog.py
│   │   ├── python.py
│   │   ├── qvt.py
│   │   ├── r.py
│   │   ├── rdf.py
│   │   ├── rebol.py
│   │   ├── resource.py
│   │   ├── rnc.py
│   │   ├── roboconf.py
│   │   ├── robotframework.py
│   │   ├── ruby.py
│   │   ├── rust.py
│   │   ├── sas.py
│   │   ├── scdoc.py
│   │   ├── scripting.py
│   │   ├── sgf.py
│   │   ├── shell.py
│   │   ├── slash.py
│   │   ├── smalltalk.py
│   │   ├── smv.py
│   │   ├── snobol.py
│   │   ├── solidity.py
│   │   ├── special.py
│   │   ├── sql.py
│   │   ├── stata.py
│   │   ├── supercollider.py
│   │   ├── tcl.py
│   │   ├── templates.py
│   │   ├── teraterm.py
│   │   ├── testing.py
│   │   ├── text.py
│   │   ├── textedit.py
│   │   ├── textfmts.py
│   │   ├── theorem.py
│   │   ├── trafficscript.py
│   │   ├── typoscript.py
│   │   ├── unicon.py
│   │   ├── urbi.py
│   │   ├── varnish.py
│   │   ├── verification.py
│   │   ├── web.py
│   │   ├── webmisc.py
│   │   ├── whiley.py
│   │   ├── x10.py
│   │   ├── xorg.py
│   │   └── zig.py
│   ├── modeline.py
│   ├── plugin.py
│   ├── regexopt.py
│   ├── scanner.py
│   ├── sphinxext.py
│   ├── style.py
│   ├── styles
│   │   ├── __init__.py
│   │   ├── abap.py
│   │   ├── algol.py
│   │   ├── algol_nu.py
│   │   ├── arduino.py
│   │   ├── autumn.py
│   │   ├── borland.py
│   │   ├── bw.py
│   │   ├── colorful.py
│   │   ├── default.py
│   │   ├── emacs.py
│   │   ├── friendly.py
│   │   ├── fruity.py
│   │   ├── igor.py
│   │   ├── inkpot.py
│   │   ├── lovelace.py
│   │   ├── manni.py
│   │   ├── monokai.py
│   │   ├── murphy.py
│   │   ├── native.py
│   │   ├── paraiso_dark.py
│   │   ├── paraiso_light.py
│   │   ├── pastie.py
│   │   ├── perldoc.py
│   │   ├── rainbow_dash.py
│   │   ├── rrt.py
│   │   ├── sas.py
│   │   ├── solarized.py
│   │   ├── stata_dark.py
│   │   ├── stata_light.py
│   │   ├── tango.py
│   │   ├── trac.py
│   │   ├── vim.py
│   │   ├── vs.py
│   │   └── xcode.py
│   ├── token.py
│   ├── unistring.py
│   └── util.py
└── views
    ├── base.tpl
    ├── breadcrumb.tpl
    ├── search.tpl
    └── view.tpl
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Repomono Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/LICENSE.bottle:
--------------------------------------------------------------------------------
1 | Copyright (c) 2012, Marcel Hellkamp.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
--------------------------------------------------------------------------------
/LICENSE.pygments:
--------------------------------------------------------------------------------
1 | Copyright (c) 2006-2019 by the respective authors (see AUTHORS.pygments file).
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are
6 | met:
7 |
8 | * Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 |
11 | * Redistributions in binary form must reproduce the above copyright
12 | notice, this list of conditions and the following disclaimer in the
13 | documentation and/or other materials provided with the distribution.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Code Search
2 |
3 | This is a code search tool that is intended to be used locally on developer workstations.
4 |
5 | - A simple download of the project is sufficient. Third-party dependencies are checked in, so it doesn't require an Internet connection to run.
6 | - The project uses the MIT license, which is permissive enough for widespread use.
7 | - It relies on GNU grep being in your PATH and uses it to search code.
8 |
9 | Demo: https://demo.repomono.com/cs/view.php
10 |
11 | NOTE: This project is in an alpha stage. Please create issues if you want to give feedback! :-)
12 |
13 | Example usage:
14 |
15 | ```
16 | $ cd ~/src/
17 | $ git clone https://github.com/repomono/cs.git
18 | $ cd ~/your-own-project-directory/
19 | $ python3 ~/src/cs/main.py
20 | Listening on http://localhost:56789/
21 | ```
22 |
23 | We implemented two versions: one in PHP with semantic cross references, and one in Python.
24 | The open source version is written in Python and tested with Python 3.8 on Fedora 32.
25 | We are still polishing the semantic cross references and will open source that in the future.
26 | At the same time we'd like to collect feedback on the overall design, user experience, and actual value of the tool.
27 |
28 | At this point we do not have enough resources to handle external code contributions. If you have something in mind, just let us know in the issues, and we'll try to prioritize it. Thank you!
29 |
--------------------------------------------------------------------------------
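
A minimal sketch of the approach the README describes: shelling out to GNU grep for the actual matching. This is illustrative only; the helper name and flag choices are assumptions, not the project's actual main.py internals.

```python
# Hypothetical helper (not taken from main.py): run GNU grep recursively
# and parse its "path:line:text" output into tuples.
import subprocess

def grep_search(pattern, root='.'):
    cmd = ['grep', '--recursive', '--line-number',
           '--binary-files=without-match', '--fixed-strings', pattern, root]
    proc = subprocess.run(cmd, capture_output=True, text=True)
    for hit in proc.stdout.splitlines():
        path, _, rest = hit.partition(':')
        lineno, _, text = rest.partition(':')
        yield path, int(lineno), text
```
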
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-minimal
--------------------------------------------------------------------------------
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repomono/cs/ee6e605eefe7b43b2d704eaa2a2dded79370c01b/assets/logo.png
--------------------------------------------------------------------------------
/assets/main.js:
--------------------------------------------------------------------------------
1 | function setState(name, key, value) {
2 | let state = localStorage.getItem(name);
3 | if (state === null) {
4 | state = {};
5 | } else {
6 | try {
7 | state = JSON.parse(state);
8 | } catch (e) {
9 | state = {};
10 | }
11 | }
12 | state[key] = value;
13 | state = JSON.stringify(state);
14 | localStorage.setItem(name, state);
15 | }
16 |
17 | function deleteState(name, key) {
18 | let state = localStorage.getItem(name);
19 | if (state === null) {
20 | return;
21 | }
22 | try {
23 | state = JSON.parse(state);
24 | } catch (e) {
25 | return;
26 | }
27 | delete state[key];
28 | state = JSON.stringify(state);
29 | localStorage.setItem(name, state);
30 | }
31 |
32 | function getState(name) {
33 | let state = localStorage.getItem(name);
34 | if (state === null) {
35 | state = {};
36 | } else {
37 | try {
38 | state = JSON.parse(state);
39 | } catch (e) {
40 | state = {};
41 | }
42 | }
43 | return state;
44 | }
45 |
46 | function verticalDividerMouseDown(evt) {
47 | evt.preventDefault();
48 | $(window).mousemove(verticalDividerMouseMove).mouseup(verticalDividerMouseUp);
49 | $('aside').data('initial-width', $('aside').width()).data('initial-pageX', evt.pageX);
50 | }
51 |
52 | function verticalDividerMouseMove(evt) {
53 | evt.preventDefault();
54 | $('aside').width(evt.pageX - $('aside').data('initial-pageX') + $('aside').data('initial-width'));
55 | }
56 |
57 | function verticalDividerMouseUp(evt) {
58 | evt.preventDefault();
59 | $(window).off('mousemove', verticalDividerMouseMove).off('mouseup', verticalDividerMouseUp);
60 | setState('aside', 'width', $('aside').width());
61 | }
62 |
63 | $(function() {
64 | $('.file-tree li').has('.dir').click(function(evt) {
65 | let link = $(this).children('.dir').children('a');
66 | let isme = (evt.target == this) || $(evt.target).is(link);
67 | if (isme && $(this).hasClass('expand')) {
68 | evt.preventDefault();
69 | $(this).removeClass('expand').addClass('collapse');
70 | setState('expand', $(this).attr('id'), 1);
71 | } else if (isme && $(this).hasClass('collapse')) {
72 | evt.preventDefault();
73 | $(this).removeClass('collapse').addClass('expand');
74 | deleteState('expand', $(this).attr('id'));
75 | }
76 | });
77 |
78 | for (let id of Object.keys(getState('expand'))) {
79 | $('#' + id).removeClass('expand').addClass('collapse');
80 | }
81 |
82 | let names_to_nodes = {};
83 |
84 | $('.n, .na, .nb, .bp, .nc, .no, .nd, .ni, .ne, .nf, .fm, .py, .nl, .nn, .nx, .nt, .nv, .vc, .vg, .vi, .vm').each(function() {
85 | let name = this.innerText;
86 | let url = encodeURI("/search?q=" + name);
87 | $(this).wrapInner('<a class="hh" href="' + url + '"></a>');
88 | if (name in names_to_nodes) {
89 | names_to_nodes[name].push(this);
90 | } else {
91 | names_to_nodes[name] = [this];
92 | }
93 | });
94 |
95 | $('.hh').hover(function() {
96 | $('.highlight').removeClass('highlight');
97 | let name = this.innerText;
98 | if (name in names_to_nodes) {
99 | $(names_to_nodes[name]).addClass('highlight');
100 | }
101 | });
102 |
103 | $('.file-tree').scroll(function() {
104 | let id = $(this).attr('id');
105 | let st = $(this).scrollTop();
106 | setState('scroll-top', id, st);
107 | });
108 |
109 | for (let [id, st] of Object.entries(getState('scroll-top'))) {
110 | $('#' + id).scrollTop(st);
111 | }
112 |
113 | $('.vertical.divider').mousedown(verticalDividerMouseDown);
114 |
115 | let asideState = getState('aside');
116 | if (asideState.hasOwnProperty('width')) {
117 | //$('aside').width(asideState.width);
118 | }
119 | });
120 |
--------------------------------------------------------------------------------
/assets/minus.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repomono/cs/ee6e605eefe7b43b2d704eaa2a2dded79370c01b/assets/minus.gif
--------------------------------------------------------------------------------
/assets/plus.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/repomono/cs/ee6e605eefe7b43b2d704eaa2a2dded79370c01b/assets/plus.gif
--------------------------------------------------------------------------------
/assets/reset.css:
--------------------------------------------------------------------------------
1 | /* http://meyerweb.com/eric/tools/css/reset/
2 | v2.0 | 20110126
3 | License: none (public domain)
4 | */
5 |
6 | html, body, div, span, applet, object, iframe,
7 | h1, h2, h3, h4, h5, h6, p, blockquote, pre,
8 | a, abbr, acronym, address, big, cite, code,
9 | del, dfn, em, img, ins, kbd, q, s, samp,
10 | small, strike, strong, sub, sup, tt, var,
11 | b, u, i, center,
12 | dl, dt, dd, ol, ul, li,
13 | fieldset, form, label, legend,
14 | table, caption, tbody, tfoot, thead, tr, th, td,
15 | article, aside, canvas, details, embed,
16 | figure, figcaption, footer, header, hgroup,
17 | menu, nav, output, ruby, section, summary,
18 | time, mark, audio, video {
19 | margin: 0;
20 | padding: 0;
21 | border: 0;
22 | font-size: 100%;
23 | font: inherit;
24 | vertical-align: baseline;
25 | }
26 | /* HTML5 display-role reset for older browsers */
27 | article, aside, details, figcaption, figure,
28 | footer, header, hgroup, menu, nav, section {
29 | display: block;
30 | }
31 | body {
32 | line-height: 1;
33 | }
34 | ol, ul {
35 | list-style: none;
36 | }
37 | blockquote, q {
38 | quotes: none;
39 | }
40 | blockquote:before, blockquote:after,
41 | q:before, q:after {
42 | content: '';
43 | content: none;
44 | }
45 | table {
46 | border-collapse: collapse;
47 | border-spacing: 0;
48 | }
--------------------------------------------------------------------------------
/pygments/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Pygments
4 | ~~~~~~~~
5 |
6 | Pygments is a syntax highlighting package written in Python.
7 |
8 | It is a generic syntax highlighter for general use in all kinds of software
9 | such as forum systems, wikis or other applications that need to prettify
10 | source code. Highlights are:
11 |
12 | * a wide range of common languages and markup formats is supported
13 | * special attention is paid to details, increasing quality by a fair amount
14 | * support for new languages and formats is added easily
15 | * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
16 | formats that PIL supports, and ANSI sequences
17 | * it is usable as a command-line tool and as a library
18 | * ... and it highlights even Brainfuck!
19 |
20 | The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
21 |
22 | .. _Pygments master branch:
23 | https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
24 |
25 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
26 | :license: BSD, see LICENSE for details.
27 | """
28 | import sys
29 |
30 | from pygments.util import StringIO, BytesIO
31 |
32 | __version__ = '2.5.2'
33 | __docformat__ = 'restructuredtext'
34 |
35 | __all__ = ['lex', 'format', 'highlight']
36 |
37 |
38 | def lex(code, lexer):
39 | """
40 | Lex ``code`` with ``lexer`` and return an iterable of tokens.
41 | """
42 | try:
43 | return lexer.get_tokens(code)
44 | except TypeError as err:
45 | if (isinstance(err.args[0], str) and
46 | ('unbound method get_tokens' in err.args[0] or
47 | 'missing 1 required positional argument' in err.args[0])):
48 | raise TypeError('lex() argument must be a lexer instance, '
49 | 'not a class')
50 | raise
51 |
52 |
53 | def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
54 | """
55 | Format a tokenlist ``tokens`` with the formatter ``formatter``.
56 |
57 | If ``outfile`` is given and a valid file object (an object
58 | with a ``write`` method), the result will be written to it, otherwise
59 | it is returned as a string.
60 | """
61 | try:
62 | if not outfile:
63 | realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
64 | formatter.format(tokens, realoutfile)
65 | return realoutfile.getvalue()
66 | else:
67 | formatter.format(tokens, outfile)
68 | except TypeError as err:
69 | if (isinstance(err.args[0], str) and
70 | ('unbound method format' in err.args[0] or
71 | 'missing 1 required positional argument' in err.args[0])):
72 | raise TypeError('format() argument must be a formatter instance, '
73 | 'not a class')
74 | raise
75 |
76 |
77 | def highlight(code, lexer, formatter, outfile=None):
78 | """
79 | Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
80 |
81 | If ``outfile`` is given and a valid file object (an object
82 | with a ``write`` method), the result will be written to it, otherwise
83 | it is returned as a string.
84 | """
85 | return format(lex(code, lexer), formatter, outfile)
86 |
87 |
88 | if __name__ == '__main__': # pragma: no cover
89 | from pygments.cmdline import main
90 | sys.exit(main(sys.argv))
91 |
--------------------------------------------------------------------------------
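
For reference, the three helpers above compose as follows; `PythonLexer` and `HtmlFormatter` are standard Pygments classes. Note that `highlight()` expects lexer and formatter instances, which is exactly what the `TypeError` guards in `lex()` and `format()` enforce.

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# highlight() = format(lex(code, lexer), formatter); it returns a string
# because no outfile is given.
html = highlight('print("hello")\n', PythonLexer(), HtmlFormatter())
print(html)  # <div class="highlight"><pre>...</pre></div>
```
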
/pygments/__main__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.__main__
4 | ~~~~~~~~~~~~~~~~~
5 |
6 | Main entry point for ``python -m pygments``.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import sys
13 | import pygments.cmdline
14 |
15 | try:
16 | sys.exit(pygments.cmdline.main(sys.argv))
17 | except KeyboardInterrupt:
18 | sys.exit(1)
19 |
--------------------------------------------------------------------------------
/pygments/console.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.console
4 | ~~~~~~~~~~~~~~~~
5 |
6 | Format colored console output.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | esc = "\x1b["
13 |
14 | codes = {}
15 | codes[""] = ""
16 | codes["reset"] = esc + "39;49;00m"
17 |
18 | codes["bold"] = esc + "01m"
19 | codes["faint"] = esc + "02m"
20 | codes["standout"] = esc + "03m"
21 | codes["underline"] = esc + "04m"
22 | codes["blink"] = esc + "05m"
23 | codes["overline"] = esc + "06m"
24 |
25 | dark_colors = ["black", "red", "green", "yellow", "blue",
26 | "magenta", "cyan", "gray"]
27 | light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
28 | "brightmagenta", "brightcyan", "white"]
29 |
30 | x = 30
31 | for d, l in zip(dark_colors, light_colors):
32 | codes[d] = esc + "%im" % x
33 | codes[l] = esc + "%im" % (60 + x)
34 | x += 1
35 |
36 | del d, l, x
37 |
38 | codes["white"] = codes["bold"]
39 |
40 |
41 | def reset_color():
42 | return codes["reset"]
43 |
44 |
45 | def colorize(color_key, text):
46 | return codes[color_key] + text + codes["reset"]
47 |
48 |
49 | def ansiformat(attr, text):
50 | """
51 | Format ``text`` with a color and/or some attributes::
52 |
53 | color normal color
54 | *color* bold color
55 | _color_ underlined color
56 | +color+ blinking color
57 | """
58 | result = []
59 | if attr[:1] == attr[-1:] == '+':
60 | result.append(codes['blink'])
61 | attr = attr[1:-1]
62 | if attr[:1] == attr[-1:] == '*':
63 | result.append(codes['bold'])
64 | attr = attr[1:-1]
65 | if attr[:1] == attr[-1:] == '_':
66 | result.append(codes['underline'])
67 | attr = attr[1:-1]
68 | result.append(codes[attr])
69 | result.append(text)
70 | result.append(codes['reset'])
71 | return ''.join(result)
72 |
--------------------------------------------------------------------------------
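
A quick illustration of the two public helpers above: `colorize()` wraps text in a single color code, while `ansiformat()` also understands the `*bold*`, `_underline_` and `+blink+` markers described in its docstring.

```python
from pygments.console import ansiformat, colorize

print(colorize('red', 'error: something failed'))
print(ansiformat('*green*', 'ok'))        # bold green
print(ansiformat('_yellow_', 'warning'))  # underlined yellow
```
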
/pygments/filter.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.filter
4 | ~~~~~~~~~~~~~~~
5 |
6 | Module that implements the default filter.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 |
13 | def apply_filters(stream, filters, lexer=None):
14 | """
15 | Use this method to apply an iterable of filters to
16 | a stream. If lexer is given it's forwarded to the
17 | filter, otherwise the filter receives `None`.
18 | """
19 | def _apply(filter_, stream):
20 | for token in filter_.filter(lexer, stream):
21 | yield token
22 | for filter_ in filters:
23 | stream = _apply(filter_, stream)
24 | return stream
25 |
26 |
27 | def simplefilter(f):
28 | """
29 | Decorator that converts a function into a filter::
30 |
31 | @simplefilter
32 | def lowercase(self, lexer, stream, options):
33 | for ttype, value in stream:
34 | yield ttype, value.lower()
35 | """
36 | return type(f.__name__, (FunctionFilter,), {
37 | '__module__': getattr(f, '__module__'),
38 | '__doc__': f.__doc__,
39 | 'function': f,
40 | })
41 |
42 |
43 | class Filter(object):
44 | """
45 | Default filter. Subclass this class or use the `simplefilter`
46 | decorator to create own filters.
47 | """
48 |
49 | def __init__(self, **options):
50 | self.options = options
51 |
52 | def filter(self, lexer, stream):
53 | raise NotImplementedError()
54 |
55 |
56 | class FunctionFilter(Filter):
57 | """
58 | Abstract class used by `simplefilter` to create simple
59 | function filters on the fly. The `simplefilter` decorator
60 | automatically creates subclasses of this class for
61 | functions passed to it.
62 | """
63 | function = None
64 |
65 | def __init__(self, **options):
66 | if not hasattr(self, 'function'):
67 | raise TypeError('%r used without bound function' %
68 | self.__class__.__name__)
69 | Filter.__init__(self, **options)
70 |
71 | def filter(self, lexer, stream):
72 | # pylint: disable=not-callable
73 | for ttype, value in self.function(lexer, stream, self.options):
74 | yield ttype, value
75 |
--------------------------------------------------------------------------------
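
Putting the module above to work: the `lowercase` filter from the `simplefilter` docstring, instantiated and applied to a lexer's token stream with `apply_filters()`.

```python
from pygments.filter import apply_filters, simplefilter
from pygments.lexers import PythonLexer

@simplefilter
def lowercase(self, lexer, stream, options):
    for ttype, value in stream:
        yield ttype, value.lower()

# apply_filters() takes the raw token stream and a list of filter instances.
tokens = PythonLexer().get_tokens('PRINT = 1\n')
for ttype, value in apply_filters(tokens, [lowercase()]):
    print(ttype, repr(value))
```
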
/pygments/formatter.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.formatter
4 | ~~~~~~~~~~~~~~~~~~
5 |
6 | Base formatter class.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import codecs
13 |
14 | from pygments.util import get_bool_opt, string_types
15 | from pygments.styles import get_style_by_name
16 |
17 | __all__ = ['Formatter']
18 |
19 |
20 | def _lookup_style(style):
21 | if isinstance(style, string_types):
22 | return get_style_by_name(style)
23 | return style
24 |
25 |
26 | class Formatter(object):
27 | """
28 | Converts a token stream to text.
29 |
30 | Options accepted:
31 |
32 | ``style``
33 | The style to use, can be a string or a Style subclass
34 | (default: "default"). Not used by e.g. the
35 | TerminalFormatter.
36 | ``full``
37 | Tells the formatter to output a "full" document, i.e.
38 | a complete self-contained document. This doesn't have
39 | any effect for some formatters (default: false).
40 | ``title``
41 | If ``full`` is true, the title that should be used to
42 | caption the document (default: '').
43 | ``encoding``
44 | If given, must be an encoding name. This will be used to
45 | convert the Unicode token strings to byte strings in the
46 | output. If it is "" or None, Unicode strings will be written
47 | to the output file, which most file-like objects do not
48 | support (default: None).
49 | ``outencoding``
50 | Overrides ``encoding`` if given.
51 | """
52 |
53 | #: Name of the formatter
54 | name = None
55 |
56 | #: Shortcuts for the formatter
57 | aliases = []
58 |
59 | #: fn match rules
60 | filenames = []
61 |
62 | #: If True, this formatter outputs Unicode strings when no encoding
63 | #: option is given.
64 | unicodeoutput = True
65 |
66 | def __init__(self, **options):
67 | self.style = _lookup_style(options.get('style', 'default'))
68 | self.full = get_bool_opt(options, 'full', False)
69 | self.title = options.get('title', '')
70 | self.encoding = options.get('encoding', None) or None
71 | if self.encoding in ('guess', 'chardet'):
72 | # can happen for e.g. pygmentize -O encoding=guess
73 | self.encoding = 'utf-8'
74 | self.encoding = options.get('outencoding') or self.encoding
75 | self.options = options
76 |
77 | def get_style_defs(self, arg=''):
78 | """
79 | Return the style definitions for the current style as a string.
80 |
81 | ``arg`` is an additional argument whose meaning depends on the
82 | formatter used. Note that ``arg`` can also be a list or tuple
83 | for some formatters like the html formatter.
84 | """
85 | return ''
86 |
87 | def format(self, tokensource, outfile):
88 | """
89 | Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
90 | tuples and write it into ``outfile``.
91 | """
92 | if self.encoding:
93 | # wrap the outfile in a StreamWriter
94 | outfile = codecs.lookup(self.encoding)[3](outfile)
95 | return self.format_unencoded(tokensource, outfile)
96 |
--------------------------------------------------------------------------------
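
A minimal sketch of a custom formatter built on the base class above. Since `format()` handles the encoding wrapper and delegates to `format_unencoded()`, a subclass typically only needs to implement that method; this toy class (hypothetical, not part of Pygments) simply strips all styling.

```python
from pygments.formatter import Formatter

class PlainTextFormatter(Formatter):
    """Toy formatter: discard all styling and emit the raw token text."""
    name = 'Plain text (sketch)'
    aliases = []
    filenames = []

    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            outfile.write(value)
```
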
/pygments/formatters/bbcode.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.formatters.bbcode
4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | BBcode formatter.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 |
13 | from pygments.formatter import Formatter
14 | from pygments.util import get_bool_opt
15 |
16 | __all__ = ['BBCodeFormatter']
17 |
18 |
19 | class BBCodeFormatter(Formatter):
20 | """
21 | Format tokens with BBcodes. These formatting codes are used by many
22 | bulletin boards, so you can highlight your sourcecode with pygments before
23 | posting it there.
24 |
25 | This formatter has no support for background colors and borders, as there
26 | are no common BBcode tags for that.
27 |
28 | Some board systems (e.g. phpBB) don't support colors in their [code] tag,
29 | so you can't use the highlighting together with that tag.
30 | Text in a [code] tag usually is shown with a monospace font (which this
31 | formatter can do with the ``monofont`` option) and no spaces (which you
32 | need for indentation) are removed.
33 |
34 | Additional options accepted:
35 |
36 | `style`
37 | The style to use, can be a string or a Style subclass (default:
38 | ``'default'``).
39 |
40 | `codetag`
41 | If set to true, put the output into ``[code]`` tags (default:
42 | ``false``)
43 |
44 | `monofont`
45 | If set to true, add a tag to show the code with a monospace font
46 | (default: ``false``).
47 | """
48 | name = 'BBCode'
49 | aliases = ['bbcode', 'bb']
50 | filenames = []
51 |
52 | def __init__(self, **options):
53 | Formatter.__init__(self, **options)
54 | self._code = get_bool_opt(options, 'codetag', False)
55 | self._mono = get_bool_opt(options, 'monofont', False)
56 |
57 | self.styles = {}
58 | self._make_styles()
59 |
60 | def _make_styles(self):
61 | for ttype, ndef in self.style:
62 | start = end = ''
63 | if ndef['color']:
64 | start += '[color=#%s]' % ndef['color']
65 | end = '[/color]' + end
66 | if ndef['bold']:
67 | start += '[b]'
68 | end = '[/b]' + end
69 | if ndef['italic']:
70 | start += '[i]'
71 | end = '[/i]' + end
72 | if ndef['underline']:
73 | start += '[u]'
74 | end = '[/u]' + end
75 | # there are no common BBcodes for background-color and border
76 |
77 | self.styles[ttype] = start, end
78 |
79 | def format_unencoded(self, tokensource, outfile):
80 | if self._code:
81 | outfile.write('[code]')
82 | if self._mono:
83 | outfile.write('[font=monospace]')
84 |
85 | lastval = ''
86 | lasttype = None
87 |
88 | for ttype, value in tokensource:
89 | while ttype not in self.styles:
90 | ttype = ttype.parent
91 | if ttype == lasttype:
92 | lastval += value
93 | else:
94 | if lastval:
95 | start, end = self.styles[lasttype]
96 | outfile.write(''.join((start, lastval, end)))
97 | lastval = value
98 | lasttype = ttype
99 |
100 | if lastval:
101 | start, end = self.styles[lasttype]
102 | outfile.write(''.join((start, lastval, end)))
103 |
104 | if self._mono:
105 | outfile.write('[/font]')
106 | if self._code:
107 | outfile.write('[/code]')
108 | if self._code or self._mono:
109 | outfile.write('\n')
110 |
--------------------------------------------------------------------------------
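
Example usage of the formatter above, with the `codetag` and `monofont` options from its docstring:

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import BBCodeFormatter

bb = highlight('x = 42\n', PythonLexer(),
               BBCodeFormatter(codetag=True, monofont=True))
print(bb)  # [code][font=monospace]...[/font][/code]
```
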
/pygments/lexers/agile.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.agile
4 | ~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Just export lexer classes previously contained in this module.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexers.lisp import SchemeLexer
13 | from pygments.lexers.jvm import IokeLexer, ClojureLexer
14 | from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \
15 | PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer
16 | from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer
17 | from pygments.lexers.perl import PerlLexer, Perl6Lexer
18 | from pygments.lexers.d import CrocLexer, MiniDLexer
19 | from pygments.lexers.iolang import IoLexer
20 | from pygments.lexers.tcl import TclLexer
21 | from pygments.lexers.factor import FactorLexer
22 | from pygments.lexers.scripting import LuaLexer, MoonScriptLexer
23 |
24 | __all__ = []
25 |
--------------------------------------------------------------------------------
/pygments/lexers/ambient.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.ambient
4 | ~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for AmbientTalk language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, include, words
15 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
16 | Number, Punctuation
17 |
18 | __all__ = ['AmbientTalkLexer']
19 |
20 |
21 | class AmbientTalkLexer(RegexLexer):
22 | """
23 | Lexer for `AmbientTalk `_ source code.
24 |
25 | .. versionadded:: 2.0
26 | """
27 | name = 'AmbientTalk'
28 | filenames = ['*.at']
29 | aliases = ['at', 'ambienttalk', 'ambienttalk/2']
30 | mimetypes = ['text/x-ambienttalk']
31 |
32 | flags = re.MULTILINE | re.DOTALL
33 |
34 | builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:',
35 | 'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:',
36 | 'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:',
37 | 'mirroredBy:', 'is:'))
38 | tokens = {
39 | 'root': [
40 | (r'\s+', Text),
41 | (r'//.*?\n', Comment.Single),
42 | (r'/\*.*?\*/', Comment.Multiline),
43 | (r'(def|deftype|import|alias|exclude)\b', Keyword),
44 | (builtin, Name.Builtin),
45 | (r'(true|false|nil)\b', Keyword.Constant),
46 | (r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'),
47 | (r'"(\\\\|\\"|[^"])*"', String),
48 | (r'\|', Punctuation, 'arglist'),
49 | (r'<:|[*^!%&<>+=,./?-]|:=', Operator),
50 | (r"`[a-zA-Z_]\w*", String.Symbol),
51 | (r"[a-zA-Z_]\w*:", Name.Function),
52 | (r"[{}()\[\];`]", Punctuation),
53 | (r'(self|super)\b', Name.Variable.Instance),
54 | (r"[a-zA-Z_]\w*", Name.Variable),
55 | (r"@[a-zA-Z_]\w*", Name.Class),
56 | (r"@\[", Name.Class, 'annotations'),
57 | include('numbers'),
58 | ],
59 | 'numbers': [
60 | (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
61 | (r'\d+', Number.Integer)
62 | ],
63 | 'namespace': [
64 | (r'[a-zA-Z_]\w*\.', Name.Namespace),
65 | (r'[a-zA-Z_]\w*:', Name.Function, '#pop'),
66 | (r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop')
67 | ],
68 | 'annotations': [
69 | (r"(.*?)\]", Name.Class, '#pop')
70 | ],
71 | 'arglist': [
72 | (r'\|', Punctuation, '#pop'),
73 | (r'\s*(,)\s*', Punctuation),
74 | (r'[a-zA-Z_]\w*', Name.Variable),
75 | ],
76 | }
77 |
--------------------------------------------------------------------------------
/pygments/lexers/apl.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.apl
4 | ~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for APL.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number, Punctuation
15 |
16 | __all__ = ['APLLexer']
17 |
18 |
19 | class APLLexer(RegexLexer):
20 | """
21 | A simple APL lexer.
22 |
23 | .. versionadded:: 2.0
24 | """
25 | name = 'APL'
26 | aliases = ['apl']
27 | filenames = ['*.apl']
28 |
29 | tokens = {
30 | 'root': [
31 | # Whitespace
32 | # ==========
33 | (r'\s+', Text),
34 | #
35 | # Comment
36 | # =======
37 | # '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
38 | (u'[⍝#].*$', Comment.Single),
39 | #
40 | # Strings
41 | # =======
42 | (r'\'((\'\')|[^\'])*\'', String.Single),
43 | (r'"(("")|[^"])*"', String.Double), # supported by NGN APL
44 | #
45 | # Punctuation
46 | # ===========
47 | # This token type is used for diamond and parenthesis
48 | # but not for bracket and ; (see below)
49 | (u'[⋄◇()]', Punctuation),
50 | #
51 | # Array indexing
52 | # ==============
53 | # Since this token type is very important in APL, it is not included in
54 | # the punctuation token type but rather in the following one
55 | (r'[\[\];]', String.Regex),
56 | #
57 | # Distinguished names
58 | # ===================
59 | # following IBM APL2 standard
60 | (u'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
61 | #
62 | # Labels
63 | # ======
64 | # following IBM APL2 standard
65 | # (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
66 | #
67 | # Variables
68 | # =========
69 | # following IBM APL2 standard
70 | (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
71 | #
72 | # Numbers
73 | # =======
74 | (u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
75 | u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
76 | Number),
77 | #
78 | # Operators
79 | # ==========
80 | (u'[\\.\\\\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type
81 | (u'[+\\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]',
82 | Operator),
83 | #
84 | # Constant
85 | # ========
86 | (u'⍬', Name.Constant),
87 | #
88 | # Quad symbol
89 | # ===========
90 | (u'[⎕⍞]', Name.Variable.Global),
91 | #
92 | # Arrows left/right
93 | # =================
94 | (u'[←→]', Keyword.Declaration),
95 | #
96 | # D-Fn
97 | # ====
98 | (u'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
99 | (r'[{}]', Keyword.Type),
100 | ],
101 | }
102 |
--------------------------------------------------------------------------------
/pygments/lexers/boa.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.boa
4 | ~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for the Boa language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, words
15 | from pygments.token import String, Comment, Keyword, Name, Number, Text, \
16 | Operator, Punctuation
17 |
18 | __all__ = ['BoaLexer']
19 |
20 | line_re = re.compile('.*?\n')
21 |
22 |
23 | class BoaLexer(RegexLexer):
24 | """
25 | Lexer for the `Boa `_ language.
26 |
27 | .. versionadded:: 2.4
28 | """
29 | name = 'Boa'
30 | aliases = ['boa']
31 | filenames = ['*.boa']
32 |
33 | reserved = words(
34 | ('input', 'output', 'of', 'weight', 'before', 'after', 'stop',
35 | 'ifall', 'foreach', 'exists', 'function', 'break', 'switch', 'case',
36 | 'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'),
37 | suffix=r'\b', prefix=r'\b')
38 | keywords = words(
39 | ('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum',
40 | 'top', 'string', 'int', 'bool', 'float', 'time', 'false', 'true',
41 | 'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b')
42 | classes = words(
43 | ('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind',
44 | 'ChangedFile', 'FileKind', 'ASTRoot', 'Namespace', 'Declaration', 'Type',
45 | 'Method', 'Variable', 'Statement', 'Expression', 'Modifier',
46 | 'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility',
47 | 'TypeKind', 'Person', 'ChangeKind'),
48 | suffix=r'\b', prefix=r'\b')
49 | operators = ('->', ':=', ':', '=', '<<', '!', '++', '||',
50 | '&&', '+', '-', '*', ">", "<")
51 | string_sep = ('`', '\"')
52 | built_in_functions = words(
53 | (
54 | # Array functions
55 | 'new', 'sort',
56 | # Date & Time functions
57 | 'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now',
58 | 'addday', 'addmonth', 'addweek', 'addyear', 'dayofmonth', 'dayofweek',
59 | 'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute',
60 | 'trunctomonth', 'trunctosecond', 'trunctoyear',
61 | # Map functions
62 | 'clear', 'haskey', 'keys', 'lookup', 'remove', 'values',
63 | # Math functions
64 | 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
65 | 'ceil', 'cos', 'cosh', 'exp', 'floor', 'highbit', 'isfinite', 'isinf',
66 | 'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow',
67 | 'rand', 'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc',
68 | # Other functions
69 | 'def', 'hash', 'len',
70 | # Set functions
71 | 'add', 'contains', 'remove',
72 | # String functions
73 | 'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex',
74 | 'split', 'splitall', 'splitn', 'strfind', 'strreplace', 'strrfind',
75 | 'substring', 'trim', 'uppercase',
76 | # Type Conversion functions
77 | 'bool', 'float', 'int', 'string', 'time',
78 | # Domain-Specific functions
79 | 'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind',
80 | 'isliteral',
81 | ),
82 | prefix=r'\b',
83 | suffix=r'\(')
84 |
85 | tokens = {
86 | 'root': [
87 | (r'#.*?$', Comment.Single),
88 | (r'/\*.*?\*/', Comment.Multiline),
89 | (reserved, Keyword.Reserved),
90 | (built_in_functions, Name.Function),
91 | (keywords, Keyword.Type),
92 | (classes, Name.Classes),
93 | (words(operators), Operator),
94 | (r'[][(),;{}\\.]', Punctuation),
95 | (r'"(\\\\|\\"|[^"])*"', String),
96 | (r'`(\\\\|\\`|[^`])*`', String),
97 | (words(string_sep), String.Delimeter),
98 | (r'[a-zA-Z_]+', Name.Variable),
99 | (r'[0-9]+', Number.Integer),
100 | (r'\s+?', Text), # Whitespace
101 | ]
102 | }
103 |
--------------------------------------------------------------------------------
/pygments/lexers/capnproto.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.capnproto
4 | ~~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for the Cap'n Proto schema language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, default
15 | from pygments.token import Text, Comment, Keyword, Name, Literal
16 |
17 | __all__ = ['CapnProtoLexer']
18 |
19 |
20 | class CapnProtoLexer(RegexLexer):
21 | """
22 | For `Cap'n Proto `_ source.
23 |
24 | .. versionadded:: 2.2
25 | """
26 | name = 'Cap\'n Proto'
27 | filenames = ['*.capnp']
28 | aliases = ['capnp']
29 |
30 | flags = re.MULTILINE | re.UNICODE
31 |
32 | tokens = {
33 | 'root': [
34 | (r'#.*?$', Comment.Single),
35 | (r'@[0-9a-zA-Z]*', Name.Decorator),
36 | (r'=', Literal, 'expression'),
37 | (r':', Name.Class, 'type'),
38 | (r'\$', Name.Attribute, 'annotation'),
39 | (r'(struct|enum|interface|union|import|using|const|annotation|'
40 | r'extends|in|of|on|as|with|from|fixed)\b',
41 | Keyword),
42 | (r'[\w.]+', Name),
43 | (r'[^#@=:$\w]+', Text),
44 | ],
45 | 'type': [
46 | (r'[^][=;,(){}$]+', Name.Class),
47 | (r'[\[(]', Name.Class, 'parentype'),
48 | default('#pop'),
49 | ],
50 | 'parentype': [
51 | (r'[^][;()]+', Name.Class),
52 | (r'[\[(]', Name.Class, '#push'),
53 | (r'[])]', Name.Class, '#pop'),
54 | default('#pop'),
55 | ],
56 | 'expression': [
57 | (r'[^][;,(){}$]+', Literal),
58 | (r'[\[(]', Literal, 'parenexp'),
59 | default('#pop'),
60 | ],
61 | 'parenexp': [
62 | (r'[^][;()]+', Literal),
63 | (r'[\[(]', Literal, '#push'),
64 | (r'[])]', Literal, '#pop'),
65 | default('#pop'),
66 | ],
67 | 'annotation': [
68 | (r'[^][;,(){}=:]+', Name.Attribute),
69 | (r'[\[(]', Name.Attribute, 'annexp'),
70 | default('#pop'),
71 | ],
72 | 'annexp': [
73 | (r'[^][;()]+', Name.Attribute),
74 | (r'[\[(]', Name.Attribute, '#push'),
75 | (r'[])]', Name.Attribute, '#pop'),
76 | default('#pop'),
77 | ],
78 | }
79 |
--------------------------------------------------------------------------------
/pygments/lexers/chapel.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.chapel
4 | ~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for the Chapel language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, bygroups, words
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number, Punctuation
15 |
16 | __all__ = ['ChapelLexer']
17 |
18 |
19 | class ChapelLexer(RegexLexer):
20 | """
21 | For `Chapel `_ source.
22 |
23 | .. versionadded:: 2.0
24 | """
25 | name = 'Chapel'
26 | filenames = ['*.chpl']
27 | aliases = ['chapel', 'chpl']
28 | # mimetypes = ['text/x-chapel']
29 |
30 | tokens = {
31 | 'root': [
32 | (r'\n', Text),
33 | (r'\s+', Text),
34 | (r'\\\n', Text),
35 |
36 | (r'//(.*?)\n', Comment.Single),
37 | (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
38 |
39 | (r'(config|const|in|inout|out|param|ref|type|var)\b',
40 | Keyword.Declaration),
41 | (r'(false|nil|true)\b', Keyword.Constant),
42 | (r'(bool|complex|imag|int|opaque|range|real|string|uint)\b',
43 | Keyword.Type),
44 | (words((
45 | 'align', 'as', 'atomic',
46 | 'begin', 'borrowed', 'break', 'by',
47 | 'catch', 'cobegin', 'coforall', 'continue',
48 | 'delete', 'dmapped', 'do', 'domain',
49 | 'else', 'enum', 'except', 'export', 'extern',
50 | 'for', 'forall',
51 | 'if', 'index', 'inline',
52 | 'label', 'lambda', 'let', 'local',
53 | 'new', 'noinit',
54 | 'on', 'only', 'otherwise', 'override', 'owned',
55 | 'pragma', 'private', 'prototype', 'public',
56 | 'reduce', 'require', 'return',
57 | 'scan', 'select', 'serial', 'shared', 'single', 'sparse', 'subdomain', 'sync',
58 | 'then', 'throw', 'throws', 'try',
59 | 'unmanaged', 'use',
60 | 'when', 'where', 'while', 'with',
61 | 'yield',
62 | 'zip'), suffix=r'\b'),
63 | Keyword),
64 | (r'(iter)((?:\s)+)', bygroups(Keyword, Text), 'procname'),
65 | (r'(proc)((?:\s)+)', bygroups(Keyword, Text), 'procname'),
66 | (r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
67 | 'classname'),
68 |
69 | # imaginary integers
70 | (r'\d+i', Number),
71 | (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
72 | (r'\.\d+([Ee][-+]\d+)?i', Number),
73 | (r'\d+[Ee][-+]\d+i', Number),
74 |
75 | # reals cannot end with a period due to lexical ambiguity with
76 | # .. operator. See reference for rationale.
77 | (r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
78 | (r'\d+[eE][+-]?[0-9]+i?', Number.Float),
79 |
80 | # integer literals
81 | # -- binary
82 | (r'0[bB][01]+', Number.Bin),
83 | # -- hex
84 | (r'0[xX][0-9a-fA-F]+', Number.Hex),
85 | # -- octal
86 | (r'0[oO][0-7]+', Number.Oct),
87 | # -- decimal
88 | (r'[0-9]+', Number.Integer),
89 |
90 | # strings
91 | (r'"(\\\\|\\"|[^"])*"', String),
92 | (r"'(\\\\|\\'|[^'])*'", String),
93 |
94 | # tokens
95 | (r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
96 | r'<=>|<~>|\.\.|by|#|\.\.\.|'
97 | r'&&|\|\||!|&|\||\^|~|<<|>>|'
98 | r'==|!=|<=|>=|<|>|'
99 | r'[+\-*/%]|\*\*)', Operator),
100 | (r'[:;,.?()\[\]{}]', Punctuation),
101 |
102 | # identifiers
103 | (r'[a-zA-Z_][\w$]*', Name.Other),
104 | ],
105 | 'classname': [
106 | (r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
107 | ],
108 | 'procname': [
109 | (r'([a-zA-Z_][.\w$]*|\~[a-zA-Z_][.\w$]*|[+*/!~%<>=&^|\-]{1,2})',
110 | Name.Function, '#pop'),
111 | ],
112 | }
113 |
--------------------------------------------------------------------------------
/pygments/lexers/compiled.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.compiled
4 | ~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Just export lexer classes previously contained in this module.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexers.jvm import JavaLexer, ScalaLexer
13 | from pygments.lexers.c_cpp import CLexer, CppLexer
14 | from pygments.lexers.d import DLexer
15 | from pygments.lexers.objective import ObjectiveCLexer, \
16 | ObjectiveCppLexer, LogosLexer
17 | from pygments.lexers.go import GoLexer
18 | from pygments.lexers.rust import RustLexer
19 | from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer
20 | from pygments.lexers.pascal import DelphiLexer, Modula2Lexer, AdaLexer
21 | from pygments.lexers.business import CobolLexer, CobolFreeformatLexer
22 | from pygments.lexers.fortran import FortranLexer
23 | from pygments.lexers.prolog import PrologLexer
24 | from pygments.lexers.python import CythonLexer
25 | from pygments.lexers.graphics import GLShaderLexer
26 | from pygments.lexers.ml import OcamlLexer
27 | from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer
28 | from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer
29 | from pygments.lexers.ooc import OocLexer
30 | from pygments.lexers.felix import FelixLexer
31 | from pygments.lexers.nimrod import NimrodLexer
32 | from pygments.lexers.crystal import CrystalLexer
33 |
34 | __all__ = []
35 |
--------------------------------------------------------------------------------
/pygments/lexers/eiffel.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.eiffel
4 | ~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for the Eiffel language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, include, words
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number, Punctuation
15 |
16 | __all__ = ['EiffelLexer']
17 |
18 |
19 | class EiffelLexer(RegexLexer):
20 | """
21 | For `Eiffel `_ source code.
22 |
23 | .. versionadded:: 2.0
24 | """
25 | name = 'Eiffel'
26 | aliases = ['eiffel']
27 | filenames = ['*.e']
28 | mimetypes = ['text/x-eiffel']
29 |
30 | tokens = {
31 | 'root': [
32 | (r'[^\S\n]+', Text),
33 | (r'--.*?\n', Comment.Single),
34 | (r'[^\S\n]+', Text),
35 | # Please note that keyword and operator are case insensitive.
36 | (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant),
37 | (r'(?i)(and(\s+then)?|not|xor|implies|or(\s+else)?)\b', Operator.Word),
38 | (words((
39 | 'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached',
40 | 'attribute', 'check', 'class', 'convert', 'create', 'debug',
41 | 'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure',
42 | 'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if',
43 | 'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none',
44 | 'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename',
45 | 'require', 'rescue', 'retry', 'select', 'separate', 'then',
46 | 'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'),
47 | Keyword.Reserved),
48 | (r'"\[(([^\]%]|\n)|%(.|\n)|\][^"])*?\]"', String),
49 | (r'"([^"%\n]|%.)*?"', String),
50 | include('numbers'),
51 | (r"'([^'%]|%'|%%)'", String.Char),
52 | (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator),
53 | (r"([{}():;,.])", Punctuation),
54 | (r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name),
55 | (r'([A-Z][A-Z0-9_]*)', Name.Class),
56 | (r'\n+', Text),
57 | ],
58 | 'numbers': [
59 | (r'0[xX][a-fA-F0-9]+', Number.Hex),
60 | (r'0[bB][01]+', Number.Bin),
61 | (r'0[cC][0-7]+', Number.Oct),
62 | (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float),
63 | (r'[0-9]+', Number.Integer),
64 | ],
65 | }
66 |
--------------------------------------------------------------------------------
/pygments/lexers/elm.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.elm
4 | ~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for the Elm programming language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, words, include
13 | from pygments.token import Comment, Keyword, Name, Number, Punctuation, String, Text
14 |
15 | __all__ = ['ElmLexer']
16 |
17 |
18 | class ElmLexer(RegexLexer):
19 | """
20 | For `Elm `_ source code.
21 |
22 | .. versionadded:: 2.1
23 | """
24 |
25 | name = 'Elm'
26 | aliases = ['elm']
27 | filenames = ['*.elm']
28 | mimetypes = ['text/x-elm']
29 |
30 | validName = r'[a-z_][a-zA-Z0-9_\']*'
31 |
32 | specialName = r'^main '
33 |
34 | builtinOps = (
35 | '~', '||', '|>', '|', '`', '^', '\\', '\'', '>>', '>=', '>', '==',
36 | '=', '<~', '<|', '<=', '<<', '<-', '<', '::', ':', '/=', '//', '/',
37 | '..', '.', '->', '-', '++', '+', '*', '&&', '%',
38 | )
39 |
40 | reservedWords = words((
41 | 'alias', 'as', 'case', 'else', 'if', 'import', 'in',
42 | 'let', 'module', 'of', 'port', 'then', 'type', 'where',
43 | ), suffix=r'\b')
44 |
45 | tokens = {
46 | 'root': [
47 |
48 | # Comments
49 | (r'\{-', Comment.Multiline, 'comment'),
50 | (r'--.*', Comment.Single),
51 |
52 | # Whitespace
53 | (r'\s+', Text),
54 |
55 | # Strings
56 | (r'"', String, 'doublequote'),
57 |
58 | # Modules
59 | (r'^\s*module\s*', Keyword.Namespace, 'imports'),
60 |
61 | # Imports
62 | (r'^\s*import\s*', Keyword.Namespace, 'imports'),
63 |
64 | # Shaders
65 | (r'\[glsl\|.*', Name.Entity, 'shader'),
66 |
67 | # Keywords
68 | (reservedWords, Keyword.Reserved),
69 |
70 | # Types
71 | (r'[A-Z]\w*', Keyword.Type),
72 |
73 | # Main
74 | (specialName, Keyword.Reserved),
75 |
76 | # Prefix Operators
77 | (words((builtinOps), prefix=r'\(', suffix=r'\)'), Name.Function),
78 |
79 | # Infix Operators
80 | (words(builtinOps), Name.Function),
81 |
82 | # Numbers
83 | include('numbers'),
84 |
85 | # Variable Names
86 | (validName, Name.Variable),
87 |
88 | # Parens
89 | (r'[,()\[\]{}]', Punctuation),
90 |
91 | ],
92 |
93 | 'comment': [
94 | (r'-(?!\})', Comment.Multiline),
95 | (r'\{-', Comment.Multiline, 'comment'),
96 | (r'[^-}]', Comment.Multiline),
97 | (r'-\}', Comment.Multiline, '#pop'),
98 | ],
99 |
100 | 'doublequote': [
101 | (r'\\u[0-9a-fA-F]{4}', String.Escape),
102 | (r'\\[nrfvb\\"]', String.Escape),
103 | (r'[^"]', String),
104 | (r'"', String, '#pop'),
105 | ],
106 |
107 | 'imports': [
108 | (r'\w+(\.\w+)*', Name.Class, '#pop'),
109 | ],
110 |
111 | 'numbers': [
112 | (r'_?\d+\.(?=\d+)', Number.Float),
113 | (r'_?\d+', Number.Integer),
114 | ],
115 |
116 | 'shader': [
117 | (r'\|(?!\])', Name.Entity),
118 | (r'\|\]', Name.Entity, '#pop'),
119 | (r'.*\n', Name.Entity),
120 | ],
121 | }
122 |
--------------------------------------------------------------------------------
/pygments/lexers/ezhil.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.ezhil
4 | ~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Pygments lexers for the Ezhil language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 | from pygments.lexer import RegexLexer, include, words
14 | from pygments.token import Keyword, Text, Comment, Name
15 | from pygments.token import String, Number, Punctuation, Operator
16 |
17 | __all__ = ['EzhilLexer']
18 |
19 |
20 | class EzhilLexer(RegexLexer):
21 | """
22 | Lexer for `Ezhil, a Tamil script-based programming language <http://ezhillang.org>`_
23 |
24 | .. versionadded:: 2.1
25 | """
26 | name = 'Ezhil'
27 | aliases = ['ezhil']
28 | filenames = ['*.n']
29 | mimetypes = ['text/x-ezhil']
30 | flags = re.MULTILINE | re.UNICODE
31 | # Refer to tamil.utf8.tamil_letters from open-tamil for a stricter version of this.
32 | # This much simpler version is close enough, and includes combining marks.
33 | _TALETTERS = u'[a-zA-Z_]|[\u0b80-\u0bff]'
34 | tokens = {
35 | 'root': [
36 | include('keywords'),
37 | (r'#.*\n', Comment.Single),
38 | (r'[@+/*,^\-%]|[!<>=]=?|&&?|\|\|?', Operator),
39 | (u'இல்', Operator.Word),
40 | (words((u'assert', u'max', u'min',
41 | u'நீளம்', u'சரம்_இடமாற்று', u'சரம்_கண்டுபிடி',
42 | u'பட்டியல்', u'பின்இணை', u'வரிசைப்படுத்து',
43 | u'எடு', u'தலைகீழ்', u'நீட்டிக்க', u'நுழைக்க', u'வை',
44 | u'கோப்பை_திற', u'கோப்பை_எழுது', u'கோப்பை_மூடு',
45 | u'pi', u'sin', u'cos', u'tan', u'sqrt', u'hypot', u'pow',
46 | u'exp', u'log', u'log10', u'exit',
47 | ), suffix=r'\b'), Name.Builtin),
48 | (r'(True|False)\b', Keyword.Constant),
49 | (r'[^\S\n]+', Text),
50 | include('identifier'),
51 | include('literal'),
52 | (r'[(){}\[\]:;.]', Punctuation),
53 | ],
54 | 'keywords': [
55 | (u'பதிப்பி|தேர்ந்தெடு|தேர்வு|ஏதேனில்|ஆனால்|இல்லைஆனால்|இல்லை|ஆக|ஒவ்வொன்றாக|இல்|வரை|செய்|முடியேனில்|பின்கொடு|முடி|நிரல்பாகம்|தொடர்|நிறுத்து', Keyword),
56 | ],
57 | 'identifier': [
58 | (u'(?:'+_TALETTERS+u')(?:[0-9]|'+_TALETTERS+u')*', Name),
59 | ],
60 | 'literal': [
61 | (r'".*?"', String),
62 | (r'(?u)\d+((\.\d*)?[eE][+-]?\d+|\.\d*)', Number.Float),
63 | (r'(?u)\d+', Number.Integer),
64 | ]
65 | }
66 |
67 | def __init__(self, **options):
68 | super(EzhilLexer, self).__init__(**options)
69 | self.encoding = options.get('encoding', 'utf-8')
70 |
--------------------------------------------------------------------------------
/pygments/lexers/floscript.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.floscript
4 | ~~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for FloScript.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, include
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number, Punctuation
15 |
16 | __all__ = ['FloScriptLexer']
17 |
18 |
19 | class FloScriptLexer(RegexLexer):
20 | """
21 | For `FloScript <https://github.com/ioflo/ioflo>`_ configuration language source code.
22 |
23 | .. versionadded:: 2.4
24 | """
25 |
26 | name = 'FloScript'
27 | aliases = ['floscript', 'flo']
28 | filenames = ['*.flo']
29 |
30 | def innerstring_rules(ttype):
31 | return [
32 | # the old style '%s' % (...) string formatting
33 | (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
34 | '[hlL]?[E-GXc-giorsux%]', String.Interpol),
35 | # backslashes, quotes and formatting signs must be parsed one at a time
36 | (r'[^\\\'"%\n]+', ttype),
37 | (r'[\'"\\]', ttype),
38 | # unhandled string formatting sign
39 | (r'%', ttype),
40 | # newlines are an error (use "nl" state)
41 | ]
42 |
43 | tokens = {
44 | 'root': [
45 | (r'\n', Text),
46 | (r'[^\S\n]+', Text),
47 |
48 | (r'[]{}:(),;[]', Punctuation),
49 | (r'\\\n', Text),
50 | (r'\\', Text),
51 | (r'(to|by|with|from|per|for|cum|qua|via|as|at|in|of|on|re|is|if|be|into|'
52 | r'and|not)\b', Operator.Word),
53 | (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
54 | (r'(load|init|server|logger|log|loggee|first|over|under|next|done|timeout|'
55 | r'repeat|native|benter|enter|recur|exit|precur|renter|rexit|print|put|inc|'
56 | r'copy|set|aux|rear|raze|go|let|do|bid|ready|start|stop|run|abort|use|flo|'
57 | r'give|take)\b', Name.Builtin),
58 | (r'(frame|framer|house)\b', Keyword),
59 | ('"', String, 'string'),
60 |
61 | include('name'),
62 | include('numbers'),
63 | (r'#.+$', Comment.Single),
64 | ],
65 | 'string': [
66 | ('[^"]+', String),
67 | ('"', String, '#pop'),
68 | ],
69 | 'numbers': [
70 | (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
71 | (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
72 | (r'0[0-7]+j?', Number.Oct),
73 | (r'0[bB][01]+', Number.Bin),
74 | (r'0[xX][a-fA-F0-9]+', Number.Hex),
75 | (r'\d+L', Number.Integer.Long),
76 | (r'\d+j?', Number.Integer)
77 | ],
78 |
79 | 'name': [
80 | (r'@[\w.]+', Name.Decorator),
81 | (r'[a-zA-Z_]\w*', Name),
82 | ],
83 | }
84 |
--------------------------------------------------------------------------------
/pygments/lexers/functional.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.functional
4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Just export lexer classes previously contained in this module.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \
13 | NewLispLexer, ShenLexer
14 | from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \
15 | KokaLexer
16 | from pygments.lexers.theorem import CoqLexer
17 | from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \
18 | ElixirConsoleLexer, ElixirLexer
19 | from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer
20 |
21 | __all__ = []
22 |
--------------------------------------------------------------------------------
/pygments/lexers/go.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.go
4 | ~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for the Google Go language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, bygroups, words
15 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
16 | Number, Punctuation
17 |
18 | __all__ = ['GoLexer']
19 |
20 |
21 | class GoLexer(RegexLexer):
22 | """
23 | For `Go <http://golang.org>`_ source.
24 |
25 | .. versionadded:: 1.2
26 | """
27 | name = 'Go'
28 | filenames = ['*.go']
29 | aliases = ['go']
30 | mimetypes = ['text/x-gosrc']
31 |
32 | flags = re.MULTILINE | re.UNICODE
33 |
34 | tokens = {
35 | 'root': [
36 | (r'\n', Text),
37 | (r'\s+', Text),
38 | (r'\\\n', Text), # line continuations
39 | (r'//(.*?)\n', Comment.Single),
40 | (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
41 | (r'(import|package)\b', Keyword.Namespace),
42 | (r'(var|func|struct|map|chan|type|interface|const)\b',
43 | Keyword.Declaration),
44 | (words((
45 | 'break', 'default', 'select', 'case', 'defer', 'go',
46 | 'else', 'goto', 'switch', 'fallthrough', 'if', 'range',
47 | 'continue', 'for', 'return'), suffix=r'\b'),
48 | Keyword),
49 | (r'(true|false|iota|nil)\b', Keyword.Constant),
50 | # It seems the builtin types aren't actually keywords, but
51 | # can be used as functions. So we need two declarations.
52 | (words((
53 | 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
54 | 'int', 'int8', 'int16', 'int32', 'int64',
55 | 'float', 'float32', 'float64',
56 | 'complex64', 'complex128', 'byte', 'rune',
57 | 'string', 'bool', 'error', 'uintptr',
58 | 'print', 'println', 'panic', 'recover', 'close', 'complex',
59 | 'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete',
60 | 'new', 'make'), suffix=r'\b(\()'),
61 | bygroups(Name.Builtin, Punctuation)),
62 | (words((
63 | 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
64 | 'int', 'int8', 'int16', 'int32', 'int64',
65 | 'float', 'float32', 'float64',
66 | 'complex64', 'complex128', 'byte', 'rune',
67 | 'string', 'bool', 'error', 'uintptr'), suffix=r'\b'),
68 | Keyword.Type),
69 | # imaginary_lit
70 | (r'\d+i', Number),
71 | (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
72 | (r'\.\d+([Ee][-+]\d+)?i', Number),
73 | (r'\d+[Ee][-+]\d+i', Number),
74 | # float_lit
75 | (r'\d+(\.\d+[eE][+\-]?\d+|'
76 | r'\.\d*|[eE][+\-]?\d+)', Number.Float),
77 | (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
78 | # int_lit
79 | # -- octal_lit
80 | (r'0[0-7]+', Number.Oct),
81 | # -- hex_lit
82 | (r'0[xX][0-9a-fA-F]+', Number.Hex),
83 | # -- decimal_lit
84 | (r'(0|[1-9][0-9]*)', Number.Integer),
85 | # char_lit
86 | (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
87 | r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
88 | String.Char),
89 | # StringLiteral
90 | # -- raw_string_lit
91 | (r'`[^`]*`', String),
92 | # -- interpreted_string_lit
93 | (r'"(\\\\|\\"|[^"])*"', String),
94 | # Tokens
95 | (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
96 | r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator),
97 | (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
98 | # identifier
99 | (r'[^\W\d]\w*', Name.Other),
100 | ]
101 | }
102 |
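
The comment above ("builtin types aren't actually keywords, but can be used as
functions") is implemented purely through the `\b(\()` suffix on the first word
list. A quick check of that behaviour, assuming only the public get_tokens API
(the Go line is an arbitrary example):

    from pygments.lexers.go import GoLexer
    from pygments.token import Keyword, Name

    tokens = list(GoLexer().get_tokens('var x int = int(y)\n'))
    # bare 'int' is lexed as a type, 'int(' as a builtin call
    assert (Keyword.Type, 'int') in tokens
    assert (Name.Builtin, 'int') in tokens
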
--------------------------------------------------------------------------------
/pygments/lexers/graph.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.graph
4 | ~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for graph query languages.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, include, bygroups, using, this
15 | from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
16 | String, Number, Whitespace
17 |
18 |
19 | __all__ = ['CypherLexer']
20 |
21 |
22 | class CypherLexer(RegexLexer):
23 | """
24 | For `Cypher Query Language
25 | <https://neo4j.com/docs/developer-manual/3.3/cypher/>`_
26 |
27 | For the Cypher version in Neo4j 3.3
28 |
29 | .. versionadded:: 2.0
30 | """
31 | name = 'Cypher'
32 | aliases = ['cypher']
33 | filenames = ['*.cyp', '*.cypher']
34 |
35 | flags = re.MULTILINE | re.IGNORECASE
36 |
37 | tokens = {
38 | 'root': [
39 | include('comment'),
40 | include('keywords'),
41 | include('clauses'),
42 | include('relations'),
43 | include('strings'),
44 | include('whitespace'),
45 | include('barewords'),
46 | ],
47 | 'comment': [
48 | (r'^.*//.*\n', Comment.Single),
49 | ],
50 | 'keywords': [
51 | (r'(create|order|match|limit|set|skip|start|return|with|where|'
52 | r'delete|foreach|not|by|true|false)\b', Keyword),
53 | ],
54 | 'clauses': [
55 | # based on https://neo4j.com/docs/cypher-refcard/3.3/
56 | (r'(all|any|as|asc|ascending|assert|call|case|create|'
57 | r'create\s+index|create\s+unique|delete|desc|descending|'
58 | r'distinct|drop\s+constraint\s+on|drop\s+index\s+on|end|'
59 | r'ends\s+with|fieldterminator|foreach|in|is\s+node\s+key|'
60 | r'is\s+null|is\s+unique|limit|load\s+csv\s+from|match|merge|none|'
61 | r'not|null|on\s+match|on\s+create|optional\s+match|order\s+by|'
62 | r'remove|return|set|skip|single|start|starts\s+with|then|union|'
63 | r'union\s+all|unwind|using\s+periodic\s+commit|yield|where|when|'
64 | r'with)\b', Keyword),
65 | ],
66 | 'relations': [
67 | (r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
68 | (r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
69 | (r'(-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
70 | (r'-->|<--|\[|\]', Operator),
71 | (r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
72 | (r'[.*{}]', Punctuation),
73 | ],
74 | 'strings': [
75 | (r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
76 | (r'`(?:``|[^`])+`', Name.Variable),
77 | ],
78 | 'whitespace': [
79 | (r'\s+', Whitespace),
80 | ],
81 | 'barewords': [
82 | (r'[a-z]\w*', Name),
83 | (r'\d+', Number),
84 | ],
85 | }
86 |
--------------------------------------------------------------------------------
/pygments/lexers/hexdump.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.hexdump
4 | ~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for hexadecimal dumps.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, bygroups, include
13 | from pygments.token import Text, Name, Number, String, Punctuation
14 |
15 | __all__ = ['HexdumpLexer']
16 |
17 |
18 | class HexdumpLexer(RegexLexer):
19 | """
20 | For typical hex dump output formats produced by the UNIX and GNU/Linux tools ``hexdump``,
21 | ``hd``, ``hexcat``, ``od`` and ``xxd``, and the DOS tool ``DEBUG``. For example:
22 |
23 | .. sourcecode:: hexdump
24 |
25 | 00000000 7f 45 4c 46 02 01 01 00 00 00 00 00 00 00 00 00 |.ELF............|
26 | 00000010 02 00 3e 00 01 00 00 00 c5 48 40 00 00 00 00 00 |..>......H@.....|
27 |
28 | The specific supported formats are the outputs of:
29 |
30 | * ``hexdump FILE``
31 | * ``hexdump -C FILE`` -- the `canonical` format used in the example.
32 | * ``hd FILE`` -- same as ``hexdump -C FILE``.
33 | * ``hexcat FILE``
34 | * ``od -t x1z FILE``
35 | * ``xxd FILE``
36 | * ``DEBUG.EXE FILE.COM``, entering ``d`` at the prompt.
37 |
38 | .. versionadded:: 2.1
39 | """
40 | name = 'Hexdump'
41 | aliases = ['hexdump']
42 |
43 | hd = r'[0-9A-Fa-f]'
44 |
45 | tokens = {
46 | 'root': [
47 | (r'\n', Text),
48 | include('offset'),
49 | (r'('+hd+r'{2})(\-)('+hd+r'{2})',
50 | bygroups(Number.Hex, Punctuation, Number.Hex)),
51 | (hd+r'{2}', Number.Hex),
52 | (r'(\s{2,3})(\>)(.{16})(\<)$',
53 | bygroups(Text, Punctuation, String, Punctuation), 'bracket-strings'),
54 | (r'(\s{2,3})(\|)(.{16})(\|)$',
55 | bygroups(Text, Punctuation, String, Punctuation), 'piped-strings'),
56 | (r'(\s{2,3})(\>)(.{1,15})(\<)$',
57 | bygroups(Text, Punctuation, String, Punctuation)),
58 | (r'(\s{2,3})(\|)(.{1,15})(\|)$',
59 | bygroups(Text, Punctuation, String, Punctuation)),
60 | (r'(\s{2,3})(.{1,15})$', bygroups(Text, String)),
61 | (r'(\s{2,3})(.{16}|.{20})$', bygroups(Text, String), 'nonpiped-strings'),
62 | (r'\s', Text),
63 | (r'^\*', Punctuation),
64 | ],
65 | 'offset': [
66 | (r'^('+hd+'+)(:)', bygroups(Name.Label, Punctuation), 'offset-mode'),
67 | (r'^'+hd+'+', Name.Label),
68 | ],
69 | 'offset-mode': [
70 | (r'\s', Text, '#pop'),
71 | (hd+'+', Name.Label),
72 | (r':', Punctuation)
73 | ],
74 | 'piped-strings': [
75 | (r'\n', Text),
76 | include('offset'),
77 | (hd+r'{2}', Number.Hex),
78 | (r'(\s{2,3})(\|)(.{1,16})(\|)$',
79 | bygroups(Text, Punctuation, String, Punctuation)),
80 | (r'\s', Text),
81 | (r'^\*', Punctuation),
82 | ],
83 | 'bracket-strings': [
84 | (r'\n', Text),
85 | include('offset'),
86 | (hd+r'{2}', Number.Hex),
87 | (r'(\s{2,3})(\>)(.{1,16})(\<)$',
88 | bygroups(Text, Punctuation, String, Punctuation)),
89 | (r'\s', Text),
90 | (r'^\*', Punctuation),
91 | ],
92 | 'nonpiped-strings': [
93 | (r'\n', Text),
94 | include('offset'),
95 | (r'('+hd+r'{2})(\-)('+hd+r'{2})',
96 | bygroups(Number.Hex, Punctuation, Number.Hex)),
97 | (hd+r'{2}', Number.Hex),
98 | (r'(\s{19,})(.{1,20}?)$', bygroups(Text, String)),
99 | (r'(\s{2,3})(.{1,20})$', bygroups(Text, String)),
100 | (r'\s', Text),
101 | (r'^\*', Punctuation),
102 | ],
103 | }
104 |
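
A quick sketch of what the rules above produce for one ``hexdump -C`` style line
(an arbitrary example): the offset becomes Name.Label, the byte pairs Number.Hex,
and the piped column String.

    from pygments.lexers.hexdump import HexdumpLexer

    line = '00000000  7f 45 4c 46 02 01 01 00  |.ELF....|\n'
    for tokentype, value in HexdumpLexer().get_tokens(line):
        print(tokentype, repr(value))
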
--------------------------------------------------------------------------------
/pygments/lexers/inferno.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.inferno
4 | ~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for the Inferno OS and related languages.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, include, bygroups, default
15 | from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
16 | Name, String, Number
17 |
18 | __all__ = ['LimboLexer']
19 |
20 |
21 | class LimboLexer(RegexLexer):
22 | """
23 | Lexer for the `Limbo programming language <http://www.vitanuova.com/inferno/limbo.html>`_
24 |
25 | TODO:
26 | - maybe implement better var declaration highlighting
27 | - some simple syntax error highlighting
28 |
29 | .. versionadded:: 2.0
30 | """
31 | name = 'Limbo'
32 | aliases = ['limbo']
33 | filenames = ['*.b']
34 | mimetypes = ['text/limbo']
35 |
36 | tokens = {
37 | 'whitespace': [
38 | (r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)',
39 | bygroups(Text, Name.Label)),
40 | (r'\n', Text),
41 | (r'\s+', Text),
42 | (r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single),
43 | ],
44 | 'string': [
45 | (r'"', String, '#pop'),
46 | (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
47 | r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
48 | (r'[^\\"\n]+', String), # all other characters
49 | (r'\\', String), # stray backslash
50 | ],
51 | 'statements': [
52 | (r'"', String, 'string'),
53 | (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
54 | (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
55 | (r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float),
56 | (r'16r[0-9a-fA-F]+', Number.Hex),
57 | (r'8r[0-7]+', Number.Oct),
58 | (r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer),
59 | (r'[()\[\],.]', Punctuation),
60 | (r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator),
61 | (r'(alt|break|case|continue|cyclic|do|else|exit'
62 | r'|for|hd|if|implement|import|include|len|load|or'
63 | r'|pick|return|spawn|tagof|tl|to|while)\b', Keyword),
64 | (r'(byte|int|big|real|string|array|chan|list|adt'
65 | r'|fn|ref|of|module|self|type)\b', Keyword.Type),
66 | (r'(con|iota|nil)\b', Keyword.Constant),
67 | (r'[a-zA-Z_]\w*', Name),
68 | ],
69 | 'statement' : [
70 | include('whitespace'),
71 | include('statements'),
72 | ('[{}]', Punctuation),
73 | (';', Punctuation, '#pop'),
74 | ],
75 | 'root': [
76 | include('whitespace'),
77 | default('statement'),
78 | ],
79 | }
80 |
81 | def analyse_text(text):
82 | # Any limbo module implements something
83 | if re.search(r'^implement \w+;', text, re.MULTILINE):
84 | return 0.7
85 |
86 | # TODO:
87 | # - Make lexers for:
88 | # - asm sources
89 | # - man pages
90 | # - mkfiles
91 | # - module definitions
92 | # - namespace definitions
93 | # - shell scripts
94 | # - maybe keyfiles and fonts
95 | # they all seem to be quite similar to their equivalents
96 | # from unix world, so there should not be a lot of problems
97 |
--------------------------------------------------------------------------------
/pygments/lexers/iolang.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.iolang
4 | ~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for the Io language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number
15 |
16 | __all__ = ['IoLexer']
17 |
18 |
19 | class IoLexer(RegexLexer):
20 | """
21 | For `Io <http://iolanguage.com/>`_ (a small, prototype-based
22 | programming language) source.
23 |
24 | .. versionadded:: 0.10
25 | """
26 | name = 'Io'
27 | filenames = ['*.io']
28 | aliases = ['io']
29 | mimetypes = ['text/x-iosrc']
30 | tokens = {
31 | 'root': [
32 | (r'\n', Text),
33 | (r'\s+', Text),
34 | # Comments
35 | (r'//(.*?)\n', Comment.Single),
36 | (r'#(.*?)\n', Comment.Single),
37 | (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
38 | (r'/\+', Comment.Multiline, 'nestedcomment'),
39 | # DoubleQuotedString
40 | (r'"(\\\\|\\"|[^"])*"', String),
41 | # Operators
42 | (r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}',
43 | Operator),
44 | # keywords
45 | (r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b',
46 | Keyword),
47 | # constants
48 | (r'(nil|false|true)\b', Name.Constant),
49 | # names
50 | (r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b',
51 | Name.Builtin),
52 | (r'[a-zA-Z_]\w*', Name),
53 | # numbers
54 | (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
55 | (r'\d+', Number.Integer)
56 | ],
57 | 'nestedcomment': [
58 | (r'[^+/]+', Comment.Multiline),
59 | (r'/\+', Comment.Multiline, '#push'),
60 | (r'\+/', Comment.Multiline, '#pop'),
61 | (r'[+/]', Comment.Multiline),
62 | ]
63 | }
64 |
--------------------------------------------------------------------------------
/pygments/lexers/math.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.math
4 | ~~~~~~~~~~~~~~~~~~~~
5 |
6 | Just export lexers that were contained in this module.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexers.python import NumPyLexer
13 | from pygments.lexers.matlab import MatlabLexer, MatlabSessionLexer, \
14 | OctaveLexer, ScilabLexer
15 | from pygments.lexers.julia import JuliaLexer, JuliaConsoleLexer
16 | from pygments.lexers.r import RConsoleLexer, SLexer, RdLexer
17 | from pygments.lexers.modeling import BugsLexer, JagsLexer, StanLexer
18 | from pygments.lexers.idl import IDLLexer
19 | from pygments.lexers.algebra import MuPADLexer
20 |
21 | __all__ = []
22 |
--------------------------------------------------------------------------------
/pygments/lexers/nit.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.nit
4 | ~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for the Nit language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, words
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number, Punctuation
15 |
16 | __all__ = ['NitLexer']
17 |
18 |
19 | class NitLexer(RegexLexer):
20 | """
21 | For `nit <http://nitlanguage.org>`_ source.
22 |
23 | .. versionadded:: 2.0
24 | """
25 |
26 | name = 'Nit'
27 | aliases = ['nit']
28 | filenames = ['*.nit']
29 | tokens = {
30 | 'root': [
31 | (r'#.*?$', Comment.Single),
32 | (words((
33 | 'package', 'module', 'import', 'class', 'abstract', 'interface',
34 | 'universal', 'enum', 'end', 'fun', 'type', 'init', 'redef',
35 | 'isa', 'do', 'readable', 'writable', 'var', 'intern', 'extern',
36 | 'public', 'protected', 'private', 'intrude', 'if', 'then',
37 | 'else', 'while', 'loop', 'for', 'in', 'and', 'or', 'not',
38 | 'implies', 'return', 'continue', 'break', 'abort', 'assert',
39 | 'new', 'is', 'once', 'super', 'self', 'true', 'false', 'nullable',
40 | 'null', 'as', 'isset', 'label', '__debug__'), suffix=r'(?=[\r\n\t( ])'),
41 | Keyword),
42 | (r'[A-Z]\w*', Name.Class),
43 | (r'"""(([^\'\\]|\\.)|\\r|\\n)*((\{\{?)?(""?\{\{?)*""""*)', String), # Simple long string
44 | (r'\'\'\'(((\\.|[^\'\\])|\\r|\\n)|\'((\\.|[^\'\\])|\\r|\\n)|'
45 | r'\'\'((\\.|[^\'\\])|\\r|\\n))*\'\'\'', String), # Simple long string alt
46 | (r'"""(([^\'\\]|\\.)|\\r|\\n)*((""?)?(\{\{?""?)*\{\{\{\{*)', String), # Start long string
47 | (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(""?)?(\{\{?""?)*\{\{\{\{*', String), # Mid long string
48 | (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(\{\{?)?(""?\{\{?)*""""*', String), # End long string
49 | (r'"(\\.|([^"}{\\]))*"', String), # Simple String
50 | (r'"(\\.|([^"}{\\]))*\{', String), # Start string
51 | (r'\}(\\.|([^"}{\\]))*\{', String), # Mid String
52 | (r'\}(\\.|([^"}{\\]))*"', String), # End String
53 | (r'(\'[^\'\\]\')|(\'\\.\')', String.Char),
54 | (r'[0-9]*\.[0-9]+', Number.Float),
55 | (r'[0-9]+', Number.Integer),
56 | (r'0(x|X)[0-9A-Fa-f]+', Number.Hex),
57 | (r'[a-z]\w*', Name),
58 | (r'_\w+', Name.Variable.Instance),
59 | (r'==|!=|<==>|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator),
60 | (r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation),
61 | (r'`\{[^`]*`\}', Text), # Extern blocks won't be Lexed by Nit
62 | (r'[\r\n\t ]+', Text),
63 | ],
64 | }
65 |
--------------------------------------------------------------------------------
/pygments/lexers/oberon.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.oberon
4 | ~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for Oberon family languages.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, include, words
15 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
16 | Number, Punctuation
17 |
18 | __all__ = ['ComponentPascalLexer']
19 |
20 |
21 | class ComponentPascalLexer(RegexLexer):
22 | """
23 | For `Component Pascal <http://www.oberon.ch/pdf/CP-Lang.pdf>`_ source code.
24 |
25 | .. versionadded:: 2.1
26 | """
27 | name = 'Component Pascal'
28 | aliases = ['componentpascal', 'cp']
29 | filenames = ['*.cp', '*.cps']
30 | mimetypes = ['text/x-component-pascal']
31 |
32 | flags = re.MULTILINE | re.DOTALL
33 |
34 | tokens = {
35 | 'root': [
36 | include('whitespace'),
37 | include('comments'),
38 | include('punctuation'),
39 | include('numliterals'),
40 | include('strings'),
41 | include('operators'),
42 | include('builtins'),
43 | include('identifiers'),
44 | ],
45 | 'whitespace': [
46 | (r'\n+', Text), # blank lines
47 | (r'\s+', Text), # whitespace
48 | ],
49 | 'comments': [
50 | (r'\(\*([^$].*?)\*\)', Comment.Multiline),
51 | # TODO: nested comments (* (* ... *) ... (* ... *) *) not supported!
52 | ],
53 | 'punctuation': [
54 | (r'[()\[\]{},.:;|]', Punctuation),
55 | ],
56 | 'numliterals': [
57 | (r'[0-9A-F]+X\b', Number.Hex), # char code
58 | (r'[0-9A-F]+[HL]\b', Number.Hex), # hexadecimal number
59 | (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number
60 | (r'[0-9]+\.[0-9]+', Number.Float), # real number
61 | (r'[0-9]+', Number.Integer), # decimal whole number
62 | ],
63 | 'strings': [
64 | (r"'[^\n']*'", String), # single quoted string
65 | (r'"[^\n"]*"', String), # double quoted string
66 | ],
67 | 'operators': [
68 | # Arithmetic Operators
69 | (r'[+-]', Operator),
70 | (r'[*/]', Operator),
71 | # Relational Operators
72 | (r'[=#<>]', Operator),
73 | # Dereferencing Operator
74 | (r'\^', Operator),
75 | # Logical AND Operator
76 | (r'&', Operator),
77 | # Logical NOT Operator
78 | (r'~', Operator),
79 | # Assignment Symbol
80 | (r':=', Operator),
81 | # Range Constructor
82 | (r'\.\.', Operator),
83 | (r'\$', Operator),
84 | ],
85 | 'identifiers': [
86 | (r'([a-zA-Z_$][\w$]*)', Name),
87 | ],
88 | 'builtins': [
89 | (words((
90 | 'ANYPTR', 'ANYREC', 'BOOLEAN', 'BYTE', 'CHAR', 'INTEGER', 'LONGINT',
91 | 'REAL', 'SET', 'SHORTCHAR', 'SHORTINT', 'SHORTREAL'
92 | ), suffix=r'\b'), Keyword.Type),
93 | (words((
94 | 'ABS', 'ABSTRACT', 'ARRAY', 'ASH', 'ASSERT', 'BEGIN', 'BITS', 'BY',
95 | 'CAP', 'CASE', 'CHR', 'CLOSE', 'CONST', 'DEC', 'DIV', 'DO', 'ELSE',
96 | 'ELSIF', 'EMPTY', 'END', 'ENTIER', 'EXCL', 'EXIT', 'EXTENSIBLE', 'FOR',
97 | 'HALT', 'IF', 'IMPORT', 'IN', 'INC', 'INCL', 'IS', 'LEN', 'LIMITED',
98 | 'LONG', 'LOOP', 'MAX', 'MIN', 'MOD', 'MODULE', 'NEW', 'ODD', 'OF',
99 | 'OR', 'ORD', 'OUT', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
100 | 'SHORT', 'SHORTCHAR', 'SHORTINT', 'SIZE', 'THEN', 'TYPE', 'TO', 'UNTIL',
101 | 'VAR', 'WHILE', 'WITH'
102 | ), suffix=r'\b'), Keyword.Reserved),
103 | (r'(TRUE|FALSE|NIL|INF)\b', Keyword.Constant),
104 | ]
105 | }
106 |
--------------------------------------------------------------------------------
/pygments/lexers/ooc.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.ooc
4 | ~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for the Ooc language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, bygroups, words
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number, Punctuation
15 |
16 | __all__ = ['OocLexer']
17 |
18 |
19 | class OocLexer(RegexLexer):
20 | """
21 | For `Ooc <http://ooc-lang.org/>`_ source code.
22 |
23 | .. versionadded:: 1.2
24 | """
25 | name = 'Ooc'
26 | aliases = ['ooc']
27 | filenames = ['*.ooc']
28 | mimetypes = ['text/x-ooc']
29 |
30 | tokens = {
31 | 'root': [
32 | (words((
33 | 'class', 'interface', 'implement', 'abstract', 'extends', 'from',
34 | 'this', 'super', 'new', 'const', 'final', 'static', 'import',
35 | 'use', 'extern', 'inline', 'proto', 'break', 'continue',
36 | 'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do',
37 | 'switch', 'case', 'as', 'in', 'version', 'return', 'true',
38 | 'false', 'null'), prefix=r'\b', suffix=r'\b'),
39 | Keyword),
40 | (r'include\b', Keyword, 'include'),
41 | (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)',
42 | bygroups(Keyword, Text, Keyword, Text, Name.Class)),
43 | (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)',
44 | bygroups(Keyword, Text, Name.Function)),
45 | (r'\bfunc\b', Keyword),
46 | # Note: %= and ^= not listed on http://ooc-lang.org/syntax
47 | (r'//.*', Comment),
48 | (r'(?s)/\*.*?\*/', Comment.Multiline),
49 | (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
50 | r'&&?|\|\|?|\^=?)', Operator),
51 | (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
52 | Name.Function)),
53 | (r'[A-Z][A-Z0-9_]+', Name.Constant),
54 | (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class),
55 |
56 | (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()',
57 | bygroups(Name.Function, Text)),
58 | (r'[a-z]\w*', Name.Variable),
59 |
60 | # : introduces types
61 | (r'[:(){}\[\];,]', Punctuation),
62 |
63 | (r'0x[0-9a-fA-F]+', Number.Hex),
64 | (r'0c[0-9]+', Number.Oct),
65 | (r'0b[01]+', Number.Bin),
66 | (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
67 | (r'[0-9_]+', Number.Decimal),
68 |
69 | (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"',
70 | String.Double),
71 | (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
72 | String.Char),
73 | (r'@', Punctuation), # pointer dereference
74 | (r'\.', Punctuation), # imports or chain operator
75 |
76 | (r'\\[ \t\n]', Text),
77 | (r'[ \t]+', Text),
78 | ],
79 | 'include': [
80 | (r'[\w/]+', Name),
81 | (r',', Punctuation),
82 | (r'[ \t]', Text),
83 | (r'[;\n]', Text, '#pop'),
84 | ],
85 | }
86 |
--------------------------------------------------------------------------------
/pygments/lexers/other.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.other
4 | ~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Just export lexer classes previously contained in this module.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer
13 | from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \
14 | TcshLexer
15 | from pygments.lexers.robotframework import RobotFrameworkLexer
16 | from pygments.lexers.testing import GherkinLexer
17 | from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer
18 | from pygments.lexers.prolog import LogtalkLexer
19 | from pygments.lexers.snobol import SnobolLexer
20 | from pygments.lexers.rebol import RebolLexer
21 | from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer
22 | from pygments.lexers.modeling import ModelicaLexer
23 | from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \
24 | HybrisLexer
25 | from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \
26 | AsymptoteLexer, PovrayLexer
27 | from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \
28 | GoodDataCLLexer, MaqlLexer
29 | from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer
30 | from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \
31 | MscgenLexer, VGLLexer
32 | from pygments.lexers.basic import CbmBasicV2Lexer
33 | from pygments.lexers.pawn import SourcePawnLexer, PawnLexer
34 | from pygments.lexers.ecl import ECLLexer
35 | from pygments.lexers.urbi import UrbiscriptLexer
36 | from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer
37 | from pygments.lexers.installers import NSISLexer, RPMSpecLexer
38 | from pygments.lexers.textedit import AwkLexer
39 | from pygments.lexers.smv import NuSMVLexer
40 |
41 | __all__ = []
42 |
--------------------------------------------------------------------------------
/pygments/lexers/parasail.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.parasail
4 | ~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for ParaSail.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, include
15 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
16 | Number, Punctuation, Literal
17 |
18 | __all__ = ['ParaSailLexer']
19 |
20 |
21 | class ParaSailLexer(RegexLexer):
22 | """
23 | For `ParaSail <http://www.parasail-lang.org>`_ source code.
24 |
25 | .. versionadded:: 2.1
26 | """
27 |
28 | name = 'ParaSail'
29 | aliases = ['parasail']
30 | filenames = ['*.psi', '*.psl']
31 | mimetypes = ['text/x-parasail']
32 |
33 | flags = re.MULTILINE
34 |
35 | tokens = {
36 | 'root': [
37 | (r'[^\S\n]+', Text),
38 | (r'//.*?\n', Comment.Single),
39 | (r'\b(and|or|xor)=', Operator.Word),
40 | (r'\b(and(\s+then)?|or(\s+else)?|xor|rem|mod|'
41 | r'(is|not)\s+null)\b',
42 | Operator.Word),
43 | # Keywords
44 | (r'\b(abs|abstract|all|block|class|concurrent|const|continue|'
45 | r'each|end|exit|extends|exports|forward|func|global|implements|'
46 | r'import|in|interface|is|lambda|locked|new|not|null|of|op|'
47 | r'optional|private|queued|ref|return|reverse|separate|some|'
48 | r'type|until|var|with|'
49 | # Control flow
50 | r'if|then|else|elsif|case|for|while|loop)\b',
51 | Keyword.Reserved),
52 | (r'(abstract\s+)?(interface|class|op|func|type)',
53 | Keyword.Declaration),
54 | # Literals
55 | (r'"[^"]*"', String),
56 | (r'\\[\'ntrf"0]', String.Escape),
57 | (r'#[a-zA-Z]\w*', Literal), # Enumeration
58 | include('numbers'),
59 | (r"'[^']'", String.Char),
60 | (r'[a-zA-Z]\w*', Name),
61 | # Operators and Punctuation
62 | (r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|'
63 | r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\|=|\||/=|\+|-|\*|/|'
64 | r'\.\.|<\.\.|\.\.<|<\.\.<)',
65 | Operator),
66 | (r'(<|>|\[|\]|\(|\)|\||:|;|,|\.|\{|\}|->)',
67 | Punctuation),
68 | (r'\n+', Text),
69 | ],
70 | 'numbers': [
71 | (r'\d[0-9_]*#[0-9a-fA-F][0-9a-fA-F_]*#', Number.Hex), # any base
72 | (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), # C-like hex
73 | (r'0[bB][01][01_]*', Number.Bin), # C-like bin
74 | (r'\d[0-9_]*\.\d[0-9_]*[eE][+-]\d[0-9_]*', # float exp
75 | Number.Float),
76 | (r'\d[0-9_]*\.\d[0-9_]*', Number.Float), # float
77 | (r'\d[0-9_]*', Number.Integer), # integer
78 | ],
79 | }
80 |
--------------------------------------------------------------------------------
/pygments/lexers/pony.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.pony
4 | ~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for Pony and related languages.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, bygroups, words
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number, Punctuation
15 |
16 | __all__ = ['PonyLexer']
17 |
18 |
19 | class PonyLexer(RegexLexer):
20 | """
21 | For Pony source code.
22 |
23 | .. versionadded:: 2.4
24 | """
25 |
26 | name = 'Pony'
27 | aliases = ['pony']
28 | filenames = ['*.pony']
29 |
30 | _caps = r'(iso|trn|ref|val|box|tag)'
31 |
32 | tokens = {
33 | 'root': [
34 | (r'\n', Text),
35 | (r'[^\S\n]+', Text),
36 | (r'//.*\n', Comment.Single),
37 | (r'/\*', Comment.Multiline, 'nested_comment'),
38 | (r'"""(?:.|\n)*?"""', String.Doc),
39 | (r'"', String, 'string'),
40 | (r'\'.*?\'', String.Char),
41 | (r'=>|[]{}:().~;,|&!^?[]', Punctuation),
42 | (words((
43 | 'addressof', 'and', 'as', 'consume', 'digestof', 'is', 'isnt',
44 | 'not', 'or'),
45 | suffix=r'\b'),
46 | Operator.Word),
47 | (r'!=|==|<<|>>|[-+/*%=<>]', Operator),
48 | (words((
49 | 'box', 'break', 'compile_error', 'compile_intrinsic',
50 | 'continue', 'do', 'else', 'elseif', 'embed', 'end', 'error',
51 | 'for', 'if', 'ifdef', 'in', 'iso', 'lambda', 'let', 'match',
52 | 'object', 'recover', 'ref', 'repeat', 'return', 'tag', 'then',
53 | 'this', 'trn', 'try', 'until', 'use', 'var', 'val', 'where',
54 | 'while', 'with', '#any', '#read', '#send', '#share'),
55 | suffix=r'\b'),
56 | Keyword),
57 | (r'(actor|class|struct|primitive|interface|trait|type)((?:\s)+)',
58 | bygroups(Keyword, Text), 'typename'),
59 | (r'(new|fun|be)((?:\s)+)', bygroups(Keyword, Text), 'methodname'),
60 | (words((
61 | 'I8', 'U8', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'I128',
62 | 'U128', 'ILong', 'ULong', 'ISize', 'USize', 'F32', 'F64',
63 | 'Bool', 'Pointer', 'None', 'Any', 'Array', 'String',
64 | 'Iterator'),
65 | suffix=r'\b'),
66 | Name.Builtin.Type),
67 | (r'_?[A-Z]\w*', Name.Type),
68 | (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
69 | (r'0x[0-9a-fA-F]+', Number.Hex),
70 | (r'\d+', Number.Integer),
71 | (r'(true|false)\b', Name.Builtin),
72 | (r'_\d*', Name),
73 | (r'_?[a-z][\w\'_]*', Name)
74 | ],
75 | 'typename': [
76 | (_caps + r'?((?:\s)*)(_?[A-Z]\w*)',
77 | bygroups(Keyword, Text, Name.Class), '#pop')
78 | ],
79 | 'methodname': [
80 | (_caps + r'?((?:\s)*)(_?[a-z]\w*)',
81 | bygroups(Keyword, Text, Name.Function), '#pop')
82 | ],
83 | 'nested_comment': [
84 | (r'[^*/]+', Comment.Multiline),
85 | (r'/\*', Comment.Multiline, '#push'),
86 | (r'\*/', Comment.Multiline, '#pop'),
87 | (r'[*/]', Comment.Multiline)
88 | ],
89 | 'string': [
90 | (r'"', String, '#pop'),
91 | (r'\\"', String),
92 | (r'[^\\"]+', String)
93 | ]
94 | }
95 |
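
The '#push'/'#pop' pair in the nested_comment state above is what makes
/* ... */ comments nest properly. A minimal check, assuming only the public
get_tokens API (the comment text is an arbitrary example):

    from pygments.lexers.pony import PonyLexer
    from pygments.token import Comment

    toks = list(PonyLexer().get_tokens('/* outer /* inner */ still comment */\n'))
    # everything except the trailing newline stays Comment.Multiline
    assert all(t is Comment.Multiline for t, _ in toks[:-1])
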
--------------------------------------------------------------------------------
/pygments/lexers/resource.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.resource
4 | ~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for resource definition files.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, bygroups, words
15 | from pygments.token import Comment, String, Number, Operator, Text, \
16 | Keyword, Name
17 |
18 | __all__ = ['ResourceLexer']
19 |
20 |
21 | class ResourceLexer(RegexLexer):
22 | """Lexer for `ICU Resource bundles
23 | <http://userguide.icu-project.org/locale/resources>`_.
24 |
25 | .. versionadded:: 2.0
26 | """
27 | name = 'ResourceBundle'
28 | aliases = ['resource', 'resourcebundle']
29 | filenames = []
30 |
31 | _types = (':table', ':array', ':string', ':bin', ':import', ':intvector',
32 | ':int', ':alias')
33 |
34 | flags = re.MULTILINE | re.IGNORECASE
35 | tokens = {
36 | 'root': [
37 | (r'//.*?$', Comment),
38 | (r'"', String, 'string'),
39 | (r'-?\d+', Number.Integer),
40 | (r'[,{}]', Operator),
41 | (r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types),
42 | bygroups(Name, Text, Keyword)),
43 | (r'\s+', Text),
44 | (words(_types), Keyword),
45 | ],
46 | 'string': [
47 | (r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|'
48 | r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String),
49 | (r'\{', String.Escape, 'msgname'),
50 | (r'"', String, '#pop')
51 | ],
52 | 'msgname': [
53 | (r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message'))
54 | ],
55 | 'message': [
56 | (r'\{', String.Escape, 'msgname'),
57 | (r'\}', String.Escape, '#pop'),
58 | (r'(,)(\s*)([a-z]+)(\s*\})',
59 | bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'),
60 | (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)',
61 | bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
62 | String.Escape, Operator.Word, String.Escape, Operator,
63 | String.Escape, Number.Integer, String.Escape), 'choice'),
64 | (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)',
65 | bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
66 | String.Escape), 'choice'),
67 | (r'\s+', String.Escape)
68 | ],
69 | 'choice': [
70 | (r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)',
71 | bygroups(Operator, Number.Integer, String.Escape), 'message'),
72 | (r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'),
73 | (r'\}', String.Escape, ('#pop', '#pop')),
74 | (r'\s+', String.Escape)
75 | ],
76 | 'str': [
77 | (r'\}', String.Escape, '#pop'),
78 | (r'\{', String.Escape, 'msgname'),
79 | (r'[^{}]+', String)
80 | ]
81 | }
82 |
83 | def analyse_text(text):
84 | if text.startswith('root:table'):
85 | return 1.0
86 |
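
Because analyse_text above reports full confidence for text starting with
``root:table``, content-based lookup should resolve to this lexer. A minimal
sketch (the bundle text is a hypothetical example):

    from pygments.lexers import guess_lexer

    lexer = guess_lexer('root:table { key { "value" } }')
    print(lexer.name)  # expected: 'ResourceBundle'
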
--------------------------------------------------------------------------------
/pygments/lexers/rnc.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.rnc
4 | ~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for Relax-NG Compact syntax.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Punctuation
15 |
16 | __all__ = ['RNCCompactLexer']
17 |
18 |
19 | class RNCCompactLexer(RegexLexer):
20 | """
21 | For `RelaxNG-compact <http://relaxng.org>`_ syntax.
22 |
23 | .. versionadded:: 2.2
24 | """
25 |
26 | name = 'Relax-NG Compact'
27 | aliases = ['rnc', 'rng-compact']
28 | filenames = ['*.rnc']
29 |
30 | tokens = {
31 | 'root': [
32 | (r'namespace\b', Keyword.Namespace),
33 | (r'(?:default|datatypes)\b', Keyword.Declaration),
34 | (r'##.*$', Comment.Preproc),
35 | (r'#.*$', Comment.Single),
36 | (r'"[^"]*"', String.Double),
37 | # TODO single quoted strings and escape sequences outside of
38 | # double-quoted strings
39 | (r'(?:element|attribute|mixed)\b', Keyword.Declaration, 'variable'),
40 | (r'(text\b|xsd:[^ ]+)', Keyword.Type, 'maybe_xsdattributes'),
41 | (r'[,?&*=|~]|>>', Operator),
42 | (r'[(){}]', Punctuation),
43 | (r'.', Text),
44 | ],
45 |
46 | # a variable has been declared using `element` or `attribute`
47 | 'variable': [
48 | (r'[^{]+', Name.Variable),
49 | (r'\{', Punctuation, '#pop'),
50 | ],
51 |
52 | # after an xsd: declaration there may be attributes
53 | 'maybe_xsdattributes': [
54 | (r'\{', Punctuation, 'xsdattributes'),
55 | (r'\}', Punctuation, '#pop'),
56 | (r'.', Text),
57 | ],
58 |
59 | # attributes take the form { key1 = value1 key2 = value2 ... }
60 | 'xsdattributes': [
61 | (r'[^ =}]', Name.Attribute),
62 | (r'=', Operator),
63 | (r'"[^"]*"', String.Double),
64 | (r'\}', Punctuation, '#pop'),
65 | (r'.', Text),
66 | ],
67 | }
68 |
--------------------------------------------------------------------------------
/pygments/lexers/roboconf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.roboconf
4 | ~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for Roboconf DSL.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, words, re
13 | from pygments.token import Text, Operator, Keyword, Name, Comment
14 |
15 | __all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer']
16 |
17 |
18 | class RoboconfGraphLexer(RegexLexer):
19 | """
20 | Lexer for `Roboconf <http://roboconf.net/en/roboconf.html>`_ graph files.
21 |
22 | .. versionadded:: 2.1
23 | """
24 | name = 'Roboconf Graph'
25 | aliases = ['roboconf-graph']
26 | filenames = ['*.graph']
27 |
28 | flags = re.IGNORECASE | re.MULTILINE
29 | tokens = {
30 | 'root': [
31 | # Skip white spaces
32 | (r'\s+', Text),
33 |
34 | # There is one operator
35 | (r'=', Operator),
36 |
37 | # Keywords
38 | (words(('facet', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
39 | (words((
40 | 'installer', 'extends', 'exports', 'imports', 'facets',
41 | 'children'), suffix=r'\s*:?', prefix=r'\b'), Name),
42 |
43 | # Comments
44 | (r'#.*\n', Comment),
45 |
46 | # Default
47 | (r'[^#]', Text),
48 | (r'.*\n', Text)
49 | ]
50 | }
51 |
52 |
53 | class RoboconfInstancesLexer(RegexLexer):
54 | """
55 | Lexer for `Roboconf <http://roboconf.net/en/roboconf.html>`_ instances files.
56 |
57 | .. versionadded:: 2.1
58 | """
59 | name = 'Roboconf Instances'
60 | aliases = ['roboconf-instances']
61 | filenames = ['*.instances']
62 |
63 | flags = re.IGNORECASE | re.MULTILINE
64 | tokens = {
65 | 'root': [
66 |
67 | # Skip white spaces
68 | (r'\s+', Text),
69 |
70 | # Keywords
71 | (words(('instance of', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
72 | (words(('name', 'count'), suffix=r'\s*:?', prefix=r'\b'), Name),
73 | (r'\s*[\w.-]+\s*:', Name),
74 |
75 | # Comments
76 | (r'#.*\n', Comment),
77 |
78 | # Default
79 | (r'[^#]', Text),
80 | (r'.*\n', Text)
81 | ]
82 | }
83 |
--------------------------------------------------------------------------------
/pygments/lexers/scdoc.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.scdoc
4 | ~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for scdoc, a simple man page generator.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, include, bygroups, \
15 | using, this
16 | from pygments.token import Text, Comment, Keyword, String, \
17 | Generic
18 |
19 |
20 | __all__ = ['ScdocLexer']
21 |
22 |
23 | class ScdocLexer(RegexLexer):
24 | """
25 | `scdoc` is a simple man page generator for POSIX systems written in C99.
26 | https://git.sr.ht/~sircmpwn/scdoc
27 |
28 | .. versionadded:: 2.5
29 | """
30 | name = 'scdoc'
31 | aliases = ['scdoc', 'scd']
32 | filenames = ['*.scd', '*.scdoc']
33 | flags = re.MULTILINE
34 |
35 | tokens = {
36 | 'root': [
37 | # comment
38 | (r'^(;.+\n)', bygroups(Comment)),
39 |
40 | # heading with pound prefix
41 | (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)),
42 | (r'^(#{2})(.+\n)', bygroups(Generic.Subheading, Text)),
43 | # bulleted lists
44 | (r'^(\s*)([*-])(\s)(.+\n)',
45 | bygroups(Text, Keyword, Text, using(this, state='inline'))),
46 | # numbered lists
47 | (r'^(\s*)(\.+\.)( .+\n)',
48 | bygroups(Text, Keyword, using(this, state='inline'))),
49 | # quote
50 | (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
51 | # text block
52 | (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
53 |
54 | include('inline'),
55 | ],
56 | 'inline': [
57 | # escape
58 | (r'\\.', Text),
59 | # underlines
60 | (r'(\s)(_[^_]+_)(\W|\n)', bygroups(Text, Generic.Emph, Text)),
61 | # bold
62 | (r'(\s)(\*[^\*]+\*)(\W|\n)', bygroups(Text, Generic.Strong, Text)),
63 | # inline code
64 | (r'`[^`]+`', String.Backtick),
65 |
66 | # general text, must come last!
67 | (r'[^\\\s]+', Text),
68 | (r'.', Text),
69 | ],
70 | }
71 |
--------------------------------------------------------------------------------
/pygments/lexers/sgf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.sgf
4 | ~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for the Smart Game Format (SGF).
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, bygroups
13 | from pygments.token import Name, Literal, String, Text, Punctuation
14 |
15 | __all__ = ["SmartGameFormatLexer"]
16 |
17 |
18 | class SmartGameFormatLexer(RegexLexer):
19 | """
20 | Lexer for the Smart Game Format (SGF).
21 |
22 | The format is used to store game records of board games for two players
23 | (mainly the game of Go).
24 | For more information about the definition of the format, see:
25 | https://www.red-bean.com/sgf/
26 |
27 | .. versionadded:: 2.4
28 | """
29 | name = 'SmartGameFormat'
30 | aliases = ['sgf']
31 | filenames = ['*.sgf']
32 |
33 | tokens = {
34 | 'root': [
35 | (r'[\s():;]', Punctuation),
36 | # tokens:
37 | (r'(A[BW]|AE|AN|AP|AR|AS|[BW]L|BM|[BW]R|[BW]S|[BW]T|CA|CH|CP|CR|'
38 | r'DD|DM|DO|DT|EL|EV|EX|FF|FG|G[BW]|GC|GM|GN|HA|HO|ID|IP|IT|IY|KM|'
39 | r'KO|LB|LN|LT|L|MA|MN|M|N|OB|OM|ON|OP|OT|OV|P[BW]|PC|PL|PM|RE|RG|'
41 | r'RO|RU|SO|SC|SE|SI|SL|SQ|ST|SU|SZ|T[BW]|TC|TE|TM|TR|UC|US|VW|'
41 | r'V|[BW]|C)',
42 | Name.Builtin),
43 | # number:
44 | (r'(\[)([0-9.]+)(\])',
45 | bygroups(Punctuation, Literal.Number, Punctuation)),
46 | # date:
47 | (r'(\[)([0-9]{4}-[0-9]{2}-[0-9]{2})(\])',
48 | bygroups(Punctuation, Literal.Date, Punctuation)),
49 | # point:
50 | (r'(\[)([a-z]{2})(\])',
51 | bygroups(Punctuation, String, Punctuation)),
52 | # double points:
53 | (r'(\[)([a-z]{2})(:)([a-z]{2})(\])',
54 | bygroups(Punctuation, String, Punctuation, String, Punctuation)),
55 |
56 | (r'(\[)([\w\s#()+,\-.:?]+)(\])',
57 | bygroups(Punctuation, String, Punctuation)),
58 | (r'(\[)(\s.*)(\])',
59 | bygroups(Punctuation, Text, Punctuation)),
60 | ],
61 | }
62 |
--------------------------------------------------------------------------------
/pygments/lexers/smv.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.smv
4 | ~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for the SMV languages.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, words
13 | from pygments.token import Comment, Generic, Keyword, Name, Number, \
14 | Operator, Punctuation, Text
15 |
16 | __all__ = ['NuSMVLexer']
17 |
18 |
19 | class NuSMVLexer(RegexLexer):
20 | """
21 | Lexer for the NuSMV language.
22 |
23 | .. versionadded:: 2.2
24 | """
25 |
26 | name = 'NuSMV'
27 | aliases = ['nusmv']
28 | filenames = ['*.smv']
29 | mimetypes = []
30 |
31 | tokens = {
32 | 'root': [
33 | # Comments
34 | (r'(?s)\/\-\-.*?\-\-/', Comment),
35 | (r'--.*\n', Comment),
36 |
37 | # Reserved
38 | (words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR',
39 | 'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC',
40 | 'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC',
41 | 'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN',
42 | 'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF',
43 | 'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED',
44 | 'PREDICATES'), suffix=r'(?![\w$#-])'),
45 | Keyword.Declaration),
46 | (r'process(?![\w$#-])', Keyword),
47 | (words(('array', 'of', 'boolean', 'integer', 'real', 'word'),
48 | suffix=r'(?![\w$#-])'), Keyword.Type),
49 | (words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword),
50 | (words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize',
51 | 'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count',
52 | 'abs', 'max', 'min'), suffix=r'(?![\w$#-])'),
53 | Name.Builtin),
54 | (words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G',
55 | 'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF',
56 | 'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor',
57 | 'xnor'), suffix=r'(?![\w$#-])'),
58 | Operator.Word),
59 | (words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant),
60 |
61 | # Names
62 | (r'[a-zA-Z_][\w$#-]*', Name.Variable),
63 |
64 | # Operators
65 | (r':=', Operator),
66 | (r'[-&|+*/<>!=]', Operator),
67 |
68 | # Literals
69 | (r'\-?\d+\b', Number.Integer),
70 | (r'0[su][bB]\d*_[01_]+', Number.Bin),
71 | (r'0[su][oO]\d*_[0-7_]+', Number.Oct),
72 | (r'0[su][dD]\d*_[\d_]+', Number.Dec),
73 | (r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex),
74 |
75 | # Whitespace, punctuation and the rest
76 | (r'\s+', Text.Whitespace),
77 | (r'[()\[\]{};?:.,]', Punctuation),
78 | ],
79 | }
80 |
--------------------------------------------------------------------------------
/pygments/lexers/snobol.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.snobol
4 | ~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for the SNOBOL language.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | from pygments.lexer import RegexLexer, bygroups
13 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
14 | Number, Punctuation
15 |
16 | __all__ = ['SnobolLexer']
17 |
18 |
19 | class SnobolLexer(RegexLexer):
20 | """
21 | Lexer for the SNOBOL4 programming language.
22 |
23 | Recognizes the common ASCII equivalents of the original SNOBOL4 operators.
24 | Does not require spaces around binary operators.
25 |
26 | .. versionadded:: 1.5
27 | """
28 |
29 | name = "Snobol"
30 | aliases = ["snobol"]
31 | filenames = ['*.snobol']
32 | mimetypes = ['text/x-snobol']
33 |
34 | tokens = {
35 | # root state, start of line
36 | # comments, continuation lines, and directives start in column 1
37 | # as do labels
38 | 'root': [
39 | (r'\*.*\n', Comment),
40 | (r'[+.] ', Punctuation, 'statement'),
41 | (r'-.*\n', Comment),
42 | (r'END\s*\n', Name.Label, 'heredoc'),
43 | (r'[A-Za-z$][\w$]*', Name.Label, 'statement'),
44 | (r'\s+', Text, 'statement'),
45 | ],
46 | # statement state, line after continuation or label
47 | 'statement': [
48 | (r'\s*\n', Text, '#pop'),
49 | (r'\s+', Text),
50 | (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|'
51 | r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|'
52 | r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|'
53 | r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])',
54 | Name.Builtin),
55 | (r'[A-Za-z][\w.]*', Name),
56 | # ASCII equivalents of original operators
57 | # | for the EBCDIC equivalent, ! likewise
58 | # \ for EBCDIC negation
59 | (r'\*\*|[?$.!%*/#+\-@|&\\=]', Operator),
60 | (r'"[^"]*"', String),
61 | (r"'[^']*'", String),
62 | # Accept SPITBOL syntax for real numbers
63 | # as well as Macro SNOBOL4
64 | (r'[0-9]+(?=[^.EeDd])', Number.Integer),
65 | (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float),
66 | # Goto
67 | (r':', Punctuation, 'goto'),
68 | (r'[()<>,;]', Punctuation),
69 | ],
70 | # Goto block
71 | 'goto': [
72 | (r'\s*\n', Text, "#pop:2"),
73 | (r'\s+', Text),
74 | (r'F|S', Keyword),
75 | (r'(\()([A-Za-z][\w.]*)(\))',
76 | bygroups(Punctuation, Name.Label, Punctuation))
77 | ],
78 | # everything after the END statement is basically one
79 | # big heredoc.
80 | 'heredoc': [
81 | (r'.*\n', String.Heredoc)
82 | ]
83 | }
84 |
--------------------------------------------------------------------------------
/pygments/lexers/solidity.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.solidity
4 | ~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexers for Solidity.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, bygroups, include, words
15 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
16 | Number, Punctuation
17 |
18 | __all__ = ['SolidityLexer']
19 |
20 |
21 | class SolidityLexer(RegexLexer):
22 | """
23 | For Solidity source code.
24 |
25 | .. versionadded:: 2.5
26 | """
27 |
28 | name = 'Solidity'
29 | aliases = ['solidity']
30 | filenames = ['*.sol']
31 | mimetypes = []
32 |
33 | flags = re.MULTILINE | re.UNICODE
34 |
35 | datatype = (
36 | r'\b(address|bool|((bytes|hash|int|string|uint)(8|16|24|32|40|48|56|64'
37 | r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208'
38 | r'|216|224|232|240|248|256)?))\b'
39 | )
40 |
41 | tokens = {
42 | 'root': [
43 | include('whitespace'),
44 | include('comments'),
45 | (r'\bpragma\s+solidity\b', Keyword, 'pragma'),
46 | (r'\b(contract)(\s+)([a-zA-Z_]\w*)',
47 | bygroups(Keyword, Text.WhiteSpace, Name.Entity)),
48 | (datatype + r'(\s+)((external|public|internal|private)\s+)?' +
49 | r'([a-zA-Z_]\w*)',
50 | bygroups(Keyword.Type, None, None, None, Text.WhiteSpace, Keyword,
51 | None, Name.Variable)),
52 | (r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)',
53 | bygroups(Keyword.Type, Text.WhiteSpace, Name.Variable)),
54 | (r'\b(msg|block|tx)\.([A-Za-z_][A-Za-z0-9_]*)\b', Keyword),
55 | (words((
56 | 'block', 'break', 'constant', 'constructor', 'continue',
57 | 'contract', 'do', 'else', 'external', 'false', 'for',
58 | 'function', 'if', 'import', 'inherited', 'internal', 'is',
59 | 'library', 'mapping', 'memory', 'modifier', 'msg', 'new',
60 | 'payable', 'private', 'public', 'require', 'return',
61 | 'returns', 'struct', 'suicide', 'throw', 'this', 'true',
62 | 'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'),
63 | Keyword.Type),
64 | (words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin),
65 | (datatype, Keyword.Type),
66 | include('constants'),
67 | (r'[a-zA-Z_]\w*', Text),
68 | (r'[!<=>+*/-]', Operator),
69 | (r'[.;:{}(),\[\]]', Punctuation)
70 | ],
71 | 'comments': [
72 | (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
73 | (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
74 | (r'/(\\\n)?[*][\w\W]*', Comment.Multiline)
75 | ],
76 | 'constants': [
77 | (r'("([\\]"|.)*?")', String.Double),
78 | (r"('([\\]'|.)*?')", String.Single),
79 | (r'\b0[xX][0-9a-fA-F]+\b', Number.Hex),
80 | (r'\b\d+\b', Number.Decimal),
81 | ],
82 | 'pragma': [
83 | include('whitespace'),
84 | include('comments'),
85 | (r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)',
86 | bygroups(Operator, Text.WhiteSpace, Keyword)),
87 | (r';', Punctuation, '#pop')
88 | ],
89 | 'whitespace': [
90 | (r'\s+', Text.WhiteSpace),
91 | (r'\n', Text.WhiteSpace)
92 | ]
93 | }
94 |
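A short usage sketch (an editor's illustration, not part of the file): the 'pragma' state is entered at `pragma solidity` and popped at the terminating semicolon, while the `datatype` regex above covers bare and sized types such as `uint256`. Assuming Pygments 2.5+ with this lexer installed:

    from pygments import highlight
    from pygments.lexers import SolidityLexer
    from pygments.formatters import HtmlFormatter

    src = (
        "pragma solidity ^0.5.0;\n"
        "contract Greeter {\n"
        "    string public greeting;\n"
        "}\n"
    )
    print(highlight(src, SolidityLexer(), HtmlFormatter()))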
--------------------------------------------------------------------------------
/pygments/lexers/special.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.special
4 | ~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Special lexers.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import Lexer
15 | from pygments.token import Token, Error, Text
16 | from pygments.util import get_choice_opt, text_type, BytesIO
17 |
18 |
19 | __all__ = ['TextLexer', 'RawTokenLexer']
20 |
21 |
22 | class TextLexer(Lexer):
23 | """
24 | "Null" lexer, doesn't highlight anything.
25 | """
26 | name = 'Text only'
27 | aliases = ['text']
28 | filenames = ['*.txt']
29 | mimetypes = ['text/plain']
30 | priority = 0.01
31 |
32 | def get_tokens_unprocessed(self, text):
33 | yield 0, Text, text
34 |
35 | def analyse_text(text):  # note: no 'self'; Pygments calls this on the class when guessing
36 | return TextLexer.priority
37 |
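# Editor's aside (not part of the original file): the small nonzero value
# returned by analyse_text() above is what makes TextLexer the fallback for
# lexer guessing. A minimal sketch, assuming a standard Pygments install:
#
#     from pygments.lexers import guess_lexer
#     lexer = guess_lexer('plain prose, nothing resembling code')
#     print(lexer.name)  # 'Text only' unless another lexer scores above 0.01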
38 |
39 | _ttype_cache = {}  # token-type string -> Token subtype, shared across RawTokenLexer instances
40 |
41 | line_re = re.compile(b'.*?\n')  # one input line at a time, newline included
42 |
43 |
44 | class RawTokenLexer(Lexer):
45 | """
46 | Recreate a token stream formatted with the `RawTokenFormatter`. This
47 | lexer raises exceptions during parsing if the token stream in the
48 | file is malformed.
49 |
50 | Additional options accepted:
51 |
52 | `compress`
53 | If set to ``"gz"`` or ``"bz2"``, decompress the token stream with
54 | the given compression algorithm before lexing (default: ``""``).
55 | """
56 | name = 'Raw token data'
57 | aliases = ['raw']
58 | filenames = []
59 | mimetypes = ['application/x-pygments-tokens']
60 |
61 | def __init__(self, **options):
62 | self.compress = get_choice_opt(options, 'compress',
63 | ['', 'none', 'gz', 'bz2'], '')
64 | Lexer.__init__(self, **options)
65 |
66 | def get_tokens(self, text):
67 | if isinstance(text, text_type):
68 | # raw token stream never has any non-ASCII characters
69 | text = text.encode('ascii')
70 | if self.compress == 'gz':
71 | import gzip
72 | gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text))
73 | text = gzipfile.read()
74 | elif self.compress == 'bz2':
75 | import bz2
76 | text = bz2.decompress(text)
77 |
78 | # do not call Lexer.get_tokens() because we do not want Unicode
79 | # decoding to occur, and stripping is not optional.
80 | text = text.strip(b'\n') + b'\n'
81 | for i, t, v in self.get_tokens_unprocessed(text):
82 | yield t, v
83 |
84 | def get_tokens_unprocessed(self, text):
85 | length = 0
86 | for match in line_re.finditer(text):
87 | try:
88 | ttypestr, val = match.group().split(b'\t', 1)
89 | except ValueError:
90 | val = match.group().decode('ascii', 'replace')
91 | ttype = Error
92 | else:
93 | ttype = _ttype_cache.get(ttypestr)
94 | if not ttype:
95 | ttype = Token
96 | ttypes = ttypestr.decode('ascii').split('.')[1:]  # decode first: bytes cannot be split with a str separator on Python 3
97 | for ttype_ in ttypes:
98 | if not ttype_ or not ttype_[0].isupper():
99 | raise ValueError('malformed token name')
100 | ttype = getattr(ttype, ttype_)
101 | _ttype_cache[ttypestr] = ttype
102 | val = (val[2:-2] if val[:1] == b'u' else val[1:-2]).decode('unicode-escape')  # accept both u'...' (Python 2 repr) and '...' (Python 3 repr)
103 | yield length, ttype, val
104 | length += len(val)
105 |
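To exercise the round trip this lexer exists for, here is a hedged sketch (not part of the file, assuming a standard Pygments install): serialize a token stream with RawTokenFormatter, optionally gzip-compressed as described in the `compress` option above, then feed the bytes back through RawTokenLexer:

    from pygments import highlight
    from pygments.lexers import PythonLexer, RawTokenLexer
    from pygments.formatters import RawTokenFormatter

    # one "Token.Type<TAB>repr(value)" pair per line, gzip-compressed
    raw = highlight('print("hi")\n', PythonLexer(),
                    RawTokenFormatter(compress='gz'))
    for ttype, value in RawTokenLexer(compress='gz').get_tokens(raw):
        print(ttype, repr(value))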
--------------------------------------------------------------------------------
/pygments/lexers/supercollider.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pygments.lexers.supercollider
4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 |
6 | Lexer for SuperCollider.
7 |
8 | :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
9 | :license: BSD, see LICENSE for details.
10 | """
11 |
12 | import re
13 |
14 | from pygments.lexer import RegexLexer, include, words, default
15 | from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
16 | Number, Punctuation
17 |
18 | __all__ = ['SuperColliderLexer']
19 |
20 |
21 | class SuperColliderLexer(RegexLexer):
22 | """
23 | For `SuperCollider <http://supercollider.github.io/>`_ source code.
24 |
25 | .. versionadded:: 2.1
26 | """
27 |
28 | name = 'SuperCollider'
29 | aliases = ['sc', 'supercollider']
30 | filenames = ['*.sc', '*.scd']
31 | mimetypes = ['application/supercollider', 'text/supercollider']
32 |
33 | flags = re.DOTALL | re.MULTILINE
34 | tokens = {
35 | 'commentsandwhitespace': [
36 | (r'\s+', Text),
37 | (r'