├── .gitattributes
├── .gitignore
├── .readthedocs.yml
├── LICENSE
├── README.md
├── docs
├── conf.py
├── index.md
├── license.md
├── reference.md
├── requirements.txt
└── usage.md
├── examples
├── .gitignore
├── execute_script.py
├── historical_data.py
└── parse_dump_unparse.py
├── pyproject.toml
├── src
└── pynescript
│ ├── __about__.py
│ ├── __init__.py
│ ├── __main__.py
│ ├── ast
│ ├── __init__.py
│ ├── __main__.py
│ ├── builder.py
│ ├── collector.py
│ ├── error.py
│ ├── evaluator.py
│ ├── grammar
│ │ ├── __init__.py
│ │ ├── antlr4
│ │ │ ├── __init__.py
│ │ │ ├── error_listener.py
│ │ │ ├── generated
│ │ │ │ ├── PinescriptLexer.interp
│ │ │ │ ├── PinescriptLexer.py
│ │ │ │ ├── PinescriptLexer.tokens
│ │ │ │ ├── PinescriptLexerBase.py
│ │ │ │ ├── PinescriptParser.interp
│ │ │ │ ├── PinescriptParser.py
│ │ │ │ ├── PinescriptParser.tokens
│ │ │ │ ├── PinescriptParserBase.py
│ │ │ │ ├── PinescriptParserListener.py
│ │ │ │ ├── PinescriptParserVisitor.py
│ │ │ │ └── __init__.py
│ │ │ ├── lexer.py
│ │ │ ├── listener.py
│ │ │ ├── parser.py
│ │ │ ├── resource
│ │ │ │ ├── PinescriptLexer.g4
│ │ │ │ ├── PinescriptLexerBase.py
│ │ │ │ ├── PinescriptParser.g4
│ │ │ │ └── PinescriptParserBase.py
│ │ │ ├── tool
│ │ │ │ ├── __init__.py
│ │ │ │ └── generate.py
│ │ │ └── visitor.py
│ │ └── asdl
│ │ │ ├── __init__.py
│ │ │ ├── generated
│ │ │ ├── PinescriptASTNode.py
│ │ │ └── __init__.py
│ │ │ ├── resource
│ │ │ └── Pinescript.asdl
│ │ │ └── tool
│ │ │ ├── __init__.py
│ │ │ ├── asdlgen.py
│ │ │ └── generate.py
│ ├── helper.py
│ ├── node.py
│ ├── transformer.py
│ ├── unparser.py
│ └── visitor.py
│ ├── ext
│ ├── __init__.py
│ ├── nautilus_trader
│ │ ├── __init__.py
│ │ └── strategy.py
│ └── pygments
│ │ ├── __init__.py
│ │ └── lexers.py
│ ├── py.typed
│ └── util
│ ├── __init__.py
│ ├── itertools.py
│ └── pine_facade.py
└── tests
├── __init__.py
├── conftest.py
├── data
└── builtin_scripts
│ └── .gitignore
└── test_parse_and_unparse.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | * text=auto eol=lf
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | coverage.lcov
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | cover/
54 | coverage/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 | docs/apidoc/
76 |
77 | # PyBuilder
78 | .pybuilder/
79 | target/
80 |
81 | # Jupyter Notebook
82 | .ipynb_checkpoints
83 |
84 | # IPython
85 | profile_default/
86 | ipython_config.py
87 |
88 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
89 | __pypackages__/
90 |
91 | # Celery stuff
92 | celerybeat-schedule
93 | celerybeat.pid
94 |
95 | # SageMath parsed files
96 | *.sage.py
97 |
98 | # Environments
99 | .env
100 | .venv
101 | env/
102 | venv/
103 | ENV/
104 | env.bak/
105 | venv.bak/
106 |
107 | # Spyder project settings
108 | .spyderproject
109 | .spyproject
110 |
111 | # Rope project settings
112 | .ropeproject
113 |
114 | # mkdocs documentation
115 | /site
116 |
117 | # mypy
118 | .mypy_cache/
119 | .dmypy.json
120 | dmypy.json
121 |
122 | # ruff
123 | .ruff_cache/
124 |
125 | # Pyre type checker
126 | .pyre/
127 |
128 | # pytype static type analyzer
129 | .pytype/
130 |
131 | # Cython debug symbols
132 | cython_debug/
133 |
134 | # PyCharm
135 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
136 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
137 | # and can be added to the global gitignore or merged into this file. For a more nuclear
138 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
139 | #.idea/
140 |
141 | # ANTLR4
142 | .antlr/
143 |
144 | # Others
145 | .ignore/
146 | .pypirc
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | build:
3 | os: ubuntu-20.04
4 | tools:
5 | python: "3.10"
6 | sphinx:
7 | configuration: docs/conf.py
8 | formats: all
9 | python:
10 | install:
11 | - requirements: docs/requirements.txt
12 | - path: .
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Pynescript
2 |
3 | [][pypi_]
4 | [][status]
5 | [][python version]
6 | [][license]
7 |
8 | [][read the docs]
9 | [][tests]
10 | [][codecov]
11 |
12 | [pypi_]: https://pypi.org/project/pynescript/
13 | [status]: https://pypi.org/project/pynescript/
14 | [python version]: https://pypi.org/project/pynescript
15 | [read the docs]: https://pynescript.readthedocs.io/
16 | [tests]: https://github.com/elbakramer/pynescript/actions?workflow=Tests
17 | [codecov]: https://app.codecov.io/gh/elbakramer/pynescript
18 |
19 | ## Features
20 |
21 | Handle [Pinescript] using [Python]
22 |
23 | - Parse Pinescript code into AST
24 | - Dump parsed AST
25 | - Unparse parsed AST back to Pinescript code
26 |
27 | Given an example pinescript with name of `rsi_strategy.pine`:
28 |
29 | ```pinescript
30 | //@version=5
31 | strategy("RSI Strategy", overlay=true)
32 | length = input( 14 )
33 | overSold = input( 30 )
34 | overBought = input( 70 )
35 | price = close
36 | vrsi = ta.rsi(price, length)
37 | co = ta.crossover(vrsi, overSold)
38 | cu = ta.crossunder(vrsi, overBought)
39 | if (not na(vrsi))
40 | if (co)
41 | strategy.entry("RsiLE", strategy.long, comment="RsiLE")
42 | if (cu)
43 | strategy.entry("RsiSE", strategy.short, comment="RsiSE")
44 | //plot(strategy.equity, title="equity", color=color.red, linewidth=2, style=plot.style_areabr)
45 | ```
46 |
47 | Parsing script into AST and dumping it:
48 |
49 | ```console
50 | $ pynescript parse-and-dump rsi_strategy.pine
51 | ```
52 |
53 | This gives output like:
54 |
55 | ```python
56 | Script(
57 | body=[
58 | Expr(
59 | value=Call(
60 | func=Name(id='strategy', ctx=Load()),
61 | args=[
62 | Arg(
63 | value=Constant(value='RSI Strategy')),
64 | Arg(
65 | value=Constant(value=True),
66 | name='overlay')])),
67 | Assign(
68 | target=Name(id='length', ctx=Store()),
69 | value=Call(
70 | func=Name(id='input', ctx=Load()),
71 | args=[
72 | Arg(
73 | value=Constant(value=14))]),
74 | annotations=[]),
75 | ...
76 | ```
77 |
78 |
79 | The full AST dump, which is quite long:
80 |
81 | ```python
82 | Script(
83 | body=[
84 | Expr(
85 | value=Call(
86 | func=Name(id='strategy', ctx=Load()),
87 | args=[
88 | Arg(
89 | value=Constant(value='RSI Strategy')),
90 | Arg(
91 | value=Constant(value=True),
92 | name='overlay')])),
93 | Assign(
94 | target=Name(id='length', ctx=Store()),
95 | value=Call(
96 | func=Name(id='input', ctx=Load()),
97 | args=[
98 | Arg(
99 | value=Constant(value=14))]),
100 | annotations=[]),
101 | Assign(
102 | target=Name(id='overSold', ctx=Store()),
103 | value=Call(
104 | func=Name(id='input', ctx=Load()),
105 | args=[
106 | Arg(
107 | value=Constant(value=30))]),
108 | annotations=[]),
109 | Assign(
110 | target=Name(id='overBought', ctx=Store()),
111 | value=Call(
112 | func=Name(id='input', ctx=Load()),
113 | args=[
114 | Arg(
115 | value=Constant(value=70))]),
116 | annotations=[]),
117 | Assign(
118 | target=Name(id='price', ctx=Store()),
119 | value=Name(id='close', ctx=Load()),
120 | annotations=[]),
121 | Assign(
122 | target=Name(id='vrsi', ctx=Store()),
123 | value=Call(
124 | func=Attribute(
125 | value=Name(id='ta', ctx=Load()),
126 | attr='rsi',
127 | ctx=Load()),
128 | args=[
129 | Arg(
130 | value=Name(id='price', ctx=Load())),
131 | Arg(
132 | value=Name(id='length', ctx=Load()))]),
133 | annotations=[]),
134 | Assign(
135 | target=Name(id='co', ctx=Store()),
136 | value=Call(
137 | func=Attribute(
138 | value=Name(id='ta', ctx=Load()),
139 | attr='crossover',
140 | ctx=Load()),
141 | args=[
142 | Arg(
143 | value=Name(id='vrsi', ctx=Load())),
144 | Arg(
145 | value=Name(id='overSold', ctx=Load()))]),
146 | annotations=[]),
147 | Assign(
148 | target=Name(id='cu', ctx=Store()),
149 | value=Call(
150 | func=Attribute(
151 | value=Name(id='ta', ctx=Load()),
152 | attr='crossunder',
153 | ctx=Load()),
154 | args=[
155 | Arg(
156 | value=Name(id='vrsi', ctx=Load())),
157 | Arg(
158 | value=Name(id='overBought', ctx=Load()))]),
159 | annotations=[]),
160 | Expr(
161 | value=If(
162 | test=UnaryOp(
163 | op=Not(),
164 | operand=Call(
165 | func=Name(id='na', ctx=Load()),
166 | args=[
167 | Arg(
168 | value=Name(id='vrsi', ctx=Load()))])),
169 | body=[
170 | Expr(
171 | value=If(
172 | test=Name(id='co', ctx=Load()),
173 | body=[
174 | Expr(
175 | value=Call(
176 | func=Attribute(
177 | value=Name(id='strategy', ctx=Load()),
178 | attr='entry',
179 | ctx=Load()),
180 | args=[
181 | Arg(
182 | value=Constant(value='RsiLE')),
183 | Arg(
184 | value=Attribute(
185 | value=Name(id='strategy', ctx=Load()),
186 | attr='long',
187 | ctx=Load())),
188 | Arg(
189 | value=Constant(value='RsiLE'),
190 | name='comment')]))],
191 | orelse=[])),
192 | Expr(
193 | value=If(
194 | test=Name(id='cu', ctx=Load()),
195 | body=[
196 | Expr(
197 | value=Call(
198 | func=Attribute(
199 | value=Name(id='strategy', ctx=Load()),
200 | attr='entry',
201 | ctx=Load()),
202 | args=[
203 | Arg(
204 | value=Constant(value='RsiSE')),
205 | Arg(
206 | value=Attribute(
207 | value=Name(id='strategy', ctx=Load()),
208 | attr='short',
209 | ctx=Load())),
210 | Arg(
211 | value=Constant(value='RsiSE'),
212 | name='comment')]))],
213 | orelse=[]))],
214 | orelse=[]))],
215 | annotations=[
216 | '//@version=5'])
217 | ```
218 |
219 |
220 |
221 | Parsing into AST and unparsing it back:
222 |
223 | ```console
224 | $ pynescript parse-and-unparse rsi_strategy.pine
225 | ```
226 |
227 | This gives the following (with some differences in syntax, such as spacing):
228 |
229 | ```pinescript
230 | //@version=5
231 | strategy("RSI Strategy", overlay=true)
232 | length = input(14)
233 | overSold = input(30)
234 | overBought = input(70)
235 | price = close
236 | vrsi = ta.rsi(price, length)
237 | co = ta.crossover(vrsi, overSold)
238 | cu = ta.crossunder(vrsi, overBought)
239 | if not na(vrsi)
240 | if co
241 | strategy.entry("RsiLE", strategy.long, comment="RsiLE")
242 | if cu
243 | strategy.entry("RsiSE", strategy.short, comment="RsiSE")
244 | ```
245 |
246 | ## Requirements
247 |
248 | - Python 3.10 or higher
249 |
250 | ## Installation
251 |
252 | You can install _Pynescript_ via [pip] from [PyPI]:
253 |
254 | ```console
255 | $ pip install pynescript
256 | ```
257 |
258 | ## Usage
259 |
260 | Please see the [Usage][usage] for details.
261 |
262 | ## License
263 |
264 | Distributed under the terms of the [LGPL 3.0 license][license],
265 | _Pynescript_ is free and open source software.
266 |
267 | ## Issues
268 |
269 | If you encounter any problems,
270 | please [file an issue] along with a detailed description.
271 |
272 | [pinescript]: https://www.tradingview.com/pine-script-docs/en/v5/Introduction.html
273 | [python]: https://www.python.org/
274 |
275 | [pip]: https://pip.pypa.io/
276 | [pypi]: https://pypi.org/
277 |
278 | [file an issue]: https://github.com/elbakramer/pynescript/issues
279 |
280 |
281 |
282 | [license]: https://github.com/elbakramer/pynescript/blob/main/LICENSE
283 | [usage]: https://pynescript.readthedocs.io/en/latest/usage.html
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import shutil
4 |
5 | from pathlib import Path
6 |
7 | from sphinx.application import Sphinx
8 | from sphinx.ext.apidoc import main as sphinx_apidoc_main
9 |
10 |
# Sphinx project metadata.
project = "Pynescript"
author = "Yunseong Hwang"
copyright = "2024, Yunseong Hwang" # noqa: A001
# Enabled Sphinx extensions: autodoc for API docs from docstrings, napoleon
# for Google/NumPy docstring styles, sphinx-click for CLI docs, and
# myst-parser for Markdown sources.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "sphinx_click",
    "myst_parser",
]
# Render type hints in the parameter descriptions rather than signatures.
autodoc_typehints = "description"
html_theme = "furo"
22 |
23 |
def run_apidoc(_) -> None:
    """Regenerate the API reference under ``docs/apidoc`` via sphinx-apidoc.

    Connected to the ``builder-inited`` event, so it runs at the start of
    every Sphinx build; the unused positional argument is the Sphinx app.
    """
    docs_dir = Path(__file__).parent
    project_dir = docs_dir.parent

    output_path = project_dir / "docs/apidoc"
    module_path = project_dir / "src/pynescript"

    # Start from a clean slate so pages for deleted modules do not linger.
    if output_path.exists():
        shutil.rmtree(output_path)

    sphinx_apidoc_main(
        [
            "--force",
            "--separate",
            "--ext-autodoc",
            "--output-dir",
            str(output_path),
            str(module_path),
        ]
    )
46 |
47 |
def setup(app: Sphinx) -> None:
    """Sphinx extension entry point: regenerate API docs on each build start."""
    app.connect("builder-inited", run_apidoc)
50 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | ```{include} ../README.md
2 | ---
3 | end-before:
4 | ---
5 | ```
6 |
7 | [license]: license
8 | [usage]: usage
9 |
10 | ```{toctree}
11 | ---
12 | hidden:
13 | maxdepth: 1
14 | ---
15 |
16 | usage
17 | reference
18 | License
19 | Changelog
20 | ```
21 |
--------------------------------------------------------------------------------
/docs/license.md:
--------------------------------------------------------------------------------
1 | # License
2 |
3 | ```{literalinclude} ../LICENSE
4 | ---
5 | language: none
6 | ---
7 | ```
8 |
--------------------------------------------------------------------------------
/docs/reference.md:
--------------------------------------------------------------------------------
1 | # Reference
2 |
3 | ## pynescript
4 |
5 | ```{toctree}
6 | apidoc/modules
7 | ```
8 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | furo==2024.1.29
2 | myst-parser==2.0.0
3 | sphinx==7.2.6
4 | sphinx-click==5.1.0
--------------------------------------------------------------------------------
/docs/usage.md:
--------------------------------------------------------------------------------
1 | # Usage
2 |
3 | ## Library
4 |
5 | Simple parse, dump, unparse
6 |
7 | ```{literalinclude} ../examples/parse_dump_unparse.py
8 | ---
9 | language: python
10 | ---
11 | ```
12 |
13 | Traversing parsed AST nodes
14 |
15 | ```{literalinclude} ../examples/execute_script.py
16 | ---
17 | language: python
18 | ---
19 | ```
20 |
21 | ## Cli
22 |
23 | ```{eval-rst}
24 | .. click:: pynescript.__main__:cli
25 | :prog: pynescript
26 | :nested: full
27 | ```
28 |
--------------------------------------------------------------------------------
/examples/.gitignore:
--------------------------------------------------------------------------------
1 | *.csv
--------------------------------------------------------------------------------
/examples/execute_script.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import itertools
4 |
5 | from collections.abc import Sequence
6 | from dataclasses import dataclass
7 | from enum import Enum
8 | from typing import ClassVar
9 | from typing import Generic
10 | from typing import TypeAlias
11 | from typing import TypeVar
12 |
13 | from historical_data import hist
14 | from pandas import DataFrame
15 |
16 | from pynescript import ast
17 | from pynescript.ast import NodeVisitor
18 | from pynescript.ast import parse
19 |
20 |
21 | T = TypeVar("T")
22 |
23 |
class series(Generic[T]):
    """Pine-style series: index 0 is the current (most recent) value.

    Values are appended in chronological order, so logical offset ``i``
    maps to physical list index ``-1 - i``, and slices walk backwards
    from the newest value by default.
    """

    data: list[T] | T

    def __init__(self, data: Sequence[T] | None = None):
        # Copy the input so mutating the caller's sequence has no effect.
        self.data = list(data) if data is not None else []

    def __getitem__(self, item):
        if isinstance(item, int):
            # Translate the Pine offset into a from-the-end list index.
            item = -1 - item
            return self.data[item]
        if isinstance(item, slice):
            # Mirror each slice bound; a missing step walks backwards.
            start = -1 - item.start if item.start is not None else None
            stop = -1 - item.stop if item.stop is not None else None
            step = -item.step if item.step is not None else -1
            return self.data[start:stop:step]
        # Was a bare ValueError(); include the offending index for diagnosis.
        raise ValueError(f"invalid series index: {item!r}")

    def set(self, item):
        """Overwrite the current bar's value (unwrapping a series argument)."""
        if isinstance(item, series):
            item = item[0]
        self.data[-1] = item

    def add(self, item):
        """Append a new bar with the given value."""
        self.data.append(item)

    def extend(self, items):
        """Append several bars at once."""
        self.data.extend(items)
51 |
52 |
class simple(series[T]):
    """A scalar presented through the `series` interface.

    Every historical offset yields the same single value, so a `simple`
    can be passed anywhere a `series` is expected.
    """

    def __init__(self, value: T | None = None):
        self.data = value

    def __getitem__(self, item):
        if isinstance(item, int):
            return self.data
        if isinstance(item, slice):
            # A slice of a constant is the constant repeated lazily.
            return itertools.islice(itertools.repeat(self.data), item.start, item.stop, item.step)
        # Was a bare ValueError(); include the offending index for diagnosis.
        raise ValueError(f"invalid simple index: {item!r}")

    def set(self, item):
        """Replace the held value (unwrapping a series argument)."""
        if isinstance(item, series):
            item = item[0]
        self.data = item

    def add(self, item):
        # A scalar has no history; adding simply replaces the value.
        self.data = item

    def extend(self, items):
        # Keep only the first of the supplied values.
        self.data = items[0]
74 |
75 |
class const(simple[T]):
    """Pine "const" qualifier: a value fixed at compile time; behaves like `simple`."""

    pass
78 |
79 |
class source(series[T]):
    """Pine "source" qualifier: a built-in price series such as `close`."""

    pass
82 |
83 |
# Where a plot may be shown, mirroring Pine's `display.*` constants.
# Built with the Enum functional API; member values are 1..6 as before.
plot_display = Enum(
    "plot_display",
    ["all", "data_window", "none", "pane", "price_scale", "status_line"],
)
91 |
92 |
class input(simple[T]):
    """Emulates Pine's `input()`: a user-configurable constant.

    Only `defval` affects execution; the remaining arguments are UI
    metadata and are stored untouched.
    """

    def __init__(
        self,
        defval: const[T] | source[T],
        title: const[str] | None = None,
        tooltip: const[str] | None = None,
        inline: const[str] | None = None,
        group: const[str] | None = None,
        display: const[plot_display] | None = None,
    ):
        self.defval, self.title, self.tooltip = defval, title, tooltip
        self.inline, self.group, self.display = inline, group, display

        # Initialise the held value from the default.
        self.set(self.defval)
111 |
112 |
# Pine exposes the display constants under the builtin name `display`.
display = plot_display
114 |
115 |
class ta:
    """Minimal stand-ins for Pine's `ta.*` technical-analysis namespace."""

    @classmethod
    def rsi(cls, source: series[int] | series[float], length: simple[int]) -> series[float]:
        import pandas as pd
        import ta

        # Take the most recent `length` values, restored to chronological order.
        window = length[0]
        values = pd.Series(source[:window][::-1])

        return [ta.momentum.rsi(values, window).iloc[-1]]

    @classmethod
    def crossover(cls, source1: series[int] | series[float], source2: series[int] | series[float]) -> series[bool]:
        # Crossed above: higher now, but not higher on the previous bar.
        # The previous bar is only inspected when the first test holds.
        if source1[0] > source2[0]:
            return [source1[1] <= source2[1]]
        return [False]

    @classmethod
    def crossunder(cls, source1: series[int] | series[float], source2: series[int] | series[float]) -> series[bool]:
        # Crossed below: lower now, but not lower on the previous bar.
        if source1[0] < source2[0]:
            return [source1[1] >= source2[1]]
        return [False]
135 |
136 |
# Trade-direction constants used by `strategy.entry`.
# Built with the Enum functional API; values are 1 and 2 as before.
strategy_direction = Enum("strategy_direction", ["long", "short"])
140 |
141 |
# Pine's `void` type: the "return type" of statements that produce no value.
void: TypeAlias = None
143 |
144 |
# Price-scale placement options for a script's plots.
# Built with the Enum functional API; values are 1..3 as before.
scale_type = Enum("scale_type", ["right", "left", "none"])
149 |
150 |
# Pine exposes the scale constants under the builtin name `scale`.
scale = scale_type
152 |
153 |
@dataclass
class strategy:
    """Emulates Pine's `strategy()` declaration statement.

    Instantiating it records the script's strategy properties; the class
    attributes mirror Pine's `strategy.*` constants, and the nested
    `entry` dataclass emulates a `strategy.entry()` order request.
    """

    title: const[str]
    shorttitle: const[str] | None = None
    overlay: const[bool] | None = None
    format: const[str] | None = None
    precision: const[int] | None = None
    scale: const[scale_type] | None = None
    pyramiding: const[int] | None = None
    calc_on_order_fills: const[bool] | None = None
    # NOTE(review): name is a typo of Pine's `calc_on_every_tick`; kept as-is
    # because renaming would break existing keyword-argument callers.
    cacl_on_every_tick: const[bool] | None = None
    max_bars_back: const[int] | None = None
    backtest_fill_limits_assumption: const[int] | None = None
    default_qty_type: const[str] | None = None
    default_qty_value: const[int] | const[float] | None = None
    initial_capital: const[int] | const[float] | None = None
    currency: const[str] | None = None
    slippage: const[int] | None = None
    commission_type: const[str] | None = None
    # NOTE(review): name is a typo of Pine's `commission_value`; kept as-is
    # for the same compatibility reason.
    commition_value: const[int] | const[float] | None = None
    process_orders_on_close: const[bool] | None = None
    close_entries_rule: const[str] | None = None
    margin_long: const[int] | const[float] | None = None
    margin_short: const[int] | const[float] | None = None
    explicit_plot_zorder: const[bool] | None = None
    max_lines_count: const[int] | None = None
    max_labels_count: const[int] | None = None
    max_boxes_count: const[int] | None = None
    risk_free_rate: const[int] | const[float] | None = None
    use_bar_magnifier: const[bool] | None = None
    fill_orders_on_standard_ohlc: const[bool] | None = None
    max_polylines_count: const[int] | None = None

    # Direction constants: `strategy.long` / `strategy.short`.
    long: ClassVar = strategy_direction.long
    short: ClassVar = strategy_direction.short

    # Default-quantity-type constants for `default_qty_type`.
    fixed: ClassVar = "fixed"
    cash: ClassVar = "cash"
    percent_of_equity: ClassVar = "percent_of_equity"

    @dataclass
    class entry:
        """A `strategy.entry()` order request captured as plain data."""

        id: series[str]
        direction: series[strategy_direction]
        qty: series[int] | series[float] | None = None
        limit: series[int] | series[float] | None = None
        stop: series[int] | series[float] | None = None
        oca_name: series[str] | None = None
        # NOTE(review): typed `input[str]` unlike the sibling series fields —
        # possibly unintended; confirm against the Pine signature.
        oca_type: input[str] | None = None
        comment: series[str] | None = None
        alert_message: series[str] | None = None
        disable_alert: series[bool] | None = None
206 |
207 |
class na_type:
    """Emulates Pine's `na`: callable as `na(x)` and comparable with `==`.

    NOTE: defining `__eq__` without `__hash__` makes instances unhashable.
    """

    def __call__(self, x: series[T]) -> series[bool]:
        # True when the series' current value is missing.
        return [x[0] is None]

    def __eq__(self, other):
        # Unwrap a series to its current value before comparing.
        current = other[0] if isinstance(other, series) else other
        return current is None or isinstance(current, na_type)
216 |
217 |
# Singleton instance mirroring Pine's builtin `na`.
na = na_type()
219 |
220 |
class ExampleScriptExecutor:
    """Minimal Pinescript strategy interpreter for the example.

    Parses a script once, then re-evaluates its AST for every bar of the
    supplied price history while tracking a single net position and a cash
    balance, printing each simulated order and the final result.
    """

    class Visitor(NodeVisitor):
        """Evaluates AST nodes against the owning executor's runtime state."""

        def __init__(self, executor: ExampleScriptExecutor):
            self.executor = executor

        def visit_Name(self, node: ast.Name):
            # Name resolution order: declared variables (scopes searched
            # outermost-first), then builtins, then data sources.  Unknown
            # names are returned unresolved.
            if isinstance(node.ctx, ast.Load):
                if self.executor.scopes:
                    for scope in self.executor.scopes:
                        if node.id in scope:
                            node_store = scope[node.id]
                            return self.executor.nodes[node_store]
                if node.id in self.executor.builtins:
                    return self.executor.builtins[node.id]
                if node.id in self.executor.sources:
                    return self.executor.sources[node.id]
            return node

        def visit_Attribute(self, node: ast.Attribute):
            # Attribute loads resolve the base object, then use plain getattr.
            if isinstance(node.ctx, ast.Load):
                value = self.visit(node.value)
                return getattr(value, node.attr)
            return node

        def visit_Constant(self, node: ast.Constant):
            return const(node.value)

        def visit_Call(self, node: ast.Call):
            """Evaluate a call; interpret strategy declaration/entry results."""
            func = self.visit(node.func)

            args = []
            kwargs = {}

            # Positional arguments may not follow named ones.
            found_has_name = False
            for arg in node.args:
                if arg.name:
                    found_has_name = True
                    kwargs[arg.name] = self.visit(arg.value)
                elif found_has_name:
                    raise ValueError()
                else:
                    args.append(self.visit(arg.value))

            result = func(*args, **kwargs)

            if isinstance(result, strategy) and self.executor.declaration is None:
                # First strategy(...) call declares the script; fill defaults.
                if result.default_qty_type is None:
                    result.default_qty_type = strategy.fixed
                if result.default_qty_value is None:
                    result.default_qty_value = 1
                if result.initial_capital is None:
                    result.initial_capital = 1000000

                self.executor.declaration = result
                self.executor.cash = result.initial_capital
                print(f"initial cash: {self.executor.cash}")

            if isinstance(result, strategy.entry):
                # Fill price: prefer an explicit limit/stop price, falling
                # back to the current close.  (Fixed: the original evaluated
                # `result.limit[0] or result.stop[0]`, which indexed None
                # when only one of limit/stop was provided.)
                if result.limit is None and result.stop is None:
                    price = self.executor.sources["close"][0]
                else:
                    limit_price = result.limit[0] if result.limit is not None else None
                    stop_price = result.stop[0] if result.stop is not None else None
                    price = limit_price or stop_price

                if self.executor.position_size != 0:
                    # An opposite-direction entry first closes the open position.
                    if result.direction == strategy.long and self.executor.position_size < 0:
                        self.executor.cash += 2 * self.executor.position_amount + self.executor.position_size * price
                        print(
                            f"{self.executor.current_date}: action=exit direction=short price={price} quantity={-self.executor.position_size} cash={self.executor.cash}"
                        )
                        self.executor.position_size = 0
                        self.executor.position_amount = 0
                    elif result.direction == strategy.short and self.executor.position_size > 0:
                        self.executor.cash += self.executor.position_size * price
                        print(
                            f"{self.executor.current_date}: action=exit direction=long price={price} quantity={self.executor.position_size} cash={self.executor.cash}"
                        )
                        self.executor.position_size = 0
                        self.executor.position_amount = 0

                # Order size: explicit qty wins, otherwise derive it from the
                # declared default sizing mode.
                if result.qty is not None:
                    quantity = result.qty[0]
                elif self.executor.declaration.default_qty_type == strategy.fixed:
                    quantity = self.executor.declaration.default_qty_value
                elif self.executor.declaration.default_qty_type == strategy.cash:
                    cash = self.executor.declaration.default_qty_value
                    quantity = cash // price
                elif self.executor.declaration.default_qty_type == strategy.percent_of_equity:
                    percent = self.executor.declaration.default_qty_value / 100
                    cash = self.executor.cash * percent
                    quantity = cash // price
                else:
                    raise ValueError()

                cash_amount = price * quantity

                # Open the position only if affordable and not already open
                # in the same direction.
                if self.executor.cash > cash_amount:
                    if result.direction == strategy.long and not self.executor.position_size > 0:
                        self.executor.cash -= cash_amount
                        print(
                            f"{self.executor.current_date}: action=enter direction=long price={price} quantity={quantity} cash={self.executor.cash}"
                        )
                        self.executor.position_size = +quantity
                        self.executor.position_amount = cash_amount
                    elif result.direction == strategy.short and not self.executor.position_size < 0:
                        self.executor.cash -= cash_amount
                        print(
                            f"{self.executor.current_date}: action=enter direction=short price={price} quantity={quantity} cash={self.executor.cash}"
                        )
                        self.executor.position_size = -quantity
                        self.executor.position_amount = cash_amount

            return result

        def visit_Assign(self, node: ast.Assign):
            # Each assignment target gets a persistent series keyed by the
            # AST node object itself (the tree is parsed once and re-walked
            # per bar, so node identity is stable across bars).
            if node.target not in self.executor.nodes:
                self.executor.nodes[node.target] = series([None])

            value = self.visit(node.value)

            # An input(...) assignment may be overridden by externally
            # supplied inputs, keyed by the assigned variable name.
            if (
                isinstance(value, input)
                and isinstance(node.target, ast.Name)
                and node.target.id in self.executor.inputs
            ):
                value.set(self.executor.inputs[node.target.id])

            self.executor.nodes[node.target].set(value[0])

            if isinstance(node.target, ast.Name):
                self.executor.scopes[-1][node.target.id] = node.target

        def visit_Expr(self, node: ast.Expr):
            return self.visit(node.value)

        def visit_UnaryOp(self, node: ast.UnaryOp):
            # Results are wrapped in single-element lists to look like series.
            if isinstance(node.op, ast.Not):
                return [not self.visit(node.operand)[0]]
            if isinstance(node.op, ast.UAdd):
                return [+self.visit(node.operand)[0]]
            if isinstance(node.op, ast.USub):
                return [-self.visit(node.operand)[0]]
            raise ValueError()

        def visit_If(self, node: ast.If):
            # Each branch executes in its own (discarded) variable scope.
            if self.visit(node.test)[0]:
                self.executor.scopes.append({})
                for stmt in node.body:
                    self.visit(stmt)
                self.executor.scopes.pop()
            elif node.orelse:
                self.executor.scopes.append({})
                for stmt in node.orelse:
                    self.visit(stmt)
                self.executor.scopes.pop()

        def visit_Script(self, node: ast.Script):
            self.executor.scopes.append({})
            for stmt in node.body:
                self.visit(stmt)
            self.executor.scopes.pop()

    def __init__(self, script_source: str):
        self.tree = parse(script_source)
        self.visitor = self.Visitor(self)
        self.sources = {
            "close": source(),
        }
        self.builtins = {
            "strategy": strategy,
            "input": input,
            "ta": ta,
            "na": na,
        }
        self.inputs = {}
        self.declaration = None
        self.nodes = {}
        self.scopes = []
        self.cash = 0
        self.position_size = 0
        self.position_amount = 0
        self.current_date = None

    def execute(self, data: DataFrame, inputs: dict | None = None):
        """Run the parsed script over ``data`` bar by bar and print results.

        ``inputs`` optionally overrides values produced by ``input(...)``
        calls, keyed by the assigned variable name.
        """
        if inputs:
            # Fixed: the original assigned to ``self.input`` (typo), so the
            # overrides were silently ignored by visit_Assign, which reads
            # ``self.inputs``.
            self.inputs = dict(inputs)
        for row in data.itertuples():
            self.current_date = row.Index
            self.sources["close"].add(row.Close)
            # Advance every stored series by one (initially empty) bar.
            for values in self.nodes.values():
                values.add(None)
            self.visitor.visit(self.tree)
        net_profit_percent = round((self.cash / self.declaration.initial_capital - 1) * 100, 2)
        print(f"final cash: {self.cash} ({'+' if net_profit_percent > 0 else ''}{net_profit_percent}%)")
416 |
417 |
418 | script_source = """
419 | //@version=5
420 | strategy("RSI Strategy", overlay=true)
421 | length = input( 14 )
422 | overSold = input( 30 )
423 | overBought = input( 70 )
424 | price = close
425 | vrsi = ta.rsi(price, length)
426 | co = ta.crossover(vrsi, overSold)
427 | cu = ta.crossunder(vrsi, overBought)
428 | if (not na(vrsi))
429 | if (co)
430 | strategy.entry("RsiLE", strategy.long, comment="RsiLE")
431 | if (cu)
432 | strategy.entry("RsiSE", strategy.short, comment="RsiSE")
433 | //plot(strategy.equity, title="equity", color=color.red, linewidth=2, style=plot.style_areabr)
434 | """
435 |
436 | executor = ExampleScriptExecutor(script_source)
437 | executor.execute(hist)
438 |
--------------------------------------------------------------------------------
/examples/historical_data.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pathlib import Path
4 |
5 | import pandas as pd
6 | import yfinance as yf
7 |
8 |
# Symbol to fetch and the local CSV cache for its daily history.
ticker = "TSLA"
filename = "tsla.csv"


def download_data():
    """Fetch the full daily history for ``ticker`` and cache it as CSV."""
    history = yf.Ticker(ticker).history(period="max", interval="1d")
    history.to_csv(filename)


def read_data():
    """Load the cached CSV if present; return None when it is missing."""
    if not Path(filename).exists():
        return None
    return pd.read_csv(filename, index_col=0, parse_dates=True)


hist = read_data()


if __name__ == "__main__":
    download_data()
31 |
--------------------------------------------------------------------------------
/examples/parse_dump_unparse.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pynescript.ast import dump
4 | from pynescript.ast import parse
5 | from pynescript.ast import unparse
6 |
7 |
# Sample Pinescript v5 strategy used to demonstrate the round trip
# source -> AST -> dump / unparse.
script_source = """
//@version=5
strategy("RSI Strategy", overlay=true)
length = input( 14 )
overSold = input( 30 )
overBought = input( 70 )
price = close
vrsi = ta.rsi(price, length)
co = ta.crossover(vrsi, overSold)
cu = ta.crossunder(vrsi, overBought)
if (not na(vrsi))
	if (co)
		strategy.entry("RsiLE", strategy.long, comment="RsiLE")
	if (cu)
		strategy.entry("RsiSE", strategy.short, comment="RsiSE")
//plot(strategy.equity, title="equity", color=color.red, linewidth=2, style=plot.style_areabr)
"""

# Parse once, then render the same tree both as an indented AST dump and as
# regenerated Pinescript source.
tree = parse(script_source)
tree_dump = dump(tree, indent=2)
tree_unparsed = unparse(tree)

print("DUMP:")
print(tree_dump)
print()

print("UNPARSED:")
print(tree_unparsed)
print()
37 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling"]
3 | build-backend = "hatchling.build"
4 |
5 | [project]
6 | name = "pynescript"
7 | dynamic = ["version"]
8 | description = "Handle Pinescript using Python"
9 | readme = "README.md"
10 | requires-python = ">=3.10"
11 | license = "LGPL-3.0-or-later"
12 | keywords = ["python", "tradingview", "pinescript"]
13 | authors = [
14 | { name = "Yunseong Hwang", email = "kika1492@gmail.com" },
15 | ]
16 | maintainers = [
17 | { name = "Yunseong Hwang", email = "kika1492@gmail.com" },
18 | ]
19 | classifiers = [
20 | "Development Status :: 2 - Pre-Alpha",
21 | "License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)",
22 | "Programming Language :: Python",
23 | "Programming Language :: Python :: 3.10",
24 | "Programming Language :: Python :: 3.11",
25 | "Programming Language :: Python :: 3.12",
26 | ]
27 | dependencies = [
28 | "antlr4-python3-runtime>=4.13.1",
29 | "click>=8.1.7",
30 | ]
31 |
32 | [project.urls]
33 | Documentation = "https://github.com/elbakramer/pynescript#readme"
34 | Issues = "https://github.com/elbakramer/pynescript/issues"
35 | Source = "https://github.com/elbakramer/pynescript"
36 |
37 | [project.scripts]
38 | pynescript = "pynescript.__main__:cli"
39 |
40 | [project.entry-points."pygments.lexers"]
41 | pinescript = "pynescript.ext.pygments.lexers:PinescriptLexer"
42 |
43 | [tool.hatch.version]
44 | path = "src/pynescript/__about__.py"
45 |
46 | [tool.hatch.envs.test]
47 | dependencies = [
48 | "coverage[toml]>=7.4.2",
49 | "pytest>=8.0.1",
50 | "pytest-cov>=4.1.0",
51 | "pytest-xdist>=3.5.0",
52 | ]
53 | [tool.hatch.envs.test.scripts]
54 | test = [
55 | "pytest -n auto -d {args:tests}",
56 | ]
57 | test-cov = [
58 | "pytest -n auto -d --cov=src/pynescript --cov=tests --cov-report=term --cov-report=lcov {args:tests}",
59 | ]
60 | coverage = [
61 | "coverage run -m pytest {args:tests}",
62 | "coverage combine",
63 | "coverage lcov",
64 | "coverage report",
65 | ]
66 |
67 | [[tool.hatch.envs.test.matrix]]
68 | python = ["3.10", "3.11", "3.12"]
69 |
70 | [tool.hatch.envs.lint]
71 | detached = true
72 | dependencies = [
73 | "black>=24.2.0",
74 | "mypy>=1.8.0",
75 | "ruff>=0.2.2",
76 | ]
77 | [tool.hatch.envs.lint.scripts]
78 | typing = [
79 | "mypy --install-types --non-interactive {args:src/pynescript tests}",
80 | ]
81 | style = [
82 | "ruff {args:.}",
83 | "black --check --diff {args:.}",
84 | ]
85 | format = [
86 | "black {args:.}",
87 | "ruff --fix {args:.}",
88 | "style",
89 | ]
90 | all = [
91 | "style",
92 | "typing",
93 | ]
94 |
95 | [tool.hatch.envs.docs]
96 | detached = true
97 | dependencies = [
98 | "furo>=2024.1.29",
99 | "myst-parser>=2.0.0",
100 | "pygments>=2.17.2",
101 | "sphinx>=7.2.6",
102 | "sphinx-click>=5.1.0",
103 | "sphinx-autobuild>=2024.2.4",
104 | ]
105 |
106 | [tool.hatch.envs.docs.scripts]
107 | build = [
108 | "sphinx-build docs docs/_build",
109 | ]
110 |
111 | [tool.black]
112 | target-version = ["py310"]
113 | line-length = 120
114 | skip-string-normalization = true
115 |
116 | [tool.ruff]
117 | target-version = "py310"
118 | line-length = 120
119 | extend-exclude = ["generated"]
120 |
121 | [tool.ruff.lint]
122 | select = [
123 | "A",
124 | "ARG",
125 | "B",
126 | "C",
127 | "DTZ",
128 | "E",
129 | "EM",
130 | "F",
131 | "FBT",
132 | "I",
133 | "ICN",
134 | "ISC",
135 | "N",
136 | "PLC",
137 | "PLE",
138 | "PLR",
139 | "PLW",
140 | "Q",
141 | "RUF",
142 | "S",
143 | "T",
144 | "TID",
145 | "UP",
146 | "W",
147 | "YTT",
148 | ]
149 | ignore = [
150 | "ISC001",
151 | ]
152 |
153 | [tool.ruff.lint.isort]
154 | known-first-party = ["pynescript"]
155 | force-single-line = true
156 | lines-between-types = 1
157 | lines-after-imports = 2
158 | required-imports = ["from __future__ import annotations"]
159 |
160 | [tool.ruff.lint.per-file-ignores]
161 | "tests/**/*" = ["PLR2004", "S101", "TID252"]
162 |
163 | [tool.coverage.run]
164 | source_pkgs = ["pynescript", "tests"]
165 | branch = true
166 | parallel = true
167 | omit = [
168 | "src/pynescript/__about__.py",
169 | ]
170 |
171 | [tool.coverage.paths]
172 | pynescript = ["src/pynescript", "*/pynescript/src/pynescript"]
173 | tests = ["tests", "*/pynescript/tests"]
174 |
175 | [tool.coverage.report]
176 | exclude_lines = [
177 | "no cov",
178 | "if __name__ == .__main__.:",
179 | "if TYPE_CHECKING:",
180 | ]
181 |
--------------------------------------------------------------------------------
/src/pynescript/__about__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 |
# Single source of the package version; read by hatch via [tool.hatch.version].
__version__ = "0.2.0"
21 |
--------------------------------------------------------------------------------
/src/pynescript/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/__main__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import click
20 |
21 |
@click.group()
@click.version_option()
def cli():
    # Root command group for the `pynescript` console script; subcommands
    # register themselves via @cli.command() below.  (No docstring on
    # purpose: click would surface it as help text.)
    pass
26 |
27 |
@cli.command(short_help="Parse pinescript file to AST tree.")
@click.argument(
    "filename",
    metavar="PATH",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
)
@click.option(
    "--encoding",
    default="utf-8",
    help="Text encoding of the file.",
)
@click.option(
    "--indent",
    type=int,
    default=2,
    # Fixed help-text typo: "Indentation with of" -> "Indentation width of".
    help="Indentation width of an AST dump.",
)
@click.option(
    "--output-file",
    metavar="PATH",
    type=click.Path(writable=True, allow_dash=True),
    help="Path to output dump file, defaults to standard output.",
    default="-",
)
def parse_and_dump(filename, encoding, indent, output_file):
    # Parse PATH into an AST and write its indented dump to the output file.
    # Imports are deferred so `--help` stays fast.
    from pynescript.ast import dump
    from pynescript.ast import parse

    with click.open_file(filename, "r", encoding=encoding) as f:
        script_node = parse(f.read(), filename)

    script_node_dump = dump(script_node, indent=indent)

    with click.open_file(output_file, "w", encoding=encoding) as f:
        f.write(script_node_dump)
63 |
64 |
@cli.command(short_help="Parse pinescript file and unparse back to pinescript.")
@click.argument(
    "filename",
    metavar="PATH",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
)
@click.option(
    "--encoding",
    default="utf-8",
    help="Text encoding of the file.",
)
@click.option(
    "--output-file",
    metavar="PATH",
    type=click.Path(writable=True, allow_dash=True),
    # Fixed copy-pasted help text: this command writes the unparsed script,
    # not an AST dump.
    help="Path to output script file, defaults to standard output.",
    default="-",
)
def parse_and_unparse(filename, encoding, output_file):
    # Round-trip PATH through the parser and write the regenerated source.
    # Imports are deferred so `--help` stays fast.
    from pynescript.ast import parse
    from pynescript.ast import unparse

    with click.open_file(filename, "r", encoding=encoding) as f:
        script_node = parse(f.read(), filename)

    unparsed_script = unparse(script_node)

    with click.open_file(output_file, "w", encoding=encoding) as f:
        f.write(unparsed_script)
94 |
95 |
@cli.command(short_help="Download builtin scripts.")
@click.option(
    "--script-dir",
    type=click.Path(exists=False, file_okay=False, writable=True),
    # Fixed help-text typo ("Diretory") and grammar.
    help="Directory where scripts are to be saved (like tests/data/builtin_scripts).",
    required=True,
)
def download_builtin_scripts(script_dir):
    # Fetch TradingView's builtin scripts into SCRIPT-DIR (import deferred
    # so `--help` stays fast).
    from pynescript.util.pine_facade import download_builtin_scripts as download

    download(script_dir)
107 |
108 |
# Allow `python -m pynescript` in addition to the installed console script.
if __name__ == "__main__":
    cli(prog_name="pynescript")  # pragma: no cover
111 |
--------------------------------------------------------------------------------
/src/pynescript/ast/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | # ruff: noqa: F403
20 | from .error import *
21 | from .helper import *
22 | from .node import *
23 | from .transformer import *
24 | from .visitor import *
25 |
--------------------------------------------------------------------------------
/src/pynescript/ast/__main__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from pynescript.ast.helper import dump
20 | from pynescript.ast.helper import parse
21 |
22 |
def main():
    """CLI entry point: parse a Pinescript file and print its AST dump."""
    import argparse

    argument_parser = argparse.ArgumentParser(prog="python -m pynescript.ast")
    argument_parser.add_argument(
        "infile",
        type=argparse.FileType(mode="rb"),
        nargs="?",
        default="-",
        help="the file to parse; defaults to stdin",
    )
    argument_parser.add_argument(
        "-m",
        "--mode",
        default="exec",
        choices=("exec", "eval"),
        help="specify what kind of code must be parsed",
    )
    # NOTE(review): this flag is parsed but not forwarded to parse() below —
    # confirm whether parse() accepts a type-comments argument.
    argument_parser.add_argument(
        "--no-type-comments",
        default=True,
        action="store_false",
        help="don't add information about type comments",
    )
    argument_parser.add_argument(
        "-a",
        "--include-attributes",
        action="store_true",
        help="include attributes such as line numbers and column offsets",
    )
    argument_parser.add_argument("-i", "--indent", type=int, default=2, help="indentation of nodes (number of spaces)")
    options = argument_parser.parse_args()

    with options.infile as stream:
        script_source = stream.read()

    script_tree = parse(script_source, options.infile.name, options.mode)
    print(dump(script_tree, include_attributes=options.include_attributes, indent=options.indent))  # noqa:T201


if __name__ == "__main__":
    main()
65 |
--------------------------------------------------------------------------------
/src/pynescript/ast/collector.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from pynescript.ast import node as ast
20 | from pynescript.ast.visitor import NodeVisitor
21 |
22 |
# AST node types that carry nested statement bodies; statements whose value
# is one of these are recursed into by StatementCollector.
Structure = (
    ast.ForTo,
    ast.ForIn,
    ast.While,
    ast.If,
    ast.Switch,
)
30 |
31 |
class StatementCollector(NodeVisitor):
    """Generator-based visitor yielding every statement node reachable from
    a script, in source order, including statements nested inside structure
    expressions (for/while/if/switch)."""

    # ruff: noqa: N802

    def visit_Script(self, node):
        for statement in node.body:
            yield from self.visit(statement)

    def visit_FunctionDef(self, node):
        # The definition itself counts as a statement, then its body.
        yield node
        for statement in node.body:
            yield from self.visit(statement)

    def visit_TypeDef(self, node):
        yield node
        for statement in node.body:
            yield from self.visit(statement)

    def visit_Assign(self, node):
        # Assignments may bind the result of a structure expression, whose
        # inner statements must also be collected.
        yield node
        if isinstance(node.value, Structure):
            yield from self.visit(node.value)

    def visit_ReAssign(self, node):
        yield node
        if isinstance(node.value, Structure):
            yield from self.visit(node.value)

    def visit_AugAssign(self, node):
        yield node
        if isinstance(node.value, Structure):
            yield from self.visit(node.value)

    def visit_Import(self, node):
        yield node

    def visit_Expr(self, node):
        yield node
        if isinstance(node.value, Structure):
            yield from self.visit(node.value)

    def visit_Break(self, node):
        yield node

    def visit_Continue(self, node):
        yield node

    # Structure nodes themselves are expressions: only their bodies yield.

    def visit_ForTo(self, node):
        for statement in node.body:
            yield from self.visit(statement)

    def visit_ForIn(self, node):
        for statement in node.body:
            yield from self.visit(statement)

    def visit_While(self, node):
        for statement in node.body:
            yield from self.visit(statement)

    def visit_If(self, node):
        for statement in node.body:
            yield from self.visit(statement)
        for statement in node.orelse:
            yield from self.visit(statement)

    def visit_Switch(self, node):
        for switch_case in node.cases:
            yield from self.visit(switch_case)

    def visit_Case(self, node):
        for statement in node.body:
            yield from self.visit(statement)
103 |
--------------------------------------------------------------------------------
/src/pynescript/ast/error.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from io import StringIO
20 | from typing import NamedTuple
21 |
22 |
class SyntaxErrorDetails(NamedTuple):
    """Source-location details for a syntax error (mirrors the field layout
    of CPython's SyntaxError arguments)."""

    filename: str
    lineno: int
    offset: int
    text: str
    end_lineno: int | None = None
    end_offset: int | None = None


class SyntaxError(Exception):  # noqa: A001
    """Pinescript syntax error, optionally carrying location details.

    ``details`` may be supplied either as one ``SyntaxErrorDetails`` object
    or as its positional fields (filename, lineno, offset, text, ...).
    """

    def __init__(self, message: str, *details):
        self.message = message
        # Fixed: always define ``details`` so __str__ cannot crash with
        # AttributeError when no location info was supplied.
        self.details = None
        if details:
            if len(details) == 1 and isinstance(details[0], SyntaxErrorDetails):
                self.details = details[0]
            else:
                self.details = SyntaxErrorDetails(*details)

    def __str__(self):
        if self.details is None:
            return self.message
        f = StringIO()
        code = self.details.text.lstrip()
        # Shift the caret left by the amount of leading whitespace stripped
        # from the source line.
        offset = self.details.offset + len(code) - len(self.details.text)
        f.write(self.message)
        f.write("\n")
        f.write(f'  File "{self.details.filename}", line {self.details.lineno}\n')
        # Fixed: terminate the source line so the caret lands on its own
        # line instead of being appended to the code text.
        f.write(f"    {code}\n")
        f.write("    ")
        f.write(" " * offset)
        f.write("^")
        return f.getvalue()
53 |
54 |
class IndentationError(SyntaxError):  # noqa: A001
    """Syntax error caused specifically by bad indentation."""

    pass
57 |
58 |
# Public error API re-exported by `pynescript.ast`.
__all__ = [
    "SyntaxErrorDetails",
    "SyntaxError",
    "IndentationError",
]
64 |
--------------------------------------------------------------------------------
/src/pynescript/ast/evaluator.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import itertools
20 | import operator
21 |
22 | from pynescript.ast import node as ast
23 | from pynescript.ast.visitor import NodeVisitor
24 |
25 |
class NodeLiteralEvaluator(NodeVisitor):
    """Evaluates constant-foldable AST subtrees to plain Python values.

    Only boolean/binary/unary operators, conditionals, comparisons,
    constants, and tuples are supported; any other node kind is rejected
    by ``generic_visit`` raising ``ValueError``.
    """

    # ruff: noqa: N802

    def visit_BoolOp(self, node: ast.BoolOp):
        if isinstance(node.op, ast.And):
            return all(self.visit(value) for value in node.values)
        if isinstance(node.op, ast.Or):
            return any(self.visit(value) for value in node.values)
        msg = f"unexpected node operator: {node.op}"
        raise ValueError(msg)

    def visit_BinOp(self, node: ast.BinOp):
        if isinstance(node.op, ast.Add):
            return operator.add(self.visit(node.left), self.visit(node.right))
        if isinstance(node.op, ast.Sub):
            return operator.sub(self.visit(node.left), self.visit(node.right))
        if isinstance(node.op, ast.Mult):
            return operator.mul(self.visit(node.left), self.visit(node.right))
        if isinstance(node.op, ast.Div):
            return operator.truediv(self.visit(node.left), self.visit(node.right))
        if isinstance(node.op, ast.Mod):
            return operator.mod(self.visit(node.left), self.visit(node.right))
        msg = f"unexpected node operator: {node.op}"
        raise ValueError(msg)

    def visit_UnaryOp(self, node: ast.UnaryOp):
        if isinstance(node.op, ast.Not):
            return operator.not_(self.visit(node.operand))
        if isinstance(node.op, ast.UAdd):
            return operator.pos(self.visit(node.operand))
        if isinstance(node.op, ast.USub):
            return operator.neg(self.visit(node.operand))
        msg = f"unexpected node operator: {node.op}"
        raise ValueError(msg)

    def visit_Conditional(self, node: ast.Conditional):
        return self.visit(node.body) if self.visit(node.test) else self.visit(node.orelse)

    def visit_Compare(self, node: ast.Compare):  # noqa: C901, PLR0911, PLR0912
        """Evaluate a (possibly chained) comparison like ``a < b <= c``."""
        comparators = map(self.visit, itertools.chain([node.left], node.comparators))
        comparator_pairs = itertools.pairwise(comparators)
        # Fixed: operator nodes must not be dispatched through self.visit —
        # no visit_Eq/visit_Lt/... methods exist, so generic_visit would
        # raise before any comparison took place.  Inspect them directly.
        # (Also dropped a dead `left = self.visit(node.left)` assignment.)
        for op, (left, right) in zip(node.ops, comparator_pairs, strict=True):
            if isinstance(op, ast.Eq):
                if not operator.eq(left, right):
                    return False
            elif isinstance(op, ast.NotEq):
                if not operator.ne(left, right):
                    return False
            elif isinstance(op, ast.Lt):
                if not operator.lt(left, right):
                    return False
            elif isinstance(op, ast.LtE):
                if not operator.le(left, right):
                    return False
            elif isinstance(op, ast.Gt):
                if not operator.gt(left, right):
                    return False
            elif isinstance(op, ast.GtE):
                if not operator.ge(left, right):
                    return False
            else:
                msg = f"unexpected node operator: {op}"
                raise ValueError(msg)
        return True

    def visit_Constant(self, node: ast.Constant):
        if node.kind:
            msg = f"unexpected constant kind: {node.kind!s}"
            raise ValueError(msg)
        return node.value

    def visit_Tuple(self, node: ast.Tuple):
        return tuple(self.visit(elt) for elt in node.elts)

    def generic_visit(self, node: ast.AST):
        # Anything not explicitly handled above is not a literal expression.
        msg = f"unexpected type of node: {type(node)}"
        raise ValueError(msg)
105 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/error_listener.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import re
20 |
21 | from antlr4 import FileStream
22 | from antlr4 import InputStream
23 | from antlr4 import Lexer
24 | from antlr4 import Parser
25 | from antlr4 import Token
26 | from antlr4 import TokenStream
27 | from antlr4.error.ErrorListener import ErrorListener
28 | from antlr4.Recognizer import Recognizer
29 |
30 | from pynescript.ast.error import SyntaxError
31 | from pynescript.ast.error import SyntaxErrorDetails
32 |
33 |
class PinescriptErrorListener(ErrorListener):
    # ruff: noqa: N802

    """ANTLR error listener that converts lexer/parser syntax errors into
    :class:`pynescript.ast.error.SyntaxError` instances carrying location
    details (filename, line, column, offending line text).
    """

    # Shared singleton; populated right after the class definition.
    INSTANCE: PinescriptErrorListener | None = None

    def _getFilenameFrom(self, recognizer: Recognizer) -> str:
        """Best-effort recovery of the source filename from a recognizer."""
        # A parser does not know its source directly; unwrap its token
        # stream to reach the underlying lexer first.
        if isinstance(recognizer, Parser):
            input_stream = recognizer._input
            if isinstance(input_stream, TokenStream):
                lexer = input_stream.tokenSource
                recognizer = lexer
            else:
                msg = f"unexpected type of input: {type(input_stream)}"
                raise TypeError(msg)
        if isinstance(recognizer, Lexer):
            input_stream = recognizer._input
            if isinstance(input_stream, FileStream):
                return input_stream.fileName
            elif isinstance(input_stream, InputStream):
                # Probe the stream for a name attribute in order of
                # preference; fall back to "" when nothing is available.
                if hasattr(input_stream, "getSourceName"):
                    return input_stream.getSourceName()
                elif hasattr(input_stream, "sourceName"):
                    return input_stream.sourceName
                elif input_stream.name:
                    return input_stream.name
                else:
                    return ""
            else:
                msg = f"unexpected type of input: {type(input_stream)}"
                raise TypeError(msg)
        else:
            msg = f"unexpected type of recognizer: {type(recognizer)}"
            raise TypeError(msg)

    # Matches one line including its trailing newline (CRLF, LF, or CR),
    # or the final unterminated line at end of input.
    _LINE_PATTERN = re.compile(r"(.*?(?:\r\n|\n|\r|$))")

    def _splitLines(self, source: str, maxlines: int | None = None) -> list[str]:
        """Split *source* into lines (terminators kept), at most *maxlines*."""
        lines = []
        for lineno, match in enumerate(self._LINE_PATTERN.finditer(source), 1):
            if maxlines is not None and lineno > maxlines:
                break
            lines.append(match[0])
        return lines

    def _getInputTextFrom(self, recognizer: Recognizer, lineno: int | None = None) -> str:
        """Return the whole input text, or just line *lineno* (1-based)."""
        # Same parser -> lexer unwrapping as in _getFilenameFrom.
        if isinstance(recognizer, Parser):
            input_stream = recognizer._input
            if isinstance(input_stream, TokenStream):
                lexer = input_stream.tokenSource
                recognizer = lexer
            else:
                msg = f"unexpected type of input: {type(input_stream)}"
                raise TypeError(msg)
        if isinstance(recognizer, Lexer):
            input_stream = recognizer._input
            if isinstance(input_stream, InputStream):
                source = str(input_stream)
                if lineno is not None and lineno > 0:
                    # Only split up to the requested line; lines are 1-based.
                    lines = self._splitLines(source, maxlines=lineno)
                    source = lines[lineno - 1]
                return source
            else:
                msg = f"unexpected type of input: {type(input_stream)}"
                raise TypeError(msg)
        else:
            msg = f"unexpected type of recognizer: {type(recognizer)}"
            raise TypeError(msg)

    def syntaxError(  # noqa: PLR0913
        self,
        recognizer: Recognizer,
        offendingSymbol: Token,  # noqa: N803
        line: int,
        column: int,
        msg: str,
        e: Exception | None,
    ):
        """ErrorListener hook: raise a pynescript SyntaxError with details."""
        filename = self._getFilenameFrom(recognizer)
        lineno = line
        offset = column
        text = self._getInputTextFrom(recognizer, lineno)
        # Character span of the offending token in the input stream.
        symbol_len = offendingSymbol.stop - offendingSymbol.start + 1
        symbol_nls = offendingSymbol.text.count("\n")
        symbol_nlpos = offendingSymbol.text.rfind("\n")
        end_lineno = offendingSymbol.line + symbol_nls
        # NOTE(review): for multi-line tokens the end offset is derived from
        # the position of the last newline in the token text; the "+ 1"
        # looks like a 1-based adjustment — confirm it matches the offset
        # convention expected by SyntaxErrorDetails.
        end_offset = symbol_len - symbol_nlpos + 1 if symbol_nls > 0 else offendingSymbol.column + symbol_len
        details = SyntaxErrorDetails(
            filename,
            lineno,
            offset,
            text,
            end_lineno,
            end_offset,
        )
        error = SyntaxError(msg, details)

        # If a detailed SyntaxError bubbled up from a deeper layer, reuse it
        # (with the freshly computed details); otherwise chain the original
        # exception as the cause.
        if isinstance(e, SyntaxError):
            e.details = error.details
            error = e
        else:
            error.__cause__ = e

        raise error
137 |
138 |
# Populate the class-level singleton declared (as None) on the class body.
PinescriptErrorListener.INSTANCE = PinescriptErrorListener()


__all__ = [
    "PinescriptErrorListener",
]
145 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/generated/PinescriptLexer.tokens:
--------------------------------------------------------------------------------
1 | INDENT=1
2 | DEDENT=2
3 | AND=3
4 | AS=4
5 | BREAK=5
6 | BY=6
7 | CONST=7
8 | CONTINUE=8
9 | ELSE=9
10 | EXPORT=10
11 | FALSE=11
12 | FOR=12
13 | IF=13
14 | IMPORT=14
15 | IN=15
16 | INPUT=16
17 | METHOD=17
18 | NOT=18
19 | OR=19
20 | SERIES=20
21 | SIMPLE=21
22 | SWITCH=22
23 | TO=23
24 | TYPE=24
25 | TRUE=25
26 | VAR=26
27 | VARIP=27
28 | WHILE=28
29 | LPAR=29
30 | RPAR=30
31 | LSQB=31
32 | RSQB=32
33 | LESS=33
34 | GREATER=34
35 | EQUAL=35
36 | EQEQUAL=36
37 | NOTEQUAL=37
38 | LESSEQUAL=38
39 | GREATEREQUAL=39
40 | RARROW=40
41 | DOT=41
42 | COMMA=42
43 | COLON=43
44 | QUESTION=44
45 | PLUS=45
46 | MINUS=46
47 | STAR=47
48 | SLASH=48
49 | PERCENT=49
50 | PLUSEQUAL=50
51 | MINEQUAL=51
52 | STAREQUAL=52
53 | SLASHEQUAL=53
54 | PERCENTEQUAL=54
55 | COLONEQUAL=55
56 | NAME=56
57 | NUMBER=57
58 | STRING=58
59 | COLOR=59
60 | NEWLINE=60
61 | WS=61
62 | COMMENT=62
63 | ERROR_TOKEN=63
64 | 'and'=3
65 | 'as'=4
66 | 'break'=5
67 | 'by'=6
68 | 'const'=7
69 | 'continue'=8
70 | 'else'=9
71 | 'export'=10
72 | 'false'=11
73 | 'for'=12
74 | 'if'=13
75 | 'import'=14
76 | 'in'=15
77 | 'input'=16
78 | 'method'=17
79 | 'not'=18
80 | 'or'=19
81 | 'series'=20
82 | 'simple'=21
83 | 'switch'=22
84 | 'to'=23
85 | 'type'=24
86 | 'true'=25
87 | 'var'=26
88 | 'varip'=27
89 | 'while'=28
90 | '('=29
91 | ')'=30
92 | '['=31
93 | ']'=32
94 | '<'=33
95 | '>'=34
96 | '='=35
97 | '=='=36
98 | '!='=37
99 | '<='=38
100 | '>='=39
101 | '=>'=40
102 | '.'=41
103 | ','=42
104 | ':'=43
105 | '?'=44
106 | '+'=45
107 | '-'=46
108 | '*'=47
109 | '/'=48
110 | '%'=49
111 | '+='=50
112 | '-='=51
113 | '*='=52
114 | '/='=53
115 | '%='=54
116 | ':='=55
117 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/generated/PinescriptLexerBase.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import re
20 | import sys
21 |
22 | from collections import deque
23 | from typing import TextIO
24 |
25 | from antlr4 import InputStream
26 | from antlr4 import Lexer
27 | from antlr4 import Token
28 | from antlr4.Token import CommonToken
29 |
30 | from pynescript.ast.error import IndentationError
31 | from pynescript.ast.error import SyntaxError
32 |
33 |
class PinescriptLexerBase(Lexer):
    # ruff: noqa: N802, N803, N806, A002

    # NOTE: this file under generated/ mirrors resource/PinescriptLexerBase.py;
    # keep both copies in sync.

    """
    Lexer superclass that post-processes the raw ANTLR token stream:

    - ignore possible leading newlines
    - ignore excessive trailing newlines except a single newline
    - ensure that script ends with a newline if none
    - ignore consecutive newlines except the last one
    - ignore newlines inside open parentheses, brackets
    - ignore newlines after operators
    - ignore newlines for line wrapping (lines whose indentation width is not a multiple of four)
    - track indentation level, push INDENT or DEDENT tokens respectively
    - handle multiline string literals correctly
    """

    def __init__(self, input: InputStream, output: TextIO = sys.stdout):
        super().__init__(input, output)

        # operators that are followed by terms
        self._operators = {
            self.AND,
            self.COLON,
            self.COLONEQUAL,
            self.COMMA,
            self.EQEQUAL,
            self.EQUAL,
            self.GREATER,
            self.GREATEREQUAL,
            self.LESS,
            self.LESSEQUAL,
            self.MINEQUAL,
            self.MINUS,
            self.NOTEQUAL,
            self.OR,
            self.PERCENT,
            self.PERCENTEQUAL,
            self.PLUS,
            self.PLUSEQUAL,
            self.QUESTION,
            self.SLASH,
            self.SLASHEQUAL,
            self.STAR,
            self.STAREQUAL,
        }

        # indent specific parameters
        self._tabLength: int = 4
        self._indentLength: int = 4

        # track internal tokens (current plus a one-token lookahead)
        self._currentToken: CommonToken | None = None
        self._followingToken: CommonToken | None = None

        # keep pending tokens (FIFO queue drained by nextToken)
        self._pendingTokens: list[CommonToken] = []

        # track last pending token types
        self._lastPendingTokenType: int = 0
        self._lastPendingTokenTypeFromDefaultChannel: int = 0

        # track number of opens (unbalanced '(' and '[')
        self._numOpens: int = 0

        # track indentations
        self._indentLengthStack: deque[int] = deque()

    def _resetInternalStates(self):
        # Restore every piece of mutable state set up in __init__.
        self._currentToken: CommonToken | None = None
        self._followingToken: CommonToken | None = None
        self._pendingTokens: list[CommonToken] = []
        self._lastPendingTokenType: int = 0
        self._lastPendingTokenTypeFromDefaultChannel: int = 0
        self._numOpens: int = 0
        self._indentLengthStack = deque()

    def nextToken(self) -> CommonToken:
        """Return the next post-processed token for the parser."""
        self._checkNextToken()
        return self._popPendingToken()

    def _checkNextToken(self) -> None:
        """Pull raw tokens and enqueue the post-processed result(s)."""
        if self._reachedEndOfFile():
            return

        self._setNextInternalTokens()
        self._handleStartOfInputIfNecessary()

        match self._currentToken.type:
            case self.LPAR | self.LSQB:
                self._numOpens += 1
                self._addPendingToken(self._currentToken)
            case self.RPAR | self.RSQB:
                self._numOpens -= 1
                self._addPendingToken(self._currentToken)
            case self.NEWLINE:
                self._handle_NEWLINE_token()
            case self.STRING:
                self._handle_STRING_token()
            case self.ERROR_TOKEN:
                message = "token recognition error at: '" + self._currentToken.text + "'"
                self._reportLexerError(message, self._currentToken, SyntaxError)
                self._addPendingToken(self._currentToken)
            case Token.EOF:
                self._handle_EOF_token()
            case _:
                self._addPendingToken(self._currentToken)

    def _reachedEndOfFile(self) -> bool:
        # EOF has been queued once; nothing further to read.
        return self._lastPendingTokenType == Token.EOF

    def _setNextInternalTokens(self) -> None:
        # Advance the (current, following) one-token lookahead window,
        # never reading past EOF.
        self._currentToken = super().nextToken() if self._followingToken is None else self._followingToken
        self._followingToken = self._currentToken if self._currentToken.type == Token.EOF else super().nextToken()

    def _handleStartOfInputIfNecessary(self):
        """On first call, skip leading newlines and check the first indent."""
        # A non-empty indent stack means start-of-input was already handled.
        if len(self._indentLengthStack) > 0:
            return
        self._indentLengthStack.append(0)
        while self._currentToken.type != Token.EOF:
            if self._currentToken.channel == Token.DEFAULT_CHANNEL:
                if self._currentToken.type == self.NEWLINE:
                    # Leading blank lines are hidden, not emitted.
                    self._hideAndAddPendingToken(self._currentToken)
                else:
                    self._checkLeadingIndentIfAny()
                    return
            else:
                self._addPendingToken(self._currentToken)
            self._setNextInternalTokens()

    def _checkLeadingIndentIfAny(self):
        """Report an error if the very first statement is indented."""
        if self._lastPendingTokenType == self.WS:
            prev_token: CommonToken = self._pendingTokens[-1]
            if self._getIndentationLength(prev_token.text) != 0:
                message = "first statement indented"
                self._reportLexerError(message, self._currentToken, IndentationError)
                # Emit an INDENT carrying the message so the parser also sees it.
                self._createAndAddPendingToken(self.INDENT, Token.DEFAULT_CHANNEL, message, self._currentToken)

    def _getIndentationLength(self, text: str) -> int:
        """Compute indentation width: tabs count as _tabLength, FF resets."""
        length = 0
        for ch in text:
            match ch:
                case " ":
                    length += 1
                case "\t":
                    length += self._tabLength
                case "\f":
                    # Form feed resets the count, mirroring CPython's tokenizer.
                    length = 0
        return length

    def _createAndAddPendingToken(self, type: int, channel: int, text: str | None, base_token: CommonToken):
        """Enqueue a synthetic token cloned from *base_token*'s position."""
        token: CommonToken = base_token.clone()
        token.type = type
        token.channel = channel
        # stop < start makes this a zero-width token at base_token's position.
        token.stop = base_token.start - 1
        token.text = "<" + self.symbolicNames[type] + ">" if text is None else text
        self._addPendingToken(token)

    def _addPendingToken(self, token: CommonToken):
        # Record the type before queueing so lookbehind checks stay accurate.
        self._lastPendingTokenType = token.type
        if token.channel == Token.DEFAULT_CHANNEL:
            self._lastPendingTokenTypeFromDefaultChannel = self._lastPendingTokenType
        self._pendingTokens.append(token)

    def _hideAndAddPendingToken(self, token: CommonToken):
        # Keep the token (for tooling) but hide it from the parser.
        token.channel = Token.HIDDEN_CHANNEL
        self._addPendingToken(token)

    def _popPendingToken(self) -> CommonToken:
        # NOTE(review): list.pop(0) is O(n); a deque would be O(1), but the
        # pending queue is normally tiny so this is unlikely to matter.
        return self._pendingTokens.pop(0)

    def _handle_NEWLINE_token(self):
        """Emit, hide, or augment a NEWLINE depending on context."""
        # Newlines inside brackets or right after an operator are
        # continuations, not statement terminators.
        if self._numOpens > 0 or self._lastPendingTokenType in self._operators:
            self._hideAndAddPendingToken(self._currentToken)
        else:
            nl_token: CommonToken = self._currentToken
            is_looking_ahead: bool = self._followingToken.type == self.WS

            if is_looking_ahead:
                # Step past the whitespace so _followingToken shows what the
                # next line actually starts with.
                self._setNextInternalTokens()

            match self._followingToken.type:
                case self.NEWLINE | self.COMMENT:
                    # Blank or comment-only line: hide this newline.
                    self._hideAndAddPendingToken(nl_token)
                    if is_looking_ahead:
                        self._addPendingToken(self._currentToken)
                case _:
                    if is_looking_ahead:
                        indentation_length: int = (
                            0
                            if self._followingToken.type == Token.EOF
                            else self._getIndentationLength(self._currentToken.text)
                        )
                        if self._isValidIndent(indentation_length):
                            self._addPendingToken(nl_token)
                            self._addPendingToken(self._currentToken)
                            self._insertIndentOrDedentToken(indentation_length)
                        else:
                            # Non-multiple-of-four indent marks a wrapped
                            # line; hide the newline to join the lines.
                            self._hideAndAddPendingToken(nl_token)
                            self._addPendingToken(self._currentToken)
                    else:
                        self._addPendingToken(nl_token)
                        self._insertIndentOrDedentToken(0)

    def _isValidIndent(self, indent_length: int):
        # Real (non-wrapped) lines are indented in multiples of _indentLength.
        return indent_length % self._indentLength == 0

    def _insertIndentOrDedentToken(self, indent_length: int):
        """Compare to the indent stack and emit INDENT/DEDENT as needed."""
        prev_indent_length: int = self._indentLengthStack[-1]
        if indent_length > prev_indent_length:
            self._createAndAddPendingToken(self.INDENT, Token.DEFAULT_CHANNEL, None, self._followingToken)
            self._indentLengthStack.append(indent_length)
        else:
            # Pop until we land on a level <= the new indent, emitting one
            # DEDENT per level; landing strictly below means the dedent does
            # not match any enclosing level.
            while indent_length < prev_indent_length:
                self._indentLengthStack.pop()
                prev_indent_length = self._indentLengthStack[-1]
                if indent_length <= prev_indent_length:
                    self._createAndAddPendingToken(self.DEDENT, Token.DEFAULT_CHANNEL, None, self._followingToken)
                else:
                    message = "inconsistent dedent"
                    self._reportLexerError(message, self._followingToken, IndentationError)
                    self._createAndAddPendingToken(
                        self.ERROR_TOKEN, Token.DEFAULT_CHANNEL, message, self._followingToken
                    )

    def _handle_STRING_token(self):
        """Normalize multiline string literals for the parser."""
        # Collapse runs of newlines, then drop a newline plus its following
        # whitespace (line-continuation inside the literal). The original
        # text is preserved as a hidden token when anything changed.
        replacedText: str = self._currentToken.text
        replacedText = re.sub(r"(\r?\n)+", r"\1", replacedText)
        replacedText = re.sub(r"(\r?\n)(\s)+", "", replacedText)
        if len(self._currentToken.text) == len(replacedText):
            self._addPendingToken(self._currentToken)
        else:
            originalToken: CommonToken = self._currentToken.clone()
            self._currentToken.text = replacedText
            self._addPendingToken(self._currentToken)
            self._hideAndAddPendingToken(originalToken)

    def _insertTrailingTokens(self):
        """Ensure the stream ends with NEWLINE and all pending DEDENTs."""
        match self._lastPendingTokenTypeFromDefaultChannel:
            case self.NEWLINE | self.DEDENT:
                pass
            case _:
                # Script did not end with a newline; synthesize one.
                self._createAndAddPendingToken(self.NEWLINE, Token.DEFAULT_CHANNEL, None, self._followingToken)
        self._insertIndentOrDedentToken(0)

    def _handle_EOF_token(self):
        # Only insert trailing tokens if any real token was ever emitted.
        if self._lastPendingTokenTypeFromDefaultChannel > 0:
            self._insertTrailingTokens()
        self._addPendingToken(self._currentToken)

    def _reportLexerError(self, message, token, errcls):
        """Route an error through the attached error listener(s)."""
        lineno = token.line
        offset = token.column
        error = errcls(message) if errcls else None
        self.getErrorListenerDispatch().syntaxError(
            self,
            token,
            lineno,
            offset,
            message,
            error,
        )

    def reset(self):
        """Reset the lexer, clearing all post-processing state as well."""
        self._resetInternalStates()
        super().reset()
298 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/generated/PinescriptParser.tokens:
--------------------------------------------------------------------------------
1 | INDENT=1
2 | DEDENT=2
3 | AND=3
4 | AS=4
5 | BREAK=5
6 | BY=6
7 | CONST=7
8 | CONTINUE=8
9 | ELSE=9
10 | EXPORT=10
11 | FALSE=11
12 | FOR=12
13 | IF=13
14 | IMPORT=14
15 | IN=15
16 | INPUT=16
17 | METHOD=17
18 | NOT=18
19 | OR=19
20 | SERIES=20
21 | SIMPLE=21
22 | SWITCH=22
23 | TO=23
24 | TYPE=24
25 | TRUE=25
26 | VAR=26
27 | VARIP=27
28 | WHILE=28
29 | LPAR=29
30 | RPAR=30
31 | LSQB=31
32 | RSQB=32
33 | LESS=33
34 | GREATER=34
35 | EQUAL=35
36 | EQEQUAL=36
37 | NOTEQUAL=37
38 | LESSEQUAL=38
39 | GREATEREQUAL=39
40 | RARROW=40
41 | DOT=41
42 | COMMA=42
43 | COLON=43
44 | QUESTION=44
45 | PLUS=45
46 | MINUS=46
47 | STAR=47
48 | SLASH=48
49 | PERCENT=49
50 | PLUSEQUAL=50
51 | MINEQUAL=51
52 | STAREQUAL=52
53 | SLASHEQUAL=53
54 | PERCENTEQUAL=54
55 | COLONEQUAL=55
56 | NAME=56
57 | NUMBER=57
58 | STRING=58
59 | COLOR=59
60 | NEWLINE=60
61 | WS=61
62 | COMMENT=62
63 | ERROR_TOKEN=63
64 | 'and'=3
65 | 'as'=4
66 | 'break'=5
67 | 'by'=6
68 | 'const'=7
69 | 'continue'=8
70 | 'else'=9
71 | 'export'=10
72 | 'false'=11
73 | 'for'=12
74 | 'if'=13
75 | 'import'=14
76 | 'in'=15
77 | 'input'=16
78 | 'method'=17
79 | 'not'=18
80 | 'or'=19
81 | 'series'=20
82 | 'simple'=21
83 | 'switch'=22
84 | 'to'=23
85 | 'type'=24
86 | 'true'=25
87 | 'var'=26
88 | 'varip'=27
89 | 'while'=28
90 | '('=29
91 | ')'=30
92 | '['=31
93 | ']'=32
94 | '<'=33
95 | '>'=34
96 | '='=35
97 | '=='=36
98 | '!='=37
99 | '<='=38
100 | '>='=39
101 | '=>'=40
102 | '.'=41
103 | ','=42
104 | ':'=43
105 | '?'=44
106 | '+'=45
107 | '-'=46
108 | '*'=47
109 | '/'=48
110 | '%'=49
111 | '+='=50
112 | '-='=51
113 | '*='=52
114 | '/='=53
115 | '%='=54
116 | ':='=55
117 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/generated/PinescriptParserBase.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import sys
20 |
21 | from typing import TextIO
22 |
23 | from antlr4 import Parser
24 | from antlr4 import TokenStream
25 |
26 |
class PinescriptParserBase(Parser):
    # ruff: noqa: N802, N803, N806, A002

    """Parser superclass providing text predicates used by the grammar."""

    def __init__(self, input: TokenStream, output: TextIO = sys.stdout):
        super().__init__(input, output)

    def isEqualToCurrentTokenText(self, tokenText: str) -> bool:
        """True when the current lookahead token's text equals *tokenText*."""
        current = self.getCurrentToken()
        return current.text == tokenText

    def isNotEqualToCurrentTokenText(self, tokenText: str) -> bool:
        """Negation of :meth:`isEqualToCurrentTokenText`."""
        return not self.isEqualToCurrentTokenText(tokenText)
38 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/generated/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from .PinescriptLexer import PinescriptLexer
4 | from .PinescriptParser import PinescriptParser
5 | from .PinescriptParserListener import PinescriptParserListener
6 | from .PinescriptParserVisitor import PinescriptParserVisitor
7 |
8 |
# Public re-exports of the generated ANTLR artifacts.
__all__ = [
    "PinescriptLexer",
    "PinescriptParser",
    "PinescriptParserListener",
    "PinescriptParserVisitor",
]
15 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/lexer.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from .generated import PinescriptLexer
20 |
21 |
# Re-export the generated lexer under a stable module path.
__all__ = [
    "PinescriptLexer",
]
25 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/listener.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from .generated import PinescriptParserListener
20 |
21 |
# Re-export the generated parse-tree listener under a stable module path.
__all__ = [
    "PinescriptParserListener",
]
25 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/parser.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from .generated import PinescriptParser
20 |
21 |
# Re-export the generated parser under a stable module path.
__all__ = [
    "PinescriptParser",
]
25 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/resource/PinescriptLexer.g4:
--------------------------------------------------------------------------------
1 | // Copyright 2024 Yunseong Hwang
2 | //
3 | // Licensed under the GNU Lesser General Public License Version 3.0 (the "License"); you may not use this file except in
4 | // compliance with the License. You may obtain a copy of the License at
5 | //
6 | // https://www.gnu.org/licenses/lgpl-3.0.en.html
7 | //
8 | // Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9 | // an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10 | // specific language governing permissions and limitations under the License.
11 | //
12 | // SPDX-License-Identifier: LGPL-3.0-or-later
13 |
lexer grammar PinescriptLexer;

options {
    superClass = PinescriptLexerBase;
}

// Virtual tokens synthesized by PinescriptLexerBase while it tracks
// indentation; no lexer rule produces them directly.
tokens {
    INDENT,
    DEDENT
}

// Comments go to a dedicated channel so tooling can still access them.
channels {
    COMMENT_CHANNEL
}

// KEYWORDS

AND: 'and';
AS: 'as';
BREAK: 'break';
BY: 'by';
CONST: 'const';
CONTINUE: 'continue';
ELSE: 'else';
EXPORT: 'export';
FALSE: 'false';
FOR: 'for';
IF: 'if';
IMPORT: 'import';
IN: 'in';
INPUT: 'input';
METHOD: 'method';
NOT: 'not';
OR: 'or';
SERIES: 'series';
SIMPLE: 'simple';
SWITCH: 'switch';
TO: 'to';
TYPE: 'type';
TRUE: 'true';
VAR: 'var';
VARIP: 'varip';
WHILE: 'while';

// PUNCTUATIONS AND OPERATORS

LPAR: '(';
RPAR: ')';
LSQB: '[';
RSQB: ']';

LESS: '<';
GREATER: '>';
EQUAL: '=';
EQEQUAL: '==';
NOTEQUAL: '!=';
LESSEQUAL: '<=';
GREATEREQUAL: '>=';

RARROW: '=>';

DOT: '.';
COMMA: ',';
COLON: ':';
QUESTION: '?';

PLUS: '+';
MINUS: '-';
STAR: '*';
SLASH: '/';
PERCENT: '%';

PLUSEQUAL: '+=';
MINEQUAL: '-=';
STAREQUAL: '*=';
SLASHEQUAL: '/=';
PERCENTEQUAL: '%=';

COLONEQUAL: ':=';

// COMMON TOKENS

NAME: ID_START ID_CONTINUE*;
NUMBER: NUMBER_LITERAL;
STRING: STRING_LITERAL;
COLOR: COLOR_LITERAL;
NEWLINE: OS_INDEPENDENT_NL;

// WHITE SPACES, COMMENTS, MISCS

WS: [ \t\f]+ -> channel(HIDDEN);
COMMENT: '//' ~[\r\n]* -> channel(COMMENT_CHANNEL);
// Catch-all: any otherwise-unmatched character becomes an ERROR_TOKEN so
// PinescriptLexerBase can report it instead of ANTLR dropping input.
ERROR_TOKEN: .;

// FRAGMENTS

fragment STRING_LITERAL: SINGLE_QUOTED_STRING | DOUBLE_QUOTED_STRING;

fragment SINGLE_QUOTED_STRING: '\'' STRING_ITEM_FOR_SINGLE_QUOTE* '\'';
fragment DOUBLE_QUOTED_STRING: '"' STRING_ITEM_FOR_DOUBLE_QUOTE* '"';

fragment STRING_ITEM_FOR_SINGLE_QUOTE
    : STRING_CHAR_NO_SINGLE_QUOTE
    | STRING_ESCAPE_SEQ;
fragment STRING_ITEM_FOR_DOUBLE_QUOTE
    : STRING_CHAR_NO_DOUBLE_QUOTE
    | STRING_ESCAPE_SEQ;

fragment STRING_CHAR_NO_SINGLE_QUOTE: ~[\\'];
fragment STRING_CHAR_NO_DOUBLE_QUOTE: ~[\\"];

fragment STRING_ESCAPE_SEQ: '\\' .;

// Colors are hex literals: #RRGGBBAA or #RRGGBB.
fragment COLOR_LITERAL: COLOR_LITERAL_RGBA | COLOR_LITERAL_RGB;

fragment COLOR_LITERAL_RGBA
    : '#' HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT;
fragment COLOR_LITERAL_RGB
    : '#' HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT;

fragment NUMBER_LITERAL: INTEGER | FLOAT_NUMBER | IMAG_NUMBER;

fragment INTEGER: DEC_INTEGER | BIN_INTEGER | OCT_INTEGER | HEX_INTEGER;
fragment DEC_INTEGER: NON_ZERO_DIGIT ('_'? DIGIT)* | '0'+ ('_'? '0')*;
fragment BIN_INTEGER: '0' ('b' | 'B') ('_'? BIN_DIGIT)+;
fragment OCT_INTEGER: '0' ('o' | 'O') ('_'? OCT_DIGIT)+;
fragment HEX_INTEGER: '0' ('x' | 'X') ('_'? HEX_DIGIT)+;
fragment NON_ZERO_DIGIT: [1-9];
fragment DIGIT: [0-9];
fragment BIN_DIGIT: '0' | '1';
fragment OCT_DIGIT: [0-7];
fragment HEX_DIGIT: DIGIT | [a-f] | [A-F];

fragment FLOAT_NUMBER: POINT_FLOAT | EXPONENT_FLOAT;
fragment POINT_FLOAT: DIGIT_PART? FRACTION | DIGIT_PART '.';
fragment EXPONENT_FLOAT: (DIGIT_PART | POINT_FLOAT) EXPONENT;
fragment DIGIT_PART: DIGIT ('_'? DIGIT)*;
fragment FRACTION: '.' DIGIT_PART;
fragment EXPONENT: ('e' | 'E') ('+' | '-')? DIGIT_PART;

fragment IMAG_NUMBER: (FLOAT_NUMBER | DIGIT_PART) ('j' | 'J');

fragment OS_INDEPENDENT_NL: '\r'? '\n';

fragment ID_START: [a-zA-Z_];
fragment ID_CONTINUE: [a-zA-Z_0-9];
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/resource/PinescriptLexerBase.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import re
20 | import sys
21 |
22 | from collections import deque
23 | from typing import TextIO
24 |
25 | from antlr4 import InputStream
26 | from antlr4 import Lexer
27 | from antlr4 import Token
28 | from antlr4.Token import CommonToken
29 |
30 | from pynescript.ast.error import IndentationError
31 | from pynescript.ast.error import SyntaxError
32 |
33 |
class PinescriptLexerBase(Lexer):
    # ruff: noqa: N802, N803, N806, A002

    """
    Lexer superclass that post-processes the raw token stream:

    - ignore possible leading newlines
    - ignore excessive trailing newlines except a single newline
    - ensure that script ends with a newline if none
    - ignore consecutive newlines except the last one
    - ignore newlines inside open parentheses, brackets
    - ignore newlines after operators
    - ignore newlines for line wrapping (lines whose indentation width is not a multiple of four)
    - track indentation level, push INDENT or DEDENT token respectively
    - normalize multiline string literals onto a single logical line
      (the original token is preserved on the hidden channel)
    """

    def __init__(self, input: InputStream, output: TextIO = sys.stdout):
        super().__init__(input, output)

        # operators that are followed by terms; a newline right after one of
        # these is treated as a line continuation (see _handle_NEWLINE_token)
        self._operators = {
            self.AND,
            self.COLON,
            self.COLONEQUAL,
            self.COMMA,
            self.EQEQUAL,
            self.EQUAL,
            self.GREATER,
            self.GREATEREQUAL,
            self.LESS,
            self.LESSEQUAL,
            self.MINEQUAL,
            self.MINUS,
            self.NOTEQUAL,
            self.OR,
            self.PERCENT,
            self.PERCENTEQUAL,
            self.PLUS,
            self.PLUSEQUAL,
            self.QUESTION,
            self.SLASH,
            self.SLASHEQUAL,
            self.STAR,
            self.STAREQUAL,
        }

        # indent specific parameters (columns per tab / columns per indent level)
        self._tabLength: int = 4
        self._indentLength: int = 4

        # track internal tokens (current token plus one token of lookahead)
        self._currentToken: CommonToken | None = None
        self._followingToken: CommonToken | None = None

        # keep pending tokens (FIFO queue drained by nextToken)
        self._pendingTokens: list[CommonToken] = []

        # track last pending token types
        self._lastPendingTokenType: int = 0
        self._lastPendingTokenTypeFromDefaultChannel: int = 0

        # track number of opens (currently-unbalanced LPAR/LSQB)
        self._numOpens: int = 0

        # track indentations (stack of active indent widths; bottom entry is 0)
        self._indentLengthStack: deque[int] = deque()

    def _resetInternalStates(self):
        """Restore all bookkeeping to its initial, pre-lexing state."""
        self._currentToken: CommonToken | None = None
        self._followingToken: CommonToken | None = None
        self._pendingTokens: list[CommonToken] = []
        self._lastPendingTokenType: int = 0
        self._lastPendingTokenTypeFromDefaultChannel: int = 0
        self._numOpens: int = 0
        self._indentLengthStack = deque()

    def nextToken(self) -> CommonToken:
        """Return the next post-processed token from the pending queue."""
        self._checkNextToken()
        return self._popPendingToken()

    def _checkNextToken(self) -> None:
        """Pull raw tokens and enqueue their post-processed equivalents."""
        if self._reachedEndOfFile():
            return

        self._setNextInternalTokens()
        self._handleStartOfInputIfNecessary()

        match self._currentToken.type:
            case self.LPAR | self.LSQB:
                self._numOpens += 1
                self._addPendingToken(self._currentToken)
            case self.RPAR | self.RSQB:
                self._numOpens -= 1
                self._addPendingToken(self._currentToken)
            case self.NEWLINE:
                self._handle_NEWLINE_token()
            case self.STRING:
                self._handle_STRING_token()
            case self.ERROR_TOKEN:
                message = "token recognition error at: '" + self._currentToken.text + "'"
                self._reportLexerError(message, self._currentToken, SyntaxError)
                self._addPendingToken(self._currentToken)
            case Token.EOF:
                self._handle_EOF_token()
            case _:
                self._addPendingToken(self._currentToken)

    def _reachedEndOfFile(self) -> bool:
        """Report whether EOF has already been queued for the caller."""
        return self._lastPendingTokenType == Token.EOF

    def _setNextInternalTokens(self) -> None:
        """Advance the (current, following) one-token lookahead window."""
        self._currentToken = super().nextToken() if self._followingToken is None else self._followingToken
        # once EOF is reached, keep returning it instead of pulling more tokens
        self._followingToken = self._currentToken if self._currentToken.type == Token.EOF else super().nextToken()

    def _handleStartOfInputIfNecessary(self):
        """On the first call only: seed the indent stack and skip leading newlines."""
        if len(self._indentLengthStack) > 0:
            return
        # base indentation level of the whole script is zero
        self._indentLengthStack.append(0)
        while self._currentToken.type != Token.EOF:
            if self._currentToken.channel == Token.DEFAULT_CHANNEL:
                if self._currentToken.type == self.NEWLINE:
                    # leading blank lines are suppressed (hidden channel)
                    self._hideAndAddPendingToken(self._currentToken)
                else:
                    self._checkLeadingIndentIfAny()
                    return
            else:
                self._addPendingToken(self._currentToken)
            self._setNextInternalTokens()

    def _checkLeadingIndentIfAny(self):
        """Flag an indented first statement as an error (an INDENT token carrying
        the message is still emitted so the parser can recover)."""
        if self._lastPendingTokenType == self.WS:
            prev_token: CommonToken = self._pendingTokens[-1]
            if self._getIndentationLength(prev_token.text) != 0:
                message = "first statement indented"
                self._reportLexerError(message, self._currentToken, IndentationError)
                self._createAndAddPendingToken(self.INDENT, Token.DEFAULT_CHANNEL, message, self._currentToken)

    def _getIndentationLength(self, text: str) -> int:
        """Measure indentation width: a space counts 1, a tab counts
        ``_tabLength``, and a form feed resets the count to zero."""
        length = 0
        for ch in text:
            match ch:
                case " ":
                    length += 1
                case "\t":
                    length += self._tabLength
                case "\f":
                    length = 0
        return length

    def _createAndAddPendingToken(self, type: int, channel: int, text: str | None, base_token: CommonToken):
        """Synthesize a token (e.g. INDENT/DEDENT) positioned at *base_token*."""
        token: CommonToken = base_token.clone()
        token.type = type
        token.channel = channel
        # stop before start makes the synthesized token zero-width
        token.stop = base_token.start - 1
        token.text = "<" + self.symbolicNames[type] + ">" if text is None else text
        self._addPendingToken(token)

    def _addPendingToken(self, token: CommonToken):
        """Queue *token* for delivery and record its type."""
        self._lastPendingTokenType = token.type
        if token.channel == Token.DEFAULT_CHANNEL:
            self._lastPendingTokenTypeFromDefaultChannel = self._lastPendingTokenType
        self._pendingTokens.append(token)

    def _hideAndAddPendingToken(self, token: CommonToken):
        """Queue *token* after moving it to the hidden channel."""
        token.channel = Token.HIDDEN_CHANNEL
        self._addPendingToken(token)

    def _popPendingToken(self) -> CommonToken:
        """Dequeue and return the oldest pending token."""
        return self._pendingTokens.pop(0)

    def _handle_NEWLINE_token(self):
        """Emit, hide, or augment a NEWLINE token depending on context."""
        # a newline inside parens/brackets or right after an operator is a
        # line continuation: keep it off the default channel
        if self._numOpens > 0 or self._lastPendingTokenType in self._operators:
            self._hideAndAddPendingToken(self._currentToken)
        else:
            nl_token: CommonToken = self._currentToken
            is_looking_ahead: bool = self._followingToken.type == self.WS

            if is_looking_ahead:
                # advance so _currentToken holds the indentation whitespace
                self._setNextInternalTokens()

            match self._followingToken.type:
                case self.NEWLINE | self.COMMENT:
                    # blank or comment-only line: suppress this newline
                    self._hideAndAddPendingToken(nl_token)
                    if is_looking_ahead:
                        self._addPendingToken(self._currentToken)
                case _:
                    if is_looking_ahead:
                        indentation_length: int = (
                            0
                            if self._followingToken.type == Token.EOF
                            else self._getIndentationLength(self._currentToken.text)
                        )
                        if self._isValidIndent(indentation_length):
                            self._addPendingToken(nl_token)
                            self._addPendingToken(self._currentToken)
                            self._insertIndentOrDedentToken(indentation_length)
                        else:
                            # indent width not a multiple of the indent size:
                            # treat the line as a wrapped continuation
                            self._hideAndAddPendingToken(nl_token)
                            self._addPendingToken(self._currentToken)
                    else:
                        self._addPendingToken(nl_token)
                        self._insertIndentOrDedentToken(0)

    def _isValidIndent(self, indent_length: int):
        """A valid indent is an exact multiple of the configured indent size."""
        return indent_length % self._indentLength == 0

    def _insertIndentOrDedentToken(self, indent_length: int):
        """Compare *indent_length* with the stack top and emit INDENT/DEDENTs."""
        prev_indent_length: int = self._indentLengthStack[-1]
        if indent_length > prev_indent_length:
            self._createAndAddPendingToken(self.INDENT, Token.DEFAULT_CHANNEL, None, self._followingToken)
            self._indentLengthStack.append(indent_length)
        else:
            # pop every level deeper than the new indentation, emitting one
            # DEDENT per popped level; a level that does not line up with any
            # stacked indentation is reported as an inconsistent dedent
            while indent_length < prev_indent_length:
                self._indentLengthStack.pop()
                prev_indent_length = self._indentLengthStack[-1]
                if indent_length <= prev_indent_length:
                    self._createAndAddPendingToken(self.DEDENT, Token.DEFAULT_CHANNEL, None, self._followingToken)
                else:
                    message = "inconsistent dedent"
                    self._reportLexerError(message, self._followingToken, IndentationError)
                    self._createAndAddPendingToken(
                        self.ERROR_TOKEN, Token.DEFAULT_CHANNEL, message, self._followingToken
                    )

    def _handle_STRING_token(self):
        """Collapse a multiline string literal onto one logical line; the
        unmodified original token is kept on the hidden channel."""
        replacedText: str = self._currentToken.text
        # collapse runs of newlines to a single one, then drop the newline
        # together with the indentation whitespace that follows it
        replacedText = re.sub(r"(\r?\n)+", r"\1", replacedText)
        replacedText = re.sub(r"(\r?\n)(\s)+", "", replacedText)
        if len(self._currentToken.text) == len(replacedText):
            self._addPendingToken(self._currentToken)
        else:
            originalToken: CommonToken = self._currentToken.clone()
            self._currentToken.text = replacedText
            self._addPendingToken(self._currentToken)
            self._hideAndAddPendingToken(originalToken)

    def _insertTrailingTokens(self):
        """Ensure the stream ends with a NEWLINE and close all open indents."""
        match self._lastPendingTokenTypeFromDefaultChannel:
            case self.NEWLINE | self.DEDENT:
                pass  # already properly terminated
            case _:
                self._createAndAddPendingToken(self.NEWLINE, Token.DEFAULT_CHANNEL, None, self._followingToken)
        self._insertIndentOrDedentToken(0)

    def _handle_EOF_token(self):
        """Flush trailing NEWLINE/DEDENT tokens (if any default-channel token
        was ever emitted) and then queue the EOF token itself."""
        if self._lastPendingTokenTypeFromDefaultChannel > 0:
            self._insertTrailingTokens()
        self._addPendingToken(self._currentToken)

    def _reportLexerError(self, message, token, errcls):
        """Forward a lexer error to the registered error listeners."""
        lineno = token.line
        offset = token.column
        error = errcls(message) if errcls else None
        self.getErrorListenerDispatch().syntaxError(
            self,
            token,
            lineno,
            offset,
            message,
            error,
        )

    def reset(self):
        """Reset the lexer, clearing the post-processing state as well."""
        self._resetInternalStates()
        super().reset()
298 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/resource/PinescriptParser.g4:
--------------------------------------------------------------------------------
1 | // Copyright 2024 Yunseong Hwang
2 | //
3 | // Licensed under the GNU Lesser General Public License Version 3.0 (the "License"); you may not use this file except in
4 | // compliance with the License. You may obtain a copy of the License at
5 | //
6 | // https://www.gnu.org/licenses/lgpl-3.0.en.html
7 | //
8 | // Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9 | // an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10 | // specific language governing permissions and limitations under the License.
11 | //
12 | // SPDX-License-Identifier: LGPL-3.0-or-later
13 |
// Parser grammar for Pinescript. Token vocabulary comes from
// PinescriptLexer; semantic helpers live in PinescriptParserBase.
parser grammar PinescriptParser;

options {
    tokenVocab = PinescriptLexer;
    superClass = PinescriptParserBase;
}

// STARTING RULES

// Default entry point: a whole script.
start: start_script;

start_script: statements? EOF;
start_expression: expression NEWLINE? EOF;

start_comments: comments? EOF;

// STATEMENTS

statements: statement+;
statement: compound_statement | simple_statements;

// COMPOUND_STATEMENTS

compound_statement
    : compound_assignment
    | function_declaration
    | type_declaration
    | structure_statement;

// SIMPLE STATEMENTS

// Several simple statements may share one line, separated by commas.
simple_statements: simple_statement (COMMA simple_statement)* COMMA? NEWLINE;

simple_statement
    : simple_assignment
    | expression_statement
    | import_statement
    | break_statement
    | continue_statement;

// COMPOUND ASSIGNMENTS

// "Compound" assignments take a structure (if/for/while/switch) as value.
compound_assignment
    : compound_variable_initialization
    | compound_reassignment
    | compound_augassignment;

compound_variable_initialization
    : compound_name_initialization
    | compound_tuple_initialization;

compound_name_initialization: variable_declaration EQUAL structure_expression;
compound_tuple_initialization: tuple_declaration EQUAL structure_expression;

compound_reassignment: assignment_target COLONEQUAL structure_expression;
compound_augassignment: assignment_target augassign_op structure_expression;

// FUNCTION DECLARATION

function_declaration
    : EXPORT? METHOD? name LPAR parameter_list? RPAR RARROW local_block;

parameter_list: parameter_definition (COMMA parameter_definition)* COMMA?;
parameter_definition: type_specification? name_store (EQUAL expression)?;

// TYPE DECLARATION

type_declaration: EXPORT? TYPE name NEWLINE INDENT field_definitions DEDENT;

field_definitions: field_definition+;
field_definition: type_specification name_store (EQUAL expression)? NEWLINE;

// STRUCTURES

structure: if_structure | for_structure | while_structure | switch_structure;

// The same structures may appear as statements or as assigned expressions.
structure_statement: structure;
structure_expression: structure;

// IF STRUCTURE

if_structure: if_structure_elif | if_structure_else;

if_structure_elif: IF expression local_block elif_structure;
if_structure_else: IF expression local_block else_block?;

elif_structure: elif_structure_elif | elif_structure_else;

elif_structure_elif: ELSE IF expression local_block elif_structure;
elif_structure_else: ELSE IF expression local_block else_block?;

else_block: ELSE local_block;

// FOR STRUCTURE

for_structure: for_structure_to | for_structure_in;

for_structure_to
    : FOR for_iterator EQUAL expression TO expression (BY expression)? local_block;
for_structure_in: FOR for_iterator IN expression local_block;

for_iterator: name_store | tuple_declaration;

// WHILE STRUCTURE

while_structure: WHILE expression local_block;

// SWITCH STRUCTURE

switch_structure: SWITCH expression? NEWLINE INDENT switch_cases DEDENT;

switch_cases: switch_pattern_case+ switch_default_case?;

switch_pattern_case: expression RARROW local_block;
switch_default_case: RARROW local_block;

// LOCAL BLOCK

// A block is either indented on following lines or inline on the same line.
local_block: indented_local_block | inline_local_block;

indented_local_block: NEWLINE INDENT statements DEDENT;
inline_local_block: statement;

// SIMPLE ASSIGNMENTS

simple_assignment
    : simple_variable_initialization
    | simple_reassignment
    | simple_augassignment;

simple_variable_initialization
    : simple_name_initialization
    | simple_tuple_initialization;

simple_name_initialization: variable_declaration EQUAL expression;
simple_tuple_initialization: tuple_declaration EQUAL expression;

simple_reassignment: assignment_target COLONEQUAL expression;
simple_augassignment: assignment_target augassign_op expression;

// EXPRESSIONS

expression: conditional_expression;
expression_statement: expression;

// CONDITIONAL TERNARY EXPRESSION

conditional_expression: conditional_expression_rule | disjunction_expression;
conditional_expression_rule
    : disjunction_expression QUESTION expression COLON expression;

// LOGICAL EXPRESSIONS

disjunction_expression: disjunction_expression_rule | conjunction_expression;
disjunction_expression_rule
    : conjunction_expression (OR conjunction_expression)+;

conjunction_expression: conjunction_expression_rule | equality_expression;
conjunction_expression_rule: equality_expression (AND equality_expression)+;

// COMPARISON EXPRESSIONS

// Comparisons collect chained operator/operand pairs (a == b != c ...).
equality_expression: equality_expression_rule | inequality_expression;
equality_expression_rule: inequality_expression equality_trailing_pair+;

equality_trailing_pair: equal_trailing_pair | not_equal_trailing_pair;

equal_trailing_pair: EQEQUAL inequality_expression;
not_equal_trailing_pair: NOTEQUAL inequality_expression;

inequality_expression: inequality_expression_rule | additive_expression;
inequality_expression_rule: additive_expression inequality_trailing_pair+;

inequality_trailing_pair
    : less_than_equal_trailing_pair
    | less_than_trailing_pair
    | greater_than_equal_trailing_pair
    | greater_than_trailing_pair;

less_than_equal_trailing_pair: LESSEQUAL additive_expression;
less_than_trailing_pair: LESS additive_expression;
greater_than_equal_trailing_pair: GREATEREQUAL additive_expression;
greater_than_trailing_pair: GREATER additive_expression;

// ARITHMETIC EXPRESSIONS

// Left-recursive rules give the usual left-associative arithmetic.
additive_expression
    : additive_expression additive_op multiplicative_expression
    | multiplicative_expression;

additive_op: PLUS | MINUS;

multiplicative_expression
    : multiplicative_expression multiplicative_op unary_expression
    | unary_expression;

multiplicative_op: STAR | SLASH | PERCENT;

unary_expression: unary_op unary_expression | primary_expression;

unary_op: NOT | PLUS | MINUS;

// PRIMARY EXPRESSIONS

// Left-recursive postfix forms: attribute access, call, subscript.
primary_expression
    : primary_expression DOT name_load # primary_expression_attribute
    | primary_expression template_spec_suffix? LPAR argument_list? RPAR # primary_expression_call
    | primary_expression LSQB subscript_slice RSQB # primary_expression_subscript
    | atomic_expression # primary_expression_fallback;

argument_list: argument_definition (COMMA argument_definition)* COMMA?;
argument_definition: (name_store EQUAL)? expression;

subscript_slice: expression (COMMA expression)* COMMA?;

// ATOMIC EXPRESSIONS

atomic_expression
    : name_load
    | literal_expression
    | grouped_expression
    | tuple_expression;

literal_expression
    : literal_number
    | literal_string
    | literal_bool
    | literal_color;

literal_number: NUMBER;
literal_string: STRING;
literal_bool: TRUE | FALSE;
literal_color: COLOR;

grouped_expression: LPAR expression RPAR;
tuple_expression: LSQB expression (COMMA expression)* COMMA? RSQB;

// IMPORT

import_statement: IMPORT name SLASH name SLASH literal_number (AS name)?;

// LOOP CONTROLS

break_statement: BREAK;
continue_statement: CONTINUE;

// VARIABLE DECLARATION AND ASSIGNMENT RELATED SEGMENTS

variable_declaration: declaration_mode? type_specification? name_store;
tuple_declaration: LSQB name_store (COMMA name_store)* COMMA? RSQB;

declaration_mode: VARIP | VAR;

assignment_target
    : assignment_target_attribute
    | assignment_target_subscript
    | assignment_target_name
    | assignment_target_group;

assignment_target_attribute: primary_expression DOT name_store;
assignment_target_subscript: primary_expression LSQB subscript_slice RSQB;
assignment_target_name: name_store;
assignment_target_group: LPAR assignment_target RPAR;

augassign_op: STAREQUAL | SLASHEQUAL | PERCENTEQUAL | PLUSEQUAL | MINEQUAL;

// TYPE SPECIFICATION

type_specification
    : type_qualifier? attributed_type_name template_spec_suffix? array_type_suffix?;

type_qualifier: CONST | INPUT | SIMPLE | SERIES;
attributed_type_name: name_load (DOT name_load)*;

template_spec_suffix: LESS type_argument_list? GREATER;
array_type_suffix: LSQB RSQB;

type_argument_list: type_specification (COMMA type_specification)* COMMA?;

// NAME WITH SOFT KEYWORDS

// Soft keywords remain usable as ordinary identifiers.
name: NAME | TYPE | METHOD | CONST | INPUT | SIMPLE | SERIES;

// Aliases distinguishing read (load) vs write (store) contexts.
name_load: name;
name_store: name;

// COMMENTS

comments: comment+;
comment: COMMENT;
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/resource/PinescriptParserBase.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import sys
20 |
21 | from typing import TextIO
22 |
23 | from antlr4 import Parser
24 | from antlr4 import TokenStream
25 |
26 |
class PinescriptParserBase(Parser):
    # ruff: noqa: N802, N803, N806, A002

    """Hand-written superclass for the generated Pinescript parser.

    Provides small predicates comparing a given string against the text of
    the current lookahead token (presumably invoked from grammar actions —
    confirm against the generated parser).
    """

    def __init__(self, input: TokenStream, output: TextIO = sys.stdout):
        super().__init__(input, output)

    def isEqualToCurrentTokenText(self, tokenText: str) -> bool:
        """Check whether the current lookahead token's text equals *tokenText*."""
        current_text = self.getCurrentToken().text
        return current_text == tokenText

    def isNotEqualToCurrentTokenText(self, tokenText: str) -> bool:
        """Check whether the current lookahead token's text differs from *tokenText*."""
        return self.getCurrentToken().text != tokenText
38 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/tool/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/tool/generate.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import shutil
20 | import subprocess
21 |
22 | from pathlib import Path
23 |
24 |
def main():
    """Regenerate the ANTLR lexer/parser sources from the grammar files.

    Runs the ``antlr4`` tool over every ``*.g4`` grammar found in the sibling
    ``resource`` directory, emitting Python3 targets (with listener and
    visitor support) into the sibling ``generated`` directory, then copies
    the hand-written ``*.py`` base classes next to the generated modules.
    """
    script_directory_path = Path(__file__).parent

    grammar_source_directory_path = script_directory_path / ".." / "resource"
    grammar_output_directory_path = script_directory_path / ".." / "generated"

    grammar_file_encoding = "utf-8"

    # Expand the grammar list in Python: subprocess runs without a shell, so
    # a literal "*.g4" argument would be passed through unexpanded (Java/ANTLR
    # does not glob arguments on POSIX). Sorting keeps the invocation stable.
    grammar_file_paths = sorted(grammar_source_directory_path.glob("*.g4"))

    generate_grammar_command = [
        "antlr4",
        "-o",
        str(grammar_output_directory_path),
        "-lib",
        str(grammar_source_directory_path),
        "-encoding",
        grammar_file_encoding,
        "-listener",
        "-visitor",
        "-Dlanguage=Python3",
        *(str(path) for path in grammar_file_paths),
    ]

    subprocess.check_call(generate_grammar_command)  # noqa: S603

    # Ship the support/base classes alongside the generated modules so the
    # generated package is importable on its own.
    for filename in grammar_source_directory_path.glob("*.py"):
        shutil.copy(filename, grammar_output_directory_path)


if __name__ == "__main__":
    main()
55 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/antlr4/visitor.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from .generated import PinescriptParserVisitor
20 |
21 |
22 | __all__ = [
23 | "PinescriptParserVisitor",
24 | ]
25 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/asdl/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from .generated import * # noqa: F403
20 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/asdl/generated/PinescriptASTNode.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import builtins as _builtins
3 | import dataclasses as _dataclasses
4 | import typing as _typing
5 |
# Aliases for the ASDL builtin types referenced by the generated node classes.
identifier = str
int = int  # intentionally shadows the builtin name within this module
string = str | bytes
constant = str | bytes | int | float | complex | bool | tuple | frozenset | None | type(...)
10 |
11 |
class AST:
    """Root of the Pinescript AST node hierarchy.

    ``_fields`` lists child-node attribute names; ``_attributes`` lists
    the location-metadata attribute names carried by concrete nodes.
    """

    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = []
    _attributes: _typing.ClassVar[_builtins.list[_builtins.str]] = []
15 |
16 |
@_dataclasses.dataclass
class mod(AST):
    """Base class for module-level (root) nodes."""

    # dataclass(eq=True) would set __hash__ to None; keep identity hashing
    __hash__ = _builtins.object.__hash__
20 |
21 |
@_dataclasses.dataclass
class Script(mod):
    """A whole script: its body statements plus associated annotation strings."""

    body: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    annotations: _builtins.list[string] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["body", "annotations"]
    __hash__ = _builtins.object.__hash__
28 |
29 |
@_dataclasses.dataclass
class Expression(mod):
    """A single standalone expression used as the tree root."""

    body: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["body"]
    __hash__ = _builtins.object.__hash__
35 |
36 |
@_dataclasses.dataclass
class stmt(AST):
    """Base class for statement nodes.

    Carries optional source-location attributes, excluded from repr and
    equality comparison.
    """

    lineno: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    col_offset: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_lineno: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_col_offset: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    _attributes: _typing.ClassVar[_builtins.list[_builtins.str]] = [
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    __hash__ = _builtins.object.__hash__
50 |
51 |
@_dataclasses.dataclass
class FunctionDef(stmt):
    """A function declaration."""

    name: identifier = _dataclasses.field(default=None)
    args: _builtins.list[param] = _dataclasses.field(default_factory=_builtins.list)
    body: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    method: int | None = _dataclasses.field(default=None)  # int flag; presumably the `method` keyword — confirm
    export: int | None = _dataclasses.field(default=None)  # int flag; presumably the `export` keyword — confirm
    annotations: _builtins.list[string] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = [
        "name",
        "args",
        "body",
        "method",
        "export",
        "annotations",
    ]
    __hash__ = _builtins.object.__hash__
69 |
70 |
@_dataclasses.dataclass
class TypeDef(stmt):
    """A user-defined type declaration, with field definitions as its body."""

    name: identifier = _dataclasses.field(default=None)
    body: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    export: int | None = _dataclasses.field(default=None)
    annotations: _builtins.list[string] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["name", "body", "export", "annotations"]
    __hash__ = _builtins.object.__hash__
79 |
80 |
@_dataclasses.dataclass
class Assign(stmt):
    """A variable initialization: target with optional value, type, and mode."""

    target: expr = _dataclasses.field(default=None)
    value: expr | None = _dataclasses.field(default=None)
    type: expr | None = _dataclasses.field(default=None)
    mode: decl_mode | None = _dataclasses.field(default=None)
    annotations: _builtins.list[string] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["target", "value", "type", "mode", "annotations"]
    __hash__ = _builtins.object.__hash__
90 |
91 |
@_dataclasses.dataclass
class ReAssign(stmt):
    """A reassignment of an existing variable."""

    target: expr = _dataclasses.field(default=None)
    value: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["target", "value"]
    __hash__ = _builtins.object.__hash__
98 |
99 |
@_dataclasses.dataclass
class AugAssign(stmt):
    """An augmented assignment (target op= value)."""

    target: expr = _dataclasses.field(default=None)
    op: operator = _dataclasses.field(default=None)
    value: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["target", "op", "value"]
    __hash__ = _builtins.object.__hash__
107 |
108 |
@_dataclasses.dataclass
class Import(stmt):
    """An import statement (namespace/name/version), optionally aliased."""

    namespace: identifier = _dataclasses.field(default=None)
    name: identifier = _dataclasses.field(default=None)
    version: int = _dataclasses.field(default=None)
    alias: identifier | None = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["namespace", "name", "version", "alias"]
    __hash__ = _builtins.object.__hash__
117 |
118 |
@_dataclasses.dataclass
class Expr(stmt):
    """An expression evaluated as a statement."""

    value: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["value"]
    __hash__ = _builtins.object.__hash__
124 |
125 |
@_dataclasses.dataclass
class Break(stmt):
    """A `break` loop-control statement."""

    __hash__ = _builtins.object.__hash__
129 |
130 |
@_dataclasses.dataclass
class Continue(stmt):
    """A `continue` loop-control statement."""

    __hash__ = _builtins.object.__hash__
134 |
135 |
@_dataclasses.dataclass
class expr(AST):
    """Base class for expression nodes.

    Carries optional source-location attributes, excluded from repr and
    equality comparison.
    """

    lineno: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    col_offset: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_lineno: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_col_offset: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    _attributes: _typing.ClassVar[_builtins.list[_builtins.str]] = [
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    __hash__ = _builtins.object.__hash__
149 |
150 |
@_dataclasses.dataclass
class BoolOp(expr):
    # Boolean operation (``and``/``or``) over two or more operand expressions.
    op: bool_op = _dataclasses.field(default=None)
    values: _builtins.list[expr] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["op", "values"]
    __hash__ = _builtins.object.__hash__
157 |
158 |
@_dataclasses.dataclass
class BinOp(expr):
    # Binary arithmetic operation: ``left <op> right``.
    left: expr = _dataclasses.field(default=None)
    op: operator = _dataclasses.field(default=None)
    right: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["left", "op", "right"]
    __hash__ = _builtins.object.__hash__
166 |
167 |
@_dataclasses.dataclass
class UnaryOp(expr):
    # Unary operation: ``<op> operand`` (Not/UAdd/USub).
    op: unary_op = _dataclasses.field(default=None)
    operand: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["op", "operand"]
    __hash__ = _builtins.object.__hash__
174 |
175 |
@_dataclasses.dataclass
class Conditional(expr):
    # Ternary conditional expression: ``body`` if ``test`` holds, else ``orelse``.
    test: expr = _dataclasses.field(default=None)
    body: expr = _dataclasses.field(default=None)
    orelse: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["test", "body", "orelse"]
    __hash__ = _builtins.object.__hash__
183 |
184 |
@_dataclasses.dataclass
class Compare(expr):
    # Comparison chain: ``left ops[0] comparators[0] ops[1] comparators[1] ...``.
    left: expr = _dataclasses.field(default=None)
    ops: _builtins.list[compare_op] = _dataclasses.field(default_factory=_builtins.list)
    comparators: _builtins.list[expr] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["left", "ops", "comparators"]
    __hash__ = _builtins.object.__hash__
192 |
193 |
@_dataclasses.dataclass
class Call(expr):
    # Function call: callee expression plus a list of ``Arg`` nodes.
    func: expr = _dataclasses.field(default=None)
    args: _builtins.list[arg] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["func", "args"]
    __hash__ = _builtins.object.__hash__
200 |
201 |
@_dataclasses.dataclass
class Constant(expr):
    # Literal constant value; ``kind`` optionally tags the literal's flavor.
    value: constant = _dataclasses.field(default=None)
    kind: string | None = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["value", "kind"]
    __hash__ = _builtins.object.__hash__
208 |
209 |
@_dataclasses.dataclass
class Attribute(expr):
    # Attribute access ``value.attr``; may appear in assignment context,
    # hence the Load/Store ``ctx`` field.
    value: expr = _dataclasses.field(default=None)
    attr: identifier = _dataclasses.field(default=None)
    ctx: expr_context = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["value", "attr", "ctx"]
    __hash__ = _builtins.object.__hash__
217 |
218 |
@_dataclasses.dataclass
class Subscript(expr):
    # Subscription ``value[slice]``; ``slice`` is optional per the grammar.
    value: expr = _dataclasses.field(default=None)
    slice: expr | None = _dataclasses.field(default=None)
    ctx: expr_context = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["value", "slice", "ctx"]
    __hash__ = _builtins.object.__hash__
226 |
227 |
@_dataclasses.dataclass
class Name(expr):
    # Bare identifier reference, with Load/Store context.
    id: identifier = _dataclasses.field(default=None)
    ctx: expr_context = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["id", "ctx"]
    __hash__ = _builtins.object.__hash__
234 |
235 |
@_dataclasses.dataclass
class Tuple(expr):
    # Tuple expression of element expressions, with Load/Store context.
    elts: _builtins.list[expr] = _dataclasses.field(default_factory=_builtins.list)
    ctx: expr_context = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["elts", "ctx"]
    __hash__ = _builtins.object.__hash__
242 |
243 |
@_dataclasses.dataclass
class ForTo(expr):
    # Counted ``for target = start to end [by step]`` loop.  Per the grammar
    # comment, this structure can appear as both statement and expression.
    target: expr = _dataclasses.field(default=None)
    start: expr = _dataclasses.field(default=None)
    end: expr = _dataclasses.field(default=None)
    body: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    step: expr | None = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["target", "start", "end", "body", "step"]
    __hash__ = _builtins.object.__hash__
253 |
254 |
@_dataclasses.dataclass
class ForIn(expr):
    # ``for target in iter`` loop; usable as statement or expression.
    target: expr = _dataclasses.field(default=None)
    iter: expr = _dataclasses.field(default=None)
    body: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["target", "iter", "body"]
    __hash__ = _builtins.object.__hash__
262 |
263 |
@_dataclasses.dataclass
class While(expr):
    # ``while test`` loop; usable as statement or expression.
    test: expr = _dataclasses.field(default=None)
    body: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["test", "body"]
    __hash__ = _builtins.object.__hash__
270 |
271 |
@_dataclasses.dataclass
class If(expr):
    # ``if``/``else`` construct; usable as statement or expression.
    test: expr = _dataclasses.field(default=None)
    body: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    orelse: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["test", "body", "orelse"]
    __hash__ = _builtins.object.__hash__
279 |
280 |
@_dataclasses.dataclass
class Switch(expr):
    # ``switch`` construct: a list of ``Case`` nodes, with an optional subject.
    cases: _builtins.list[case] = _dataclasses.field(default_factory=_builtins.list)
    subject: expr | None = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["cases", "subject"]
    __hash__ = _builtins.object.__hash__
287 |
288 |
@_dataclasses.dataclass
class Qualify(expr):
    # Type qualification (const/input/simple/series); per the grammar this can
    # appear only in type qualification and template specialization contexts.
    qualifier: type_qual = _dataclasses.field(default=None)
    value: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["qualifier", "value"]
    __hash__ = _builtins.object.__hash__
295 |
296 |
@_dataclasses.dataclass
class Specialize(expr):
    # Template specialization ``value<args>``; same restricted contexts as Qualify.
    value: expr = _dataclasses.field(default=None)
    args: expr = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["value", "args"]
    __hash__ = _builtins.object.__hash__
303 |
304 |
@_dataclasses.dataclass
class decl_mode(AST):
    # Base class for declaration modes: Var | VarIp.
    __hash__ = _builtins.object.__hash__
308 |
309 |
@_dataclasses.dataclass
class Var(decl_mode):
    # ``var`` declaration mode marker.
    __hash__ = _builtins.object.__hash__
313 |
314 |
@_dataclasses.dataclass
class VarIp(decl_mode):
    # ``varip`` declaration mode marker (name per the ASDL grammar).
    __hash__ = _builtins.object.__hash__
318 |
319 |
@_dataclasses.dataclass
class type_qual(AST):
    # Base class for type qualifiers: Const | Input | Simple | Series.
    __hash__ = _builtins.object.__hash__
323 |
324 |
@_dataclasses.dataclass
class Const(type_qual):
    # ``const`` type qualifier marker.
    __hash__ = _builtins.object.__hash__
328 |
329 |
@_dataclasses.dataclass
class Input(type_qual):
    # ``input`` type qualifier marker.
    __hash__ = _builtins.object.__hash__
333 |
334 |
@_dataclasses.dataclass
class Simple(type_qual):
    # ``simple`` type qualifier marker.
    __hash__ = _builtins.object.__hash__
338 |
339 |
@_dataclasses.dataclass
class Series(type_qual):
    # ``series`` type qualifier marker.
    __hash__ = _builtins.object.__hash__
343 |
344 |
@_dataclasses.dataclass
class expr_context(AST):
    # Base class for expression contexts: Load | Store.
    __hash__ = _builtins.object.__hash__
348 |
349 |
@_dataclasses.dataclass
class Load(expr_context):
    # Expression appears in a value-read position.
    __hash__ = _builtins.object.__hash__
353 |
354 |
@_dataclasses.dataclass
class Store(expr_context):
    # Expression appears as an assignment target.
    __hash__ = _builtins.object.__hash__
358 |
359 |
@_dataclasses.dataclass
class bool_op(AST):
    # Base class for boolean operators: And | Or.
    __hash__ = _builtins.object.__hash__
363 |
364 |
@_dataclasses.dataclass
class And(bool_op):
    # Boolean ``and`` operator marker.
    __hash__ = _builtins.object.__hash__
368 |
369 |
@_dataclasses.dataclass
class Or(bool_op):
    # Boolean ``or`` operator marker.
    __hash__ = _builtins.object.__hash__
373 |
374 |
@_dataclasses.dataclass
class operator(AST):
    # Base class for binary operators: Add | Sub | Mult | Div | Mod.
    __hash__ = _builtins.object.__hash__
378 |
379 |
@_dataclasses.dataclass
class Add(operator):
    # Addition operator marker.
    __hash__ = _builtins.object.__hash__
383 |
384 |
@_dataclasses.dataclass
class Sub(operator):
    # Subtraction operator marker.
    __hash__ = _builtins.object.__hash__
388 |
389 |
@_dataclasses.dataclass
class Mult(operator):
    # Multiplication operator marker.
    __hash__ = _builtins.object.__hash__
393 |
394 |
@_dataclasses.dataclass
class Div(operator):
    # Division operator marker.
    __hash__ = _builtins.object.__hash__
398 |
399 |
@_dataclasses.dataclass
class Mod(operator):
    # Modulo operator marker.
    __hash__ = _builtins.object.__hash__
403 |
404 |
@_dataclasses.dataclass
class unary_op(AST):
    # Base class for unary operators: Not | UAdd | USub.
    __hash__ = _builtins.object.__hash__
408 |
409 |
@_dataclasses.dataclass
class Not(unary_op):
    # Logical negation operator marker.
    __hash__ = _builtins.object.__hash__
413 |
414 |
@_dataclasses.dataclass
class UAdd(unary_op):
    # Unary plus operator marker.
    __hash__ = _builtins.object.__hash__
418 |
419 |
@_dataclasses.dataclass
class USub(unary_op):
    # Unary minus operator marker.
    __hash__ = _builtins.object.__hash__
423 |
424 |
@_dataclasses.dataclass
class compare_op(AST):
    # Base class for comparison operators: Eq | NotEq | Lt | LtE | Gt | GtE.
    __hash__ = _builtins.object.__hash__
428 |
429 |
@_dataclasses.dataclass
class Eq(compare_op):
    # Equality comparison marker.
    __hash__ = _builtins.object.__hash__
433 |
434 |
@_dataclasses.dataclass
class NotEq(compare_op):
    # Inequality comparison marker.
    __hash__ = _builtins.object.__hash__
438 |
439 |
@_dataclasses.dataclass
class Lt(compare_op):
    # Less-than comparison marker.
    __hash__ = _builtins.object.__hash__
443 |
444 |
@_dataclasses.dataclass
class LtE(compare_op):
    # Less-than-or-equal comparison marker.
    __hash__ = _builtins.object.__hash__
448 |
449 |
@_dataclasses.dataclass
class Gt(compare_op):
    # Greater-than comparison marker.
    __hash__ = _builtins.object.__hash__
453 |
454 |
@_dataclasses.dataclass
class GtE(compare_op):
    # Greater-than-or-equal comparison marker.
    __hash__ = _builtins.object.__hash__
458 |
459 |
@_dataclasses.dataclass
class param(AST):
    # Base class for function-definition parameters; carries source-location
    # attributes (col_offset is the byte offset in the UTF-8 parser input).
    lineno: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    col_offset: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_lineno: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_col_offset: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    _attributes: _typing.ClassVar[_builtins.list[_builtins.str]] = [
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    __hash__ = _builtins.object.__hash__
473 |
474 |
@_dataclasses.dataclass
class Param(param):
    # Declared parameter: name with optional default value and type expression.
    name: identifier = _dataclasses.field(default=None)
    default: expr | None = _dataclasses.field(default=None)
    type: expr | None = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["name", "default", "type"]
    __hash__ = _builtins.object.__hash__
482 |
483 |
@_dataclasses.dataclass
class arg(AST):
    # Base class for call arguments; carries source-location attributes.
    lineno: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    col_offset: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_lineno: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_col_offset: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    _attributes: _typing.ClassVar[_builtins.list[_builtins.str]] = [
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    __hash__ = _builtins.object.__hash__
497 |
498 |
@_dataclasses.dataclass
class Arg(arg):
    # Call argument: value expression with an optional keyword name.
    value: expr = _dataclasses.field(default=None)
    name: identifier | None = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["value", "name"]
    __hash__ = _builtins.object.__hash__
505 |
506 |
@_dataclasses.dataclass
class case(AST):
    # Base class for switch cases; carries source-location attributes.
    lineno: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    col_offset: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_lineno: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_col_offset: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    _attributes: _typing.ClassVar[_builtins.list[_builtins.str]] = [
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    __hash__ = _builtins.object.__hash__
520 |
521 |
@_dataclasses.dataclass
class Case(case):
    # One arm of a Switch; ``pattern`` is None for the default arm
    # (grammar: ``Case(stmt* body, expr? pattern)``).
    body: _builtins.list[stmt] = _dataclasses.field(default_factory=_builtins.list)
    pattern: expr | None = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["body", "pattern"]
    __hash__ = _builtins.object.__hash__
528 |
529 |
@_dataclasses.dataclass
class cmnt(AST):
    # Base class for comment nodes; carries source-location attributes.
    lineno: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    col_offset: int = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_lineno: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    end_col_offset: int | None = _dataclasses.field(default=None, repr=False, compare=False, kw_only=True)
    _attributes: _typing.ClassVar[_builtins.list[_builtins.str]] = [
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]
    __hash__ = _builtins.object.__hash__
543 |
544 |
@_dataclasses.dataclass
class Comment(cmnt):
    # Source comment: text value with an optional ``kind`` tag.
    value: string = _dataclasses.field(default=None)
    kind: string | None = _dataclasses.field(default=None)
    _fields: _typing.ClassVar[_builtins.list[_builtins.str]] = ["value", "kind"]
    __hash__ = _builtins.object.__hash__
551 |
552 |
# Explicit public API of this generated module (emitted by the ASDL
# generator's export step); keep in sync with the definitions above.
__all__ = [
    "identifier",
    "int",
    "string",
    "constant",
    "AST",
    "mod",
    "Script",
    "Expression",
    "stmt",
    "FunctionDef",
    "TypeDef",
    "Assign",
    "ReAssign",
    "AugAssign",
    "Import",
    "Expr",
    "Break",
    "Continue",
    "expr",
    "BoolOp",
    "BinOp",
    "UnaryOp",
    "Conditional",
    "Compare",
    "Call",
    "Constant",
    "Attribute",
    "Subscript",
    "Name",
    "Tuple",
    "ForTo",
    "ForIn",
    "While",
    "If",
    "Switch",
    "Qualify",
    "Specialize",
    "decl_mode",
    "Var",
    "VarIp",
    "type_qual",
    "Const",
    "Input",
    "Simple",
    "Series",
    "expr_context",
    "Load",
    "Store",
    "bool_op",
    "And",
    "Or",
    "operator",
    "Add",
    "Sub",
    "Mult",
    "Div",
    "Mod",
    "unary_op",
    "Not",
    "UAdd",
    "USub",
    "compare_op",
    "Eq",
    "NotEq",
    "Lt",
    "LtE",
    "Gt",
    "GtE",
    "param",
    "Param",
    "arg",
    "Arg",
    "case",
    "Case",
    "cmnt",
    "Comment",
]
631 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/asdl/generated/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from .PinescriptASTNode import *
4 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/asdl/resource/Pinescript.asdl:
--------------------------------------------------------------------------------
1 | -- Copyright 2024 Yunseong Hwang
2 | --
3 | -- Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | -- you may not use this file except in compliance with the License.
5 | -- You may obtain a copy of the License at
6 | --
7 | -- https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | --
9 | -- Unless required by applicable law or agreed to in writing, software
10 | -- distributed under the License is distributed on an "AS IS" BASIS,
11 | -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | -- See the License for the specific language governing permissions and
13 | -- limitations under the License.
14 | --
15 | -- SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | module Pinescript
18 | {
19 | mod = Script(stmt* body, string* annotations)
20 | | Expression(expr body)
21 |
22 | stmt = FunctionDef(identifier name, param* args, stmt* body, int? method, int? export, string* annotations)
23 | | TypeDef(identifier name, stmt* body, int? export, string* annotations)
24 | | Assign(expr target, expr? value, expr? type, decl_mode? mode, string* annotations)
25 | | ReAssign(expr target, expr value)
26 | | AugAssign(expr target, operator op, expr value)
27 | | Import(identifier namespace, identifier name, int version, identifier? alias)
28 | | Expr(expr value)
29 | | Break
30 | | Continue
31 |
32 | -- col_offset is the byte offset in the utf8 string the parser uses
33 | attributes (int lineno, int col_offset, int? end_lineno, int? end_col_offset)
34 |
35 | expr = BoolOp(bool_op op, expr* values)
36 | | BinOp(expr left, operator op, expr right)
37 | | UnaryOp(unary_op op, expr operand)
38 | | Conditional(expr test, expr body, expr orelse)
39 | | Compare(expr left, compare_op* ops, expr* comparators)
40 | | Call(expr func, arg* args)
41 | | Constant(constant value, string? kind)
42 |
43 | -- the following expression can appear in assignment context
44 | | Attribute(expr value, identifier attr, expr_context ctx)
45 | | Subscript(expr value, expr? slice, expr_context ctx)
46 | | Name(identifier id, expr_context ctx)
47 | | Tuple(expr* elts, expr_context ctx)
48 |
49 | -- the following structure can appear as both statement and expression
50 | | ForTo(expr target, expr start, expr end, stmt* body, expr? step)
51 | | ForIn(expr target, expr iter, stmt* body)
52 | | While(expr test, stmt* body)
53 | | If(expr test, stmt* body, stmt* orelse)
54 | | Switch(case* cases, expr? subject)
55 |
56 | -- can appear only in type qualification and template specialization
57 | | Qualify(type_qual qualifier, expr value)
58 | | Specialize(expr value, expr args)
59 |
60 | -- col_offset is the byte offset in the utf8 string the parser uses
61 | attributes (int lineno, int col_offset, int? end_lineno, int? end_col_offset)
62 |
63 | decl_mode = Var | VarIp
64 |
65 | type_qual = Const | Input | Simple | Series
66 |
67 | expr_context = Load | Store
68 |
69 | bool_op = And | Or
70 |
71 | operator = Add | Sub | Mult | Div | Mod
72 |
73 | unary_op = Not | UAdd | USub
74 |
75 | compare_op = Eq | NotEq | Lt | LtE | Gt | GtE
76 |
77 | param = Param(identifier name, expr? default, expr? type)
78 |
79 | -- col_offset is the byte offset in the utf8 string the parser uses
80 | attributes (int lineno, int col_offset, int? end_lineno, int? end_col_offset)
81 |
82 | arg = Arg(expr value, identifier? name)
83 |
84 | -- col_offset is the byte offset in the utf8 string the parser uses
85 | attributes (int lineno, int col_offset, int? end_lineno, int? end_col_offset)
86 |
87 | case = Case(stmt* body, expr? pattern)
88 |
89 | -- col_offset is the byte offset in the utf8 string the parser uses
90 | attributes (int lineno, int col_offset, int? end_lineno, int? end_col_offset)
91 |
92 | cmnt = Comment(string value, string? kind)
93 | -- col_offset is the byte offset in the utf8 string the parser uses
94 | attributes (int lineno, int col_offset, int? end_lineno, int? end_col_offset)
95 | }
96 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/asdl/tool/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/asdl/tool/asdlgen.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import ast
20 | import functools
21 | import itertools
22 |
23 | from argparse import ArgumentParser
24 | from pathlib import Path
25 |
26 | import pyasdl
27 |
28 |
class PythonGenerator(pyasdl.ASDLVisitor):
    """Translate a pyasdl module tree into a Python ``ast.Module`` of dataclasses.

    Each ASDL sum/product type becomes a ``@dataclass`` with ``_fields`` /
    ``_attributes`` class variables, mirroring CPython's ``ast`` node layout.
    The emitted module references ``_builtins`` / ``_dataclasses`` / ``_typing``
    aliases, which ``_generate_internals`` imports.
    """

    # ruff: noqa: N802

    def __init__(self, *, defaults: str = "all"):
        # defaults: "all" gives every field a default; "none" suppresses
        # defaults entirely (sequence/optional qualifiers may still override).
        self._defaults = defaults
        # Pre-built ast fragments reused across the generated module.
        self._base = ast.Name("AST", ast.Load())
        self._dataclasses = ast.Name("_dataclasses", ast.Load())
        self._dataclass = ast.Attribute(self._dataclasses, "dataclass", ast.Load())
        self._field = ast.Attribute(self._dataclasses, "field", ast.Load())
        self._typing = ast.Name("_typing", ast.Load())
        self._classvar = ast.Attribute(self._typing, "ClassVar", ast.Load())
        self._builtins = ast.Name("_builtins", ast.Load())
        self._object = ast.Attribute(self._builtins, "object", ast.Load())
        self._hash = ast.Attribute(self._object, "__hash__", ast.Load())
        # `__hash__ = _builtins.object.__hash__` restores identity hashing,
        # which @dataclass would otherwise disable when __eq__ is generated.
        self._assign_hash = ast.Assign([ast.Name("__hash__", ast.Store())], self._hash)
        self._fields = ast.Name("_fields", ast.Store())
        self._attributes = ast.Name("_attributes", ast.Store())
        self._list = ast.Attribute(self._builtins, "list", ast.Load())
        self._str = ast.Attribute(self._builtins, "str", ast.Load())
        # Annotation `_typing.ClassVar[_builtins.list[_builtins.str]]`.
        self._classvar_list_str = ast.Subscript(
            self._classvar,
            ast.Subscript(
                self._list,
                self._str,
                ast.Load(),
            ),
            ast.Load(),
        )
        # Location attributes are kw-only and excluded from repr/comparison.
        self._attribute_keywords = [
            ast.keyword("repr", ast.Constant(value=False)),
            ast.keyword("compare", ast.Constant(value=False)),
            ast.keyword("kw_only", ast.Constant(value=True)),
        ]

    def _generate_internals(self) -> list[ast.ImportFrom | ast.Import]:
        """
        from __future__ import annotations

        import builtins as _builtins
        import dataclasses as _dataclasses
        import typing as _typing
        """
        return [
            ast.ImportFrom("__future__", [ast.alias("annotations")]),
            ast.Import([ast.alias("builtins", self._builtins.id)]),
            ast.Import([ast.alias("dataclasses", self._dataclasses.id)]),
            ast.Import([ast.alias("typing", self._typing.id)]),
        ]

    def _generate_builtin_types(self) -> list[ast.Assign]:
        """
        identifier = str
        int = int
        string = str | bytes
        constant = str | bytes | int | float | complex | bool | tuple | frozenset | None | type(...)
        """
        return [
            ast.Assign([ast.Name("identifier", ast.Store())], ast.Name("str", ast.Load())),
            ast.Assign([ast.Name("int", ast.Store())], ast.Name("int", ast.Load())),
            ast.Assign(
                [ast.Name("string", ast.Store())],
                ast.BinOp(ast.Name("str", ast.Load()), ast.BitOr(), ast.Name("bytes", ast.Load())),
            ),
            ast.Assign(
                [ast.Name("constant", ast.Store())],
                functools.reduce(
                    lambda left, right: ast.BinOp(left, ast.BitOr(), right),
                    [
                        ast.Name("str", ast.Load()),
                        ast.Name("bytes", ast.Load()),
                        ast.Name("int", ast.Load()),
                        ast.Name("float", ast.Load()),
                        ast.Name("complex", ast.Load()),
                        ast.Name("bool", ast.Load()),
                        ast.Name("tuple", ast.Load()),
                        ast.Name("frozenset", ast.Load()),
                        ast.Constant(None),
                        ast.Call(ast.Name("type", ast.Load()), [ast.Constant(Ellipsis)], []),
                    ],
                ),
            ),
        ]

    def _generate_base(self) -> ast.ClassDef:
        """
        class AST:
            _fields: ClassVar[list[str]] = []
            _attributes: ClassVar[list[str]] = []
        """
        return ast.ClassDef(
            name=self._base.id,
            body=[
                ast.AnnAssign(self._fields, self._classvar_list_str, ast.List([], ast.Load()), simple=1),
                ast.AnnAssign(self._attributes, self._classvar_list_str, ast.List([], ast.Load()), simple=1),
            ],
            bases=[],
            keywords=[],
            decorator_list=[],
        )

    def _generate_exports(self, names: list[str]) -> ast.Assign:
        """
        __all__ = [...]
        """
        exports = ast.Assign(
            [ast.Name("__all__", ast.Store())],
            ast.List(
                [ast.Constant(name) for name in names],
                ast.Load(),
            ),
        )
        return exports

    def _fix_attributes(self, attributes: list[ast.AnnAssign]):
        """In-place: ensure every attribute has a field() default carrying the
        repr=False/compare=False/kw_only=True keywords."""
        for annassign in attributes:
            if annassign.value is None:
                annassign.value = ast.Call(self._field, [], [])
            annassign.value.keywords.extend(self._attribute_keywords)
        return attributes

    def _assign_attributes(self, attributes: list[ast.AnnAssign]):
        """Build `_attributes: ClassVar[list[str]] = [...names...]`."""
        target = self._attributes
        annotation = self._classvar_list_str
        default = ast.List([ast.Constant(attr.target.id) for attr in attributes], ast.Load())
        assign = ast.AnnAssign(target, annotation, default, simple=1)
        return assign

    def _assign_fields(self, fields: list[ast.AnnAssign]):
        """Build `_fields: ClassVar[list[str]] = [...names...]`."""
        target = self._fields
        annotation = self._classvar_list_str
        default = ast.List([ast.Constant(field.target.id) for field in fields], ast.Load())
        assign = ast.AnnAssign(target, annotation, default, simple=1)
        return assign

    def visit_Module(self, node: pyasdl.Module) -> ast.Module:
        """Assemble the full generated module: imports, builtin type aliases,
        the AST base class, all node classes, then __all__."""
        internals = self._generate_internals()
        builtins = self._generate_builtin_types()
        base = self._generate_base()
        definitions = list(itertools.chain.from_iterable(self.visit_all(node.body)))
        names = [t.id for b in builtins for t in b.targets] + [self._base.id] + [d.name for d in definitions]
        exports = self._generate_exports(names)
        body = internals + builtins + [base] + definitions + [exports]
        module = ast.Module(body, type_ignores=[])
        return module

    def visit_Type(self, node: pyasdl.Type) -> list[ast.ClassDef]:
        """One ASDL type -> one or more class definitions (sum types expand
        into a base class plus one class per constructor)."""
        attributes = self.visit_all(node.value.attributes)
        self._fix_attributes(attributes)
        definitions = self.visit(
            node.value,
            name=node.name,
            attributes=attributes,
        )
        if not isinstance(definitions, list):
            definitions = [definitions]
        return definitions

    def visit_Sum(self, node: pyasdl.Sum, name: str, attributes: list[ast.AnnAssign]) -> list[ast.ClassDef]:
        """Sum type: abstract base carrying the shared attributes, followed by
        one subclass per constructor."""
        body = []
        if attributes:
            body.extend(attributes)
            body.append(self._assign_attributes(attributes))
        body.append(self._assign_hash)
        cls = ast.ClassDef(
            name=name,
            body=body,
            bases=[self._base],
            keywords=[],
            decorator_list=[self._dataclass],
        )
        definitions = []
        definitions.append(cls)
        definitions.extend(self.visit_all(node.types, base=name))
        return definitions

    def visit_Constructor(self, node: pyasdl.Constructor, base: str) -> ast.ClassDef:
        """One sum-type constructor -> a dataclass subclass of the sum base."""
        fields = self.visit_all(node.fields)
        name = node.name
        body = []
        base = ast.Name(base, ast.Load())
        if fields:
            body.extend(fields)
            body.append(self._assign_fields(fields))
        body.append(self._assign_hash)
        cls = ast.ClassDef(
            name=name,
            body=body,
            bases=[base],
            keywords=[],
            decorator_list=[self._dataclass],
        )
        return cls

    def visit_Product(self, node: pyasdl.Product, name: str, attributes: list[ast.AnnAssign]) -> ast.ClassDef:
        """Product type -> a single dataclass with fields then attributes."""
        fields = self.visit_all(node.fields)
        body = fields + attributes
        if fields:
            body.append(self._assign_fields(fields))
        if attributes:
            body.append(self._assign_attributes(attributes))
        body.append(self._assign_hash)
        cls = ast.ClassDef(
            name=name,
            body=body,
            bases=[self._base],
            keywords=[],
            decorator_list=[self._dataclass],
        )
        return cls

    def visit_Field(self, node: pyasdl.Field) -> ast.AnnAssign:
        """One ASDL field -> an annotated assignment; `*` fields become
        list[...] with a list default_factory, `?` fields become `X | None`."""
        target = ast.Name(node.name, ast.Store())
        annotation = ast.Name(node.kind, ast.Load())
        default = None
        if self._defaults == "all":
            default = ast.Call(self._field, [], [ast.keyword("default", ast.Constant(None))])
        if node.qualifier is not None:
            if node.qualifier is pyasdl.FieldQualifier.SEQUENCE:
                annotation = ast.Subscript(self._list, annotation, ast.Load())
                default = ast.Call(self._field, [], [ast.keyword("default_factory", self._list)])
            elif node.qualifier is pyasdl.FieldQualifier.OPTIONAL:
                annotation = ast.BinOp(annotation, ast.BitOr(), ast.Constant(None))
                default = ast.Call(self._field, [], [ast.keyword("default", ast.Constant(None))])
            else:
                msg = f"Unexpected field qualifier: {node.qualifier}"
                raise ValueError(msg)
        if self._defaults == "none":
            default = None
        return ast.AnnAssign(target, annotation, default, simple=1)

    def generate(self, tree: pyasdl.Module) -> ast.Module:
        """Public entry point: generate the module for a parsed ASDL tree."""
        return self.visit_Module(tree)
261 |
262 |
def main():
    """CLI entry point: parse an ASDL file and emit the generated Python module.

    Usage: ``asdlgen.py FILE [-o OUT]``.  ``--out`` defaults to file
    descriptor 1, i.e. the generated source goes to stdout.
    """
    parser = ArgumentParser()
    parser.add_argument("file", type=Path)
    parser.add_argument("-o", "--out", default=1)
    options = parser.parse_args()

    with open(options.file, encoding="utf-8") as stream:
        tree = pyasdl.parse(stream.read())

    generator = PythonGenerator()
    stub = generator.generate(tree)

    # ``--out`` defaults to fd 1 (stdout).  When writing to an inherited file
    # descriptor, pass closefd=False so the ``with`` block does not close the
    # process's stdout; real files opened by path are closed as usual.
    closefd = not isinstance(options.out, int)
    with open(options.out, "w", encoding="utf-8", closefd=closefd) as stream:
        stream.write(ast.unparse(ast.fix_missing_locations(stub)))


if __name__ == "__main__":
    main()
281 |
--------------------------------------------------------------------------------
/src/pynescript/ast/grammar/asdl/tool/generate.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import shutil
20 | import subprocess
21 | import sys
22 |
23 | from pathlib import Path
24 |
25 |
def main():
    """Regenerate the AST node module from the ASDL grammar, then format it."""
    here = Path(__file__).parent

    generator_script = here / "asdlgen.py"
    source_dir = here / ".." / "resource"
    output_dir = here / ".." / "generated"
    asdl_source = source_dir / "Pinescript.asdl"
    generated_module = output_dir / "PinescriptASTNode.py"

    # Run the ASDL-to-Python generator in a subprocess under the current
    # interpreter so it uses the same environment.
    subprocess.check_call(  # noqa: S603
        [
            sys.executable,
            str(generator_script),
            str(asdl_source),
            "-o",
            str(generated_module),
        ]
    )

    # Best-effort formatting pass: only runs when ruff is on PATH, and its
    # exit status is deliberately ignored.
    ruff = shutil.which("ruff")
    if ruff:
        subprocess.call([ruff, "format", "--silent", str(generated_module)])  # noqa: S603

    # Copy any supporting Python modules shipped next to the grammar into the
    # generated package.
    for python_file in source_dir.glob("*.py"):
        shutil.copy(python_file, output_dir)


if __name__ == "__main__":
    main()
64 |
--------------------------------------------------------------------------------
/src/pynescript/ast/helper.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import itertools
20 | import re
21 |
22 | from collections import deque
23 | from collections.abc import Iterator
24 | from pathlib import Path
25 | from typing import Any
26 |
27 | from antlr4 import CommonTokenStream
28 | from antlr4 import FileStream
29 | from antlr4 import InputStream
30 |
31 | from pynescript.ast import node as ast
32 | from pynescript.ast.builder import PinescriptASTBuilder
33 | from pynescript.ast.grammar.antlr4.error_listener import PinescriptErrorListener
34 | from pynescript.ast.grammar.antlr4.lexer import PinescriptLexer
35 | from pynescript.ast.grammar.antlr4.parser import PinescriptParser
36 | from pynescript.ast.node import AST
37 | from pynescript.ast.node import Expression
38 | from pynescript.util.itertools import grouper
39 |
40 |
def _add_annotations(script, statements, comments):
    """Attach annotation comments to *script* and its top-level statements.

    Comments and statements are merged in source order and partitioned into
    alternating runs of comments and statements.  Each comment run keeps only
    annotation comments (``kind`` starting with ``"@"``); those annotations are
    then attached to the statement that immediately follows the run, filtered
    by the final character of the comment kind (apparently "S" = script,
    "F" = function, "T" = type, "V" = variable — inferred from the branches
    below; confirm against the grammar's comment kinds).
    """
    # Merge and sort by source position so each comment run precedes the
    # statement it annotates.
    comments_and_statements = itertools.chain(comments, statements)
    comments_and_statements = sorted(comments_and_statements, key=lambda item: (item.lineno, item.col_offset))

    # Partition into alternating runs: key True -> comments, False -> statements.
    comments_and_statements = itertools.groupby(comments_and_statements, lambda item: isinstance(item, ast.Comment))
    comments_and_statements = [(k, list(g)) for k, g in comments_and_statements]

    # Force the sequence to start with a (possibly empty) comment run so the
    # pairwise grouping below always sees (comments, statement) pairs.
    if not comments_and_statements[0][0]:
        comments_and_statements.insert(0, (True, []))

    # Comment runs become lists of "@" annotation comments; statement runs are
    # reduced to their first statement (annotations only apply to the statement
    # directly following the comments).
    grouped_annotations_and_statements = [
        [c for c in group if c.kind.startswith("@")] if comment else group[0]
        for comment, group in comments_and_statements
    ]

    # Script-level annotations come from the leading comment run.
    annotations = [c.value for c in grouped_annotations_and_statements[0] if c.kind.endswith("S")]

    if annotations:
        script.annotations = annotations

    # Pair each remaining comment run with the statement after it.
    grouped_annotations_and_statement_pairs = grouper(grouped_annotations_and_statements, n=2, incomplete="ignore")

    for comments, statement in grouped_annotations_and_statement_pairs:
        if isinstance(statement, ast.FunctionDef):
            annotations = [c.value for c in comments if c.kind.endswith("F")]
            if annotations:
                statement.annotations = annotations
        elif isinstance(statement, ast.TypeDef):
            annotations = [c.value for c in comments if c.kind.endswith("T")]
            if annotations:
                statement.annotations = annotations
        elif isinstance(statement, ast.Assign):
            annotations = [c.value for c in comments if c.kind.endswith("V")]
            if annotations:
                statement.annotations = annotations
76 |
77 |
def _parse(
    stream: InputStream,
    mode: str = "exec",
) -> AST:
    """Parse an ANTLR character stream into a Pinescript AST.

    Args:
        stream: The ANTLR input stream to parse.
        mode: ``"exec"`` parses a whole script; ``"eval"`` parses a single
            expression.

    Returns:
        The root AST node.  In ``"exec"`` mode, annotation comments are also
        collected and attached to the script and its statements.

    Raises:
        ValueError: If *mode* is not ``"exec"`` or ``"eval"``.
    """
    if mode not in {"exec", "eval"}:
        msg = f"invalid argument mode: {mode}"
        raise ValueError(msg)

    # Standard ANTLR pipeline: lexer -> token stream -> parser, with the
    # default console error listeners swapped for the project's own.
    lexer = PinescriptLexer(stream)
    token_stream = CommonTokenStream(lexer)
    parser = PinescriptParser(token_stream)
    error_listener = PinescriptErrorListener.INSTANCE

    lexer.removeErrorListeners()
    parser.removeErrorListeners()
    lexer.addErrorListener(error_listener)
    parser.addErrorListener(error_listener)

    # Select the grammar entry rule for the requested mode and run the parse.
    rule = {
        "exec": parser.start_script,
        "eval": parser.start_expression,
    }[mode]()

    builder = PinescriptASTBuilder()
    node = builder.visit(rule)

    if mode == "exec":
        # Imported lazily, presumably to avoid a circular import — confirm.
        from pynescript.ast.collector import StatementCollector

        statement_collector = StatementCollector()

        statements = statement_collector.visit(node)
        statements = list(statements)

        if not statements:
            return node

        # Re-parse on the comment channel to recover comment tokens, which
        # are hidden from the main parse; parser.reset() rewinds the stream.
        parser.getTokenStream().channel = PinescriptLexer.COMMENT_CHANNEL
        parser.reset()

        comments = builder.visit(parser.start_comments())

        if not comments:
            return node

        _add_annotations(node, statements, comments)

    return node
126 |
127 |
128 | def _get_absolute_path(filename: str) -> str:
129 | if filename in {""}:
130 | return filename
131 | filename_path = Path(filename)
132 | if not filename_path.exists():
133 | return filename
134 | filename = str(filename_path.absolute())
135 | return filename
136 |
137 |
def _parse_inputstream(
    source: str,
    filename: str = "",
    mode: str = "exec",
) -> AST:
    """Parse Pinescript *source* text into an AST via an in-memory stream.

    *filename* is recorded on the stream (resolved to an absolute path when
    it names an existing file) so error messages can reference it.
    """
    stream = InputStream(source)
    stream.name = _get_absolute_path(filename)
    return _parse(stream, mode)
147 |
148 |
def _parse_filestream(
    filename: str,
    encoding: str = "utf-8",
    mode: str = "exec",
) -> AST:
    """Parse the Pinescript file at *filename* into an AST.

    The filename is resolved to an absolute path first so diagnostics point
    at the real location on disk.
    """
    resolved = _get_absolute_path(filename)
    stream = FileStream(resolved, encoding=encoding)
    return _parse(stream, mode)
157 |
158 |
def parse(
    source: str,
    filename: str = "",
    mode: str = "exec",
) -> AST:
    """Parse Pinescript source code and return the resulting AST.

    ``mode`` is ``"exec"`` for a full script or ``"eval"`` for a single
    expression; ``filename`` is attached to the stream for diagnostics.
    """
    return _parse_inputstream(source, filename=filename, mode=mode)
165 |
166 |
def literal_eval(node_or_string: AST | str):
    """Evaluate an AST node — or a source string — containing only literals.

    Strings are parsed in ``"eval"`` mode first (leading spaces/tabs are
    stripped, since indentation is irrelevant for a bare expression), and an
    ``Expression`` wrapper is unwrapped to its body before evaluation.
    """
    target = node_or_string
    if isinstance(target, str):
        target = parse(target.lstrip(" \t"), mode="eval")
    if isinstance(target, Expression):
        target = target.body

    from pynescript.ast.evaluator import NodeLiteralEvaluator

    return NodeLiteralEvaluator().visit(target)
177 |
178 |
def dump(  # noqa: C901
    node: AST,
    *,
    annotate_fields: bool = True,
    include_attributes: bool = False,
    indent: int | None = None,
) -> str:
    """Return a formatted string representation of the tree rooted at *node*.

    Mirrors the stdlib ``ast.dump``: fields are rendered ``name=value`` when
    *annotate_fields* is true (or whenever positional rendering would become
    ambiguous); location attributes are included when *include_attributes* is
    true; *indent* switches from single-line to pretty-printed output (an int
    is interpreted as that many spaces).

    Raises:
        TypeError: If *node* is not an ``AST`` instance.
    """
    def _format(node, level=0):  # noqa: C901, PLR0912
        # Returns (rendered_text, is_simple); is_simple tracks whether the
        # subtree is small enough to keep on one line.
        if indent is not None:
            level += 1
            prefix = "\n" + indent * level
            sep = ",\n" + indent * level
        else:
            prefix = ""
            sep = ", "
        if isinstance(node, AST):
            cls = type(node)
            args = []
            allsimple = True
            keywords = annotate_fields
            for name in node._fields:
                try:
                    value = getattr(node, name)
                except AttributeError:
                    # A missing field forces keyword form for the remaining
                    # fields: positional order is no longer unambiguous.
                    keywords = True
                    continue
                if value is None and getattr(cls, name, ...) is None:
                    # Skip fields still at their class-level None default.
                    keywords = True
                    continue
                value, simple = _format(value, level)
                allsimple = allsimple and simple
                if keywords:
                    args.append(f"{name}={value}")
                else:
                    args.append(value)
            if include_attributes and node._attributes:
                for name in node._attributes:
                    try:
                        value = getattr(node, name)
                    except AttributeError:
                        continue
                    if value is None and getattr(cls, name, ...) is None:
                        continue
                    value, simple = _format(value, level)
                    allsimple = allsimple and simple
                    args.append(f"{name}={value}")
            if allsimple and len(args) <= 3:  # noqa: PLR2004
                # Short, all-simple nodes stay on one line even when indenting.
                return "{}({})".format(node.__class__.__name__, ", ".join(args)), not args
            return f"{node.__class__.__name__}({prefix}{sep.join(args)})", False
        elif isinstance(node, list):
            if not node:
                return "[]", True
            return f"[{prefix}{sep.join(_format(x, level)[0] for x in node)}]", False
        return repr(node), True

    if not isinstance(node, AST):
        raise TypeError("expected AST, got %r" % node.__class__.__name__)

    if indent is not None and not isinstance(indent, str):
        # Accept an int indent (number of spaces), like CPython's ast.dump.
        indent = " " * indent

    return _format(node)[0]
241 |
242 |
def copy_location(new_node: AST, old_node: AST) -> AST:
    """Copy source-location attributes from *old_node* onto *new_node*.

    Only attributes declared by both nodes are considered.  ``None`` values
    are skipped, except for the ``end_*`` attributes, where an explicitly set
    ``None`` is still propagated.  Returns *new_node* for convenience.
    """
    for attr in ("lineno", "col_offset", "end_lineno", "end_col_offset"):
        if attr not in old_node._attributes or attr not in new_node._attributes:
            continue
        value = getattr(old_node, attr, None)
        if value is None and not (attr.startswith("end_") and hasattr(old_node, attr)):
            continue
        setattr(new_node, attr, value)
    return new_node
250 |
251 |
def iter_fields(node: AST) -> Iterator[tuple[str, Any]]:
    """Yield ``(name, value)`` for every field of *node* that is actually set.

    Fields listed in ``_fields`` but absent on the instance are skipped.
    """
    for name in node._fields:
        if hasattr(node, name):
            yield name, getattr(node, name)
258 |
259 |
def iter_child_nodes(node: AST) -> Iterator[AST]:
    """Yield every direct AST child of *node*, including items in list fields."""
    for _name, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            yield from (item for item in value if isinstance(item, AST))
268 |
269 |
def _fix_locations(  # noqa: PLR0912
    node: AST,
    lineno: int,
    col_offset: int,
    end_lineno: int,
    end_col_offset: int,
) -> None:
    """Recursively fill in missing location attributes on *node* and children.

    Missing values are inherited from the arguments (i.e. from the parent);
    values present on the node replace the defaults passed down to children.
    Note the asymmetry: start positions are set only when entirely absent
    (``hasattr``), while end positions are also set when present but ``None``.
    """
    if "lineno" in node._attributes:
        if not hasattr(node, "lineno"):
            node.lineno = lineno
        else:
            lineno = node.lineno
    if "end_lineno" in node._attributes:
        if getattr(node, "end_lineno", None) is None:
            node.end_lineno = end_lineno
        else:
            end_lineno = node.end_lineno
    if "col_offset" in node._attributes:
        if not hasattr(node, "col_offset"):
            node.col_offset = col_offset
        else:
            col_offset = node.col_offset
    if "end_col_offset" in node._attributes:
        if getattr(node, "end_col_offset", None) is None:
            node.end_col_offset = end_col_offset
        else:
            end_col_offset = node.end_col_offset

    # Children inherit whatever this node resolved to.
    for child in iter_child_nodes(node):
        _fix_locations(child, lineno, col_offset, end_lineno, end_col_offset)
300 |
301 |
def fix_missing_locations(node: AST) -> AST:
    """Fill in missing location attributes throughout the tree rooted at *node*.

    Missing positions are inherited from the parent node, starting from line 1,
    column 0 at the root.  Returns *node* for chaining.
    """
    _fix_locations(node, lineno=1, col_offset=0, end_lineno=1, end_col_offset=0)
    return node
305 |
306 |
def increment_lineno(node: AST, n: int = 1) -> AST:
    """Shift the line numbers of *node* and all of its descendants by *n*."""
    for descendant in walk(node):
        attrs = descendant._attributes
        if "lineno" in attrs:
            descendant.lineno = getattr(descendant, "lineno", 0) + n
        if "end_lineno" in attrs:
            end = getattr(descendant, "end_lineno", 0)
            if end is not None:
                descendant.end_lineno = end + n
    return node
314 |
315 |
316 | _line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))")
317 |
318 |
319 | def _splitlines_no_ff(source: str, maxlines: int | None = None) -> list[str]:
320 | lines = []
321 | for lineno, match in enumerate(_line_pattern.finditer(source), 1):
322 | if maxlines is not None and lineno > maxlines:
323 | break
324 | lines.append(match[0])
325 | return lines
326 |
327 |
328 | def _pad_whitespace(source: str) -> str:
329 | result = ""
330 | for c in source:
331 | if c in "\f\t":
332 | result += c
333 | else:
334 | result += " "
335 | return result
336 |
337 |
def get_source_segment(source: str, node: AST, *, padded: bool = False) -> str | None:
    """Return the chunk of *source* text corresponding to *node*.

    Returns ``None`` when the node lacks complete location information.
    When *padded* is true, the first line of a multi-line segment is prefixed
    with whitespace matching its original column, preserving indentation.

    Note: column offsets are applied to each line's UTF-8 encoding (the code
    slices ``line.encode()``), so they are byte offsets, not character
    offsets.
    """
    try:
        if node.end_lineno is None or node.end_col_offset is None:
            return None
        # Convert 1-based line numbers to 0-based list indices.
        lineno = node.lineno - 1
        end_lineno = node.end_lineno - 1
        col_offset = node.col_offset
        end_col_offset = node.end_col_offset
    except AttributeError:
        return None

    lines = _splitlines_no_ff(source, maxlines=end_lineno + 1)
    if end_lineno == lineno:
        # Single-line segment: slice by byte offsets within that line.
        return lines[lineno].encode()[col_offset:end_col_offset].decode()

    if padded:
        padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode())
    else:
        padding = ""

    first = padding + lines[lineno].encode()[col_offset:].decode()
    last = lines[end_lineno].encode()[:end_col_offset].decode()
    lines = lines[lineno + 1 : end_lineno]

    lines.insert(0, first)
    lines.append(last)
    return "".join(lines)
365 |
366 |
def walk(node: AST) -> Iterator[AST]:
    """Yield *node* and all of its descendants in breadth-first order."""
    queue = deque([node])
    while queue:
        current = queue.popleft()
        queue.extend(iter_child_nodes(current))
        yield current
373 |
374 |
def unparse(node: AST):
    """Render *node* back into Pinescript source text."""
    from pynescript.ast.unparser import NodeUnparser

    return NodeUnparser().visit(node)
380 |
381 |
# Public API of this module, mirroring the helpers offered by the stdlib `ast`
# module (parse, dump, walk, unparse, ...).
__all__ = [
    "parse",
    "literal_eval",
    "dump",
    "iter_fields",
    "iter_child_nodes",
    "copy_location",
    "fix_missing_locations",
    "increment_lineno",
    "get_source_segment",
    "walk",
    "unparse",
]
395 |
--------------------------------------------------------------------------------
/src/pynescript/ast/node.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from .grammar.asdl.generated import * # noqa: F403
20 |
--------------------------------------------------------------------------------
/src/pynescript/ast/transformer.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from pynescript.ast.helper import iter_fields
20 | from pynescript.ast.node import AST
21 | from pynescript.ast.visitor import NodeVisitor
22 |
23 |
class NodeTransformer(NodeVisitor):
    """Visitor that rewrites the tree in place.

    A ``visit_*`` handler's return value replaces the visited node:
    ``None`` removes it, another AST node substitutes it, and (for nodes in
    list fields) any other iterable is spliced in.  Non-AST list items pass
    through untouched.
    """

    def _flatten_visit(self, items):
        # Visit each AST item in a list field, yielding the (possibly
        # multiple) replacement values; non-AST items pass through unchanged.
        for item in items:
            if not isinstance(item, AST):
                yield item
                continue
            replacement = self.visit(item)
            if replacement is None:
                continue
            if isinstance(replacement, AST):
                yield replacement
            else:
                yield from replacement

    def generic_visit(self, node: AST):
        for field, value in iter_fields(node):
            if isinstance(value, list):
                value[:] = list(self._flatten_visit(value))
            elif isinstance(value, AST):
                replacement = self.visit(value)
                if replacement is None:
                    delattr(node, field)
                else:
                    setattr(node, field, replacement)
        return node
46 |
--------------------------------------------------------------------------------
/src/pynescript/ast/unparser.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import json
20 |
21 | from contextlib import contextmanager
22 | from contextlib import nullcontext
23 | from enum import IntEnum
24 | from enum import auto
25 | from typing import ClassVar
26 |
27 | from pynescript.ast import node as ast
28 | from pynescript.ast.visitor import NodeVisitor
29 |
30 |
class Precedence(IntEnum):
    """Binding strengths used to decide parenthesization while unparsing.

    Higher values bind tighter; ``CMP`` and ``NOT`` are aliases of ``INEQ``
    and ``FACTOR`` respectively.
    """

    TEST = auto()  # '?', ':'
    OR = auto()  # 'or'
    AND = auto()  # 'and'
    EQ = auto()  # '==', '!='
    INEQ = auto()  # '>', '<', '>=', '<='
    CMP = INEQ
    EXPR = auto()
    ARITH = auto()  # '+', '-'
    TERM = auto()  # '*', '/', '%'
    FACTOR = auto()  # unary '+', unary '-', 'not'
    NOT = FACTOR
    ATOM = auto()

    def next(self):  # noqa: A003
        """Return the next-tighter precedence level, saturating at the top."""
        members = type(self)
        bumped = self.value + 1
        return members(bumped) if any(m.value == bumped for m in members) else self
50 |
51 |
52 | class NodeUnparser(NodeVisitor):
53 | # ruff: noqa: N802, ARG002
54 |
55 | def __init__(self):
56 | self._source = []
57 | self._precedences = {}
58 | self._indent = 0
59 |
60 | def interleave(self, inter, f, seq):
61 | seq = iter(seq)
62 | try:
63 | f(next(seq))
64 | except StopIteration:
65 | pass
66 | else:
67 | for x in seq:
68 | inter()
69 | f(x)
70 |
71 | def items_view(self, traverser, items, *, single: bool = False):
72 | if len(items) == 1:
73 | traverser(items[0])
74 | if single:
75 | self.write(",")
76 | else:
77 | self.interleave(lambda: self.write(", "), traverser, items)
78 |
79 | def maybe_newline(self):
80 | if self._source:
81 | self.write("\n")
82 |
83 | def fill(self, text=""):
84 | self.maybe_newline()
85 | self.write(" " * self._indent + text)
86 |
87 | def write(self, *text):
88 | self._source.extend(text)
89 |
90 | @contextmanager
91 | def buffered(self, buffer=None):
92 | if buffer is None:
93 | buffer = []
94 | original_source = self._source
95 | self._source = buffer
96 | yield buffer
97 | self._source = original_source
98 |
99 | @contextmanager
100 | def block(self, *, extra=None):
101 | if extra:
102 | self.write(extra)
103 | self._indent += 1
104 | yield
105 | self._indent -= 1
106 |
107 | @contextmanager
108 | def delimit(self, start, end):
109 | self.write(start)
110 | yield
111 | self.write(end)
112 |
113 | def delimit_if(self, start, end, condition):
114 | if condition:
115 | return self.delimit(start, end)
116 | else:
117 | return nullcontext()
118 |
119 | def require_parens(self, precedence, node):
120 | return self.delimit_if("(", ")", self.get_precedence(node) > precedence)
121 |
122 | def get_precedence(self, node):
123 | return self._precedences.get(node, Precedence.TEST)
124 |
125 | def set_precedence(self, precedence, *nodes):
126 | for node in nodes:
127 | self._precedences[node] = precedence
128 |
129 | def traverse(self, node):
130 | if isinstance(node, list):
131 | for item in node:
132 | self.traverse(item)
133 | else:
134 | super().visit(node)
135 |
136 | def visit(self, node):
137 | self._source = []
138 | self.traverse(node)
139 | return "".join(self._source)
140 |
141 | def visit_Script(self, node: ast.Script):
142 | if node.annotations:
143 | for annotation in node.annotations:
144 | self.fill(annotation)
145 | self.traverse(node.body)
146 |
147 | def visit_Expression(self, node: ast.Expression):
148 | self.traverse(node.body)
149 |
150 | def visit_FunctionDef(self, node: ast.FunctionDef):
151 | self.fill()
152 | if node.annotations:
153 | for annotation in node.annotations:
154 | self.fill(annotation)
155 | self.fill()
156 | if node.export:
157 | self.write("export ")
158 | if node.method:
159 | self.write("method ")
160 | self.write(node.name)
161 | with self.delimit("(", ")"):
162 | if node.args:
163 | self.items_view(self.traverse, node.args)
164 | self.write(" => ")
165 | if len(node.body) == 1 and isinstance(node.body[0], ast.Expr):
166 | self.traverse(node.body[0].value)
167 | else:
168 | with self.block():
169 | self.traverse(node.body)
170 |
171 | def visit_TypeDef(self, node: ast.TypeDef):
172 | self.fill()
173 | if node.annotations:
174 | for annotation in node.annotations:
175 | self.fill(annotation)
176 | self.fill()
177 | if node.export:
178 | self.write("export ")
179 | self.write("type ")
180 | self.write(node.name)
181 | with self.block():
182 | self.traverse(node.body)
183 |
184 | def visit_Assign(self, node: ast.Assign):
185 | self.fill()
186 | if node.annotations:
187 | for annotation in node.annotations:
188 | self.fill(annotation)
189 | self.fill()
190 | if node.mode:
191 | self.traverse(node.mode)
192 | self.write(" ")
193 | if node.type:
194 | self.traverse(node.type)
195 | self.write(" ")
196 | self.traverse(node.target)
197 | if node.value:
198 | self.write(" = ")
199 | self.traverse(node.value)
200 |
201 | def visit_ReAssign(self, node: ast.ReAssign):
202 | self.fill()
203 | self.traverse(node.target)
204 | self.write(" := ")
205 | self.traverse(node.value)
206 |
207 | def visit_AugAssign(self, node: ast.AugAssign):
208 | self.fill()
209 | self.traverse(node.target)
210 | self.write(" ")
211 | self.traverse(node.op)
212 | self.write("= ")
213 | self.traverse(node.value)
214 |
215 | def visit_ForTo(self, node: ast.ForTo):
216 | self.write("for ")
217 | self.traverse(node.target)
218 | self.write(" = ")
219 | self.traverse(node.start)
220 | self.write(" to ")
221 | self.traverse(node.end)
222 | if node.step:
223 | self.write(" by ")
224 | self.traverse(node.step)
225 | with self.block():
226 | self.traverse(node.body)
227 |
228 | def visit_ForIn(self, node: ast.ForIn):
229 | self.write("for ")
230 | self.traverse(node.target)
231 | self.write(" in ")
232 | self.traverse(node.iter)
233 | with self.block():
234 | self.traverse(node.body)
235 |
236 | def visit_While(self, node: ast.While):
237 | self.write("while ")
238 | self.traverse(node.test)
239 | with self.block():
240 | self.traverse(node.body)
241 |
242 | def visit_If(self, node: ast.If):
243 | self.write("if ")
244 | self.traverse(node.test)
245 | with self.block():
246 | self.traverse(node.body)
247 | while (
248 | node.orelse
249 | and len(node.orelse) == 1
250 | and isinstance(node.orelse[0], ast.Expr)
251 | and isinstance(node.orelse[0].value, ast.If)
252 | ):
253 | node = node.orelse[0].value
254 | self.fill("else if ")
255 | self.traverse(node.test)
256 | with self.block():
257 | self.traverse(node.body)
258 | if node.orelse:
259 | self.fill("else")
260 | with self.block():
261 | self.traverse(node.orelse)
262 |
263 | def visit_Switch(self, node: ast.Switch):
264 | self.write("switch")
265 | if node.subject:
266 | self.write(" ")
267 | self.traverse(node.subject)
268 | with self.block():
269 | self.traverse(node.cases)
270 |
271 | def visit_Import(self, node: ast.Import):
272 | self.fill()
273 | self.write("import ")
274 | self.write(node.namespace)
275 | self.write("/")
276 | self.write(node.name)
277 | self.write("/")
278 | self.write(str(node.version))
279 | if node.alias:
280 | self.write(" as ")
281 | self.write(node.alias)
282 |
283 | def visit_Expr(self, node: ast.Expr):
284 | self.fill()
285 | self.traverse(node.value)
286 |
287 | def visit_Break(self, node: ast.Break):
288 | self.fill("break")
289 |
290 | def visit_Continue(self, node: ast.Continue):
291 | self.fill("continue")
292 |
293 | boolops: ClassVar = {
294 | "And": "and",
295 | "Or": "or",
296 | }
297 |
298 | boolop_precedence: ClassVar = {
299 | "and": Precedence.AND,
300 | "or": Precedence.OR,
301 | }
302 |
303 | def visit_BoolOp(self, node: ast.BoolOp):
304 | operator = self.boolops[node.op.__class__.__name__]
305 | operator_precedence = self.boolop_precedence[operator]
306 |
307 | def increasing_level_traverse(node):
308 | nonlocal operator_precedence
309 | operator_precedence = operator_precedence.next()
310 | self.set_precedence(operator_precedence, node)
311 | self.traverse(node)
312 |
313 | with self.require_parens(operator_precedence, node):
314 | s = f" {operator} "
315 | self.interleave(lambda: self.write(s), increasing_level_traverse, node.values)
316 |
317 | binop: ClassVar = {
318 | "Add": "+",
319 | "Sub": "-",
320 | "Mult": "*",
321 | "Div": "/",
322 | "Mod": "%",
323 | }
324 |
325 | binop_precedence: ClassVar = {
326 | "+": Precedence.ARITH,
327 | "-": Precedence.ARITH,
328 | "*": Precedence.TERM,
329 | "/": Precedence.TERM,
330 | "%": Precedence.TERM,
331 | }
332 |
333 | def visit_BinOp(self, node: ast.BinOp):
334 | operator = self.binop[node.op.__class__.__name__]
335 | operator_precedence = self.binop_precedence[operator]
336 | with self.require_parens(operator_precedence, node):
337 | left_precedence = operator_precedence
338 | right_precedence = operator_precedence.next()
339 | self.set_precedence(left_precedence, node.left)
340 | self.traverse(node.left)
341 | self.write(f" {operator} ")
342 | self.set_precedence(right_precedence, node.right)
343 | self.traverse(node.right)
344 |
345 | unop: ClassVar = {
346 | "Not": "not",
347 | "UAdd": "+",
348 | "USub": "-",
349 | }
350 |
351 | unop_precedence: ClassVar = {
352 | "not": Precedence.NOT,
353 | "+": Precedence.FACTOR,
354 | "-": Precedence.FACTOR,
355 | }
356 |
357 | def visit_UnaryOp(self, node: ast.UnaryOp):
358 | operator = self.unop[node.op.__class__.__name__]
359 | operator_precedence = self.unop_precedence[operator]
360 | with self.require_parens(operator_precedence, node):
361 | self.write(operator)
362 | if isinstance(node.op, ast.Not):
363 | self.write(" ")
364 | self.set_precedence(operator_precedence, node.operand)
365 | self.traverse(node.operand)
366 |
367 | def visit_Conditional(self, node: ast.Conditional):
368 | with self.require_parens(Precedence.TEST, node):
369 | self.set_precedence(Precedence.TEST.next(), node.test, node.body)
370 | self.traverse(node.test)
371 | self.write(" ? ")
372 | self.traverse(node.body)
373 | self.write(" : ")
374 | self.set_precedence(Precedence.TEST, node.orelse)
375 | self.traverse(node.orelse)
376 |
377 | cmpops: ClassVar = {
378 | "Eq": "==",
379 | "NotEq": "!=",
380 | "Lt": "<",
381 | "LtE": "<=",
382 | "Gt": ">",
383 | "GtE": ">=",
384 | }
385 |
386 | cmpop_precedence: ClassVar = {
387 | "==": Precedence.EQ,
388 | "!=": Precedence.EQ,
389 | "<": Precedence.INEQ,
390 | "<=": Precedence.INEQ,
391 | ">": Precedence.INEQ,
392 | ">=": Precedence.INEQ,
393 | }
394 |
395 | def visit_Compare(self, node: ast.Compare):
396 | with self.require_parens(Precedence.CMP, node):
397 | self.set_precedence(Precedence.CMP.next(), node.left, *node.comparators)
398 | self.traverse(node.left)
399 | for o, e in zip(node.ops, node.comparators, strict=True):
400 | operator = self.cmpops[o.__class__.__name__]
401 | self.write(f" {operator} ")
402 | self.traverse(e)
403 |
404 | def visit_Call(self, node: ast.Call):
405 | self.set_precedence(Precedence.ATOM, node.func)
406 | self.traverse(node.func)
407 | with self.delimit("(", ")"):
408 | if node.args:
409 | self.items_view(self.traverse, node.args)
410 |
411 | def visit_Constant(self, node: ast.Constant):
412 | if node.kind:
413 | self.write(node.value)
414 | elif isinstance(node.value, bool):
415 | if node.value:
416 | self.write("true")
417 | else:
418 | self.write("false")
419 | elif isinstance(node.value, str):
420 | if '"' in node.value and "'" not in node.value:
421 | self.write(repr(node.value))
422 | else:
423 | self.write(json.dumps(node.value, ensure_ascii=False))
424 | else:
425 | self.write(repr(node.value))
426 |
427 | def visit_Attribute(self, node: ast.Attribute):
428 | self.set_precedence(Precedence.ATOM, node.value)
429 | self.traverse(node.value)
430 | self.write(".")
431 | self.write(node.attr)
432 |
433 | def visit_Subscript(self, node: ast.Subscript):
434 | self.traverse(node.value)
435 | with self.delimit("[", "]"):
436 | if node.slice:
437 | if isinstance(node.slice, ast.Tuple):
438 | self.items_view(self.traverse, node.slice.elts)
439 | else:
440 | self.traverse(node.slice)
441 |
442 | def visit_Name(self, node: ast.Name):
443 | self.write(node.id)
444 |
445 | def visit_Tuple(self, node: ast.Tuple):
446 | with self.delimit("[", "]"):
447 | if node.elts:
448 | self.items_view(self.traverse, node.elts)
449 |
450 | def visit_Qualify(self, node: ast.Qualify):
451 | self.traverse(node.qualifier)
452 | self.write(" ")
453 | self.traverse(node.value)
454 |
455 | def visit_Specialize(self, node: ast.Specialize):
456 | self.traverse(node.value)
457 | with self.delimit("<", ">"):
458 | if node.args:
459 | if isinstance(node.args, ast.Tuple):
460 | self.items_view(self.traverse, node.args.elts)
461 | else:
462 | self.traverse(node.args)
463 |
464 | def visit_Var(self, node: ast.Var):
465 | self.write("var")
466 |
467 | def visit_VarIp(self, node: ast.VarIp):
468 | self.write("varip")
469 |
470 | def visit_Const(self, node: ast.Const):
471 | self.write("const")
472 |
473 | def visit_Input(self, node: ast.Input):
474 | self.write("input")
475 |
476 | def visit_Sipmle(self, node: ast.Simple):
477 | self.write("simple")
478 |
479 | def visit_Series(self, node: ast.Series):
480 | self.write("series")
481 |
482 | def visit_And(self, node: ast.And):
483 | self.write("and")
484 |
485 | def visit_Or(self, node: ast.Or):
486 | self.write("or")
487 |
488 | def visit_Add(self, node: ast.Add):
489 | self.write("+")
490 |
491 | def visit_Sub(self, node: ast.Sub):
492 | self.write("-")
493 |
494 | def visit_Mult(self, node: ast.Mult):
495 | self.write("*")
496 |
497 | def visit_Div(self, node: ast.Div):
498 | self.write("/")
499 |
500 | def visit_Mod(self, node: ast.Mod):
501 | self.write("%")
502 |
503 | def visit_Not(self, node: ast.Not):
504 | self.write("not")
505 |
506 | def visit_UAdd(self, node: ast.UAdd):
507 | self.write("+")
508 |
509 | def visit_USub(self, node: ast.USub):
510 | self.write("-")
511 |
512 | def visit_Eq(self, node: ast.Eq):
513 | self.write("==")
514 |
515 | def visit_NotEq(self, node: ast.NotEq):
516 | self.write("!=")
517 |
518 | def visit_Lt(self, node: ast.Lt):
519 | self.write("<")
520 |
521 | def visit_LtE(self, node: ast.LtE):
522 | self.write("<=")
523 |
524 | def visit_Gt(self, node: ast.Gt):
525 | self.write(">")
526 |
527 | def visit_GtE(self, node: ast.GtE):
528 | self.write(">=")
529 |
530 | def visit_Param(self, node: ast.Param):
531 | if node.type:
532 | self.traverse(node.type)
533 | self.write(" ")
534 | self.write(node.name)
535 | if node.default:
536 | self.write("=")
537 | self.traverse(node.default)
538 |
def visit_Arg(self, node: ast.Arg):
    """Render a call argument, prefixing ``name=`` for keyword-style args."""
    keyword = node.name
    if keyword:
        self.write(keyword)
        self.write("=")
    self.traverse(node.value)
def visit_Case(self, node: ast.Case):
    """Render one switch case: optional pattern, ``=>`` arrow, then body."""
    self.fill()
    pattern = node.pattern
    if pattern:
        self.traverse(pattern)
        self.write(" ")
    self.write("=> ")
    body = node.body
    # A lone expression statement stays inline after the arrow;
    # any other body is emitted as an indented block.
    if len(body) == 1 and isinstance(body[0], ast.Expr):
        self.traverse(body[0].value)
    else:
        with self.block():
            self.traverse(body)
def visit_Comment(self, node: ast.Comment):
    """Write the comment's stored text via ``fill``."""
    self.fill(node.value)
--------------------------------------------------------------------------------
/src/pynescript/ast/visitor.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from pynescript.ast.helper import iter_fields
20 | from pynescript.ast.node import AST
21 |
22 |
class NodeVisitor:
    """Base AST visitor that dispatches on the node's class name.

    Subclasses define ``visit_<ClassName>`` methods; node types without a
    dedicated handler fall back to ``generic_visit``, which recurses into
    every AST-valued child (including AST items inside list fields).
    """

    def visit(self, node: AST):
        """Dispatch *node* to its ``visit_<ClassName>`` handler (or fallback)."""
        handler = getattr(self, f"visit_{type(node).__name__}", self.generic_visit)
        return handler(node)

    def generic_visit(self, node: AST):
        """Visit each AST child of *node*; non-AST field values are skipped."""
        for _name, field_value in iter_fields(node):
            # Normalize: list fields contribute their items, scalar fields
            # contribute themselves; only AST instances get visited.
            candidates = field_value if isinstance(field_value, list) else [field_value]
            for child in candidates:
                if isinstance(child, AST):
                    self.visit(child)
37 |
--------------------------------------------------------------------------------
/src/pynescript/ext/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/ext/nautilus_trader/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/ext/nautilus_trader/strategy.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from nautilus_trader.config import StrategyConfig
20 | from nautilus_trader.model.data import Bar
21 | from nautilus_trader.model.data import BarType
22 | from nautilus_trader.model.data import TradeTick
23 | from nautilus_trader.model.identifiers import InstrumentId
24 | from nautilus_trader.model.instruments import Instrument
25 | from nautilus_trader.trading.strategy import Strategy
26 |
27 |
class PinescriptStrategyConfig(StrategyConfig):
    """Configuration for ``PinescriptStrategy``."""

    # Instrument the strategy resolves and subscribes to.
    instrument_id: InstrumentId
    # Bar aggregation the strategy requests and subscribes to.
    bar_type: BarType
31 |
32 |
class PinescriptStrategy(Strategy):
    """Skeleton nautilus_trader strategy wiring up bar and trade-tick feeds.

    Data handlers (``on_bar`` / ``on_trade_tick``) are intentional no-ops
    here; subclasses are expected to override them.
    """

    def __init__(self, config: PinescriptStrategyConfig):
        """Capture the instrument id and bar type from *config*."""
        super().__init__(config)
        self.instrument_id = config.instrument_id
        self.bar_type = config.bar_type
        # Resolved from the cache in on_start().
        self.instrument: Instrument | None = None

    def on_start(self):
        """Resolve the instrument and subscribe to bars and trade ticks."""
        self.instrument = self.cache.instrument(self.instrument_id)
        self.request_bars(self.bar_type)
        self.subscribe_bars(self.bar_type)
        self.subscribe_trade_ticks(self.instrument_id)

    def on_bar(self, bar: Bar):
        """Handle an incoming bar (no-op in the base class)."""

    def on_trade_tick(self, tick: TradeTick):
        """Handle an incoming trade tick (no-op in the base class)."""

    def on_stop(self):
        """Cancel orders, flatten positions, and tear down all subscriptions."""
        self.cancel_all_orders(self.instrument_id)
        self.close_all_positions(self.instrument_id)
        self.unsubscribe_bars(self.bar_type)
        # Fix: mirror on_start() — the trade-tick subscription was
        # previously never released on stop.
        self.unsubscribe_trade_ticks(self.instrument_id)

    def on_reset(self):
        """Reset strategy state (nothing to reset in the base class)."""
59 |
--------------------------------------------------------------------------------
/src/pynescript/ext/pygments/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/ext/pygments/lexers.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from typing import ClassVar
20 |
21 | from antlr4 import InputStream
22 | from antlr4 import Token as ANTLR4Token
23 | from pygments.lexer import Lexer
24 | from pygments.token import Token
25 |
26 | from pynescript.ast.grammar.antlr4.lexer import PinescriptLexer as PinescriptANTLR4Lexer
27 |
28 |
class PinescriptLexer(Lexer):
    """Pygments lexer for Pine Script, delegating tokenization to ANTLR4."""

    name: ClassVar[str] = "Pinescript Lexer"

    aliases: ClassVar[list[str]] = ["pinescript"]
    filenames: ClassVar[list[str]] = ["*.pine"]

    url: ClassVar[str] = "https://www.tradingview.com/pine-script-docs/en/v5/Introduction.html"

    # ANTLR4 token type -> Pygments token category.
    # Fix: the original literal listed WS and COMMENT twice; dict literals
    # silently keep only the last occurrence, so the duplicates were dead.
    _token_type_mapping: ClassVar[dict] = {
        PinescriptANTLR4Lexer.AND: Token.Operator,
        PinescriptANTLR4Lexer.AS: Token.Keyword,
        PinescriptANTLR4Lexer.BREAK: Token.Keyword,
        PinescriptANTLR4Lexer.BY: Token.Keyword,
        PinescriptANTLR4Lexer.CONST: Token.Keyword,
        PinescriptANTLR4Lexer.CONTINUE: Token.Keyword,
        PinescriptANTLR4Lexer.ELSE: Token.Keyword,
        PinescriptANTLR4Lexer.EXPORT: Token.Keyword,
        PinescriptANTLR4Lexer.FALSE: Token.Literal,
        PinescriptANTLR4Lexer.FOR: Token.Keyword,
        PinescriptANTLR4Lexer.IF: Token.Keyword,
        PinescriptANTLR4Lexer.IMPORT: Token.Keyword,
        PinescriptANTLR4Lexer.IN: Token.Keyword,
        PinescriptANTLR4Lexer.INPUT: Token.Keyword,
        PinescriptANTLR4Lexer.METHOD: Token.Keyword,
        PinescriptANTLR4Lexer.NOT: Token.Operator,
        PinescriptANTLR4Lexer.OR: Token.Operator,
        PinescriptANTLR4Lexer.SERIES: Token.Keyword,
        PinescriptANTLR4Lexer.SIMPLE: Token.Keyword,
        PinescriptANTLR4Lexer.SWITCH: Token.Keyword,
        PinescriptANTLR4Lexer.TO: Token.Keyword,
        PinescriptANTLR4Lexer.TYPE: Token.Keyword,
        PinescriptANTLR4Lexer.TRUE: Token.Literal,
        PinescriptANTLR4Lexer.VAR: Token.Keyword,
        PinescriptANTLR4Lexer.VARIP: Token.Keyword,
        PinescriptANTLR4Lexer.WHILE: Token.Keyword,
        PinescriptANTLR4Lexer.LPAR: Token.Punctuation,
        PinescriptANTLR4Lexer.RPAR: Token.Punctuation,
        PinescriptANTLR4Lexer.LSQB: Token.Punctuation,
        PinescriptANTLR4Lexer.RSQB: Token.Punctuation,
        PinescriptANTLR4Lexer.LESS: Token.Operator,
        PinescriptANTLR4Lexer.GREATER: Token.Operator,
        PinescriptANTLR4Lexer.EQUAL: Token.Operator,
        PinescriptANTLR4Lexer.EQEQUAL: Token.Operator,
        PinescriptANTLR4Lexer.NOTEQUAL: Token.Operator,
        PinescriptANTLR4Lexer.LESSEQUAL: Token.Operator,
        PinescriptANTLR4Lexer.GREATEREQUAL: Token.Operator,
        PinescriptANTLR4Lexer.RARROW: Token.Punctuation,
        PinescriptANTLR4Lexer.DOT: Token.Punctuation,
        PinescriptANTLR4Lexer.COMMA: Token.Punctuation,
        PinescriptANTLR4Lexer.COLON: Token.Operator,
        PinescriptANTLR4Lexer.QUESTION: Token.Operator,
        PinescriptANTLR4Lexer.PLUS: Token.Operator,
        PinescriptANTLR4Lexer.MINUS: Token.Operator,
        PinescriptANTLR4Lexer.STAR: Token.Operator,
        PinescriptANTLR4Lexer.SLASH: Token.Operator,
        PinescriptANTLR4Lexer.PERCENT: Token.Operator,
        PinescriptANTLR4Lexer.PLUSEQUAL: Token.Operator,
        PinescriptANTLR4Lexer.MINEQUAL: Token.Operator,
        PinescriptANTLR4Lexer.STAREQUAL: Token.Operator,
        PinescriptANTLR4Lexer.SLASHEQUAL: Token.Operator,
        PinescriptANTLR4Lexer.PERCENTEQUAL: Token.Operator,
        PinescriptANTLR4Lexer.COLONEQUAL: Token.Operator,
        PinescriptANTLR4Lexer.NAME: Token.Name,
        PinescriptANTLR4Lexer.NUMBER: Token.Literal.Number,
        PinescriptANTLR4Lexer.STRING: Token.Literal.String,
        PinescriptANTLR4Lexer.COLOR: Token.Literal,
        PinescriptANTLR4Lexer.NEWLINE: Token.Text.Whitespace,
        PinescriptANTLR4Lexer.WS: Token.Text.Whitespace,
        PinescriptANTLR4Lexer.COMMENT: Token.Comment,
        PinescriptANTLR4Lexer.ERROR_TOKEN: Token.Error,
    }

    def get_tokens_unprocessed(self, text: str):
        """Yield ``(start_index, pygments_token_type, text)`` triples.

        Unknown token types map to ``Token.Other``; INDENT/DEDENT tokens
        are skipped rather than highlighted.
        """
        stream = InputStream(text)
        lexer = PinescriptANTLR4Lexer(stream)
        while True:
            token = lexer.nextToken()
            # Stop at end of input (None kept as a defensive guard).
            if token is None or token.type == ANTLR4Token.EOF:
                return
            if token.type in (PinescriptANTLR4Lexer.INDENT, PinescriptANTLR4Lexer.DEDENT):
                continue
            yield token.start, self._token_type_mapping.get(token.type, Token.Other), token.text
115 |
--------------------------------------------------------------------------------
/src/pynescript/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/elbakramer/pynescript/0d01bfe12c3d2a54cda51efc54209e1726d77a80/src/pynescript/py.typed
--------------------------------------------------------------------------------
/src/pynescript/util/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/src/pynescript/util/itertools.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from itertools import zip_longest
20 |
21 |
22 | def grouper(iterable, n, *, incomplete="fill", fillvalue=None):
23 | args = [iter(iterable)] * n
24 | match incomplete:
25 | case "fill":
26 | return zip_longest(*args, fillvalue=fillvalue)
27 | case "strict":
28 | return zip(*args, strict=True)
29 | case "ignore":
30 | return zip(*args, strict=False)
31 | case _:
32 | msg = "Expected fill, strict, or ignore"
33 | raise ValueError(msg)
34 |
--------------------------------------------------------------------------------
/src/pynescript/util/pine_facade.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | import pathlib
20 |
21 | import requests
22 | import tqdm
23 |
24 |
def list_builtin_scripts():
    """Return the JSON metadata list of built-in template scripts.

    Queries the TradingView pine-facade service; raises
    ``requests.HTTPError`` on a non-2xx response.
    """
    base_url = "https://pine-facade.tradingview.com"
    endpoint = "/pine-facade/list/"
    response = requests.get(base_url + endpoint, params={"filter": "template"}, timeout=60)
    response.raise_for_status()
    return response.json()
33 |
34 |
def get_script(script_id_part, version):
    """Fetch one script's JSON payload by its id part and version.

    The id part is URL-quoted before being embedded in the path; raises
    ``requests.HTTPError`` on a non-2xx response.
    """
    base_url = "https://pine-facade.tradingview.com"
    quoted_id = requests.utils.quote(script_id_part)
    endpoint = f"/pine-facade/get/{quoted_id}/{version}"
    response = requests.get(base_url + endpoint, params={"no_4xx": "false"}, timeout=60)
    response.raise_for_status()
    return response.json()
43 |
44 |
def download_builtin_scripts(script_dir, encodig=None):
    """Download every built-in template script into *script_dir* as .pine files.

    Args:
        script_dir: Target directory (created if missing).
        encodig: Text encoding for the written files; defaults to UTF-8.
            NOTE: the misspelled parameter name is kept for backward
            compatibility with existing callers.

    Fixes a bug where passing a non-None ``encodig`` raised ``NameError``,
    because the local ``encoding`` variable was only assigned on the
    ``None`` branch.
    """
    script_dir = pathlib.Path(script_dir)
    encoding = "utf-8" if encodig is None else encodig

    # exist_ok=True already tolerates a pre-existing directory, so no
    # separate exists() check is needed.
    script_dir.mkdir(parents=True, exist_ok=True)

    # Characters in script names that are awkward in filenames -> "_".
    filename_translation = str.maketrans({" ": "_", "-": "_", "/": "_"})

    script_list = list_builtin_scripts()
    progress = tqdm.tqdm(script_list)

    for script_meta in progress:
        script_name = script_meta["scriptName"]
        script_id_part = script_meta["scriptIdPart"]
        script_version = script_meta["version"]

        progress.set_description(f"Downloading script [{script_name}]")

        script = get_script(script_id_part, script_version)
        script_source = script["source"]

        script_filename = f"{script_name.lower().translate(filename_translation)}.pine"
        (script_dir / script_filename).write_text(script_source, encoding=encoding)
82 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from pathlib import Path
20 |
21 | from pytest import Metafunc
22 | from pytest import Parser
23 |
24 |
# Directory containing this test package.
tests_dir = Path(__file__).parent
# Default location of example *.pine scripts (git-ignored; populated separately).
builtin_scripts_dir = tests_dir / "data" / "builtin_scripts"
27 |
28 |
def pytest_addoption(parser: Parser):
    """Register --example-scripts-dir (defaults to tests/data/builtin_scripts)."""
    parser.addoption("--example-scripts-dir", default=builtin_scripts_dir, type=Path)
31 |
32 |
def pytest_generate_tests(metafunc: Metafunc):
    """Parametrize ``pinescript_filepath`` over every *.pine example script."""
    if "pinescript_filepath" not in metafunc.fixturenames:
        return
    scripts_dir: Path = metafunc.config.getoption("--example-scripts-dir")
    filepaths = list(scripts_dir.glob("*.pine"))
    metafunc.parametrize(
        argnames="pinescript_filepath",
        argvalues=filepaths,
        # Use bare filenames as readable test ids.
        ids=[path.name for path in filepaths],
    )
44 |
--------------------------------------------------------------------------------
/tests/data/builtin_scripts/.gitignore:
--------------------------------------------------------------------------------
1 | *.pine
2 |
--------------------------------------------------------------------------------
/tests/test_parse_and_unparse.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Yunseong Hwang
2 | #
3 | # Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.gnu.org/licenses/lgpl-3.0.en.html
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | #
15 | # SPDX-License-Identifier: LGPL-3.0-or-later
16 |
17 | from __future__ import annotations
18 |
19 | from pathlib import Path
20 |
21 | from pynescript.ast.helper import parse
22 | from pynescript.ast.helper import unparse
23 |
24 |
def test_parse_and_unparse(pinescript_filepath: Path):
    """Round-trip check: unparsing a parsed AST reparses to an identical repr."""
    source = pinescript_filepath.read_text(encoding="utf-8")
    original_tree = parse(source)
    regenerated_source = unparse(original_tree)
    reparsed_tree = parse(regenerated_source)
    assert repr(original_tree) == repr(reparsed_tree)
32 |
--------------------------------------------------------------------------------