├── remlang
├── __init__.py
├── compiler
│ ├── __init__.py
│ ├── README.md
│ ├── tk.py
│ ├── msg.py
│ ├── control_flow.py
│ ├── err.py
│ ├── module.py
│ ├── test_lang.py
│ ├── utils.py
│ ├── order_dual_opt.py
│ ├── reference_collections.py
│ ├── pattern_matching.py
│ ├── linked_list.py
│ ├── rem.grammar
│ ├── rem_parser.py
│ └── ast.py
├── standard
│ ├── __init__.py
│ ├── linq
│ │ ├── __init__.py
│ │ └── tsql.py
│ ├── curry.py
│ ├── module.py
│ ├── path.py
│ ├── io.py
│ ├── collections.py
│ ├── syntax.py
│ └── default.py
├── console.py
├── execute.py
└── intepreter.py
├── run_test.py
├── tests-example_source_codes
├── sys.rem
├── hw.rem
├── for_comp.rem
├── lambda.rem
├── pattern_matching.rem
├── chinese.rem
├── into.rem
├── class.rem
├── mapreduce.rem
├── qsort.rem
├── itest.rem
└── tutorial.rem
├── intro_pic.png
├── overview++.png
├── overview-figs
├── $.png
├── for.png
├── into.png
├── lambda.png
├── where.png
└── inverted.png
├── rem.lib
└── module.rem
├── .travis.yml
├── test.sh
├── setup.py
├── LICENSE
├── .gitignore
├── README.md
├── README.rst
└── intro.md
/remlang/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/remlang/compiler/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/remlang/standard/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/remlang/standard/linq/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/run_test.py:
--------------------------------------------------------------------------------
1 | from remlang.intepreter import repl
2 |
3 | repl()
4 |
--------------------------------------------------------------------------------
/tests-example_source_codes/sys.rem:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | sys'argv .print
--------------------------------------------------------------------------------
/intro_pic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thautwarm/Rem/HEAD/intro_pic.png
--------------------------------------------------------------------------------
/overview++.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thautwarm/Rem/HEAD/overview++.png
--------------------------------------------------------------------------------
/overview-figs/$.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thautwarm/Rem/HEAD/overview-figs/$.png
--------------------------------------------------------------------------------
/overview-figs/for.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thautwarm/Rem/HEAD/overview-figs/for.png
--------------------------------------------------------------------------------
/overview-figs/into.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thautwarm/Rem/HEAD/overview-figs/into.png
--------------------------------------------------------------------------------
/overview-figs/lambda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thautwarm/Rem/HEAD/overview-figs/lambda.png
--------------------------------------------------------------------------------
/overview-figs/where.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thautwarm/Rem/HEAD/overview-figs/where.png
--------------------------------------------------------------------------------
/overview-figs/inverted.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thautwarm/Rem/HEAD/overview-figs/inverted.png
--------------------------------------------------------------------------------
/rem.lib/module.rem:
--------------------------------------------------------------------------------
1 | let using = `{|module|
2 | apply_module module __env__'parent
3 | }
4 |
5 |
6 |
--------------------------------------------------------------------------------
/remlang/standard/curry.py:
--------------------------------------------------------------------------------
1 | try:
2 | from cytoolz import curry, compose
3 | except ModuleNotFoundError:
4 | from toolz import curry, compose
5 |
--------------------------------------------------------------------------------
/tests-example_source_codes/hw.rem:
--------------------------------------------------------------------------------
1 |
2 | print "Hello World"
3 |
4 | /*
5 | "Hello World" . print
6 | or
7 | "Hello World" then print
8 | is ok, too
9 | */
10 |
--------------------------------------------------------------------------------
/tests-example_source_codes/for_comp.rem:
--------------------------------------------------------------------------------
1 |
2 | from [1, 2, 3] yield {|x| x+1} . list . print
3 |
4 | from [1, 2, 3], ["a", "b", "c"] yield {|a, b| (a, b)} . list . print
5 |
6 |
--------------------------------------------------------------------------------
/remlang/compiler/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ### Note
4 |
5 | The following files are generated by parser generator:
6 |
7 | - `rem_parser.py`
8 |
- `test_lang.py`
10 |
11 | - `etoken.py`
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - "3.6"
4 | - "3.6-dev"
5 | - "3.7-dev"
6 | script:
- pip install 'EBNFParser >= 2.1.2' toolz
8 | - python setup.py install
9 | - bash test.sh
10 |
11 |
12 |
--------------------------------------------------------------------------------
/tests-example_source_codes/lambda.rem:
--------------------------------------------------------------------------------
1 | [1, 2, 3]
2 | . map {_ + 1}
3 | . reduce {_1 + _2}
4 | . {print "equals to 9?" _ == 9}
5 |
6 |
7 | {
8 | x, y =>
9 | let z = x + y
10 | print z
11 | } 1 2
12 |
13 |
14 |
--------------------------------------------------------------------------------
/remlang/standard/linq/tsql.py:
--------------------------------------------------------------------------------
"""
Kind of tired. tsql would be made in this week.
"""
def _select(t_src, fields):
    # TODO: unimplemented placeholder for T-SQL SELECT support
    pass


def _from(tables):
    # TODO: unimplemented placeholder for T-SQL FROM support
    pass


def _where(conditions):
    # TODO: unimplemented placeholder for T-SQL WHERE support
    pass
14 |
--------------------------------------------------------------------------------
/test.sh:
--------------------------------------------------------------------------------
# Run every example program; any failure must fail the build.
# Bug fix: without `set -e` a crashing example still exited 0, so CI
# (.travis.yml runs `bash test.sh`) could never detect regressions.
set -e

cd tests-example_source_codes

remlang class.rem
remlang for_comp.rem
remlang hw.rem
remlang mapreduce.rem
remlang pattern_matching.rem
remlang sys.rem a b c
remlang qsort.rem
remlang into.rem
remlang lambda.rem
--------------------------------------------------------------------------------
/remlang/console.py:
--------------------------------------------------------------------------------
class Colored:
    """ANSI escape sequences used to colorize REPL/console output."""
    # foreground colors
    Red = '\033[31m'
    Green = '\033[32m'
    Yellow = '\033[33m'
    Blue = '\033[34m'
    Purple = '\033[35m'
    LightBlue = '\033[36m'
    # resets the foreground to the terminal default (not a full SGR reset)
    Clear = '\033[39m'
    # bright magenta variant
    Purple2 = '\033[95m'
10 |
11 |
--------------------------------------------------------------------------------
/remlang/standard/module.py:
--------------------------------------------------------------------------------
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Idiom fix: the original used `if False:` as a type-checking-only
    # import guard; TYPE_CHECKING is the standard spelling and is what
    # static analyzers actually recognize.
    from ..compiler.module import ModuleAgent


def apply_module(module: 'ModuleAgent'):
    """Validate *module* and return a context updater.

    The returned callable merges the module's namespace dict into the
    environment passed to it (used by `using` in rem.lib/module.rem).

    :raises TypeError: when *module* is not a ModuleAgent.
    """
    # runtime import stays local to avoid a circular import with compiler.module
    from ..compiler.module import ModuleAgent
    if not isinstance(module, ModuleAgent):
        raise TypeError
    return lambda ctx: ctx.update(module._)
10 |
--------------------------------------------------------------------------------
/tests-example_source_codes/pattern_matching.rem:
--------------------------------------------------------------------------------
1 |
2 | let tup = (1, 2, 3, (4, 5, (6, )))
3 |
4 | case tup
5 | as (2, 3) => False
6 |
7 | as (1, 2, 3, b)
8 | when b . len != 3
9 | => False
10 |
11 | as (1, 2, 3, (a, b, (c, ...d)))
12 | when d . len == 0 where
13 | d = tuple d
14 | end
15 | => True
16 | end then print
--------------------------------------------------------------------------------
/tests-example_source_codes/chinese.rem:
--------------------------------------------------------------------------------
1 | 从 math 导入 pi
2 | 从 builtins 导入 input 作为 python输入
3 |
4 | 让 打印 等于 print
5 | 让 遍历 等于 map
6 | 让 转为浮点数 等于 float
7 |
8 | 让 输入 等于
9 | 从 输入提示 让
10 | 输入提示
11 | 之 遍历 python输入
12 | 之 遍历 转为浮点数
13 | 结束
14 |
15 | 打印 "求圆柱表面积\n"
16 |
17 | 对于 输入 ["输入圆柱半径:", "输入圆柱高度:"]
18 | 作为 半径, 高度
19 | 结束
20 |
21 | 让 圆柱表面积 = 侧面积 + 2 * 底面积
22 | 其中
23 | 使 侧面积 = 2 * pi * 半径 * 高度
24 | 使 底面积 = pi * 半径 ** 2
25 | 结束
26 |
27 | 打印 "圆柱表面积:" 圆柱表面积
28 |
--------------------------------------------------------------------------------
/remlang/compiler/tk.py:
--------------------------------------------------------------------------------
1 | from Ruikowa.ObjectRegex.Tokenizer import unique_literal_cache_pool
2 |
# Rem's reserved words; every one maps to the single interned 'keyword'
# token name in keywords_map below.
_keywords = [
    'then',
    'when',
    'and',
    'or',
    'in',
    'not',
    'case',
    'as',
    'end',
    'where',
    'from',
    'yield',
    'into',
    'let',
    'True',
    'False',
    'None',
    'import',
    'is',
]

# Idiom fix: dict.fromkeys replaces the original
# dict(zip(_keywords, [value] * len(_keywords))) construction --
# same result, no throwaway list.
keywords_map = dict.fromkeys(_keywords, unique_literal_cache_pool['keyword'])
26 |
--------------------------------------------------------------------------------
/tests-example_source_codes/into.rem:
--------------------------------------------------------------------------------
1 | @label1
2 | let err = {print "if print this, it must be a bug!"}
3 | call {
4 | @u
5 | from [1, 2, 3] not yield {
6 | |x|
7 | print "in nested closure"
8 | into u
9 | call err
10 | }
11 | print "jump here"
12 | into label1
13 | call err
14 | }
15 | print "hello, woman"
16 | import time
17 | let x = 3
18 |
19 | @break
20 | while { x > 1} {
21 | time'sleep 1
22 |
23 | into break
24 | 1/0 # if you see this err raised, it's unexpected.
25 | }
26 |
--------------------------------------------------------------------------------
/tests-example_source_codes/class.rem:
--------------------------------------------------------------------------------
1 | let class = {
2 | |fn_dict|
3 | # constructor
4 | {
5 | let self = call Object
6 | from
7 | fn_dict ' items . call
8 | not yield {
9 | |tp|
10 | case tp as k, v
11 | =>
12 |
13 | self. set k (v self)
14 | end
15 | }
16 | self
17 | }
18 | }
19 |
20 |
21 | let cls1 = class %{
22 | "go": {|x, y| y},
23 | }
24 |
25 | let inst = call cls1
26 |
27 | inst' go "method call" . print
28 |
--------------------------------------------------------------------------------
/tests-example_source_codes/mapreduce.rem:
--------------------------------------------------------------------------------
1 | [1, 2, 3] . map {|x| x+1} . list . print
2 |
3 | # the priority of `.` is the same as function call.
4 | # the priority of `then` is the same as applicative.
5 | # the priority of '.' is higher than `binary expression| case expression` whose priority is higher than `then`.
6 | /*
7 |
8 | '`then`' = '$'
9 |
10 | < case_expr = binary_expr
11 |
12 | < unary_expr
13 |
14 | < '.' = call
15 |
16 | < '!.xxx' = '![xxx]'
17 |
18 | < atom
19 |
20 | */
21 | let add = {|x, y| x+y}
22 |
23 | 1 . add 2 then print
24 |
25 | from [1, 2, 3, 4, 5, 6] yield {|x| x+1} . filter {|x| x%2 == 0} . map {|x| x**2} . list . print
26 |
27 |
--------------------------------------------------------------------------------
/remlang/compiler/msg.py:
--------------------------------------------------------------------------------
class RemStatus:
    """An interned status atom identified by its name (see StatusConstructor)."""
    __slots__ = ['name']

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'Status[{}]'.format(self.name)

    def __repr__(self):
        return self.__str__()


class StatusConstructor:
    """Interning factory: `StatusConstructor(name)` always returns the one
    RemStatus instance for *name*, so statuses compare by identity."""
    # shared cache of name -> RemStatus
    __atom_set__ = {}

    def __new__(cls, name):
        # Fix: the original looked the name up three times and carried a
        # dead `status = ...get(name)` branch (always None after the
        # first `in` check returned); one EAFP lookup is equivalent.
        try:
            return cls.__atom_set__[name]
        except KeyError:
            status = cls.__atom_set__[name] = RemStatus(name)
            return status
28 |
--------------------------------------------------------------------------------
/remlang/standard/path.py:
--------------------------------------------------------------------------------
1 | import os
2 | from .curry import curry
3 |
4 |
class Path:
    """Namespaced thin wrappers over `os`/`os.path` for the rem stdlib."""

    @classmethod
    def list_dir(cls, path):
        """Entries of directory *path*."""
        return os.listdir(path)

    @classmethod
    def abs(cls, path):
        """Absolute form of *path*."""
        return os.path.abspath(path)

    @classmethod
    def combine(cls, neck, end):
        """Join two path segments with the platform separator."""
        return os.path.join(neck, end)

    @classmethod
    def ext(cls, path):
        """Extension of *path* including the leading dot ('' if none)."""
        _, extension = os.path.splitext(path)
        return extension

    @classmethod
    def except_ext(cls, path):
        """*path* with its extension stripped."""
        root, _ = os.path.splitext(path)
        return root

    @classmethod
    def location(cls, path):
        """Absolute directory containing *path*."""
        directory, _ = os.path.split(os.path.abspath(path))
        return directory
29 |
--------------------------------------------------------------------------------
/remlang/compiler/control_flow.py:
--------------------------------------------------------------------------------
1 | from Ruikowa.ObjectRegex.ASTDef import Ast
2 | from Ruikowa.color import Colored
3 |
4 |
class BreakUntil(Exception):
    """Non-local control flow: raised by `into <label>` and caught at the
    matching label, carrying the label name and an optional result."""

    __slots__ = ['name', 'res']

    def __init__(self, name, res='ct'):
        # 'ct' is the sentinel default meaning "no explicit result"
        self.name = name
        self.res = res

    def __str__(self):
        return 'BreakUntil[label name = {}] with result: {}'.format(self.name, self.res)
14 |
15 |
class Macro:
    """A quoted expression (rem's backtick syntax) carried around as a value.

    NOTE(review): `expr` is presumably a parsed AST/code object produced by
    the quote syntax -- confirm at the evaluation sites.
    """
    __slots__ = ['expr']

    def __init__(self, expr):
        self.expr = expr

    def __str__(self):
        # green-highlighted rendering for console display
        return f'{Colored.Green}[Macro]: \n{self.expr}\n{Colored.Clear}'

    def __repr__(self):
        return self.__str__()
27 |
--------------------------------------------------------------------------------
/remlang/standard/io.py:
--------------------------------------------------------------------------------
1 | from .curry import curry
2 |
# Candidate text encodings, tried in order.
codecs = ('utf8', 'gb18030', 'latin-1')


def try_open(filename, mode):
    """Open *filename* with the first encoding in `codecs` that succeeds.

    NOTE(review): `open` only records the encoding; decode errors normally
    surface on `read`, not here -- so the fallback loop is best-effort.
    """
    for encoding in codecs:
        try:
            return open(filename, mode, encoding=encoding)
        except UnicodeDecodeError:
            continue
    # Bug fix: the original bare `raise UnicodeDecodeError` raised
    # TypeError instead, because UnicodeDecodeError requires
    # (encoding, object, start, end, reason) arguments.
    raise UnicodeDecodeError(
        '/'.join(codecs), b'', 0, 0,
        'none of the candidate encodings could open {}'.format(filename))
11 |
@curry
def open_file(file_name, mode):
    """Curried front-end to `try_open` (encoding-guessing open)."""
    return try_open(file_name, mode)
15 |
16 |
@curry
def write(f, content):
    """Write *content* through the opener *f* (invoked with mode 'w')."""
    with f('w') as file_obj:
        file_obj.write(content)
22 |
23 |
@curry
def read(f):
    """Read the whole file produced by the opener *f* (mode 'r')."""
    file_obj = f('r')
    with file_obj:
        return file_obj.read()
28 |
29 |
@curry
def append(f, content):
    """Append *content* through the opener *f* (invoked with mode 'a')."""
    with f('a') as file_obj:
        file_obj.write(content)
35 |
--------------------------------------------------------------------------------
/tests-example_source_codes/qsort.rem:
--------------------------------------------------------------------------------
1 | import linq
2 | import numpy as np
3 | let qsort = {
4 | |seq|
5 | case seq
6 | as [] => ()
7 | as head, ...tail =>
8 | let tsource = linq'Flow tail. {
9 | |it|
10 | (it'GroupBy group_fn)' Unboxed . call
11 | }
12 | where
13 | let group_fn = {
14 | |e|
15 | if {e > head} {1}
16 | .else {
17 | if {e < head} {-1}
18 | .else {0}
19 | }
20 | }
21 | end
22 | case tsource
23 | as {1 : ge,
24 | -1: le,
25 | 0 : eqs}
26 | =>
27 | qsort le ++ (head, ...eqs) ++ qsort ge
28 | end
29 | end
30 | }
31 | np'random'random_integers 0 100 20
32 | . qsort
33 | . tuple
34 | . print
35 |
--------------------------------------------------------------------------------
/tests-example_source_codes/itest.rem:
--------------------------------------------------------------------------------
1 | @break
2 |
3 | let x = 1
4 | case (1, 2, 3, (4, 5, 6))
5 | as (1, 2, 3) => print "???"
6 | as (&x, 2, a, (4, b, ...c))
7 | end
8 | (a, b, c.tuple)
9 | "some_text.txt" . open . write "some info"
10 | "some_text.txt" . open . read
11 | read (open "some_text.txt")
12 | read $ open "some_text.txt"
13 |
14 | while {x < 10} {
15 | print x
16 | x = x + 1
17 | into break
18 | }
19 |
20 |
21 | from [1, 2, 3], ["1", "2"] yield {|a, b| (a, b)} . filter {|x| x![0] != 2} . list
22 | import numpy as np
23 | np'array [[1, 2, 3], [4, 5, 6]] . slice [(0,2), (0, 2)]
24 | np'max [1, 2, 3]
25 | let h = 1; let r = 2;
26 |
27 | let S = S1 + 2 * S2 where
28 | from math import (pi)
29 | let S1 = 2 * pi * r * h
30 | let S2 = pi * r ** 2
31 | end
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup

# long_description is taken verbatim from the reST readme
with open('./README.rst', encoding='utf-8') as f:
    readme = f.read()

setup(
    name='remlang',
    long_description=readme,
    version='0.5',
    # Bug fix: 'remlang.standard.linq' was missing from `packages`, so the
    # linq subpackage (remlang/standard/linq/__init__.py, tsql.py) was never
    # installed even though it exists in the source tree.
    packages=['remlang', 'remlang.compiler', 'remlang.standard',
              'remlang.standard.linq'],
    url='https://github.com/thautwarm/Rem',
    license='MIT',
    author='thautwarm',
    author_email='twshere@outlook.com',
    # typo fix: "langauge" -> "language"
    description='rem language, which is very comfortable.',
    entry_points={
        'console_scripts': [
            'irem=remlang.intepreter:repl',
            'remlang=remlang.execute:run'
        ]
    },
    platforms='any',
    install_requires=[
        'toolz',
        'EBNFParser >= 2.1.2'
    ],
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython'],
    zip_safe=False)
32 |
--------------------------------------------------------------------------------
/remlang/compiler/err.py:
--------------------------------------------------------------------------------
1 | from Ruikowa.ObjectRegex.ASTDef import Ast
2 | from Ruikowa.ObjectRegex.Tokenizer import Tokenizer
3 | from Ruikowa.io import grace_open
4 | from Ruikowa.ErrorFamily import find_location
5 |
6 |
class Trace(Exception):
    """Wraps an exception raised while executing a rem statement, keeping the
    statement's first token so the failure can be located in source."""

    __slots__ = ['origin', 'tk', 'filename']

    def __init__(self, origin: Exception, statement: 'Ast'):
        # Walk down the leftmost children until reaching an actual Tokenizer
        # leaf -- that token carries the line/column information.
        tk: 'Ast' = statement
        while tk.__class__ is not Tokenizer:
            tk = tk[0]
        self.origin = origin
        self.tk: 'Tokenizer' = tk
        # source filename is stored at the end of the statement's meta
        self.filename = statement.meta[-1]

    def __str__(self):
        # Best-effort: include the source line only if the file is readable.
        try:
            src_code = grace_open(self.filename).read()
        except FileNotFoundError:
            src_code = None
        except OSError:
            src_code = None

        location = find_location(self.filename, where=self.tk, src_code=src_code)

        return f'{self.origin}\nerror : {self.origin.__class__} at {location}\n'

    def __repr__(self):
        return self.__str__()
34 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/remlang/standard/collections.py:
--------------------------------------------------------------------------------
1 | from .curry import curry
2 | from collections import Iterator
3 | from itertools import groupby
4 |
5 |
def xrange(arg):
    """range() that also accepts a tuple of range arguments to splat."""
    if isinstance(arg, tuple):
        return range(*arg)
    return range(arg)
10 |
11 |
@curry
def chunk_by(collection, f):
    """Group consecutive elements by the key *f*: (key, elements-tuple) pairs."""
    return ((key, tuple(grp)) for key, grp in groupby(collection, f))
15 |
16 |
@curry
def chunk(collection, n):
    """Split *collection* into consecutive pieces of length *n* (last may be shorter)."""
    indexed = groupby(enumerate(collection), lambda pair: pair[0] // n)
    return (tuple(item for _, item in grp) for _, grp in indexed)
20 |
21 |
def fst(collection):
    """First element of *collection*, or None when it is empty.

    Iterators are advanced by one item; sequences are indexed.
    """
    # Compat fix: `collections.Iterator` was removed in Python 3.10;
    # collections.abc is the supported home (imported locally so this
    # function works regardless of the module-level alias).
    from collections.abc import Iterator
    if isinstance(collection, Iterator):
        return next(collection, None)
    try:
        return collection[0]
    except IndexError:
        return None
32 |
33 |
def snd(collection):
    """Second element of *collection*, or None when it has fewer than two."""
    from collections.abc import Iterator  # Py3.10+: alias removed from `collections`
    if isinstance(collection, Iterator):
        # Bug fix: the original did `next(next(collection))`, which calls
        # next() *on the first element* (TypeError for non-iterator items)
        # instead of advancing the iterator twice.
        try:
            next(collection)
            return next(collection)
        except StopIteration:
            return None
    try:
        return collection[1]
    except IndexError:
        return None
44 |
--------------------------------------------------------------------------------
/remlang/standard/syntax.py:
--------------------------------------------------------------------------------
1 | from .curry import curry
2 | from collections import Iterable
3 | from ..compiler.msg import StatusConstructor
4 |
5 | if_false_status = StatusConstructor('if_false_status')
6 |
7 |
8 | # if - else
9 | # """
10 | # use if-else expr in the following way
11 | # let res = if {cond} {
12 | # do_some
13 | # } .else{
14 | # do_some
15 | # }
16 | # """
@curry
def rem_if(cond_fn, if_true_fn):
    """Curried `if`: run `if_true_fn` when the thunk `cond_fn` is truthy,
    otherwise hand back the if_false_status sentinel for `.else`."""
    return if_true_fn() if cond_fn() else if_false_status
23 |
24 |
def rem_else(status):
    """Second half of if/else: run the else-thunk only when the `if`
    produced the if_false_status sentinel; otherwise keep its result."""
    if status is not if_false_status:
        return lambda _: status
    return lambda else_do: else_do()
29 |
30 |
31 | # raise syntax
def rem_raise(exp):
    """Expression-position `raise` for rem code (raise is a statement in Python)."""
    raise exp
34 |
35 |
36 | # for-each syntax
@curry
def foreach(collection, f):
    """Apply *f* to every element, purely for its side effects."""
    for element in collection:
        f(element)
41 |
42 |
43 | # while
@curry
def rem_while(condition, f):
    """Repeatedly run the thunk *f* while the thunk *condition* stays truthy."""
    while condition():
        f()
48 |
49 |
50 | # slice
def indexer(arg):
    """Build a slice (or tuple of slices) from rem's slice-syntax argument.

    A scalar becomes ``slice(arg)``; an iterable yields one slice per element
    (iterable elements are splatted into ``slice(*e)``); a single-element
    result is unwrapped to the bare slice.
    """
    from collections.abc import Iterable  # Py3.10+: alias removed from `collections`
    if not isinstance(arg, Iterable):
        return slice(arg)

    res = tuple(slice(*e) if isinstance(e, Iterable) else slice(e) for e in arg)
    # Bug fix: `len(res) is 1` compared ints by identity (a CPython accident
    # and a SyntaxWarning on 3.8+); use `==`.
    if len(res) == 1:
        res = res[0]
    return res
59 |
60 |
@curry
def rem_slice(collection, arg):
    """Index *collection* with the slice(s) built by `indexer`."""
    key = indexer(arg)
    return collection[key]
64 |
--------------------------------------------------------------------------------
/remlang/compiler/module.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 | from Ruikowa.ErrorHandler import ErrorHandler
3 | from .reference_collections import ReferenceDict
4 | from .order_dual_opt import op_priority
5 | from ..standard import default
6 | import hashlib
7 |
8 | try:
9 | from cytoolz import compose
10 | except ModuleNotFoundError:
11 | from toolz import compose
12 |
# this is the manager of modules
# '@modules' records known module names (presumably name -> source/hash;
# 'main' has no backing file, hence the empty string -- verify against the
# interpreter's loader). Loaded module envs are added under their own names
# by make_new_module.
default_env = ReferenceDict(
    {
        '@modules': {'main': ''},
    })
18 |
19 |
def make_new_module(name: str, module_manager: 'ReferenceDict', compiler: 'ErrorHandler' = None):
    """make a new module

    Creates a fresh environment seeded with the rem standard library
    (`default.default`), registers it both in *module_manager* and in the
    global `default_env`, and returns it.
    """
    env = ReferenceDict(default.default.copy(), module_manager=module_manager)
    env.update(
        __name__=name,
        OperatorPriority=op_priority,
        __compiler__=compiler)
    module_manager[name] = env

    # '中文编程' switches the compiler's tokenizer so Chinese keywords are
    # translated first (presumably env['to_chinese'] is utils.to_chinese --
    # confirm against the default environment's contents).
    env['中文编程'] = lambda: env['__compiler__'].mut_token_by(
        lambda origin_func: compose(env['to_chinese'], origin_func))

    default_env[name] = env
    return env
35 |
36 |
def md5(path) -> 'Tuple[str, str]':
    """Read *path* and return ``(source_text, md5_hex_of_source)``.

    NOTE(review): presumably used for module change detection -- verify callers.
    """
    # Robustness fix: read as UTF-8 explicitly and close via a context
    # manager. The original used the platform default encoding, which would
    # make the hash differ across systems for the same file, and never
    # closed the handle on a read error.
    with open(path, 'r', encoding='utf-8') as f:
        src = f.read()
    return src, hashlib.md5(src.encode()).hexdigest()
41 |
42 |
class ModuleAgent:
    """Attribute-style view over a module's namespace dict.

    `agent.some_name` resolves 'some_name' in the wrapped dict; a missing
    name is reported as NameError qualified by the module's __name__.
    """

    __slots__ = ['_']

    def __init__(self, module: dict):
        self._ = module

    def __getattr__(self, item):
        namespace = self._
        if item in namespace:
            return namespace[item]
        raise NameError(f'{self._["__name__"]}.{item}')
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.idea/
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
--------------------------------------------------------------------------------
/remlang/compiler/test_lang.py:
--------------------------------------------------------------------------------
1 |
# This file is automatically generated by EBNFParser.
import argparse, json

# CLI of the generated parser test driver: pick a parser rule and feed it code.
cmd_parser = argparse.ArgumentParser(description='test language parsers swiftly.')
cmd_parser.add_argument("parser", type=str,
                        help='What kind of parser do you want to test with?(e.g Stmt, Expr, ...)')
cmd_parser.add_argument("codes", metavar='codes', type=str,
                        help='input some codes in your own language here.')
cmd_parser.add_argument('-o', help='output. support .json and .ast suffix.', type=str)
cmd_parser.add_argument("--testTk", nargs='?', default=False, const=True)
cmd_parser.add_argument('--debug', nargs='?', default=False, const=True,
                        help='print tokens of grammar file?')

args = cmd_parser.parse_args()

if args.debug:
    # enable Ruikowa's debug output before the parser modules are imported below
    from Ruikowa.Config import Debug
    Debug.append(1)

from Ruikowa.ErrorHandler import ErrorHandler, Colored
from Ruikowa.ObjectRegex.ASTDef import Ast
from Ruikowa.io import grace_open
from rem_parser import *
print(Colored.Green,'=========================ebnfparser test script================================', Colored.Clear)

print_token = args.testTk
# NOTE(review): eval() on a CLI argument resolves the named parser rule --
# acceptable for a generated dev-only tool, never expose to untrusted input.
ast: Ast = ErrorHandler(eval(args.parser).match, token_func).from_source_code('', args.codes, print_token=print_token)
print(Colored.Blue, ast, Colored.Clear)
if args.o:
    o: str = args.o.lower()
    if o.endswith('.json'):
        grace_open(o).write(json.dumps(ast.dump_to_json(), indent=2))
    elif o.endswith('.ast'):
        grace_open(o).write(ast.dump())
    else:
        raise Exception('Unsupported file ext.')
38 |
39 |
--------------------------------------------------------------------------------
/tests-example_source_codes/tutorial.rem:
--------------------------------------------------------------------------------
1 | call 中文编程 /* repl模式有效,使用remlang命令运行时请使用 --chinese 选项 */
2 |
3 | ## Hello World
4 | print "Hello World"
5 | print $ "Hello" + " " + "World"
6 | print ("Hello" + " " + "World")
7 | "Hello World" . print
8 | "Hello World" then print
9 | "Hello World" then (exec $ "pr" + "int")
10 | ## FileIO
11 |
12 | "io.txt"
13 | .open
14 | .write "sometext"
15 |
16 | "io.txt"
17 | .open
18 | .read
19 | .print
20 |
21 | ## simple data manipulation
22 | let my_data = %{
23 | "name": "reisen",
24 | "age": 16,
25 | "sex": 0
26 | }
27 |
28 | /* for-loop */
29 | from
30 | my_data
31 | not yield
32 | { k, v =>
33 | print $ "my " + k + " is " + v.string
34 | }
35 |
36 | /* for-compreshension */
37 | from my_data yield {_} . tuple . print
38 |
39 | /* pattern matching */
40 | my_data!["nested"] = [1, 2, 3]
41 | case my_data
42 | as {"name": "marisa"} => None
43 | as {"name": "pad" } => None
44 | as {"name": "reisen"} when my_data!["age"] > 20
45 | => None
46 | as {"name": "reisen", "nested": [a, ...b]}
47 | =>
48 | [a, b . list]
49 | end
50 | then print
51 |
52 | /* travsersal manipulation */
53 |
54 | [1, 2, 3]
55 | .map {_ ** 2}
56 | .reduce {_1 + _2}
57 | .print
58 |
59 | /* where */
60 |
61 | 从 math 导入 pi
62 | 让 打印 等于 print
63 |
64 | 对于 (2, 3)
65 | 作为 半径, 高度
66 | 结束
67 |
68 | 让 圆柱表面积 = 侧面积 + 2 * 底面积
69 | 其中
70 | 使 侧面积 = 2 * pi * 半径 * 高度
71 | 使 底面积 = pi * 半径 ** 2
72 | 结束
73 |
74 | 打印 圆柱表面积
75 |
76 |
77 | /* 调用画图 */
78 | /* py_fn: use `help py_fn` to show help information. */
79 | from tkinter import *
80 | let root = py_fn Tk
81 |
82 | let w = py_fn Label [root] %{"text": "Hello, world!"}
83 | w'pack . call
84 |
85 | root'mainloop . call
86 |
87 |
88 | /* Quote Expression */
89 |
90 | let macro = `print x
91 |
92 | let x = 1
93 | macro
94 | let x = 2
95 | macro
96 |
97 |
98 |
99 |
100 |
101 |
--------------------------------------------------------------------------------
/remlang/compiler/utils.py:
--------------------------------------------------------------------------------
1 | from collections import Iterable
2 | from Ruikowa.ObjectRegex.Tokenizer import Tokenizer
3 | from Ruikowa.ObjectRegex.Tokenizer import unique_literal_cache_pool
4 | from typing import List
5 |
6 | from ..standard.curry import curry
7 |
8 |
@curry
def map_token(mapping: dict, tk: Tokenizer):
    """Rewrite one token through *mapping*, keeping its source position.

    NOTE(review): the lookup key is the tuple ``(tk.name, tk.string)``, so
    *mapping* must be keyed by (name, string) pairs -- unlike
    `chinese_keywords` below, which is keyed by string alone. Confirm the
    intended callers; possibly ``mapping[tk.string]`` was meant.
    """
    name, string = mapping[tk.name, tk.string]
    return Tokenizer(name, string, tk.lineno, tk.colno)
13 |
14 |
# Chinese surface syntax -> (token name, English token string).
chinese_keywords = {
    '然后': ('keyword', 'then'),
    '当': ('keyword', 'when'),
    '且': ('keyword', 'and'),
    '或': ('keyword', 'or'),
    '含于': ('keyword', 'in'),
    '非': ('keyword', 'not'),
    '对于': ('keyword', 'case'),
    '作为': ('keyword', 'as'),
    '结束': ('keyword', 'end'),
    '其中': ('keyword', 'where'),
    '从': ('keyword', 'from'),
    '生成': ('keyword', 'yield'),
    '跳跃到': ('keyword', 'into'),
    '使': ('keyword', 'let'),
    '让': ('keyword', 'let'),
    '真': ('keyword', 'True'),
    '假': ('keyword', 'False'),
    '空': ('keyword', 'None'),
    '导入': ('keyword', 'import'),
    '是': ('keyword', 'is'),
    '的': ('keyword', '.'),
    '之': ('keyword', '.'),
    '等于': ('keyword', '='),
    '它': ('symbol', '_')
}
# Re-key the values through the interning pool -- presumably so downstream
# token comparisons can rely on string identity; confirm in Ruikowa.
chinese_keywords = {k: (unique_literal_cache_pool[t], unique_literal_cache_pool[v])
                    for k, (t, v) in
                    chinese_keywords.items()}
44 |
45 |
def to_chinese(tokens: List[Tokenizer]):
    """Yield *tokens* with Chinese keywords rewritten to their English
    equivalents; any token not listed in ``chinese_keywords`` passes
    through unchanged."""
    for tk in tokens:
        translated = chinese_keywords.get(tk.string)
        if translated is None:
            yield tk
        else:
            name, string = translated
            yield Tokenizer(name, string, tk.lineno, tk.colno)
53 |
54 |
def cast(to_type):
    """Decorator factory: wrap a function so its return value is converted
    with *to_type*.

    Example: ``@cast(list)`` makes a generator function return a list.
    """
    from functools import wraps

    def wrap_fn(func):
        @wraps(func)  # keep the wrapped function's name/docstring intact
        def call(*args, **kwargs):
            return to_type(func(*args, **kwargs))

        return call

    return wrap_fn
63 |
64 |
def flatten(seq):
    """Depth-first flattening of arbitrarily nested iterables.

    Strings are treated as atoms: yielded whole, never re-iterated
    character by character.
    """
    for item in seq:
        if isinstance(item, str) or not isinstance(item, Iterable):
            yield item
        else:
            yield from flatten(item)
71 |
--------------------------------------------------------------------------------
/remlang/standard/default.py:
--------------------------------------------------------------------------------
1 | from .curry import curry
2 |
3 | from functools import reduce
4 | from collections import Iterable, OrderedDict, Iterator
5 |
6 | from ..compiler.utils import cast, to_chinese
7 | from ..compiler.msg import StatusConstructor, RemStatus
8 |
9 | from .syntax import *
10 | from .io import *
11 | from .module import *
12 | from .path import Path
13 | from .collections import *
14 |
# Startup banner printed when the interactive REPL launches
# (see ``repl`` in remlang/intepreter.py).
LICENSE_INFO = """
Rem Language 0.5, April 23 2018 00:32.
Backend CPython, Author thautwarm, MIT License.
Report at https://github.com/thautwarm/Rem/issues.
"""
20 |
21 |
class Object:
    """Bare attribute container, exposed to Rem programs as ``Object``."""
    pass
24 |
def py_fn(f, args=None, kwargs=None):
    """
    Rem usage:  py_fn f args kwargs
    Python meaning:  f(*args, **kwargs)

    Omitted (or falsy) args/kwargs mean "call with no positional/keyword
    arguments".
    """
    return f(*(args or ()), **(kwargs or {}))
35 |
36 |
# Built-in environment injected into every Rem module / REPL session:
# maps each Rem-visible name to its Python implementation.
default = {
    # syntax
    'err': rem_raise,
    'foreach': foreach,
    'while': rem_while,
    'if': rem_if,
    'else': rem_else,

    'list': list,
    'tuple': tuple,
    'hashset': set,
    'dict': dict,
    'odict': OrderedDict,
    'max': max,
    'min': min,
    'print': print,
    'help': help,
    'get': curry(getattr),
    'set': curry(setattr),

    # collections
    'chunk_by': chunk_by,
    'chunk': chunk,
    # collection-first argument order (Rem's inverted style), hence the
    # argument-flipping lambdas around the Python builtins:
    'filter': curry(lambda collection, f: filter(f, collection)),
    'map': curry(lambda collection, f: map(f, collection)),
    'reduce': curry(lambda collection, f: reduce(f, collection)),
    'fold': curry(lambda collection, f, init: reduce(f, collection, init)),
    'fst': fst,
    'snd': snd,
    'slice': rem_slice,
    'indexer': indexer,

    # function helper
    'call': lambda f: f(),
    'py_fn': py_fn,

    # IO
    'write': write,
    'read': read,
    'open': open_file,
    'cast': cast,
    'to_chinese': to_chinese,
    'range': xrange,
    'append': append,

    'Object': Object,
    "index": curry(lambda x, i: x[i]),

    'len': len,
    'is_inst_of': curry(isinstance),
    'is_type_of': curry(lambda type, inst: isinstance(inst, type)),

    'apply_module': apply_module,
    'path': Path,

    'string': str,
    'int': int,
    'float': float,

    # msg
    'status': StatusConstructor,
    'is_status': lambda _: isinstance(_, RemStatus)

}

101 |
--------------------------------------------------------------------------------
/remlang/execute.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | from remlang.compiler.err import Trace
5 | from remlang.compiler.reference_collections import ReferenceDict
6 | from .compiler.ast import ast_for_file, MetaInfo, rem_parser
7 | from .intepreter import repl, main
8 | from .console import Colored
9 | import warnings, logging
10 |
# Silence library warnings so script/REPL output stays clean.
warnings.filterwarnings("ignore")
# Use getLogger so the logger joins the logging hierarchy and respects any
# root configuration; instantiating logging.Logger directly bypasses the
# logging manager.
logger = logging.getLogger('rem-exec')
13 |
14 |
def execute(src: str, env: ReferenceDict, path: str):
    """Compile the Rem source *src* (located at *path*) with the module's
    compiler and evaluate the resulting AST inside *env*."""
    compiler = env['__compiler__']
    parsed = compiler.from_source_code(path, src, meta=MetaInfo(fileName=path))
    ast_for_file(parsed, env)
22 |
23 |
def run():
    """Command-line entry point: dispatch to the REPL, a ``-c`` one-liner,
    or a ``.rem`` source file according to the parsed arguments."""
    cmdparser = argparse.ArgumentParser(
        description='Rem Language executing tool')

    cmdparser.add_argument("--repl",
                           help='run interactive rem intepreter.',
                           default=False, nargs='?',
                           const=True)

    cmdparser.add_argument('file',
                           metavar='file',
                           default='',
                           nargs='*',
                           type=str,
                           help='input .rem source file')

    cmdparser.add_argument('-c',
                           default='',
                           nargs='?',
                           help='run some source codes',
                           type=str)

    cmdparser.add_argument('--py_exception',
                           default=False,
                           const=True,
                           nargs='?',
                           help='show python exception?',
                           type=bool)

    cmdparser.add_argument('--chinese',
                           default=False,
                           const=True,
                           nargs='?',
                           help='chinese prog',
                           type=bool)

    args = cmdparser.parse_args()

    if args.chinese:
        # Switch the tokenizer to accept Chinese keywords for everything
        # executed below.
        main['中文编程']()

    if args.repl:
        repl()
    elif args.c:
        # Same preamble as the file branch: make "./" importable.
        # (A stray debug ``print 1`` was removed from this preamble.)
        execute("import sys;sys'path'append \"./\";", main, '')
        execute(args.c, main, '')

    elif args.file:
        with open(args.file[0], 'r', encoding='utf8') as f:
            src = f.read()
        try:
            execute("import sys;sys'path'append \"./\";", main, "")
            execute(src, main, os.path.abspath(args.file[0]))
        except Exception as e:
            # Show only the Rem-level message unless --py_exception asked
            # for the full Python traceback.
            logger.error(Colored.LightBlue + str(e) + Colored.Clear, exc_info=args.py_exception)

    else:
        cmdparser.print_help()
86 |
--------------------------------------------------------------------------------
/remlang/compiler/order_dual_opt.py:
--------------------------------------------------------------------------------
1 | from .linked_list import RevLinkedList, RevNode
2 | from collections import namedtuple
3 | import operator
4 |
5 | try:
6 | from cytoolz import curry
7 | except ModuleNotFoundError:
8 | from toolz import curry
9 |
10 | import itertools
11 |
12 | BinExp = namedtuple('BinExp', ['left', 'mid', 'right'])
13 |
14 | argsort = lambda seq: sorted(range(len(seq)), key=seq.__getitem__)
15 |
# Operator precedence table: larger number = binds tighter.
# ``order_dual_opt`` below reduces the highest-priority operator first.
op_priority = { # priority
    '|>': 2,
    '@': 3,
    '>': 3,
    '<': 3,
    '>=': 3,
    '<=': 3,
    '==': 3,
    '!=': 3,
    'in': 4,
    'or': 5,
    'and': 6,
    '<-': 7,
    '|': 7, # union
    '&': 8, # joint

    '+': 9,
    '-': 9,
    '*': 10,
    '/': 10,

    '//': 10,
    '%': 10,
    '++': 12,
    '--': 12,
    '**': 12,

    '^': 12,
    '^^': 12,
    # begin[bit op]
    '>>': 14,
    '<<': 14,
    '||': 14,

    '&&': 14,
    'is': 15,
    # end[bit op]
}
54 |
# Curried implementations of the binary operators; each entry is applied as
# ``bin_op_fns[op](left)(right)``.
bin_op_fns = {
    '+': curry(operator.add),
    '-': curry(operator.sub),
    '*': curry(operator.mul),
    '/': curry(operator.truediv),
    '//': curry(operator.floordiv),
    '++': curry(lambda x, y: itertools.chain(x, y)),
    '--': curry(lambda x, y: [_ for _ in x if _ not in y]),

    '&': curry(operator.and_),
    'and': curry(lambda x, y: x and y),

    '|': curry(operator.or_),
    'or': curry(lambda a, b: a or b),

    '%': curry(operator.mod),
    '**': curry(operator.pow),
    # BUG FIX: '>>' and '<<' were mapped to the opposite shifts —
    # operator.rshift implements ``>>`` and operator.lshift ``<<``.
    '>>': curry(operator.rshift),
    '<<': curry(operator.lshift),
    '||': curry(operator.or_),
    '^': curry(operator.xor),
    '<': curry(operator.lt),
    '<=': curry(operator.le),
    '>': curry(operator.gt),
    '>=': curry(operator.ge),
    '==': curry(operator.eq),
    'is': curry(operator.is_),
    '!=': curry(operator.ne),
    'in': curry(lambda e, collection: e in collection),

}
86 |
87 |
def order_dual_opt(seq):
    """Fold a flat ``[operand, op, operand, op, ...]`` sequence into a nested
    ``BinExp`` tree, reducing higher-priority operators first.

    The operator slots are exactly the ``str`` elements of *seq*. Requires
    at least one operator (``len(seq) >= 3``).
    """
    if len(seq) == 3:  # exactly one operator: nothing to order
        return BinExp(*seq)

    linked_list = RevLinkedList.from_iter(seq)

    # Visit operator nodes from the highest priority down; equal priorities
    # keep their left-to-right order because sorted() is stable.
    op_nodes = sorted((e for e in linked_list if isinstance(e.content, str)),
                      key=lambda x: op_priority[x.content],
                      reverse=True)

    each: RevNode
    for each in op_nodes:
        # Collapse (prev, op, next) into one BinExp stored on the op node,
        # then splice that node back into the list in place of the triple.
        bin_expr = BinExp(each.prev.content, each.content, each.next.content)
        each.content = bin_expr
        try:
            each.prev.prev.next = each
            each.prev = each.prev.prev
        except AttributeError:
            pass  # already at the left edge

        try:
            each.next.next.prev = each
            each.next = each.next.next
        except AttributeError:
            pass  # already at the right edge

    # NOTE(review): if *seq* contained no operator at all, ``bin_expr`` would
    # be unbound here — callers appear to guarantee at least one operator.
    return bin_expr
--------------------------------------------------------------------------------
/remlang/compiler/reference_collections.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 |
class ReferenceDict:
    """A lexical scope: a local name table plus an optional parent scope.

    Name resolution (``get_nonlocal``/``set_nonlocal``) walks the parent
    chain; ``branch``/``branch_with`` create child scopes for nested
    evaluation contexts.
    """
    __slots__ = ['local', 'module_manager', 'parent', ]

    def __init__(self, src, parent: 'Optional[ReferenceDict]' = None, module_manager=None):
        self.local = src
        # Every scope can reach itself through the '__env__' name.
        self.local['__env__'] = self
        # BUG FIX: ``module_manager`` was assigned twice; once is enough.
        self.module_manager = module_manager
        self.parent = parent

    def __getitem__(self, item):
        return self.local[item]

    def __setitem__(self, key, value):
        self.local[key] = value

    def get_local(self, item):
        """Look *item* up in this scope only (None when absent)."""
        return self.local.get(item)

    def get_nonlocal(self, item):
        """Look *item* up here or in any ancestor; raise NameError if unbound."""
        now = self
        try:
            while item not in now.local:
                now = now.parent  # falls off the chain -> now becomes None
            return now.local[item]

        except AttributeError:
            # ``None.local`` above: no scope defines *item*.
            raise NameError(item)

    def set_local(self, key, value):
        self.local[key] = value

    def set_nonlocal(self, key, value):
        """Rebind *key* in the nearest enclosing scope that already defines it."""
        now = self
        try:
            while key not in now.local:
                now = now.parent
            now.local[key] = value

        except AttributeError:
            raise NameError(key)

    def get_nonlocal_env(self, item):
        """Return the local table of the nearest scope defining *item*."""
        now = self
        try:
            while item not in now.local:
                now = now.parent
            return now.local

        except AttributeError:
            raise NameError(item)

    def __contains__(self, item):
        return item in self.local

    def copy(self):
        """Shallow copy: same parent and module manager, copied local table."""
        return ReferenceDict(self.local.copy(), self.parent, module_manager=self.module_manager)

    def branch(self):
        """Create a new, empty child scope."""
        return ReferenceDict({}, self, module_manager=self.module_manager)

    def branch_with(self, catch: dict):
        """Create a child scope seeded with *catch* as its local table."""
        return ReferenceDict(catch, self, module_manager=self.module_manager)

    def update(self, *args, **kwargs):
        self.local.update(*args, **kwargs)

    def __str__(self):
        return "ReferenceDict[{}]".format(self.local.__str__())
76 |
77 |
class ParameterProxy:
    """Dict proxy: item reads go to *host*, while item writes and updates
    are recorded separately in *catch* (the host is never mutated)."""
    __slots__ = ['host', 'catch']

    def __init__(self, dictionary: dict, catch: dict = None):
        self.host = dictionary
        self.catch = catch or {}

    def __getitem__(self, item):
        return self.host[item]

    def __setitem__(self, key, value):
        self.catch[key] = value

    def __contains__(self, item):
        return self.host.__contains__(item)

    def update(self, *args, **kwargs):
        self.catch.update(*args, **kwargs)

    def copy(self):
        """Same host, independent copy of the pending writes."""
        return ParameterProxy(self.host, self.catch.copy())
100 |
101 |
class ReferenceIter:
    """Iterator wrapper whose underlying stream can be cut off via ``clear``."""
    __slots__ = ['c']
    empty = iter(())

    def __init__(self, c):
        # Normalize any iterable into an iterator.
        self.c = iter(c)

    def __iter__(self):
        yield from self.c

    def __next__(self):
        return next(self.c)

    def clear(self):
        # Swap in a shared, permanently-exhausted iterator.
        self.c = ReferenceIter.empty
117 |
--------------------------------------------------------------------------------
/remlang/compiler/pattern_matching.py:
--------------------------------------------------------------------------------
1 | from collections import Iterable
2 |
3 | from .reference_collections import ReferenceIter
4 | from .rem_parser import UNameEnum, Tokenizer
5 | from Ruikowa.ObjectRegex.ASTDef import Ast
6 | from typing import Union
7 |
8 | if False:
9 | from .ast import ast_for_expr
10 |
11 |
def import_ast_for_expr():
    """Late-bind ``ast_for_expr`` into this module's globals.

    Imported lazily (and injected via ``globals()``) to break the circular
    import between this module and ``.ast``.
    """
    from .ast import ast_for_expr
    globals()['ast_for_expr'] = ast_for_expr


# Sentinel object for "no match".
unmatched = object()
# Maps the *second* character of a const token's string to its value:
# 'True'[1] == 'r', 'False'[1] == 'a', 'None'[1] == 'o'.
const_map = {'r': True, 'a': False, 'o': None}
19 |
20 |
def pattern_matching(left_e: Union[Ast, Tokenizer], right_e, ctx):
    """Match the pattern AST *left_e* against the runtime value *right_e*.

    Returns True on success, binding pattern names into the scope *ctx*;
    any internal failure (wrong shape, missing key, ...) yields False.
    """
    try:
        if left_e.name is UNameEnum.refName:

            [*ref, symbol] = left_e
            name = symbol.string
            if ref:
                # '&name': compare against the existing binding instead of
                # rebinding the name.
                if not isinstance(right_e, Iterable):
                    return ctx.get_nonlocal(name) == right_e
                if right_e.__class__ is str:
                    return right_e == ctx.get_nonlocal(name)
                # NOTE(review): ``all(zip(...))`` only checks that zipped
                # pairs exist (tuples are truthy); it does not compare the
                # elements themselves. Looks suspicious — confirm intended
                # semantics before changing.
                return all(zip(ctx.get_nonlocal(name), right_e))
            else:
                # Plain name: always matches and binds the value locally.
                ctx.set_local(name, right_e)
                return True

        elif left_e.name is UNameEnum.string:
            return eval(left_e.string) == right_e

        elif left_e.name is UNameEnum.const:
            # const_map keys on the token's second character
            # ('True'/'False'/'None' -> 'r'/'a'/'o').
            return const_map[left_e.string[1]] is right_e

        elif left_e.name is UNameEnum.number:
            return eval(left_e.string) == right_e

        elif left_e.name is UNameEnum.tuplePat:
            if not left_e:
                # Empty tuple pattern: only an empty/exhausted iterable matches.
                try:
                    next(iter(right_e))
                except StopIteration:
                    return True
                else:
                    return False
            many = left_e[0]
            return pattern_match_varargs(many, right_e, ctx)
        elif left_e.name is UNameEnum.dictPat:

            if not left_e:
                # '{}' pattern matches any empty mapping.
                return not right_e
            kv_pat_many = left_e[0]

            for expr, [dict_value_pat] in kv_pat_many:
                # Dict keys are arbitrary expressions, evaluated in *ctx*.
                expr = ast_for_expr(expr, ctx)
                if not pattern_matching(dict_value_pat, right_e[expr], ctx):
                    return False

            return True

        # BUG FIX: was ``left_e.string is '_'`` — identity comparison of a
        # str literal is a CPython interning accident; use equality.
        elif left_e.string == '_':
            # Wildcard: consume/ignore the remaining value.
            right_e.clear()
            return True
        else:
            assert False

    except:
        # Deliberate catch-all: any structural mismatch is just "no match".
        return False
77 |
78 |
def pattern_match_varargs(left, right, ctx):
    """Match a ``patMany`` pattern sequence *left* against the iterable *right*.

    Handles the trailing iter-mark (``,``) and '...' rest-patterns, binding
    names via ``pattern_matching``. Returns True/False, never raises.
    """
    try:
        is_iter: bool = False
        if left[-1].name is UNameEnum.iterMark:
            # A trailing ',' forces sequence matching even for one element.
            left.pop()
            is_iter = True
        elif len(left) > 1:
            is_iter = True

        if not is_iter:
            # Single sub-pattern, no comma: match the whole value at once.
            return pattern_matching(left[0][-1], right, ctx)

        left = ReferenceIter(left)
        right = ReferenceIter(right)

        while True:
            try:
                k = next(left)
            except StopIteration:
                # Patterns exhausted: succeed only if the values are too.
                try:
                    next(right)
                    return False
                except StopIteration:
                    return True

            else:
                # BUG FIX: was ``len(k) is 2`` — identity comparison of ints
                # relies on CPython small-int caching; use equality.
                if len(k) == 2:
                    # Two children means a '...' rest-pattern (see the grammar:
                    # ``pat ::= ['...'] refName | ...``): hand it the rest of
                    # the value stream.
                    k = k[1]
                    if not pattern_matching(k, right.c, ctx):
                        return False
                    return True

                v = next(right)
                if not pattern_matching(k[0], v, ctx):
                    return False
    except:
        # Any structural mismatch is simply "no match".
        return False
121 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://pypi.python.org/pypi/remlang)
2 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md)
3 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/LICENSE)
4 |
5 |
6 | # Rem Language
7 |
8 | Just use **PyPI**. Recommend to install `cytoolz` before installing Rem to speed up function invocations.
9 | ```shell
10 | pip install remlang
11 | ```
12 |
13 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview++.png)
14 |
15 |
16 | ## Some Supported Features
17 |
See all features at [Introduction](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md).
19 |
20 |
21 | - [Pattern Matching](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md#pattern-matching)
22 | Currently support `case` syntax.
23 | ```
24 | case expr
25 | as destruction
26 | [when condition]
27 | => statements
28 | end
29 | ```
30 |
31 | - Dictionary Pattern Matching
32 |
33 | ```
34 | case %{a: b}
35 | as {a : &b+1} => 1
36 | as {a : &b } => 2
37 | end
38 | ```
39 |
40 | - Function Parameter Destruction
41 | ```
42 | >> {|(1, 2, c)| c*2} (1, 2, 3)
43 | => 6
44 | ```
45 | - [**Inverted Syntax**](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md#inverted-syntax) (see the priority table in the linked page)
46 | ```
47 | file . open . write some_text
48 | ```
49 |
50 | `.` has a high priority while `then` has a lower one.
51 |
52 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/inverted.png)
53 |
54 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/$.png)
55 |
56 |
57 |
58 | - [Into Statement](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md#into-statement)(just like `goto`)
59 |
60 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/into.png)
61 |
62 |
63 | - [Currying Function](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md#functionlambda)
64 |
65 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/lambda.png)
66 |
67 |
68 | - [Scope](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md#scope)
69 |
70 | - [Where Syntax and Block Expression](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md#where-syntax)
71 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/for.png)
72 |
73 |
74 | - [For Comprehension and For Loop](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md#for-comprehension)
75 | [](https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/for.png)
76 |
77 |
78 |
79 | ## 关于中文编程
80 |
81 | Rem 支持中文编程, 因为它的tokenizer可以被动态操控,在任意一个Rem模块里,均有一个`__compiler__`对象, 负责处理输入语句到`ast`的转换。当下内置了一个无参函数`中文编程`, 便可以使用中文关键字。
82 |
83 | ```
84 | >> call 中文编程
85 | >> 对于 [3, 2] 作为 [甲, 乙] => 甲 * 乙 结束
86 | # 等价于 =>
87 | # case [3, 2] as [甲, 乙] => 甲 * 乙 end
88 | => 6
89 | ```
90 |
91 | 中英文token对照
92 |
93 | | English | 中文 |
94 | | ------- | --- |
95 | | then | 然后 |
96 | | when | 当 |
97 | | and | 并且 |
98 | | or | 或者 |
99 | | in | 含于 |
100 | | not | 非 |
101 | | case | 对于 |
102 | | as | 作为 |
103 | | end | 结束 |
104 | | where | 其中 |
105 | | from | 从 |
106 | | import | 导入 |
107 | | yield | 生成 |
108 | | into | 跳跃到 |
109 | | let | 使/让 |
110 | | True | 真 |
111 | | False | 假 |
112 | | None | 空 |
113 | | is | 是 |
114 | | `.` | 之 |
115 | | `它` | 它 |
116 | | = | 等于 |
117 |
118 |
119 |
120 |
121 |
122 |
--------------------------------------------------------------------------------
/remlang/intepreter.py:
--------------------------------------------------------------------------------
1 | from .compiler.ast import (BreakUntil, ErrorHandler, MetaInfo,
2 | ast_for_file, main)
3 | from .standard.default import LICENSE_INFO
4 | from .compiler.rem_parser import statement
5 | from .console import Colored
6 | from Ruikowa.io import grace_open
7 | from Ruikowa.ErrorFamily import DSLSyntaxError
8 | from pprint import pformat
9 | import logging
10 | import warnings
11 | import argparse
12 |
13 | cmd_parser = argparse.ArgumentParser()
14 | cmd_parser.add_argument('--file', nargs='?', type=str)
15 | cmd_parser.add_argument('--test', nargs='?', type=bool, default=False, const=True)
16 |
17 | warnings.filterwarnings("ignore")
18 | logger = logging.Logger('irem')
19 |
20 |
class ReplIncompleteException(Exception):
    """Marker for REPL input that parses as incomplete.

    NOTE(review): defined but never raised anywhere in this module.
    """
    pass
23 |
24 |
25 | _input = input
26 |
27 |
def repl():
    """Interactive Rem read-eval-print loop.

    Accumulates input lines until they parse as a complete statement,
    evaluates them in the global ``main`` module and pretty-prints the
    result. Recognizes the meta commands :manager, :modules, :vars and :q.
    """
    args = cmd_parser.parse_args()

    # preload sys so Rem code can extend the module search path
    ast_for_file(main['__compiler__']
                 .from_source_code('',
                                   "import sys;sys'path'append \"./\";",
                                   meta=MetaInfo(fileName=''),
                                   partial=False), main)

    testing = args.test
    if args.file:
        # Replay a script line by line as if it were typed interactively.
        file_src = iter(grace_open(args.file).read().splitlines())
        std_input = _input

        def input(s):
            code = next(file_src)
            print(s, code)
            return code
    else:
        input = _input

    print(Colored.Purple2, LICENSE_INFO, Colored.Clear)
    count = None  # None means "not inside a multi-line statement"
    src = []      # pending source lines of the current statement
    errs = []     # syntax errors collected while the statement is incomplete
    while True:
        try:
            inp = input(Colored.Yellow + '>> ' if count is None else '   ')
        except StopIteration:
            # Replayed file exhausted: fall back to real stdin.
            # (StopIteration only comes from the file-replay input above,
            # so std_input is always bound here.)
            input = std_input
            continue
        except KeyboardInterrupt:
            # Ctrl-C abandons the half-typed statement.
            src.clear()
            errs.clear()
            count = None
            print()
            continue

        if not inp:
            continue
        elif inp == ':manager':
            print(Colored.LightBlue, pformat(main.module_manager.local))
            continue
        elif inp == ':modules':
            print(Colored.LightBlue, pformat(main.module_manager['@modules']))
            continue
        elif inp == ':vars':
            print(Colored.Purple2, pformat(main.local))
            continue
        elif inp == ':q':
            # (a dead ``import sys`` was removed from this branch)
            print(Colored.Green, '\n Good Bye~')
            return "End Rem Session."

        meta = MetaInfo(fileName='')
        src.append(inp)
        try:
            ans = main['__compiler__'].from_source_code('',
                                                        '\n'.join(src),
                                                        meta=meta,
                                                        partial=False, print_token=testing)
            if testing:
                print(ans)
            try:
                ans = ast_for_file(ans, main)
                if testing:
                    print(ans)
                main['ans'] = ans
                if count is not None:
                    count = None
                if ans is not None and not isinstance(ans, BreakUntil):
                    print(Colored.Green, '=> ', end='')
                    if any(map(lambda x: isinstance(ans, x),
                               (list, dict, set))):  # mutable
                        print(Colored.Blue, pformat(ans))
                    elif any(map(lambda x: isinstance(ans, x),
                                 (str, int, float, complex, tuple))):  # immutable
                        print(Colored.LightBlue, pformat(ans))
                    else:
                        print(Colored.Purple, pformat(ans))

            except BaseException as e:
                if testing:
                    # was ``logging.error`` (root logger); use the module
                    # logger consistently with the non-testing branch
                    logger.error(Colored.LightBlue + str(e) + Colored.Clear, exc_info=True)
                else:
                    logger.error(Colored.LightBlue + str(e) + Colored.Clear)

            src.clear()
            errs.clear()

        except DSLSyntaxError as e:
            # Possibly just an incomplete statement: keep reading as long as
            # the parser consumed more tokens than on the previous attempt.
            errs.append(e)
            now_count = meta.max_fetched
            is_incremental = count is None or now_count > count

            if is_incremental:
                count = now_count
            else:
                src.clear()
                count = None
                for e in errs:
                    if testing:
                        logger.error(Colored.LightBlue + str(e) + Colored.Clear, exc_info=True)
                    else:
                        logger.error(Colored.LightBlue + str(e) + Colored.Clear)
                errs.clear()
            continue
136 |
--------------------------------------------------------------------------------
/remlang/compiler/linked_list.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 |
class Node:
    """Singly-linked list cell: a payload plus a ``next`` pointer."""
    __slots__ = ['next', 'content']

    def __init__(self, content=None):
        self.content = content
        self.next = None

    def __repr__(self):
        return self.content.__str__()

    def __str__(self):
        return self.content.__str__()
16 |
17 |
class LinkedList:
    """Singly-linked list of ``Node`` cells with O(1) append at both ends."""
    __slots__ = ['head', 'end']

    def __init__(self, head_end=None):
        if head_end:
            self.head, self.end = head_end
        else:
            self.head: Optional[Node] = None
            self.end: Optional[Node] = None

    def append_node(self, node):
        """Attach *node* after the current tail."""
        if self.end is None:
            self.head = node
        else:
            self.end.next = node
        self.end = node

    def append(self, v):
        self.append_node(Node(v))

    def append_left_node(self, node):
        """Attach *node* before the current head."""
        node.next = self.head
        self.head = node
        if self.end is None:
            self.end = node

    def append_left(self, v):
        self.append_left_node(Node(v))

    def __getitem__(self, item):
        """Return the *item*-th Node (not its content); None past the end."""
        node = self.head
        for _ in range(item):
            if node.next is None:
                return None
            node = node.next
        return node

    def extend(self, linked_list):
        self.end.next = linked_list.head
        self.end = linked_list.end

    def extend_left(self, linked_list):
        linked_list.end.next = self.head
        self.head = linked_list.head

    def __iter__(self):
        node = self.head
        while node is not None:
            yield node
            node = node.next

    def __str__(self):
        return [n for n in self].__str__()

    def __repr__(self):
        return self.__str__()

    @property
    def tail(self):
        """View of everything after the head (empty list for <= 1 element)."""
        return LinkedList(head_end=(self.head.next, self.end) if self.head is not self.end else None)

    @property
    def to_list(self):
        return [e.content for e in self]

    @property
    def to_tuple(self):
        return tuple(e.content for e in self)

    @staticmethod
    def from_iter(sequence):
        """Build a LinkedList from any iterable."""
        _list = LinkedList()
        for elem in sequence:
            _list.append(elem)
        return _list
97 |
98 |
class RevNode(Node):
    """Doubly-linked list cell: ``Node`` plus a ``prev`` back-pointer."""

    def __init__(self, content=None):
        super(RevNode, self).__init__(content)
        self.prev: Optional[RevNode] = None
103 |
104 |
class RevLinkedList:
    """Doubly-linked list of ``RevNode`` cells (maintains prev pointers)."""

    def __init__(self, head_end=None):
        if head_end:
            self.head, self.end = head_end
        else:
            self.head: Optional[RevNode] = None
            self.end: Optional[RevNode] = None

    def append_node(self, node):
        """Attach *node* after the current tail, wiring its prev pointer."""
        try:
            self.end.next = node
            node.prev = self.end
        except AttributeError:
            # Empty list: node becomes both head and end.
            self.head = self.end = node
        self.end = node

    def __getitem__(self, item):
        """Return the *item*-th node (not its content); None past the end."""
        node = self.head
        for i in range(item):
            if not node.next:
                return None
            node = node.next
        return node

    def append(self, v):
        self.append_node(RevNode(v))

    def append_left_node(self, node):
        """Attach *node* before the current head."""
        node.next = self.head
        # BUG FIX: was ``if self.head is not Node`` — comparing against the
        # class object, so an empty list fell into ``None.prev = node`` and
        # crashed with AttributeError.
        if self.head is not None:
            self.head.prev = node
        self.head = node
        if self.end is None:
            self.end = node

    def append_left(self, v):
        self.append_left_node(RevNode(v))

    def extend(self, linked_list):
        self.end.next = linked_list.head
        if linked_list.head is not None:
            linked_list.head.prev = self.end
        self.end = linked_list.end

    def extend_left(self, linked_list):
        linked_list.end.next = self.head
        if self.head is not None:
            self.head.prev = linked_list.end
        self.head = linked_list.head

    def __iter__(self):
        if self.head is None:
            return None
        else:
            n = self.head
            while n.next:
                yield n
                n = n.next
            yield n

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return [n for n in self].__str__()

    @property
    def tail(self):
        # NOTE(review): returns a plain LinkedList, and the remaining nodes'
        # prev pointers still reference the dropped head — confirm intended.
        return LinkedList(head_end=(self.head.next, self.end) if self.head is not self.end else None)

    @property
    def to_list(self):
        return [e.content for e in self]

    @property
    def to_tuple(self):
        return tuple(e.content for e in self)

    @staticmethod
    def from_iter(sequence):
        """Build a RevLinkedList from any iterable."""
        _list = RevLinkedList()
        for elem in sequence:
            _list.append(elem)
        return _list
189 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | |PyPI version| |Intro| |MIT|
2 |
Rem Language
4 | ============
5 |
6 | Just use **PyPI**. Recommend to install ``cytoolz`` before installing
7 | Rem to speed up function invocations.
8 |
9 | .. code:: shell
10 |
11 | pip install remlang
12 |
13 | |Overview|
14 |
15 | Some Supported Features
16 | -----------------------
17 |
18 | See all features at
`Introduction <https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md>`__.
20 |
21 | - | `Pattern
22 | Matching `__
23 | | Currently support ``case`` syntax.
24 |
25 | ::
26 |
27 | case expr
28 | as destruction
29 | [when condition]
30 | => statements
31 | end
32 |
33 | - Dictionary Pattern Matching
34 |
35 | ::
36 |
37 | case %{a: b}
38 | as {a : &b+1} => 1
39 | as {a : &b } => 2
40 | end
41 |
42 | - Function Parameter Destruction
43 |
44 | ::
45 |
46 | >> {|(1, 2, c)| c*2} (1, 2, 3)
47 | => 6
48 |
49 | - `**Inverted
50 | Syntax** `__
51 | (see the priority table in the linked page)
52 |
53 | ::
54 |
55 | file . open . write some_text
56 |
57 | ``.`` has a high priority while ``then`` has a lower one.
58 |
59 | |Inverted|
60 |
61 | |$|
62 |
63 | - `Into
64 | Statement `__\ (just
65 | like ``goto``)
66 |
67 | |Into|
68 |
69 | - `Currying
70 | Function `__
71 |
72 | |Lambda|
73 |
74 | - `Scope `__
75 |
76 | - | `Where Syntax and Block
77 | Expression `__
78 | | |Where|
79 |
80 | - | `For Comprehension and For
81 | Loop `__
82 | | |For|
83 |
84 | 关于中文编程
85 | ------------
86 |
87 | Rem 支持中文编程,
88 | 因为它的tokenizer可以被动态操控,在任意一个Rem模块里,均有一个\ ``__compiler__``\ 对象,
89 | 负责处理输入语句到\ ``ast``\ 的转换。当下内置了一个无参函数\ ``中文编程``\ ,
90 | 便可以使用中文关键字。
91 |
92 | ::
93 |
94 | >> call 中文编程
95 | >> 对于 [3, 2] 作为 [甲, 乙] => 甲 * 乙 结束
96 | # 等价于 =>
97 | # case [3, 2] as [甲, 乙] => 甲 * 乙 end
98 | => 6
99 |
100 | 中英文token对照
101 |
102 | +-----------+----------+
103 | | English | 中文 |
104 | +===========+==========+
105 | | then | 然后 |
106 | +-----------+----------+
107 | | when | 当 |
108 | +-----------+----------+
| and       | 且       |
+-----------+----------+
| or        | 或       |
112 | +-----------+----------+
113 | | in | 含于 |
114 | +-----------+----------+
115 | | not | 非 |
116 | +-----------+----------+
117 | | case | 对于 |
118 | +-----------+----------+
119 | | as | 作为 |
120 | +-----------+----------+
121 | | end | 结束 |
122 | +-----------+----------+
123 | | where | 其中 |
124 | +-----------+----------+
125 | | from | 从 |
126 | +-----------+----------+
127 | | import | 导入 |
128 | +-----------+----------+
129 | | yield | 生成 |
130 | +-----------+----------+
131 | | into | 跳跃到 |
132 | +-----------+----------+
133 | | let | 使/让 |
134 | +-----------+----------+
135 | | True | 真 |
136 | +-----------+----------+
137 | | False | 假 |
138 | +-----------+----------+
139 | | None | 空 |
140 | +-----------+----------+
141 | | is | 是 |
142 | +-----------+----------+
143 | | ``.`` | 之 |
144 | +-----------+----------+
145 | | ``它`` | 它 |
146 | +-----------+----------+
147 | | = | 等于 |
148 | +-----------+----------+
149 |
150 | .. |PyPI version| image:: https://img.shields.io/pypi/v/remlang.svg
151 | :target: https://pypi.python.org/pypi/remlang
152 | .. |Intro| image:: https://img.shields.io/badge/intro-remlang-red.svg
153 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/intro.md
154 | .. |MIT| image:: https://img.shields.io/badge/license-MIT-blue.svg?style=flat
155 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/LICENSE
156 | .. |Overview| image:: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview++.png
157 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview++.png
158 | .. |Inverted| image:: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/inverted.png
159 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/inverted.png
160 | .. |$| image:: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/$.png
161 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/$.png
162 | .. |Into| image:: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/into.png
163 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/into.png
164 | .. |Lambda| image:: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/lambda.png
165 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/lambda.png
166 | .. |Where| image:: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/where.png
167 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/for.png
168 | .. |For| image:: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/for.png
169 | :target: https://github.com/thautwarm/Rem/blob/ebnfparser2.0/overview-figs/for.png
170 |
--------------------------------------------------------------------------------
/remlang/compiler/rem.grammar:
--------------------------------------------------------------------------------
1 | ignore [space comments]
2 |
3 | refName ::= ['&'] symbol;
4 |
5 | newline := R'\n+';
6 | space := R'\s+';
7 | symbol := R'[a-zA-Z\u4e00-\u9fa5\u3040-\u309f\u30a0-\u30ff_]{1}[a-zA-Z\u4e00-\u9fa5\u3040-\u309f\u30a0-\u30ff\d_]*';
8 |
9 | keyword cast as K
10 | := 'then', 'when', 'and', 'or', 'not'
11 | 'in', 'not', 'case', 'as'
12 | 'end', 'where', 'from', 'yield'
13 | 'into', 'let', 'True', 'False'
14 | 'None', 'import', 'is';
15 |
16 | string := R'"([^\"]+|\\.)*?"';
17 | comments := R'(#.*)|(((/\*)+?[\w\W]+?(\*/)+))';
18 | number := R'0[Xx][\da-fA-F]+|\d+(?:\.\d+|)(?:E\-{0,1}\d+|)';
19 |
20 |
21 | operator := '>>', '<<', '::'
22 | '++', '--', '**', '//'
23 | '^^', '&&', '||'
24 | '|>' '>=', '<='
25 | '==', '!=' '<-',
26 | '+', '-', '*', '/', '%'
27 | '>', '<',
28 | '^', '&';
29 |
30 | T ::= newline+;
31 |
32 | importAs
33 | ::= symbol ['as' symbol];
34 |
35 | fromImportStmt
36 | ::= 'from' ['...' | '.'{1, 2} ] symbol ('.' symbol)* 'import' ('*' | importAs+ | '(' [T] importAs ([T] ',' [T] importAs)* ')' );
37 |
38 | importStmt
39 | ::= singleImportStmt | fromImportStmt | remImport;
40 |
41 | singleImportStmt
42 | ::= 'import' simpleArgs ['as' symbol];
43 |
44 | remImport throw ['import', 'as']
45 | ::= 'import' string [ 'as' symbol];
46 |
47 |
48 | const ::= 'True' | 'False' | 'None';
49 |
50 |
51 |
52 | # lambda currently does not support argument destruction.
53 | simpleArgs throw [',' T]
54 | ::= symbol ([T] ',' [T] symbol)*;
55 |
56 | # for pattern matching
57 | patMany throw [',' T]
58 | ::= pat [([T] ',' [T] pat)+ [T]] [iterMark];
59 |
60 | iterMark::= ',';
61 | pat ::= ['...'] '_' |
62 | ['...'] refName |
63 | tuplePat |
64 | dictPat |
65 | string |
66 | const |
67 | number ;
68 |
69 | noZipPat ::= refName|tuplePat|dictPat|string|const|number;
70 |
71 | noZipPatMany throw [T, ',']
72 | ::= noZipPat ([T] ',' [T] noZipPat)*;
73 |
74 | tuplePat throw ['(' ')', '[', ']' T]
75 | ::= '('[T] [patMany [T]] ')'| '[' [T] [patMany [T]] ']';
76 |
77 | kvPat throw [':']
78 | ::= expr ':' noZipPat;
79 |
80 | kvPatMany throw [T, ',']
81 | ::= kvPat [([T] ',' [T] kvPat)+ [T]] [','];
82 |
83 | dictPat throw ['{' '}' T]
84 | ::= '{'[T] [kvPatMany [T]]'}';
85 |
86 |
87 | lambdef throw ['{', '}', '|', ',', 'from', 'let', 'end', T, '=>']
88 | ::= '{' [T]
89 | ['|' [(simpleArgs|noZipPatMany) [T]] '|' | (simpleArgs|noZipPatMany) '=>']
90 | [T]
91 | [statements [T]]
92 | '}'
93 | |
94 | 'from' [T]
95 | [(simpleArgs|noZipPatMany) [T]]
96 | 'let' [T]
97 | [statements [T]]
98 | 'end'
99 | ;
100 |
101 | atom throw ['++']
102 | ::= refName | const | string ('++' string)* | number |
103 | '(' expr ')'|
104 | listCons | tupleCons | setCons | dictCons | compreh |
105 | lambdef;
106 |
107 | trailer throw ['!' '[' ']' '\'']
108 | ::= '!' '[' exprCons ']' | '\'' symbol;
109 |
110 | atomExpr throw[T]
111 | ::= atom ([T] trailer)*;
112 |
113 |
114 | invExp throw [T]
115 | ::= atomExpr (atomExpr |[T] invTrailer)*;
116 |
117 | invTrailer throw ['.']
118 | ::= '.' atomExpr;
119 |
120 | suffix := '??' '?';
121 | factor ::= ['not' | '+' | '-'] invExp [suffix];
122 |
123 | binExp ::= factor ((operator | 'or' | 'and' | 'in' | 'is') factor)*;
124 |
125 | caseExp throw ['case', 'end', T]
126 | ::= 'case' [T] expr [T] asExp* 'end';
127 |
128 | asExp throw ['=>', T, 'as', 'when']
129 | ::= ['as' patMany]
130 | [
131 | [T] 'when' [T] expr
132 | ]
133 | [T]
134 | ['=>' [T] [statements]];
135 |
136 | testExpr ::= caseExp | binExp;
137 |
138 | where throw ['where', T, 'end'] ::= 'where' [T] [statements [T]] 'end';
139 |
140 | expr throw [T]
141 | ::= '`' expr | testExpr ([T] thenTrailer | [T] applicationTrailer)* [[T] where];
142 |
143 | thenTrailer throw ['then' T]
144 | ::= 'then' [T] testExpr;
145 |
146 | applicationTrailer throw [T '$']
147 | ::= '$' [T] testExpr;
148 |
149 | statements throw [T]
150 | ::= statement ([T] statement)*;
151 |
152 | statement
153 | ::= (label | let | expr | into | importStmt) [';'];
154 |
155 | let throw ['=' '!']
156 | ::= ['let'] symbol trailer* '=' expr;
157 |
158 | exprMany throw [',', T] ::= expr [([T] ',' [T] expr)+];
159 | unpack throw ['...'] ::= '...' expr;
160 | exprCons throw [',' T] ::= exprMany ([T] ',' [T] unpack [[T] ',' [T] exprMany])* [','];
161 |
162 | kv throw [':', T] ::= expr ':' expr;
163 | kvMany throw [',', T] ::= kv [([T] ',' [T] kv)+ [T]];
164 | kvCons throw [',' T] ::= kvMany ([T] ',' [T] unpack [[T] ',' [T] kvMany])* [','];
165 |
166 | listCons throw ['[' ']' T] ::= '[' [T] [exprCons [T]] ']';
167 | tupleCons throw ['(' ')' T] ::= '(' [T] [exprCons [T]] ')';
168 | setCons throw ['%' '{' '}' T] ::= '%' '{' [T] [exprCons [T]] '}';
169 | dictCons throw ['%' '{' '}' T] ::= '%' '{' [T] [kvCons [T]] '}';
170 | compreh throw['from' 'yield' T] ::= 'from' [T] exprMany [[T] K'not'] [T] 'yield' [T] lambdef;
171 |
172 | label throw ['@'] ::= '@' symbol;
173 | into throw ['into'] ::= 'into' symbol;
174 |
175 | file throw [T] ::= [T] [statements [T]];
176 |
--------------------------------------------------------------------------------
/intro.md:
--------------------------------------------------------------------------------
1 |
2 | Introduction of Rem Language
3 | --------------------------------
4 |
5 | Rem language is currently implemented in CPython, and it's for people who work in the fields of Python, Julia, R, MATLAB, etc.
6 |
7 | Rem looks like Python, Ruby and many other dynamic languages, and it adopts some convenient features from many functional programming languages.
8 |
9 | The **inverted syntax** is, to some degree, the core of Rem. As a result of unifying the manipulations on first-class functions, you can write object-oriented-looking code in the following way:
10 |
11 | ```
12 | >> ["foo", "bar"] . map {|x| x + "tail"}
13 | ```
14 | However, it's not really object-oriented — it's functional!
15 |
16 | To explain, the codes above could be represented in Python codes:
17 |
18 | ```python
19 | map (lambda x: x + "tail", ["foo", "bar"])
20 | ```
21 | (Just **take care** that not all the code in Rem can be directly translated to Python.)
22 |
23 | Also, we can try to imitate `keyword` with functions in Rem:
24 |
25 | ```
26 | let x = 0
27 | while { x < 10} {
28 | x = x + 1
29 | }
30 | ```
31 | And `while` here is just a function!
32 |
33 | **Pattern Matching** and **Where Syntax** are both tasty, too. You can learn how to use them in the following manual.
34 |
35 | All in all, Rem has a lot of features to support comfortable programming with modern paradigms.
36 |
37 | I'm planning to write a C# backend for Rem soon in order to reduce the dependency on Python (though we shouldn't ignore the requirements of real-world work),
38 | and efficient compilers will come along, too.
39 |
40 | ## Hello World
41 |
42 | Install `remlang` easily by using **PyPI** with Python 3.6+.
43 |
44 | ```shell
45 | pip install remlang
46 | ```
47 |
48 | If you want to use the `repl`, just type `irem`:
49 | ```
50 | irem
51 |
52 | Rem Language alpha, March 15 2018 02:14.
53 | Backend CPython, Author thautwarm, MIT License.
54 | Report at https://github.com/thautwarm/Rem/issues.
55 |
56 | >> "Hello World"
57 | 'Hello World'
58 |
59 | >> "Hello World" . print
60 | Hello World
61 | ```
62 | ## Function(Lambda)
63 |
64 | Something you need to be aware of is that **functions are curried** in Rem.
65 |
66 | ```
67 | >> let fn = {|x| x + 1}
68 | >> fn 1
69 | => 2
70 | >> let fn = {|x, y, z|
71 | y = y + z + x
72 | y
73 | }
74 | >> f1 = fn 5
75 | >> f2 = f1 10
76 | >> f2 20
77 | => 35
78 | => from x, y, z let x + y + z end # another way to define a lambda
79 | => {x, y, z => x + y + z} # another way to define a lambda
80 | ```
81 | You can make a `lambda` with the syntax you prefer:
82 |
83 | - JavaScript like:
84 |
85 | `{x, y => x + y}; {{"a": a}, (b, c) => a + b + c}`
86 |
87 | - Ruby like:
88 |
89 | `{|x, y| x + y}, {|{"a": a}; (b, c)| a + b + c }`
90 |
91 | - Natural language like
92 |
93 | ```
94 | from
95 | x, y
96 | let
97 | x + y
98 | end
99 |
100 | from
101 | {"a": a}, (b, c)
102 | let
103 | a + b + c
104 | end
105 | ```
106 |
107 | A very sweet piece of syntactic sugar from `Scala` is now supported.
108 | ```
109 | >> 1 . {_ + 1}
110 | => 2
111 | >> [1, 2, 3] . map {_ + 1} . list
112 | => [2, 3, 4]
113 | >> {_1 + _2} 1 2
114 | => 3
115 | ```
116 | Take care that when you're using multiple implicit parameters, you cannot curry the resulting function.
117 |
118 | So the following codes would cause runtime error.
119 | ```
120 | >> let f = {_1 + _2} 1
121 | ```
122 |
123 | ## $ operator
124 |
125 | ```
126 | >> let fn = {|x| x**2}
127 | >> fn 2 + 3
128 | => 7 # 2**2 + 3
129 | >> fn $ 2 + 3
130 | => 25 # (2+3) ** 2
131 | >> let fn = {|x, y| x*y}
132 | >> fn $ 1+2 $ 3+4
133 | => 21 # 3*7
134 | ```
135 |
136 | ## Scope
137 |
138 | Partly **Dynamic Scoping**
139 |
140 | ```
141 | >> let x = 0
142 | >> call { x }
143 | => 0
144 | >> call { x = 1; x}
145 | => 1
146 | >> x
147 | => 1
148 | >> call {let x = x; x = 10; x}
149 | => 10
150 | >> x
151 | => 1
152 | ```
153 |
154 | ## Where Syntax
155 | ```
156 | >> dictionary = %{
157 | c where
158 | c = 1
159 | end : 2
160 | }
161 | >> dictionary
162 | => {1: 2}
163 |
164 | >> from math import pi
165 | >> S = S1 + 2*S2 where
166 | let S1 = pi*r**2
167 | let S2 = 2*pi*r
168 | end
169 | ```
170 |
171 | ## Inverted Syntax
172 | ```
173 | >> let add = {|x, y| x + y}
174 | >> 1 . add 2
175 | => 3
176 | >> 1 then add 2
177 | => 3
178 | ```
179 | However, the **priority** of `.` is different from `then`.
180 |
181 | Here is the table of priorities in Rem:
182 |
183 | | syntax | priority | sample |
184 | | ------- | --- | --- |
185 | | then | 1 | `a then func` |
186 | | `$` | 1 | `f $ 1 2` |
187 | | case | 2 | `case ... end` |
188 | | binary expr | 2 | `1 * 2` |
189 | | unary expr | 3 | `a?`, `a??`, `not a`|
190 | | `.`(inverted) | 4 | `a . add` |
191 | | function call | 4 | `f 1 2`, `call f` |
192 | |expr with trailers| 5 | `a'name`, `a![0]` |
193 | | atom | 6 | lambda and so on |
194 |
195 |
196 |
197 | ```
198 | >> let concat = {|x| print x ; {|y| print y ;x ++ y}}
199 | >> "left" . concat "right"
200 | left
201 | => "leftright"
202 | >> "right" then concat "left"
203 | right
204 | => "rightleft"
205 | ```
206 | Both of them are left associative.
207 |
208 |
209 | ## For Comprehension
210 |
211 | Make a generator
212 | ```
213 | >> from [1, 2, 3] yield {|x| x+1} . list
214 | => [2, 3, 4]
215 | >> from [1, 2, 3], ["a", "b", "c"] yield {|a, b| (a, b)} . list
216 | => [(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b'), (2, 'c'), (3, 'a'), (3, 'b'), (3, 'c')]
217 | ```
218 |
219 | - for-loop: `not yield`
220 |
221 | ```
222 | >> from [1, 2, 3] not yield {|x|
223 | print x
224 | x
225 | }
226 | 1
227 | 2
228 | 3
229 | => 3
230 | ```
231 |
232 | ## Pattern Matching
233 |
234 | Very powerful — it can even serve as an alternative to `if-else`.
235 |
236 | ```
237 | case (1, 2, 3)
238 | as 1, ...a when
239 | a![0] == 2
240 |
241 | => a
242 |
243 | as _ => raise
244 | end
245 | ```
246 | You can use it for only destruction:
247 | ```
248 | >> case (1, 2, 3) as (a, b, c) end
249 | >> print a*b*c
250 | => 6
251 | >> let t = case (1, 2, (3, 5, (6, 5))) as [1, 2, [3, 5, [6,...a]]] => a end
252 | >> t
253 | =>
254 | >> t. tuple
255 | => (5, )
256 | >> case
257 | %{1: [1, 2, 3], 2: [2, 3, 4]}
258 | as
259 | {1: a, 2: b}
260 | end
261 | >> (a, b)
262 | => ([1, 2, 3], [2, 3, 4])
263 | ```
264 |
265 |
266 | The return of `pack` destruction (`...a`) is of type tuple when the length is 1; otherwise it's of type
267 | tuple_iterator.
268 |
269 | ## Into Statement
270 |
271 | It is similar to `goto`, but with some constraints: you cannot jump back to previous steps,
272 | but you can jump out of the current closure until you reach a specific one.
273 |
274 | If you want to use `into` in `for comprehension`
275 | ```
276 | >> @here
277 | >> from [1, 2, 3] not yield {|x|
278 | into here
279 | print x
280 | }
281 | >> from [1, 2, 3] not yield {|x|
282 | print x
283 | }
284 | 1
285 | 2
286 | 3
287 | => 3
288 | ```
289 |
290 | Take care that the literal construction of `list`, `tuple`, `set` and `dict` will ignore the `into` keyword, as well as `from ... not yield ...` for-loop.
291 |
292 |
293 |
294 | ## Collections
295 |
296 | List: Just Python `list`
297 | ```
298 | >> let l1 = [
299 | 1,
300 | 2,
301 | c where
302 | let c = 3
303 | end,
304 | 4
305 | ]
306 |
307 | >> l1 ++ [1, 2, 3] then list
308 | => [1, 2, 3, 4, 1, 2, 3]
309 | >> l1 -- [1, 2, 3]
310 | => [4]
311 | ```
312 |
313 | Dict: Just Python `dict`
314 | ```
315 | >> let d1 = %{
316 | 1: 2,
317 | expr where
318 | let expr = 3
319 | end : 4
320 | }
321 | ```
322 |
323 | Set is also Python `set` as well as `tuple`.
324 |
325 | ## OOP Support
326 |
327 | - Access Member and Index
328 | ```
329 | # access member
330 | >> import math
331 | >> math 'pi
332 | => 3.141592653589793
333 | >> math 'pi '__class__
334 | =>
335 |
336 | # index
337 | >> import numpy as np
338 | >> let arr_cons = np 'ndarray
339 | >> arr_cons [1, 2, 3]
340 | => array([[1, 2, 3]])
341 | >> (arr_cons [[1, 2, 3]]) . slice [0, (1, 2)]
342 | => array([2])
343 | >> (arr_cons [[1, 2, 3]]) ![indexer [0, (1, 2)]]
344 | => array([2])
345 | >> (arr_cons [[1, 2, 3]]) ![0]
346 | => array([1, 2, 3])
347 | ```
348 |
349 | [class.rem](https://github.com/thautwarm/Rem/blob/master/example_source_codes/class.rem)
350 | ```
351 | /* define class here */
352 | let class = {
353 | |fn_dict|
354 | # constructor
355 | {
356 | let self = call Object
357 | from
358 | fn_dict 'items then call
359 | not yield {
360 | |tp|
361 | case tp as k, v
362 | =>
363 | self. set k (v self)
364 | end
365 | }
366 | self
367 | }
368 | }
369 |
370 |
371 | # spec
372 | let cls1 = class %{
373 | "go": {|x, y| y},
374 | }
375 |
376 | let inst = call cls1
377 |
378 | inst'go 1
379 | ```
380 |
381 |
382 | ## Chinese Programming
383 |
384 | 附: 中英文token对照
385 |
386 | | English | 中文 |
387 | | ------- | ----- |
388 | | then | 然后 |
389 | | when | 当 |
390 | | and | 并且 |
391 | | or | 或者 |
392 | | in | 含于 |
393 | | not | 非 |
394 | | case | 对于 |
395 | | as | 作为 |
396 | | end | 结束 |
397 | | where | 其中 |
398 | | from | 从 |
399 | | import | 导入 |
400 | | yield | 生成 |
401 | | into | 跳跃到 |
402 | | let | 使/让 |
403 | | True | 真 |
404 | | False | 假 |
405 | | None | 空 |
406 | | is | 是 |
407 | | `.` | 之 |
408 |
409 |
410 |
411 |
412 |
413 |
414 |
415 |
416 |
--------------------------------------------------------------------------------
/remlang/compiler/rem_parser.py:
--------------------------------------------------------------------------------
1 | # This file is automatically generated by EBNFParser.
2 | from Ruikowa.ObjectRegex.Tokenizer import unique_literal_cache_pool, regex_matcher, char_matcher, str_matcher, Tokenizer
3 | from Ruikowa.ObjectRegex.Node import AstParser, Ref, SeqParser, LiteralValueParser as L, LiteralNameParser, Undef
4 | namespace = globals()
5 | recur_searcher = set()
6 | token_table = ((unique_literal_cache_pool["auto_const"], char_matcher(('&'))),
7 | (unique_literal_cache_pool["newline"], regex_matcher('\n+')),
8 | (unique_literal_cache_pool["space"], regex_matcher('\s+')),
9 | (unique_literal_cache_pool["symbol"], regex_matcher('[a-zA-Z\u4e00-\u9fa5\u3040-\u309f\u30a0-\u30ff_]{1}[a-zA-Z\u4e00-\u9fa5\u3040-\u309f\u30a0-\u30ff\d_]*')),
10 | (unique_literal_cache_pool["keyword"], str_matcher(('yield', 'where', 'when', 'then', 'or', 'not', 'let', 'is', 'into', 'in', 'import', 'from', 'end', 'case', 'as', 'and', 'True', 'None', 'False'))),
11 | (unique_literal_cache_pool["string"], regex_matcher('"([^\"]+|\\.)*?"')),
12 | (unique_literal_cache_pool["comments"], regex_matcher('(#.*)|(((/\*)+?[\w\W]+?(\*/)+))')),
13 | (unique_literal_cache_pool["number"], regex_matcher('0[Xx][\da-fA-F]+|\d+(?:\.\d+|)(?:E\-{0,1}\d+|)')),
14 | (unique_literal_cache_pool["operator"], str_matcher(('||', '|>', '^^', '>>', '>=', '==', '<=', '<<', '<-', '::', '//', '--', '++', '**', '&&', '!='))),
15 | (unique_literal_cache_pool["operator"], char_matcher(('^', '>', '<', '/', '-', '+', '*', '&', '%'))),
16 | (unique_literal_cache_pool["auto_const"], str_matcher(('from', 'as', '...'))),
17 | (unique_literal_cache_pool["auto_const"], char_matcher(('.'))),
18 | (unique_literal_cache_pool["auto_const"], str_matcher(('import'))),
19 | (unique_literal_cache_pool["auto_const"], char_matcher((',', '*', ')', '('))),
20 | (unique_literal_cache_pool["auto_const"], str_matcher(('True', 'None', 'False'))),
21 | (unique_literal_cache_pool["auto_const"], char_matcher(('_'))),
22 | (unique_literal_cache_pool["auto_const"], char_matcher(('}', '|', '{', ']', '[', ':'))),
23 | (unique_literal_cache_pool["auto_const"], str_matcher(('=>'))),
24 | (unique_literal_cache_pool["auto_const"], str_matcher(('let', 'end', '++'))),
25 | (unique_literal_cache_pool["auto_const"], char_matcher(('!'))),
26 | (unique_literal_cache_pool["auto_const"], str_matcher(('\''))),
27 | (unique_literal_cache_pool["suffix"], str_matcher(('??'))),
28 | (unique_literal_cache_pool["suffix"], char_matcher(('?'))),
29 | (unique_literal_cache_pool["auto_const"], str_matcher(('not'))),
30 | (unique_literal_cache_pool["auto_const"], char_matcher(('-', '+'))),
31 | (unique_literal_cache_pool["auto_const"], str_matcher(('where', 'when', 'or', 'is', 'in', 'case', 'and'))),
32 | (unique_literal_cache_pool["auto_const"], char_matcher(('`'))),
33 | (unique_literal_cache_pool["auto_const"], str_matcher(('then'))),
34 | (unique_literal_cache_pool["auto_const"], char_matcher((';', '$'))),
35 | (unique_literal_cache_pool["auto_const"], char_matcher(('='))),
36 | (unique_literal_cache_pool["auto_const"], char_matcher(('%'))),
37 | (unique_literal_cache_pool["auto_const"], str_matcher(('yield'))),
38 | (unique_literal_cache_pool["auto_const"], char_matcher(('@'))),
39 | (unique_literal_cache_pool["auto_const"], str_matcher(('into'))),)
40 |
41 | class UNameEnum:
42 | # names
43 |
44 | refName = unique_literal_cache_pool['refName']
45 | newline = unique_literal_cache_pool['newline']
46 | space = unique_literal_cache_pool['space']
47 | symbol = unique_literal_cache_pool['symbol']
48 | keyword_then = unique_literal_cache_pool['then']
49 | keyword_when = unique_literal_cache_pool['when']
50 | keyword_and = unique_literal_cache_pool['and']
51 | keyword_or = unique_literal_cache_pool['or']
52 | keyword_not = unique_literal_cache_pool['not']
53 | keyword_in = unique_literal_cache_pool['in']
54 | keyword_case = unique_literal_cache_pool['case']
55 | keyword_as = unique_literal_cache_pool['as']
56 | keyword_end = unique_literal_cache_pool['end']
57 | keyword_where = unique_literal_cache_pool['where']
58 | keyword_from = unique_literal_cache_pool['from']
59 | keyword_yield = unique_literal_cache_pool['yield']
60 | keyword_into = unique_literal_cache_pool['into']
61 | keyword_let = unique_literal_cache_pool['let']
62 | keyword_True = unique_literal_cache_pool['True']
63 | keyword_False = unique_literal_cache_pool['False']
64 | keyword_None = unique_literal_cache_pool['None']
65 | keyword_import = unique_literal_cache_pool['import']
66 | keyword_is = unique_literal_cache_pool['is']
67 | keyword = unique_literal_cache_pool['keyword']
68 | string = unique_literal_cache_pool['string']
69 | comments = unique_literal_cache_pool['comments']
70 | number = unique_literal_cache_pool['number']
71 | operator = unique_literal_cache_pool['operator']
72 | T = unique_literal_cache_pool['T']
73 | importAs = unique_literal_cache_pool['importAs']
74 | fromImportStmt = unique_literal_cache_pool['fromImportStmt']
75 | importStmt = unique_literal_cache_pool['importStmt']
76 | singleImportStmt = unique_literal_cache_pool['singleImportStmt']
77 | remImport = unique_literal_cache_pool['remImport']
78 | const = unique_literal_cache_pool['const']
79 | simpleArgs = unique_literal_cache_pool['simpleArgs']
80 | patMany = unique_literal_cache_pool['patMany']
81 | iterMark = unique_literal_cache_pool['iterMark']
82 | pat = unique_literal_cache_pool['pat']
83 | noZipPat = unique_literal_cache_pool['noZipPat']
84 | noZipPatMany = unique_literal_cache_pool['noZipPatMany']
85 | tuplePat = unique_literal_cache_pool['tuplePat']
86 | kvPat = unique_literal_cache_pool['kvPat']
87 | kvPatMany = unique_literal_cache_pool['kvPatMany']
88 | dictPat = unique_literal_cache_pool['dictPat']
89 | lambdef = unique_literal_cache_pool['lambdef']
90 | atom = unique_literal_cache_pool['atom']
91 | trailer = unique_literal_cache_pool['trailer']
92 | atomExpr = unique_literal_cache_pool['atomExpr']
93 | invExp = unique_literal_cache_pool['invExp']
94 | invTrailer = unique_literal_cache_pool['invTrailer']
95 | suffix = unique_literal_cache_pool['suffix']
96 | factor = unique_literal_cache_pool['factor']
97 | binExp = unique_literal_cache_pool['binExp']
98 | caseExp = unique_literal_cache_pool['caseExp']
99 | asExp = unique_literal_cache_pool['asExp']
100 | testExpr = unique_literal_cache_pool['testExpr']
101 | where = unique_literal_cache_pool['where']
102 | expr = unique_literal_cache_pool['expr']
103 | thenTrailer = unique_literal_cache_pool['thenTrailer']
104 | applicationTrailer = unique_literal_cache_pool['applicationTrailer']
105 | statements = unique_literal_cache_pool['statements']
106 | statement = unique_literal_cache_pool['statement']
107 | let = unique_literal_cache_pool['let']
108 | exprMany = unique_literal_cache_pool['exprMany']
109 | unpack = unique_literal_cache_pool['unpack']
110 | exprCons = unique_literal_cache_pool['exprCons']
111 | kv = unique_literal_cache_pool['kv']
112 | kvMany = unique_literal_cache_pool['kvMany']
113 | kvCons = unique_literal_cache_pool['kvCons']
114 | listCons = unique_literal_cache_pool['listCons']
115 | tupleCons = unique_literal_cache_pool['tupleCons']
116 | setCons = unique_literal_cache_pool['setCons']
117 | dictCons = unique_literal_cache_pool['dictCons']
118 | compreh = unique_literal_cache_pool['compreh']
119 | label = unique_literal_cache_pool['label']
120 | into = unique_literal_cache_pool['into']
121 | file = unique_literal_cache_pool['file']
122 |
123 | cast_map = {'then': unique_literal_cache_pool['keyword'], 'when': unique_literal_cache_pool['keyword'], 'and': unique_literal_cache_pool['keyword'], 'or': unique_literal_cache_pool['keyword'], 'not': unique_literal_cache_pool['keyword'], 'in': unique_literal_cache_pool['keyword'], 'case': unique_literal_cache_pool['keyword'], 'as': unique_literal_cache_pool['keyword'], 'end': unique_literal_cache_pool['keyword'], 'where': unique_literal_cache_pool['keyword'], 'from': unique_literal_cache_pool['keyword'], 'yield': unique_literal_cache_pool['keyword'], 'into': unique_literal_cache_pool['keyword'], 'let': unique_literal_cache_pool['keyword'], 'True': unique_literal_cache_pool['keyword'], 'False': unique_literal_cache_pool['keyword'], 'None': unique_literal_cache_pool['keyword'], 'import': unique_literal_cache_pool['keyword'], 'is': unique_literal_cache_pool['keyword']}
124 | token_func = lambda _: Tokenizer.from_raw_strings(_, token_table, ({"space", "comments"}, {}),cast_map=cast_map)
125 | newline = LiteralNameParser('newline')
126 | space = LiteralNameParser('space')
127 | symbol = LiteralNameParser('symbol')
128 | keyword = LiteralNameParser('keyword')
129 | string = LiteralNameParser('string')
130 | comments = LiteralNameParser('comments')
131 | number = LiteralNameParser('number')
132 | operator = LiteralNameParser('operator')
133 | suffix = LiteralNameParser('suffix')
134 | refName = AstParser([SeqParser(['&'], at_least=0,at_most=1), Ref('symbol')],
135 | name="refName",
136 | to_ignore=({}, {}))
137 | T = AstParser([SeqParser([Ref('newline')], at_least=1,at_most=Undef)],
138 | name="T",
139 | to_ignore=({}, {}))
140 | importAs = AstParser([Ref('symbol'), SeqParser(['as', Ref('symbol')], at_least=0,at_most=1)],
141 | name="importAs",
142 | to_ignore=({}, {}))
143 | fromImportStmt = AstParser(['from', SeqParser(['...'], [SeqParser(['.'], at_least=1,at_most=1)], at_least=0,at_most=1), Ref('symbol'), SeqParser(['.', Ref('symbol')], at_least=0,at_most=Undef), 'import', SeqParser(['*'], [SeqParser([Ref('importAs')], at_least=1,at_most=Undef)], ['(', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('importAs'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('importAs')], at_least=0,at_most=Undef), ')'], at_least=1,at_most=1)],
144 | name="fromImportStmt",
145 | to_ignore=({}, {}))
146 | importStmt = AstParser([Ref('singleImportStmt')],
147 | [Ref('fromImportStmt')],
148 | [Ref('remImport')],
149 | name="importStmt",
150 | to_ignore=({}, {}))
151 | singleImportStmt = AstParser(['import', Ref('simpleArgs'), SeqParser(['as', Ref('symbol')], at_least=0,at_most=1)],
152 | name="singleImportStmt",
153 | to_ignore=({}, {}))
154 | remImport = AstParser(['import', Ref('string'), SeqParser(['as', Ref('symbol')], at_least=0,at_most=1)],
155 | name="remImport",
156 | to_ignore=({}, {'import', 'as'}))
157 | const = AstParser(['True'],
158 | ['False'],
159 | ['None'],
160 | name="const",
161 | to_ignore=({}, {}))
162 | simpleArgs = AstParser([Ref('symbol'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('symbol')], at_least=0,at_most=Undef)],
163 | name="simpleArgs",
164 | to_ignore=({"T"}, {','}))
165 | patMany = AstParser([Ref('pat'), SeqParser([SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('pat')], at_least=1,at_most=Undef), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), SeqParser([Ref('iterMark')], at_least=0,at_most=1)],
166 | name="patMany",
167 | to_ignore=({"T"}, {','}))
168 | iterMark = AstParser([','],
169 | name="iterMark",
170 | to_ignore=({}, {}))
171 | pat = AstParser([SeqParser(['...'], at_least=0,at_most=1), '_'],
172 | [SeqParser(['...'], at_least=0,at_most=1), Ref('refName')],
173 | [Ref('tuplePat')],
174 | [Ref('dictPat')],
175 | [Ref('string')],
176 | [Ref('const')],
177 | [Ref('number')],
178 | name="pat",
179 | to_ignore=({}, {}))
180 | noZipPat = AstParser([Ref('refName')],
181 | [Ref('tuplePat')],
182 | [Ref('dictPat')],
183 | [Ref('string')],
184 | [Ref('const')],
185 | [Ref('number')],
186 | name="noZipPat",
187 | to_ignore=({}, {}))
188 | noZipPatMany = AstParser([Ref('noZipPat'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('noZipPat')], at_least=0,at_most=Undef)],
189 | name="noZipPatMany",
190 | to_ignore=({"T"}, {','}))
191 | tuplePat = AstParser(['(', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('patMany'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), ')'],
192 | ['[', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('patMany'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), ']'],
193 | name="tuplePat",
194 | to_ignore=({"T"}, {'(', ')', '[', ']'}))
195 | kvPat = AstParser([Ref('expr'), ':', Ref('noZipPat')],
196 | name="kvPat",
197 | to_ignore=({}, {':'}))
198 | kvPatMany = AstParser([Ref('kvPat'), SeqParser([SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('kvPat')], at_least=1,at_most=Undef), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), SeqParser([','], at_least=0,at_most=1)],
199 | name="kvPatMany",
200 | to_ignore=({"T"}, {','}))
201 | dictPat = AstParser(['{', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('kvPatMany'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), '}'],
202 | name="dictPat",
203 | to_ignore=({"T"}, {'{', '}'}))
204 | lambdef = AstParser(['{', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser(['|', SeqParser([SeqParser([Ref('simpleArgs')], [Ref('noZipPatMany')], at_least=1,at_most=1), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), '|'], [SeqParser([Ref('simpleArgs')], [Ref('noZipPatMany')], at_least=1,at_most=1), '=>'], at_least=0,at_most=1), SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('statements'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), '}'],
205 | ['from', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([SeqParser([Ref('simpleArgs')], [Ref('noZipPatMany')], at_least=1,at_most=1), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), 'let', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('statements'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), 'end'],
206 | name="lambdef",
207 | to_ignore=({"T"}, {'{', '}', '|', ',', 'from', 'let', 'end', '=>'}))
208 | atom = AstParser([Ref('refName')],
209 | [Ref('const')],
210 | [Ref('string'), SeqParser(['++', Ref('string')], at_least=0,at_most=Undef)],
211 | [Ref('number')],
212 | ['(', Ref('expr'), ')'],
213 | [Ref('listCons')],
214 | [Ref('tupleCons')],
215 | [Ref('setCons')],
216 | [Ref('dictCons')],
217 | [Ref('compreh')],
218 | [Ref('lambdef')],
219 | name="atom",
220 | to_ignore=({}, {'++'}))
221 | trailer = AstParser(['!', '[', Ref('exprCons'), ']'],
222 | ['\'', Ref('symbol')],
223 | name="trailer",
224 | to_ignore=({}, {'!', '[', ']', '\''}))
225 | atomExpr = AstParser([Ref('atom'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), Ref('trailer')], at_least=0,at_most=Undef)],
226 | name="atomExpr",
227 | to_ignore=({"T"}, {}))
228 | invExp = AstParser([Ref('atomExpr'), SeqParser([Ref('atomExpr')], [SeqParser([Ref('T')], at_least=0,at_most=1), Ref('invTrailer')], at_least=0,at_most=Undef)],
229 | name="invExp",
230 | to_ignore=({"T"}, {}))
231 | invTrailer = AstParser(['.', Ref('atomExpr')],
232 | name="invTrailer",
233 | to_ignore=({}, {'.'}))
234 | factor = AstParser([SeqParser(['not'], ['+'], ['-'], at_least=0,at_most=1), Ref('invExp'), SeqParser([Ref('suffix')], at_least=0,at_most=1)],
235 | name="factor",
236 | to_ignore=({}, {}))
237 | binExp = AstParser([Ref('factor'), SeqParser([SeqParser([Ref('operator')], ['or'], ['and'], ['in'], ['is'], at_least=1,at_most=1), Ref('factor')], at_least=0,at_most=Undef)],
238 | name="binExp",
239 | to_ignore=({}, {}))
240 | caseExp = AstParser(['case', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('expr'), SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('asExp')], at_least=0,at_most=Undef), 'end'],
241 | name="caseExp",
242 | to_ignore=({"T"}, {'case', 'end'}))
# --- Auto-generated parser combinators (continuation) ---
# Presumably emitted by the Ruikowa parser generator from `rem.grammar`
# (TODO confirm) — do not edit by hand; regenerate from the grammar.
# Each AstParser is registered under `name=` and `to_ignore=(rules, literals)`
# lists the token kinds/literals dropped from the resulting ast.
asExp = AstParser([SeqParser(['as', Ref('patMany')], at_least=0,at_most=1), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), 'when', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('expr')], at_least=0,at_most=1), SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser(['=>', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('statements')], at_least=0,at_most=1)], at_least=0,at_most=1)],
                  name="asExp",
                  to_ignore=({"T"}, {'=>', 'as', 'when'}))
testExpr = AstParser([Ref('caseExp')],
                  [Ref('binExp')],
                  name="testExpr",
                  to_ignore=({}, {}))
where = AstParser(['where', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('statements'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), 'end'],
                  name="where",
                  to_ignore=({"T"}, {'where', 'end'}))
expr = AstParser(['`', Ref('expr')],
                  [Ref('testExpr'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), Ref('thenTrailer')], [SeqParser([Ref('T')], at_least=0,at_most=1), Ref('applicationTrailer')], at_least=0,at_most=Undef), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), Ref('where')], at_least=0,at_most=1)],
                  name="expr",
                  to_ignore=({"T"}, {}))
thenTrailer = AstParser(['then', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('testExpr')],
                  name="thenTrailer",
                  to_ignore=({"T"}, {'then'}))
applicationTrailer = AstParser(['$', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('testExpr')],
                  name="applicationTrailer",
                  to_ignore=({"T"}, {'$'}))
statements = AstParser([Ref('statement'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), Ref('statement')], at_least=0,at_most=Undef)],
                  name="statements",
                  to_ignore=({"T"}, {}))
statement = AstParser([SeqParser([Ref('label')], [Ref('let')], [Ref('expr')], [Ref('into')], [Ref('importStmt')], at_least=1,at_most=1), SeqParser([';'], at_least=0,at_most=1)],
                  name="statement",
                  to_ignore=({}, {}))
let = AstParser([SeqParser(['let'], at_least=0,at_most=1), Ref('symbol'), SeqParser([Ref('trailer')], at_least=0,at_most=Undef), '=', Ref('expr')],
                  name="let",
                  to_ignore=({}, {'=', '!'}))
exprMany = AstParser([Ref('expr'), SeqParser([SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('expr')], at_least=1,at_most=Undef)], at_least=0,at_most=1)],
                  name="exprMany",
                  to_ignore=({"T"}, {','}))
unpack = AstParser(['...', Ref('expr')],
                  name="unpack",
                  to_ignore=({}, {'...'}))
exprCons = AstParser([Ref('exprMany'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('unpack'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('exprMany')], at_least=0,at_most=1)], at_least=0,at_most=Undef), SeqParser([','], at_least=0,at_most=1)],
                  name="exprCons",
                  to_ignore=({"T"}, {','}))
kv = AstParser([Ref('expr'), ':', Ref('expr')],
                  name="kv",
                  to_ignore=({"T"}, {':'}))
kvMany = AstParser([Ref('kv'), SeqParser([SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('kv')], at_least=1,at_most=Undef), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1)],
                  name="kvMany",
                  to_ignore=({"T"}, {','}))
kvCons = AstParser([Ref('kvMany'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('unpack'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ',', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('kvMany')], at_least=0,at_most=1)], at_least=0,at_most=Undef), SeqParser([','], at_least=0,at_most=1)],
                  name="kvCons",
                  to_ignore=({"T"}, {','}))
listCons = AstParser(['[', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('exprCons'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), ']'],
                  name="listCons",
                  to_ignore=({"T"}, {'[', ']'}))
tupleCons = AstParser(['(', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('exprCons'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), ')'],
                  name="tupleCons",
                  to_ignore=({"T"}, {'(', ')'}))
setCons = AstParser(['%', '{', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('exprCons'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), '}'],
                  name="setCons",
                  to_ignore=({"T"}, {'%', '{', '}'}))
dictCons = AstParser(['%', '{', SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('kvCons'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1), '}'],
                  name="dictCons",
                  to_ignore=({"T"}, {'%', '{', '}'}))
compreh = AstParser(['from', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('exprMany'), SeqParser([SeqParser([Ref('T')], at_least=0,at_most=1), ('keyword', 'not')], at_least=0,at_most=1), SeqParser([Ref('T')], at_least=0,at_most=1), 'yield', SeqParser([Ref('T')], at_least=0,at_most=1), Ref('lambdef')],
                  name="compreh",
                  to_ignore=({"T"}, {'from', 'yield'}))
label = AstParser(['@', Ref('symbol')],
                  name="label",
                  to_ignore=({}, {'@'}))
into = AstParser(['into', Ref('symbol')],
                  name="into",
                  to_ignore=({}, {'into'}))
file = AstParser([SeqParser([Ref('T')], at_least=0,at_most=1), SeqParser([Ref('statements'), SeqParser([Ref('T')], at_least=0,at_most=1)], at_least=0,at_most=1)],
                  name="file",
                  to_ignore=({"T"}, {}))
# resolve all Ref(...) forward references and register the rules
file.compile(namespace, recur_searcher)
--------------------------------------------------------------------------------
/remlang/compiler/ast.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | from Ruikowa.ObjectRegex.ASTDef import Ast
3 | from Ruikowa.ErrorHandler import ErrorHandler
4 | from Ruikowa.ObjectRegex.MetaInfo import MetaInfo
5 |
6 | from .reference_collections import ReferenceDict, ParameterProxy
7 | from .order_dual_opt import order_dual_opt, BinExp, bin_op_fns
8 | from .rem_parser import file, token_table, UNameEnum, Tokenizer
9 | from .utils import flatten
10 | from .module import default_env, make_new_module, md5, ModuleAgent
11 | from .pattern_matching import pattern_match_varargs, pattern_matching, unmatched, import_ast_for_expr
12 | from .control_flow import BreakUntil, Macro
13 | from .err import Trace
14 | from .tk import keywords_map
15 | from .msg import StatusConstructor
16 |
# Tokenize raw Rem source: drop "space"/"comments" tokens and cast keyword
# lexemes through `keywords_map`.  NOTE(review): the `is` comparisons on
# token strings downstream presumably rely on the strings being interned
# here — confirm in compiler/tk.py.
token_func = lambda _: Tokenizer.from_raw_strings(_, token_table, ({"space", "comments"}, {}), cast_map=keywords_map)
# Parse entry point: the generated `file` rule wrapped in Ruikowa's
# error-reporting handler.
rem_parser = ErrorHandler(file.match, token_func)
19 |
20 |
21 | # default settings. eval
def add_exec_func(to: 'ReferenceDict'):
    """Install the compiler and an `exec` entry into the module env `to`.

    `to['exec']` parses a source string with `to['__compiler__']` and
    evaluates the resulting ast with `to` as the context.
    """
    to['__compiler__'] = rem_parser

    def run_source(src):
        parsed = to['__compiler__'].from_source_code(
            '',
            src,
            MetaInfo(fileName=''))
        return ast_for_file(parsed, ctx=to)

    to['exec'] = run_source
30 |
31 |
# this is the main module
main = make_new_module('main', default_env)
add_exec_func(to=main)

# Constant literals: looked up by the second character of the const token
# in `ast_for_atom` ('$r' -> True, '$a' -> False, '$o' -> None).
const_map = {'r': True, 'a': False, 'o': None}
37 |
38 |
class RefName:
    """A symbol referenced by name rather than by value.

    Produced by `ast_for_atom` for a two-element `refName` node; wraps the
    symbol's name so downstream code (presumably pattern matching — confirm)
    can treat it as a name instead of a resolved value.
    """

    def __init__(self, name):
        # the referenced symbol's source string
        self.name = name

    def __repr__(self):
        # added for debuggability; no caller depends on the default repr
        return f'{type(self).__name__}({self.name!r})'
42 |
43 |
def ast_for_statements(statements: Ast, ctx: ReferenceDict):
    """
    statements Throw [T]
    ::= statement ([T] statement)*;

    Evaluate the statements in order and return the last result.
    A `BreakUntil` whose name matches the current `@label` binding in
    `ctx` is swallowed (evaluation continues); any other propagates.
    """
    result = None
    for stmt in statements:
        try:
            result = ast_for_statement(stmt, ctx)
        except BreakUntil as exc:
            if exc.name != ctx.get_local('@label'):
                raise
    return result
57 |
58 |
def ast_for_statement(statement: Ast, ctx: ReferenceDict):
    """
    statement
    ::= (label | let | expr | into | importExpr) [';'];

    Dispatch on the child rule name and evaluate one statement.
    Returns the value only for a bare `expr` without a trailing ';';
    everything else returns None.  `BreakUntil` passes through untouched;
    any other exception is wrapped in `Trace` with the statement's ast.
    """
    # assert statement.name == 'statement'

    sexpr = statement[0]
    s_name: str = sexpr.name
    # NOTE(review): the `is` comparisons against UNameEnum members below
    # presumably rely on interned rule-name strings — confirm in rem_parser.
    try:
        if s_name is UNameEnum.expr: # expr
            # RuikoEBNF:
            # expr ::= testExpr (thenTrailer | applicationTrailer)* [where];
            if len(statement) is 2:
                # end with ';' then return None
                ast_for_expr(sexpr, ctx)
            else:
                return ast_for_expr(sexpr, ctx)

        elif s_name is UNameEnum.label:
            # `@name`: record the current loop/block label in the context
            [symbol] = sexpr
            assert symbol.name is UNameEnum.symbol
            ctx.set_local('@label', symbol.string)

        elif s_name is UNameEnum.let:
            # RuikoEBNF:
            # let Throw ['=' '!']
            #     ::= ['`let`'] symbol ['!' trailer+] '=' expr;
            to_new_ctx = False

            if sexpr[0].string is UNameEnum.keyword_let:
                # bind a new var in current environment(closure).
                to_new_ctx = True
                _, symbol, *trailers, expr = sexpr
            else:

                # For the readability of source codes,
                # pattern matching using list destruction is better.
                symbol, *trailers, expr = sexpr

            res = ast_for_expr(expr, ctx)
            if not trailers:
                # let symbol = ...
                ctx.set_local(symbol.string, res) if to_new_ctx else ctx.set_nonlocal(symbol.string, res)
                return

            # let symbol 'attr = ... | let symbol ![item] = ...
            ref = ctx.get_nonlocal(symbol.string)
            *fst_n, [last] = trailers
            # `trailers` is a list of trailer.
            # RuikoEBNF:
            # trailer Throw ['[' ']' '.']
            #     ::= '[' exprCons ']' | '\'' symbol;

            # walk every trailer but the last to reach the assignment target
            for each, in fst_n:
                if each.name is UNameEnum.symbol: # symbol
                    ref = getattr(ref, each.string)

                else: # [exprCons]
                    # a one-element exprCons indexes by the value itself,
                    # several elements index by the tuple
                    item = tuple(ast_for_expr_cons(each, ctx))
                    if len(item) is 1:
                        item = item[0]
                    ref = ref[item]

            if last.name == UNameEnum.symbol: # symbol
                # trailer = . symbol
                setattr(ref, last.string, res)
            else:
                # trailer = [exprCons]
                item = tuple(ast_for_expr_cons(last, ctx))
                if len(item) is 1:
                    item = item[0]
                ref[item] = res

        # let expr return Nothing
        elif s_name is UNameEnum.into:
            # RuikoEBNF:
            # into Throw ['`into`']
            #     ::= '`into`' symbol;
            [symbol] = sexpr
            # TODO with result
            raise BreakUntil(symbol.string)

        elif s_name is UNameEnum.importStmt:
            # RuikoEBNF:
            # importExpr
            #     ::= singleImportExpr | fromImportExpr | remImport;
            [branch] = sexpr

            if branch.name is not UNameEnum.remImport:
                # Python import: replay the raw token text as a Python
                # `import`/`from ... import` statement in the module's scope.
                exec(' '
                     .join
                     (map(lambda _: _.string,
                          flatten(
                              branch)))
                     .strip(),
                     ctx.local)
                return
            import os
            # Rem import: `import "path" [as name]`; the default module
            # name is the file's basename without extension.
            if len(branch) is 2:
                string, symbol = branch
                path = eval(string.string)
                name = symbol.string
            else:
                [string] = branch
                path = eval(string.string)
                name = os.path.split(
                    os.path.splitext(path)[0])[1]

            src_code, md5_v = md5(path)
            manager = ctx.module_manager
            managed_modules = manager['@modules']

            if md5_v == managed_modules.get(path):
                # imported and file not changed.
                # so do not import again
                return

            managed_modules[path] = md5_v
            env = make_new_module(name, manager, ctx['__compiler__'])
            add_exec_func(to=env)
            ast_for_file(env['__compiler__'].from_source_code(path, src_code, MetaInfo(fileName=path)),
                         env)
            ctx.set_local(name, ModuleAgent(env.local))

        else:
            raise TypeError('unknown statement.')
    except BreakUntil as e:
        raise e
    except Exception as e:
        raise Trace(e, statement)
190 |
191 |
def ast_for_expr(expr: 'Ast', ctx: ReferenceDict):
    """
    expr
    ::= testExpr (thenTrailer | applicationTrailer)*
        [where];

    Evaluate a full expression: the `where` block's statements run first
    (their bindings go into `ctx`), then the head testExpr, then the
    chain of `then`/`$` trailers.
    """
    assert expr.name is UNameEnum.expr

    # a leading raw token is the backquote: '`' expr builds a Macro around
    # the still-unevaluated ast (expanded on reference in ast_for_atom)
    if expr[0].__class__ is Tokenizer:
        return Macro(expr[1])

    if expr[-1].name is UNameEnum.where: # where
        head, *then_trailers, where = expr
        stmts = where[0]
        # run the where-block for its bindings; its value is discarded
        ast_for_statements(stmts, ctx)

    else:
        head, *then_trailers = expr

    res = ast_for_test_expr(head, ctx)

    # """
    # thenTrailer throw ['then' T]
    # ::= 'then' [T] testExpr;
    #
    # applicationTrailer throw ['$']
    # ::= '$' testExpr;
    # """

    # fast path: a single trailer needs no argument stack
    if len(then_trailers) is 1:
        [each] = then_trailers
        arg = ast_for_test_expr(each[0], ctx)
        # `x then f` pipes x into f; `f $ x` applies f to x
        return arg(res) if each.name is UNameEnum.thenTrailer else res(arg)

    # general case: consecutive `$` arguments are stacked and applied in
    # one call; a `then` first flushes the stack, then pipes the result
    stack = []
    for each in then_trailers:
        arg = ast_for_test_expr(each[0], ctx)
        if each.name is UNameEnum.thenTrailer:
            if stack:
                res = res(*stack)
                stack.clear()
            res = arg(res)
            continue
        stack.append(arg)
    if stack:
        res = res(*stack)

    return res
240 |
241 |
def ast_for_test_expr(test: Ast, ctx: ReferenceDict):
    """
    testExpr ::= caseExp | binExp;

    Dispatch to the case-expression or binary-expression evaluator.
    """
    assert test.name is UNameEnum.testExpr
    branch = test[0]
    if branch.name is UNameEnum.caseExp:
        return ast_for_case_expr(branch, ctx)
    return ast_for_bin_expr(branch, ctx)
255 |
256 |
def ast_for_case_expr(case_expr: 'Ast', ctx: 'ReferenceDict'):
    """
    caseExp Throw ['`case`', '`end`', T]
    ::= '`case`' expr [T] asExp* [otherwiseExp] [T] '`end`';

    Evaluate the subject once, then try each `as` branch in order and
    return the first branch's value that is not `unmatched`.
    """
    assert case_expr.name is UNameEnum.caseExp

    subject, *branches = case_expr
    matched_value = ast_for_expr(subject, ctx)

    # do not use None to represent the matching status
    # just think `case None as x => x end` should match.
    for branch in branches:
        outcome = ast_for_as_expr(branch, ctx, matched_value)
        if outcome is not unmatched:
            return outcome
274 |
275 |
def ast_for_as_expr(as_expr: 'Ast', ctx: 'ReferenceDict', test_exp: 'BinExp'):
    """
    asExp Throw ['=>', T, '`as`', '`when`']
    ::= ['`as`' patMany]
        [
            [T] '`when`' [T] expr
        ]
        [T]
        ['=>' [T] [statements]];

    Try one `as ... when ... => ...` branch against the evaluated
    `test_exp`.  Returns `unmatched` when the pattern or guard fails
    (or the branch has no body); otherwise the body's value.
    """
    # assert as_expr.name == 'asExp'

    # all three parts are optional; detect whichever are present
    many = None        # the `as` pattern list
    when = None        # the guard expression
    statements = None  # the `=>` body

    for each in as_expr:

        if each.name is UNameEnum.patMany:
            many = each
        elif each.name is UNameEnum.expr:
            when = each
        elif each.name is UNameEnum.statements:
            statements = each

    try:
        # run pattern and guard in a branched context so a failed
        # attempt does not leak bindings into `ctx`
        new_ctx = ctx.branch()
        if many and not pattern_match_varargs(many, test_exp, new_ctx):
            return unmatched
        if when and not ast_for_expr(when, new_ctx):
            return unmatched

        # commit the pattern bindings, then evaluate the body
        ctx.update(new_ctx.local)
        if not statements:
            return unmatched

        return ast_for_statements(statements, ctx)

    except BreakUntil as e:
        if e.name != ctx.get_local('@label'):
            raise e
317 |
318 |
def ast_for_bin_expr(bin_expr: 'Ast', ctx: 'ReferenceDict'):
    """
    binExp ::= factor ( (operator | 'or' | 'and' | 'in' | 'is') factor)*;

    A single factor evaluates directly; otherwise the flat token/factor
    sequence is regrouped by operator precedence and evaluated as a tree.
    """
    assert bin_expr.name is UNameEnum.binExp

    if len(bin_expr) == 1:
        return ast_for_factor(bin_expr[0], ctx)

    # replace operator tokens by their raw strings, then regroup
    flat = [node.string if node.__class__ is Tokenizer else node
            for node in bin_expr]
    left, op, right = order_dual_opt(flat)
    return parse_bin_exp(left, op, right, ctx)
333 |
334 |
def parse_bin_exp(left, mid, right, ctx: 'ReferenceDict'):
    """Evaluate a precedence-grouped binary tree.

    `left`/`right` are nested `BinExp` nodes or factor asts; `mid` is the
    operator string keying into `bin_op_fns`.  Operands are evaluated
    left to right.
    """
    def eval_side(node):
        if isinstance(node, BinExp):
            return parse_bin_exp(*node, ctx)
        return ast_for_factor(node, ctx)

    return bin_op_fns[mid](eval_side(left), eval_side(right))
348 |
349 |
def ast_for_factor(factor: 'Ast', ctx: 'ReferenceDict'):
    """
    factor ::= [unaryOp] invExp [suffix];

    Evaluate an invocation expression with an optional unary prefix
    ('+', '-', 'not') and an optional suffix ('?' truthiness test,
    '??' not-None test).
    """
    assert factor.name is UNameEnum.factor
    unary_op: 'Tokenizer' = None
    suffix: 'Tokenizer' = None
    n = len(factor)
    # up to three children; identify which optional parts are present
    if n is 3:
        unary_op, inv, suffix = factor

    elif n is 2:
        if factor[-1].name is UNameEnum.suffix:
            inv, suffix = factor
        else:
            unary_op, inv = factor
    else:
        inv, = factor

    res = ast_for_inv_exp(inv, ctx)

    # NOTE(review): the `is` comparisons on token strings below presumably
    # rely on the tokenizer interning them — confirm in compiler/tk.py.
    if suffix:
        if suffix.string is '?':
            # '?': coerce to a boolean by truthiness
            res = True if res else False
        else:
            # '??'
            res = res is not None

    if unary_op:
        if unary_op.string is '+':
            return res
        elif unary_op.string is '-':
            return -res
        else: # not
            return not res

    return res
387 |
388 |
def ast_for_inv_exp(inv: 'Ast', ctx: 'ReferenceDict'):
    """
    invExp ::= atomExpr (atomExpr | invTrailer)*;

    Evaluate an invocation chain: consecutive atomExprs accumulate as
    arguments and are applied to the current result in one call; an
    invTrailer inverts direction, applying its atomExpr TO the result.
    """
    assert inv.name is UNameEnum.invExp
    atom_expr, *inv_trailers = inv
    res = ast_for_atom_expr(atom_expr, ctx)

    # fast path: a single trailer needs no argument stack
    if len(inv_trailers) is 1:
        [each] = inv_trailers
        if each.name is UNameEnum.atomExpr:
            return res(ast_for_atom_expr(each, ctx))
        return ast_for_atom_expr(each[0], ctx)(res)

    stack = []
    for each in inv_trailers:
        if each.name is UNameEnum.atomExpr:
            stack.append(ast_for_atom_expr(each, ctx))
            continue
        # invTrailer: flush the stacked arguments first
        if stack:
            res = res(*stack)
            stack.clear()

        res = (ast_for_atom_expr(each[0], ctx))(res)

    if stack:
        res = res(*stack)

    return res
418 |
419 |
def ast_for_atom_expr(atom_expr: 'Ast', ctx: 'ReferenceDict'):
    """
    atomExpr Throw[T] ::= atom ([T] trailer)*;

    Evaluate an atom, then apply its attribute ('symbol) and indexing
    (![exprCons]) trailers in order.  `BreakUntil` passes through; any
    other exception is wrapped in `Trace` with this ast region.
    """
    assert atom_expr.name is UNameEnum.atomExpr
    atom, *trailers = atom_expr
    res = ast_for_atom(atom, ctx)
    try:
        for each, in trailers:
            # RuikoEBNF
            # trailer Throw ['!' '[' ']' '\'']
            #     ::= '!' '[' exprCons ']' | '\'' symbol;

            if each.name is UNameEnum.symbol:
                # attribute access: 'symbol
                name = each.string
                res = getattr(res, name)
            else:
                # indexing: one element indexes by the value itself,
                # several elements index by the tuple
                item = tuple(ast_for_expr_cons(each, ctx))
                if len(item) is 1:
                    item = item[0]
                res = res[item]
        return res

    except BreakUntil as e:
        raise e
    except Exception as e:
        raise Trace(e, atom_expr)
447 |
448 |
def ast_for_atom(atom: 'Ast', ctx: 'ReferenceDict'):
    """
    atom Throw ['++']
    ::= refName | const | string ('++' string)* | number |
        '(' expr ')'|
        listCons | tupleCons | setCons | dictCons | compreh |
        lambdef;

    Evaluate a single atom node.  NOTE(review): number and string
    literals are evaluated with Python's `eval`; this is safe only as
    long as the tokenizer restricts those tokens to literal forms.
    """
    assert atom.name is UNameEnum.atom
    if len(atom) is 1:
        sexpr = atom[0]
        s_name = sexpr.name
        if s_name is UNameEnum.refName:

            # two children: a quoted reference — return the name itself
            if len(sexpr) is 2:
                return RefName(sexpr[1].string)

            ret = ctx.get_nonlocal(sexpr[0].string)
            # a Macro holds an unevaluated ast: expand it at reference time
            if ret.__class__ is Macro:
                return ast_for_expr(ret.expr, ctx)
            return ret

        elif s_name is UNameEnum.const:
            # second char of the token selects the constant:
            # '$r' -> True, '$a' -> False, '$o' -> None
            sign = sexpr[0].string[1]
            return const_map[sign]

        elif s_name is UNameEnum.compreh:
            # comprehension
            try:
                return ast_for_comprehension(sexpr, ctx)
            except BreakUntil as e:
                if e.name != ctx.get_local('@label'):
                    raise e

        elif s_name is UNameEnum.number:
            return eval(sexpr.string)

        elif s_name is UNameEnum.lambdef:
            return ast_for_lambdef(sexpr, ctx)

        elif s_name is UNameEnum.listCons:
            if not sexpr:
                return list()
            return list(ast_for_expr_cons(sexpr[0], ctx))

        elif s_name is UNameEnum.tupleCons: # tupleCons
            if not sexpr:
                return tuple()
            return tuple(ast_for_expr_cons(sexpr[0], ctx))

        elif s_name is UNameEnum.string:
            return eval(sexpr.string)

        elif s_name is UNameEnum.setCons:
            if not sexpr:
                return set()
            return set(ast_for_expr_cons(sexpr[0], ctx))

        elif s_name is UNameEnum.dictCons: # dictCons
            if not sexpr:
                return dict()
            return dict(ast_for_kv_cons(sexpr[0], ctx))

    elif atom[0].string is '(': # '(' expr ')'
        return ast_for_expr(atom[1], ctx)

    else: # string ('++' string)*
        # '++' concatenation: join the evaluated string literals
        return ''.join(eval(each.string) for each in atom)
517 |
518 |
def ast_for_expr_cons(expr_cons: 'Ast', ctx: 'ReferenceDict'):
    """
    exprCons Throw [',' T] ::= exprMany ([T] ',' [T] unpack [[T] ',' [T] exprMany])* [','];

    Yield the evaluated elements in order; an `unpack` node (`...expr`)
    is flattened into the stream.
    """
    for node in expr_cons:
        if node.name is UNameEnum.unpack: # unpack
            yield from ast_for_expr(node[0], ctx)
        else:
            # an exprMany: evaluate each expression lazily, in order
            yield from (ast_for_expr(sub, ctx) for sub in node)
530 |
531 |
def ast_for_kv_cons(expr_cons: Ast, ctx: ReferenceDict):
    """
    kvCons Throw [',' T] ::= kvMany ([T] ',' [T] unpack [[T] ',' [T] kvMany])* [','];

    Yield (key, value) pairs; an `unpack` node contributes a dict's
    items, or the pairs of any other iterable it evaluates to.
    """
    for node in expr_cons:
        if node.name is UNameEnum.unpack: # unpack
            unpacked = ast_for_expr(node[0], ctx)
            if isinstance(unpacked, dict):
                yield from unpacked.items()
            else:
                yield from unpacked
        else:
            for key_ast, value_ast in node:
                yield ast_for_expr(key_ast, ctx), ast_for_expr(value_ast, ctx)
544 |
545 |
def auto_kv_pairs(expr):
    """Return `expr.items()` for a dict; any other value passes through."""
    if isinstance(expr, dict):
        return expr.items()
    return expr
548 |
549 |
def ast_for_comprehension(comprehension: 'Ast', ctx: 'ReferenceDict'):
    """
    compreh Throw['`from`' '`yield`' T]
    ::= '`from`' [T] exprMany [[T] '`not`'] [T] '`yield`' [T] lambdef;

    Feed the cartesian product of the source collections, tuple by
    tuple, to the lambda.  Without `not` the comprehension is lazy and
    returns a generator of all values; with `not` it is eager and
    returns only the last value.
    """
    # assert comprehension.name == 'compreh'

    new_ctx = ctx.branch()
    # the optional `not` keyword switches eager mode on
    if comprehension[1].name is UNameEnum.keyword:
        collections, _, lambdef = comprehension
        is_yield = False
    else:
        collections, lambdef = comprehension
        is_yield = True

    try:
        # dicts contribute their (key, value) pairs via auto_kv_pairs
        cartesian_prod_collections = itertools.product(
            *(auto_kv_pairs(ast_for_expr(each, new_ctx)) for each in collections))

        lambdef = ast_for_lambdef(lambdef, new_ctx)
        if is_yield:
            return (lambdef(*each) for each in cartesian_prod_collections)

        e = None

        for each in cartesian_prod_collections:
            e = lambdef(*each)

        return e

    except BreakUntil as e:
        if e.name != ctx.get_local('@label'):
            raise e
        # NOTE(review): assumes BreakUntil carries the loop's result as
        # `.res` — confirm against compiler/control_flow.BreakUntil.
        return e.res
584 |
585 |
def ast_for_lambdef(lambdef: 'Ast', ctx: 'ReferenceDict'):
    """
    lambdef Throw ['{', '}', '|', ',', '`from`', '`let`', '`end`', T]
    ::= '{' [T]
        ['|' [singleArgs [T]] '|']
        [T]
        [statements [T]]
        '}'
        |
        '`from`' [T]
        [singleArgs [T]]
        '`let`' [T]
        [statements [T]]
        '`end`'
        ;

    Build a callable from a lambda ast:
      * body only      -> Thunk (args reachable as `_`, `_1`, ...)
      * args only      -> Fn with an empty body
      * args + body    -> Fn, or PatternMatchingFn when the args are
                          patterns (noZipPatMany)
      * neither        -> a constant None-returning lambda
    """
    assert lambdef.name is UNameEnum.lambdef
    n = len(lambdef)
    if n is 1:
        # single child: either an argument list or a bare body
        lambdef, = lambdef
        if lambdef.name is UNameEnum.simpleArgs: # singleArgs
            return Fn(list(each.string for each in lambdef), (), (), ctx)
        else:
            return Thunk(lambdef, ctx)

    elif n is 2:
        args, stmts = lambdef
        if args.name is UNameEnum.noZipPatMany:
            # pattern parameters: wrap the locals in a ParameterProxy so
            # bindings are resolved per call
            args = tuple(_ for [_] in args)
            ctx = ReferenceDict(ParameterProxy(ctx.local), ctx.parent, ctx.module_manager)
            return PatternMatchingFn(args, stmts, ctx)
        return Fn(list(each.string for each in args), (), stmts, ctx)
    else:
        return lambda: None
620 |
621 |
def ast_for_file(file_source_parsed: 'Ast', ctx: 'ReferenceDict'):
    """Evaluate a parsed file's `statements` child; None for empty files."""
    if not file_source_parsed:
        return None
    return ast_for_statements(file_source_parsed[0], ctx)
625 |
626 |
def rem_eval(ast: 'Ast'):
    """Wrap `ast` into a callable that evaluates it under a given context."""
    def evaluate(ctx):
        return ast_for_expr(ast, ctx)

    return evaluate
629 |
630 |
class Thunk:
    """A body-only lambda (`{ ... }` with no parameter list).

    Calling it evaluates the captured statements in a fresh branch of the
    captured context.  Positional arguments are exposed to the body as
    `_` (the first argument) and `_1`, `_2`, ... (all arguments, 1-based,
    so `_` and `_1` alias the same value).
    """
    # fix: dropped the `args` slot — it was declared but never assigned
    # anywhere in this class.
    __slots__ = ['stmts', 'ctx']

    def __init__(self, stmts, ctx):
        self.ctx: ReferenceDict = ctx
        self.stmts: Ast = stmts

    def __call__(self, *args):
        if not args:
            return ast_for_statements(self.stmts, self.ctx.branch())

        # `_` aliases the first argument; `_k` covers all of them (1-based)
        eval_args = {'_': args[0]}
        if len(args) > 1:
            eval_args.update({f'_{i + 1}': arg for i, arg in enumerate(args)})

        return ast_for_statements(self.stmts, self.ctx.branch_with(eval_args))
649 |
650 |
class Fn:
    """A curried function over a `statements` ast.

    `uneval_args` holds the remaining formal parameter names and
    `eval_args` the (name, value) pairs already bound.  Calling with
    fewer arguments than parameters returns a partially-applied `Fn`;
    calling with enough (or more) evaluates the body in a branched
    context, feeding any surplus arguments to the call's result.
    Calling with no arguments while parameters remain yields an 'err'
    status instead of raising.
    """
    __slots__ = ['uneval_args', 'eval_args', 'stmts', 'ctx']

    def __init__(self, uneval, eval_args, stmts, ctx):
        self.uneval_args = uneval
        self.eval_args = eval_args
        self.ctx = ctx
        self.stmts = stmts

    def __call__(self, *args):

        if not args:
            if self.uneval_args:
                # still missing parameters: signal instead of raising
                return StatusConstructor('err')

            new_ctx: 'ReferenceDict' = self.ctx.branch()
            new_ctx.update(self.eval_args)
            return ast_for_statements(self.stmts, new_ctx)

        nargs = len(args)
        n_uneval = len(self.uneval_args)

        if nargs >= n_uneval:
            args_iter = iter(args)
            eval_args = self.eval_args + tuple(zip(self.uneval_args, args_iter))
            new_ctx: 'ReferenceDict' = self.ctx.branch()

            new_ctx.update(eval_args)

            # fix: `==` instead of the original `is` — identity comparison
            # of ints is only reliable within CPython's small-int cache.
            if nargs == n_uneval:
                return ast_for_statements(self.stmts, new_ctx)
            # surplus arguments are applied to the result of the call
            return ast_for_statements(self.stmts, new_ctx)(*args_iter)

        # partial application: bind the leading parameters, keep the rest
        uneval_args_iter = iter(self.uneval_args)
        eval_args = self.eval_args + tuple((k, v) for v, k in zip(args, uneval_args_iter))
        return Fn(tuple(uneval_args_iter), eval_args, self.stmts, self.ctx)
687 |
688 |
class PatternMatchingFn:
    """A curried function whose parameters are match patterns.

    `uneval_pats` holds the remaining pattern asts.  Each call matches
    the given arguments against the leading patterns (binding names into
    a branched context); a failed match yields an 'err' status.  Surplus
    arguments are applied to the result of the call; too few produce a
    new partially-matched `PatternMatchingFn`.
    """
    __slots__ = ['uneval_pats', 'stmts', 'ctx']

    def __init__(self, uneval, stmts, ctx):
        self.uneval_pats = uneval
        self.ctx = ctx
        self.stmts = stmts

    def __call__(self, *args):
        nargs = len(args)
        n_uneval = len(self.uneval_pats)

        if nargs >= n_uneval:
            new_ctx = self.ctx.branch_with(self.ctx.local.catch)
            args_iter = iter(args)
            if not all(pattern_matching(k, v, new_ctx) for k, v in zip(self.uneval_pats, args_iter)):
                return StatusConstructor("err")

            # fix: `==` instead of the original `is` — identity comparison
            # of ints is only reliable within CPython's small-int cache.
            if nargs == n_uneval:
                return ast_for_statements(self.stmts, new_ctx)
            # surplus arguments go to the result of the call
            return ast_for_statements(self.stmts, new_ctx)(*args_iter)

        # partial application: match what we have, carry the remainder
        new_ctx = self.ctx.copy()
        uneval_args_iter = iter(self.uneval_pats)
        if not all(pattern_matching(k, v, new_ctx) for v, k in zip(args, uneval_args_iter)):
            return StatusConstructor("err")

        return PatternMatchingFn(tuple(uneval_args_iter), self.stmts, new_ctx)
717 |
718 |
719 | import_ast_for_expr()
720 |
--------------------------------------------------------------------------------