├── .gitignore
├── README.md
├── ast
│   ├── ast.go
│   └── ast_test.go
├── go.mod
├── lexer
│   ├── lexer.go
│   └── lexer_test.go
├── main.go
├── parser
│   ├── parser.go
│   └── parser_test.go
├── repl
│   └── repl.go
└── token
    └── token.go

/.gitignore:
--------------------------------------------------------------------------------
.idea/*

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```
# run the tests
# go test github.com/yonh/go-monkey-interpreter/lexer
go test ./lexer
```

--------------------------------------------------------------------------------
/ast/ast.go:
--------------------------------------------------------------------------------
package ast

import (
	"bytes"
	"github.com/yonh/go-monkey-interpreter/token"
)

// Every node in the AST must implement the Node interface, i.e. provide a
// TokenLiteral method returning the literal of the token the node is
// associated with, and a String method used for debugging and testing.
type Node interface {
	TokenLiteral() string
	String() string
}

type Statement interface {
	Node
	statementNode()
}

type Expression interface {
	Node
	expressionNode()
}

// Program is the root node of every AST the parser produces.
type Program struct {
	Statements []Statement
}

func (p *Program) TokenLiteral() string {
	if len(p.Statements) > 0 {
		return p.Statements[0].TokenLiteral()
	}
	return ""
}

func (p *Program) String() string {
	var out bytes.Buffer
	for _, s := range p.Statements {
		out.WriteString(s.String())
	}
	return out.String()
}

type LetStatement struct {
	Token token.Token
	Name  *Identifier
	Value Expression
}

func (ls *LetStatement) statementNode()       {}
func (ls *LetStatement) TokenLiteral() string { return ls.Token.Literal }
func (ls *LetStatement) String() string {
	var out bytes.Buffer
	out.WriteString(ls.TokenLiteral() + " ")
	out.WriteString(ls.Name.String())
	out.WriteString(" = ")

	if ls.Value != nil {
		out.WriteString(ls.Value.String())
	}
	out.WriteString(";")
	return out.String()
}

type Identifier struct {
	Token token.Token
	Value string
}

func (i *Identifier) expressionNode()      {}
func (i *Identifier) TokenLiteral() string { return i.Token.Literal }
func (i *Identifier) String() string       { return i.Value }

type ReturnStatement struct {
	Token       token.Token
	ReturnValue Expression
}

func (rs *ReturnStatement) statementNode()       {}
func (rs *ReturnStatement) TokenLiteral() string { return rs.Token.Literal }
func (rs *ReturnStatement) String() string {
	var out bytes.Buffer
	out.WriteString(rs.TokenLiteral() + " ")
	if rs.ReturnValue != nil {
		out.WriteString(rs.ReturnValue.String())
	}
	out.WriteString(";")
	return out.String()
}

// ExpressionStatement wraps an expression so a bare expression such as
// `x + 5;` can appear wherever a statement is expected.
type ExpressionStatement struct {
	Token      token.Token
	Expression Expression
}

func (es *ExpressionStatement) statementNode()       {}
func (es *ExpressionStatement) TokenLiteral() string { return es.Token.Literal }
func (es *ExpressionStatement) String() string {
	if es.Expression != nil {
		return es.Expression.String()
	}
	return ""
}
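A quick illustration of how these node types compose. The following sketch (not a file in this repo) builds the AST for `return x;` by hand, in the same style ast/ast_test.go below uses for a let statement, and renders it with String():

```go
package main

import (
	"fmt"

	"github.com/yonh/go-monkey-interpreter/ast"
	"github.com/yonh/go-monkey-interpreter/token"
)

func main() {
	// Hand-built AST for `return x;`: a ReturnStatement whose
	// ReturnValue is the Identifier expression `x`.
	program := &ast.Program{
		Statements: []ast.Statement{
			&ast.ReturnStatement{
				Token: token.Token{Type: token.RETURN, Literal: "return"},
				ReturnValue: &ast.Identifier{
					Token: token.Token{Type: token.IDENT, Literal: "x"},
					Value: "x",
				},
			},
		},
	}
	fmt.Println(program.String()) // return x;
}
```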
--------------------------------------------------------------------------------
/ast/ast_test.go:
--------------------------------------------------------------------------------
package ast

import (
	"github.com/yonh/go-monkey-interpreter/token"
	"testing"
)

func TestString(t *testing.T) {
	program := &Program{
		Statements: []Statement{
			&LetStatement{
				Token: token.Token{Type: token.LET, Literal: "let"},
				Name: &Identifier{
					Token: token.Token{Type: token.IDENT, Literal: "myVar"},
					Value: "myVar",
				},
				Value: &Identifier{
					Token: token.Token{Type: token.IDENT, Literal: "anotherVar"},
					Value: "anotherVar",
				},
			},
		},
	}

	if program.String() != "let myVar = anotherVar;" {
		t.Errorf("program.String() wrong. got %q", program.String())
	}
}

--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
module github.com/yonh/go-monkey-interpreter

go 1.14

--------------------------------------------------------------------------------
/lexer/lexer.go:
--------------------------------------------------------------------------------
package lexer

import (
	"github.com/yonh/go-monkey-interpreter/token"
)

type Lexer struct {
	input        string
	position     int  // index of the current character in the input
	readPosition int  // index of the next character to be read
	ch           byte // the character currently being examined
}

func New(input string) *Lexer {
	l := &Lexer{input: input}
	l.readChar()
	return l
}

// readChar reads the next character and updates position and readPosition.
// When the end of the input is reached it sets l.ch to 0, the ASCII code for
// the NUL character, which here means "nothing read yet" or "end of input".
// Note: the lexer only supports ASCII characters, not full Unicode; this is
// done to keep things simple.
func (l *Lexer) readChar() {
	if l.readPosition >= len(l.input) {
		l.ch = 0
	} else {
		l.ch = l.input[l.readPosition]
	}
	l.position = l.readPosition
	l.readPosition += 1
}

// readIdentifier reads characters until it hits one that is neither a letter
// nor an underscore, and returns the identifier that was read.
func (l *Lexer) readIdentifier() string {
	position := l.position
	for isLetter(l.ch) {
		l.readChar()
	}
	return l.input[position:l.position]
}

// readNumber reads characters until it hits a non-digit character, and
// returns the number that was read.
func (l *Lexer) readNumber() string {
	position := l.position
	for isDigit(l.ch) {
		l.readChar()
	}
	return l.input[position:l.position]
}

// isLetter reports whether ch is a letter or an underscore.
func isLetter(ch byte) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_'
}

func isDigit(ch byte) bool {
	return '0' <= ch && ch <= '9'
}

// NextToken first builds the Token corresponding to l.ch, then calls
// l.readChar() to advance the read position to the next character.
func (l *Lexer) NextToken() token.Token {
	var t token.Token

	l.skipWhitespace()

	switch l.ch {
	case '=':
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			literal := string(ch) + string(l.ch)
			t = token.Token{Type: token.EQ, Literal: literal}
		} else {
			t = newToken(token.ASSIGN, l.ch)
		}
	case '+':
		t = newToken(token.PLUS, l.ch)
	case '-':
		t = newToken(token.MINUS, l.ch)
	case '!':
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			literal := string(ch) + string(l.ch)
			t = token.Token{Type: token.NEQ, Literal: literal}
		} else {
			t = newToken(token.BANG, l.ch)
		}
	case '/':
		t = newToken(token.SLASH, l.ch)
	case '*':
		t = newToken(token.ASTERISK, l.ch)
	case '>':
		t = newToken(token.GT, l.ch)
	case '<':
		t = newToken(token.LT, l.ch)
	case '(':
		t = newToken(token.LPAREN, l.ch)
	case ')':
		t = newToken(token.RPAREN, l.ch)
	case '{':
		t = newToken(token.LBRACE, l.ch)
	case '}':
		t = newToken(token.RBRACE, l.ch)
	case ',':
		t = newToken(token.COMMA, l.ch)
	case ';':
		t = newToken(token.SEMICOLON, l.ch)
	case 0:
		t.Literal = ""
		t.Type = token.EOF
	default:
		if isLetter(l.ch) {
			t.Literal = l.readIdentifier()
			t.Type = token.LookupIdent(t.Literal)
			return t
		} else if isDigit(l.ch) {
			t.Type = token.INT
			t.Literal = l.readNumber()
			return t
		} else {
			t = newToken(token.ILLEGAL, l.ch)
		}
	}

	l.readChar()
	return t
}

// peekChar is very similar to readChar, but it does not modify l.position or
// l.readPosition. It looks ahead at the next character so the lexer can know
// in advance what the next call to readChar() will yield.
func (l *Lexer) peekChar() byte {
	if l.readPosition >= len(l.input) {
		return 0
	}
	return l.input[l.readPosition]
}

func newToken(tokenType token.TokenType, ch byte) token.Token {
	return token.Token{Type: tokenType, Literal: string(ch)}
}

// skipWhitespace skips over spaces, tabs, newlines and carriage returns.
func (l *Lexer) skipWhitespace() {
	for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
		l.readChar()
	}
}
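A minimal sketch (not part of the repo) of how the lexer is driven: construct it with New and call NextToken until token.EOF, the same loop the REPL uses:

```go
package main

import (
	"fmt"

	"github.com/yonh/go-monkey-interpreter/lexer"
	"github.com/yonh/go-monkey-interpreter/token"
)

func main() {
	l := lexer.New("let five = 5;")
	// Pull tokens one at a time until the lexer reports end of input.
	for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
		fmt.Printf("%-10v %q\n", tok.Type, tok.Literal)
	}
	// Output:
	// LET        "let"
	// IDENT      "five"
	// =          "="
	// INT        "5"
	// ;          ";"
}
```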
--------------------------------------------------------------------------------
/lexer/lexer_test.go:
--------------------------------------------------------------------------------
package lexer

import (
	"github.com/yonh/go-monkey-interpreter/token"
	"testing"
)

func TestNextToken(t *testing.T) {
	input := `let five = 5;
let ten = 10;
let add = fn(x, y) {
    x + y;
};

let result = add(five, ten);
!-/*5;
5 < 10 > 5;

if (5 < 10) {
    return true;
} else {
    return false;
}

10 == 10;
10 != 9;
`

	tests := []struct {
		expectedType    token.TokenType
		expectedLiteral string
	}{
		{token.LET, "let"},
		{token.IDENT, "five"},
		{token.ASSIGN, "="},
		{token.INT, "5"},
		{token.SEMICOLON, ";"},
		{token.LET, "let"},
		{token.IDENT, "ten"},
		{token.ASSIGN, "="},
		{token.INT, "10"},
		{token.SEMICOLON, ";"},
		{token.LET, "let"},
		{token.IDENT, "add"},
		{token.ASSIGN, "="},
		{token.FUNCTION, "fn"},
		{token.LPAREN, "("},
		{token.IDENT, "x"},
		{token.COMMA, ","},
		{token.IDENT, "y"},
		{token.RPAREN, ")"},
		{token.LBRACE, "{"},
		{token.IDENT, "x"},
		{token.PLUS, "+"},
		{token.IDENT, "y"},
		{token.SEMICOLON, ";"},
		{token.RBRACE, "}"},
		{token.SEMICOLON, ";"},
		{token.LET, "let"},
		{token.IDENT, "result"},
		{token.ASSIGN, "="},
		{token.IDENT, "add"},
		{token.LPAREN, "("},
		{token.IDENT, "five"},
		{token.COMMA, ","},
		{token.IDENT, "ten"},
		{token.RPAREN, ")"},
		{token.SEMICOLON, ";"},
		{token.BANG, "!"},
		{token.MINUS, "-"},
		{token.SLASH, "/"},
		{token.ASTERISK, "*"},
		{token.INT, "5"},
		{token.SEMICOLON, ";"},
		{token.INT, "5"},
		{token.LT, "<"},
		{token.INT, "10"},
		{token.GT, ">"},
		{token.INT, "5"},
		{token.SEMICOLON, ";"},

		{token.IF, "if"},
		{token.LPAREN, "("},
		{token.INT, "5"},
		{token.LT, "<"},
		{token.INT, "10"},
		{token.RPAREN, ")"},
		{token.LBRACE, "{"},
		{token.RETURN, "return"},
		{token.TRUE, "true"},
		{token.SEMICOLON, ";"},
		{token.RBRACE, "}"},
		{token.ELSE, "else"},
		{token.LBRACE, "{"},
		{token.RETURN, "return"},
		{token.FALSE, "false"},
		{token.SEMICOLON, ";"},
		{token.RBRACE, "}"},

		{token.INT, "10"},
		{token.EQ, "=="},
		{token.INT, "10"},
		{token.SEMICOLON, ";"},
		{token.INT, "10"},
		{token.NEQ, "!="},
		{token.INT, "9"},
		{token.SEMICOLON, ";"},
	}

	l := New(input)
	for i, tt := range tests {
		tok := l.NextToken()
		if tok.Type != tt.expectedType {
			t.Fatalf("tests[%d] - TokenType wrong. expected %q, got %q", i, tt.expectedType, tok.Type)
		}
		if tok.Literal != tt.expectedLiteral {
			t.Fatalf("tests[%d] - Literal wrong. expected %q, got %q", i, tt.expectedLiteral, tok.Literal)
		}
	}
}

--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
package main

import (
	"fmt"
	"github.com/yonh/go-monkey-interpreter/repl"
	"os"
	"os/user"
)

func main() {
	u, err := user.Current()
	if err != nil {
		panic(err)
	}

	fmt.Printf("Hello %s! This is the Monkey programming language!\n", u.Username)
	fmt.Printf("Feel free to type in commands\n")
	repl.Start(os.Stdin, os.Stdout)
}
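Running `go run main.go` starts a token-printing REPL (see repl/repl.go further down; it prints each token with %+v). A session might look like the following; the username comes from os/user and will differ per machine:

```
Hello yonh! This is the Monkey programming language!
Feel free to type in commands
>> let x = 5;
{Type:LET Literal:let}
{Type:IDENT Literal:x}
{Type:= Literal:=}
{Type:INT Literal:5}
{Type:; Literal:;}
>>
```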
--------------------------------------------------------------------------------
/parser/parser.go:
--------------------------------------------------------------------------------
package parser

import (
	"fmt"
	"github.com/yonh/go-monkey-interpreter/ast"
	"github.com/yonh/go-monkey-interpreter/lexer"
	"github.com/yonh/go-monkey-interpreter/token"
)

type Parser struct {
	l      *lexer.Lexer
	errors []string

	currToken token.Token
	peekToken token.Token
}

func New(l *lexer.Lexer) *Parser {
	p := &Parser{l: l, errors: []string{}}

	// Read two tokens so that currToken and peekToken are both initialized.
	p.nextToken()
	p.nextToken()

	return p
}

func (p *Parser) Errors() []string {
	return p.errors
}

func (p *Parser) peekError(t token.TokenType) {
	msg := fmt.Sprintf("expected next token to be %s, got %s instead", t, p.peekToken.Type)
	p.errors = append(p.errors, msg)
}

func (p *Parser) nextToken() {
	p.currToken = p.peekToken
	p.peekToken = p.l.NextToken()
}

// ParseProgram first constructs the root node of the AST, then walks the
// input token by token, turning each statement into a Statement that is
// appended to program.Statements, stopping when it hits token.EOF.
func (p *Parser) ParseProgram() *ast.Program {
	program := &ast.Program{}
	program.Statements = []ast.Statement{}

	for p.currToken.Type != token.EOF {
		stmt := p.parseStatement()
		if stmt != nil {
			program.Statements = append(program.Statements, stmt)
		}
		p.nextToken()
	}

	return program
}

func (p *Parser) parseStatement() ast.Statement {
	switch p.currToken.Type {
	case token.LET:
		return p.parseLetStatement()
	case token.RETURN:
		return p.parseReturnStatement()
	default:
		return nil
	}
}

func (p *Parser) parseLetStatement() *ast.LetStatement {
	stmt := &ast.LetStatement{Token: p.currToken}

	if !p.expectPeek(token.IDENT) {
		return nil
	}
	stmt.Name = &ast.Identifier{Token: p.currToken, Value: p.currToken.Literal}

	if !p.expectPeek(token.ASSIGN) {
		return nil
	}

	// TODO: expression parsing is skipped for now; consume tokens until a
	// semicolon (;) is reached.
	for !p.currTokenIs(token.SEMICOLON) {
		p.nextToken()
	}
	return stmt
}

func (p *Parser) parseReturnStatement() *ast.ReturnStatement {
	stmt := &ast.ReturnStatement{Token: p.currToken}

	p.nextToken()

	// TODO: expression parsing is skipped for now; consume tokens until a
	// semicolon (;) is reached.
	for !p.currTokenIs(token.SEMICOLON) {
		p.nextToken()
	}

	return stmt
}

func (p *Parser) currTokenIs(t token.TokenType) bool {
	return p.currToken.Type == t
}

func (p *Parser) peekTokenIs(t token.TokenType) bool {
	return p.peekToken.Type == t
}

// expectPeek advances to the next token if peekToken has the expected type;
// otherwise it records an error and leaves the parser where it is.
func (p *Parser) expectPeek(t token.TokenType) bool {
	if p.peekTokenIs(t) {
		p.nextToken()
		return true
	}
	p.peekError(t)
	return false
}
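A small driver (not in the repo) showing where the parser currently stands. Because expression parsing is still a TODO, the let statement's Value stays nil, so Program.String() renders an empty right-hand side:

```go
package main

import (
	"fmt"

	"github.com/yonh/go-monkey-interpreter/lexer"
	"github.com/yonh/go-monkey-interpreter/parser"
)

func main() {
	l := lexer.New("let x = 5;")
	p := parser.New(l)
	program := p.ParseProgram()

	fmt.Println(len(p.Errors()))  // 0
	fmt.Println(program.String()) // let x = ;  (the `5` is skipped for now)
}
```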
--------------------------------------------------------------------------------
/parser/parser_test.go:
--------------------------------------------------------------------------------
package parser

import (
	"github.com/yonh/go-monkey-interpreter/ast"
	"github.com/yonh/go-monkey-interpreter/lexer"
	"testing"
)

func TestStatements(t *testing.T) {
	input := `
let x = 5;
let y = 10;
let foobar = 838383;
`
	l := lexer.New(input)
	p := New(l)
	program := p.ParseProgram()
	checkParserErrors(t, p)
	if program == nil {
		t.Fatalf("ParseProgram() returned nil")
	}
	if len(program.Statements) != 3 {
		t.Fatalf("program.Statements does not contain 3 statements. got %d", len(program.Statements))
	}

	tests := []struct {
		expectedIdentifier string
	}{
		{"x"},
		{"y"},
		{"foobar"},
	}

	for i, tt := range tests {
		stmt := program.Statements[i]
		if !testLetStatement(t, stmt, tt.expectedIdentifier) {
			return
		}
	}
}

func testLetStatement(t *testing.T, s ast.Statement, name string) bool {
	if s.TokenLiteral() != "let" {
		t.Errorf("s.TokenLiteral not 'let', got %q", s.TokenLiteral())
		return false
	}
	letStmt, ok := s.(*ast.LetStatement)
	if !ok {
		t.Errorf("s not *ast.LetStatement, got %T", s)
		return false
	}

	if letStmt.Name.Value != name {
		t.Errorf("letStmt.Name.Value not '%s', got '%s'", name, letStmt.Name.Value)
		return false
	}

	if letStmt.Name.TokenLiteral() != name {
		t.Errorf("letStmt.Name.TokenLiteral() not '%s', got '%s'", name, letStmt.Name.TokenLiteral())
		return false
	}

	return true
}

func TestReturnStatements(t *testing.T) {
	input := `
return 5;
return 10;
return 993322;`
	l := lexer.New(input)
	p := New(l)
	program := p.ParseProgram()
	checkParserErrors(t, p)

	if len(program.Statements) != 3 {
		t.Errorf("program.Statements does not contain 3 statements, got %d", len(program.Statements))
	}

	for _, stmt := range program.Statements {
		returnStmt, ok := stmt.(*ast.ReturnStatement)
		if !ok {
			t.Errorf("stmt not *ast.ReturnStatement, got %T", stmt)
			continue
		}
		if returnStmt.TokenLiteral() != "return" {
			t.Errorf("returnStmt.TokenLiteral not 'return', got %q", returnStmt.TokenLiteral())
		}
	}
}
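To see expectPeek and peekError at work, here is a sketch (not in the repo) that feeds the parser invalid input and prints the collected errors:

```go
package main

import (
	"fmt"

	"github.com/yonh/go-monkey-interpreter/lexer"
	"github.com/yonh/go-monkey-interpreter/parser"
)

func main() {
	// `let` must be followed by an identifier, so expectPeek(IDENT)
	// fails on `=` and peekError records a message.
	p := parser.New(lexer.New("let = 5;"))
	p.ParseProgram()

	for _, msg := range p.Errors() {
		fmt.Println(msg)
	}
	// Output:
	// expected next token to be IDENT, got = instead
}
```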
func checkParserErrors(t *testing.T, p *Parser) {
	errors := p.Errors()
	if len(errors) == 0 {
		return
	}
	t.Errorf("parser has %d errors", len(errors))
	for _, msg := range errors {
		t.Errorf("parser error: %q", msg)
	}
	t.FailNow()
}

--------------------------------------------------------------------------------
/repl/repl.go:
--------------------------------------------------------------------------------
package repl

import (
	"bufio"
	"fmt"
	"github.com/yonh/go-monkey-interpreter/lexer"
	"github.com/yonh/go-monkey-interpreter/token"
	"io"
)

const PROMPT = ">> "

func Start(in io.Reader, out io.Writer) {
	scanner := bufio.NewScanner(in)

	for {
		fmt.Fprint(out, PROMPT)

		scanned := scanner.Scan()
		if !scanned {
			return
		}

		line := scanner.Text()
		l := lexer.New(line)

		for t := l.NextToken(); t.Type != token.EOF; t = l.NextToken() {
			fmt.Fprintf(out, "%+v\n", t)
		}
	}
}

--------------------------------------------------------------------------------
/token/token.go:
--------------------------------------------------------------------------------
package token

type TokenType string

type Token struct {
	Type    TokenType
	Literal string
}

const (
	ILLEGAL = "ILLEGAL" // an unknown token or character
	EOF     = "EOF"     // end of file

	// Identifiers and literals
	IDENT = "IDENT" // add, foo, x, y ...
	INT   = "INT"   // 1, 2, 3

	// Operators
	ASSIGN   = "="
	PLUS     = "+"
	MINUS    = "-"
	BANG     = "!"
	ASTERISK = "*"
	SLASH    = "/"

	EQ  = "=="
	NEQ = "!="
	LT  = "<"
	GT  = ">"

	// Delimiters
	COMMA     = ","
	SEMICOLON = ";"

	LPAREN = "("
	RPAREN = ")"
	LBRACE = "{"
	RBRACE = "}"

	// Keywords
	FUNCTION = "FUNCTION"
	LET      = "LET"
	TRUE     = "TRUE"
	FALSE    = "FALSE"
	IF       = "IF"
	ELSE     = "ELSE"
	RETURN   = "RETURN"
)

// keywords maps keyword literals to their token types.
var keywords = map[string]TokenType{
	"fn":     FUNCTION,
	"let":    LET,
	"true":   TRUE,
	"false":  FALSE,
	"if":     IF,
	"else":   ELSE,
	"return": RETURN,
}

// LookupIdent looks the given string up in the keywords table; if it is a
// keyword, the keyword's TokenType is returned, otherwise IDENT.
func LookupIdent(ident string) TokenType {
	if tok, ok := keywords[ident]; ok {
		return tok
	}
	return IDENT
}
--------------------------------------------------------------------------------
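Finally, a small sketch (not in the repo) of how LookupIdent separates keywords from user-defined identifiers; the lexer calls it on every identifier it scans:

```go
package main

import (
	"fmt"

	"github.com/yonh/go-monkey-interpreter/token"
)

func main() {
	fmt.Println(token.LookupIdent("fn"))  // FUNCTION (a keyword)
	fmt.Println(token.LookupIdent("add")) // IDENT    (a user-defined name)
}
```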