├── src ├── Core │ ├── Parser │ │ └── Parser.cpp │ └── Lexer │ │ └── Lexer.cpp ├── Utils │ ├── Utils.cpp │ └── Logger.cpp └── main.cpp ├── vcpkg.json ├── main.kor ├── .gitignore ├── include ├── Core │ ├── Parser │ │ └── Parser.hpp │ └── Lexer │ │ └── Lexer.hpp ├── Utils │ ├── Utils.hpp │ └── Logger.hpp └── Common │ ├── Nodes │ ├── Program.hpp │ ├── Node.hpp │ ├── Syntax │ │ ├── Section.hpp │ │ ├── Rule.hpp │ │ └── Factor.hpp │ └── Semantics │ │ ├── Section.hpp │ │ └── Rule.hpp │ └── Token.hpp ├── run.sh ├── test.sh ├── .clangd ├── vcpkg-configuration.json ├── CMakePresets.json ├── README.md ├── LICENSE ├── CMakeLists.txt ├── CONTRIBUTING.md └── tests ├── test_utils.hpp └── test_lexer.cpp /src/Core/Parser/Parser.cpp: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /vcpkg.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": [ 3 | "fmt", 4 | "gtest" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /main.kor: -------------------------------------------------------------------------------- 1 | language ArithLang { 2 | syntax { 3 | program -> statement * ; 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | .vscode 3 | .ropeproject 4 | .cache 5 | compile_commands.json 6 | CMakeUserPresets.json 7 | tests/Testing 8 | vcpkg_installed 9 | -------------------------------------------------------------------------------- /include/Core/Parser/Parser.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | using AST = std::vector; 6 | 7 | class Parser 8 | { 9 | 10 | }; 11 | 
-------------------------------------------------------------------------------- /include/Utils/Utils.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace Utils 6 | { 7 | std::string ReadFileContent(const std::string& path); 8 | } 9 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -a 3 | # source .env 4 | # set +a 5 | 6 | set -e 7 | cmake --preset=default 8 | cmake --build --preset=default 9 | ./build/default/koral "$@" 10 | -------------------------------------------------------------------------------- /include/Common/Nodes/Program.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/Nodes/Node.hpp" 4 | 5 | struct ProgramNode : public Node 6 | { 7 | std::string node = "Program"; 8 | 9 | std::string name; 10 | }; 11 | -------------------------------------------------------------------------------- /include/Common/Nodes/Node.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | struct Node 6 | { 7 | std::string node; // debugging purposes 8 | virtual ~Node() = default; 9 | }; 10 | 11 | // #define SETUP_NODE(name) std::string name = "name"; 12 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # compile whole project first to make sure the binary is up-to-date 5 | cmake --preset=default 1>/dev/null 6 | cmake --build --preset=default 1>/dev/null 7 | 8 | ctest --test-dir ./build/default --output-on-failure 9 | 10 | -------------------------------------------------------------------------------- /include/Common/Nodes/Syntax/Section.hpp: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/Nodes/Node.hpp" 4 | #include "Common/Nodes/Syntax/Rule.hpp" 5 | 6 | #include 7 | 8 | struct SyntaxSection : public Node 9 | { 10 | std::string node = "SyntaxSection"; 11 | 12 | std::vector rules; 13 | }; 14 | -------------------------------------------------------------------------------- /include/Common/Nodes/Semantics/Section.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/Nodes/Node.hpp" 4 | #include "Common/Nodes/Semantics/Rule.hpp" 5 | 6 | #include 7 | 8 | struct SemanticsSection : public Node 9 | { 10 | std::string node = "SemanticsSection"; 11 | 12 | std::vector rules; 13 | }; 14 | -------------------------------------------------------------------------------- /include/Common/Nodes/Syntax/Rule.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/Nodes/Node.hpp" 4 | #include "Common/Nodes/Syntax/Factor.hpp" 5 | 6 | #include 7 | #include 8 | 9 | struct SyntaxRule : public Node 10 | { 11 | std::string node = "SyntaxRule"; 12 | 13 | std::string non_terminal; 14 | std::string extends; 15 | std::vector factors; 16 | }; 17 | -------------------------------------------------------------------------------- /.clangd: -------------------------------------------------------------------------------- 1 | CompileFlags: 2 | Add: [-Wall, -Wextra, -Wpedantic, -Wno-deprecated-declarations, -std=c++23] 3 | InlayHints: 4 | ParameterNames: No 5 | DeducedTypes: No 6 | Diagnostics: 7 | UnusedIncludes: Strict 8 | ClangTidy: 9 | Add: [bugprone-*, cert-*, modernize-*, performance-*] 10 | Remove: [bugprone-easily-swappable-parameters, modernize-use-trailing-return-type, modernize-use-ranges] 11 | --------------------------------------------------------------------------------
/include/Common/Nodes/Semantics/Rule.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/Nodes/Node.hpp" 4 | 5 | #include 6 | #include 7 | 8 | struct Property 9 | { 10 | std::string key; 11 | std::string value; 12 | }; 13 | 14 | struct SemanticsRule : public Node 15 | { 16 | std::string node = "SemanticsRule"; 17 | 18 | std::string non_terminal; 19 | std::vector property; 20 | }; 21 | -------------------------------------------------------------------------------- /vcpkg-configuration.json: -------------------------------------------------------------------------------- 1 | { 2 | "default-registry": { 3 | "kind": "git", 4 | "baseline": "2d261aa77fbfc4071af94bcc94789c73eb206081", 5 | "repository": "https://github.com/microsoft/vcpkg" 6 | }, 7 | "registries": [ 8 | { 9 | "kind": "artifact", 10 | "location": "https://github.com/microsoft/vcpkg-ce-catalog/archive/refs/heads/main.zip", 11 | "name": "microsoft" 12 | } 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /src/Utils/Utils.cpp: -------------------------------------------------------------------------------- 1 | #include "Utils/Utils.hpp" 2 | 3 | #include 4 | 5 | std::string Utils::ReadFileContent(const std::string& path) 6 | { 7 | std::ifstream file { path }; 8 | auto fileSize = file.seekg(0, std::ios::end).tellg(); 9 | file.seekg(0); 10 | 11 | std::string fileContent; 12 | fileContent.resize(fileSize); 13 | 14 | file.read(fileContent.data(), fileSize); 15 | 16 | return fileContent; 17 | } 18 | -------------------------------------------------------------------------------- /include/Common/Nodes/Syntax/Factor.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/Nodes/Node.hpp" 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | enum Type : uint8_t 10 | { 11 | TERMINAL, 12 | IDENTIFIER, 13 | REGEX, 14 | GROUP, 15 | 
OPTIONAL, 16 | REPEAT 17 | }; 18 | 19 | struct Factor : public Node 20 | { 21 | std::string node = "Factor"; 22 | 23 | Type type; 24 | 25 | std::string value; 26 | std::vector nested; 27 | 28 | char occurence; 29 | }; 30 | -------------------------------------------------------------------------------- /src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "Utils/Utils.hpp" 2 | #include "Utils/Logger.hpp" 3 | #include "Core/Lexer/Lexer.hpp" 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | int main(int argc, char* argv[]) 10 | { 11 | nassert(argc >= 2, "Argument count is less than two"); 12 | nassert(std::filesystem::exists(argv[1]), "File doesn't exist"); 13 | 14 | std::string content = Utils::ReadFileContent(argv[1]); 15 | nassert(!content.empty(), "File content is empty"); 16 | 17 | auto lexer = std::make_unique(content); 18 | auto tokens = lexer->Scan(); 19 | 20 | return 0; 21 | } 22 | -------------------------------------------------------------------------------- /CMakePresets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 2, 3 | "configurePresets": [ 4 | { 5 | "name": "vcpkg", 6 | "generator": "Ninja", 7 | "binaryDir": "build/default", 8 | "cacheVariables": { 9 | "CMAKE_BUILD_TYPE": "Debug", 10 | "CMAKE_CXX_STANDARD": "23", 11 | "CMAKE_CXX_STANDARD_REQUIRED": "ON", 12 | "CMAKE_EXPORT_COMPILE_COMMANDS": "ON", 13 | "CMAKE_TOOLCHAIN_FILE": "$env{VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake", 14 | "VCPKG_FEATURE_FLAGS": "manifests" 15 | } 16 | }, 17 | { 18 | "name": "release", 19 | "inherits": "vcpkg", 20 | "binaryDir": "build/release", 21 | "cacheVariables": { 22 | "CMAKE_BUILD_TYPE": "Release" 23 | } 24 | } 25 | ], 26 | "buildPresets": [ 27 | { "name": "release", "configurePreset": "release" } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # Koral 2 | 3 | Koral is a work-in-progress, dynamically-typed scripting language. It's simple, easy to use and still in development. It is written in C++ and built with CMake and vcpkg. 4 | 5 | ## Syntax 6 | 7 | The language doesn't have if statements and loops yet, but they will be implemented at a later date. 8 | 9 | Here is a basic program: 10 | ```js 11 | var example = "Hello World!" 12 | var number = 5 * 5 * 4 13 | 14 | say(example) 15 | say(number) 16 | ``` 17 | 18 | Output: 19 | ``` 20 | Hello World! 21 | 100 22 | ``` 23 | 24 | ## Building 25 | 26 | ### Requirements 27 | * A C++23 compiler, CMake 3.24+ and vcpkg 28 | 29 | ### Run 30 | To run the language, the code needs to be written in a .kor file. To run it, type: 31 | ``` 32 | ./run.sh `koral file` 33 | ``` 34 | The output will be displayed in the terminal and you can look at the data folder to see how the language compiles. 35 | 36 | ## Contributing 37 | Koral is still in development. If you want to contribute in any way, feel free to contact me. To learn more, check `CONTRIBUTING.md`. -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Galin Georgiev 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software.
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /include/Utils/Logger.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #define nassert(condition, message) \ 9 | Logger::Assert(condition, fmt::format("Assert failed on line {} file {} function {}: {}", __LINE__, __FILE__, __FUNCTION__, message)) 10 | 11 | enum class LogType : uint8_t 12 | { 13 | Debug, 14 | Info, 15 | Warning, 16 | Error, 17 | Fatal, 18 | Assert, 19 | Count 20 | }; 21 | 22 | class Logger 23 | { 24 | public: 25 | static void Info(std::string_view message); 26 | 27 | static void Debug(std::string_view message); 28 | 29 | static void Warning(std::string_view message); 30 | 31 | static void Error(std::string_view message); 32 | 33 | static void Fatal(std::string_view message); 34 | 35 | static void Assert(bool condition, std::string_view message); 36 | 37 | private: 38 | static void Log(LogType type, std::string_view message); 39 | 40 | static std::array(LogType::Count)> m_prefixes; 41 | static std::array(LogType::Count)> m_statuses; 42 | }; 43 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.24) 2 | project(koral LANGUAGES CXX) 3 | 4 | set(CMAKE_CXX_STANDARD 23) 5 | 
set(CMAKE_CXX_STANDARD_REQUIRED ON) 6 | set(CMAKE_EXPORT_COMPILE_COMMANDS ON) 7 | 8 | if (CMAKE_EXPORT_COMPILE_COMMANDS) 9 | execute_process( 10 | COMMAND ${CMAKE_COMMAND} -E create_symlink 11 | "${CMAKE_BINARY_DIR}/compile_commands.json" 12 | "${CMAKE_SOURCE_DIR}/compile_commands.json" 13 | COMMAND_ERROR_IS_FATAL ANY 14 | ) 15 | endif() 16 | 17 | find_package(fmt CONFIG REQUIRED) 18 | find_package(GTest CONFIG REQUIRED) 19 | 20 | # Library 21 | 22 | file(GLOB_RECURSE LIB_SOURCES 23 | "src/*.cpp" 24 | ) 25 | list(FILTER LIB_SOURCES EXCLUDE REGEX "src/main\\.cpp$") 26 | 27 | add_library(koral_lib ${LIB_SOURCES}) 28 | target_include_directories(koral_lib PUBLIC include) 29 | target_link_libraries(koral_lib PUBLIC fmt::fmt) 30 | 31 | # Executable 32 | 33 | add_executable(koral src/main.cpp) 34 | target_link_libraries(koral PRIVATE koral_lib) 35 | 36 | # Unit tests 37 | 38 | enable_testing() 39 | file(GLOB_RECURSE TESTS "tests/*.cpp") 40 | 41 | add_executable(unit_tests ${TESTS}) 42 | target_include_directories(unit_tests PRIVATE include tests) 43 | target_link_libraries(unit_tests PRIVATE 44 | koral_lib 45 | GTest::gtest 46 | GTest::gtest_main 47 | fmt::fmt 48 | ) 49 | 50 | include(GoogleTest) 51 | gtest_discover_tests(unit_tests) 52 | -------------------------------------------------------------------------------- /src/Utils/Logger.cpp: -------------------------------------------------------------------------------- 1 | #include "Utils/Logger.hpp" 2 | 3 | #include 4 | 5 | #include 6 | 7 | std::array(LogType::Count)> Logger::m_prefixes = { 8 | "DEBUG", "INFO", "WARN", "ERROR", "FATAL", "ASSERT" 9 | }; 10 | 11 | std::array(LogType::Count)> Logger::m_statuses = { 12 | true, true, true, true, false, false 13 | }; 14 | 15 | void Logger::Info(std::string_view message) 16 | { 17 | Log(LogType::Info, message); 18 | } 19 | 20 | void Logger::Debug(std::string_view message) 21 | { 22 | Log(LogType::Debug, message); 23 | } 24 | 25 | void Logger::Warning(std::string_view message) 
26 | { 27 | Log(LogType::Warning, message); 28 | } 29 | 30 | void Logger::Error(std::string_view message) 31 | { 32 | Log(LogType::Error, message); 33 | } 34 | 35 | void Logger::Fatal(std::string_view message) 36 | { 37 | Log(LogType::Fatal, message); 38 | exit(1); 39 | } 40 | 41 | void Logger::Assert(bool condition, std::string_view message) 42 | { 43 | if (condition) 44 | return; 45 | 46 | Log(LogType::Assert, message); 47 | exit(1); 48 | } 49 | 50 | void Logger::Log(LogType type, std::string_view message) 51 | { 52 | const int typeIndex = static_cast(type); 53 | 54 | // Fatal and Assert can't be stopped from logging 55 | if (!m_statuses[typeIndex] && type != LogType::Fatal && type != LogType::Assert) 56 | return; 57 | 58 | fmt::println("{}: {}", m_prefixes[typeIndex], message.data()); 59 | } 60 | -------------------------------------------------------------------------------- /include/Core/Lexer/Lexer.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "Common/Token.hpp" 9 | 10 | class Lexer 11 | { 12 | public: 13 | Lexer(std::string content) : m_content(std::move(content)), m_char(m_content[0]) {}; 14 | 15 | std::vector Scan(); 16 | 17 | private: 18 | void Consume(); 19 | 20 | void Next(); 21 | 22 | char Peek(); 23 | 24 | void AddToken(std::uint8_t type, const std::string& value, size_t line); 25 | 26 | // --- Skipping content --- 27 | 28 | void SkipWhitespace(); 29 | 30 | void SkipComment(bool multiline); 31 | 32 | // --- Reading specific token types --- 33 | 34 | void ReadIdentifier(); 35 | 36 | void ReadString(); 37 | 38 | char EscapeCharacter(); 39 | 40 | void ReadNumber(); 41 | 42 | private: 43 | std::string m_content; 44 | 45 | char m_char; 46 | size_t m_pos = 0; 47 | size_t m_line = 1; 48 | 49 | std::vector m_tokens = {}; 50 | 51 | // --- Token identification variables --- 52 | 53 | const std::unordered_map character_type_map = { 54 
| { ';', TOKEN_SEMICOLON }, 55 | { '(', TOKEN_LPAREN }, 56 | { ')', TOKEN_RPAREN }, 57 | { ',', TOKEN_COMMA }, 58 | { '+', TOKEN_PLUS }, 59 | { '-', TOKEN_MINUS } 60 | }; 61 | 62 | const std::unordered_map character_equal_comb = { 63 | { '=', TOKEN_EQ }, 64 | { '+', TOKEN_PLUS_EQ }, 65 | { '-', TOKEN_MINUS_EQ }, 66 | { '!', TOKEN_NOT_EQ }, 67 | { '<', TOKEN_LTE }, 68 | { '>', TOKEN_GTE } 69 | }; 70 | }; 71 | -------------------------------------------------------------------------------- /include/Common/Token.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | struct Token 7 | { 8 | uint8_t type; 9 | std::string value; 10 | size_t line; 11 | 12 | bool operator==(const Token& other) const noexcept 13 | { 14 | return type == other.type && value == other.value && line == other.line; 15 | } 16 | }; 17 | 18 | enum : uint8_t 19 | { 20 | // Special 21 | TOKEN_EOF, 22 | TOKEN_ILLEGAL, 23 | TOKEN_UNTERMINATED, 24 | 25 | // ——— Literals ——— 26 | TOKEN_IDENTIFIER, // variable/function names 27 | TOKEN_NUMBER, // numeric literals 28 | TOKEN_STRING, // string literals 29 | TOKEN_CHAR, // character literals 30 | 31 | // ——— Operators ——— 32 | // Arithmetic 33 | TOKEN_PLUS, // + 34 | TOKEN_MINUS, // - 35 | TOKEN_ASTERISK, // * 36 | TOKEN_SLASH, // / 37 | TOKEN_PERCENT, // % 38 | 39 | // Assignment & comparison 40 | TOKEN_ASSIGN, // = 41 | TOKEN_PLUS_EQ, // += 42 | TOKEN_MINUS_EQ, // -= 43 | TOKEN_MULT_EQ, // *= 44 | TOKEN_DIVIS_EQ, // /= 45 | TOKEN_PERCE_EQ, // %= 46 | TOKEN_EQ, // == 47 | TOKEN_NOT_EQ, // != 48 | TOKEN_LT, // < 49 | TOKEN_GT, // > 50 | TOKEN_LTE, // <= 51 | TOKEN_GTE, // >= 52 | 53 | // Bitwise & logical (if your language has them) 54 | TOKEN_AND, // & 55 | TOKEN_OR, // | 56 | TOKEN_XOR, // ^ 57 | TOKEN_NOT, // ! 
58 | TOKEN_AND_AND, // && 59 | TOKEN_OR_OR, // || 60 | 61 | // ——— Delimiters & Punctuation ——— 62 | TOKEN_COMMA, // , 63 | TOKEN_SEMICOLON, // ; 64 | TOKEN_COLON, // : 65 | TOKEN_DOT, // . 66 | // TOKEN_ELLIPSIS, // ... 67 | TOKEN_ARROW, // -> 68 | 69 | // Parentheses, braces, brackets 70 | TOKEN_LPAREN, // ( 71 | TOKEN_RPAREN, // ) 72 | TOKEN_LBRACE, // { 73 | TOKEN_RBRACE, // } 74 | TOKEN_LBRACKET, // [ 75 | TOKEN_RBRACKET, // ] 76 | 77 | // ——— Keywords ——— 78 | // TOKEN_KW_IF, 79 | // TOKEN_KW_ELSE, 80 | // TOKEN_KW_FOR, 81 | // TOKEN_KW_WHILE, 82 | // TOKEN_KW_RETURN, 83 | // TOKEN_KW_FUNC, 84 | // TOKEN_KW_VAR, 85 | // TOKEN_KW_CONST, 86 | }; 87 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish to make with 4 | the owner of the repository via issue, email or a communication platform of your choice. 5 | 6 | ## Pull Request Process 7 | 8 | 1. Wait for the owner to commit if he's currently working on one. 9 | 2. Once the owner's done, you can now add or improve upon a feature. 10 | 3. When you're done, the pull request will be approved or declined. The 11 | owner may have critiques you need to fix, before approving the pull request. 12 | 13 | ### Our Pledge 14 | 15 | In the interest of fostering an open and welcoming environment, we as 16 | contributors and maintainers pledge to making participation in our project and 17 | our community a harassment-free experience for everyone, regardless of age, body 18 | size, disability, ethnicity, gender identity and expression, level of experience, 19 | nationality, personal appearance, race, religion, or sexual identity and 20 | orientation. 
21 | 22 | ### Our Standards 23 | 24 | Examples of behavior that contributes to creating a positive environment 25 | include: 26 | 27 | * Using welcoming and inclusive language 28 | * Being respectful of differing viewpoints and experiences 29 | * Gracefully accepting constructive criticism 30 | * Focusing on what is best for the community 31 | * Showing empathy towards other community members 32 | 33 | Examples of unacceptable behavior by participants include: 34 | 35 | * The use of sexualized language or imagery and unwelcome sexual attention or 36 | advances 37 | * Trolling, insulting/derogatory comments, and personal or political attacks 38 | * Public or private harassment 39 | * Publishing others' private information, such as a physical or electronic 40 | address, without explicit permission 41 | * Other conduct which could reasonably be considered inappropriate in a 42 | professional setting 43 | 44 | ### Our Responsibilities 45 | 46 | Project maintainers are responsible for clarifying the standards of acceptable 47 | behavior and are expected to take appropriate and fair corrective action in 48 | response to any instances of unacceptable behavior. 49 | 50 | Project maintainers have the right and responsibility to remove, edit, or 51 | reject comments, commits, code, wiki edits, issues, and other contributions 52 | that are not aligned to this Code of Conduct, or to ban temporarily or 53 | permanently any contributor for other behaviors that they deem inappropriate, 54 | threatening, offensive, or harmful. 
-------------------------------------------------------------------------------- /tests/test_utils.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/Token.hpp" 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | inline std::string TokenTypeToString(uint8_t type) 10 | { 11 | switch (type) 12 | { 13 | case TOKEN_EOF: 14 | return "TOKEN_EOF"; 15 | case TOKEN_ILLEGAL: 16 | return "TOKEN_ILLEGAL"; 17 | case TOKEN_UNTERMINATED: 18 | return "TOKEN_UNTERMINATED"; 19 | case TOKEN_IDENTIFIER: 20 | return "TOKEN_IDENTIFIER"; 21 | case TOKEN_NUMBER: 22 | return "TOKEN_NUMBER"; 23 | case TOKEN_STRING: 24 | return "TOKEN_STRING"; 25 | case TOKEN_CHAR: 26 | return "TOKEN_CHAR"; 27 | case TOKEN_PLUS: 28 | return "TOKEN_PLUS"; 29 | case TOKEN_MINUS: 30 | return "TOKEN_MINUS"; 31 | case TOKEN_ASTERISK: 32 | return "TOKEN_ASTERISK"; 33 | case TOKEN_SLASH: 34 | return "TOKEN_SLASH"; 35 | case TOKEN_PERCENT: 36 | return "TOKEN_PERCENT"; 37 | case TOKEN_ASSIGN: 38 | return "TOKEN_ASSIGN"; 39 | case TOKEN_PLUS_EQ: 40 | return "TOKEN_PLUS_EQ"; 41 | case TOKEN_MINUS_EQ: 42 | return "TOKEN_MINUS_EQ"; 43 | case TOKEN_MULT_EQ: 44 | return "TOKEN_MULT_EQ"; 45 | case TOKEN_DIVIS_EQ: 46 | return "TOKEN_DIVIS_EQ"; 47 | case TOKEN_PERCE_EQ: 48 | return "TOKEN_PERCE_EQ"; 49 | case TOKEN_EQ: 50 | return "TOKEN_EQ"; 51 | case TOKEN_NOT_EQ: 52 | return "TOKEN_NOT_EQ"; 53 | case TOKEN_LT: 54 | return "TOKEN_LT"; 55 | case TOKEN_GT: 56 | return "TOKEN_GT"; 57 | case TOKEN_LTE: 58 | return "TOKEN_LTE"; 59 | case TOKEN_GTE: 60 | return "TOKEN_GTE"; 61 | case TOKEN_AND: 62 | return "TOKEN_AND"; 63 | case TOKEN_OR: 64 | return "TOKEN_OR"; 65 | case TOKEN_XOR: 66 | return "TOKEN_XOR"; 67 | case TOKEN_NOT: 68 | return "TOKEN_NOT"; 69 | case TOKEN_AND_AND: 70 | return "TOKEN_AND_AND"; 71 | case TOKEN_OR_OR: 72 | return "TOKEN_OR_OR"; 73 | case TOKEN_COMMA: 74 | return "TOKEN_COMMA"; 75 | case TOKEN_SEMICOLON: 76 | return 
"TOKEN_SEMICOLON"; 77 | case TOKEN_COLON: 78 | return "TOKEN_COLON"; 79 | case TOKEN_DOT: 80 | return "TOKEN_DOT"; 81 | case TOKEN_ARROW: 82 | return "TOKEN_ARROW"; 83 | case TOKEN_LPAREN: 84 | return "TOKEN_LPAREN"; 85 | case TOKEN_RPAREN: 86 | return "TOKEN_RPAREN"; 87 | case TOKEN_LBRACE: 88 | return "TOKEN_LBRACE"; 89 | case TOKEN_RBRACE: 90 | return "TOKEN_RBRACE"; 91 | case TOKEN_LBRACKET: 92 | return "TOKEN_LBRACKET"; 93 | case TOKEN_RBRACKET: 94 | return "TOKEN_RBRACKET"; 95 | default: 96 | return "TOKEN_UNKNOWN"; 97 | } 98 | } 99 | 100 | inline void PrintTo(const Token& t, std::ostream* os) 101 | { 102 | *os << "Token(type=" << TokenTypeToString(t.type) 103 | << ", value=\"" << t.value << "\"" 104 | << ", line=" << t.line << ")"; 105 | } 106 | -------------------------------------------------------------------------------- /tests/test_lexer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "test_utils.hpp" 3 | #include "Common/Token.hpp" 4 | #include "Core/Lexer/Lexer.hpp" 5 | 6 | // NOTE: Tests are written by ChatGPT 7 | 8 | // 1️⃣ String literal 9 | TEST(LexerTest, LexesStringCorrect) 10 | { 11 | std::string to_lex = "\"test\""; 12 | Lexer lexer(to_lex); 13 | 14 | std::vector result = lexer.Scan(); 15 | std::vector expected = { 16 | { .type = TOKEN_STRING, .value = "test", .line = 1 }, 17 | { .type = TOKEN_EOF, .value = "", .line = 1 }, 18 | }; 19 | 20 | EXPECT_EQ(result, expected); 21 | } 22 | 23 | // 2️⃣ Identifier 24 | TEST(LexerTest, LexesIdentifier) 25 | { 26 | std::string to_lex = "variable"; 27 | Lexer lexer(to_lex); 28 | 29 | std::vector result = lexer.Scan(); 30 | std::vector expected = { 31 | { .type = TOKEN_IDENTIFIER, .value = "variable", .line = 1 }, 32 | { .type = TOKEN_EOF, .value = "", .line = 1 }, 33 | }; 34 | 35 | EXPECT_EQ(result, expected); 36 | } 37 | 38 | // 3️⃣ Integer literal 39 | TEST(LexerTest, LexesIntegerLiteral) 40 | { 41 | std::string to_lex = "42"; 42 | Lexer 
lexer(to_lex); 43 | 44 | std::vector result = lexer.Scan(); 45 | std::vector expected = { 46 | { .type = TOKEN_NUMBER, .value = "42", .line = 1 }, 47 | { .type = TOKEN_EOF, .value = "", .line = 1 }, 48 | }; 49 | 50 | EXPECT_EQ(result, expected); 51 | } 52 | 53 | // 4️⃣ Simple arithmetic 54 | TEST(LexerTest, LexesArithmeticExpression) 55 | { 56 | std::string to_lex = "a + b * 5"; 57 | Lexer lexer(to_lex); 58 | 59 | std::vector result = lexer.Scan(); 60 | std::vector expected = { 61 | { .type = TOKEN_IDENTIFIER, .value = "a", .line = 1 }, 62 | { .type = TOKEN_PLUS, .value = "+", .line = 1 }, 63 | { .type = TOKEN_IDENTIFIER, .value = "b", .line = 1 }, 64 | { .type = TOKEN_ASTERISK, .value = "*", .line = 1 }, 65 | { .type = TOKEN_NUMBER, .value = "5", .line = 1 }, 66 | { .type = TOKEN_EOF, .value = "", .line = 1 }, 67 | }; 68 | 69 | EXPECT_EQ(result, expected); 70 | } 71 | 72 | // 5️⃣ Assignment and comparison 73 | TEST(LexerTest, LexesAssignmentAndComparison) 74 | { 75 | std::string to_lex = "x = 10; y += 5; if (x >= y) x = y;"; 76 | Lexer lexer(to_lex); 77 | 78 | std::vector result = lexer.Scan(); 79 | std::vector expected = { 80 | { .type = TOKEN_IDENTIFIER, .value = "x", .line = 1 }, 81 | { .type = TOKEN_ASSIGN, .value = "=", .line = 1 }, 82 | { .type = TOKEN_NUMBER, .value = "10", .line = 1 }, 83 | { .type = TOKEN_SEMICOLON, .value = ";", .line = 1 }, 84 | { .type = TOKEN_IDENTIFIER, .value = "y", .line = 1 }, 85 | { .type = TOKEN_PLUS_EQ, .value = "+=", .line = 1 }, 86 | { .type = TOKEN_NUMBER, .value = "5", .line = 1 }, 87 | { .type = TOKEN_SEMICOLON, .value = ";", .line = 1 }, 88 | { .type = TOKEN_IDENTIFIER, .value = "if", .line = 1 }, 89 | { .type = TOKEN_LPAREN, .value = "(", .line = 1 }, 90 | { .type = TOKEN_IDENTIFIER, .value = "x", .line = 1 }, 91 | { .type = TOKEN_GTE, .value = ">=", .line = 1 }, 92 | { .type = TOKEN_IDENTIFIER, .value = "y", .line = 1 }, 93 | { .type = TOKEN_RPAREN, .value = ")", .line = 1 }, 94 | { .type = TOKEN_IDENTIFIER, .value = 
"x", .line = 1 }, 95 | { .type = TOKEN_ASSIGN, .value = "=", .line = 1 }, 96 | { .type = TOKEN_IDENTIFIER, .value = "y", .line = 1 }, 97 | { .type = TOKEN_SEMICOLON, .value = ";", .line = 1 }, 98 | { .type = TOKEN_EOF, .value = "", .line = 1 }, 99 | }; 100 | 101 | EXPECT_EQ(result, expected); 102 | } 103 | 104 | // 6️⃣ Parentheses and punctuation 105 | TEST(LexerTest, LexesFunctionCall) 106 | { 107 | std::string to_lex = "print(a, b)"; 108 | Lexer lexer(to_lex); 109 | 110 | std::vector result = lexer.Scan(); 111 | std::vector expected = { 112 | { .type = TOKEN_IDENTIFIER, .value = "print", .line = 1 }, 113 | { .type = TOKEN_LPAREN, .value = "(", .line = 1 }, 114 | { .type = TOKEN_IDENTIFIER, .value = "a", .line = 1 }, 115 | { .type = TOKEN_COMMA, .value = ",", .line = 1 }, 116 | { .type = TOKEN_IDENTIFIER, .value = "b", .line = 1 }, 117 | { .type = TOKEN_RPAREN, .value = ")", .line = 1 }, 118 | { .type = TOKEN_EOF, .value = "", .line = 1 }, 119 | }; 120 | 121 | EXPECT_EQ(result, expected); 122 | } 123 | 124 | // 7️⃣ Handles comments 125 | TEST(LexerTest, IgnoresSingleLineComment) 126 | { 127 | std::string to_lex = "x = 1; // comment\n y = 2;"; 128 | Lexer lexer(to_lex); 129 | 130 | std::vector result = lexer.Scan(); 131 | std::vector expected = { 132 | { .type = TOKEN_IDENTIFIER, .value = "x", .line = 1 }, 133 | { .type = TOKEN_ASSIGN, .value = "=", .line = 1 }, 134 | { .type = TOKEN_NUMBER, .value = "1", .line = 1 }, 135 | { .type = TOKEN_SEMICOLON, .value = ";", .line = 1 }, 136 | { .type = TOKEN_IDENTIFIER, .value = "y", .line = 2 }, 137 | { .type = TOKEN_ASSIGN, .value = "=", .line = 2 }, 138 | { .type = TOKEN_NUMBER, .value = "2", .line = 2 }, 139 | { .type = TOKEN_SEMICOLON, .value = ";", .line = 2 }, 140 | { .type = TOKEN_EOF, .value = "", .line = 2 }, 141 | }; 142 | 143 | EXPECT_EQ(result, expected); 144 | } 145 | 146 | // 8️⃣ Handles string with escaped quotes 147 | TEST(LexerTest, HandlesEscapedQuotes) 148 | { 149 | std::string to_lex = R"("He said, 
\"hi\"")"; 150 | Lexer lexer(to_lex); 151 | 152 | std::vector result = lexer.Scan(); 153 | std::vector expected = { 154 | { .type = TOKEN_STRING, .value = "He said, \"hi\"", .line = 1 }, 155 | { .type = TOKEN_EOF, .value = "", .line = 1 }, 156 | }; 157 | 158 | EXPECT_EQ(result, expected); 159 | } 160 | 161 | // 9️⃣ Multi-line input tracking 162 | TEST(LexerTest, TracksMultipleLines) 163 | { 164 | std::string to_lex = "a = 1;\n\nb = 2;"; 165 | Lexer lexer(to_lex); 166 | 167 | std::vector result = lexer.Scan(); 168 | std::vector expected = { 169 | { .type = TOKEN_IDENTIFIER, .value = "a", .line = 1 }, 170 | { .type = TOKEN_ASSIGN, .value = "=", .line = 1 }, 171 | { .type = TOKEN_NUMBER, .value = "1", .line = 1 }, 172 | { .type = TOKEN_SEMICOLON, .value = ";", .line = 1 }, 173 | { .type = TOKEN_IDENTIFIER, .value = "b", .line = 3 }, 174 | { .type = TOKEN_ASSIGN, .value = "=", .line = 3 }, 175 | { .type = TOKEN_NUMBER, .value = "2", .line = 3 }, 176 | { .type = TOKEN_SEMICOLON, .value = ";", .line = 3 }, 177 | { .type = TOKEN_EOF, .value = "", .line = 3 }, 178 | }; 179 | 180 | EXPECT_EQ(result, expected); 181 | } 182 | 183 | // 🔟 Complex expression with operators 184 | TEST(LexerTest, LexesComplexExpression) 185 | { 186 | std::string to_lex = "count = (a + b) * 2 - 5 / c;"; 187 | Lexer lexer(to_lex); 188 | 189 | std::vector result = lexer.Scan(); 190 | std::vector expected = { 191 | { .type = TOKEN_IDENTIFIER, .value = "count", .line = 1 }, 192 | { .type = TOKEN_ASSIGN, .value = "=", .line = 1 }, 193 | { .type = TOKEN_LPAREN, .value = "(", .line = 1 }, 194 | { .type = TOKEN_IDENTIFIER, .value = "a", .line = 1 }, 195 | { .type = TOKEN_PLUS, .value = "+", .line = 1 }, 196 | { .type = TOKEN_IDENTIFIER, .value = "b", .line = 1 }, 197 | { .type = TOKEN_RPAREN, .value = ")", .line = 1 }, 198 | { .type = TOKEN_ASTERISK, .value = "*", .line = 1 }, 199 | { .type = TOKEN_NUMBER, .value = "2", .line = 1 }, 200 | { .type = TOKEN_MINUS, .value = "-", .line = 1 }, 201 | { .type = 
TOKEN_NUMBER, .value = "5", .line = 1 }, 202 | { .type = TOKEN_SLASH, .value = "/", .line = 1 }, 203 | { .type = TOKEN_IDENTIFIER, .value = "c", .line = 1 }, 204 | { .type = TOKEN_SEMICOLON, .value = ";", .line = 1 }, 205 | { .type = TOKEN_EOF, .value = "", .line = 1 }, 206 | }; 207 | 208 | EXPECT_EQ(result, expected); 209 | } 210 | -------------------------------------------------------------------------------- /src/Core/Lexer/Lexer.cpp: -------------------------------------------------------------------------------- 1 | #include "Core/Lexer/Lexer.hpp" 2 | 3 | #include "Common/Token.hpp" 4 | 5 | #include 6 | 7 | std::vector Lexer::Scan() 8 | { 9 | this->SkipWhitespace(); 10 | 11 | while (m_char != '\0') 12 | { 13 | this->Consume(); 14 | } 15 | 16 | this->AddToken(TOKEN_EOF, "", m_line); 17 | 18 | return m_tokens; 19 | } 20 | 21 | void Lexer::Consume() 22 | { 23 | if (std::isalpha(m_char) || m_char == '_') 24 | { 25 | return this->ReadIdentifier(); 26 | } 27 | 28 | if (std::isdigit(m_char)) 29 | { 30 | return this->ReadNumber(); 31 | } 32 | 33 | switch (m_char) 34 | { 35 | case ' ': 36 | case '\n': 37 | case '\t': 38 | case '\r': 39 | return this->SkipWhitespace(); 40 | case '*': 41 | if (this->Peek() == '=') 42 | { 43 | this->Next(); 44 | this->AddToken(TOKEN_MULT_EQ, "*=", m_line); 45 | } 46 | else this->AddToken(TOKEN_ASTERISK, "*", m_line); 47 | break; 48 | case '/': 49 | if (this->Peek() == '=') 50 | { 51 | this->Next(); 52 | this->AddToken(TOKEN_DIVIS_EQ, "/=", m_line); 53 | } 54 | else if (this->Peek() == '/') return this->SkipComment(false); 55 | else if (this->Peek() == '*') return this->SkipComment(true); 56 | else this->AddToken(TOKEN_SLASH, "/", m_line); 57 | break; 58 | case '+': 59 | if (this->Peek() == '=') 60 | { 61 | this->Next(); 62 | this->AddToken(TOKEN_PLUS_EQ, "+=", m_line); 63 | } 64 | else this->AddToken(TOKEN_PLUS, "+", m_line); 65 | break; 66 | case '-': 67 | if (this->Peek() == '>') 68 | { 69 | this->Next(); 70 | 
this->AddToken(TOKEN_ARROW, "->", m_line); 71 | } 72 | else if (this->Peek() == '=') 73 | { 74 | this->Next(); 75 | this->AddToken(TOKEN_MINUS_EQ, "-=", m_line); 76 | } 77 | else this->AddToken(TOKEN_MINUS, "-", m_line); 78 | break; 79 | case '%': 80 | if (this->Peek() == '=') 81 | { 82 | this->Next(); 83 | this->AddToken(TOKEN_PERCE_EQ, "%=", m_line); 84 | } 85 | else this->AddToken(TOKEN_PERCENT, "%", m_line); 86 | break; 87 | case '=': 88 | if (this->Peek() == '=') 89 | { 90 | this->Next(); 91 | this->AddToken(TOKEN_EQ, "==", m_line); 92 | } 93 | else this->AddToken(TOKEN_ASSIGN, "=", m_line); 94 | break; 95 | case '>': 96 | if (this->Peek() == '=') 97 | { 98 | this->Next(); 99 | this->AddToken(TOKEN_GTE, ">=", m_line); 100 | } 101 | else this->AddToken(TOKEN_GT, ">", m_line); 102 | break; 103 | case '<': 104 | if (this->Peek() == '=') 105 | { 106 | this->Next(); 107 | this->AddToken(TOKEN_LTE, "<=", m_line); 108 | } 109 | else this->AddToken(TOKEN_LT, "<", m_line); 110 | break; 111 | case '&': 112 | if (this->Peek() == '&') 113 | { 114 | this->Next(); 115 | this->AddToken(TOKEN_AND_AND, "&&", m_line); 116 | } 117 | else this->AddToken(TOKEN_AND, "&", m_line); 118 | break; 119 | case '|': 120 | if (this->Peek() == '|') 121 | { 122 | this->Next(); 123 | this->AddToken(TOKEN_OR_OR, "||", m_line); 124 | } 125 | else this->AddToken(TOKEN_OR, "|", m_line); 126 | break; 127 | case '^': 128 | this->AddToken(TOKEN_XOR, "^", m_line); break; 129 | case '!': 130 | if (this->Peek() == '=') 131 | { 132 | this->Next(); 133 | this->AddToken(TOKEN_NOT_EQ, "!=", m_line); 134 | } 135 | else this->AddToken(TOKEN_NOT, "!", m_line); 136 | break; 137 | case ';': 138 | this->AddToken(TOKEN_SEMICOLON, ";", m_line); break; 139 | case ':': 140 | this->AddToken(TOKEN_COLON, ":", m_line); break; 141 | case '.': 142 | this->AddToken(TOKEN_DOT, ".", m_line); break; 143 | case ',': 144 | this->AddToken(TOKEN_COMMA, ",", m_line); break; 145 | case '(': 146 | this->AddToken(TOKEN_LPAREN, "(", 
m_line); break; 147 | case ')': 148 | this->AddToken(TOKEN_RPAREN, ")", m_line); break; 149 | case '{': 150 | this->AddToken(TOKEN_LBRACE, "{", m_line); break; 151 | case '}': 152 | this->AddToken(TOKEN_RBRACE, "}", m_line); break; 153 | case '[': 154 | this->AddToken(TOKEN_LBRACKET, "[", m_line); break; 155 | case ']': 156 | this->AddToken(TOKEN_RBRACKET, "]", m_line); break; 157 | case '\'': 158 | this->Next(); 159 | if (this->Peek() != '\'') 160 | { 161 | // raise error 162 | } 163 | this->AddToken(TOKEN_CHAR, std::to_string(m_char), m_line); 164 | this->Next(); break; 165 | case '"': 166 | this->ReadString(); break; 167 | default: 168 | this->AddToken(TOKEN_ILLEGAL, std::to_string(m_char), m_line); 169 | } 170 | 171 | this->Next(); 172 | } 173 | 174 | void Lexer::Next() 175 | { 176 | size_t read_pos = m_pos + 1; 177 | 178 | if (read_pos >= m_content.length()) 179 | m_char = '\0'; 180 | else 181 | m_char = m_content[read_pos]; 182 | 183 | ++m_pos; 184 | } 185 | 186 | char Lexer::Peek() 187 | { 188 | size_t read_pos = m_pos + 1; 189 | 190 | if (read_pos >= m_content.length()) 191 | return '\0'; 192 | 193 | return m_content[read_pos]; 194 | } 195 | 196 | void Lexer::AddToken(std::uint8_t type, const std::string& value, size_t line) 197 | { 198 | m_tokens.push_back({ 199 | .type=type, 200 | .value=value, 201 | .line=line 202 | }); 203 | } 204 | 205 | // Skip repeats 206 | 207 | void Lexer::SkipComment(bool multiline) 208 | { 209 | if (multiline) 210 | { 211 | while ((m_char != '*' || this->Peek() != '/') && m_char != '\0') 212 | { 213 | this->Next(); 214 | } 215 | 216 | this->Next(); 217 | } 218 | else 219 | { 220 | while (m_char != '\n' && m_char != '\0') 221 | { 222 | this->Next(); 223 | } 224 | } 225 | } 226 | 227 | void Lexer::SkipWhitespace() 228 | { 229 | while (m_char == ' ' || m_char == '\t' || m_char == '\n' || m_char == '\r') 230 | { 231 | if (m_char == '\n') ++m_line; 232 | this->Next(); 233 | } 234 | } 235 | 236 | // --- Reading specific token types --- 
237 | 238 | void Lexer::ReadIdentifier() 239 | { 240 | size_t start = m_pos; 241 | this->Next(); 242 | 243 | while (std::isalnum(m_char) || m_char == '_') 244 | { 245 | this->Next(); 246 | } 247 | 248 | std::string identifier = m_content.substr(start, m_pos - start); 249 | // todo: check if identifier is a keyword 250 | 251 | this->AddToken(TOKEN_IDENTIFIER, identifier, m_line); 252 | } 253 | 254 | void Lexer::ReadString() 255 | { 256 | std::string lex = ""; 257 | bool escaped = false; 258 | 259 | this->Next(); 260 | 261 | while ((m_char != '"' || escaped) && m_char != '\0') 262 | { 263 | if (escaped) 264 | { 265 | lex += this->EscapeCharacter(); 266 | escaped = false; 267 | } 268 | else if (m_char == '\\') 269 | { 270 | escaped = true; 271 | } 272 | else 273 | { 274 | if (m_char == '\n') ++m_line; 275 | lex += m_char; 276 | } 277 | 278 | this->Next(); 279 | } 280 | 281 | if (m_char != '"') 282 | return this->AddToken(TOKEN_UNTERMINATED, "", m_line); 283 | 284 | this->AddToken(TOKEN_STRING, lex, m_line); 285 | 286 | this->Next(); 287 | } 288 | 289 | char Lexer::EscapeCharacter() 290 | { 291 | switch (m_char) 292 | { 293 | case 'n': return '\n'; 294 | case 't': return '\t'; 295 | case '"': return '"'; 296 | case '\\': return '\\';break; 297 | default: return m_char; 298 | } 299 | } 300 | 301 | void Lexer::ReadNumber() 302 | { 303 | size_t start = m_pos; 304 | char starting_char = m_char; 305 | bool has_decimal = false; 306 | 307 | this->Next(); 308 | 309 | while (std::isdigit(m_char) || (m_char == '.' && !has_decimal)) 310 | { 311 | if (m_char == '.') 312 | has_decimal = true; 313 | 314 | this->Next(); 315 | } 316 | 317 | std::string number = m_content.substr(start, m_pos - start); 318 | this->AddToken(TOKEN_NUMBER, number, m_line); 319 | } 320 | --------------------------------------------------------------------------------