├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── developer-cert-of-origin.txt ├── launch.sh ├── pom.xml ├── resources └── logback.xml ├── src └── org │ └── antlr │ └── v4 │ └── server │ ├── ANTLRHttpServer.java │ ├── CollectGrammarErrorsAndWarnings.java │ ├── CollectLexOrParseSyntaxErrors.java │ ├── CollectLexerGrammarErrorsAndWarnings.java │ ├── CollectParserGrammarErrorsAndWarnings.java │ ├── GrammarProcessor.java │ ├── IgnoreTokenVocabGrammar.java │ ├── JsonSerializer.java │ ├── persistent │ ├── PersistenceLayer.java │ └── cloudstorage │ │ └── CloudStoragePersistenceLayer.java │ └── unique │ ├── DummyUniqueKeyGenerator.java │ └── UniqueKeyGenerator.java ├── static ├── css │ └── style.css ├── images │ ├── antlrlogo.png │ └── helpicon.png ├── index.html └── js │ ├── antlr-client.js │ └── grammars-v4.js └── test ├── axios-post.html ├── codejar-example.html ├── t.html ├── t2.html ├── test-flex.html ├── test-layout.html └── test-treeview.html /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled class file 2 | *.class 3 | 4 | # Log file 5 | *.log 6 | 7 | # BlueJ files 8 | *.ctxt 9 | 10 | # Mobile Tools for Java (J2ME) 11 | .mtj.tmp/ 12 | 13 | # Package Files # 14 | *.jar 15 | *.war 16 | *.nar 17 | *.ear 18 | *.zip 19 | *.tar.gz 20 | *.rar 21 | 22 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 23 | hs_err_pid* 24 | target/ 25 | nohup.out 26 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM ubuntu:latest 4 | 5 | ARG LAB_VERSION=0.4-SNAPSHOT 6 | ENV LAB_VERSION=${LAB_VERSION} 7 | 8 | USER root:root 9 | 10 | WORKDIR /app 11 | 12 | RUN apt-get update && \ 13 | apt-get install --yes software-properties-common 14 | 15 | RUN apt update 16 | RUN apt install -y ghostscript pdf2svg 
texlive-extra-utils 17 | 18 | RUN apt install -y openjdk-11-jre 19 | 20 | COPY src /app/src 21 | COPY resources /app/resources 22 | COPY static /app/static 23 | COPY pom.xml /app/pom.xml 24 | 25 | # Assumes mvn install was run prior to build Dockerfile 26 | ADD target/antlr4-lab-$LAB_VERSION-complete.jar antlr4-lab-$LAB_VERSION-complete.jar 27 | ENTRYPOINT java -jar /app/antlr4-lab-$LAB_VERSION-complete.jar 28 | EXPOSE 80 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Antlr Project 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # antlr4-lab 2 | A client/server for trying out and learning about ANTLR 3 | 4 | ## Building and launching server 5 | 6 | Ubuntu with lab.antlr.org static IP 7 | 8 | ```bash 9 | cd ~ 10 | sudo apt-get update 11 | sudo apt install -y openjdk-11-jre 12 | sudo apt install -y maven 13 | sudo apt install -y git 14 | sudo apt install -y ghostscript # gets ps2pdf 15 | sudo apt install -y pdf2svg 16 | git clone https://github.com/antlr/antlr4-lab.git 17 | cd antlr4-lab 18 | mvn install 19 | 20 | sudo mkdir /var/log/antlrlab 21 | sudo chmod 777 /var/log/antlrlab 22 | ssh-keygen -t ed25519 -C 'parrt@...' # add key to github 23 | git config --global user.email "parrt@..." 24 | git config --global user.name "Terence Parr" 25 | ``` 26 | 27 | Launch! 28 | 29 | ```bash 30 | cd ~/antlr4-lab 31 | sudo nohup java -cp ~/.m2/repository/org/antlr/antlr4-lab/0.4-SNAPSHOT/antlr4-lab-0.4-SNAPSHOT-complete.jar org.antlr.v4.server.ANTLRHttpServer 32 | ``` 33 | 34 | Or to restart if it fails, do: 35 | 36 | ```bash 37 | while true 38 | do 39 | sudo java -cp ~/.m2/repository/org/antlr/antlr4-lab/0.4-SNAPSHOT/antlr4-lab-0.4-SNAPSHOT-complete.jar org.antlr.v4.server.ANTLRHttpServer 40 | sudo cp /var/log/antlrlab/antlrlab.log /var/log/antlrlab/antlrlab-died.log 41 | sleep 1 42 | done 43 | ``` 44 | 45 | which I've put into `~/antlr4-lab/launch.sh`: 46 | 47 | ```bash 48 | nohup launch.sh & 49 | ``` 50 | 51 | If you are running the server locally on your box, visit [http://localhost/index.html](http://localhost/index.html) to run the client. 52 | 53 | ### Docker 54 | 55 | I created a [Dockerfile](Dockerfile), although I'm not sure how useful it will be to people. This might be useful for deploying in the cloud later. 
56 | 57 | Here's how to build the docker file: 58 | 59 | ```bash 60 | cd antlr4-lab 61 | mvn clean package # has to be built first as docker copies in the jar 62 | docker build --tag antlr4-lab-docker . 63 | ``` 64 | 65 | and here's how to use the docker to launch: 66 | 67 | ```bash 68 | docker run -p80:80 --rm antlr4-lab-docker 69 | ``` 70 | 71 | @kaby76 reports the following: Seems to work fine. But I had to do some things to get it to work on Windows/WSL2. 72 | 73 | In Windows: Install Docker Desktop 74 | 75 | In WSL2/Ubuntu: 76 | 77 | ```bash 78 | sudo apt install docker.io 79 | git clone https://github.com/antlr/antlr4-lab.git 80 | cd antlr4-lab 81 | git checkout docker 82 | mvn clean; mvn install 83 | docker build . 84 | docker image ls # get image name. 85 | docker run -d -p 127.0.0.1:80:80 -e BIND_ADDR=0.0.0.0:80 image-name-from-above 86 | ``` 87 | 88 | In Windows again, run Firefox and connect to 127.0.0.1. 89 | 90 | It looks like docker binds port 80 to 0.0.0.0 by default (after installing `net-tools`, and doing `netstat -a`). 91 | -------------------------------------------------------------------------------- /developer-cert-of-origin.txt: -------------------------------------------------------------------------------- 1 | antlr4-lab uses the Linux Foundation's Developer 2 | Certificate of Origin, DCO, version 1.1. See either 3 | https://developercertificate.org/ or the text below. 4 | 5 | Each commit requires a "signature", which is simple as 6 | using `-s` (not `-S`) to the git commit command: 7 | 8 | git commit -s -m 'This is my commit message' 9 | 10 | Github's pull request process enforces the sig and gives 11 | instructions on how to fix any commits that lack the sig. 12 | See https://github.com/apps/dco for more info. 13 | 14 | ----- https://developercertificate.org/ ------ 15 | 16 | Developer Certificate of Origin 17 | Version 1.1 18 | 19 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
20 | 21 | Everyone is permitted to copy and distribute verbatim copies of this 22 | license document, but changing it is not allowed. 23 | 24 | 25 | Developer's Certificate of Origin 1.1 26 | 27 | By making a contribution to this project, I certify that: 28 | 29 | (a) The contribution was created in whole or in part by me and I 30 | have the right to submit it under the open source license 31 | indicated in the file; or 32 | 33 | (b) The contribution is based upon previous work that, to the best 34 | of my knowledge, is covered under an appropriate open source 35 | license and I have the right under that license to submit that 36 | work with modifications, whether created in whole or in part 37 | by me, under the same open source license (unless I am 38 | permitted to submit under a different license), as indicated 39 | in the file; or 40 | 41 | (c) The contribution was provided directly to me by some other 42 | person who certified (a), (b) or (c) and I have not modified 43 | it. 44 | 45 | (d) I understand and agree that this project and the contribution 46 | are public and that a record of the contribution (including all 47 | personal information I submit with it, including my sign-off) is 48 | maintained indefinitely and may be redistributed consistent with 49 | this project or the open source license(s) involved. 
50 | -------------------------------------------------------------------------------- /launch.sh: -------------------------------------------------------------------------------- 1 | while true 2 | do 3 | sudo java -cp ~/.m2/repository/org/antlr/antlr4-lab/0.4-SNAPSHOT/antlr4-lab-0.4-SNAPSHOT-complete.jar org.antlr.v4.server.ANTLRHttpServer 4 | sudo cp /var/log/antlrlab/antlrlab.log /var/log/antlrlab/antlrlab-died.log 5 | sleep 1 6 | done 7 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 4.0.0 3 | 4 | org.sonatype.oss 5 | oss-parent 6 | 9 7 | 8 | 9 | org.antlr 10 | antlr4-lab 11 | jar 12 | ANTLR 4 Server 13 | http://www.antlr.org 14 | The ANTLR 4 server. 15 | 16 | 0.4-SNAPSHOT 17 | 18 | 19 | 20 | MIT 21 | repo 22 | 23 | 24 | 25 | 26 | 3.8 27 | 28 | 29 | 30 | 11 31 | 11 32 | 33 | 34 | 35 | 36 | com.google.cloud 37 | google-cloud-storage 38 | 2.13.1 39 | 40 | 41 | org.antlr 42 | antlr4-runtime 43 | 4.11.1 44 | 45 | 46 | org.antlr 47 | antlr4 48 | 4.13.2 49 | 50 | 51 | org.antlr 52 | antlr-runtime 53 | 3.5.3 54 | 55 | 56 | org.antlr 57 | ST4 58 | 4.3.4 59 | 60 | 61 | org.eclipse.jetty 62 | jetty-server 63 | 11.0.11 64 | 65 | 66 | org.eclipse.jetty 67 | jetty-webapp 68 | 11.0.11 69 | 70 | 71 | org.slf4j 72 | slf4j-api 73 | 1.7.36 74 | 75 | 76 | org.slf4j 77 | slf4j-jdk14 78 | 1.8.0-beta4 79 | 80 | 81 | ch.qos.logback 82 | logback-core 83 | 1.2.11 84 | 85 | 86 | ch.qos.logback 87 | logback-classic 88 | 1.2.6 89 | 90 | 91 | us.parr 92 | parrtlib 93 | 0.5-SNAPSHOT 94 | 95 | 96 | com.google.code.gson 97 | gson 98 | 2.10 99 | 100 | 101 | 102 | src 103 | 104 | 105 | resources 106 | 107 | 108 | static 109 | static 110 | 111 | 112 | 113 | 114 | test 115 | 116 | 117 | 118 | 119 | org.apache.maven.plugins 120 | maven-source-plugin 121 | 3.2.1 122 | 123 | 124 | 125 | jar 126 | 127 | 128 | 129 | 130 | 131 | org.apache.maven.plugins 132 | 
maven-javadoc-plugin 133 | 3.3.1 134 | 135 | 11 136 | false 137 | 138 | 139 | 140 | deploy 141 | 142 | javadoc 143 | 144 | 145 | 146 | 147 | 148 | org.apache.maven.plugins 149 | maven-shade-plugin 150 | 3.2.4 151 | 152 | 153 | package 154 | 155 | false 156 | false 157 | true 158 | complete 159 | 160 | 161 | com.ibm.icu:* 162 | 163 | 164 | 165 | 166 | org.antlr.v4.server.ANTLRHttpServer 167 | 168 | org.antlr.v4.server.ANTLRHttpServer 169 | 170 | 171 | 172 | 173 | 174 | shade 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | -------------------------------------------------------------------------------- /resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ${LOG_DIR}/antlrlab.log 5 | true 6 | 7 | 8 | ${LOG_FILE}.%d{yyyy-MM-dd}.gz 9 | 10 | 30 11 | 3GB 12 | 13 | 14 | %date{yyyy-MMM-dd; HH:mm:ss.SSS} [%thread]:%n%msg%n 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/ANTLRHttpServer.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server; 2 | 3 | import com.google.gson.Gson; 4 | import com.google.gson.JsonElement; 5 | import com.google.gson.JsonObject; 6 | import com.google.gson.JsonParser; 7 | import jakarta.servlet.http.HttpServletRequest; 8 | import jakarta.servlet.http.HttpServletResponse; 9 | import org.antlr.v4.server.persistent.PersistenceLayer; 10 | import org.antlr.v4.server.persistent.cloudstorage.CloudStoragePersistenceLayer; 11 | import org.antlr.v4.server.unique.DummyUniqueKeyGenerator; 12 | import org.antlr.v4.server.unique.UniqueKeyGenerator; 13 | import org.antlr.v4.runtime.misc.ParseCancellationException; 14 | import org.eclipse.jetty.server.Server; 15 | import org.eclipse.jetty.server.ServerConnector; 16 | import org.eclipse.jetty.servlet.DefaultServlet; 17 | import org.eclipse.jetty.servlet.ServletContextHandler; 18 | 
import org.eclipse.jetty.servlet.ServletHolder; 19 | import org.eclipse.jetty.util.thread.QueuedThreadPool; 20 | import org.slf4j.LoggerFactory; 21 | 22 | 23 | import java.io.*; 24 | import java.nio.charset.StandardCharsets; 25 | import java.nio.file.Files; 26 | import java.nio.file.Path; 27 | import java.text.NumberFormat; 28 | import java.util.Optional; 29 | 30 | import static org.antlr.v4.server.GrammarProcessor.interp; 31 | 32 | public class ANTLRHttpServer { 33 | public static final String IMAGES_DIR = "/tmp/antlr-images"; 34 | 35 | public static class ParseServlet extends DefaultServlet { 36 | static final ch.qos.logback.classic.Logger LOGGER = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(ANTLRHttpServer.class); 37 | 38 | @Override 39 | public void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException { 40 | LOGGER.info("INITIATE REQUEST IP: "+request.getRemoteAddr()+ 41 | ", Content-Length: "+request.getContentLength()); 42 | logMemoryInfo("BEFORE PROCESSING FROM IP: "+request.getRemoteAddr()); 43 | JsonObject jsonResponse = new JsonObject(); 44 | try { 45 | response.setContentType("text/plain;charset=utf-8"); 46 | response.setContentType("text/html;"); 47 | response.addHeader("Access-Control-Allow-Origin", "*"); 48 | 49 | JsonObject jsonObj = JsonParser.parseReader(request.getReader()).getAsJsonObject(); 50 | 51 | String grammar = jsonObj.get("grammar").getAsString(); 52 | String lexGrammar = jsonObj.get("lexgrammar").getAsString(); // can be null 53 | String input = jsonObj.get("input").getAsString(); 54 | String startRule = jsonObj.get("start").getAsString(); 55 | 56 | StringBuilder logMsg = new StringBuilder(); 57 | logMsg.append("GRAMMAR:\n"); 58 | logMsg.append(grammar); 59 | logMsg.append("\nLEX GRAMMAR:\n"); 60 | logMsg.append(lexGrammar); 61 | logMsg.append("\nINPUT ("+input.length()+" char):\n\"\"\""); 62 | logMsg.append(input); 63 | logMsg.append("\"\"\"\n"); 64 | logMsg.append("STARTRULE: "); 65 | 
logMsg.append(startRule); 66 | logMsg.append('\n'); 67 | LOGGER.info(logMsg.toString()); 68 | 69 | if (grammar.isBlank() && lexGrammar.isBlank()) { 70 | jsonResponse.addProperty("arg_error", "missing either combined grammar or lexer and " + "parser both"); 71 | } 72 | else if (grammar.isBlank()) { 73 | jsonResponse.addProperty("arg_error", "missing parser grammar"); 74 | } 75 | else if (startRule.isBlank()) { 76 | jsonResponse.addProperty("arg_error", "missing start rule"); 77 | } 78 | else if (input.isEmpty()) { 79 | jsonResponse.addProperty("arg_error", "missing input"); 80 | } 81 | else { 82 | try { 83 | jsonResponse = interp(grammar, lexGrammar, input, startRule); 84 | } 85 | catch (ParseCancellationException pce) { 86 | StringWriter sw = new StringWriter(); 87 | PrintWriter pw = new PrintWriter(sw); 88 | pce.printStackTrace(pw); 89 | jsonResponse.addProperty("exception", pce.getMessage()); 90 | jsonResponse.addProperty("exception_trace", sw.toString()); 91 | LOGGER.warn(pce.toString()); 92 | } 93 | } 94 | } 95 | catch (Exception e) { 96 | StringWriter sw = new StringWriter(); 97 | PrintWriter pw = new PrintWriter(sw); 98 | e.printStackTrace(pw); 99 | jsonResponse.addProperty("exception", e.getMessage()); 100 | jsonResponse.addProperty("exception_trace", sw.toString()); 101 | LOGGER.error("PARSER FAILED", e); 102 | } 103 | 104 | response.setStatus(HttpServletResponse.SC_OK); 105 | PrintWriter w = response.getWriter(); 106 | w.write(new Gson().toJson(jsonResponse)); 107 | w.flush(); 108 | 109 | // Don't save SVG tree in log; usually too big 110 | JsonElement result = jsonResponse.get("result"); 111 | if ( result!=null && ((JsonObject) result).has("svgtree") ) { 112 | ((JsonObject) result).remove("svgtree"); 113 | } 114 | logMemoryInfo("AFTER PARSE FROM IP: "+request.getRemoteAddr()); 115 | LOGGER.info("RESULT:\n" + jsonResponse); 116 | } 117 | } 118 | 119 | private static void logMemoryInfo(String prefix) { 120 | Runtime.getRuntime().gc(); 121 | var fm = 
Runtime.getRuntime().freeMemory(); 122 | var tm = Runtime.getRuntime().totalMemory(); 123 | NumberFormat.getInstance().format(fm); 124 | ParseServlet.LOGGER.info(prefix + " memory: free=" + NumberFormat.getInstance().format(fm) + " bytes" + 125 | ", total=" + NumberFormat.getInstance().format(tm) + " bytes"); 126 | } 127 | 128 | public static class ShareServlet extends DefaultServlet { 129 | static final ch.qos.logback.classic.Logger LOGGER = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(ANTLRHttpServer.class); 130 | 131 | @Override 132 | public void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException { 133 | final JsonObject jsonResponse = new JsonObject(); 134 | try { 135 | response.setContentType("text/plain;charset=utf-8"); 136 | response.setContentType("text/html;"); 137 | response.addHeader("Access-Control-Allow-Origin", "*"); 138 | 139 | JsonObject jsonObj = JsonParser.parseReader(request.getReader()).getAsJsonObject(); 140 | PersistenceLayer persistenceLayer = new CloudStoragePersistenceLayer(); 141 | UniqueKeyGenerator keyGen = new DummyUniqueKeyGenerator(); 142 | Optional uniqueKey = keyGen.generateKey(); 143 | persistenceLayer.persist(new Gson().toJson(jsonResponse).getBytes(StandardCharsets.UTF_8), uniqueKey.orElseThrow()); 144 | 145 | jsonResponse.addProperty("resource_id", uniqueKey.orElseThrow()); 146 | } 147 | catch (Exception e) { 148 | StringWriter sw = new StringWriter(); 149 | PrintWriter pw = new PrintWriter(sw); 150 | e.printStackTrace(pw); 151 | jsonResponse.addProperty("exception_trace", sw.toString()); 152 | jsonResponse.addProperty("exception", e.getMessage()); 153 | 154 | } 155 | LOGGER.info("RESULT:\n" + jsonResponse); 156 | response.setStatus(HttpServletResponse.SC_OK); 157 | PrintWriter w = response.getWriter(); 158 | w.write(new Gson().toJson(jsonResponse)); 159 | w.flush(); 160 | } 161 | } 162 | 163 | public static void main(String[] args) throws Exception { 164 | new 
File(IMAGES_DIR).mkdirs(); 165 | 166 | Files.createDirectories(Path.of("/var/log/antlrlab")); 167 | QueuedThreadPool threadPool = new QueuedThreadPool(); 168 | threadPool.setMaxThreads(5); 169 | threadPool.setName("server"); 170 | 171 | Server server = new Server(threadPool); 172 | 173 | ServerConnector http = new ServerConnector(server); 174 | http.setPort(80); 175 | 176 | server.addConnector(http); 177 | 178 | ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); 179 | context.setContextPath("/"); 180 | context.addServlet(new ServletHolder(new ParseServlet()), "/parse/*"); 181 | context.addServlet(new ServletHolder(new ShareServlet()), "/share/*"); 182 | 183 | ServletHolder holderHome = new ServletHolder("static-home", DefaultServlet.class); 184 | holderHome.setInitParameter("resourceBase", "static"); 185 | holderHome.setInitParameter("dirAllowed", "true"); 186 | holderHome.setInitParameter("pathInfoOnly", "true"); 187 | context.addServlet(holderHome, "/*"); 188 | 189 | server.setHandler(context); 190 | 191 | server.start(); 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/CollectGrammarErrorsAndWarnings.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server; 2 | 3 | import com.google.gson.JsonArray; 4 | import com.google.gson.JsonObject; 5 | import org.antlr.v4.tool.ANTLRMessage; 6 | import org.antlr.v4.tool.ANTLRToolListener; 7 | import org.antlr.v4.tool.ErrorManager; 8 | import org.stringtemplate.v4.ST; 9 | 10 | class CollectGrammarErrorsAndWarnings implements ANTLRToolListener { 11 | ErrorManager errMgr; 12 | 13 | String fileName; 14 | 15 | final JsonArray errors = new JsonArray(); 16 | final JsonArray warnings = new JsonArray(); 17 | 18 | public CollectGrammarErrorsAndWarnings(ErrorManager errMgr) { 19 | this.errMgr = errMgr; 20 | } 21 | 22 | @Override 23 | public void info(String 
msg) { 24 | } 25 | 26 | @Override 27 | public void error(ANTLRMessage msg) { 28 | msg.fileName = fileName; 29 | ST msgST = errMgr.getMessageTemplate(msg); 30 | String outputMsg = msgST.render(); 31 | // hack to remove "lexer:" fileName etc... 32 | outputMsg = outputMsg.replace(": "+fileName+":", ": "); 33 | if ( errMgr.formatWantsSingleLineMessage() ) { 34 | outputMsg = outputMsg.replace('\n', ' '); 35 | } 36 | 37 | // Strip "(126)" from "error(126): " 38 | outputMsg = outputMsg.replaceAll("error\\(.*?\\)", "error"); 39 | final JsonObject jsonOutput = new JsonObject(); 40 | jsonOutput.addProperty("type", msg.getErrorType().toString()); 41 | jsonOutput.addProperty("line", msg.line); 42 | jsonOutput.addProperty("pos", msg.charPosition); 43 | jsonOutput.addProperty("msg", outputMsg); 44 | errors.add(jsonOutput); 45 | } 46 | 47 | @Override 48 | public void warning(ANTLRMessage msg) { 49 | msg.fileName = fileName; 50 | ST msgST = errMgr.getMessageTemplate(msg); 51 | String outputMsg = msgST.render(); 52 | // hack to remove "lexer:" fileName etc... 
53 | outputMsg = outputMsg.replace(": "+fileName+":", ": "); 54 | if ( errMgr.formatWantsSingleLineMessage() ) { 55 | outputMsg = outputMsg.replace('\n', ' '); 56 | } 57 | 58 | outputMsg = outputMsg.replaceAll("warning\\(.*?\\)", "warning"); 59 | final JsonObject jsonOutput = new JsonObject(); 60 | jsonOutput.addProperty("type", msg.getErrorType().toString()); 61 | jsonOutput.addProperty("line", msg.line); 62 | jsonOutput.addProperty("pos", msg.charPosition); 63 | jsonOutput.addProperty("msg", outputMsg); 64 | errors.add(jsonOutput); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/CollectLexOrParseSyntaxErrors.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server; 2 | 3 | import com.google.gson.JsonArray; 4 | import com.google.gson.JsonObject; 5 | import org.antlr.v4.runtime.*; 6 | 7 | class CollectLexOrParseSyntaxErrors extends BaseErrorListener { 8 | final JsonArray msgs = new JsonArray(); 9 | 10 | @Override 11 | public void syntaxError(Recognizer recognizer, Object offendingSymbol, 12 | int line, int charPositionInLine, 13 | String msg, 14 | org.antlr.v4.runtime.RecognitionException e) { 15 | final JsonObject err = new JsonObject(); 16 | if ( recognizer instanceof Lexer ) { 17 | int erridx = ((Lexer) recognizer)._input.index(); // where we detected error 18 | int startidx = erridx; 19 | if ( e instanceof LexerNoViableAltException ) { 20 | startidx = ((LexerNoViableAltException)e).getStartIndex(); 21 | } 22 | err.addProperty("startidx", startidx); 23 | err.addProperty("erridx", erridx); 24 | err.addProperty("line", line); 25 | err.addProperty("pos", charPositionInLine); 26 | err.addProperty("msg", msg); 27 | } 28 | else { 29 | Token startToken; 30 | Token stopToken; 31 | if ( e instanceof NoViableAltException ) { 32 | startToken = ((NoViableAltException) e).getStartToken(); 33 | stopToken = e.getOffendingToken(); 34 | } 35 
| else if ( e==null ) { 36 | startToken = stopToken = (Token)offendingSymbol; 37 | } 38 | else { 39 | startToken = stopToken = e.getOffendingToken(); 40 | } 41 | err.addProperty("startidx", startToken.getTokenIndex()); 42 | err.addProperty("stopidx", stopToken.getTokenIndex()); 43 | err.addProperty("line", line); 44 | err.addProperty("pos", charPositionInLine); 45 | err.addProperty("msg", msg); 46 | } 47 | msgs.add(err); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/CollectLexerGrammarErrorsAndWarnings.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server; 2 | 3 | import org.antlr.v4.tool.ErrorManager; 4 | 5 | public class CollectLexerGrammarErrorsAndWarnings extends CollectGrammarErrorsAndWarnings { 6 | public CollectLexerGrammarErrorsAndWarnings(ErrorManager errMgr) { 7 | super(errMgr); 8 | fileName = "lexer"; 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/CollectParserGrammarErrorsAndWarnings.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server; 2 | 3 | import org.antlr.v4.tool.ErrorManager; 4 | 5 | public class CollectParserGrammarErrorsAndWarnings extends CollectGrammarErrorsAndWarnings { 6 | public CollectParserGrammarErrorsAndWarnings(ErrorManager errMgr) { 7 | super(errMgr); 8 | fileName = "parser"; 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/GrammarProcessor.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server; 2 | 3 | import com.google.gson.JsonArray; 4 | import com.google.gson.JsonObject; 5 | import org.antlr.runtime.RecognitionException; 6 | import org.antlr.v4.Tool; 7 | import org.antlr.v4.gui.Interpreter; 8 | import 
org.antlr.v4.gui.Trees; 9 | import org.antlr.v4.runtime.*; 10 | import org.antlr.v4.runtime.atn.*; 11 | import org.antlr.v4.runtime.misc.IntegerList; 12 | import org.antlr.v4.runtime.misc.ParseCancellationException; 13 | import org.antlr.v4.runtime.tree.ParseTree; 14 | import org.antlr.v4.runtime.tree.Tree; 15 | import org.antlr.v4.tool.*; 16 | 17 | import static org.antlr.v4.gui.Interpreter.profilerColumnNames; 18 | import static org.antlr.v4.server.ANTLRHttpServer.IMAGES_DIR; 19 | import static org.antlr.v4.server.ANTLRHttpServer.ParseServlet.LOGGER; 20 | import static us.parr.lib.ParrtSys.execInDir; 21 | 22 | import java.io.*; 23 | import java.nio.file.Files; 24 | import java.nio.file.Path; 25 | import java.nio.file.Paths; 26 | import java.util.Arrays; 27 | import java.util.List; 28 | import java.util.regex.Matcher; 29 | import java.util.regex.Pattern; 30 | 31 | public class GrammarProcessor { 32 | public static final int MAX_PARSE_TIME_MS = 10 * 1000; // 10 seconds 33 | public static final int MAX_TREE_SIZE_IN_NODES = 50_000; 34 | 35 | private static class KillableGrammarParserInterpreter extends GrammarParserInterpreter { 36 | private final long creationTime = System.currentTimeMillis(); 37 | protected String startRule; 38 | 39 | public KillableGrammarParserInterpreter(Grammar g, 40 | ATN deserializedATN, 41 | String startRule, 42 | TokenStream tokenStream) { 43 | super(g, deserializedATN, tokenStream); 44 | this.startRule = startRule; 45 | } 46 | 47 | @Override 48 | protected void visitState(ATNState p) { 49 | super.visitState(p); 50 | long now = System.currentTimeMillis(); 51 | long runTimeMs = now - creationTime; 52 | if (runTimeMs > MAX_PARSE_TIME_MS) { 53 | String msg = "Parser timeout (" + MAX_PARSE_TIME_MS + "ms) in rule " + startRule; 54 | throw new ParseCancellationException(msg); 55 | } 56 | } 57 | } 58 | 59 | /** 60 | * Interpret the input according to the grammar, starting at the start rule, and return a JSON object 61 | * with errors, tokens, rule 
names, and the parse tree. 62 | */ 63 | public static JsonObject interp(String grammar, String lexGrammar, String input, String startRule) 64 | throws IOException { 65 | startRule = startRule.strip(); 66 | Grammar g = null; 67 | LexerGrammar lg = null; 68 | Tool antlrTool = new Tool(); 69 | ErrorManager errMgr = new ErrorManager(antlrTool); 70 | errMgr.setFormat("antlr"); 71 | CollectGrammarErrorsAndWarnings parselistener = new CollectParserGrammarErrorsAndWarnings(errMgr); 72 | CollectGrammarErrorsAndWarnings lexlistener = new CollectLexerGrammarErrorsAndWarnings(errMgr); 73 | final JsonArray warnings = new JsonArray(); 74 | try { 75 | if (lexGrammar != null && lexGrammar.strip().length() > 0) { 76 | lg = new LexerGrammar(lexGrammar, lexlistener); 77 | g = new IgnoreTokenVocabGrammar(null, grammar, lg, parselistener); 78 | } 79 | else { 80 | g = new IgnoreTokenVocabGrammar(null, grammar, null, parselistener); 81 | } 82 | 83 | warnings.addAll(lexlistener.warnings); 84 | warnings.addAll(parselistener.warnings); 85 | } 86 | catch (RecognitionException re) { 87 | // shouldn't get here. 
88 | LOGGER.info("Can't parse grammar"); 89 | } 90 | 91 | JsonObject result = new JsonObject(); 92 | 93 | Rule r = g.rules.get(startRule); 94 | if (r == null) { 95 | String w = "No such start rule: " + startRule; 96 | LOGGER.error(w); 97 | final JsonObject jsonError = new JsonObject(); 98 | jsonError.addProperty("msg", w); 99 | warnings.add(jsonError); 100 | } 101 | else { 102 | if (lexlistener.errors.size() == 0 && parselistener.errors.size() == 0) { 103 | result = parseAndGetJSON(g, lg, startRule, input); 104 | } 105 | } 106 | 107 | final JsonObject jsonResponse = new JsonObject(); 108 | jsonResponse.add("warnings", warnings); 109 | jsonResponse.add("parser_grammar_errors", parselistener.errors); 110 | jsonResponse.add("lexer_grammar_errors", lexlistener.errors); 111 | jsonResponse.add("result", result); 112 | return jsonResponse; 113 | } 114 | 115 | private static JsonObject parseAndGetJSON(Grammar g, LexerGrammar lg, String startRule, String input) 116 | throws IOException 117 | { 118 | CharStream charStream = CharStreams.fromStream(new StringBufferInputStream(input)); 119 | 120 | LexerInterpreter lexEngine = (lg != null) ? 
121 | lg.createLexerInterpreter(charStream) : 122 | g.createLexerInterpreter(charStream); 123 | 124 | CollectLexOrParseSyntaxErrors lexListener = new CollectLexOrParseSyntaxErrors(); 125 | lexEngine.removeErrorListeners(); 126 | lexEngine.addErrorListener(lexListener); 127 | 128 | CommonTokenStream tokens = new CommonTokenStream(lexEngine); 129 | 130 | tokens.fill(); 131 | 132 | KillableGrammarParserInterpreter parser = createGrammarParserInterpreter(g, startRule, tokens); 133 | 134 | CollectLexOrParseSyntaxErrors parseListener = new CollectLexOrParseSyntaxErrors(); 135 | parser.removeErrorListeners(); 136 | parser.addErrorListener(parseListener); 137 | parser.setProfile(true); 138 | 139 | Rule r = g.rules.get(startRule); 140 | ParseTree t = parser.parse(r.index); 141 | ParseInfo parseInfo = parser.getParseInfo(); 142 | 143 | int n = nodeCount(t); 144 | if ( n > MAX_TREE_SIZE_IN_NODES ) { 145 | var msg = "Tree size "+n+" nodes > max of "+MAX_TREE_SIZE_IN_NODES; 146 | throw new ParseCancellationException(msg); 147 | } 148 | 149 | long now = System.currentTimeMillis(); 150 | // LOGGER.info("PARSE TIME: "+(now - parser.creationTime)+"ms"); 151 | 152 | // System.out.println("lex msgs" + lexListener.msgs); 153 | // System.out.println("parse msgs" + parseListener.msgs); 154 | // 155 | // System.out.println(t.toStringTree(parser)); 156 | String[][] profileData = getProfilerTable(parser, parseInfo); 157 | 158 | TokenStream tokenStream = parser.getInputStream(); 159 | // CharStream inputStream = tokenStream.getTokenSource().getInputStream(); 160 | CharStream inputStream = null; // don't send input back to client (they have it and it can be big) 161 | return JsonSerializer.toJSON( 162 | t, 163 | Arrays.asList(parser.getRuleNames()), 164 | parser.getVocabulary(), 165 | tokenStream, 166 | inputStream, 167 | lexListener.msgs, 168 | parseListener.msgs, 169 | profileData); 170 | } 171 | 172 | /** 173 | * Copy this function from {@link Grammar} so we can override {@link 
ParserInterpreter#visitState(ATNState)} 174 | */ 175 | public static KillableGrammarParserInterpreter createGrammarParserInterpreter(Grammar g, 176 | String startRule, 177 | TokenStream tokenStream) { 178 | if (g.isLexer()) { 179 | throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar."); 180 | } 181 | // must run ATN through serializer to set some state flags 182 | IntegerList serialized = ATNSerializer.getSerialized(g.getATN()); 183 | ATN deserializedATN = new ATNDeserializer().deserialize(serialized.toArray()); 184 | 185 | return new KillableGrammarParserInterpreter(g, deserializedATN, startRule, tokenStream); 186 | } 187 | 188 | 189 | private static String[][] getProfilerTable(GrammarParserInterpreter parser, ParseInfo parseInfo) { 190 | String[] ruleNamesByDecision = new String[parser.getATN().decisionToState.size()]; 191 | for (int i = 0; i < ruleNamesByDecision.length; i++) { 192 | ruleNamesByDecision[i] = parser.getRuleNames()[parser.getATN().getDecisionState(i).ruleIndex]; 193 | } 194 | 195 | DecisionInfo[] decisionInfo = parseInfo.getDecisionInfo(); 196 | String[][] table = new String[decisionInfo.length][profilerColumnNames.length]; 197 | 198 | for (int decision = 0; decision < decisionInfo.length; decision++) { 199 | for (int col = 0; col < profilerColumnNames.length; col++) { 200 | Object colVal = Interpreter.getValue(decisionInfo[decision], ruleNamesByDecision, decision, col); 201 | table[decision][col] = colVal.toString(); 202 | } 203 | } 204 | 205 | return table; 206 | } 207 | 208 | public static String toSVG(Tree t, List ruleNames) throws IOException { 209 | long id = Thread.currentThread().getId(); 210 | String psFileName = "temp-" + id + ".ps"; 211 | String pdfFileName = "temp-" + id + ".pdf"; 212 | String svgFileName = "temp-" + id + ".svg"; 213 | Trees.writePS(t, ruleNames, Path.of(IMAGES_DIR, psFileName).toAbsolutePath().toString()); 214 | String ps = Files.readString(Path.of(IMAGES_DIR, 
psFileName)); 215 | 216 | final String regex = "%%BoundingBox: [0-9]+ [0-9]+ ([0-9]+) ([0-9]+)"; 217 | 218 | final Pattern pattern = Pattern.compile(regex, Pattern.MULTILINE); 219 | final Matcher matcher = pattern.matcher(ps); 220 | 221 | int width; 222 | int height; 223 | if (matcher.find()) { 224 | width = Integer.valueOf(matcher.group(1)); 225 | height = Integer.valueOf(matcher.group(2)); 226 | } 227 | else { 228 | LOGGER.error("Didn't match regex in PS: " + regex); 229 | width = 1000; 230 | height = 1000; 231 | } 232 | 233 | String[] results = 234 | execInDir(IMAGES_DIR, "ps2pdf", 235 | "-dDEVICEWIDTHPOINTS=" + width, 236 | "-dDEVICEHEIGHTPOINTS=" + height, 237 | psFileName, pdfFileName); 238 | 239 | if (results[1].length() > 0) { 240 | LOGGER.info("ps2pdf: " + results[1]); 241 | String msg = results[1].strip(); 242 | return "\n" + 243 | "\n" + 244 | " Can't create SVG tree; ps2pdf says: " + msg + "\n" + 245 | ""; 246 | } 247 | 248 | results = execInDir(IMAGES_DIR, "pdf2svg", pdfFileName, svgFileName); 249 | if (results[1].length() > 0) { 250 | LOGGER.info("pdf2svg: " + results[1]); 251 | String msg = results[1].strip(); 252 | return "\n" + 253 | " Can't create SVG tree; pdf2svg says: " + msg + "\n" + 254 | ""; 255 | } 256 | 257 | String svgfilename = Path.of(IMAGES_DIR, svgFileName).toAbsolutePath().toString(); 258 | String svg = new String(Files.readAllBytes(Paths.get(svgfilename))); 259 | return svg; 260 | } 261 | 262 | public static final int nodeCount(Tree t) { 263 | if (t == null) { 264 | return 0; 265 | } 266 | int n = 1; 267 | for (int i = 0; i < t.getChildCount(); i++) { 268 | n += nodeCount(t.getChild(i)); 269 | } 270 | return n; 271 | } 272 | 273 | /** A test main program for the "big" dir grammar */ 274 | public static void main(String[] args) throws IOException { 275 | new File(IMAGES_DIR).mkdirs(); 276 | var base = "/Users/parrt/antlr/code/antlr4-lab/big/"; 277 | String parserContent = Files.readString(Path.of(base + "TPSParser.g4")); 278 | String 
lexerContent = Files.readString(Path.of(base + "TPSLexer.g4")); 279 | String input = Files.readString(Path.of(base + "fonline.clc")); 280 | var json = interp(parserContent, lexerContent, input, "program"); 281 | // System.out.println(json); 282 | } 283 | } 284 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/IgnoreTokenVocabGrammar.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server; 2 | 3 | import org.antlr.runtime.RecognitionException; 4 | import org.antlr.v4.tool.ANTLRToolListener; 5 | import org.antlr.v4.tool.Grammar; 6 | 7 | class IgnoreTokenVocabGrammar extends Grammar { 8 | public IgnoreTokenVocabGrammar(String fileName, 9 | String grammarText, 10 | Grammar tokenVocabSource, 11 | ANTLRToolListener listener) 12 | throws RecognitionException { 13 | super(fileName, grammarText, tokenVocabSource, listener); 14 | } 15 | 16 | @Override 17 | public void importTokensFromTokensFile() { 18 | // don't try to import tokens files; must give me both grammars if split 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/JsonSerializer.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server; 2 | 3 | import com.google.gson.JsonArray; 4 | import com.google.gson.JsonElement; 5 | import com.google.gson.JsonObject; 6 | import com.google.gson.JsonPrimitive; 7 | import org.antlr.v4.runtime.*; 8 | import org.antlr.v4.runtime.atn.ATN; 9 | import org.antlr.v4.runtime.misc.Interval; 10 | import org.antlr.v4.runtime.tree.ErrorNode; 11 | import org.antlr.v4.runtime.tree.TerminalNode; 12 | import org.antlr.v4.runtime.tree.Tree; 13 | 14 | import java.io.IOException; 15 | import java.util.Arrays; 16 | import java.util.List; 17 | 18 | import static org.antlr.v4.gui.Interpreter.profilerColumnNames; 19 | import static 
org.antlr.v4.server.GrammarProcessor.toSVG; 20 | 21 | // TODO: Ultimately this will go into the ANTLR core and then we can remove this class 22 | 23 | /** This "class" wraps support functions that generate JSON for parse trees. 24 | * The JSON includes everything needed to reconstruct a parse tree: 25 | * 26 | * Rule names (field: "rules") 27 | * Input chars (field: "input") 28 | * Tokens (field: "tokens") 29 | * Parse tree (field: "tree"; refs rule indexes and token indexes) 30 | * 31 | * For example, given input "99" and a simple expression grammar giving parse tree 32 | * "(s (expr 99) EOF)", the full JSON (formatted by jq) looks like: 33 | * 34 | * { 35 | * "rules": [ 36 | * "s", 37 | * "expr" 38 | * ], 39 | * "input": "99", 40 | * "tokens": [ 41 | * { 42 | * "type": 3, 43 | * "line": 1, 44 | * "pos": 0, 45 | * "channel": 0, 46 | * "start": 0, 47 | * "stop": 1 48 | * }, 49 | * { 50 | * "type": -1, 51 | * "line": 1, 52 | * "pos": 2, 53 | * "channel": 0, 54 | * "start": 2, 55 | * "stop": 1 56 | * } 57 | * ], 58 | * TODO: this isn't up to date 59 | * "tree": { 60 | * "0": [ 61 | * { 62 | * "1": [ 63 | * 0 64 | * ] 65 | * }, 66 | * 1 67 | * ] 68 | * } 69 | * } 70 | * 71 | * Notice that the tree is just a series of nested references to integers, which refer to rules 72 | * and tokens. 73 | * 74 | * One potential use case: Create an ANTLR server that accepts a grammar and input as parameters then 75 | * returns JSON for the parse tree and the tokens. This can be deserialized by JavaScript in a web browser 76 | * to display the parse result. 
77 | * 78 | * To load and dump elements from Python 3: 79 | * 80 | * import json 81 | * 82 | * with open("/tmp/t.json") as f: 83 | * data = f.read() 84 | * 85 | * data = json.loads(data) 86 | * print(data['rules']) 87 | * print(data['input']) 88 | * for t in data['tokens']: 89 | * print(t) 90 | * print(data['tree']) 91 | * 92 | * @since 4.10.2 93 | */ 94 | public class JsonSerializer { 95 | /** Create a JSON representation of a parse tree and include all other information necessary to reconstruct 96 | * a printable parse tree: the rules, input, tokens, and the tree structure that refers to the rule 97 | * and token indexes. Extract all information from the parser, which is assumed to be in a state 98 | * post-parse and the object that created tree t. 99 | * 100 | * @param t The parse tree to serialize as JSON 101 | * @param recog The parser that created the parse tree and is in the post-recognition state 102 | * @return JSON representing the parse tree 103 | */ 104 | public static JsonObject toJSON(Tree t, Parser recog) throws IOException { 105 | String[] ruleNames = recog != null ? recog.getRuleNames() : null; 106 | if ( t==null || ruleNames==null ) { 107 | return null; 108 | } 109 | TokenStream tokenStream = recog.getInputStream(); 110 | CharStream inputStream = tokenStream.getTokenSource().getInputStream(); 111 | return toJSON(t, Arrays.asList(ruleNames), recog.getVocabulary(), tokenStream, inputStream, 112 | null, null, null); 113 | } 114 | 115 | /** Create a JSON representation of a parse tree and include all other information necessary to reconstruct 116 | * a printable parse tree: the rules, input, tokens, and the tree structure that refers to the rule 117 | * and token indexes. The tree and rule names are required but the token stream and input stream are optional. 
118 | */ 119 | public static JsonObject toJSON(Tree t, 120 | final List ruleNames, 121 | final Vocabulary vocabulary, 122 | final TokenStream tokenStream, 123 | final CharStream inputStream, 124 | JsonArray lexMsgs, 125 | JsonArray parseMsgs, 126 | String[][] profileData) 127 | throws IOException 128 | { 129 | if ( t==null || ruleNames==null ) { 130 | return null; 131 | } 132 | 133 | final JsonObject rootObject = new JsonObject(); 134 | 135 | final JsonArray ruleNameArray = new JsonArray(ruleNames.size()); 136 | ruleNames.forEach(ruleNameArray::add); 137 | rootObject.add("rules", ruleNameArray); 138 | 139 | if ( inputStream!=null ) { 140 | Interval allchar = Interval.of(0, inputStream.size() - 1); 141 | String input = inputStream.getText(allchar); 142 | rootObject.addProperty("input", input); 143 | } 144 | 145 | if ( vocabulary!=null ) { 146 | final JsonArray vocabularyArray = new JsonArray(vocabulary.getMaxTokenType()); 147 | for (int i = 0; i < vocabulary.getMaxTokenType(); i++) { 148 | vocabularyArray.add(vocabulary.getSymbolicName(i)); 149 | } 150 | rootObject.add("symbols", vocabularyArray); 151 | } 152 | 153 | if ( tokenStream!=null ) { 154 | final JsonArray tokenArray = new JsonArray(tokenStream.size()); 155 | for (int i = 0; i < tokenStream.size(); i++) { 156 | Token tok = tokenStream.get(i); 157 | final JsonObject jsonToken = new JsonObject(); 158 | jsonToken.addProperty("type", tok.getType()); 159 | jsonToken.addProperty("line", tok.getLine()); 160 | jsonToken.addProperty("pos", tok.getCharPositionInLine()); 161 | jsonToken.addProperty("channel", tok.getChannel()); 162 | jsonToken.addProperty("start", tok.getStartIndex()); 163 | jsonToken.addProperty("stop", tok.getStopIndex()); 164 | tokenArray.add(jsonToken); 165 | } 166 | rootObject.add("tokens", tokenArray); 167 | } 168 | 169 | rootObject.addProperty("svgtree", toSVG(t, ruleNames)); 170 | rootObject.add("tree", toJSONTree(t)); 171 | 172 | rootObject.add("lex_errors", lexMsgs); 173 | 
rootObject.add("parse_errors", parseMsgs); 174 | 175 | final JsonArray dataArray = new JsonArray(profileData.length); 176 | for (final String[] row : profileData) { 177 | final JsonArray rowArray = new JsonArray(row.length); 178 | Arrays.stream(row).forEach(rowArray::add); 179 | dataArray.add(rowArray); 180 | } 181 | 182 | final JsonArray colNameArray = new JsonArray(profilerColumnNames.length); 183 | Arrays.stream(profilerColumnNames).forEach(colNameArray::add); 184 | 185 | final JsonObject jsonProfile = new JsonObject(); 186 | jsonProfile.add("colnames", colNameArray); 187 | jsonProfile.add("data", dataArray); 188 | rootObject.add("profile", jsonProfile); 189 | 190 | return rootObject; 191 | } 192 | 193 | /** Create a JSON representation of a parse tree. The tree is just a series of nested references 194 | * to integers, which refer to rules and tokens. 195 | */ 196 | public static JsonElement toJSONTree(final Tree t) { 197 | if ( !(t instanceof RuleContext) ) { 198 | return getJSONNodeText(t); 199 | } 200 | 201 | final JsonArray kidsArray = new JsonArray(); 202 | for (int i = 0; i"); 245 | } 246 | } 247 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/persistent/PersistenceLayer.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server.persistent; 2 | 3 | import java.io.IOException; 4 | import java.security.InvalidKeyException; 5 | 6 | public interface PersistenceLayer { 7 | /** 8 | * Persists a byte buffer in the underlying storage system. 
9 | * @param buffer byte buffer 10 | * @param identifier identifier for the persisted content 11 | * @throws IOException captures IO errors 12 | */ 13 | void persist(byte[] buffer, K identifier) throws IOException; 14 | 15 | /** 16 | * Retrieves the content previously persisted under the given identifier. 17 | * @param identifier identifier for the persisted content 18 | * @return the persisted content as a byte array 19 | * @throws IOException captures IO errors 20 | * @throws InvalidKeyException when key cannot be found 21 | */ 22 | byte[] retrieve(K identifier) throws IOException, InvalidKeyException; 23 | } 24 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/persistent/cloudstorage/CloudStoragePersistenceLayer.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server.persistent.cloudstorage; 2 | 3 | import com.google.cloud.storage.BlobId; 4 | import com.google.cloud.storage.BlobInfo; 5 | import com.google.cloud.storage.Storage; 6 | import com.google.cloud.storage.StorageOptions; 7 | import org.antlr.v4.server.ANTLRHttpServer; 8 | import org.antlr.v4.server.persistent.PersistenceLayer; 9 | import org.slf4j.LoggerFactory; 10 | 11 | import java.io.IOException; 12 | import java.security.InvalidKeyException; 13 | 14 | public class CloudStoragePersistenceLayer implements PersistenceLayer { 15 | static final ch.qos.logback.classic.Logger LOGGER = 16 | (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(ANTLRHttpServer.class); 17 | 18 | public void persist(byte[] byteBuffer, String identifier) throws IOException { 19 | for (String key: System.getenv().keySet()) { 20 | LOGGER.info(key + " " + System.getenv(key)); 21 | } 22 | // The ID of your GCP project 23 | String projectId = "antlr4lab"; 24 | 25 | // The ID of your GCS bucket 26 | String bucketName = "antlr4-lab-us"; 27 | 28 | // The ID of your GCS object 29 | String objectName = identifier + ".json"; 30 | 31 | // Create the Cloud Storage service client for the project 32 | Storage storage =
StorageOptions.newBuilder().setProjectId(projectId).build().getService(); 33 | BlobId blobId = BlobId.of(bucketName, objectName); 34 | BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); 35 | storage.create(blobInfo, byteBuffer); 36 | LOGGER.info("Successfully stored " + objectName); 37 | } 38 | 39 | @Override 40 | public byte[] retrieve(String identifier) throws IOException, InvalidKeyException { 41 | return new byte[0]; 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/unique/DummyUniqueKeyGenerator.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server.unique; 2 | 3 | import java.util.Optional; 4 | import java.util.UUID; 5 | 6 | public class DummyUniqueKeyGenerator implements UniqueKeyGenerator { 7 | @Override 8 | public Optional generateKey() { 9 | return Optional.of(UUID.randomUUID().toString()); 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/org/antlr/v4/server/unique/UniqueKeyGenerator.java: -------------------------------------------------------------------------------- 1 | package org.antlr.v4.server.unique; 2 | 3 | import java.util.Optional; 4 | 5 | public interface UniqueKeyGenerator { 6 | Optional generateKey(); 7 | } 8 | -------------------------------------------------------------------------------- /static/css/style.css: -------------------------------------------------------------------------------- 1 | .main-layout { 2 | display: flex; 3 | height: 100%; 4 | } 5 | 6 | .intro { 7 | font-family: 'Arial', sans-serif; 8 | font-size: 13px; 9 | color: #404040; 10 | font-weight: normal; 11 | } 12 | 13 | .dropdown { 14 | font-family: 'Arial', sans-serif; 15 | font-size: 12px; 16 | color: #404040; 17 | font-weight: normal; 18 | border: solid .5px #7F7F7F; 19 | } 20 | 21 | .start-rule { 22 | border: solid .5px #7F7F7F; 23 | font-family: 'Inconsolata', 'Menlo', 
monospace; 24 | font-size: 12px; 25 | color: #404040; 26 | padding: 3px; 27 | white-space: pre-wrap; 28 | overflow: auto; 29 | } 30 | 31 | .editor:focus { 32 | outline: none; 33 | } 34 | 35 | .ace_editor { /* grammar and input editors */ 36 | border: solid .5px #7F7F7F; 37 | height: 100%; 38 | min-width: 300px; 39 | min-height:120px; 40 | font-family: 'Inconsolata', 'Menlo', monospace; 41 | font-size: 12px; 42 | } 43 | 44 | .drag-over { 45 | border: #94B6EB .5px dashed !important; 46 | } 47 | 48 | .drag-over::after { 49 | position: absolute; 50 | text-align: center; 51 | width: 100%; 52 | content: "Drop file here"; 53 | font-family: 'Arial', sans-serif; 54 | font-size: 24pt; 55 | color: #94B6EB; 56 | font-weight: normal; 57 | } 58 | 59 | .lexical_error_class { 60 | position: absolute; 61 | /*background-color: #D72D26;*/ 62 | background-color: #9A2E06; 63 | opacity: .3; 64 | } 65 | 66 | .syntax_error_class { 67 | position: absolute; 68 | /*background-color: #D72D26;*/ 69 | background-color: #9A2E06; 70 | opacity: .3; 71 | } 72 | 73 | .token_range_class { 74 | position: absolute; 75 | border-bottom: 2px solid grey; 76 | } 77 | 78 | .ace_gutter-cell { 79 | position: absolute; 80 | top: 0; 81 | left: 0; 82 | right: 0; 83 | background-color: white; 84 | padding-left: 1px; 85 | padding-right: 0px; 86 | font-size: 9px; 87 | color: #8C8C8C; 88 | } 89 | 90 | .ace_gutter-cell.ace_error { 91 | background-image: none; 92 | color: black; 93 | border: .5px solid red; 94 | transform: translateY(-7%); 95 | } 96 | 97 | .ace_gutter-layer { 98 | background-color: white; 99 | position: relative; 100 | width: auto; 101 | text-align: right; 102 | pointer-events: auto; 103 | height: 1000000px; 104 | contain: style size layout; 105 | } 106 | 107 | .tabs-header { 108 | font-family: 'Arial', sans-serif; 109 | font-size: 13px; 110 | color: #404040; 111 | font-weight: normal; 112 | padding-bottom: .5em; 113 | } 114 | 115 | .chunk-header { 116 | font-family: 'Arial', sans-serif; 117 | 
font-size: 13px; 118 | color: #404040; 119 | font-weight: normal; 120 | padding-bottom: .5em; 121 | } 122 | 123 | .tabs-header-selected { 124 | /*text-decoration: underline;*/ 125 | border-bottom: 1.5px solid dodgerblue; 126 | } 127 | 128 | .tab-header:hover { 129 | /*font-weight: bold;*/ 130 | background-color: #F8F6F5; 131 | cursor: pointer; 132 | } 133 | 134 | .console { 135 | border: solid .5px #7F7F7F; 136 | font-family: 'Inconsolata', 'Menlo', monospace; 137 | font-size: 12px; 138 | color: #404040; 139 | padding: 3px; 140 | overflow: auto; 141 | padding-inline-start: 0; 142 | } 143 | 144 | .error { 145 | color: #905857; 146 | } 147 | 148 | /*.mytooltip {*/ 149 | /* font-size:14px;*/ 150 | /* !*height:8px;*!*/ 151 | /* padding: 2px;*/ 152 | /* padding-left: 4px;*/ 153 | /* padding-right: 4px;*/ 154 | /* !*width:80px;*!*/ 155 | /*}*/ 156 | 157 | /*.ui-tooltip {*/ 158 | /* white-space: pre-line;*/ 159 | /*}*/ 160 | 161 | /*.ui-tabs-nav {*/ 162 | /* background: transparent;*/ 163 | /* border-width: 0px 0px 1px 0px;*/ 164 | /* -moz-border-radius: 0px;*/ 165 | /* -webkit-border-radius: 0px;*/ 166 | /* border-radius: 0px;*/ 167 | /*}*/ 168 | 169 | /*.ui-tabs-panel {*/ 170 | /* margin: 0em 0.2em 0.2em 0.2em;*/ 171 | /*}*/ 172 | 173 | 174 | ul, #treeUL { 175 | list-style-type: none; 176 | } 177 | 178 | #treeUL { 179 | margin: 0; 180 | padding: 0; 181 | overflow: auto; 182 | max-height: 15em; 183 | border: solid .5px #7F7F7F; 184 | } 185 | 186 | .tree-root { 187 | cursor: pointer; 188 | -webkit-user-select: none; /* Safari 3.1+ */ 189 | -moz-user-select: none; /* Firefox 2+ */ 190 | -ms-user-select: none; /* IE 10+ */ 191 | user-select: none; 192 | font-size: 12px; 193 | font-family: 'Inconsolata', 'Menlo', monospace; 194 | } 195 | 196 | .tree-root::before { 197 | content: "\232A"; 198 | font-size: 12px; 199 | color: darkgray; 200 | display: inline-block; 201 | margin-right: 1px; 202 | } 203 | 204 | .expanded-tree::before { 205 | content: "\FE40"; 206 | /*position: 
relative;*/ 207 | /*top: 5px;*/ 208 | font-size: 12px; 209 | color: darkgray; 210 | } 211 | 212 | .tree-token { 213 | font-size: 12px; 214 | font-family: 'Inconsolata', 'Menlo', monospace; 215 | } 216 | 217 | .nested { 218 | display: none; 219 | } 220 | 221 | .active { 222 | display: block; 223 | } 224 | 225 | .profile-table { 226 | border-collapse: collapse; 227 | /*header: 0.5px solid;*/ 228 | font-size: 12px; 229 | color: #323942; 230 | font-family: Arial, sans-serif; 231 | text-align: right; 232 | /*padding-right: 5px;*/ 233 | /*column-gap: 5px;*/ 234 | /*column-gap: 5px;*/ 235 | } 236 | 237 | .profile-table table { 238 | table-layout: fixed; 239 | } 240 | 241 | .profile-table tbody tr:nth-child(odd) { 242 | background-color: #eee; 243 | } 244 | 245 | .profile-table th { 246 | color: black; 247 | /*width: 30px;*/ 248 | /*font-weight: 400;*/ 249 | /*font-size: 12px;*/ 250 | /*font-weight: bold;*/ 251 | padding-bottom: 0.25rem; 252 | width: 14%; 253 | } 254 | 255 | .profile-table td { 256 | /*padding-left: 30px;*/ 257 | width: 14%; 258 | } 259 | 260 | /* base button properties */ 261 | [class*="-button"] { 262 | appearance: none; 263 | border: 1px solid rgba(27, 31, 35, .15); 264 | border-radius: 2px; 265 | box-shadow: rgba(27, 31, 35, .1) 0 1px 0; 266 | box-sizing: border-box; 267 | cursor: pointer; 268 | display: inline-block; 269 | font-family: 'Arial', sans-serif; 270 | font-size: 13px; 271 | line-height: 17px; 272 | /*padding: 6px 16px;*/ 273 | position: relative; 274 | text-align: center; 275 | text-decoration: none; 276 | user-select: none; 277 | -webkit-user-select: none; 278 | touch-action: manipulation; 279 | vertical-align: middle; 280 | white-space: nowrap; 281 | } 282 | 283 | .run-button { 284 | background-color: #2ea44f; 285 | color: #fff; 286 | } 287 | 288 | .profile-button { 289 | background-color: #eeeeee; 290 | } 291 | -------------------------------------------------------------------------------- /static/images/antlrlogo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/antlr/antlr4-lab/13870126f8527780e7d0118d7f2d74abd9249122/static/images/antlrlogo.png -------------------------------------------------------------------------------- /static/images/helpicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/antlr/antlr4-lab/13870126f8527780e7d0118d7f2d74abd9249122/static/images/helpicon.png -------------------------------------------------------------------------------- /static/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 12 | 13 | ANTLR Lab: learn, test, and experiment with ANTLR grammars online! 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 |
49 | 50 | 51 | 52 |
53 | 54 |
55 | Welcome to the ANTLR lab, where you can learn about ANTLR or experiment with 56 | and test grammars! Just hit the 57 | 58 | button to try out the sample grammar. 59 | 60 |

61 | To start developing with ANTLR, see 62 | getting started. 63 |

64 | Feedback/issues welcome. 65 | Brought to you by Terence Parr, the maniac behind ANTLR. 66 |

67 | Disclaimer: This website and related functionality are not meant to be used for private code, data, or other 68 | intellectual property. Assume everything you enter could become public! 69 | Grammars and input you enter are submitted to a unix box for execution and possibly persisted on disk or other mechanism. 70 | Please run antlr4-lab locally to avoid privacy concerns. 71 |

72 |
73 |
74 | 75 | 76 | 77 |
78 |
79 | Lexer 80 | Parser 81 |   82 |   83 |
84 |
85 |
86 | 87 |    88 | 89 | 90 |
91 |
92 | 93 |
94 |
95 | Input  96 |   97 |   98 |
99 |
100 |
101 | 102 | 103 | 104 |
105 |
106 | Start rule 
107 | program 108 | 109 |
110 |
111 | 112 | 113 | 114 |
115 |
116 | Tool console 117 |
118 | Parser console 119 |
120 |
121 | 122 | 123 | 124 |
125 |
126 |
127 |
128 | Tree  129 | Hierarchy 130 |
131 |
132 |
133 |
134 |
135 | 136 | 137 | 138 |
139 |
Parser profile info
140 |
141 |
142 | 143 |
144 |
145 | 146 |
147 | 148 | 149 |
150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 182 | 183 | 184 | 185 | 186 | -------------------------------------------------------------------------------- /static/js/antlr-client.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | let ANTLR_SERVICE = "/parse/"; 4 | 5 | let SAMPLE_PARSER = 6 | "parser grammar ExprParser;\n" + 7 | "options { tokenVocab=ExprLexer; }\n" + 8 | "\n" + 9 | "program\n" + 10 | " : stat EOF\n" + 11 | " | def EOF\n" + 12 | " ;\n" + 13 | "\n" + 14 | //"foo : 'a' 'abc' 'a\\'b' '\\u34ab' 'ab\\ncd' ;\n" + 15 | "stat: ID '=' expr ';'\n" + 16 | " | expr ';'\n" + 17 | " ;\n" + 18 | "\n" + 19 | "def : ID '(' ID (',' ID)* ')' '{' stat* '}' ;\n" + 20 | "\n" + 21 | "expr: ID\n" + 22 | " | INT\n" + 23 | " | func\n" + 24 | " | 'not' expr\n" + 25 | " | expr 'and' expr\n" + 26 | " | expr 'or' expr\n" + 27 | " ;\n" + 28 | "\n" + 29 | "func : ID '(' expr (',' expr)* ')' ;" 30 | 31 | let SAMPLE_LEXER = 32 | "// DELETE THIS CONTENT IF YOU PUT COMBINED GRAMMAR IN Parser TAB\n" + 33 | "lexer grammar ExprLexer;\n" + 34 | "\n" + 35 | "AND : 'and' ;\n" + 36 | "OR : 'or' ;\n" + 37 | "NOT : 'not' ;\n" + 38 | "EQ : '=' ;\n" + 39 | "COMMA : ',' ;\n" + 40 | "SEMI : ';' ;\n" + 41 | "LPAREN : '(' ;\n" + 42 | "RPAREN : ')' ;\n" + 43 | "LCURLY : '{' ;\n" + 44 | "RCURLY : '}' ;\n" + 45 | "\n" + 46 | "INT : [0-9]+ ;\n" + 47 | "ID: [a-zA-Z_][a-zA-Z_0-9]* ;\n" + 48 | "WS: [ \\t\\n\\r\\f]+ -> skip ;"; 49 | 50 | let SAMPLE_INPUT = 51 | "f(x,y) {\n" + 52 | " a = 3+foo;\n" + 53 | " x and y;\n" + 54 | "}"; 55 | 56 | 57 | function processANTLRResults(response) { 58 | let parserSession = $("#grammar").data("parserSession") 59 | let lexerSession = $("#grammar").data("lexerSession") 60 | let session = $("#input").data("session"); 61 | let I = session.getValue(); 62 | let s = $('#start').text(); 63 | 64 | if ( typeof(response.data)==="string" ) { 65 
| // Didn't parse as json 66 | console.log("Bad JSON:") 67 | console.log(response.data); 68 | $("#tool_errors").html(`BAD JSON RESPONSE
`); 69 | $("#tool_errors").show(); 70 | $("#tool_errors_header").show(); 71 | return; 72 | } 73 | 74 | removeAllMarkers(parserSession); 75 | parserSession.setAnnotations(null); 76 | removeAllMarkers(lexerSession); 77 | lexerSession.setAnnotations(null); 78 | 79 | let result = response.data.result; 80 | // console.log(result); 81 | 82 | if ( "arg_error" in response.data ) { 83 | $("#tool_errors").html(`${response.data.arg_error}
`); 84 | $("#tool_errors").show(); 85 | $("#tool_errors_header").show(); 86 | $("#parse_errors").hide(); 87 | $("#parse_errors_header").hide(); 88 | return; 89 | } 90 | 91 | if ( "exception_trace" in response.data ) { 92 | console.log("EXCEPTION:"+response.data.exception_trace) 93 | $("#tool_errors").html(`${response.data.exception_trace}
`); 94 | $("#tool_errors").show(); 95 | $("#tool_errors_header").show(); 96 | $("#parse_errors").hide(); 97 | $("#parse_errors_header").hide(); 98 | return; 99 | } 100 | 101 | showToolErrors(response); 102 | 103 | let parser_grammar_errors = response.data.parser_grammar_errors; 104 | let lexer_grammar_errors = response.data.lexer_grammar_errors; 105 | 106 | let grammarAnnotations = []; 107 | for (let ei in parser_grammar_errors) { 108 | let e = parser_grammar_errors[ei]; 109 | grammarAnnotations.push({ 110 | row: e.line-1, 111 | text: e.msg, 112 | type: "error" 113 | }); 114 | } 115 | parserSession.setAnnotations(grammarAnnotations); 116 | 117 | grammarAnnotations = []; 118 | for (let ei in lexer_grammar_errors) { 119 | let e = lexer_grammar_errors[ei]; 120 | grammarAnnotations.push({ 121 | row: e.line-1, 122 | text: e.msg, 123 | type: "error" 124 | }); 125 | } 126 | lexerSession.setAnnotations(grammarAnnotations); 127 | 128 | if ( Object.keys(result).length===0 ) { 129 | return; 130 | } 131 | 132 | showParseErrors(response); 133 | 134 | let tokens = result.tokens; 135 | let symbols = result.symbols; 136 | let lex_errors = result.lex_errors; 137 | let parse_errors = result.parse_errors; 138 | 139 | let profile = result.profile; 140 | 141 | let charToChunk = chunkifyInput(I, tokens, symbols, lex_errors, parse_errors); 142 | $("#input").data("charToChunk", charToChunk); 143 | 144 | let Range = ace.require('ace/range').Range; 145 | 146 | removeAllMarkers(session); 147 | session.setAnnotations(null); 148 | 149 | let annotations = []; 150 | for (let ei in lex_errors) { 151 | let e = lex_errors[ei]; 152 | let a = session.doc.indexToPosition(e.startidx); 153 | let b = session.doc.indexToPosition(e.erridx+1); 154 | let r = new Range(a.row, a.column, b.row, b.column); 155 | session.addMarker(r, "lexical_error_class", "text", false); 156 | annotations.push({ 157 | row: a.row, 158 | text: `${e.line}:${e.pos} ${e.msg}`, 159 | type: "error" 160 | }); 161 | } 162 | 163 | for 
(let ei in parse_errors) { 164 | let e = parse_errors[ei]; 165 | let a = session.doc.indexToPosition(tokens[e.startidx].start); 166 | let b = session.doc.indexToPosition(tokens[e.stopidx].stop+1); 167 | let r = new Range(a.row, a.column, b.row, b.column); 168 | session.addMarker(r, "syntax_error_class", "text", false); 169 | annotations.push({ 170 | row: a.row, 171 | text: `${e.line}:${e.pos} ${e.msg}`, 172 | type: "error" 173 | }); 174 | } 175 | 176 | session.setAnnotations(annotations); 177 | 178 | let tree = result.tree; 179 | let buf = ['
    ']; 180 | walk(tree, result, I, buf); 181 | buf.push('
'); 182 | $("#tree").html(buf.join('\n')) 183 | 184 | let svgtree = result.svgtree; 185 | if ( svgtree != null ) { 186 | // console.log(svgtree); 187 | let b64_svgtree = btoa(unescape(encodeURIComponent(svgtree))) 188 | $("#svgtree").html(""); 189 | } 190 | 191 | initParseTreeView(); 192 | 193 | buildProfileTableView(profile.colnames, profile.data); 194 | } 195 | 196 | function walk(t, result, input, buf) { 197 | if (t == null) return; 198 | 199 | if ( 'error' in t ) { 200 | buf.push(`
  • <error:${t.error}>
  • `); 201 | return; 202 | } 203 | 204 | let symbols = result.symbols; 205 | let rulenames = result.rules; 206 | let tokens = result.tokens; 207 | let ruleidx = t.ruleidx; 208 | let alt = t.alt; 209 | // console.log(rulenames[ruleidx]); 210 | buf.push('
  • '+rulenames[ruleidx]+'') 211 | if ( 'kids' in t && t.kids.length > 0) { 212 | buf.push('
      '); 213 | for (let i = 0; i < t.kids.length; i++) { 214 | let kid = t.kids[i]; 215 | if (typeof (kid) == 'number') { 216 | let a = tokens[kid].start; 217 | let b = tokens[kid].stop; 218 | buf.push(`
    • ${input.slice(a, b + 1)}
    • `); 219 | // console.log(`${symbols[tokens[kid].type]}:${input.slice(a, b + 1)}`); 220 | } 221 | else { 222 | walk(kid, result, input, buf); 223 | } 224 | } 225 | buf.push('
    '); 226 | } 227 | } 228 | 229 | async function run_antlr() { 230 | let parserSession = $("#grammar").data("parserSession") 231 | let lexerSession = $("#grammar").data("lexerSession") 232 | let g = parserSession.getValue() 233 | let lg = lexerSession.getValue(); 234 | let I = $("#input").data("session").getValue(); 235 | let s = $('#start').text(); 236 | 237 | $("#profile_choice").show(); 238 | 239 | await axios.post(ANTLR_SERVICE, 240 | {grammar: g, lexgrammar: lg, input: I, start: s} 241 | ) 242 | .then(processANTLRResults) 243 | .catch((error) => { 244 | if( error.response ){ 245 | console.log(error.response.data); // => the response payload 246 | } 247 | }); 248 | } 249 | 250 | function initParseTreeView() { 251 | $("#svgtreetab").show(); 252 | $("#treetab").show(); 253 | let toggler = document.getElementsByClassName("tree-root"); 254 | for (let i = 0; i < toggler.length; i++) { 255 | // add event handler to open/close 256 | toggler[i].addEventListener("click", function () { 257 | let nested = this.parentElement.querySelector(".nested"); 258 | if (nested != null) { 259 | nested.classList.toggle("active"); 260 | } 261 | this.classList.toggle("expanded-tree"); 262 | }); 263 | } 264 | } 265 | 266 | function buildProfileTableView(colnames, rows) { 267 | let table = "\n"; 268 | table += "\n"; 269 | table += " \n"; 270 | for (const name of colnames) { 271 | table += ""; 272 | } 273 | table += " \n"; 274 | table += "\n"; 275 | 276 | table += "\n"; 277 | for (const row of rows) { 278 | table += " "; 279 | for (const v of row) { 280 | table += ""; 281 | } 282 | table += "\n"; 283 | } 284 | table += "\n"; 285 | table += "
    "+name+"
    "+v+"
    \n"; 286 | $("#profile").html(table) 287 | } 288 | 289 | function chunkifyInput(input, tokens, symbols, lex_errors, parse_errors) { 290 | let charToChunk = new Array(input.length); 291 | for (let ti in tokens) { 292 | let t = tokens[ti]; 293 | let toktext = input.slice(t.start, t.stop + 1); 294 | let tooltipText = `#${ti} Type ${symbols[t.type]} Line ${t.line}:${t.pos}`; 295 | let chunk = {tooltip:tooltipText, chunktext:toktext, "start":t.start, "stop":t.stop+1}; 296 | for (let i = t.start; i <= t.stop; i++) { 297 | charToChunk[i] = chunk; 298 | } 299 | } 300 | for (let ei in lex_errors) { // set lex error tokens to just error tokens 301 | let e = lex_errors[ei]; 302 | let errtext = input.slice(e.startidx, e.erridx + 1); 303 | let chunk = {tooltip:"token recognition error", chunktext:errtext, "start":e.startidx, "stop":e.erridx+1, error:true}; 304 | for (let i = e.startidx; i <= e.erridx; i++) { 305 | charToChunk[i] = chunk; 306 | } 307 | } 308 | 309 | // chunkify skipped chars (adjacent into one chunk) 310 | let i = 0; 311 | while ( i= charToChunk.length) { 340 | ci = charToChunk.length - 1; 341 | } 342 | let chunk = charToChunk[ci]; 343 | if (chunk != null) { 344 | if ( 'error' in chunk ) { 345 | $("#tokens").html('('+chunk.tooltip+')'); 346 | } 347 | else { 348 | $("#tokens").html('('+chunk.tooltip+')') 349 | } 350 | let a = session.doc.indexToPosition(chunk.start); 351 | let b = session.doc.indexToPosition(chunk.stop); 352 | var Range = ace.Range; 353 | let r = new Range(a.row, a.column, b.row, b.column); 354 | if ( lastTokenRangeMarker !== null ) { 355 | session.removeMarker(lastTokenRangeMarker); 356 | } 357 | lastTokenRangeMarker = session.addMarker(r, "token_range_class", "text"); 358 | $("#input").data("lastTokenRangeMarker", lastTokenRangeMarker); 359 | } 360 | // console.log(pos, ci, chunk); 361 | } else { 362 | // console.log(pos, ci); 363 | } 364 | }; 365 | } 366 | 367 | function showToolErrors(response) { 368 | if 
(response.data.parser_grammar_errors.length > 0 || 369 | response.data.lexer_grammar_errors.length > 0 || 370 | response.data.warnings.length > 0) 371 | { 372 | let errors = ""; 373 | response.data.parser_grammar_errors.forEach( function(e) { 374 | errors += `${e.msg}
    `; 375 | }); 376 | response.data.lexer_grammar_errors.forEach( function(e) { 377 | errors += `${e.msg}
    `; 378 | }); 379 | response.data.warnings.forEach( function(w) { 380 | errors += `${w.msg}
    `; 381 | }); 382 | errors += "\n"; 383 | $("#tool_errors").html(errors); 384 | $("#tool_errors").show(); 385 | $("#tool_errors_header").show(); 386 | } 387 | else { 388 | $("#tool_errors").hide(); 389 | $("#tool_errors_header").hide(); 390 | } 391 | } 392 | 393 | function showParseErrors(response) { 394 | if (response.data.result.lex_errors.length > 0 || 395 | response.data.result.parse_errors.length > 0 ) 396 | { 397 | let errors = ""; 398 | response.data.result.lex_errors.forEach( function(e) { 399 | errors += `${e.line}:${e.pos} ${e.msg}
    `; 400 | }); 401 | response.data.result.parse_errors.forEach( function(e) { 402 | errors += `${e.line}:${e.pos} ${e.msg}
    `; 403 | }); 404 | errors += "\n"; 405 | $("#parse_errors").html(errors); 406 | $("#parse_errors").show(); 407 | $("#parse_errors_header").show(); 408 | } 409 | else { 410 | $("#parse_errors").hide(); 411 | $("#parse_errors_header").hide(); 412 | } 413 | } 414 | 415 | function createAceANTLRMode() { 416 | var ANTLR4HighlightRules = function() { 417 | this.$rules = { 418 | "start": [ 419 | { token : "string.single", regex : '[\'](?:(?:\\\\.)|(?:\\\\u....)|(?:[^\'\\\\]))*?[\']' }, 420 | { token : "comment.line", regex : "//.*$" }, 421 | { 422 | token : "comment", // multi line comment 423 | regex : "\\/\\*", 424 | next : "comment" 425 | }, 426 | { token: "keyword", regex: "grammar|options|header|parser|lexer|returns|fragment" }, 427 | { token: "entity.name.function", regex: "[a-z][a-zA-Z0-9_]*\\b" }, 428 | { token: "variable", regex: "[A-Z][a-zA-Z0-9_]*\\b" }, // tokens start with uppercase char 429 | { token : "punctuation.operator", regex : "\\?|\\:|\\||\\;" }, 430 | { token : "paren.lparen", regex : "[[({]" }, 431 | { token : "paren.rparen", regex : "[\\])}]" }, 432 | ], 433 | "comment" : [ 434 | { 435 | token : "comment", // closing comment 436 | regex : "\\*\\/", 437 | next : "start" 438 | }, { 439 | defaultToken : "comment" 440 | } 441 | ] 442 | }; 443 | }; 444 | 445 | var ANTLR4Mode = function() { 446 | this.HighlightRules = ANTLR4HighlightRules; 447 | }; 448 | 449 | ace.define('ace/mode/antlr4-mode', 450 | ["require", "exports", "module", "ace/lib/oop", "ace/mode/text", 451 | "ace/mode/text_highlight_rules", "ace/worker/worker_client"], 452 | function (require, exports, module) { 453 | var oop = require("ace/lib/oop"); 454 | var TextMode = require("ace/mode/text").Mode; 455 | var TextHighlightRules = require("ace/mode/text_highlight_rules").TextHighlightRules; 456 | 457 | oop.inherits(ANTLR4HighlightRules, TextHighlightRules); 458 | oop.inherits(ANTLR4Mode, TextMode); 459 | 460 | exports.Mode = ANTLR4Mode; 461 | }); 462 | } 463 | 464 | function 
createGrammarEditor() { 465 | var parserSession = ace.createEditSession(SAMPLE_PARSER); 466 | var lexerSession = ace.createEditSession(SAMPLE_LEXER); 467 | var editor = ace.edit("grammar"); 468 | 469 | $("#grammar").data("parserSession", parserSession); 470 | $("#grammar").data("lexerSession", lexerSession); 471 | $("#grammar").data("editor", editor) 472 | 473 | editor.setSession(parserSession); 474 | editor.setOptions({ 475 | theme: 'ace/theme/chrome', 476 | "highlightActiveLine": false, 477 | "readOnly": false, 478 | "showLineNumbers": true, 479 | "showGutter": true, 480 | "printMargin": false 481 | }); 482 | // $("#grammar").resize() 483 | 484 | $("#grammar").keyup(function(e) { 485 | if ( (e.key.length === 1 && !e.ctrlKey && !e.metaKey) || e.keyCode==='\n' ) { 486 | parserSession.setAnnotations(null); 487 | removeAllMarkers(parserSession); 488 | lexerSession.setAnnotations(null); 489 | removeAllMarkers(lexerSession); 490 | } 491 | }); 492 | 493 | createAceANTLRMode() 494 | parserSession.setMode("ace/mode/antlr4-mode") 495 | lexerSession.setMode("ace/mode/antlr4-mode") 496 | 497 | return editor; 498 | } 499 | 500 | function removeAllMarkers(session) { 501 | const markers = session.getMarkers(); 502 | if (markers) { 503 | const keys = Object.keys(markers); 504 | for (let item of keys) { 505 | session.removeMarker(markers[item].id); 506 | } 507 | } 508 | } 509 | 510 | function createInputEditor() { 511 | var input = ace.edit("input"); 512 | let session = ace.createEditSession(SAMPLE_INPUT); 513 | $("#input").data("session", session); 514 | $("#input").data("editor", input); 515 | input.setSession(session); 516 | input.setOptions({ 517 | theme: 'ace/theme/chrome', 518 | "highlightActiveLine": false, 519 | "readOnly": false, 520 | "showLineNumbers": true, 521 | "showGutter": true, 522 | "printMargin": false 523 | }); 524 | 525 | $("#input").on('mouseleave', function() { 526 | $("#tokens").html(""); 527 | let lastTokenRangeMarker = 
$("#input").data("lastTokenRangeMarker") 528 | session.removeMarker(lastTokenRangeMarker); 529 | }); 530 | 531 | $("#input").on('mouseup', function() { 532 | input.resize(); 533 | }); 534 | 535 | $("#input").keyup(function(e) { 536 | if (e.key.length === 1 && !e.ctrlKey && !e.metaKey) { 537 | session.setAnnotations(null); 538 | removeAllMarkers(session); 539 | } 540 | }); 541 | 542 | input.on("mousemove", mouseEventInsideInputText(session)); 543 | } 544 | 545 | function setupGrammarTabs(editor) { 546 | $("#parsertab").addClass("tabs-header-selected"); 547 | $("#lexertab").removeClass("tabs-header-selected"); 548 | 549 | $("#parsertab").click(function () { 550 | editor.setSession($("#grammar").data("parserSession")); 551 | $("#parsertab").addClass("tabs-header-selected"); 552 | $("#lexertab").removeClass("tabs-header-selected"); 553 | }); 554 | $("#lexertab").click(function () { 555 | editor.setSession($("#grammar").data("lexerSession")); 556 | $("#parsertab").removeClass("tabs-header-selected"); 557 | $("#lexertab").addClass("tabs-header-selected"); 558 | }); 559 | } 560 | 561 | function setupTreeTabs() { 562 | $("#svgtreetab").hide(); 563 | $("#treetab").hide(); 564 | $("#svgtreetab").addClass("tabs-header-selected"); 565 | $("#treetab").removeClass("tabs-header-selected"); 566 | $("#svgtree").show(); 567 | $("#tree").hide(); 568 | 569 | $("#svgtreetab").click(function () { 570 | $("#svgtree").show(); 571 | $("#tree").hide(); 572 | $("#svgtreetab").addClass("tabs-header-selected"); 573 | $("#treetab").removeClass("tabs-header-selected"); 574 | }); 575 | $("#treetab").click(function () { 576 | $("#svgtree").hide(); 577 | $("#tree").show(); 578 | $("#svgtreetab").removeClass("tabs-header-selected"); 579 | $("#treetab").addClass("tabs-header-selected"); 580 | }); 581 | } 582 | 583 | function dragOverHandler(e,whichEditor) { 584 | // Prevent default behavior (Prevent file from being opened) 585 | e.preventDefault(); 586 | e.stopPropagation(); 587 | 
$("#"+whichEditor).addClass("drag-over"); 588 | } 589 | 590 | function dragLeaveHandler(e,whichEditor) { 591 | // Prevent default behavior (Prevent file from being opened) 592 | $("#"+whichEditor).removeClass("drag-over"); 593 | } 594 | 595 | function dropHandler(e,whichEditor) { 596 | e.preventDefault(); 597 | // See https://developer.mozilla.org/en-US/docs/Web/API/HTML_Drag_and_Drop_API/File_drag_and_drop 598 | // Use DataTransferItemList interface to access the file(s) 599 | let editor = $("#"+whichEditor).data("editor"); 600 | let session = editor.session; 601 | for (let f of e.dataTransfer.items) { 602 | // If dropped items aren't files, reject them 603 | if (f.kind === 'file') { 604 | const file = f.getAsFile(); 605 | session.setAnnotations(null); 606 | removeAllMarkers(session); 607 | file.text().then((content)=> { 608 | session.setValue(content); 609 | $("#"+whichEditor).removeClass("drag-over"); 610 | }); 611 | } 612 | } 613 | } 614 | 615 | function setUpDragAndDrop() { 616 | for (let el of ["grammar", "input"]) { 617 | $("#"+el).on('dragover', (e) => { 618 | dragOverHandler(e, el); 619 | }); 620 | $("#"+el).on('dragleave', (e) => { 621 | dragLeaveHandler(e, el); 622 | }); 623 | $("#"+el).on('drop', (e) => { 624 | dropHandler(e.originalEvent, el); 625 | }); 626 | } 627 | } 628 | 629 | // MAIN 630 | $(document).ready(function() { 631 | String.prototype.sliceReplace = function (start, end, repl) { 632 | return this.substring(0, start) + repl + this.substring(end); 633 | }; 634 | 635 | $(document).tooltip(); 636 | 637 | var editor = createGrammarEditor(); 638 | setupGrammarTabs(editor); 639 | createInputEditor(); 640 | 641 | setupTreeTabs(); 642 | 643 | $("#profile_choice").hide(); 644 | $("#profile_header").hide(); 645 | $("#profile").hide(); 646 | $("#profile_choice").click(function () { 647 | if ( $("#profile_choice").text().startsWith("Show") ) { 648 | $("#profile_choice").text("Hide profiler"); 649 | $("#profile_header").show(); 650 | 
$("#profile").show(); 651 | } 652 | else { 653 | $("#profile_choice").text("Show profiler"); 654 | $("#profile_header").hide(); 655 | $("#profile").hide(); 656 | } 657 | }); 658 | 659 | $("#tool_errors").hide(); 660 | $("#parse_errors").hide(); 661 | $("#tool_errors_header").hide(); 662 | $("#parse_errors_header").hide(); 663 | 664 | setUpDragAndDrop(); 665 | setupSelectGrammarTable(); 666 | }); 667 | -------------------------------------------------------------------------------- /static/js/grammars-v4.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | let GRAMMAR_INDEX = "https://raw.githubusercontent.com/antlr/grammars-v4/master/grammars.json" 4 | 5 | async function selectGrammar() { 6 | // Find. 7 | let name = $("#selectgrammar option:selected" ).text(); 8 | let grammars = $("#selectgrammar").data("grammars") 9 | const found = grammars.find((element) => { return element.name === name; }); 10 | // Set grammar. 11 | if ( found && found.name!=="Sample" ) { 12 | if (found.lexer !== "") { 13 | await axios.get(found.lexer).then(function (response) { 14 | $("#grammar").data("lexerSession").setValue(response.data); 15 | $("#grammar").data("editor").setSession($("#grammar").data("lexerSession")); // force redraw. 16 | $("#parsertab").removeClass("tabs-header-selected"); 17 | $("#lexertab").addClass("tabs-header-selected"); 18 | }); 19 | } 20 | else { 21 | $("#grammar").data("lexerSession").setValue(""); 22 | $("#grammar").data("editor").setSession($("#grammar").data("lexerSession")); // force redraw. 23 | $("#parsertab").removeClass("tabs-header-selected"); 24 | $("#lexertab").addClass("tabs-header-selected"); 25 | } 26 | await axios.get(found.parser).then(function (response) { 27 | $("#grammar").data("parserSession").setValue(response.data); 28 | $("#grammar").data("editor").setSession($("#grammar").data("parserSession")); // force redraw. 
29 | $("#parsertab").addClass("tabs-header-selected"); 30 | $("#lexertab").removeClass("tabs-header-selected"); 31 | }); 32 | let prefix = "https://raw.githubusercontent.com/antlr/grammars-v4/master/"; 33 | let trunc = found.parser.substring(prefix.length); 34 | // remove parser grammar file name, assume that it's 35 | // the root (which is wrong!). 36 | let last = trunc.lastIndexOf("/"); 37 | let x = trunc.substring(0, last); 38 | let fname = prefix + x + "/examples/" + found.example[0]; 39 | await axios.get(fname).then(function (response) { 40 | $("#input").data("session").setValue(response.data); 41 | }); 42 | $("#start").text(found.start); 43 | setupInputDropDownForGrammar(found); 44 | } 45 | else { 46 | $("#grammar").data("lexerSession").setValue(SAMPLE_LEXER); 47 | $("#grammar").data("parserSession").setValue(SAMPLE_PARSER); 48 | $("#input").data("session").setValue(SAMPLE_INPUT); 49 | $("#start").text("program"); 50 | $("#grammar").data("editor").setSession($("#grammar").data("parserSession")); // force redraw. 51 | $("#parsertab").addClass("tabs-header-selected"); 52 | $("#lexertab").removeClass("tabs-header-selected"); 53 | setupInputDropDownForGrammar(grammars[0]); 54 | } 55 | let session = $("#input").data("session"); 56 | session.setAnnotations(null); 57 | removeAllMarkers(session); 58 | let parserSession = $("#grammar").data("parserSession"); 59 | parserSession.setAnnotations(null); 60 | removeAllMarkers(parserSession); 61 | let lexerSession = $("#grammar").data("lexerSession"); 62 | lexerSession.setAnnotations(null); 63 | removeAllMarkers(lexerSession); 64 | $("#input").data("charToChunk", null); 65 | } 66 | 67 | async function selectInput() { 68 | // Find grammar. 69 | let name = $("#selectgrammar option:selected" ).text(); 70 | let grammars = $("#selectgrammar").data("grammars") 71 | const found_grammar = grammars.find((element) => { return element.name === name; }); 72 | // Find selected input. 
73 | name = $("#selectinput option:selected" ).text(); 74 | let select = $("#selectinput").get(0); 75 | let j, L = select.options.length - 1; 76 | let found = false; 77 | for(j = L; j >= 0; j--) { 78 | let option = select.options[j]; 79 | if (option.selected) { 80 | // Set input. 81 | let x = option.value; 82 | let prefix = "https://raw.githubusercontent.com/antlr/grammars-v4/master/"; 83 | let trunc = found_grammar.parser.substring(prefix.length); 84 | // remove parser grammar file name, assume that it's 85 | // the root (which is wrong!). 86 | let last = trunc.lastIndexOf("/"); 87 | let y = trunc.substring(0, last); 88 | let url = prefix + y + "/examples/" + x; 89 | await axios.get(url).then(function (response) { 90 | $("#input").data("session").setValue(response.data); 91 | }); 92 | $("#start").text(found.start); 93 | found = true; 94 | } 95 | } 96 | if ( found ) { 97 | let session = $("#input").data("session"); 98 | session.setAnnotations(null); 99 | removeAllMarkers(session); 100 | let parserSession = $("#grammar").data("parserSession"); 101 | parserSession.setAnnotations(null); 102 | removeAllMarkers(parserSession); 103 | let lexerSession = $("#grammar").data("lexerSession"); 104 | lexerSession.setAnnotations(null); 105 | removeAllMarkers(lexerSession); 106 | $("#input").data("charToChunk", null); 107 | } 108 | } 109 | 110 | function setupInputDropDownForGrammar(grammar) { 111 | let selectInput = $("#selectinput").get(0); 112 | // remove all previous entries in the "input" select control. 
113 | let j, L = selectInput.options.length - 1; 114 | for(j = L; j >= 0; j--) { 115 | selectInput.remove(j); 116 | } 117 | selectInput.selectedIndex = 0 118 | if ( grammar==="Sample" ) { 119 | selectInput.options[i] = new Option("sample", SAMPLE_INPUT); 120 | return; 121 | } 122 | let i = 0; 123 | for (const e of grammar.example) { 124 | let opt = new Option(e, e); 125 | selectInput.options[i] = opt; 126 | i++; 127 | } 128 | } 129 | 130 | function loadGrammarIndex(response) { 131 | let grammars = response.data; 132 | grammars.sort(function(a, b) { 133 | let fa = a.name.toLowerCase(), 134 | fb = b.name.toLowerCase(); 135 | if (fa < fb) { 136 | return -1; 137 | } 138 | if (fa > fb) { 139 | return 1; 140 | } 141 | return 0; 142 | }); 143 | // Add default sample first 144 | grammars.unshift({ 145 | name: "Sample", 146 | lexer: "", 147 | parser: "ExprParser.g4", 148 | start: "program", 149 | example: [ 150 | "sample.expr" 151 | ] 152 | }) 153 | $("#selectgrammar").data("grammars", grammars); // save grammar index in dropdown element 154 | let selectGrammar = $("#selectgrammar").get(0); 155 | let i = 0; 156 | // Enter in hardwired "Expr" contained in this code. 
157 | for (const g of grammars) { 158 | let opt = new Option(g.name, g.name); 159 | selectGrammar.options[i] = opt; 160 | i++; 161 | } 162 | setupInputDropDownForGrammar(grammars[0]); 163 | } 164 | 165 | async function setupSelectGrammarTable() { 166 | await axios.get(GRAMMAR_INDEX) 167 | .then(loadGrammarIndex) 168 | .catch((error) => { 169 | if( error.response ){ 170 | console.log(error.response.data); // => the response payload 171 | } 172 | }); 173 | } 174 | -------------------------------------------------------------------------------- /test/axios-post.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /test/codejar-example.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 25 | 26 |
    27 | (defun fibonacci (n) 28 | (loop repeat n 29 | for a = 0 then b 30 | and b = 1 then (+ a b) 31 | finally (return b))) 32 | (defun -reverse (list) 33 | (let ((value '())) 34 | (dolist (e list) (push e value)) 35 | value)) 36 |
    37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /test/t.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 31 | 32 | 33 | 34 |

    35 | 63 | 64 | 66 |

    67 |

    68 | 71 |

    72 | 73 |
    74 |
    75 | 76 | 77 |
    78 |
    79 |
    80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /test/t2.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
    15 | Parser  16 | Lexer 17 |
    18 |
    parser grammar ExprParser; 19 | options { tokenVocab=ExprLexer; } 20 | 21 | program 22 | : stat EOF 23 | | def EOF 24 | ; 25 | 26 | stat: ID '=' expr ';' 27 | | expr ';' 28 | ; 29 | 30 | def : ID '(' ID (',' ID)* ')' '{' stat* '}' ; 31 | 32 | expr: ID 33 | | INT 34 | | func 35 | | 'not' expr 36 | | expr 'and' expr 37 | | expr 'or' expr 38 | ; 39 | 40 | func : ID '(' expr (',' expr)* ')' ; 41 |
    42 | 43 |
    44 | lexer grammar ExprLexer; 45 | 46 | AND : 'and' ; 47 | OR : 'or' ; 48 | NOT : 'not' ; 49 | EQ : '=' ; 50 | COMMA : ',' ; 51 | SEMI : ';' ; 52 | LPAREN : '(' ; 53 | RPAREN : ')' ; 54 | LCURLY : '{' ; 55 | RCURLY : '}' ; 56 | 57 | INT : [0-9]+ ; 58 | ID: [a-zA-Z_][a-zA-Z_0-9]* ; 59 | WS: [ \t\n\r\f]+ -> skip ; 60 |
    61 | 62 | 63 | 64 | 65 | 66 | 67 |
    68 | Input 69 |
    xy = 3+foo;
    70 | 71 |
    72 | Start rule
    73 | program 74 | 75 |
    76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 |
    85 | Parse tree 86 |
    87 | 88 |
    Parser profile info
    89 |
    90 |
    91 | Tool console 92 |
    93 | Parser console 94 |
    95 | 96 | 108 | 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /test/test-flex.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 28 | 29 | 32 | 33 | 34 | 35 | 36 |
    37 |
    38 | 39 |
    parser grammar ExprParser; 40 | options { tokenVocab=ExprLexer; } 41 | 42 | program 43 | : stat EOF 44 | | def EOF 45 | ; 46 | 47 | stat: ID '=' expr ';' 48 | | expr ';' 49 | ; 50 | 51 | def : ID '(' ID (',' ID)* ')' '{' stat* '}' ; 52 | 53 | expr: ID 54 | | INT 55 | | func 56 | | 'not' expr 57 | | expr 'and' expr 58 | | expr 'or' expr 59 | ; 60 | 61 | func : ID '(' expr (',' expr)* ')' ; 62 |
    63 |
    64 | 65 |
    66 |
    x = f(3,4); 67 |
    68 |
    69 | 70 |
    71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /test/test-layout.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 39 | 40 | 43 | 44 | 45 | 46 | 47 |
    48 |
    49 | 50 |
    parser grammar ExprParser; 51 | options { tokenVocab=ExprLexer; } 52 | 53 | program 54 | : stat EOF 55 | | def EOF 56 | ; 57 | 58 | stat: ID '=' expr ';' 59 | | expr ';' 60 | ; 61 | 62 | def : ID '(' ID (',' ID)* ')' '{' stat* '}' ; 63 | 64 | expr: ID 65 | | INT 66 | | func 67 | | 'not' expr 68 | | expr 'and' expr 69 | | expr 'or' expr 70 | ; 71 | 72 | func : ID '(' expr (',' expr)* ')' ; 73 |
    74 |
    lexer grammar ExprParser; 75 | options { tokenVocab=ExprLexer; } 76 | 77 | program 78 | : stat EOF 79 | | def EOF 80 | ; 81 | 82 | stat: ID '=' expr ';' 83 | | expr ';' 84 | ; 85 | 86 | def : ID '(' ID (',' ID)* ')' '{' stat* '}' ; 87 | 88 | expr: ID 89 | | INT 90 | | func 91 | | 'not' expr 92 | | expr 'and' expr 93 | | expr 'or' expr 94 | ; 95 | 96 | func : ID '(' expr (',' expr)* ')' ; 97 |
    98 | 99 |
    100 | 101 |
    102 |
    x = f(3,4); 103 |
    104 |
    105 | 106 |
    107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /test/test-treeview.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 52 | 66 | 67 | 68 | 69 | 70 |

    Tree View

    71 |

    A tree view represents a hierarchical view of information, where each item can have a number of subitems.

    72 |

    Click on the box(es) to open or close the tree branches.

    73 | 74 |
      75 |
    • program 76 |
        77 |
      • stat 78 |
          79 |
        • ID:a
        • 80 |
        • EQ:=
        • 81 |
        • expr 82 |
            83 |
          • INT:3
          • 84 |
          85 |
        • SEMI:;
        • 86 |
        87 |
      • EOF
      • 88 |
      89 |
    90 | 91 | 92 | 93 | 94 | 95 | --------------------------------------------------------------------------------