├── .eslintignore
├── .eslintrc.yml
├── .gitattributes
├── .github
├── pull_request_template.md
└── workflows
│ ├── crawling.yml
│ └── specified_crawling.yml
├── .gitignore
├── .prettierignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── intermediate.pem
├── package-lock.json
├── package.json
├── requirements.txt
├── src
├── Parse.py
├── Revise.py
├── debug.ts
├── index.ts
├── log.ts
├── matrix.json
├── serve.py
├── steps
│ ├── descriptions
│ │ ├── attach.ts
│ │ └── parse.ts
│ ├── details.ts
│ ├── download.ts
│ ├── index.ts
│ ├── list.ts
│ ├── parse.ts
│ ├── prereqs
│ │ ├── attach.ts
│ │ ├── grammar
│ │ │ ├── Prerequisites.g4
│ │ │ ├── Prerequisites.interp
│ │ │ ├── Prerequisites.tokens
│ │ │ ├── PrerequisitesLexer.interp
│ │ │ ├── PrerequisitesLexer.tokens
│ │ │ ├── PrerequisitesLexer.ts
│ │ │ ├── PrerequisitesListener.ts
│ │ │ ├── PrerequisitesParser.ts
│ │ │ ├── PrerequisitesVisitor.ts
│ │ │ └── README.md
│ │ └── parse.ts
│ ├── write.ts
│ └── writeIndex.ts
├── types.ts
└── utils.ts
├── tsconfig.json
└── yarn.lock
/.eslintignore:
--------------------------------------------------------------------------------
1 | src/steps/prereqs/grammar/**/*
2 |
--------------------------------------------------------------------------------
/.eslintrc.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Style configuration
3 | extends:
4 | - airbnb-base
5 | - plugin:import/errors
6 | - plugin:import/warnings
7 | - plugin:import/typescript
8 | - prettier
9 | - plugin:@typescript-eslint/recommended
10 |
11 | plugins:
12 | - "@typescript-eslint/eslint-plugin"
13 | - import
14 |
15 | rules:
16 | # Configure required extensions
17 | import/extensions:
18 | - warn
19 | - ts: never
20 |
21 | # Too pedantic / sometimes encourages Node anti-patterns
22 | import/prefer-default-export: off
23 | no-await-in-loop: 0
24 | no-restricted-syntax: 0
25 | class-methods-use-this: 0
26 | no-plusplus:
27 | - error
28 | - allowForLoopAfterthoughts: true
29 |
30 | # Configure no-use-before-define
31 | no-use-before-define: 0
32 | "@typescript-eslint/no-use-before-define":
33 | - error
34 | - typedefs: false
35 | ignoreTypeReferences: true
36 | functions: false
37 | classes: false
38 |
39 | # ESLint Project Settings
40 | settings:
41 | import/parsers:
42 | "@typescript-eslint/parser":
43 | - ".ts"
44 | env:
45 | es6: true
46 | node: true
47 | parser: "@typescript-eslint/parser"
48 | parserOptions:
49 | ecmaVersion: 2020
50 | sourceType: module
51 | ecmaFeatures:
52 | modules: true
53 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Ignore everything in src/steps/prereqs/grammar because it is generated,
2 | # except for Prerequisites.g4 and README.md
3 | src/steps/prereqs/grammar/* linguist-generated
4 | src/steps/prereqs/grammar/Prerequisites.g4 -linguist-generated
5 | src/steps/prereqs/grammar/README.md -linguist-generated
6 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | Resolves #[issue-number]
2 |
--------------------------------------------------------------------------------
/.github/workflows/crawling.yml:
--------------------------------------------------------------------------------
1 | name: Crawling
2 | on:
3 | push:
4 | branches:
5 | - main
6 | schedule:
7 | - cron: "*/30 * * * *"
8 | concurrency:
9 | group: crawling
10 |
11 | jobs:
12 | crawling:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@v2
17 | with:
18 | persist-credentials: false
19 |
20 | - name: Checkout data
21 | uses: actions/checkout@v2
22 | with:
23 | persist-credentials: false
24 | ref: gh-pages
25 | path: ./data
26 |
27 | - name: Install
28 | run: yarn install --frozen-lockfile
29 |
30 | - name: Pip
31 | uses: actions/setup-python@v4
32 | with:
33 | python-version: '3.9'
34 | cache: 'pip' # caching pip dependencies
35 |
36 | - name: Pip Install
37 | run: pip install -r requirements.txt
38 |
39 | - name: Crawling
40 | run: yarn start
41 | env:
42 | LOG_FORMAT: json
43 | NUM_TERMS: 2
44 | ALWAYS_SCRAPE_CURRENT_TERM: 1
45 | DETAILS_CONCURRENCY: 256
46 | DATA_FOLDER: ./data
47 | NODE_EXTRA_CA_CERTS: ${{ github.workspace }}/intermediate.pem
48 |
49 | - name: Revision
50 | run: python ./src/Revise.py
51 |
52 | - name: Upload
53 | uses: JamesIves/github-pages-deploy-action@releases/v4
54 | with:
55 | token: ${{ secrets.CRAWLER_DEPLOY_PERSONAL_ACCESS_TOKEN }}
56 | branch: gh-pages
57 | folder: ./data
58 | clean: true
59 | single-commit: true
60 | git-config-name: gt-scheduler-bot
61 | git-config-email: 89671168+gt-scheduler-bot@users.noreply.github.com
62 |
--------------------------------------------------------------------------------
/.github/workflows/specified_crawling.yml:
--------------------------------------------------------------------------------
1 | name: Specified Crawling
2 | on:
3 | workflow_dispatch:
4 | inputs:
5 | term:
6 | description: 'Enter terms to scrape, separated by commas'
7 | type: string
8 | required: true
9 | concurrency:
10 | group: crawling
11 |
12 | jobs:
13 | crawling:
14 | concurrency: ci-${{ github.ref }}
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Checkout
18 | uses: actions/checkout@v2
19 | with:
20 | persist-credentials: false
21 |
22 | - name: Checkout data
23 | uses: actions/checkout@v2
24 | with:
25 | persist-credentials: false
26 | ref: gh-pages
27 | path: ./data
28 |
29 | - name: Install
30 | run: yarn install --frozen-lockfile
31 |
32 | - name: Pip
33 | uses: actions/setup-python@v4
34 | with:
35 | python-version: '3.9'
36 | cache: 'pip' # caching pip dependencies
37 |
38 | - name: Pip Install
39 | run: pip install -r requirements.txt
40 |
41 | - name: Crawling
42 | run: yarn start
43 | env:
44 | LOG_FORMAT: json
45 | NUM_TERMS: 1
46 | SPECIFIED_TERM: ${{ inputs.term }}
47 | ALWAYS_SCRAPE_CURRENT_TERM: 0
48 | DETAILS_CONCURRENCY: 256
49 | DATA_FOLDER: ./data
50 | NODE_EXTRA_CA_CERTS: ${{ github.workspace }}/intermediate.pem
51 |
52 | - name: Revision
53 | run: python ./src/Revise.py
54 |
55 | - name: Upload
56 | uses: JamesIves/github-pages-deploy-action@releases/v4
57 | with:
58 | token: ${{ secrets.CRAWLER_DEPLOY_PERSONAL_ACCESS_TOKEN }}
59 | branch: gh-pages
60 | folder: ./data
61 | clean: true
62 | single-commit: true
63 | git-config-name: gt-scheduler-bot
64 | git-config-email: 89671168+gt-scheduler-bot@users.noreply.github.com
65 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | .idea
3 | data
4 | .DS_Store
5 | .antlr
6 | *.log
7 | .vscode
8 | */__pycache__
9 | venv
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | src/steps/prereqs/grammar/**/*
2 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | We welcome community contributions to the GT Scheduler project! Please read each of the following steps below to see how you can get started.
4 |
5 | 1. Open or assign yourself to an Issue from the GT Scheduler [website repo](https://github.com/gt-scheduler/website). Note that Issues for the [crawler](https://github.com/gt-scheduler/crawler) and [backend](https://github.com/gt-scheduler/backend) repositories are created on the website repo (and are tagged with the `cross cutting` tag) in order to track all bugs/feature requests in a single place.
6 |
7 | A rough overview of the tagging convention is as follows:
8 |
9 | - An Issue with the `bug` tag is a bug report that should include details on how to reproduce the buggy behavior
10 | - An Issue with the `feature request` tag is a feature request that can be addressed:
11 | - directly with an implementation pull request
12 | - or by using a separate (informal) RFC (request for comments) Issue tagged with `rfc` to facilitate design discussion and lead to an eventual implementation.
13 | - An Issue with the `task` tag is designed to be a well-scoped unit of work made by the project maintainers, usually in the context of [Bits of Good](https://bitsofgood.org/) development. However, if an Issue with the `task` tag also has the `work needed` tag, feel free to assign yourself to it and work on it.
14 |
15 | 1. If you are a maintainer or Bits of Good contributor with write access to the repository, feel free to create a branch directly using the branching conventions described here. For outside contributors, please fork the appropriate code repository that you are working on to your personal account.
16 |
17 | Then, please create a new branch named using the following format:
18 |
19 | ```
20 | [your-first-name]/[issue-#]-[slug]
21 | ```
22 |
23 | For example, if...
24 |
25 | - my first name was `George`
26 | - I assigned myself to Issue `#57`
27 | - Issue #57 is titled `"Add email support"`
28 |
29 | Then I would name my branch:
30 |
31 | ```
32 | george/57-email-support
33 | ```
34 |
35 | 1. Do whatever the Issue asks in your branch (whether that be implementing a new feature or fixing an existing bug)
36 | 1. Make a PR between your branch and the primary branch (`main`) that links to the Issue you want to resolve.
37 | 1. Get at least one other contributor to review your PR and make comments when necessary.
38 | 1. Merge your PR once it has been approved!
39 |
40 | If you have any questions or concerns, feel free to [open a discussion](https://github.com/gt-scheduler/website/discussions) on the GT Scheduler website repository, or for Bits of Good contributors, feel free to ask a question in the #gt-scheduler channel within the Hack4Impact slack.
41 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GT Schedule Crawler
2 |
3 | > A periodic web crawler to feed course data into [GT Scheduler](https://bitsofgood.org/scheduler).
4 |
5 | Sample: [202302.json](https://gt-scheduler.github.io/crawler-v2/202302.json)
6 |
7 | To report a bug or request a new feature, please [create a new Issue in the GT Scheduler website repository](https://github.com/gt-scheduler/website/issues/new/choose).
8 |
9 | ## 📃 License & Copyright Notice
10 |
11 | This work is a derivative of the original and spectacular [GT Schedule Crawler](https://github.com/64json/gt-schedule-crawler) project created by Jinseo Park (as a part of the overall [GT Scheduler](https://github.com/64json/gt-scheduler) project). The original work and all modifications are licensed under the [AGPL v3.0](https://github.com/64json/gt-scheduler/blob/master/LICENSE) license.
12 |
13 | ### Original Work
14 |
15 | Copyright (c) 2020 Jinseo Park (parkjs814@gmail.com)
16 |
17 | ### Modifications
18 |
19 | Copyright (c) 2020 the Bits of Good "GT Scheduler" team
20 |
21 | ## 🔍 Overview
22 |
23 | The crawler is a command-line application written in [TypeScript](https://www.typescriptlang.org/) (a typed superset of JavaScript) that runs using Node.js to crawl schedule data from [Oscar](https://oscar.gatech.edu/) (Georgia Tech's registration management system).
24 |
25 | It operates as a series of _steps_ that are processed one after another (see [`src/index.ts`](/src/index.ts)) for each current "term" (combination of year and semester, i.e. Fall 2021).
26 |
27 | In order to process the prerequisites data for each course (which comes in the form of a string like "Undergraduate Semester level CS 2340 Minimum Grade of C and Undergraduate Semester level LMC 3432 Minimum Grade of C" that can become much more complex), the crawler also utilizes an [ANTLR](https://www.antlr.org/) grammar and generated parser in order to convert the prerequisites data retrieved from Oscar into a normalized tree structure. The grammar itself and the generated parser/lexer code can be found in the [`src/steps/prereqs/grammar`](/src/steps/prereqs/grammar) folder.
28 |
29 | The crawler is run every 30 minutes using a [GitHub Action workflow](/.github/workflows/crawling.yml), which then publishes the resultant JSON to the `gh-pages` branch where it can be downloaded by the frontend app: https://gt-scheduler.github.io/crawler/202008.json.
30 |
31 | ## 🚀 Running Locally
32 |
33 | - [Node.js](https://nodejs.org/en/) (any recent version will probably work)
34 | - Installation of the [`yarn` package manager](https://classic.yarnpkg.com/en/docs/install/) **version 1** (support for version 2 is untested)
35 |
36 | ### Running the crawler
37 |
38 | After cloning the repository to your local computer, run the following command in the repo folder:
39 |
40 | ```
41 | yarn install
42 | ```
43 |
44 | This may take a couple minutes and will create a new folder called `node_modules` with all of the dependencies installed within. This only needs to be run once.
45 |
46 | Then, to run the crawler, run:
47 |
48 | ```
49 | yarn start
50 | ```
51 |
52 | If you are using Windows use
53 |
54 | ```
55 | yarn start:windows
56 | ```
57 |
58 | After the crawler runs, a series of JSON files should have been created in a new `data` directory in the project root.
59 |
60 | #### Running with the website
61 |
62 | If you want to run the website with a local instance of the crawler, use
63 |
64 | ```
65 | yarn serve
66 | ```
67 |
68 | Then on the website side, add a `REACT_APP_LOCAL_CRAWLER_URL` env variable to point to the server URL. This will take the `data` folder that the Parser and Reviser create/modify, and serve it — similar to how the actual GitHub Action works. If you run into delayed updates due to the caching behavior, try using an incognito window.
69 |
70 | ### Debugging
71 |
72 | The crawler output can sometimes be difficult to interpret, since much of the information is represented as tuples or indexed values. For this purpose, we have provided a debug script that can be run with
73 |
74 | ```
75 | yarn debug
76 | ```
77 |
78 | This will generate output with additional debug fields and create JSON files in the `data/debug` directory.
79 |
80 | #### Utilizing structured logging
81 |
82 | By default, the crawler outputs standard log lines to the terminal in development. However, it also supports outputting structured JSON log events that can be more easily parsed and analyzed when debugging. This is turned on by default when the crawler is running in a GitHub Action (where the `LOG_FORMAT` environment variable is set to `json`), but it can also be enabled for development.
83 |
84 | The utility script `yarn start-logged` can be used to run the crawler and output JSON log lines to a logfile in the current working directory:
85 |
86 | ```
87 | yarn start-logged
88 | ```
89 |
90 | To analyze the JSON log lines data, I recommend using [`jq`](https://stedolan.github.io/jq/) since it is a powerful tool for parsing/analyzing JSON in the shell. The following command imports all lines in the latest log file and loads them all as one large array for further processing (**note**: this command will probably only work on Unix-like systems (Linux and probably macOS), so your mileage may vary. If you're running into issues, try running it on a Linux computer and make sure you have [`jq` installed](https://stedolan.github.io/jq/)):
91 |
92 | ```sh
93 | cat $(find . -type f -name "*.log" | sort -n | tail -1) | jq -cs '.'
94 | ```
95 |
96 | For some useful queries on the log data, see [📚 Useful queries on crawler logs](https://github.com/gt-scheduler/crawler/wiki/%F0%9F%93%9A-Useful-queries-on-crawler-logs).
97 |
98 | ### Using the Python Finals Data Scraper
99 |
100 | First, ensure [Python 3.9 or newer](https://www.python.org/downloads/) is installed. Then, install the necessary Python modules with the included `requirements.txt` file:
101 |
102 | ```
103 | pip install -r requirements.txt
104 | ```
105 |
106 | Run the reviser to augment the data previously scraped with the new finals data
107 |
108 | ```
109 | python ./src/Revise.py
110 | ```
111 |
112 | The JSON files in the `data` folder will now contain updated information regarding the finals date and time.
113 |
114 | More information can be found [here](https://github.com/gt-scheduler/crawler/wiki/Finals-Scraping#process)
115 |
116 | #### Updating the list of finals PDFs
117 |
118 | The Registrar publishes a PDF with the Finals schedule at the start of each semester.
119 | The page with the PDF for the Fall 2022 semester can be found [here](https://registrar.gatech.edu/info/final-exam-matrix-fall-2022)
120 |
121 | The `matrix.json` file contains a mapping from term to the pdf file.
122 |
The key is one of the terms identified by the scraper [here](https://gt-scheduler.github.io/crawler-v2/index.json).
123 |
The value is the direct address for the PDF file such as [this](https://registrar.gatech.edu/files/202208%20Final%20Exam%20Matrix.pdf)
124 |
125 | This mapping needs to be updated each semester when a new schedule is posted
126 |
127 | More information can be found on the [wiki](https://github.com/gt-scheduler/crawler/wiki/Finals-Scraping)
128 |
129 | ### Linting
130 |
131 | The project uses pre-commit hooks using [Husky](https://typicode.github.io/husky/#/) and [`lint-staged`](https://www.npmjs.com/package/lint-staged) to run linting (via [ESLint](https://eslint.org/)) and formatting (via [Prettier](https://prettier.io/)). These can be run manually from the command line to format/lint the code on-demand, using the following commands:
132 |
133 | - `yarn run lint` - runs ESLint and reports all linting errors without fixing them
134 | - `yarn run lint:fix` - runs ESLint and reports all linting errors, attempting to fix any auto-fixable ones
135 | - `yarn run format` - runs Prettier and automatically formats the entire codebase
136 | - `yarn run format:check` - runs Prettier and reports formatting errors without fixing them
137 |
138 | ## 👩💻 Contributing
139 |
140 | The GT Scheduler project welcomes (and encourages) contributions from the community. Regular development is performed by the project owners (Jason Park and [Bits of Good](https://bitsofgood.org/)), but we still encourage others to work on adding new features or fixing existing bugs and make the registration process better for the Georgia Tech community.
141 |
142 | More information on how to contribute can be found [in the contributing guide](/CONTRIBUTING.md).
143 |
--------------------------------------------------------------------------------
/intermediate.pem:
--------------------------------------------------------------------------------
1 | Certificate:
2 | Data:
3 | Version: 3 (0x2)
4 | Serial Number:
5 | 83:5b:76:15:20:6d:2d:6e:09:7e:0b:6e:40:9f:ef:c0
6 | Signature Algorithm: sha384WithRSAEncryption
7 | Issuer: C = US, ST = New Jersey, L = Jersey City, O = The USERTRUST Network, CN = USERTrust RSA Certification Authority
8 | Validity
9 | Not Before: Nov 16 00:00:00 2022 GMT
10 | Not After : Nov 15 23:59:59 2032 GMT
11 | Subject: C = US, O = Internet2, CN = InCommon RSA Server CA 2
12 | Subject Public Key Info:
13 | Public Key Algorithm: rsaEncryption
14 | RSA Public-Key: (3072 bit)
15 | Modulus:
16 | 00:89:f0:5c:c4:38:ba:d0:34:57:af:97:55:a0:f4:
17 | 22:43:fc:3e:18:11:3a:db:6d:7a:52:21:06:31:d6:
18 | d4:b7:b7:92:88:86:85:8f:f8:99:ff:18:85:a2:9d:
19 | 2b:5a:e1:f8:04:21:49:de:44:af:40:5f:9a:22:11:
20 | 2c:3a:7b:97:47:a9:95:89:2a:54:c7:9d:c7:33:90:
21 | 29:23:31:48:55:b7:78:1a:a6:3a:b6:0c:1a:3f:3b:
22 | bf:5d:12:3f:e0:39:b3:fa:1a:0b:5b:f8:bf:cc:3d:
23 | 7d:89:7b:d2:f7:9a:9f:35:4f:2a:3f:bf:f7:fd:44:
24 | 9f:db:f5:4d:49:43:66:b8:c2:a5:69:18:30:92:8b:
25 | ae:7b:4b:ac:89:d6:0a:ed:5f:16:df:37:be:ad:31:
26 | 6f:59:1d:89:b5:62:8d:4c:89:dc:37:25:83:dc:68:
27 | 55:cb:fe:c6:d3:d3:f0:4c:0b:bb:87:4a:aa:47:24:
28 | e4:11:32:df:fb:3e:c5:5a:d7:3c:73:5d:9f:f9:27:
29 | ef:98:a1:ca:15:5a:8a:a4:d3:ed:80:c9:2b:c2:ac:
30 | 1a:3a:03:8e:0f:84:34:d0:08:a1:55:3f:94:cc:9e:
31 | 8c:9a:13:4f:1a:0f:bf:5d:fd:01:6a:f9:97:28:21:
32 | 83:4e:fe:6e:cd:07:8e:74:3d:f9:a3:f6:70:d7:a5:
33 | 78:0b:82:78:b6:88:f5:58:b6:3b:86:45:61:af:32:
34 | 86:f9:45:89:89:29:fc:1e:fd:dd:51:38:f8:76:49:
35 | de:24:13:50:ad:47:dc:21:f4:c7:57:78:02:b4:ac:
36 | 17:9f:57:97:9a:bc:61:1f:eb:56:bb:d4:55:c2:c0:
37 | de:81:11:b4:b3:6f:0e:31:d5:5e:3b:09:63:66:f6:
38 | 2b:52:34:68:9a:eb:4d:3b:91:b3:ca:7b:de:57:12:
39 | 55:0a:7d:c2:6e:7e:da:73:82:fe:e6:fc:0f:36:0b:
40 | 34:e0:37:4e:00:6c:cd:61:d1:b9:b7:aa:f2:c9:83:
41 | e8:b1:22:c7:d8:1f:2a:0c:dc:f1
42 | Exponent: 65537 (0x10001)
43 | X509v3 extensions:
44 | X509v3 Authority Key Identifier:
45 | keyid:53:79:BF:5A:AA:2B:4A:CF:54:80:E1:D8:9B:C0:9D:F2:B2:03:66:CB
46 |
47 | X509v3 Subject Key Identifier:
48 | EF:4C:00:92:A6:FB:76:2E:5E:95:E2:C9:5F:87:1B:19:D5:4D:E2:D9
49 | X509v3 Key Usage: critical
50 | Digital Signature, Certificate Sign, CRL Sign
51 | X509v3 Basic Constraints: critical
52 | CA:TRUE, pathlen:0
53 | X509v3 Extended Key Usage:
54 | TLS Web Server Authentication, TLS Web Client Authentication
55 | X509v3 Certificate Policies:
56 | Policy: 1.3.6.1.4.1.6449.1.2.2.103
57 | Policy: 2.23.140.1.2.2
58 |
59 | X509v3 CRL Distribution Points:
60 |
61 | Full Name:
62 | URI:http://crl.usertrust.com/USERTrustRSACertificationAuthority.crl
63 |
64 | Authority Information Access:
65 | CA Issuers - URI:http://crt.usertrust.com/USERTrustRSAAAACA.crt
66 | OCSP - URI:http://ocsp.usertrust.com
67 |
68 | Signature Algorithm: sha384WithRSAEncryption
69 | 26:80:0d:34:e4:1e:ae:22:be:af:3e:a6:e2:84:f9:c6:b7:25:
70 | b1:f7:db:2f:a8:75:c2:6a:82:ac:c3:b6:ce:5b:82:c6:a9:06:
71 | cc:11:63:2a:63:99:72:de:97:5d:50:d9:4e:b0:af:24:a5:76:
72 | 52:23:05:10:d9:f0:08:7c:34:eb:3c:e4:0e:8c:28:94:0b:69:
73 | 4f:6a:1f:34:72:1b:ac:36:51:04:f3:47:0c:76:b1:e6:37:d0:
74 | c9:2c:dd:97:48:7b:da:e3:b3:9a:c4:62:58:88:3a:1f:43:c3:
75 | 2f:30:51:32:71:5f:39:98:7f:f0:35:1a:4a:78:24:9a:74:c4:
76 | 88:42:55:1d:60:09:23:97:e4:95:ba:d7:ce:64:c2:27:76:e3:
77 | 66:ec:2e:6d:2f:09:00:40:03:fa:d0:83:1b:cb:a4:8b:59:84:
78 | 2f:54:4b:fa:f7:de:58:2d:5f:f7:18:17:30:78:8c:63:9d:f9:
79 | 7b:36:b0:40:14:94:6c:ae:f2:0a:cb:a2:16:21:92:05:8d:ea:
80 | 1a:b2:a0:57:4e:a6:6a:e5:f3:2b:bb:09:21:95:ee:09:95:41:
81 | ff:6f:8b:05:41:0c:82:a6:fb:6c:cb:0e:8f:e7:85:19:24:f3:
82 | 10:34:05:bd:41:a8:fc:f2:6c:f1:12:49:58:78:cb:9a:d9:e5:
83 | bc:c1:e0:ba:36:60:dd:3a:d4:75:7d:f8:70:e7:9c:80:c1:7d:
84 | f3:48:89:c0:02:76:fe:09:1b:21:9f:a5:b4:ba:c6:c8:b7:50:
85 | 23:75:e7:2a:5a:1b:8d:cf:26:a4:34:52:70:50:0e:e4:7a:d2:
86 | 2a:35:02:97:92:36:46:21:91:a1:d0:f5:39:3f:d0:2e:00:f8:
87 | 43:37:31:6f:ca:16:e5:39:dd:e1:cb:56:55:fd:b2:cd:62:1b:
88 | 60:09:7d:59:2d:69:9d:a5:fd:26:d8:ee:9c:bc:25:46:0c:90:
89 | bf:e3:a9:90:51:8c:d9:03:ea:ca:ec:9a:92:7a:ba:d5:0c:98:
90 | 09:6d:ee:6d:7e:71:35:fc:eb:f5:44:05:ce:43:a7:d5:5f:b8:
91 | 3e:a1:35:b3:4a:0d:28:3b:63:1c:84:55:a0:6a:04:4b:4d:e5:
92 | da:69:8f:8c:52:88:2a:ec:e8:bc:4b:1e:73:68:de:b1:bc:54:
93 | 94:5f:35:54:1d:80:56:cc:6f:b7:4e:20:1a:24:92:5c:df:99:
94 | 4e:bd:95:2d:24:83:2c:f6:99:93:09:99:6d:86:fe:18:44:75:
95 | d7:49:58:78:77:15:c2:e2:d8:c6:9e:62:23:95:44:5a:cb:1e:
96 | d2:6f:5c:47:5f:d9:a1:1a:67:42:ce:6f:65:e8:df:33:ba:04:
97 | 9b:e3:5e:57:6f:db:0a:0d
98 | -----BEGIN CERTIFICATE-----
99 | MIIGSjCCBDKgAwIBAgIRAINbdhUgbS1uCX4LbkCf78AwDQYJKoZIhvcNAQEMBQAw
100 | gYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtK
101 | ZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYD
102 | VQQDEyVVU0VSVHJ1c3QgUlNBIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIy
103 | MTExNjAwMDAwMFoXDTMyMTExNTIzNTk1OVowRDELMAkGA1UEBhMCVVMxEjAQBgNV
104 | BAoTCUludGVybmV0MjEhMB8GA1UEAxMYSW5Db21tb24gUlNBIFNlcnZlciBDQSAy
105 | MIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAifBcxDi60DRXr5dVoPQi
106 | Q/w+GBE62216UiEGMdbUt7eSiIaFj/iZ/xiFop0rWuH4BCFJ3kSvQF+aIhEsOnuX
107 | R6mViSpUx53HM5ApIzFIVbd4GqY6tgwaPzu/XRI/4Dmz+hoLW/i/zD19iXvS95qf
108 | NU8qP7/3/USf2/VNSUNmuMKlaRgwkouue0usidYK7V8W3ze+rTFvWR2JtWKNTInc
109 | NyWD3GhVy/7G09PwTAu7h0qqRyTkETLf+z7FWtc8c12f+SfvmKHKFVqKpNPtgMkr
110 | wqwaOgOOD4Q00AihVT+UzJ6MmhNPGg+/Xf0BavmXKCGDTv5uzQeOdD35o/Zw16V4
111 | C4J4toj1WLY7hkVhrzKG+UWJiSn8Hv3dUTj4dkneJBNQrUfcIfTHV3gCtKwXn1eX
112 | mrxhH+tWu9RVwsDegRG0s28OMdVeOwljZvYrUjRomutNO5GzynveVxJVCn3Cbn7a
113 | c4L+5vwPNgs04DdOAGzNYdG5t6ryyYPosSLH2B8qDNzxAgMBAAGjggFwMIIBbDAf
114 | BgNVHSMEGDAWgBRTeb9aqitKz1SA4dibwJ3ysgNmyzAdBgNVHQ4EFgQU70wAkqb7
115 | di5eleLJX4cbGdVN4tkwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8C
116 | AQAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMCIGA1UdIAQbMBkwDQYL
117 | KwYBBAGyMQECAmcwCAYGZ4EMAQICMFAGA1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly9j
118 | cmwudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FDZXJ0aWZpY2F0aW9uQXV0aG9y
119 | aXR5LmNybDBxBggrBgEFBQcBAQRlMGMwOgYIKwYBBQUHMAKGLmh0dHA6Ly9jcnQu
120 | dXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FBQUFDQS5jcnQwJQYIKwYBBQUHMAGG
121 | GWh0dHA6Ly9vY3NwLnVzZXJ0cnVzdC5jb20wDQYJKoZIhvcNAQEMBQADggIBACaA
122 | DTTkHq4ivq8+puKE+ca3JbH32y+odcJqgqzDts5bgsapBswRYypjmXLel11Q2U6w
123 | rySldlIjBRDZ8Ah8NOs85A6MKJQLaU9qHzRyG6w2UQTzRwx2seY30Mks3ZdIe9rj
124 | s5rEYliIOh9Dwy8wUTJxXzmYf/A1Gkp4JJp0xIhCVR1gCSOX5JW6185kwid242bs
125 | Lm0vCQBAA/rQgxvLpItZhC9US/r33lgtX/cYFzB4jGOd+Xs2sEAUlGyu8grLohYh
126 | kgWN6hqyoFdOpmrl8yu7CSGV7gmVQf9viwVBDIKm+2zLDo/nhRkk8xA0Bb1BqPzy
127 | bPESSVh4y5rZ5bzB4Lo2YN061HV9+HDnnIDBffNIicACdv4JGyGfpbS6xsi3UCN1
128 | 5ypaG43PJqQ0UnBQDuR60io1ApeSNkYhkaHQ9Tk/0C4A+EM3MW/KFuU53eHLVlX9
129 | ss1iG2AJfVktaZ2l/SbY7py8JUYMkL/jqZBRjNkD6srsmpJ6utUMmAlt7m1+cTX8
130 | 6/VEBc5Dp9VfuD6hNbNKDSg7YxyEVaBqBEtN5dppj4xSiCrs6LxLHnNo3rG8VJRf
131 | NVQdgFbMb7dOIBokklzfmU69lS0kgyz2mZMJmW2G/hhEdddJWHh3FcLi2MaeYiOV
132 | RFrLHtJvXEdf2aEaZ0LOb2Xo3zO6BJvjXldv2woN
133 | -----END CERTIFICATE-----
134 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "gt-schedule-crawler",
3 | "version": "1.0.0",
4 | "scripts": {
5 | "start": "export NODE_EXTRA_CA_CERTS=\"$PWD/intermediate.pem\" && ts-node src",
6 | "start-logged": "export NODE_EXTRA_CA_CERTS=\"$PWD/intermediate.pem\" && logfile=\"$(date --iso-8601=seconds).log\"; echo \"Logging to '$logfile'\"; LOG_FORMAT=json ts-node src > \"$logfile\" 2>&1; echo 'Done'",
7 | "start:windows": "set NODE_EXTRA_CA_CERTS=%cd%/intermediate.pem && ts-node src",
8 | "start-logged:windows": "set NODE_EXTRA_CA_CERTS=%cd%/intermediate.pem && logfile=\"$(date --iso-8601=seconds).log\"; echo \"Logging to '$logfile'\"; LOG_FORMAT=json ts-node src > \"$logfile\" 2>&1; echo 'Done'",
9 | "gen-parser": "antlr4ts -visitor src/steps/prereqs/grammar/Prerequisites.g4",
10 | "typecheck": "tsc --noEmit",
11 | "lint": "eslint \"src/**/*.ts\" --ignore-pattern \"src/steps/prereqs/grammar/**/*\"",
12 | "lint:fix": "eslint \"src/**/*.ts\" --ignore-pattern \"src/steps/prereqs/grammar/**/*\" --fix",
13 | "format": "prettier \"src/**/*.{ts,md,json}\" \"*.{ts,md,json}\" --write",
14 | "format:check": "prettier \"src/**/*.{ts,md,json}\" \"*.{ts,md,json}\" --check",
15 | "debug": "ts-node src/debug.ts",
16 | "serve": "cd src && python serve.py"
17 | },
18 | "dependencies": {
19 | "antlr4ts": "^0.5.0-alpha.3",
20 | "axios": "^1.3.4",
21 | "cheerio": "^1.0.0-rc.12",
22 | "exponential-backoff": "^3.1.0",
23 | "fast-safe-stringify": "^2.0.8",
24 | "fs": "^0.0.1-security",
25 | "lodash": "^4.17.21",
26 | "tiny-async-pool": "^1.2.0"
27 | },
28 | "devDependencies": {
29 | "@types/lodash": "^4.14.191",
30 | "@types/node": "^14.0.24",
31 | "@types/tiny-async-pool": "^1.0.0",
32 | "@types/tough-cookie": "^4.0.2",
33 | "@typescript-eslint/eslint-plugin": "^4.23.0",
34 | "@typescript-eslint/parser": "^4.23.0",
35 | "antlr4ts-cli": "^0.5.0-alpha.3",
36 | "eslint": "^7.26.0",
37 | "eslint-config-airbnb-base": "^14.2.1",
38 | "eslint-config-prettier": "^8.3.0",
39 | "eslint-plugin-import": "^2.23.2",
40 | "husky": "^4.0.0",
41 | "lint-staged": "^11.0.0",
42 | "prettier": "^2.3.0",
43 | "ts-node": "^8.10.2",
44 | "typescript": "^4"
45 | },
46 | "husky": {
47 | "hooks": {
48 | "pre-commit": "lint-staged"
49 | }
50 | },
51 | "lint-staged": {
52 | "src/**/*.{js,ts}": [
53 | "eslint"
54 | ],
55 | "src/**/*.{js,ts,md,json}": [
56 | "prettier --write"
57 | ],
58 | "*.{js,ts,md,json}": [
59 | "prettier --write"
60 | ]
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | Bottleneck==1.3.5
2 | certifi==2022.6.15
3 | distro==1.5.0
4 | mkl-fft==1.3.6
5 | mkl-random==1.2.2
6 | mkl-service==2.4.0
7 | numexpr==2.8.3
8 | numpy
9 | packaging==21.3
10 | pandas==1.4.3
11 | pip==22.1.2
12 | pyparsing==3.0.9
13 | python-dateutil==2.8.2
14 | pytz==2022.1
15 | setuptools==63.4.1
16 | six==1.16.0
17 | tabula-py==2.3.0
18 | wheel==0.37.1
19 | PyPDF2==3.0.1
20 | requests
21 |
--------------------------------------------------------------------------------
/src/Parse.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | import tabula
4 | import re
5 | import numpy as np
6 | import pandas as pd
7 | from datetime import datetime
8 | import json
9 | from pathlib import Path
10 | from typing import *
11 | import PyPDF2
12 | import requests
13 | import os
14 |
15 | # More documentation available: https://github.com/gt-scheduler/crawler/wiki/Finals-Scraping#revise
16 |
17 | MATRIX_FILE_PATH = Path("./src/matrix.json").resolve().absolute()
18 |
19 | # RegEx for date (i.e. "Monday, Dec 12")
20 | dateSearch = re.compile(r"\w+,\s\w+\s\d+")
21 | # RegEx for time range (i.e. "8:00 AM - 10:50 AM")
22 | timeSearch = re.compile(r"\d+:\d\d\s[PA]M\s*(‐|-)\s*\d+:\d\d [PA]M")
23 | # RegEx for title (i.e. "8:00 AM - 10:50 AM Exams")
24 | titleSearch = re.compile(r"\d+:\d\d [AP]M\s+(‐|-)\s+\d+:\d\d\s[AP]M\sExams")
25 |
class Parser:
    """
    Base class for parsing a GT Final Exam Matrix PDF into DataFrames.

    Subclasses implement `parseFile` to populate `self.read` (the raw list of
    DataFrame chunks extracted by tabula) and `self.schedule`; `parseCommon`
    then extracts the common-exam table, and `export` writes a CSV.
    """

    def __init__(self):
        # strftime format used for all normalized dates, e.g. "Apr 28, 2025"
        self.dateFormat = "%b %d, %Y"
        # Final exam schedule indexed by (Days, Time); built by parseFile
        self.schedule = pd.DataFrame()
        # Raw list of DataFrame chunks produced by tabula; set by parseFile
        self.read = None
        # Common (course-specific) exams indexed by Course; built by parseCommon
        self.common = pd.DataFrame()

    def cropPdf(self, input_path, output_path, left=16.4 * 72, bottom=0 * 72, right=1 * 72, top=11 * 72):
        """
        Crop a PDF file using PyPDF2 by adjusting the visible bounding box.

        Some finals PDFs contain headers, footers, or extraneous whitespace that confuse
        downstream parsing logic. Cropping reduces errors and noise in the extracted data.

        All coordinates are in PDF points (1 pt = 1/72 inch) and are applied
        to every page of the document.
        """
        with open(input_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            writer = PyPDF2.PdfWriter()

            for page_num in range(len(reader.pages)):
                page = reader.pages[page_num]
                page.mediabox.lower_left = (left, bottom)
                page.mediabox.upper_right = (right, top)
                writer.add_page(page)

            with open(output_path, 'wb') as output_file:
                writer.write(output_file)

    def convertTimeGroup(self, time_group: str) -> str:
        """
        Converts a time group to a 24-hour format.
        e.g. "10:20 am - 2:50 pm" -> "1020 - 1450"

        Returns "TBA" unless the input contains exactly two "H:MM am/pm"
        times (a space is required before "am"/"pm").
        """
        matching_groups = re.findall(r"((\d{1,2}):(\d{2}) (a|p)m)", time_group)
        # re.findall always returns a list (never None), so only the length
        # needs checking: anything other than exactly two times is unparseable
        if len(matching_groups) != 2: return "TBA"
        converted_times = []
        for time in matching_groups:
            if len(time) != 4: return "TBA"
            [_, hour, minute, ampm] = time
            # 12-hour -> 24-hour: "12" wraps to 0, then pm adds 12
            new_hour = str(int(hour) % 12 + (12 if ampm == 'p' else 0))
            new_hour = new_hour if len(new_hour) == 2 else f"0{new_hour}"
            converted_times.append(f"{new_hour}{minute}")
        return " - ".join(converted_times)

    def setFirstRowAsHeader(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Take the first row of a DataFrame as column names and
        return the DataFrame with remaining rows as data.
        """
        df.columns = df.iloc[0]
        df = df.drop(df.index[0])
        df = df.reset_index(drop=True)

        return df

    def parseBlock(self, block, version):
        """
        Parse one time-slot block into a DataFrame with the columns
        ['Days', 'Time', 'finalDate', 'finalTime'].

        A block is a chunk of PDF-extracted text representing a single exam time slot,
        typically starting with " - Exams" followed by rows of class schedules
        with days, class times, and optional course dates (may include line breaks or NaNs).

        e.g.

        2:40 PM - 5:30 PM Exams Unnamed: 0
        0 Days Class Start Time
        1 F2:00 PM3:55 PM Monday, Apr 28\r2:40 PM - 5:30 PM
        2 F2:00 PM4:45 PM NaN
        3 F8:00 AM10:45 AM Thursday, May 1\r2:40 PM - 5:30 PM
        4 F8:25 AM10:20 AM NaN
        5 MTWR2:00 PM2:50 PM Friday, Apr 25\r2:40 PM - 5:30 PM
        6 MWF\r2:00 PM2:50 PM NaN
        7 MW2:00 PM2:50 PM NaN
        ...

        `version` selects the extraction strategy: 1 for ParserV1-style
        chunks, 2 for ParserV2/V3-style chunks.
        """

        def date(n: re.Match):
            # re.sub callback: record the exam date (normalized via
            # self.dateFormat) in the enclosing sectionDate and strip the
            # matched text from the cell. NOTE: re.sub requires the repl
            # callable to return a string; the previous implementation
            # returned None, which made re.sub raise TypeError on a match.
            nonlocal sectionDate
            raw_date = n.group()
            formats = [
                "%m/%d",      # numeric month/day, e.g., "12/09"
                "%m-%d",      # numeric with dash, e.g., "12-09"
                "%A, %b %d",  # abbreviated month name, e.g., "Tuesday, Dec 9"
                "%A, %B %d"   # full month name, e.g., "Tuesday, December 9"
            ]

            for fmt in formats:
                try:
                    parsed_date = datetime.strptime(raw_date, fmt)
                    parsed_date = parsed_date.replace(year=self.year)
                    sectionDate = parsed_date.strftime(self.dateFormat)
                    # Strip the parsed date from the cell
                    return ""
                except ValueError:
                    continue

            # no formats matched; keep the original text in place
            print(f"Warning: Could not parse date '{raw_date}'")
            sectionDate = None
            return raw_date

        def time(n: re.Match):
            # re.sub callback: record the first exam time range seen for this
            # block and strip every matched range from the cell (see note in
            # `date` about re.sub requiring a string return value)
            nonlocal sectionTime
            if not sectionTime:
                group = n.group().lower()
                sectionTime = self.convertTimeGroup(group)
            return ""

        if version == 1:
            block.columns = ["Days", "Time"]
            # Tabula combines the Start Time, End Time, and Exam Date/Time columns
            # Requires regex to split them apart
            sectionDate = ""
            sectionTime = ""
            # Matches the whitespace between an end time ("...pm") and the next
            # digit so a " - " separator can be inserted between the two times
            hyphen = re.compile(r"(?<=[ap]m)\s(?=\d)")

            # Each row represents a class meeting with 'Days' (e.g., "MW", "TRF") and 'Time' (start and end times,
            # sometimes including the exam date or extra time ranges), e.g.,
            # Days="MW", Time="6:30 PM 7:20 PM" or Days="T", Time="5:00 PM 6:55 PM Tuesday, Dec 9".
            for index, row in block.iterrows():
                # Add the finals date/time as a separate column
                try:
                    row[1] = dateSearch.sub(date, row[1])
                    row[1] = timeSearch.sub(time, row[1])
                    row[1] = hyphen.sub(" - ", row[1].lower())
                    row[1] = self.convertTimeGroup(row[1])
                except Exception as e:
                    # Best-effort: leave whatever has been converted so far
                    print(f"Error parsing row: {e}")
                # iterrows yields copies, so mutating `row` alone never reaches
                # the DataFrame; write the converted value back explicitly
                block.loc[index, 'Time'] = row[1]
                block.loc[index, 'finalDate'] = sectionDate
                block.loc[index, 'finalTime'] = sectionTime

            # Go back and add the first row's time (the header row carries no
            # exam time of its own). Positional iloc assignment avoids the
            # chained-assignment no-op under pandas Copy-on-Write.
            block.iloc[0, block.columns.get_loc('finalTime')] = block['finalTime'].iloc[1]
            return block
        elif version == 2:
            # check if the title for the block was parsed as a column name or a row;
            # when it came through as a row, drop that extra leading row
            for c in block.columns:
                if titleSearch.match(c):
                    break
            else:
                block = block.drop(index=[0])
                block = block.reset_index(drop=True)

            # Drop the "Days / Class Start Time" header row
            block = block.drop(index=0)
            block = block.reset_index(drop=True)

            block.columns = ["Days", "Time"]

            sectionDate = ""
            sectionTime = ""

            def split_schedule(schedule):
                # Matches schedule lines like "TR8:00 AM9:15 AM" to capture days, start time, and end time
                pattern = r'([A-Za-z]+)\s*\r?\s*(\d{1,2}:\d{2} [AP]M)\s*(\d{1,2}:\d{2} [AP]M)'
                matches = re.match(pattern, schedule)
                if matches:
                    days = matches.group(1)
                    start_time = matches.group(2)
                    end_time = matches.group(3)
                    return days, start_time, end_time
                else:
                    return None, None, None

            # split data into the four different columns
            for index, row in block.iterrows():
                days, start_time, end_time = split_schedule(row[0])
                # Column 1 only carries the exam date/time on the first row of
                # each group; NaN rows inherit the current sectionDate/Time
                if not pd.isna(row[1]):
                    row[1] = dateSearch.sub(date, row[1])
                    row[1] = timeSearch.sub(time, row[1])

                block.loc[index, 'finalDate'] = sectionDate
                block.loc[index, 'finalTime'] = sectionTime
                block.loc[index, 'Days'] = days
                block.loc[index, 'Time'] = self.convertTimeGroup(f"{start_time.lower()} - {end_time.lower()}")

            return block
        else:
            print("Unknown parser version")

    def parseCommon(self):
        """
        Parse the time slots for the common
        exams at the bottom of the schedule into `self.common`,
        a DataFrame indexed by course (e.g. "CS 1332").
        """
        if self.read is None:
            print("File was not found")
            return None

        df = None
        for chunk in self.read:
            # Find the chunk with the common exams
            if "Common Exams" in chunk.columns: df = chunk.copy()
        if df is None: return None

        df = self.setFirstRowAsHeader(df)
        df.dropna(axis=1, how='all', inplace=True)

        try:
            # Some PDFs merge the course list and the date into a single cell;
            # split them back apart into Course/Date/Time columns
            tempdf = df.copy()
            tempdf.columns = ['Course', 'Date']
            for index, row in tempdf.iterrows():
                # Matches a course name or 'None' followed by a date like "Thurs, Apr 25", capturing (course(s), date).
                match = re.match(r'(None|.+?)([A-z]{3,5}, \w{3} \d{1,2})', row[0])

                if match:
                    tempdf.loc[index, 'Course'] = match.group(1)
                    tempdf.loc[index, 'Time'] = row[1]
                    tempdf.loc[index, 'Date'] = match.group(2)
                    df = tempdf.copy()
        except Exception:
            # Best-effort reshape: if this chunk isn't in the merged format,
            # fall back to whatever `df` already holds
            pass

        def strip_carriage_return(s):
            # tabula embeds \r wherever the PDF wrapped a line
            return s.replace('\r', '')
        df = df[['Course', 'Date', 'Time']]

        df['Course'] = df['Course'].apply(strip_carriage_return)
        df['Date'] = df['Date'].apply(strip_carriage_return)
        df['Time'] = df['Time'].apply(strip_carriage_return)

        df['Time'] = df['Time'].str.lower().apply(self.convertTimeGroup)
        # Rows with no course listed carry the literal string "None"
        df = df.loc[df['Course'] != "None"]

        # Change date format from day, month date
        # to month date, year
        day = re.compile(r"\w+(?=,)")
        def convert(val, day):
            # Truncate the weekday to 3 letters so "%a" can parse it
            string = day.sub(lambda match: match.group()[:3], val)
            try:
                date = datetime.strptime(string, "%a, %b %d").replace(year=self.year).strftime("%b %d, %Y")
            except ValueError:
                # Full month name was used (e.g. July instead of Jul)
                date = datetime.strptime(string, "%a, %B %d").replace(year=self.year).strftime("%b %d, %Y")
            return date
        df['Date'] = df['Date'].apply(lambda val: convert(val, day))

        # Explode comma separated courses
        df['Course'] = df['Course'].map(lambda x: x.split(", "))
        df = df.explode(column="Course").reset_index(drop=True)

        # Explode courses combined with / (e.g. "CHEM 1211K/1212K")
        def splitCourse(string):
            course = string.split()[0]
            numbers = string.split()[1].split("/")
            return ["{} {}".format(course, number) for number in numbers]
        df['Course'] = df['Course'].map(splitCourse)
        df = df.explode(column="Course").set_index('Course')
        # Normalize whitespace and unify Unicode hyphens (U+2010) with ASCII
        df = df.apply(lambda x: x.str.strip()).apply(lambda x: x.str.replace("‐", "-"))
        self.common = df

    def export(self, title="Finals Schedule"):
        """
        Export the parsed schedule to ./data/<title>.csv
        """
        if self.schedule is not None:
            self.schedule.to_csv("./data/{}.csv".format(title))
        else:
            print("Schedule has not been parsed")
281 |
class ParserV1(Parser):
    """
    Parser class for PDFs from 202308, 202505, and 202508

    ParserV1 handles matrix PDFs where:
    - Tabula splits pages into uneven chunks
    - Columns may be merged incorrectly
    - Schedules have Days/Time columns combined with Exam Date/Time
    """
    def __init__(self):
        super().__init__()

    def parseFile(self, file="202208"):
        """
        Parse a single file into `self.schedule`, a Pandas DataFrame
        Takes a single parameter which is a key in matrix.json
        """
        # The first four characters of the key are the calendar year ("2022")
        self.year = int(file[0:4])

        print(f"Parsing file: {file}")

        # Resolve the PDF's location from matrix.json by its term key
        with open(MATRIX_FILE_PATH) as f:
            locations = json.load(f)
        if file in locations:
            url = locations[file] # address for the PDF
        else:
            print("File was not found")
            return None
        try:
            self.read = tabula.read_pdf(url, pages=1)
        except Exception as e:
            print(f'Tabula was unable to parse the matrix for : {file}')
            print(e)
            return None


        schedule = pd.DataFrame()
        sections = set() # Keep track of time blocks already parsed
        for chunk in self.read:
            # Tabula breaks the file up into separate chunks,
            # some containing multiple time slots
            columns = self.getColumns(chunk)
            for start, end, terminate in columns:
                df = chunk.iloc[:terminate, start:end+1]

                # Fix case where tabula breaks the columns incorrectly:
                # merge the spurious third column's text back into column 1
                if len(df.columns) == 3:
                    df.iloc[:, 1] = df.iloc[:, 1:].fillna("").agg(" ".join, axis=1).apply(str.strip)
                    df = df.iloc[:, :-1]

                # Column 1's header is the block title (e.g. "2:40 PM - 5:30 PM Exams");
                # skip titles already parsed from an earlier, overlapping chunk
                if df.columns[1] not in sections:
                    sections.add(df.columns[1])
                    print("Parsing: {}".format(df.columns[1]))
                    block = df.drop(index=0).iloc[:, :2].copy()
                    block.columns = block.iloc[0]
                    schedule = pd.concat([schedule, self.parseBlock(block, 1)], axis=0, join="outer")
        # Normalize whitespace and unify Unicode hyphens before indexing
        schedule = schedule.apply(lambda x: x.str.strip()).apply(lambda x: x.str.replace("‐", "-"))
        schedule.set_index(['Days', 'Time'], inplace=True)
        self.schedule = schedule

    def getColumns(self, block: pd.DataFrame) -> List[List[int]]:
        """
        Given one block created by tabula, determine which columns to parse
        Tabula breaks the page up into chunks, so uneven boxes can result in
        weird breaks
        Return a list of columns to parse in the format
        [start_column, end_column, end_row]
        """

        idxs = []
        for idx, column in enumerate(block.columns):
            # A column header matching "<start> - <end> Exams" marks the title
            # side of a (Days/Time, title) column pair
            if titleSearch.match(column):
                if idx == len(block.columns)-1: idxs.append([idx-1, idx])
                elif isinstance(block.iloc[0, idx+1], str) and "Exam Date/Time" in block.iloc[0, idx+1]:
                    # Check if tabula created an extra column
                    idxs.append([idx-1, idx+1])
                else: idxs.append([idx-1, idx])
                # The block's rows end at the first NaN in its title column
                na = block[block.iloc[:, idxs[-1][0]+1].isna()]
                idxs[-1].append(na.index[0] if not na.empty else len(block))
        return idxs
363 |
364 |
class ParserV2(Parser):
    """
    Parser class for PDFs from 202402

    ParserV2 handles PDFs where:
    - Coordinates are cropped to exclude header and footer for cleaner extraction
    - Tabula may still merge columns, but simpler splitting logic is used
    """
    def __init__(self):
        super().__init__()

    def parseFile(self, file):
        """
        Parse a single file into `self.schedule`, a Pandas DataFrame
        Takes a single parameter which is a key in matrix.json
        """
        # The first four characters of the key are the calendar year
        self.year = int(file[0:4])
        print(f"Parsing file: {file}")

        # Resolve the PDF's location from matrix.json by its term key
        with open(MATRIX_FILE_PATH) as f:
            locations = json.load(f)
        if file in locations:
            url = locations[file] # address for the PDF
        else:
            print("File was not found")
            return None
        try:
            response = requests.get(url)
        except Exception as e:
            print(f"Unable to download Finals Matrix for: {file}")
            print(e)
            return None

        downloaded = f"downloaded_{file}.pdf"
        cropped = f"cropped_{file}.pdf"
        with open(downloaded, 'wb') as f:
            f.write(response.content)

        # coordinates in pdf point system (1 pt = 1/72 inch)
        top = 16.4 * 72
        left = 0 * 72
        bottom = 1 * 72
        right = 11 * 72

        try:
            self.cropPdf(downloaded, cropped, left, bottom, right, top)
            self.read = tabula.read_pdf(cropped, pages=1)
        finally:
            # Always remove the temporary PDFs, even when cropping or parsing
            # raises (the exception still propagates so Revise can fall back
            # to the next parser version)
            for path in (downloaded, cropped):
                if os.path.exists(path):
                    os.unlink(path)

        schedule = pd.DataFrame()

        for chunk in self.read:
            # Skip the reading-period and common-exam tables; only the
            # per-timeslot blocks belong in the main schedule
            if "Reading and Conflict Periods" in chunk.columns or "Common Exams" in chunk.columns:
                continue

            schedule = pd.concat([schedule, self.parseBlock(chunk, 2)], axis=0, join="outer")

        # Normalize whitespace and unify Unicode hyphens with ASCII "-"
        schedule = schedule.apply(lambda x: x.str.strip()).apply(lambda x: x.str.replace("‐", "-"))
        schedule.set_index(['Days', 'Time'], inplace=True)
        self.schedule = schedule
423 |
class ParserV3(Parser):
    """
    Parser class for PDFs from 202502

    Similar to ParserV2 but instead crops into left/right halves for easier Tabula extraction
    Can be used if Tabula has trouble with the full page (i.e. combines two tables from left and right side into one)
    """
    def __init__(self):
        super().__init__()

    def parseFile(self, file="202208"):
        """
        Parse a single file into `self.schedule`, a Pandas DataFrame
        Takes a single parameter which is a key in matrix.json
        """
        # The first four characters of the key are the calendar year
        self.year = int(file[0:4])

        print(f"Parsing file: {file}")

        # Load the matrix.json file to get the URL for the PDF
        with open(MATRIX_FILE_PATH) as f:
            locations = json.load(f)
        if file in locations:
            url = locations[file] # address for the PDF
        else:
            print("File was not found")
            return None

        try:
            # Download the PDF
            response = requests.get(url)
        except Exception as e:
            print(f"Unable to download Finals Matrix for: {file}")
            print(e)
            return None

        # Save the downloaded PDF
        pdf_path = f"downloaded_{file}.pdf"
        with open(pdf_path, 'wb') as f:
            f.write(response.content)

        left_path = f"cropped_left_{file}.pdf"
        right_path = f"cropped_right_{file}.pdf"
        try:
            reader = PyPDF2.PdfReader(pdf_path)
            page = reader.pages[0]
            page_width = float(page.mediabox.width)
            page_height = float(page.mediabox.height)

            # Split the page into left/right halves so tabula sees one table each
            self.cropPdf(pdf_path, left_path, 0, 0, page_width / 2, page_height)
            self.cropPdf(pdf_path, right_path, page_width / 2, 0, page_width, page_height)

            try:
                self.read = tabula.read_pdf(left_path, pages=1)
                self.read.extend(tabula.read_pdf(right_path, pages=1))
            except Exception as e:
                print(f"Tabula was unable to parse the half of the matrix for: {file}")
                print(e)
                return None
        finally:
            # Clean up the temporary files even when cropping or parsing fails
            # (previously a tabula failure returned before the unlink calls,
            # leaking all three temp PDFs)
            for path in (pdf_path, left_path, right_path):
                if os.path.exists(path):
                    os.unlink(path)

        # Process the parsed data
        schedule = pd.DataFrame()

        for chunk in self.read:

            # Only the first two columns of each half carry Days/Time data
            chunk = chunk.iloc[:, :2]

            if "Reading and Conflict Periods" in chunk.columns or "Common Exams" in chunk.columns:
                continue

            schedule = pd.concat([schedule, self.parseBlock(chunk, 2)], axis=0, join="outer")

        # Normalize whitespace and unify Unicode hyphens with ASCII "-"
        schedule = schedule.apply(lambda x: x.str.strip()).apply(lambda x: x.str.replace("‐", "-"))
        schedule.set_index(['Days', 'Time'], inplace=True)
        self.schedule = schedule
--------------------------------------------------------------------------------
/src/Revise.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | from Parse import ParserV1, ParserV2, ParserV3
5 | import json
6 | from typing import Tuple
7 | import pandas as pd
8 | import numpy as np
9 | from pathlib import Path
10 | import re
11 |
12 | # More documentation available: https://github.com/gt-scheduler/crawler/wiki/Finals-Scraping#revise
13 |
class Section:
    """
    Lightweight view over one scraped section tuple from the term JSON.

    Exposes the decoded period, meeting days, credit hours, and schedule
    type, and allows writing values back into the underlying structure.
    """

    # Shared caches dict from the term JSON (periods, scheduleTypes, ...);
    # must be assigned by the caller before any Section is constructed
    cache = None

    def __init__(self, data):
        meetings = data[1]
        if len(meetings) == 0:
            raise LookupError("No Section Information")

        # Only the first meeting is consulted for period/days
        first_meeting = meetings[0]
        period_idx = first_meeting[0]
        day_string = first_meeting[1]
        credit_hours = data[2]
        schedule_type_idx = data[3]

        # Resolve cache indices into their human-readable values
        self.period: str = self.cache['periods'][period_idx]
        self.days: str = day_string
        self.credits = int(credit_hours)
        self.scheduleType = self.cache['scheduleTypes'][schedule_type_idx]
        self.obj = data

    def set(self, idx, val):
        # Mutate the first meeting tuple in place so the change is
        # reflected in the loaded JSON structure
        self.obj[1][0][idx] = val
36 |
37 |
class Revise:
    """
    Drives the finals-revision pipeline: parses each term's finals matrix
    PDF (trying each parser version in order) and writes the final-exam
    date/time indices back into the scraped term JSON files in ./data/.
    """

    def __init__(self):
        # Processing starts immediately on construction
        self.iterFiles()

    def iterFiles(self):
        """
        Iterate over every term JSON in ./data/, parse its finals matrix
        with the first parser version that succeeds, export the CSV, and
        revise the term JSON in place.
        """
        failed = set()
        parsers = [ParserV1, ParserV2, ParserV3]

        # Attempt to get the finals information for each term
        for file in Path("./data/").resolve().absolute().iterdir():
            # Only process term files named like "202408.json"
            if not re.match(r"\d+\.json", file.name): continue

            success = False
            for Parser in parsers:
                try:
                    parser = Parser()
                    parser.parseFile(file.stem)
                    parser.parseCommon()
                    success = True
                    break
                except Exception as e:
                    print(f"{Parser.__name__} failed for {file.stem}: {e}")

            if not success:
                print(f"All parsers failed for {file.stem}")
                failed.add(file.stem)
            else:
                parser.export(f"{file.stem}_Finals")
                self.schedule = parser.schedule
                self.common = parser.common
                self.file = file
                self.process()

        print("Finished all files")
        if failed:
            print(f"Failed to parse finals for: {', '.join(failed)}")

    def process(self):
        """
        Revise the scraped JSON for a single term: store the unique final
        dates/times in the term's caches and attach their indices to every
        qualifying lecture section.
        """

        # Load the current term
        with open(self.file) as f:
            data = json.load(f)
        # Create a list of unique final dates/times; sections reference
        # these caches by index
        dates = np.sort(np.unique(np.concatenate([self.schedule['finalDate'].unique(), self.common['Date'].unique()]) if not self.schedule.empty else np.array([])))
        times = np.unique(np.concatenate([self.schedule['finalTime'].unique(), self.common['Time'].unique()]) if not self.schedule.empty else np.array([]))
        data['caches']['finalTimes'] = times.tolist()
        data['caches']['finalDates'] = dates.tolist()

        def lookup(days, period) -> pd.Series:
            # find the final date/time given class days/period, or None
            # when the matrix has no entry for that combination
            if not self.schedule.index.isin([(days, period)]).any(): return None
            row = self.schedule.loc[days, period]
            return row

        vip = re.compile(r"VIP\s\d+")
        Section.cache = data['caches']
        for course, courseData in data['courses'].items():
            # Skip VIP courses
            if vip.search(course):
                continue
            for sectionTitle, sectionData in courseData[1].items():
                try:
                    section = Section(sectionData)
                except Exception:
                    # Section without meeting info -- nothing to revise.
                    # (was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit)
                    pass
                else:
                    # According to the Registrar's, only lecture courses of at least 2 credit hours,
                    # have a finals in the Final Exam Matrix.
                    # https://registrar.gatech.edu/registration/exams
                    if section.scheduleType != "Lecture*" or section.credits < 2:
                        continue

                    # Check if the course has a common finals time
                    if course in self.common.index:
                        row = self.common.loc[course]
                        dateIdx = int(np.where(dates == row['Date'])[0][0])
                        timeIdx = int(np.where(times == row['Time'])[0][0])
                        section.set(6, dateIdx)
                        section.set(7, timeIdx)
                        continue

                    # Otherwise fall back to the matrix lookup by days/period
                    row = lookup(section.days, section.period)
                    if row is not None:
                        dateIdx = int(np.where(dates == row['finalDate'])[0][0])
                        timeIdx = int(np.where(times == row['finalTime'])[0][0])
                        section.set(6, dateIdx)
                        section.set(7, timeIdx)
                        continue

        # Persist the revised term JSON in place
        with open(self.file, "w") as f:
            json.dump(data, f)
134 |
135 |
136 | Revise()
137 |
138 |
139 |
140 |
141 |
--------------------------------------------------------------------------------
/src/debug.ts:
--------------------------------------------------------------------------------
1 | import path from "path";
2 | import fs from "fs/promises";
3 | import { existsSync, mkdirSync } from "fs";
4 | import { dataPath } from "./steps/write";
5 | import { Caches, TermData, Location, Prerequisites } from "./types";
6 |
7 | type MeetingDebug = {
8 | periodIndex: number;
9 | period: string;
10 | days: string;
11 | room: string;
12 | locationIndex: number;
13 | location: Location | null;
14 | instructors: string[];
15 | dateRangeIndex: number;
16 | dateRange: string;
17 | finalDateIndex: number;
18 | finalDate: string | null;
19 | finalTimeIdx: number;
20 | finalTime: string | null;
21 | };
22 |
23 | type SectionDebug = {
24 | crn: string;
25 | meetings: MeetingDebug[];
26 | creditHours: number;
27 | scheduleTypeIndex: number;
28 | scheduleType: string;
29 | campusIndex: number;
30 | campus: string;
31 | attributeIndices: number[];
32 | attributes: string[];
33 | gradeBaseIndex: number;
34 | gradeBase: string | null;
35 | };
36 |
37 | type CourseDebug = {
38 | fullName: string;
39 | sections: Record;
40 | prerequisites: Prerequisites | [];
41 | description: string | null;
42 | };
43 |
44 | type TermDebug = {
45 | courses: Record;
46 | caches: Caches;
47 | updatedAt: string | Date;
48 | version: number;
49 | };
50 |
51 | function safeGet(arr: T[], idx: number): T | null {
52 | if (idx == null || Number.isNaN(idx)) return null;
53 | if (idx < 0 || idx >= arr.length) return null;
54 | return arr[idx];
55 | }
56 |
57 | async function readJsonFile(file: string): Promise {
58 | const buf = await fs.readFile(file, "utf8");
59 | return JSON.parse(buf) as T;
60 | }
61 |
62 | async function writePrettyJson(file: string, data: unknown): Promise {
63 | const content = JSON.stringify(data, null, 2);
64 | await fs.writeFile(file, `${content}\n`, "utf8");
65 | }
66 |
67 | function toDebugTerm(term: TermData): TermDebug {
68 | const { caches } = term;
69 |
70 | const courses: Record = {};
71 |
72 | for (const [courseId, courseTuple] of Object.entries(term.courses)) {
73 | const [fullName, sectionsMap, prerequisites, description] = courseTuple;
74 |
75 | const sections: Record = {};
76 | for (const [sectionId, sectionTuple] of Object.entries(sectionsMap)) {
77 | const [
78 | crn,
79 | meetingsTuples,
80 | creditHours,
81 | scheduleTypeIndex,
82 | campusIndex,
83 | attributeIndices,
84 | gradeBaseIndex,
85 | ] = sectionTuple;
86 |
87 | const scheduleType =
88 | safeGet(caches.scheduleTypes, scheduleTypeIndex) ?? "";
89 | const campus = safeGet(caches.campuses, campusIndex) ?? "";
90 | const attributes = attributeIndices
91 | .map((i) => safeGet(caches.attributes, i))
92 | .filter((x): x is string => x != null);
93 | const gradeBase =
94 | gradeBaseIndex >= 0 ? safeGet(caches.gradeBases, gradeBaseIndex) : null;
95 |
96 | const meetings: MeetingDebug[] = meetingsTuples.map((m) => {
97 | const [
98 | periodIndex,
99 | days,
100 | room,
101 | locationIndex,
102 | instructors,
103 | dateRangeIndex,
104 | finalDateIndex,
105 | finalTimeIdx,
106 | ] = m;
107 |
108 | const period = safeGet(caches.periods, periodIndex) ?? "";
109 | const dateRange = safeGet(caches.dateRanges, dateRangeIndex) ?? "";
110 | const location = safeGet(caches.locations, locationIndex) ?? null;
111 | const finalDate =
112 | finalDateIndex >= 0
113 | ? (safeGet(caches.finalDates, finalDateIndex) as
114 | | string
115 | | Date
116 | | null)
117 | : null;
118 | const finalTime =
119 | finalTimeIdx >= 0 ? safeGet(caches.finalTimes, finalTimeIdx) : null;
120 |
121 | return {
122 | periodIndex,
123 | period,
124 | days,
125 | room,
126 | locationIndex,
127 | location,
128 | instructors,
129 | dateRangeIndex,
130 | dateRange,
131 | finalDateIndex,
132 | finalDate: finalDate != null ? String(finalDate) : null,
133 | finalTimeIdx,
134 | finalTime,
135 | };
136 | });
137 |
138 | sections[sectionId] = {
139 | crn,
140 | meetings,
141 | creditHours,
142 | scheduleTypeIndex,
143 | scheduleType,
144 | campusIndex,
145 | campus,
146 | attributeIndices,
147 | attributes,
148 | gradeBaseIndex,
149 | gradeBase,
150 | };
151 | }
152 |
153 | courses[courseId] = {
154 | fullName,
155 | sections,
156 | prerequisites,
157 | description,
158 | };
159 | }
160 |
161 | return {
162 | courses,
163 | caches: term.caches,
164 | updatedAt: term.updatedAt,
165 | version: term.version,
166 | };
167 | }
168 |
169 | async function ensureDebugDir(debugDir: string): Promise {
170 | if (!existsSync(debugDir)) mkdirSync(debugDir, { recursive: true });
171 | }
172 |
173 | async function listDataFiles(): Promise {
174 | const files = await fs.readdir(dataPath);
175 | // Include only term JSON files like 202408.json, skip index.json and debug dir
176 | const dataFileRegex = /^20\d{4}\.json$/;
177 | return files
178 | .filter((f) => dataFileRegex.test(f))
179 | .map((f) => path.join(dataPath, f));
180 | }
181 |
182 | async function run(): Promise {
183 | const arg = process.argv[2]; // optional: specific file path or file name
184 | const debugDir = path.join(dataPath, "debug");
185 | await ensureDebugDir(debugDir);
186 |
187 | const targets =
188 | arg != null
189 | ? [path.isAbsolute(arg) ? arg : path.join(dataPath, arg)]
190 | : await listDataFiles();
191 |
192 | for (const file of targets) {
193 | const base = path.basename(file);
194 | const out = path.join(debugDir, base);
195 | const term = await readJsonFile(file);
196 | const debugTerm = toDebugTerm(term);
197 | await writePrettyJson(out, debugTerm);
198 | // eslint-disable-next-line no-console
199 | console.log(`Wrote debug: ${out}`);
200 | }
201 | }
202 |
// Script entry point: convert the term file(s) and exit non-zero on failure
run().catch((err) => {
  // eslint-disable-next-line no-console
  console.error(err);
  process.exit(1);
});
208 |
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
1 | import asyncPool from "tiny-async-pool";
2 |
3 | import {
4 | download,
5 | list,
6 | parse,
7 | downloadCourseDetails,
8 | attachDescriptions,
9 | attachPrereqs,
10 | write,
11 | parseCourseDescription,
12 | writeIndex,
13 | parseCoursePrereqs,
14 | downloadCoursePrereqDetails,
15 | } from "./steps";
16 | import { Prerequisites } from "./types";
17 | import {
18 | setLogFormat,
19 | isLogFormat,
20 | log,
21 | error,
22 | span,
23 | warn,
24 | getLogFormat,
25 | } from "./log";
26 | import { getIntConfig } from "./utils";
27 |
// Current scraped JSON version
const CURRENT_VERSION = 3;

// Manually set the list of terms for crawling (comma-separated term codes
// in the SPECIFIED_TERMS env var, e.g. "202402,202408").
// This will ignore NUM_TERMS
const SPECIFIED_TERMS = process.env.SPECIFIED_TERMS?.split(",").map((term) =>
  term.trim()
);

// Number of terms to scrape (scrapes most recent `NUM_TERMS`)
const NUM_TERMS = SPECIFIED_TERMS
  ? SPECIFIED_TERMS.length
  : getIntConfig("NUM_TERMS") ?? 2;

// Whether to always scrape the current term, even if it's not in the
// most recent `NUM_TERMS` terms (set ALWAYS_SCRAPE_CURRENT_TERM=1).
const ALWAYS_SCRAPE_CURRENT_TERM: boolean =
  getIntConfig("ALWAYS_SCRAPE_CURRENT_TERM") === 1;

// IO Concurrency to download files using.
// This is a completely arbitrary number.
const DETAILS_CONCURRENCY = getIntConfig("DETAILS_CONCURRENCY") ?? 128;
51 |
52 | async function main(): Promise {
53 | const rawLogFormat = process.env.LOG_FORMAT;
54 | if (rawLogFormat != null) {
55 | if (isLogFormat(rawLogFormat)) {
56 | setLogFormat(rawLogFormat);
57 | } else {
58 | warn(`invalid log format provided`, { logFormat: rawLogFormat });
59 | process.exit(1);
60 | }
61 | } else {
62 | setLogFormat("text");
63 | }
64 |
65 | log(`starting crawler`, {
66 | currentVersion: CURRENT_VERSION,
67 | numTerms: NUM_TERMS,
68 | detailsConcurrency: DETAILS_CONCURRENCY,
69 | logFormat: getLogFormat(),
70 | });
71 |
72 | try {
73 | // Create a new top-level span for the entire crawler operation.
74 | // This simply logs when before/after the operation
75 | // so we know how long it took.
76 | await span(`crawling Oscar`, {}, async () => crawl());
77 | process.exit(0);
78 | } catch (err) {
79 | error(`a fatal error occurred while running the crawler`, err);
80 | process.exit(1);
81 | }
82 | }
83 |
84 | async function crawl(): Promise {
85 | const results: [string[], string[]] = await span(
86 | `listing all terms`,
87 | {},
88 | async (setFinishFields) => {
89 | const lists = await list();
90 | const terms = lists[0];
91 | const termInfo = lists[1];
92 |
93 | let toScrape;
94 | // If no term is manually set, scrape the most recent terms
95 | if (SPECIFIED_TERMS) {
96 | if (SPECIFIED_TERMS.some((term) => !terms.includes(term)))
97 | throw new Error("The manually set term is invalid");
98 |
99 | toScrape = SPECIFIED_TERMS;
100 | } else {
101 | const recentTerms = terms.slice(0, NUM_TERMS);
102 | toScrape = recentTerms;
103 | }
104 |
105 | if (ALWAYS_SCRAPE_CURRENT_TERM) {
106 | // Make sure that, in addition to the most-recent terms,
107 | // the 'current' term is also scraped. This is done by
108 | // computing a rough estimate of the current term based on
109 | // the current date.
110 | //
111 | // Motivation: at the beginning of 2023, Oscar had all 3 terms for the
112 | // year (Spring, Summer, Fall) listed (but no courses were in Summer/
113 | // Fall). In the past (to my knowledge), this wasn't the case; terms
114 | // would only appear once the course schedule was released (in the
115 | // middle of the prior semester). The crawler is configured to scrape
116 | // the most recent 2 terms, so to make sure it continues to scrape the
117 | // Spring schedule during the Spring semester, this was added as a
118 | // workaround.
119 |
120 | type TermLabel = "spring" | "summer" | "fall";
121 | const getTermEstimate = (date: Date): TermLabel => {
122 | const month = date.getMonth();
123 | if (month <= 3 /* Until end of April */) {
124 | return "spring";
125 | }
126 | if (month <= 6 /* Until end of July */) {
127 | return "summer";
128 | }
129 | return "fall";
130 | };
131 |
132 | /**
133 | * Reverse of getSemesterName from https://github.com/gt-scheduler/website/blob/main/src/utils/semesters.ts:
134 | */
135 | const termLabelToPossibleTermCodes = (
136 | termString: TermLabel,
137 | year: number
138 | ): string[] => {
139 | switch (termString) {
140 | case "spring":
141 | return [`${year}02`, `${year}03`];
142 | case "summer":
143 | return [`${year}05`, `${year}06`];
144 | case "fall":
145 | return [`${year}08`, `${year}09`];
146 | default:
147 | throw new Error(`invalid term string: ${termString}`);
148 | }
149 | };
150 |
151 | const now = new Date();
152 | const currentTermEstimate = getTermEstimate(now);
153 | const possibleTermCodes = termLabelToPossibleTermCodes(
154 | currentTermEstimate,
155 | now.getFullYear()
156 | );
157 | const matchingTerms = terms.filter((term) =>
158 | possibleTermCodes.includes(term)
159 | );
160 | if (matchingTerms.length === 0) {
161 | warn(`no terms match the current term estimate`, {
162 | currentTermEstimate,
163 | possibleTermCodesFromEstimate: possibleTermCodes,
164 | actualTermCodes: terms,
165 | });
166 | } else {
167 | const [matchingTerm] = matchingTerms;
168 | const alreadyInToScrape = toScrape.includes(matchingTerm);
169 | if (!alreadyInToScrape) {
170 | toScrape = [matchingTerm, ...toScrape];
171 | }
172 | setFinishFields({
173 | addedCurrentTerm: !alreadyInToScrape,
174 | currentTerm: matchingTerm,
175 | });
176 | }
177 | }
178 |
179 | setFinishFields({
180 | terms,
181 | termsToScrape: toScrape,
182 | desiredNumTerms: NUM_TERMS,
183 | });
184 | return [toScrape, termInfo];
185 | }
186 | );
187 | const termsToScrape: string[] = results[0];
188 | const termInfo = results[1];
189 |
190 | // Scrape each term in parallel
191 | await Promise.all(
192 | termsToScrape.map(async (term) => {
193 | // Set the base fields that are added to every span
194 | const termSpanFields: Record = {
195 | term,
196 | version: CURRENT_VERSION,
197 | };
198 |
199 | await span(`crawling term`, termSpanFields, () =>
200 | crawlTerm(term, termSpanFields)
201 | );
202 | })
203 | );
204 |
205 | // Output a JSON file containing all of the scraped term files
206 | await writeIndex(termInfo);
207 | }
208 |
209 | async function crawlTerm(
210 | term: string,
211 | baseSpanFields: Record
212 | ): Promise {
213 | // Alias the parameter so we can modify it
214 | let spanFields = baseSpanFields;
215 |
216 | // Download the term JSON containing every course.
217 | const sections = await span(`downloading term`, spanFields, () =>
218 | download(term, DETAILS_CONCURRENCY)
219 | );
220 |
221 | const termData = await span(`parsing term data to JSON`, spanFields, () =>
222 | parse(sections, CURRENT_VERSION)
223 | );
224 |
225 | const allCourseIds = Object.keys(termData.courses);
226 | const courseIdCount = allCourseIds.length;
227 | spanFields = { ...spanFields, courseIdCount };
228 | log(`collected all course ids`, { allCourseIds, ...spanFields });
229 |
230 | const allPrereqs: Record = {};
231 | const allDescriptions: Record = {};
232 | await span(
233 | `downloading & parsing prerequisite info & course descriptions`,
234 | { ...spanFields, concurrency: DETAILS_CONCURRENCY },
235 | async () =>
236 | asyncPool(DETAILS_CONCURRENCY, allCourseIds, async (courseId) => {
237 | const [coursePrereqs, courseDescription] = await span(
238 | `crawling individual course`,
239 | {
240 | ...spanFields,
241 | courseId,
242 | },
243 | async (setCompletionFields) => {
244 | const [htmlLength, prereqs, description] = await crawlCourseDetails(
245 | term,
246 | courseId
247 | );
248 | setCompletionFields({
249 | htmlLength,
250 | hasDescription: description != null,
251 | });
252 | return [prereqs, description];
253 | }
254 | );
255 |
256 | allPrereqs[courseId] = coursePrereqs;
257 | allDescriptions[courseId] = courseDescription;
258 | })
259 | );
260 |
261 | await span(`attaching prereq information`, spanFields, () =>
262 | attachPrereqs(termData, allPrereqs)
263 | );
264 |
265 | await span(`attaching course descriptions`, spanFields, () =>
266 | attachDescriptions(termData, allDescriptions)
267 | );
268 |
269 | await span(`writing scraped data to disk`, spanFields, () =>
270 | write(term, termData)
271 | );
272 | }
273 |
274 | async function crawlCourseDetails(
275 | term: string,
276 | courseId: string
277 | ): Promise<
278 | [htmlLength: number, prereqs: Prerequisites | [], descriptions: string | null]
279 | > {
280 | const detailsHtml = await downloadCourseDetails(term, courseId);
281 | const description = await parseCourseDescription(detailsHtml, courseId);
282 | const prereqHtml = await downloadCoursePrereqDetails(term, courseId);
283 | const prereqs = await parseCoursePrereqs(prereqHtml, courseId);
284 | return [detailsHtml.length, prereqs, description];
285 | }
286 |
// Entry point: start the crawler
main();
288 |
--------------------------------------------------------------------------------
/src/log.ts:
--------------------------------------------------------------------------------
1 | import process from "process";
2 | import safeStringify from "fast-safe-stringify";
3 |
4 | export type LogFormat = "json" | "text";
5 |
6 | let logFormat: LogFormat = "text";
7 |
8 | /**
9 | * Custom type guard for determining if a string is a valid log format
10 | */
11 | export function isLogFormat(rawLogFormat: string): rawLogFormat is LogFormat {
12 | switch (rawLogFormat) {
13 | case "json":
14 | case "text":
15 | return true;
16 | default:
17 | return false;
18 | }
19 | }
20 |
/**
 * Sets the global log format used for any future calls to `log`,
 * its derivatives, or any Span functions
 * @param newLogFormat - the format ("json" or "text") to use from now on
 */
export function setLogFormat(newLogFormat: LogFormat): void {
  // Mutates the module-level `logFormat` shared by all logging functions
  logFormat = newLogFormat;
}
28 |
/**
 * Gets the current global log format
 * @returns the module-level format ("json" or "text") currently in effect
 */
export function getLogFormat(): LogFormat {
  return logFormat;
}
35 |
36 | /**
37 | * Base logging function that logs at level="info"
38 | * @param message - static message, used for grepping logs
39 | * @param fields - structured fields (should be string-serializable)
40 | */
41 | export function log(
42 | message: string,
43 | fields: Record = {}
44 | ): void {
45 | if (logFormat === "json") {
46 | // eslint-disable-next-line no-console
47 | console.log(
48 | safeStringify({
49 | ts: new Date().toISOString(),
50 | message,
51 | level: "info",
52 | ...fields,
53 | })
54 | );
55 | } else {
56 | const fieldsAsStrings: Record = {
57 | level: "info",
58 | };
59 | Object.entries(fields).forEach(([key, value]) => {
60 | if (typeof value === "object") {
61 | fieldsAsStrings[key] = safeStringify(value);
62 | } else {
63 | fieldsAsStrings[key] = String(value);
64 | }
65 | });
66 |
67 | // eslint-disable-next-line no-console
68 | console.log(
69 | [
70 | `[${new Date().toISOString()}]`,
71 | message,
72 | ...Object.entries(fieldsAsStrings).map(
73 | ([key, value]) => `${key}='${value}'`
74 | ),
75 | ].join(" ")
76 | );
77 | }
78 | }
79 |
80 | /**
81 | * Base logging function that logs at level="info"
82 | * @param message - static message, used for grepping logs
83 | * @param fields - structured fields (should be string-serializable)
84 | */
85 | export function info(
86 | message: string,
87 | fields: Record = {}
88 | ): void {
89 | log(message, fields);
90 | }
91 |
92 | /**
93 | * Base logging function that logs at level="warn"
94 | * @param message - static message, used for grepping logs
95 | * @param fields - structured fields (should be string-serializable)
96 | */
97 | export function warn(
98 | message: string,
99 | fields: Record = {}
100 | ): void {
101 | log(message, { level: "warn", ...fields });
102 | }
103 |
104 | /**
105 | * Performs a best-effort serialization of the error into structured fields
106 | * @param err - the raw error object or unknown
107 | * @param includeStack - whether to include the stacktrace in "stack"
108 | * @returns a structured log fields record
109 | */
110 | export function errorFields(
111 | err: unknown,
112 | includeStack = false
113 | ): Record {
114 | const { message: errorMessage, stack } = err as {
115 | message?: string;
116 | stack?: string;
117 | };
118 |
119 | // Perform a best-effort serialization of the error
120 | let errorAsString = String(err);
121 | if (errorAsString === "[object Object]") {
122 | errorAsString = safeStringify(err);
123 | }
124 |
125 | const fields: Record = {
126 | error: errorAsString,
127 | errorMessage,
128 | };
129 |
130 | if (includeStack) {
131 | fields.stack = stack;
132 | }
133 |
134 | return fields;
135 | }
136 |
137 | /**
138 | * Base logging function that logs at level="error",
139 | * including explicit error-related fields.
140 | * @param message - static message, used for grepping logs
141 | * @param err - error object or null/undefined
142 | * @param fields - structured fields (should be string-serializable)
143 | */
144 | export function error(
145 | message: string,
146 | err: unknown,
147 | fields: Record = {}
148 | ): void {
149 | log(message, {
150 | level: "error",
151 | ...errorFields(err, true),
152 | ...fields,
153 | });
154 | }
155 |
156 | /**
157 | * Creates a new span with the given base message, starting it immediately
158 | * @param baseMessage - static message, used for grepping logs
159 | * @param fields - structured fields (should be string-serializable)
160 | * @returns a new `Span` instance
161 | */
162 | export function startSpan(
163 | baseMessage: string,
164 | fields: Record = {}
165 | ): Span {
166 | const currentSpan = new Span(baseMessage, fields);
167 | currentSpan.start();
168 | return currentSpan;
169 | }
170 |
171 | /**
172 | * Runs an entire operation in a span
173 | * @param baseMessage - static message, used for grepping logs
174 | * @param fields - structured fields (should be string-serializable)
175 | * @param execute - async callback. Optional function `setCompletionFields`
176 | * passed in as parameter allows callback to set additional fields
177 | * for the span finish event.
178 | * @returns
179 | */
180 | export async function span(
181 | baseMessage: string,
182 | fields: Record,
183 | execute: (
184 | setCompletionFields: (additionalFields: Record) => void
185 | ) => Promise | R
186 | ): Promise {
187 | // Allow the callback to register additional fields upon completion
188 | let completionFields: Record = {};
189 | const setCompletionFields = (additionalFields: Record) => {
190 | completionFields = { ...completionFields, ...additionalFields };
191 | };
192 |
193 | // Run the operation in a new span
194 | const currentSpan = new Span(baseMessage, fields);
195 | currentSpan.start();
196 | try {
197 | const resultOrPromise = execute(setCompletionFields);
198 | let result: R;
199 | if (resultOrPromise instanceof Promise) {
200 | result = await resultOrPromise;
201 | } else {
202 | result = resultOrPromise;
203 | }
204 |
205 | currentSpan.finish(completionFields);
206 |
207 | return result;
208 | } catch (err) {
209 | currentSpan.error(err, completionFields);
210 | throw err;
211 | }
212 | }
213 |
214 | /**
215 | * Represents a span operation that includes timing information
216 | * and structured logging
217 | */
218 | export class Span {
219 | baseMessage: string;
220 |
221 | fields: Record;
222 |
223 | startTime: [number, number] | null;
224 |
225 | /**
226 | * Creates a new span without starting it
227 | * @param baseMessage - static message, used for grepping logs
228 | * @param fields - structured fields (should be string-serializable)
229 | */
230 | constructor(baseMessage: string, fields: Record = {}) {
231 | this.baseMessage = baseMessage;
232 | this.fields = fields;
233 | this.startTime = null;
234 | }
235 |
236 | /**
237 | * Emits a span event as a log line
238 | * @param event - the type of the span event
239 | * @param additionalFields - additional structured fields
240 | * (should be string-serializable)
241 | */
242 | spanEvent(
243 | event: "start" | "finish" | "error",
244 | additionalFields: Record = {}
245 | ): void {
246 | if (logFormat === "json") {
247 | log(this.baseMessage, {
248 | spanEvent: event,
249 | ...this.fields,
250 | ...additionalFields,
251 | });
252 | } else {
253 | const eventPrefix = `${event}ed`;
254 | // 8 is the length of the longest possible event prefix, "finished"
255 | log(`${eventPrefix.padStart(8)} ${this.baseMessage}`, {
256 | ...this.fields,
257 | ...additionalFields,
258 | });
259 | }
260 | }
261 |
262 | /**
263 | * Starts a previously-constructed span, emitting a span start event
264 | * and noting the start time in the `Span` object
265 | */
266 | start(): void {
267 | this.spanEvent("start");
268 | this.startTime = process.hrtime();
269 | }
270 |
271 | getElapsedMs(): number {
272 | if (this.startTime === null) {
273 | throw new Error(
274 | `Span has not yet started: baseMessage="${this.baseMessage}"`
275 | );
276 | }
277 |
278 | // Gives [seconds, nanoseconds]
279 | const end = process.hrtime(this.startTime);
280 | return end[0] * 1_000 + end[1] / 1_000_000;
281 | }
282 |
283 | /**
284 | * Finishes a previously-started span, emitting a span finish event
285 | * that includes the elapsed time since the call to `start`
286 | * @param additionalFields - additional structured fields
287 | * (should be string-serializable)
288 | */
289 | finish(additionalFields: Record = {}): void {
290 | this.spanEvent("finish", {
291 | elapsedMs: this.getElapsedMs(),
292 | ...additionalFields,
293 | });
294 | }
295 |
296 | /**
297 | * Finishes a previously-started span, emitting a span error event
298 | * that includes the elapsed time since the call to `start`
299 | * @param additionalFields - additional structured fields
300 | * (should be string-serializable)
301 | */
302 | error(err: unknown, additionalFields: Record = {}): void {
303 | this.spanEvent("error", {
304 | level: "error",
305 | elapsedMs: this.getElapsedMs(),
306 | ...errorFields(err, false),
307 | ...additionalFields,
308 | });
309 | }
310 | }
311 |
--------------------------------------------------------------------------------
/src/matrix.json:
--------------------------------------------------------------------------------
1 | {
2 | "202205": "https://registrar.gatech.edu/files/202205%20Final%20Exam%20Matrix%20Full.pdf",
3 | "202208": "https://registrar.gatech.edu/files/202208%20Final%20Exam%20Matrix.pdf",
4 | "202302": "https://registrar.gatech.edu/files/202302%20Final%20Exam%20Matrix.pdf",
5 | "202308": "https://registrar.gatech.edu/public/files/202308-Final-Exam-Matrix.pdf",
6 | "202402": "https://registrar.gatech.edu/public/files/202402-Final-Exam-Matrix.pdf",
7 | "202405": "https://registrar.gatech.edu/public/files/202405-Summer-Full-Final-Exam-Schedule.pdf",
8 | "202408": "https://registrar.gatech.edu/public/files/202408%20Final%20Exam%20Matrix.pdf",
9 | "202502": "https://registrar.gatech.edu/public/files/Final%20Exam%20Matrix%20for%20Spring%202025_0.pdf",
10 | "202505": "https://registrar.gatech.edu/public/files/Summer%20Full%20202505_0.pdf",
11 | "202508": "https://registrar.gatech.edu/public/files/Final%20Exam%20Matrix%20for%20Fall%202025_2.pdf"
12 | }
13 |
--------------------------------------------------------------------------------
/src/serve.py:
--------------------------------------------------------------------------------
1 | from http.server import HTTPServer, SimpleHTTPRequestHandler
2 | import os
3 |
# === Server Configuration Constants ===
HOST = "localhost"
PORT = 8080
# Path to the crawler's output, relative to the process working directory
SERVE_DIRECTORY = "../data"
# Headers added to every response so the website (served from a different
# origin) can fetch the crawled data from the browser
CORS_HEADERS = {
    "Access-Control-Allow-Origin": "*",
    "Access-Control-Allow-Methods": "GET, OPTIONS",
    "Access-Control-Allow-Headers": "Content-Type",
}
13 |
14 |
# === Custom Request Handler ===
# Note that we need CORS headers to allow cross-origin requests from the
# website which is at a different origin
class CORSRequestHandler(SimpleHTTPRequestHandler):
    # Inject the CORS headers into every response just before the header
    # block is finalized by the base handler
    def end_headers(self):
        for header, value in CORS_HEADERS.items():
            self.send_header(header, value)
        super().end_headers()
22 |
23 |
# === Server Setup ===
def run_server(host: str = HOST, port: int = PORT, directory: str = SERVE_DIRECTORY):
    """Serve `directory` over HTTP at host:port with CORS headers (blocks forever).

    Uses the handler's `directory` parameter (Python 3.7+) instead of
    os.chdir, so the process's working directory is left untouched.
    Relative `directory` values still resolve against the current working
    directory, matching the previous chdir-based behavior.
    """
    from functools import partial

    # Bind the serving directory to the handler class instead of mutating
    # global process state with os.chdir
    handler = partial(CORSRequestHandler, directory=directory)
    server = HTTPServer((host, port), handler)
    print(f"Serving at http://{host}:{port}")
    server.serve_forever()
30 |
31 |
# Run with the default host/port/directory when executed directly
if __name__ == "__main__":
    run_server()
34 |
35 |
--------------------------------------------------------------------------------
/src/steps/descriptions/attach.ts:
--------------------------------------------------------------------------------
1 | import { warn } from "../../log";
2 | import { TermData } from "../../types";
3 |
4 | /**
5 | * Attaches course descriptions to the data for the current term in-place
6 | * for all courses with valid descriptions
7 | * (*mutates the termData parameter*).
8 | * @param termData - Term data for all courses as parsed in previous steps
9 | * @param prerequisites - Global course Id -> description as parsed in previous steps
10 | */
11 | export function attachDescriptions(
12 | termData: TermData,
13 | descriptions: Record
14 | ): void {
15 | Object.keys(descriptions).forEach((courseId) => {
16 | // Skip null descriptions
17 | if (descriptions[courseId] === null) return;
18 |
19 | if (courseId in termData.courses) {
20 | // eslint-disable-next-line no-param-reassign
21 | termData.courses[courseId][3] = descriptions[courseId];
22 | } else {
23 | warn(`received description for unknown course`, { courseId });
24 | }
25 | });
26 | }
27 |
--------------------------------------------------------------------------------
/src/steps/descriptions/parse.ts:
--------------------------------------------------------------------------------
1 | import { warn } from "../../log";
2 | import { regexExec } from "../../utils";
3 |
4 | const descriptionRegex = /([\s\S]*?)<\/section>/;
5 |
6 | /**
7 | * Parses the HTML for a single course to get its description, if it has one
8 | * @param html - Source HTML from the course details page
9 | * @param courseId - The joined course id (SUBJECT NUMBER); i.e. `"CS 2340"`
10 | */
11 | export function parseCourseDescription(
12 | html: string,
13 | courseId: string
14 | ): string | null {
15 | try {
16 | // Get the first match of the description content regex
17 | const [, contents] = regexExec(descriptionRegex, html);
18 |
19 | // Clean up the contents to remove HTML elements and get plaintext
20 | const withoutHtml = contents.replace(/<[^>]*>/g, "");
21 | const trimmed = withoutHtml.trim();
22 |
23 | // Only return the description if it is non-empty
24 | if (trimmed.length === 0) {
25 | return null;
26 | }
27 |
28 | return trimmed;
29 | } catch {
30 | warn(`could not execute course description regex`, { courseId });
31 | return null;
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/src/steps/details.ts:
--------------------------------------------------------------------------------
1 | import axios from "axios";
2 | import { backOff } from "exponential-backoff";
3 | import { concatParams } from "../utils";
4 | import { warn, error } from "../log";
5 |
6 | /**
7 | * Downloads the course detail information for a single course
8 | * @param term - The term string
9 | * @param courseId - The joined course id (SUBJECT NUMBER); i.e. `"CS 2340"`
10 | */
11 | export async function downloadCourseDetails(
12 | term: string,
13 | courseId: string
14 | ): Promise {
15 | // Attempt to split the course ID into its subject/number
16 | const splitResult = splitCourseId(courseId);
17 | if (splitResult === null) {
18 | warn("could not split course ID; skipping detail scraping", { courseId });
19 | return "";
20 | }
21 |
22 | const [subject, number] = splitResult;
23 | const parameters = {
24 | term,
25 | subjectCode: subject,
26 | courseNumber: number,
27 | };
28 |
29 | const query = `?${concatParams(parameters)}`;
30 | const url = `https://registration.banner.gatech.edu/StudentRegistrationSsb/ssb/courseSearchResults/getCourseDescription${query}`;
31 |
32 | // Perform the request in a retry loop
33 | // (sometimes, we get rate limits/transport errors so this tries to mitigates them)
34 | const maxAttemptCount = 10;
35 | try {
36 | const response = await backOff(
37 | () =>
38 | axios.get(url, {
39 | headers: {
40 | "User-Agent": "gt-scheduler/crawler",
41 | },
42 | }),
43 | {
44 | // See https://github.com/coveooss/exponential-backoff for options API
45 | jitter: "full",
46 | numOfAttempts: maxAttemptCount,
47 | retry: (err, attemptNumber) => {
48 | error(`an error occurred while fetching details`, err, {
49 | courseId,
50 | url,
51 | attemptNumber,
52 | tryingAgain: attemptNumber < maxAttemptCount,
53 | });
54 | return true;
55 | },
56 | }
57 | );
58 | return response.data;
59 | } catch (err) {
60 | error(`exhausted retries for fetching details`, err, { courseId });
61 | throw err;
62 | }
63 | }
64 |
65 | /**
66 | * Downloads the prerequisites for a single course
67 | * @param term - The term string
68 | * @param courseId - The joined course id (SUBJECT NUMBER); i.e. `"CS 2340"`
69 | */
70 | export async function downloadCoursePrereqDetails(
71 | term: string,
72 | courseId: string
73 | ): Promise {
74 | const splitResult = splitCourseId(courseId);
75 | if (splitResult === null) {
76 | warn("could not split course ID; skipping detail scraping", { courseId });
77 | return "";
78 | }
79 |
80 | const [subject, number] = splitResult;
81 | const parameters = {
82 | term,
83 | subjectCode: subject,
84 | courseNumber: number,
85 | };
86 | const query = `?${concatParams(parameters)}`;
87 | const url = `https://registration.banner.gatech.edu/StudentRegistrationSsb/ssb/courseSearchResults/getPrerequisites${query}`;
88 |
89 | // Perform the request in a retry loop
90 | // (sometimes, we get rate limits/transport errors so this tries to mitigates them)
91 | const maxAttemptCount = 10;
92 | try {
93 | const response = await backOff(
94 | () =>
95 | axios.get(url, {
96 | headers: {
97 | "User-Agent": "gt-scheduler/crawler",
98 | },
99 | }),
100 | {
101 | // See https://github.com/coveooss/exponential-backoff for options API
102 | jitter: "full",
103 | numOfAttempts: maxAttemptCount,
104 | retry: (err, attemptNumber) => {
105 | error(`an error occurred while fetching details`, err, {
106 | courseId,
107 | url,
108 | attemptNumber,
109 | tryingAgain: attemptNumber < maxAttemptCount,
110 | });
111 | return true;
112 | },
113 | }
114 | );
115 | return response.data;
116 | } catch (err) {
117 | error(`exhausted retries for fetching prereqs`, err, { courseId });
118 | throw err;
119 | }
120 | }
121 |
122 | /**
123 | * Attempts to split a course ID into its subject/number components
124 | * @param courseId - The joined course id (SUBJECT NUMBER); i.e. `"CS 2340"`
125 | */
126 | function splitCourseId(
127 | courseId: string
128 | ): [subject: string, number: string] | null {
129 | const splitResult = courseId.split(" ");
130 | if (splitResult.length !== 2) return null;
131 | return [splitResult[0], splitResult[1]];
132 | }
133 |
--------------------------------------------------------------------------------
/src/steps/download.ts:
--------------------------------------------------------------------------------
1 | import axios, { AxiosInstance } from "axios";
2 | import { backOff } from "exponential-backoff";
3 | import asyncPool from "tiny-async-pool";
4 |
5 | import { concatParams } from "../utils";
6 | import { BannerResponse, SectionResponse } from "../types";
7 | import { error, span } from "../log";
8 |
// Banner caps page size at 500 (see getSectionsPage's pageMaxSize doc)
export const MAX_PAGE_SIZE = 500;
// Retry budget for each backOff-wrapped request
export const MAX_ATTEMPT_COUNT = 10;
export const PAGE_SIZE = 150; // Best runtime vs number of requests ratio

// A single page of section results plus Banner's reported total count
export interface SectionsPage {
  sections: SectionResponse[];
  totalCount: number;
}
17 |
18 | /**
19 | * Creates a Banner 9 API query object based on inputs
20 | */
21 | export function buildParams({
22 | term,
23 | pageOffset,
24 | pageMaxSize,
25 | }: {
26 | term: string;
27 | pageOffset: number;
28 | pageMaxSize: number;
29 | }): Record {
30 | return {
31 | txt_term: term,
32 | txt_subj: "",
33 | txt_courseNumber: "",
34 | startDatepicker: "",
35 | endDatepicker: "",
36 | pageOffset: pageOffset.toString(),
37 | pageMaxSize: pageMaxSize.toString(),
38 | sortColumn: "subjectDescription",
39 | sortDirection: "asc",
40 | };
41 | }
42 |
43 | /**
44 | * Generates session cookies for the Banner 9 API for the given term with exponential backoff in case of errors.
45 | * @param term - The term whose session is created
46 | * @returns An array of the 2 string cookies the Banner 9 API generates
47 | */
48 | export async function generateSearchSessionCookies(
49 | term: string
50 | ): Promise {
51 | try {
52 | // Retries request with exponential back off in case of errors
53 | const response = await backOff(
54 | () =>
55 | axios
56 | .get(
57 | "https://registration.banner.gatech.edu/StudentRegistrationSsb",
58 | {
59 | headers: {
60 | "User-Agent": "gt-scheduler/crawler",
61 | },
62 | }
63 | )
64 | .then(async (res) => {
65 | // Throws an error if session cookie generated is undefined to trigger a retry
66 | if (res.headers["set-cookie"] === undefined) {
67 | throw new Error("Null session cookie generated in /getTerms");
68 | }
69 |
70 | await axios.post(
71 | "https://registration.banner.gatech.edu/StudentRegistrationSsb/ssb/term/search?mode=search",
72 | { term },
73 | {
74 | headers: {
75 | "Content-Type":
76 | "application/x-www-form-urlencoded; charset=UTF-8",
77 | "User-Agent": "gt-scheduler/crawler",
78 | Cookie: res.headers["set-cookie"],
79 | },
80 | }
81 | );
82 |
83 | return res;
84 | }),
85 | {
86 | // See https://github.com/coveooss/exponential-backoff for options API.
87 | jitter: "full",
88 | numOfAttempts: MAX_ATTEMPT_COUNT,
89 | retry: (err, attemptNumber) => {
90 | error(
91 | `an error occurred while generating banner session cookies`,
92 | err,
93 | {
94 | term,
95 | attemptNumber,
96 | tryingAgain: attemptNumber < MAX_ATTEMPT_COUNT,
97 | }
98 | );
99 | return true;
100 | },
101 | }
102 | );
103 |
104 | const cookies = response.headers["set-cookie"];
105 | if (cookies === undefined) {
106 | throw new Error("Null session cookie generated");
107 | }
108 | return cookies;
109 | } catch (err) {
110 | error(`exhausted retries for generating banner session cookies`, err, {
111 | term,
112 | });
113 | throw err;
114 | }
115 | }
116 |
117 | /**
118 | * Fetches a page of sections data for a given term, sectionOffset, and pageMaxSize
119 | * @param session An axios instance with Banner 9 API session cookies attached
120 | * @pageOffset The section number starting from which sections need to be fetched
121 | * @param pageMaxSize The size of page returned (max. 500)
122 | */
123 | async function getSectionsPage({
124 | session,
125 | term,
126 | pageOffset,
127 | pageMaxSize,
128 | }: {
129 | session: AxiosInstance;
130 | term: string;
131 | pageOffset: number;
132 | pageMaxSize: number;
133 | }): Promise {
134 | const params = buildParams({
135 | term,
136 | pageOffset,
137 | pageMaxSize,
138 | });
139 | const query = `?${concatParams(params)}`;
140 | const url = `https://registration.banner.gatech.edu/StudentRegistrationSsb/ssb/searchResults/searchResults${query}`;
141 |
142 | try {
143 | // Retries request with exponential back off in case of errors
144 | const response = await backOff(
145 | () =>
146 | session
147 | .get(url, {
148 | headers: {
149 | "User-Agent": "gt-scheduler/crawler",
150 | },
151 | })
152 | .then((res) => {
153 | // Throws an error if Banner response data is null to trigger a retry
154 | if (res.data.data === null) {
155 | throw new Error("Fetched null data");
156 | }
157 | return res;
158 | }),
159 | {
160 | // See https://github.com/coveooss/exponential-backoff for options API
161 | jitter: "full",
162 | numOfAttempts: MAX_ATTEMPT_COUNT,
163 | retry: (err, attemptNumber) => {
164 | error(`an error occurred while range of section JSON pages`, err, {
165 | term,
166 | pageOffset,
167 | pageMaxSize,
168 | attemptNumber,
169 | tryingAgain: attemptNumber < MAX_ATTEMPT_COUNT,
170 | });
171 | return true;
172 | },
173 | }
174 | );
175 |
176 | const bannerResponse = response.data;
177 | if (bannerResponse.data === null) {
178 | throw new Error("Fetched null data");
179 | }
180 |
181 | return {
182 | sections: bannerResponse.data,
183 | totalCount: bannerResponse.totalCount,
184 | };
185 | } catch (err) {
186 | error(`exhausted retries for range of section JSON pages`, err, {
187 | term,
188 | pageOffset,
189 | pageMaxSize,
190 | });
191 | throw err;
192 | }
193 | }
194 |
195 | export async function download(
196 | term: string,
197 | numThreads: number
198 | ): Promise {
199 | let spanFields: Record = { term };
200 |
201 | // Generates and attaches a session cookie for the given term to an axios instance.
202 | const cookies = await span(
203 | "generating banner session cookies",
204 | { term },
205 | async () => generateSearchSessionCookies(term)
206 | );
207 | const session = axios.create({
208 | headers: { Cookie: cookies },
209 | });
210 |
211 | // Gets total section count for the given query by fetching one section.
212 | // For a pageMaxSize of 0, pageOffset 0 fetches 10 sections while pageOffset 1 fetches 1 section.
213 | // We only care about the totalCount returned so we minimize the time taken to retrieve it by
214 | // using pageOffset of 1.
215 | const firstSection = await span(
216 | "fetching initial section",
217 | { ...spanFields, pageOffset: 1, pageMaxSize: 0 },
218 | async (setCompletionFields) => {
219 | const sectionsPage = await getSectionsPage({
220 | session,
221 | term,
222 | pageOffset: 1,
223 | pageMaxSize: 0,
224 | });
225 | setCompletionFields({ totalCount: sectionsPage.totalCount });
226 | return sectionsPage;
227 | }
228 | );
229 | const { totalCount } = firstSection;
230 |
231 | const numRequests = Math.ceil(totalCount / PAGE_SIZE);
232 |
233 | // Creates an array of sectionOffset values based on the number of requests required
234 | const offsetArr = Array(numRequests)
235 | .fill(0)
236 | .map((_, i) => PAGE_SIZE * i);
237 |
238 | // Stores the response data of the concurrent fetches of course data in an array
239 | let sectionsPages: SectionsPage[] = [];
240 |
241 | const pageMaxSize = PAGE_SIZE;
242 | spanFields = { ...spanFields, totalCount, pageMaxSize };
243 | await span(
244 | "fetching all section JSON pages in thread pool",
245 | spanFields,
246 | async (setCompletionFields) => {
247 | if (numRequests >= 1) {
248 | // Creates a partially applied function for getSectionsPage that only takes in a
249 | // pageOffset input with the remaining parameters fixed.
250 | const partiallyAppliedGetSectionsPage = (pageOffset: number) =>
251 | span(
252 | "fetching range of section JSON pages",
253 | { ...spanFields, pageOffset },
254 | async (setCompletionFieldsInner) => {
255 | const sectionsPage = await getSectionsPage({
256 | session,
257 | term,
258 | pageOffset,
259 | pageMaxSize,
260 | });
261 | setCompletionFieldsInner({
262 | sectionsCount: sectionsPage.sections.length,
263 | });
264 | return sectionsPage;
265 | }
266 | );
267 | sectionsPages = await asyncPool(
268 | numThreads,
269 | offsetArr,
270 | partiallyAppliedGetSectionsPage
271 | );
272 | }
273 | const fetchedCount = sectionsPages.reduce(
274 | (count, sectionsPage) => count + sectionsPage.sections.length,
275 | 0
276 | );
277 | setCompletionFields({
278 | fetchedCount,
279 | });
280 | }
281 | );
282 |
283 | // Concatenates all section pages into one array
284 | const sections: SectionResponse[] = [];
285 | sectionsPages.forEach((sectionsPage) =>
286 | sections.push(...sectionsPage.sections)
287 | );
288 |
289 | if (sections.length !== totalCount) {
290 | const err = new Error(
291 | "Fetched data count does not match total sections count"
292 | );
293 | error(`error counting course sections`, err, {
294 | term,
295 | fetchedCount: sections.length,
296 | totalCount,
297 | });
298 | throw err;
299 | }
300 |
301 | return sections;
302 | }
303 |
--------------------------------------------------------------------------------
/src/steps/index.ts:
--------------------------------------------------------------------------------
1 | export * from "./list";
2 | export * from "./download";
3 | export * from "./parse";
4 | export * from "./write";
5 | export * from "./writeIndex";
6 | export * from "./details";
7 | export * from "./prereqs/parse";
8 | export * from "./prereqs/attach";
9 | export * from "./descriptions/parse";
10 | export * from "./descriptions/attach";
11 |
--------------------------------------------------------------------------------
/src/steps/list.ts:
--------------------------------------------------------------------------------
1 | import axios from "axios";
2 | import { getIntConfig } from "../utils";
3 | import { Term } from "../types";
4 |
// Number of most-recent terms to scrape (overridable via the NUM_TERMS config)
const NUM_TERMS = getIntConfig("NUM_TERMS") ?? 2;

// Shape of a single term entry returned by the Banner getTerms endpoints
export type TermData = {
  code: string;
  description: string;
};
11 |
/**
 * Fetches available term codes from the Banner 9 API.
 * @returns a tuple of [term codes with a valid month suffix (from the
 * courseSearch endpoint), term codes from the classSearch endpoint —
 * presumably the terms with finalized data; confirm with maintainers]
 */
export async function list(): Promise<[string[], string[]]> {
  // Over-fetch relative to NUM_TERMS so the filtering below still leaves
  // enough results (NOTE(review): the 3x + 10 margin appears heuristic)
  const queryNum = 3 * NUM_TERMS + 10;

  const response = await axios.post(
    `https://registration.banner.gatech.edu/StudentRegistrationSsb/ssb/courseSearch/getTerms?searchTerm=&offset=1&max=${queryNum}`
  );
  const responseFinalized = await axios.post(
    `https://registration.banner.gatech.edu/StudentRegistrationSsb/ssb/classSearch/getTerms?searchTerm=&offset=1&max=${queryNum}`
  );

  // Extract just the term codes (e.g. "202402") from each response
  const terms = response.data.map((term) => term.code);
  const termsFinalized = responseFinalized.data.map((term) => term.code);

  // Keep only codes whose characters after the year form a real month
  // (1-12); this drops placeholder/irregular term codes
  const results = terms.filter((term) => {
    const month = Number(term.slice(4));
    return month >= 1 && month <= 12;
  });

  return [results, termsFinalized];
}
32 |
--------------------------------------------------------------------------------
/src/steps/parse.ts:
--------------------------------------------------------------------------------
1 | import {
2 | TermData,
3 | Course,
4 | Caches,
5 | Meeting,
6 | Section,
7 | Location,
8 | SectionResponse,
9 | } from "../types";
10 | import { cache } from "../utils";
11 | import { warn } from "../log";
12 |
13 | /**
14 | * A map consisting of course locations and corresponding coordinates
15 | *
16 | * When changing these, make sure to also add an abbreviation to the frontend
17 | * in https://github.com/gt-scheduler/website/blob/main/src/utils/misc.tsx
18 | * (search for `LOCATION_ABBREVIATIONS`).
19 | */
20 |
// Keys must exactly match the `meetingTime.buildingDescription` strings that
// `parse` below compares against (strict equality, no normalization), so
// several buildings appear twice under alternate spellings.
const courseLocations = new Map([
  ["Skiles", new Location(33.773568, -84.395957)],
  ["Clough Commons", new Location(33.774909, -84.396404)],
  ["Clough UG Learning Commons", new Location(33.774909, -84.396404)],
  ["Boggs", new Location(33.776085, -84.400181)],
  ["Architecture (West)", new Location(33.776076, -84.396114)],
  ["West Architecture", new Location(33.776076, -84.396114)],
  ["Architecture (East)", new Location(33.776177, -84.395459)],
  ["East Architecture", new Location(33.776177, -84.395459)],
  ["Scheller College of Business", new Location(33.776533, -84.387765)],
  ["Guggenheim Aerospace", new Location(33.771771, -84.395796)],
  ["Van Leer", new Location(33.776065, -84.397116)],
  ["Bunger-Henry", new Location(33.775803, -84.398189)],
  ["Coll of Computing", new Location(33.777576, -84.397352)],
  ["College of Computing", new Location(33.777576, -84.397352)],
  ["Weber SST III", new Location(33.772949, -84.396066)],
  ["Engr Science & Mech", new Location(33.772114, -84.395289)],
  ["Engineering Sci and Mechanics", new Location(33.772114, -84.395289)],
  ["Mason", new Location(33.776764, -84.39844)],
  ["Love (MRDC II)", new Location(33.77672, -84.401764)],
  ["J. Erskine Love Manufacturing", new Location(33.77672, -84.401764)],
  ["MRDC", new Location(33.777187, -84.400484)],
  ["Manufacture Rel Discip Complex", new Location(33.777187, -84.400484)],
  ["Allen Sustainable Education", new Location(33.77622, -84.397959)],
  ["Howey (Physics)", new Location(33.777622, -84.398785)],
  ["Howey Physics", new Location(33.777622, -84.398785)],
  ["Instr Center", new Location(33.775587, -84.401213)],
  ["Instructional Center", new Location(33.775587, -84.401213)],
  ["O'Keefe", new Location(33.779177, -84.392196)],
  ["Curran Street Deck", new Location(33.779495, -84.405633)],
  ["D. M. Smith", new Location(33.773801, -84.395122)],
  ["D.M. Smith", new Location(33.773801, -84.395122)],
  ["Swann", new Location(33.771658, -84.395302)],
  ["Kendeda", new Location(33.778759, -84.399597)],
  ["ES&T", new Location(33.779004, -84.395849)],
  ["Ford Environmental Sci & Tech", new Location(33.779004, -84.395849)],
  ["Klaus Advanced Computing", new Location(33.777107, -84.395817)],
  ["Cherry Emerson", new Location(33.778011, -84.397065)],
  ["U A Whitaker Biomedical Engr", new Location(33.778513, -84.396825)],
  ["Whitaker", new Location(33.778513, -84.396825)],
  ["Molecular Sciences & Engr", new Location(33.779836, -84.396666)],
  ["Molecular Sciences and Engr", new Location(33.779836, -84.396666)],
  ["760 Spring Street", new Location(33.77561, -84.38906)],
  ["760 Spring St NW", new Location(33.77561, -84.38906)],
  ["Paper Tricentennial", new Location(33.780983, -84.404516)],
  ["Daniel Lab", new Location(33.773714, -84.394047)],
  ["Pettit MiRC", new Location(33.776532, -84.397307)],
  ["Centergy", new Location(33.777062, -84.388997)],
  ["Stephen C Hall", new Location(33.774134, -84.39396)],
  ["Brittain T Room", new Location(33.77247, -84.391271)],
  ["Hefner Dormitory(HEF)", new Location(33.779159, -84.403952)],
  ["Old Civil Engr", new Location(33.7742, -84.394637)],
  ["West Village Dining Commons", new Location(33.779564, -84.404718)],
  ["Couch", new Location(33.778233, -84.404507)],
  ["J. S. Coon", new Location(33.77258, -84.395624)],
  ["575 Fourteenth Street", new Location(33.786914, -84.406213)],
  ["Groseclose", new Location(33.775778, -84.401885)],
  ["Theater for the Arts", new Location(33.775041, -84.399287)],
  ["Habersham", new Location(33.773978, -84.404311)],
  ["Savant", new Location(33.772075, -84.395277)],
  ["ISyE Main", new Location(33.775178, -84.401879)],
  ["Fourth Street Houses", new Location(33.775381, -84.391451)],
  ["Rich-Computer Center", new Location(33.77535159008218, -84.39513500282604)],
]);

// Building descriptions that are intentionally left without coordinates;
// `parse` skips these without adding them to the missing-locations warning.
const ignoredLocations = [
  "TBA",
  // The crawler doesn't attempt to resolve locations to their coordinates
  // for any locations outside of the main Atlanta campus (including study
  // abroad):
  "Budapest Study Abroad",
  "China, Shanghai",
  "Foreign Studies Prog (FORSPRO)",
  "GT Shenzhen",
  "Georgia Tech Lorraine",
  "Ritsumeikan U, Oita Japan",
  "Lisbon, Portugal",
  "New Zealand",
  "Oxford",
  "South Korea",
  "Toulouse, France",
];
103 |
104 | export function parse(sections: SectionResponse[], version: number): TermData {
105 | const courses: Record = {};
106 | const caches: Caches = {
107 | periods: [],
108 | dateRanges: [],
109 | scheduleTypes: [],
110 | campuses: [],
111 | attributes: [],
112 | gradeBases: [],
113 | locations: [],
114 | finalDates: [],
115 | finalTimes: [],
116 | // fullCourseNames: {},
117 | };
118 |
119 | const updatedAt = new Date();
120 | const missingLocations = new Set();
121 |
122 | sections.forEach((section) => {
123 | const {
124 | courseTitle,
125 | courseReferenceNumber,
126 | sequenceNumber,
127 | // creditHours: credits,
128 | campusDescription: campus,
129 | subjectCourse,
130 | } = section;
131 |
132 | let credits = section.creditHours;
133 |
134 | // The number of credit hours maybe missing.
135 | // If the number of credits is null or 0, search within each meeting time for a non-zero value;
136 | // otherwise, default to 0
137 | if (!credits) {
138 | credits =
139 | section.meetingsFaculty.find(
140 | (meetingPart) => meetingPart.meetingTime.creditHourSession > 0
141 | )?.meetingTime.creditHourSession ?? 0;
142 | }
143 |
144 | const courseName = `${section.subject} ${subjectCourse.replace(
145 | section.subject,
146 | ""
147 | )}`;
148 |
149 | const campusIndex = cache(caches.campuses, campus);
150 | const scheduleTypeIndex = cache(
151 | caches.scheduleTypes,
152 | section.scheduleTypeDescription
153 | );
154 |
155 | const attributes = section.sectionAttributes.map(
156 | (attr) => attr.description
157 | );
158 | const attributeIndices = attributes.map((attr) =>
159 | cache(caches.attributes, attr)
160 | );
161 |
162 | const meetings: Meeting[] = section.meetingsFaculty.map((meetingPart) => {
163 | // convert string location to latitude, longitude coordinates
164 | const locationName = Array.from(courseLocations.keys()).find(
165 | (locName) => meetingPart.meetingTime.buildingDescription === locName
166 | );
167 | let location;
168 | if (locationName) {
169 | location = courseLocations.get(locationName);
170 | } else {
171 | const shouldIgnore =
172 | ignoredLocations.find(
173 | (locName) => locName === meetingPart.meetingTime.buildingDescription
174 | ) != null;
175 | if (!shouldIgnore) {
176 | missingLocations.add(meetingPart.meetingTime.buildingDescription);
177 | }
178 | }
179 |
180 | const instructors = section.faculty.map(
181 | (faculty) =>
182 | faculty.displayName.split(", ").reverse().join(" ") +
183 | (faculty.primaryIndicator ? " (P)" : "")
184 | );
185 | const periodIndex = cache(
186 | caches.periods,
187 | meetingPart.meetingTime.beginTime && meetingPart.meetingTime.endTime
188 | ? `${meetingPart.meetingTime.beginTime} - ${meetingPart.meetingTime.endTime}`
189 | : "TBA"
190 | );
191 | const dateRangeIndex = cache(
192 | caches.dateRanges,
193 | `${meetingPart.meetingTime.startDate} - ${meetingPart.meetingTime.endDate}`
194 | );
195 | const locationIndex = cache(caches.locations, location || null);
196 | const building =
197 | meetingPart.meetingTime.buildingDescription &&
198 | meetingPart.meetingTime.room
199 | ? `${meetingPart.meetingTime.buildingDescription} ${meetingPart.meetingTime.room}`
200 | : "TBA";
201 |
202 | // Set to -1 here and to be updated by Revise.py later
203 | const finalDateIndex = -1;
204 | const finalTimeIndex = -1;
205 |
206 | return [
207 | periodIndex,
208 | (meetingPart.meetingTime.monday ? "M" : "") +
209 | (meetingPart.meetingTime.tuesday ? "T" : "") +
210 | (meetingPart.meetingTime.wednesday ? "W" : "") +
211 | (meetingPart.meetingTime.thursday ? "R" : "") +
212 | (meetingPart.meetingTime.friday ? "F" : "") +
213 | (meetingPart.meetingTime.saturday ? "S" : "") +
214 | (meetingPart.meetingTime.sunday ? "U" : ""),
215 | building,
216 | locationIndex,
217 | instructors,
218 | dateRangeIndex,
219 | finalDateIndex,
220 | finalTimeIndex,
221 | ];
222 | });
223 |
224 | if (!(courseName in courses)) {
225 | const title = courseTitle;
226 | const sectionsMap: Record = {};
227 | courses[courseName] = [
228 | title,
229 | sectionsMap,
230 | // Start off with an empty prerequisites array
231 | [],
232 | // Start off with no description
233 | null,
234 | ];
235 | }
236 |
237 | courses[courseName][1][sequenceNumber] = [
238 | courseReferenceNumber,
239 | meetings,
240 | credits,
241 | scheduleTypeIndex,
242 | campusIndex,
243 | attributeIndices,
244 | -1,
245 | ];
246 | });
247 |
248 | if (missingLocations.size > 0) {
249 | warn(`encountered unknown class locations`, {
250 | missingLocations: [...missingLocations.values()],
251 | version,
252 | updatedAt,
253 | });
254 | }
255 |
256 | return { courses, caches, updatedAt, version };
257 | }
258 |
--------------------------------------------------------------------------------
/src/steps/prereqs/attach.ts:
--------------------------------------------------------------------------------
1 | import { warn } from "../../log";
2 | import { TermData, Prerequisites } from "../../types";
3 |
4 | /**
5 | * Attaches course prerequisites to the data for the current term in-place
6 | * (*mutates the termData parameter*).
7 | * @param termData - Term data for all courses as parsed in previous steps
8 | * @param prerequisites - Global course Id -> prerequisites map as parsed in previous steps
9 | */
10 | export function attachPrereqs(
11 | termData: TermData,
12 | prerequisites: Record
13 | ): void {
14 | // For each parsed prerequisite,
15 | // attach it to the corresponding course
16 | // (mutate in-place)
17 | Object.keys(prerequisites).forEach((courseId) => {
18 | if (courseId in termData.courses) {
19 | // eslint-disable-next-line no-param-reassign
20 | termData.courses[courseId][2] = prerequisites[courseId];
21 | } else {
22 | warn(`received prerequisite data for unknown course`, { courseId });
23 | }
24 | });
25 | }
26 |
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/Prerequisites.g4:
--------------------------------------------------------------------------------
1 | grammar Prerequisites;
2 |
// This file defines the grammar expected from the cleaned prerequisites
// section on a course details page on Oscar
5 |
6 | // Parser rules
7 | parse
8 | : expression // an input can contain either a set (multiple clauses joined with operators)
9 | | atom // or a single clause
10 | | empty // or an empty clause with standalone operators and parentheses
11 | | EOF
12 | ;
13 |
14 | empty
15 | : OPARENS* operator+ empty* CPARENS*
16 | ;
17 |
18 | expression
19 | : left=term ( OR (right=term)*)*
20 | ;
21 |
22 | term
23 | : operator* left=atom (AND (right=atom)*)*
24 | ;
25 |
26 | atom
27 | : course
28 | | test
29 | | (OPARENS expression CPARENS)
30 | ;
31 |
32 | course
33 | : COURSE_PREFIX? subject=COURSE_SUBJECT number=COURSE_NUMBER (GRADE_PREFIX grade=GRADE_LETTER)?
34 | ;
35 |
36 | test
37 | // Re-use course number here to avoid parse ambiguity
38 | : name=TEST_NAME score=COURSE_NUMBER
39 | ;
40 |
41 | operator
42 | : (AND|OR)
43 | ;
44 |
45 | // Lexer rules
46 | AND : 'and';
47 | OR : 'or';
48 | OPARENS : '(';
49 | CPARENS : ')';
50 |
51 | GRADE_LETTER
52 | : 'A'..'D'
53 | | 'S'..'V'
54 | ;
55 |
56 | COURSE_PREFIX
57 | : 'Undergraduate Semester level'
58 | | 'Graduate Semester level'
59 | ;
60 |
61 | GRADE_PREFIX
62 | : 'Minimum Grade of'
63 | ;
64 |
65 | TEST_NAME
66 | : 'SAT Mathematics'
67 | | 'MATH SECTION SCORE'
68 | | 'ACT Math'
69 | | 'Converted ACT Math'
70 | | 'Math: Calculus AB'
71 | | 'Math: Calculus BC'
72 | ;
73 |
74 | COURSE_NUMBER
75 | : [0-9X]+[A-Z]*
76 | ;
77 |
78 | COURSE_SUBJECT
79 | : [A-Z]+
80 | ;
81 |
82 | SPACE
83 | : [ \t\r\n] -> skip
84 | ;
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/Prerequisites.interp:
--------------------------------------------------------------------------------
1 | token literal names:
2 | null
3 | 'and'
4 | 'or'
5 | '('
6 | ')'
7 | null
8 | null
9 | 'Minimum Grade of'
10 | null
11 | null
12 | null
13 | null
14 |
15 | token symbolic names:
16 | null
17 | AND
18 | OR
19 | OPARENS
20 | CPARENS
21 | GRADE_LETTER
22 | COURSE_PREFIX
23 | GRADE_PREFIX
24 | TEST_NAME
25 | COURSE_NUMBER
26 | COURSE_SUBJECT
27 | SPACE
28 |
29 | rule names:
30 | parse
31 | empty
32 | expression
33 | term
34 | atom
35 | course
36 | test
37 | operator
38 |
39 |
40 | atn:
41 | [3, 51485, 51898, 1421, 44986, 20307, 1543, 60043, 49729, 3, 13, 102, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 3, 2, 3, 2, 3, 2, 3, 2, 5, 2, 23, 10, 2, 3, 3, 7, 3, 26, 10, 3, 12, 3, 14, 3, 29, 11, 3, 3, 3, 6, 3, 32, 10, 3, 13, 3, 14, 3, 33, 3, 3, 7, 3, 37, 10, 3, 12, 3, 14, 3, 40, 11, 3, 3, 3, 7, 3, 43, 10, 3, 12, 3, 14, 3, 46, 11, 3, 3, 4, 3, 4, 3, 4, 7, 4, 51, 10, 4, 12, 4, 14, 4, 54, 11, 4, 7, 4, 56, 10, 4, 12, 4, 14, 4, 59, 11, 4, 3, 5, 7, 5, 62, 10, 5, 12, 5, 14, 5, 65, 11, 5, 3, 5, 3, 5, 3, 5, 7, 5, 70, 10, 5, 12, 5, 14, 5, 73, 11, 5, 7, 5, 75, 10, 5, 12, 5, 14, 5, 78, 11, 5, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 5, 6, 86, 10, 6, 3, 7, 5, 7, 89, 10, 7, 3, 7, 3, 7, 3, 7, 3, 7, 5, 7, 95, 10, 7, 3, 8, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 2, 2, 2, 10, 2, 2, 4, 2, 6, 2, 8, 2, 10, 2, 12, 2, 14, 2, 16, 2, 2, 3, 3, 2, 3, 4, 2, 109, 2, 22, 3, 2, 2, 2, 4, 27, 3, 2, 2, 2, 6, 47, 3, 2, 2, 2, 8, 63, 3, 2, 2, 2, 10, 85, 3, 2, 2, 2, 12, 88, 3, 2, 2, 2, 14, 96, 3, 2, 2, 2, 16, 99, 3, 2, 2, 2, 18, 23, 5, 6, 4, 2, 19, 23, 5, 10, 6, 2, 20, 23, 5, 4, 3, 2, 21, 23, 7, 2, 2, 3, 22, 18, 3, 2, 2, 2, 22, 19, 3, 2, 2, 2, 22, 20, 3, 2, 2, 2, 22, 21, 3, 2, 2, 2, 23, 3, 3, 2, 2, 2, 24, 26, 7, 5, 2, 2, 25, 24, 3, 2, 2, 2, 26, 29, 3, 2, 2, 2, 27, 25, 3, 2, 2, 2, 27, 28, 3, 2, 2, 2, 28, 31, 3, 2, 2, 2, 29, 27, 3, 2, 2, 2, 30, 32, 5, 16, 9, 2, 31, 30, 3, 2, 2, 2, 32, 33, 3, 2, 2, 2, 33, 31, 3, 2, 2, 2, 33, 34, 3, 2, 2, 2, 34, 38, 3, 2, 2, 2, 35, 37, 5, 4, 3, 2, 36, 35, 3, 2, 2, 2, 37, 40, 3, 2, 2, 2, 38, 36, 3, 2, 2, 2, 38, 39, 3, 2, 2, 2, 39, 44, 3, 2, 2, 2, 40, 38, 3, 2, 2, 2, 41, 43, 7, 6, 2, 2, 42, 41, 3, 2, 2, 2, 43, 46, 3, 2, 2, 2, 44, 42, 3, 2, 2, 2, 44, 45, 3, 2, 2, 2, 45, 5, 3, 2, 2, 2, 46, 44, 3, 2, 2, 2, 47, 57, 5, 8, 5, 2, 48, 52, 7, 4, 2, 2, 49, 51, 5, 8, 5, 2, 50, 49, 3, 2, 2, 2, 51, 54, 3, 2, 2, 2, 52, 50, 3, 2, 2, 2, 52, 53, 3, 2, 2, 2, 53, 56, 3, 2, 2, 2, 54, 52, 3, 2, 2, 2, 55, 48, 3, 2, 2, 2, 56, 59, 3, 2, 2, 2, 57, 55, 3, 
2, 2, 2, 57, 58, 3, 2, 2, 2, 58, 7, 3, 2, 2, 2, 59, 57, 3, 2, 2, 2, 60, 62, 5, 16, 9, 2, 61, 60, 3, 2, 2, 2, 62, 65, 3, 2, 2, 2, 63, 61, 3, 2, 2, 2, 63, 64, 3, 2, 2, 2, 64, 66, 3, 2, 2, 2, 65, 63, 3, 2, 2, 2, 66, 76, 5, 10, 6, 2, 67, 71, 7, 3, 2, 2, 68, 70, 5, 10, 6, 2, 69, 68, 3, 2, 2, 2, 70, 73, 3, 2, 2, 2, 71, 69, 3, 2, 2, 2, 71, 72, 3, 2, 2, 2, 72, 75, 3, 2, 2, 2, 73, 71, 3, 2, 2, 2, 74, 67, 3, 2, 2, 2, 75, 78, 3, 2, 2, 2, 76, 74, 3, 2, 2, 2, 76, 77, 3, 2, 2, 2, 77, 9, 3, 2, 2, 2, 78, 76, 3, 2, 2, 2, 79, 86, 5, 12, 7, 2, 80, 86, 5, 14, 8, 2, 81, 82, 7, 5, 2, 2, 82, 83, 5, 6, 4, 2, 83, 84, 7, 6, 2, 2, 84, 86, 3, 2, 2, 2, 85, 79, 3, 2, 2, 2, 85, 80, 3, 2, 2, 2, 85, 81, 3, 2, 2, 2, 86, 11, 3, 2, 2, 2, 87, 89, 7, 8, 2, 2, 88, 87, 3, 2, 2, 2, 88, 89, 3, 2, 2, 2, 89, 90, 3, 2, 2, 2, 90, 91, 7, 12, 2, 2, 91, 94, 7, 11, 2, 2, 92, 93, 7, 9, 2, 2, 93, 95, 7, 7, 2, 2, 94, 92, 3, 2, 2, 2, 94, 95, 3, 2, 2, 2, 95, 13, 3, 2, 2, 2, 96, 97, 7, 10, 2, 2, 97, 98, 7, 11, 2, 2, 98, 15, 3, 2, 2, 2, 99, 100, 9, 2, 2, 2, 100, 17, 3, 2, 2, 2, 15, 22, 27, 33, 38, 44, 52, 57, 63, 71, 76, 85, 88, 94]
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/Prerequisites.tokens:
--------------------------------------------------------------------------------
1 | AND=1
2 | OR=2
3 | OPARENS=3
4 | CPARENS=4
5 | GRADE_LETTER=5
6 | COURSE_PREFIX=6
7 | GRADE_PREFIX=7
8 | TEST_NAME=8
9 | COURSE_NUMBER=9
10 | COURSE_SUBJECT=10
11 | SPACE=11
12 | 'and'=1
13 | 'or'=2
14 | '('=3
15 | ')'=4
16 | 'Minimum Grade of'=7
17 |
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/PrerequisitesLexer.interp:
--------------------------------------------------------------------------------
1 | token literal names:
2 | null
3 | 'and'
4 | 'or'
5 | '('
6 | ')'
7 | null
8 | null
9 | 'Minimum Grade of'
10 | null
11 | null
12 | null
13 | null
14 |
15 | token symbolic names:
16 | null
17 | AND
18 | OR
19 | OPARENS
20 | CPARENS
21 | GRADE_LETTER
22 | COURSE_PREFIX
23 | GRADE_PREFIX
24 | TEST_NAME
25 | COURSE_NUMBER
26 | COURSE_SUBJECT
27 | SPACE
28 |
29 | rule names:
30 | AND
31 | OR
32 | OPARENS
33 | CPARENS
34 | GRADE_LETTER
35 | COURSE_PREFIX
36 | GRADE_PREFIX
37 | TEST_NAME
38 | COURSE_NUMBER
39 | COURSE_SUBJECT
40 | SPACE
41 |
42 | channel names:
43 | DEFAULT_TOKEN_CHANNEL
44 | HIDDEN
45 |
46 | mode names:
47 | DEFAULT_MODE
48 |
49 | atn:
50 | [3, 51485, 51898, 1421, 44986, 20307, 1543, 60043, 49729, 2, 13, 223, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 3, 2, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 5, 7, 90, 10, 7, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 202, 10, 9, 3, 10, 6, 10, 205, 10, 10, 13, 10, 14, 10, 206, 3, 10, 7, 10, 210, 10, 10, 12, 10, 14, 10, 213, 11, 10, 3, 11, 6, 11, 216, 10, 11, 13, 11, 14, 11, 217, 3, 12, 3, 12, 3, 12, 3, 12, 2, 2, 2, 13, 3, 2, 3, 5, 2, 4, 7, 2, 5, 9, 2, 6, 11, 2, 7, 13, 2, 8, 15, 2, 9, 17, 2, 10, 19, 2, 11, 21, 2, 12, 23, 2, 13, 3, 2, 6, 4, 2, 67, 70, 85, 88, 4, 2, 50, 59, 90, 90, 3, 2, 67, 92, 5, 2, 11, 12, 15, 15, 34, 34, 2, 231, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 3, 25, 3, 2, 2, 2, 5, 29, 3, 2, 2, 2, 7, 32, 3, 2, 2, 2, 9, 34, 3, 2, 2, 2, 11, 36, 3, 
2, 2, 2, 13, 89, 3, 2, 2, 2, 15, 91, 3, 2, 2, 2, 17, 201, 3, 2, 2, 2, 19, 204, 3, 2, 2, 2, 21, 215, 3, 2, 2, 2, 23, 219, 3, 2, 2, 2, 25, 26, 7, 99, 2, 2, 26, 27, 7, 112, 2, 2, 27, 28, 7, 102, 2, 2, 28, 4, 3, 2, 2, 2, 29, 30, 7, 113, 2, 2, 30, 31, 7, 116, 2, 2, 31, 6, 3, 2, 2, 2, 32, 33, 7, 42, 2, 2, 33, 8, 3, 2, 2, 2, 34, 35, 7, 43, 2, 2, 35, 10, 3, 2, 2, 2, 36, 37, 9, 2, 2, 2, 37, 12, 3, 2, 2, 2, 38, 39, 7, 87, 2, 2, 39, 40, 7, 112, 2, 2, 40, 41, 7, 102, 2, 2, 41, 42, 7, 103, 2, 2, 42, 43, 7, 116, 2, 2, 43, 44, 7, 105, 2, 2, 44, 45, 7, 116, 2, 2, 45, 46, 7, 99, 2, 2, 46, 47, 7, 102, 2, 2, 47, 48, 7, 119, 2, 2, 48, 49, 7, 99, 2, 2, 49, 50, 7, 118, 2, 2, 50, 51, 7, 103, 2, 2, 51, 52, 7, 34, 2, 2, 52, 53, 7, 85, 2, 2, 53, 54, 7, 103, 2, 2, 54, 55, 7, 111, 2, 2, 55, 56, 7, 103, 2, 2, 56, 57, 7, 117, 2, 2, 57, 58, 7, 118, 2, 2, 58, 59, 7, 103, 2, 2, 59, 60, 7, 116, 2, 2, 60, 61, 7, 34, 2, 2, 61, 62, 7, 110, 2, 2, 62, 63, 7, 103, 2, 2, 63, 64, 7, 120, 2, 2, 64, 65, 7, 103, 2, 2, 65, 90, 7, 110, 2, 2, 66, 67, 7, 73, 2, 2, 67, 68, 7, 116, 2, 2, 68, 69, 7, 99, 2, 2, 69, 70, 7, 102, 2, 2, 70, 71, 7, 119, 2, 2, 71, 72, 7, 99, 2, 2, 72, 73, 7, 118, 2, 2, 73, 74, 7, 103, 2, 2, 74, 75, 7, 34, 2, 2, 75, 76, 7, 85, 2, 2, 76, 77, 7, 103, 2, 2, 77, 78, 7, 111, 2, 2, 78, 79, 7, 103, 2, 2, 79, 80, 7, 117, 2, 2, 80, 81, 7, 118, 2, 2, 81, 82, 7, 103, 2, 2, 82, 83, 7, 116, 2, 2, 83, 84, 7, 34, 2, 2, 84, 85, 7, 110, 2, 2, 85, 86, 7, 103, 2, 2, 86, 87, 7, 120, 2, 2, 87, 88, 7, 103, 2, 2, 88, 90, 7, 110, 2, 2, 89, 38, 3, 2, 2, 2, 89, 66, 3, 2, 2, 2, 90, 14, 3, 2, 2, 2, 91, 92, 7, 79, 2, 2, 92, 93, 7, 107, 2, 2, 93, 94, 7, 112, 2, 2, 94, 95, 7, 107, 2, 2, 95, 96, 7, 111, 2, 2, 96, 97, 7, 119, 2, 2, 97, 98, 7, 111, 2, 2, 98, 99, 7, 34, 2, 2, 99, 100, 7, 73, 2, 2, 100, 101, 7, 116, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 102, 2, 2, 103, 104, 7, 103, 2, 2, 104, 105, 7, 34, 2, 2, 105, 106, 7, 113, 2, 2, 106, 107, 7, 104, 2, 2, 107, 16, 3, 2, 2, 2, 108, 109, 7, 85, 2, 2, 109, 110, 7, 67, 2, 2, 
110, 111, 7, 86, 2, 2, 111, 112, 7, 34, 2, 2, 112, 113, 7, 79, 2, 2, 113, 114, 7, 99, 2, 2, 114, 115, 7, 118, 2, 2, 115, 116, 7, 106, 2, 2, 116, 117, 7, 103, 2, 2, 117, 118, 7, 111, 2, 2, 118, 119, 7, 99, 2, 2, 119, 120, 7, 118, 2, 2, 120, 121, 7, 107, 2, 2, 121, 122, 7, 101, 2, 2, 122, 202, 7, 117, 2, 2, 123, 124, 7, 79, 2, 2, 124, 125, 7, 67, 2, 2, 125, 126, 7, 86, 2, 2, 126, 127, 7, 74, 2, 2, 127, 128, 7, 34, 2, 2, 128, 129, 7, 85, 2, 2, 129, 130, 7, 71, 2, 2, 130, 131, 7, 69, 2, 2, 131, 132, 7, 86, 2, 2, 132, 133, 7, 75, 2, 2, 133, 134, 7, 81, 2, 2, 134, 135, 7, 80, 2, 2, 135, 136, 7, 34, 2, 2, 136, 137, 7, 85, 2, 2, 137, 138, 7, 69, 2, 2, 138, 139, 7, 81, 2, 2, 139, 140, 7, 84, 2, 2, 140, 202, 7, 71, 2, 2, 141, 142, 7, 67, 2, 2, 142, 143, 7, 69, 2, 2, 143, 144, 7, 86, 2, 2, 144, 145, 7, 34, 2, 2, 145, 146, 7, 79, 2, 2, 146, 147, 7, 99, 2, 2, 147, 148, 7, 118, 2, 2, 148, 202, 7, 106, 2, 2, 149, 150, 7, 69, 2, 2, 150, 151, 7, 113, 2, 2, 151, 152, 7, 112, 2, 2, 152, 153, 7, 120, 2, 2, 153, 154, 7, 103, 2, 2, 154, 155, 7, 116, 2, 2, 155, 156, 7, 118, 2, 2, 156, 157, 7, 103, 2, 2, 157, 158, 7, 102, 2, 2, 158, 159, 7, 34, 2, 2, 159, 160, 7, 67, 2, 2, 160, 161, 7, 69, 2, 2, 161, 162, 7, 86, 2, 2, 162, 163, 7, 34, 2, 2, 163, 164, 7, 79, 2, 2, 164, 165, 7, 99, 2, 2, 165, 166, 7, 118, 2, 2, 166, 202, 7, 106, 2, 2, 167, 168, 7, 79, 2, 2, 168, 169, 7, 99, 2, 2, 169, 170, 7, 118, 2, 2, 170, 171, 7, 106, 2, 2, 171, 172, 7, 60, 2, 2, 172, 173, 7, 34, 2, 2, 173, 174, 7, 69, 2, 2, 174, 175, 7, 99, 2, 2, 175, 176, 7, 110, 2, 2, 176, 177, 7, 101, 2, 2, 177, 178, 7, 119, 2, 2, 178, 179, 7, 110, 2, 2, 179, 180, 7, 119, 2, 2, 180, 181, 7, 117, 2, 2, 181, 182, 7, 34, 2, 2, 182, 183, 7, 67, 2, 2, 183, 202, 7, 68, 2, 2, 184, 185, 7, 79, 2, 2, 185, 186, 7, 99, 2, 2, 186, 187, 7, 118, 2, 2, 187, 188, 7, 106, 2, 2, 188, 189, 7, 60, 2, 2, 189, 190, 7, 34, 2, 2, 190, 191, 7, 69, 2, 2, 191, 192, 7, 99, 2, 2, 192, 193, 7, 110, 2, 2, 193, 194, 7, 101, 2, 2, 194, 195, 7, 119, 2, 2, 195, 196, 
7, 110, 2, 2, 196, 197, 7, 119, 2, 2, 197, 198, 7, 117, 2, 2, 198, 199, 7, 34, 2, 2, 199, 200, 7, 68, 2, 2, 200, 202, 7, 69, 2, 2, 201, 108, 3, 2, 2, 2, 201, 123, 3, 2, 2, 2, 201, 141, 3, 2, 2, 2, 201, 149, 3, 2, 2, 2, 201, 167, 3, 2, 2, 2, 201, 184, 3, 2, 2, 2, 202, 18, 3, 2, 2, 2, 203, 205, 9, 3, 2, 2, 204, 203, 3, 2, 2, 2, 205, 206, 3, 2, 2, 2, 206, 204, 3, 2, 2, 2, 206, 207, 3, 2, 2, 2, 207, 211, 3, 2, 2, 2, 208, 210, 9, 4, 2, 2, 209, 208, 3, 2, 2, 2, 210, 213, 3, 2, 2, 2, 211, 209, 3, 2, 2, 2, 211, 212, 3, 2, 2, 2, 212, 20, 3, 2, 2, 2, 213, 211, 3, 2, 2, 2, 214, 216, 9, 4, 2, 2, 215, 214, 3, 2, 2, 2, 216, 217, 3, 2, 2, 2, 217, 215, 3, 2, 2, 2, 217, 218, 3, 2, 2, 2, 218, 22, 3, 2, 2, 2, 219, 220, 9, 5, 2, 2, 220, 221, 3, 2, 2, 2, 221, 222, 8, 12, 2, 2, 222, 24, 3, 2, 2, 2, 8, 2, 89, 201, 206, 211, 217, 3, 8, 2, 2]
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/PrerequisitesLexer.tokens:
--------------------------------------------------------------------------------
1 | AND=1
2 | OR=2
3 | OPARENS=3
4 | CPARENS=4
5 | GRADE_LETTER=5
6 | COURSE_PREFIX=6
7 | GRADE_PREFIX=7
8 | TEST_NAME=8
9 | COURSE_NUMBER=9
10 | COURSE_SUBJECT=10
11 | SPACE=11
12 | 'and'=1
13 | 'or'=2
14 | '('=3
15 | ')'=4
16 | 'Minimum Grade of'=7
17 |
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/PrerequisitesLexer.ts:
--------------------------------------------------------------------------------
1 | // Generated from src/steps/prereqs/grammar/Prerequisites.g4 by ANTLR 4.9.0-SNAPSHOT
2 |
3 |
4 | import { ATN } from "antlr4ts/atn/ATN";
5 | import { ATNDeserializer } from "antlr4ts/atn/ATNDeserializer";
6 | import { CharStream } from "antlr4ts/CharStream";
7 | import { Lexer } from "antlr4ts/Lexer";
8 | import { LexerATNSimulator } from "antlr4ts/atn/LexerATNSimulator";
9 | import { NotNull } from "antlr4ts/Decorators";
10 | import { Override } from "antlr4ts/Decorators";
11 | import { RuleContext } from "antlr4ts/RuleContext";
12 | import { Vocabulary } from "antlr4ts/Vocabulary";
13 | import { VocabularyImpl } from "antlr4ts/VocabularyImpl";
14 |
15 | import * as Utils from "antlr4ts/misc/Utils";
16 |
17 |
18 | export class PrerequisitesLexer extends Lexer {
19 | public static readonly AND = 1;
20 | public static readonly OR = 2;
21 | public static readonly OPARENS = 3;
22 | public static readonly CPARENS = 4;
23 | public static readonly GRADE_LETTER = 5;
24 | public static readonly COURSE_PREFIX = 6;
25 | public static readonly GRADE_PREFIX = 7;
26 | public static readonly TEST_NAME = 8;
27 | public static readonly COURSE_NUMBER = 9;
28 | public static readonly COURSE_SUBJECT = 10;
29 | public static readonly SPACE = 11;
30 |
31 | // tslint:disable:no-trailing-whitespace
32 | public static readonly channelNames: string[] = [
33 | "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
34 | ];
35 |
36 | // tslint:disable:no-trailing-whitespace
37 | public static readonly modeNames: string[] = [
38 | "DEFAULT_MODE",
39 | ];
40 |
41 | public static readonly ruleNames: string[] = [
42 | "AND", "OR", "OPARENS", "CPARENS", "GRADE_LETTER", "COURSE_PREFIX", "GRADE_PREFIX",
43 | "TEST_NAME", "COURSE_NUMBER", "COURSE_SUBJECT", "SPACE",
44 | ];
45 |
46 | private static readonly _LITERAL_NAMES: Array = [
47 | undefined, "'and'", "'or'", "'('", "')'", undefined, undefined, "'Minimum Grade of'",
48 | ];
49 | private static readonly _SYMBOLIC_NAMES: Array = [
50 | undefined, "AND", "OR", "OPARENS", "CPARENS", "GRADE_LETTER", "COURSE_PREFIX",
51 | "GRADE_PREFIX", "TEST_NAME", "COURSE_NUMBER", "COURSE_SUBJECT", "SPACE",
52 | ];
53 | public static readonly VOCABULARY: Vocabulary = new VocabularyImpl(PrerequisitesLexer._LITERAL_NAMES, PrerequisitesLexer._SYMBOLIC_NAMES, []);
54 |
55 | // @Override
56 | // @NotNull
57 | public get vocabulary(): Vocabulary {
58 | return PrerequisitesLexer.VOCABULARY;
59 | }
60 | // tslint:enable:no-trailing-whitespace
61 |
62 |
63 | constructor(input: CharStream) {
64 | super(input);
65 | this._interp = new LexerATNSimulator(PrerequisitesLexer._ATN, this);
66 | }
67 |
68 | // @Override
69 | public get grammarFileName(): string { return "Prerequisites.g4"; }
70 |
71 | // @Override
72 | public get ruleNames(): string[] { return PrerequisitesLexer.ruleNames; }
73 |
74 | // @Override
75 | public get serializedATN(): string { return PrerequisitesLexer._serializedATN; }
76 |
77 | // @Override
78 | public get channelNames(): string[] { return PrerequisitesLexer.channelNames; }
79 |
80 | // @Override
81 | public get modeNames(): string[] { return PrerequisitesLexer.modeNames; }
82 |
83 | public static readonly _serializedATN: string =
84 | "\x03\uC91D\uCABA\u058D\uAFBA\u4F53\u0607\uEA8B\uC241\x02\r\xDF\b\x01\x04" +
85 | "\x02\t\x02\x04\x03\t\x03\x04\x04\t\x04\x04\x05\t\x05\x04\x06\t\x06\x04" +
86 | "\x07\t\x07\x04\b\t\b\x04\t\t\t\x04\n\t\n\x04\v\t\v\x04\f\t\f\x03\x02\x03" +
87 | "\x02\x03\x02\x03\x02\x03\x03\x03\x03\x03\x03\x03\x04\x03\x04\x03\x05\x03" +
88 | "\x05\x03\x06\x03\x06\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03" +
89 | "\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03" +
90 | "\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03" +
91 | "\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03" +
92 | "\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03" +
93 | "\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x03\x07\x05" +
94 | "\x07Z\n\x07\x03\b\x03\b\x03\b\x03\b\x03\b\x03\b\x03\b\x03\b\x03\b\x03" +
95 | "\b\x03\b\x03\b\x03\b\x03\b\x03\b\x03\b\x03\b\x03\t\x03\t\x03\t\x03\t\x03" +
96 | "\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03" +
97 | "\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03" +
98 | "\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03" +
99 | "\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03" +
100 | "\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03" +
101 | "\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03" +
102 | "\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03\t\x03" +
103 | "\t\x03\t\x03\t\x03\t\x03\t\x05\t\xCA\n\t\x03\n\x06\n\xCD\n\n\r\n\x0E\n" +
104 | "\xCE\x03\n\x07\n\xD2\n\n\f\n\x0E\n\xD5\v\n\x03\v\x06\v\xD8\n\v\r\v\x0E" +
105 | "\v\xD9\x03\f\x03\f\x03\f\x03\f\x02\x02\x02\r\x03\x02\x03\x05\x02\x04\x07" +
106 | "\x02\x05\t\x02\x06\v\x02\x07\r\x02\b\x0F\x02\t\x11\x02\n\x13\x02\v\x15" +
107 | "\x02\f\x17\x02\r\x03\x02\x06\x04\x02CFUX\x04\x022;ZZ\x03\x02C\\\x05\x02" +
108 | "\v\f\x0F\x0F\"\"\x02\xE7\x02\x03\x03\x02\x02\x02\x02\x05\x03\x02\x02\x02" +
109 | "\x02\x07\x03\x02\x02\x02\x02\t\x03\x02\x02\x02\x02\v\x03\x02\x02\x02\x02" +
110 | "\r\x03\x02\x02\x02\x02\x0F\x03\x02\x02\x02\x02\x11\x03\x02\x02\x02\x02" +
111 | "\x13\x03\x02\x02\x02\x02\x15\x03\x02\x02\x02\x02\x17\x03\x02\x02\x02\x03" +
112 | "\x19\x03\x02\x02\x02\x05\x1D\x03\x02\x02\x02\x07 \x03\x02\x02\x02\t\"" +
113 | "\x03\x02\x02\x02\v$\x03\x02\x02\x02\rY\x03\x02\x02\x02\x0F[\x03\x02\x02" +
114 | "\x02\x11\xC9\x03\x02\x02\x02\x13\xCC\x03\x02\x02\x02\x15\xD7\x03\x02\x02" +
115 | "\x02\x17\xDB\x03\x02\x02\x02\x19\x1A\x07c\x02\x02\x1A\x1B\x07p\x02\x02" +
116 | "\x1B\x1C\x07f\x02\x02\x1C\x04\x03\x02\x02\x02\x1D\x1E\x07q\x02\x02\x1E" +
117 | "\x1F\x07t\x02\x02\x1F\x06\x03\x02\x02\x02 !\x07*\x02\x02!\b\x03\x02\x02" +
118 | "\x02\"#\x07+\x02\x02#\n\x03\x02\x02\x02$%\t\x02\x02\x02%\f\x03\x02\x02" +
119 | "\x02&\'\x07W\x02\x02\'(\x07p\x02\x02()\x07f\x02\x02)*\x07g\x02\x02*+\x07" +
120 | "t\x02\x02+,\x07i\x02\x02,-\x07t\x02\x02-.\x07c\x02\x02./\x07f\x02\x02" +
121 | "/0\x07w\x02\x0201\x07c\x02\x0212\x07v\x02\x0223\x07g\x02\x0234\x07\"\x02" +
122 | "\x0245\x07U\x02\x0256\x07g\x02\x0267\x07o\x02\x0278\x07g\x02\x0289\x07" +
123 | "u\x02\x029:\x07v\x02\x02:;\x07g\x02\x02;<\x07t\x02\x02<=\x07\"\x02\x02" +
124 | "=>\x07n\x02\x02>?\x07g\x02\x02?@\x07x\x02\x02@A\x07g\x02\x02AZ\x07n\x02" +
125 | "\x02BC\x07I\x02\x02CD\x07t\x02\x02DE\x07c\x02\x02EF\x07f\x02\x02FG\x07" +
126 | "w\x02\x02GH\x07c\x02\x02HI\x07v\x02\x02IJ\x07g\x02\x02JK\x07\"\x02\x02" +
127 | "KL\x07U\x02\x02LM\x07g\x02\x02MN\x07o\x02\x02NO\x07g\x02\x02OP\x07u\x02" +
128 | "\x02PQ\x07v\x02\x02QR\x07g\x02\x02RS\x07t\x02\x02ST\x07\"\x02\x02TU\x07" +
129 | "n\x02\x02UV\x07g\x02\x02VW\x07x\x02\x02WX\x07g\x02\x02XZ\x07n\x02\x02" +
130 | "Y&\x03\x02\x02\x02YB\x03\x02\x02\x02Z\x0E\x03\x02\x02\x02[\\\x07O\x02" +
131 | "\x02\\]\x07k\x02\x02]^\x07p\x02\x02^_\x07k\x02\x02_`\x07o\x02\x02`a\x07" +
132 | "w\x02\x02ab\x07o\x02\x02bc\x07\"\x02\x02cd\x07I\x02\x02de\x07t\x02\x02" +
133 | "ef\x07c\x02\x02fg\x07f\x02\x02gh\x07g\x02\x02hi\x07\"\x02\x02ij\x07q\x02" +
134 | "\x02jk\x07h\x02\x02k\x10\x03\x02\x02\x02lm\x07U\x02\x02mn\x07C\x02\x02" +
135 | "no\x07V\x02\x02op\x07\"\x02\x02pq\x07O\x02\x02qr\x07c\x02\x02rs\x07v\x02" +
136 | "\x02st\x07j\x02\x02tu\x07g\x02\x02uv\x07o\x02\x02vw\x07c\x02\x02wx\x07" +
137 | "v\x02\x02xy\x07k\x02\x02yz\x07e\x02\x02z\xCA\x07u\x02\x02{|\x07O\x02\x02" +
138 | "|}\x07C\x02\x02}~\x07V\x02\x02~\x7F\x07J\x02\x02\x7F\x80\x07\"\x02\x02" +
139 | "\x80\x81\x07U\x02\x02\x81\x82\x07G\x02\x02\x82\x83\x07E\x02\x02\x83\x84" +
140 | "\x07V\x02\x02\x84\x85\x07K\x02\x02\x85\x86\x07Q\x02\x02\x86\x87\x07P\x02" +
141 | "\x02\x87\x88\x07\"\x02\x02\x88\x89\x07U\x02\x02\x89\x8A\x07E\x02\x02\x8A" +
142 | "\x8B\x07Q\x02\x02\x8B\x8C\x07T\x02\x02\x8C\xCA\x07G\x02\x02\x8D\x8E\x07" +
143 | "C\x02\x02\x8E\x8F\x07E\x02\x02\x8F\x90\x07V\x02\x02\x90\x91\x07\"\x02" +
144 | "\x02\x91\x92\x07O\x02\x02\x92\x93\x07c\x02\x02\x93\x94\x07v\x02\x02\x94" +
145 | "\xCA\x07j\x02\x02\x95\x96\x07E\x02\x02\x96\x97\x07q\x02\x02\x97\x98\x07" +
146 | "p\x02\x02\x98\x99\x07x\x02\x02\x99\x9A\x07g\x02\x02\x9A\x9B\x07t\x02\x02" +
147 | "\x9B\x9C\x07v\x02\x02\x9C\x9D\x07g\x02\x02\x9D\x9E\x07f\x02\x02\x9E\x9F" +
148 | "\x07\"\x02\x02\x9F\xA0\x07C\x02\x02\xA0\xA1\x07E\x02\x02\xA1\xA2\x07V" +
149 | "\x02\x02\xA2\xA3\x07\"\x02\x02\xA3\xA4\x07O\x02\x02\xA4\xA5\x07c\x02\x02" +
150 | "\xA5\xA6\x07v\x02\x02\xA6\xCA\x07j\x02\x02\xA7\xA8\x07O\x02\x02\xA8\xA9" +
151 | "\x07c\x02\x02\xA9\xAA\x07v\x02\x02\xAA\xAB\x07j\x02\x02\xAB\xAC\x07<\x02" +
152 | "\x02\xAC\xAD\x07\"\x02\x02\xAD\xAE\x07E\x02\x02\xAE\xAF\x07c\x02\x02\xAF" +
153 | "\xB0\x07n\x02\x02\xB0\xB1\x07e\x02\x02\xB1\xB2\x07w\x02\x02\xB2\xB3\x07" +
154 | "n\x02\x02\xB3\xB4\x07w\x02\x02\xB4\xB5\x07u\x02\x02\xB5\xB6\x07\"\x02" +
155 | "\x02\xB6\xB7\x07C\x02\x02\xB7\xCA\x07D\x02\x02\xB8\xB9\x07O\x02\x02\xB9" +
156 | "\xBA\x07c\x02\x02\xBA\xBB\x07v\x02\x02\xBB\xBC\x07j\x02\x02\xBC\xBD\x07" +
157 | "<\x02\x02\xBD\xBE\x07\"\x02\x02\xBE\xBF\x07E\x02\x02\xBF\xC0\x07c\x02" +
158 | "\x02\xC0\xC1\x07n\x02\x02\xC1\xC2\x07e\x02\x02\xC2\xC3\x07w\x02\x02\xC3" +
159 | "\xC4\x07n\x02\x02\xC4\xC5\x07w\x02\x02\xC5\xC6\x07u\x02\x02\xC6\xC7\x07" +
160 | "\"\x02\x02\xC7\xC8\x07D\x02\x02\xC8\xCA\x07E\x02\x02\xC9l\x03\x02\x02" +
161 | "\x02\xC9{\x03\x02\x02\x02\xC9\x8D\x03\x02\x02\x02\xC9\x95\x03\x02\x02" +
162 | "\x02\xC9\xA7\x03\x02\x02\x02\xC9\xB8\x03\x02\x02\x02\xCA\x12\x03\x02\x02" +
163 | "\x02\xCB\xCD\t\x03\x02\x02\xCC\xCB\x03\x02\x02\x02\xCD\xCE\x03\x02\x02" +
164 | "\x02\xCE\xCC\x03\x02\x02\x02\xCE\xCF\x03\x02\x02\x02\xCF\xD3\x03\x02\x02" +
165 | "\x02\xD0\xD2\t\x04\x02\x02\xD1\xD0\x03\x02\x02\x02\xD2\xD5\x03\x02\x02" +
166 | "\x02\xD3\xD1\x03\x02\x02\x02\xD3\xD4\x03\x02\x02\x02\xD4\x14\x03\x02\x02" +
167 | "\x02\xD5\xD3\x03\x02\x02\x02\xD6\xD8\t\x04\x02\x02\xD7\xD6\x03\x02\x02" +
168 | "\x02\xD8\xD9\x03\x02\x02\x02\xD9\xD7\x03\x02\x02\x02\xD9\xDA\x03\x02\x02" +
169 | "\x02\xDA\x16\x03\x02\x02\x02\xDB\xDC\t\x05\x02\x02\xDC\xDD\x03\x02\x02" +
170 | "\x02\xDD\xDE\b\f\x02\x02\xDE\x18\x03\x02\x02\x02\b\x02Y\xC9\xCE\xD3\xD9" +
171 | "\x03\b\x02\x02";
	// Backing cache for the lazily-deserialized ATN shared by all lexer instances.
	public static __ATN: ATN;
	// Deserializes the lexer's ATN from _serializedATN on first access and
	// memoizes it in __ATN; later calls return the cached instance.
	public static get _ATN(): ATN {
		if (!PrerequisitesLexer.__ATN) {
			PrerequisitesLexer.__ATN = new ATNDeserializer().deserialize(Utils.toCharArray(PrerequisitesLexer._serializedATN));
		}

		return PrerequisitesLexer.__ATN;
	}
180 |
181 | }
182 |
183 |
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/PrerequisitesListener.ts:
--------------------------------------------------------------------------------
1 | // Generated from src/steps/prereqs/grammar/Prerequisites.g4 by ANTLR 4.9.0-SNAPSHOT
2 |
3 |
4 | import { ParseTreeListener } from "antlr4ts/tree/ParseTreeListener";
5 |
6 | import { ParseContext } from "./PrerequisitesParser";
7 | import { EmptyContext } from "./PrerequisitesParser";
8 | import { ExpressionContext } from "./PrerequisitesParser";
9 | import { TermContext } from "./PrerequisitesParser";
10 | import { AtomContext } from "./PrerequisitesParser";
11 | import { CourseContext } from "./PrerequisitesParser";
12 | import { TestContext } from "./PrerequisitesParser";
13 | import { OperatorContext } from "./PrerequisitesParser";
14 |
15 |
/**
 * This interface defines a complete listener for a parse tree produced by
 * `PrerequisitesParser`.
 *
 * Every callback is optional (`?:`) — implementations only need to provide
 * the enter/exit pairs for the grammar rules they care about.
 */
export interface PrerequisitesListener extends ParseTreeListener {
	/**
	 * Enter a parse tree produced by `PrerequisitesParser.parse`.
	 * @param ctx the parse tree
	 */
	enterParse?: (ctx: ParseContext) => void;
	/**
	 * Exit a parse tree produced by `PrerequisitesParser.parse`.
	 * @param ctx the parse tree
	 */
	exitParse?: (ctx: ParseContext) => void;

	/**
	 * Enter a parse tree produced by `PrerequisitesParser.empty`.
	 * @param ctx the parse tree
	 */
	enterEmpty?: (ctx: EmptyContext) => void;
	/**
	 * Exit a parse tree produced by `PrerequisitesParser.empty`.
	 * @param ctx the parse tree
	 */
	exitEmpty?: (ctx: EmptyContext) => void;

	/**
	 * Enter a parse tree produced by `PrerequisitesParser.expression`.
	 * @param ctx the parse tree
	 */
	enterExpression?: (ctx: ExpressionContext) => void;
	/**
	 * Exit a parse tree produced by `PrerequisitesParser.expression`.
	 * @param ctx the parse tree
	 */
	exitExpression?: (ctx: ExpressionContext) => void;

	/**
	 * Enter a parse tree produced by `PrerequisitesParser.term`.
	 * @param ctx the parse tree
	 */
	enterTerm?: (ctx: TermContext) => void;
	/**
	 * Exit a parse tree produced by `PrerequisitesParser.term`.
	 * @param ctx the parse tree
	 */
	exitTerm?: (ctx: TermContext) => void;

	/**
	 * Enter a parse tree produced by `PrerequisitesParser.atom`.
	 * @param ctx the parse tree
	 */
	enterAtom?: (ctx: AtomContext) => void;
	/**
	 * Exit a parse tree produced by `PrerequisitesParser.atom`.
	 * @param ctx the parse tree
	 */
	exitAtom?: (ctx: AtomContext) => void;

	/**
	 * Enter a parse tree produced by `PrerequisitesParser.course`.
	 * @param ctx the parse tree
	 */
	enterCourse?: (ctx: CourseContext) => void;
	/**
	 * Exit a parse tree produced by `PrerequisitesParser.course`.
	 * @param ctx the parse tree
	 */
	exitCourse?: (ctx: CourseContext) => void;

	/**
	 * Enter a parse tree produced by `PrerequisitesParser.test`.
	 * @param ctx the parse tree
	 */
	enterTest?: (ctx: TestContext) => void;
	/**
	 * Exit a parse tree produced by `PrerequisitesParser.test`.
	 * @param ctx the parse tree
	 */
	exitTest?: (ctx: TestContext) => void;

	/**
	 * Enter a parse tree produced by `PrerequisitesParser.operator`.
	 * @param ctx the parse tree
	 */
	enterOperator?: (ctx: OperatorContext) => void;
	/**
	 * Exit a parse tree produced by `PrerequisitesParser.operator`.
	 * @param ctx the parse tree
	 */
	exitOperator?: (ctx: OperatorContext) => void;
}
109 |
110 |
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/PrerequisitesParser.ts:
--------------------------------------------------------------------------------
1 | // Generated from src/steps/prereqs/grammar/Prerequisites.g4 by ANTLR 4.9.0-SNAPSHOT
2 |
3 |
4 | import { ATN } from "antlr4ts/atn/ATN";
5 | import { ATNDeserializer } from "antlr4ts/atn/ATNDeserializer";
6 | import { FailedPredicateException } from "antlr4ts/FailedPredicateException";
7 | import { NotNull } from "antlr4ts/Decorators";
8 | import { NoViableAltException } from "antlr4ts/NoViableAltException";
9 | import { Override } from "antlr4ts/Decorators";
10 | import { Parser } from "antlr4ts/Parser";
11 | import { ParserRuleContext } from "antlr4ts/ParserRuleContext";
12 | import { ParserATNSimulator } from "antlr4ts/atn/ParserATNSimulator";
13 | import { ParseTreeListener } from "antlr4ts/tree/ParseTreeListener";
14 | import { ParseTreeVisitor } from "antlr4ts/tree/ParseTreeVisitor";
15 | import { RecognitionException } from "antlr4ts/RecognitionException";
16 | import { RuleContext } from "antlr4ts/RuleContext";
17 | //import { RuleVersion } from "antlr4ts/RuleVersion";
18 | import { TerminalNode } from "antlr4ts/tree/TerminalNode";
19 | import { Token } from "antlr4ts/Token";
20 | import { TokenStream } from "antlr4ts/TokenStream";
21 | import { Vocabulary } from "antlr4ts/Vocabulary";
22 | import { VocabularyImpl } from "antlr4ts/VocabularyImpl";
23 |
24 | import * as Utils from "antlr4ts/misc/Utils";
25 |
26 | import { PrerequisitesListener } from "./PrerequisitesListener";
27 | import { PrerequisitesVisitor } from "./PrerequisitesVisitor";
28 |
29 |
30 | export class PrerequisitesParser extends Parser {
31 | public static readonly AND = 1;
32 | public static readonly OR = 2;
33 | public static readonly OPARENS = 3;
34 | public static readonly CPARENS = 4;
35 | public static readonly GRADE_LETTER = 5;
36 | public static readonly COURSE_PREFIX = 6;
37 | public static readonly GRADE_PREFIX = 7;
38 | public static readonly TEST_NAME = 8;
39 | public static readonly COURSE_NUMBER = 9;
40 | public static readonly COURSE_SUBJECT = 10;
41 | public static readonly SPACE = 11;
42 | public static readonly RULE_parse = 0;
43 | public static readonly RULE_empty = 1;
44 | public static readonly RULE_expression = 2;
45 | public static readonly RULE_term = 3;
46 | public static readonly RULE_atom = 4;
47 | public static readonly RULE_course = 5;
48 | public static readonly RULE_test = 6;
49 | public static readonly RULE_operator = 7;
50 | // tslint:disable:no-trailing-whitespace
51 | public static readonly ruleNames: string[] = [
52 | "parse", "empty", "expression", "term", "atom", "course", "test", "operator",
53 | ];
54 |
55 | private static readonly _LITERAL_NAMES: Array = [
56 | undefined, "'and'", "'or'", "'('", "')'", undefined, undefined, "'Minimum Grade of'",
57 | ];
58 | private static readonly _SYMBOLIC_NAMES: Array = [
59 | undefined, "AND", "OR", "OPARENS", "CPARENS", "GRADE_LETTER", "COURSE_PREFIX",
60 | "GRADE_PREFIX", "TEST_NAME", "COURSE_NUMBER", "COURSE_SUBJECT", "SPACE",
61 | ];
62 | public static readonly VOCABULARY: Vocabulary = new VocabularyImpl(PrerequisitesParser._LITERAL_NAMES, PrerequisitesParser._SYMBOLIC_NAMES, []);
63 |
	// Exposes the generated vocabulary (token display names) to the runtime.
	// @Override
	// @NotNull
	public get vocabulary(): Vocabulary {
		return PrerequisitesParser.VOCABULARY;
	}
	// tslint:enable:no-trailing-whitespace

	// Name of the grammar file this parser was generated from.
	// @Override
	public get grammarFileName(): string { return "Prerequisites.g4"; }

	// Rule names, indexed by the RULE_* constants above.
	// @Override
	public get ruleNames(): string[] { return PrerequisitesParser.ruleNames; }

	// Serialized ATN consumed by the runtime's adaptive prediction machinery.
	// @Override
	public get serializedATN(): string { return PrerequisitesParser._serializedATN; }

	// Hook invoked by generated rule code when a semantic predicate fails.
	protected createFailedPredicateException(predicate?: string, message?: string): FailedPredicateException {
		return new FailedPredicateException(this, predicate, message);
	}

	// Attaches the token stream and a fresh ATN simulator to this parser instance.
	constructor(input: TokenStream) {
		super(input);
		this._interp = new ParserATNSimulator(PrerequisitesParser._ATN, this);
	}
	// @RuleVersion(0)
	/**
	 * Entry rule. The four alternatives visible below are: an `expression`,
	 * a lone `atom`, the `empty` recovery rule, or bare EOF; the choice is
	 * made by adaptive prediction (decision 0).
	 * @returns the ParseContext for the matched input
	 */
	public parse(): ParseContext {
		let _localctx: ParseContext = new ParseContext(this._ctx, this.state);
		this.enterRule(_localctx, 0, PrerequisitesParser.RULE_parse);
		try {
			this.state = 20;
			this._errHandler.sync(this);
			switch ( this.interpreter.adaptivePredict(this._input, 0, this._ctx) ) {
			case 1:
				this.enterOuterAlt(_localctx, 1);
				{
				this.state = 16;
				this.expression();
				}
				break;

			case 2:
				this.enterOuterAlt(_localctx, 2);
				{
				this.state = 17;
				this.atom();
				}
				break;

			case 3:
				this.enterOuterAlt(_localctx, 3);
				{
				this.state = 18;
				this.empty();
				}
				break;

			case 4:
				this.enterOuterAlt(_localctx, 4);
				{
				this.state = 19;
				this.match(PrerequisitesParser.EOF);
				}
				break;
			}
		}
		catch (re) {
			// Standard ANTLR recovery: record the exception on the context,
			// report it, and resynchronize rather than abort the parse.
			if (re instanceof RecognitionException) {
				_localctx.exception = re;
				this._errHandler.reportError(this, re);
				this._errHandler.recover(this, re);
			} else {
				throw re;
			}
		}
		finally {
			this.exitRule();
		}
		return _localctx;
	}
	// @RuleVersion(0)
	/**
	 * `empty` rule: consumes OPARENS*, then one-or-more `operator`, then
	 * nested `empty`*, then CPARENS*. NOTE(review): presumably a tolerance
	 * rule for degenerate prerequisite strings (stray parens/operators) —
	 * confirm intent against Prerequisites.g4.
	 */
	public empty(): EmptyContext {
		let _localctx: EmptyContext = new EmptyContext(this._ctx, this.state);
		this.enterRule(_localctx, 2, PrerequisitesParser.RULE_empty);
		let _la: number;
		try {
			let _alt: number;
			this.enterOuterAlt(_localctx, 1);
			{
			// ( OPARENS )* — greedy token lookahead loop
			this.state = 25;
			this._errHandler.sync(this);
			_la = this._input.LA(1);
			while (_la === PrerequisitesParser.OPARENS) {
				{
				{
				this.state = 22;
				this.match(PrerequisitesParser.OPARENS);
				}
				}
				this.state = 27;
				this._errHandler.sync(this);
				_la = this._input.LA(1);
			}
			// ( operator )+ — at least one iteration required (decision 2)
			this.state = 29;
			this._errHandler.sync(this);
			_alt = 1;
			do {
				switch (_alt) {
				case 1:
					{
					{
					this.state = 28;
					this.operator();
					}
					}
					break;
				default:
					throw new NoViableAltException(this);
				}
				this.state = 31;
				this._errHandler.sync(this);
				_alt = this.interpreter.adaptivePredict(this._input, 2, this._ctx);
			} while (_alt !== 2 && _alt !== ATN.INVALID_ALT_NUMBER);
			// ( empty )* — recursive repetition (decision 3)
			this.state = 36;
			this._errHandler.sync(this);
			_alt = this.interpreter.adaptivePredict(this._input, 3, this._ctx);
			while (_alt !== 2 && _alt !== ATN.INVALID_ALT_NUMBER) {
				if (_alt === 1) {
					{
					{
					this.state = 33;
					this.empty();
					}
					}
				}
				this.state = 38;
				this._errHandler.sync(this);
				_alt = this.interpreter.adaptivePredict(this._input, 3, this._ctx);
			}
			// ( CPARENS )* — trailing close-parens (decision 4)
			this.state = 42;
			this._errHandler.sync(this);
			_alt = this.interpreter.adaptivePredict(this._input, 4, this._ctx);
			while (_alt !== 2 && _alt !== ATN.INVALID_ALT_NUMBER) {
				if (_alt === 1) {
					{
					{
					this.state = 39;
					this.match(PrerequisitesParser.CPARENS);
					}
					}
				}
				this.state = 44;
				this._errHandler.sync(this);
				_alt = this.interpreter.adaptivePredict(this._input, 4, this._ctx);
			}
			}
		}
		catch (re) {
			// Standard ANTLR recovery: record, report, resynchronize.
			if (re instanceof RecognitionException) {
				_localctx.exception = re;
				this._errHandler.reportError(this, re);
				this._errHandler.recover(this, re);
			} else {
				throw re;
			}
		}
		finally {
			this.exitRule();
		}
		return _localctx;
	}
	// @RuleVersion(0)
	/**
	 * `expression` rule: `left=term ( OR ( right=term )* )*` — a term
	 * followed by any number of OR-separated terms. The last terms matched
	 * are recorded in the context's `_left`/`_right` labels.
	 */
	public expression(): ExpressionContext {
		let _localctx: ExpressionContext = new ExpressionContext(this._ctx, this.state);
		this.enterRule(_localctx, 4, PrerequisitesParser.RULE_expression);
		let _la: number;
		try {
			let _alt: number;
			this.enterOuterAlt(_localctx, 1);
			{
			this.state = 45;
			_localctx._left = this.term();
			this.state = 55;
			this._errHandler.sync(this);
			_la = this._input.LA(1);
			// Outer loop: one iteration per OR-joined continuation.
			while (_la === PrerequisitesParser.OR) {
				{
				{
				this.state = 46;
				this.match(PrerequisitesParser.OR);
				this.state = 50;
				this._errHandler.sync(this);
				_alt = this.interpreter.adaptivePredict(this._input, 5, this._ctx);
				// Inner loop: right-hand terms after this OR (decision 5).
				while (_alt !== 2 && _alt !== ATN.INVALID_ALT_NUMBER) {
					if (_alt === 1) {
						{
						{
						this.state = 47;
						_localctx._right = this.term();
						}
						}
					}
					this.state = 52;
					this._errHandler.sync(this);
					_alt = this.interpreter.adaptivePredict(this._input, 5, this._ctx);
				}
				}
				}
				this.state = 57;
				this._errHandler.sync(this);
				_la = this._input.LA(1);
			}
			}
		}
		catch (re) {
			// Standard ANTLR recovery: record, report, resynchronize.
			if (re instanceof RecognitionException) {
				_localctx.exception = re;
				this._errHandler.reportError(this, re);
				this._errHandler.recover(this, re);
			} else {
				throw re;
			}
		}
		finally {
			this.exitRule();
		}
		return _localctx;
	}
	// @RuleVersion(0)
	/**
	 * `term` rule: `( operator )* left=atom ( AND ( right=atom )* )*` —
	 * leading stray operators are tolerated, then an atom followed by any
	 * number of AND-joined atoms. Last atoms are recorded in `_left`/`_right`.
	 */
	public term(): TermContext {
		let _localctx: TermContext = new TermContext(this._ctx, this.state);
		this.enterRule(_localctx, 6, PrerequisitesParser.RULE_term);
		let _la: number;
		try {
			let _alt: number;
			this.enterOuterAlt(_localctx, 1);
			{
			// Leading ( operator )* — consumes dangling AND/OR tokens.
			this.state = 61;
			this._errHandler.sync(this);
			_la = this._input.LA(1);
			while (_la === PrerequisitesParser.AND || _la === PrerequisitesParser.OR) {
				{
				{
				this.state = 58;
				this.operator();
				}
				}
				this.state = 63;
				this._errHandler.sync(this);
				_la = this._input.LA(1);
			}
			this.state = 64;
			_localctx._left = this.atom();
			this.state = 74;
			this._errHandler.sync(this);
			_alt = this.interpreter.adaptivePredict(this._input, 9, this._ctx);
			// Outer loop: one iteration per AND-joined continuation (decision 9).
			while (_alt !== 2 && _alt !== ATN.INVALID_ALT_NUMBER) {
				if (_alt === 1) {
					{
					{
					this.state = 65;
					this.match(PrerequisitesParser.AND);
					this.state = 69;
					this._errHandler.sync(this);
					_alt = this.interpreter.adaptivePredict(this._input, 8, this._ctx);
					// Inner loop: right-hand atoms after this AND (decision 8).
					while (_alt !== 2 && _alt !== ATN.INVALID_ALT_NUMBER) {
						if (_alt === 1) {
							{
							{
							this.state = 66;
							_localctx._right = this.atom();
							}
							}
						}
						this.state = 71;
						this._errHandler.sync(this);
						_alt = this.interpreter.adaptivePredict(this._input, 8, this._ctx);
					}
					}
				}
				}
				this.state = 76;
				this._errHandler.sync(this);
				_alt = this.interpreter.adaptivePredict(this._input, 9, this._ctx);
			}
			}
		}
		catch (re) {
			// Standard ANTLR recovery: record, report, resynchronize.
			if (re instanceof RecognitionException) {
				_localctx.exception = re;
				this._errHandler.reportError(this, re);
				this._errHandler.recover(this, re);
			} else {
				throw re;
			}
		}
		finally {
			this.exitRule();
		}
		return _localctx;
	}
	// @RuleVersion(0)
	/**
	 * `atom` rule: a `course` (starts with COURSE_PREFIX or COURSE_SUBJECT),
	 * a `test` (starts with TEST_NAME), or a parenthesized `expression`.
	 * The alternative is selected by the next token type alone.
	 */
	public atom(): AtomContext {
		let _localctx: AtomContext = new AtomContext(this._ctx, this.state);
		this.enterRule(_localctx, 8, PrerequisitesParser.RULE_atom);
		try {
			this.state = 83;
			this._errHandler.sync(this);
			switch (this._input.LA(1)) {
			case PrerequisitesParser.COURSE_PREFIX:
			case PrerequisitesParser.COURSE_SUBJECT:
				this.enterOuterAlt(_localctx, 1);
				{
				this.state = 77;
				this.course();
				}
				break;
			case PrerequisitesParser.TEST_NAME:
				this.enterOuterAlt(_localctx, 2);
				{
				this.state = 78;
				this.test();
				}
				break;
			case PrerequisitesParser.OPARENS:
				this.enterOuterAlt(_localctx, 3);
				{
				{
				this.state = 79;
				this.match(PrerequisitesParser.OPARENS);
				this.state = 80;
				this.expression();
				this.state = 81;
				this.match(PrerequisitesParser.CPARENS);
				}
				}
				break;
			default:
				throw new NoViableAltException(this);
			}
		}
		catch (re) {
			// Standard ANTLR recovery: record, report, resynchronize.
			if (re instanceof RecognitionException) {
				_localctx.exception = re;
				this._errHandler.reportError(this, re);
				this._errHandler.recover(this, re);
			} else {
				throw re;
			}
		}
		finally {
			this.exitRule();
		}
		return _localctx;
	}
	// @RuleVersion(0)
	/**
	 * `course` rule: `COURSE_PREFIX? subject=COURSE_SUBJECT
	 * number=COURSE_NUMBER ( GRADE_PREFIX grade=GRADE_LETTER )?`.
	 * Subject, number, and optional grade tokens are exposed via the
	 * context's `_subject`/`_number`/`_grade` labels.
	 */
	public course(): CourseContext {
		let _localctx: CourseContext = new CourseContext(this._ctx, this.state);
		this.enterRule(_localctx, 10, PrerequisitesParser.RULE_course);
		let _la: number;
		try {
			this.enterOuterAlt(_localctx, 1);
			{
			// Optional leading COURSE_PREFIX token.
			this.state = 86;
			this._errHandler.sync(this);
			_la = this._input.LA(1);
			if (_la === PrerequisitesParser.COURSE_PREFIX) {
				{
				this.state = 85;
				this.match(PrerequisitesParser.COURSE_PREFIX);
				}
			}

			this.state = 88;
			_localctx._subject = this.match(PrerequisitesParser.COURSE_SUBJECT);
			this.state = 89;
			_localctx._number = this.match(PrerequisitesParser.COURSE_NUMBER);
			// Optional minimum-grade suffix: GRADE_PREFIX + GRADE_LETTER.
			this.state = 92;
			this._errHandler.sync(this);
			_la = this._input.LA(1);
			if (_la === PrerequisitesParser.GRADE_PREFIX) {
				{
				this.state = 90;
				this.match(PrerequisitesParser.GRADE_PREFIX);
				this.state = 91;
				_localctx._grade = this.match(PrerequisitesParser.GRADE_LETTER);
				}
			}

			}
		}
		catch (re) {
			// Standard ANTLR recovery: record, report, resynchronize.
			if (re instanceof RecognitionException) {
				_localctx.exception = re;
				this._errHandler.reportError(this, re);
				this._errHandler.recover(this, re);
			} else {
				throw re;
			}
		}
		finally {
			this.exitRule();
		}
		return _localctx;
	}
	// @RuleVersion(0)
	/**
	 * `test` rule: `name=TEST_NAME score=COURSE_NUMBER` — a standardized-test
	 * requirement; the name and score tokens are exposed via `_name`/`_score`.
	 * (The score reuses the COURSE_NUMBER token type.)
	 */
	public test(): TestContext {
		let _localctx: TestContext = new TestContext(this._ctx, this.state);
		this.enterRule(_localctx, 12, PrerequisitesParser.RULE_test);
		try {
			this.enterOuterAlt(_localctx, 1);
			{
			this.state = 94;
			_localctx._name = this.match(PrerequisitesParser.TEST_NAME);
			this.state = 95;
			_localctx._score = this.match(PrerequisitesParser.COURSE_NUMBER);
			}
		}
		catch (re) {
			// Standard ANTLR recovery: record, report, resynchronize.
			if (re instanceof RecognitionException) {
				_localctx.exception = re;
				this._errHandler.reportError(this, re);
				this._errHandler.recover(this, re);
			} else {
				throw re;
			}
		}
		finally {
			this.exitRule();
		}
		return _localctx;
	}
	// @RuleVersion(0)
	/**
	 * `operator` rule: matches a single AND or OR token (a token-set match,
	 * so the runtime consumes inline rather than calling match()).
	 */
	public operator(): OperatorContext {
		let _localctx: OperatorContext = new OperatorContext(this._ctx, this.state);
		this.enterRule(_localctx, 14, PrerequisitesParser.RULE_operator);
		let _la: number;
		try {
			this.enterOuterAlt(_localctx, 1);
			{
			this.state = 97;
			_la = this._input.LA(1);
			if (!(_la === PrerequisitesParser.AND || _la === PrerequisitesParser.OR)) {
			// Next token is outside the expected set — attempt single-token recovery.
			this._errHandler.recoverInline(this);
			} else {
				if (this._input.LA(1) === Token.EOF) {
					this.matchedEOF = true;
				}

				this._errHandler.reportMatch(this);
				this.consume();
			}
			}
		}
		catch (re) {
			// Standard ANTLR recovery: record, report, resynchronize.
			if (re instanceof RecognitionException) {
				_localctx.exception = re;
				this._errHandler.reportError(this, re);
				this._errHandler.recover(this, re);
			} else {
				throw re;
			}
		}
		finally {
			this.exitRule();
		}
		return _localctx;
	}
531 |
532 | public static readonly _serializedATN: string =
533 | "\x03\uC91D\uCABA\u058D\uAFBA\u4F53\u0607\uEA8B\uC241\x03\rf\x04\x02\t" +
534 | "\x02\x04\x03\t\x03\x04\x04\t\x04\x04\x05\t\x05\x04\x06\t\x06\x04\x07\t" +
535 | "\x07\x04\b\t\b\x04\t\t\t\x03\x02\x03\x02\x03\x02\x03\x02\x05\x02\x17\n" +
536 | "\x02\x03\x03\x07\x03\x1A\n\x03\f\x03\x0E\x03\x1D\v\x03\x03\x03\x06\x03" +
537 | " \n\x03\r\x03\x0E\x03!\x03\x03\x07\x03%\n\x03\f\x03\x0E\x03(\v\x03\x03" +
538 | "\x03\x07\x03+\n\x03\f\x03\x0E\x03.\v\x03\x03\x04\x03\x04\x03\x04\x07\x04" +
539 | "3\n\x04\f\x04\x0E\x046\v\x04\x07\x048\n\x04\f\x04\x0E\x04;\v\x04\x03\x05" +
540 | "\x07\x05>\n\x05\f\x05\x0E\x05A\v\x05\x03\x05\x03\x05\x03\x05\x07\x05F" +
541 | "\n\x05\f\x05\x0E\x05I\v\x05\x07\x05K\n\x05\f\x05\x0E\x05N\v\x05\x03\x06" +
542 | "\x03\x06\x03\x06\x03\x06\x03\x06\x03\x06\x05\x06V\n\x06\x03\x07\x05\x07" +
543 | "Y\n\x07\x03\x07\x03\x07\x03\x07\x03\x07\x05\x07_\n\x07\x03\b\x03\b\x03" +
544 | "\b\x03\t\x03\t\x03\t\x02\x02\x02\n\x02\x02\x04\x02\x06\x02\b\x02\n\x02" +
545 | "\f\x02\x0E\x02\x10\x02\x02\x03\x03\x02\x03\x04\x02m\x02\x16\x03\x02\x02" +
546 | "\x02\x04\x1B\x03\x02\x02\x02\x06/\x03\x02\x02\x02\b?\x03\x02\x02\x02\n" +
547 | "U\x03\x02\x02\x02\fX\x03\x02\x02\x02\x0E`\x03\x02\x02\x02\x10c\x03\x02" +
548 | "\x02\x02\x12\x17\x05\x06\x04\x02\x13\x17\x05\n\x06\x02\x14\x17\x05\x04" +
549 | "\x03\x02\x15\x17\x07\x02\x02\x03\x16\x12\x03\x02\x02\x02\x16\x13\x03\x02" +
550 | "\x02\x02\x16\x14\x03\x02\x02\x02\x16\x15\x03\x02\x02\x02\x17\x03\x03\x02" +
551 | "\x02\x02\x18\x1A\x07\x05\x02\x02\x19\x18\x03\x02\x02\x02\x1A\x1D\x03\x02" +
552 | "\x02\x02\x1B\x19\x03\x02\x02\x02\x1B\x1C\x03\x02\x02\x02\x1C\x1F\x03\x02" +
553 | "\x02\x02\x1D\x1B\x03\x02\x02\x02\x1E \x05\x10\t\x02\x1F\x1E\x03\x02\x02" +
554 | "\x02 !\x03\x02\x02\x02!\x1F\x03\x02\x02\x02!\"\x03\x02\x02\x02\"&\x03" +
555 | "\x02\x02\x02#%\x05\x04\x03\x02$#\x03\x02\x02\x02%(\x03\x02\x02\x02&$\x03" +
556 | "\x02\x02\x02&\'\x03\x02\x02\x02\',\x03\x02\x02\x02(&\x03\x02\x02\x02)" +
557 | "+\x07\x06\x02\x02*)\x03\x02\x02\x02+.\x03\x02\x02\x02,*\x03\x02\x02\x02" +
558 | ",-\x03\x02\x02\x02-\x05\x03\x02\x02\x02.,\x03\x02\x02\x02/9\x05\b\x05" +
559 | "\x0204\x07\x04\x02\x0213\x05\b\x05\x0221\x03\x02\x02\x0236\x03\x02\x02" +
560 | "\x0242\x03\x02\x02\x0245\x03\x02\x02\x0258\x03\x02\x02\x0264\x03\x02\x02" +
561 | "\x0270\x03\x02\x02\x028;\x03\x02\x02\x0297\x03\x02\x02\x029:\x03\x02\x02" +
562 | "\x02:\x07\x03\x02\x02\x02;9\x03\x02\x02\x02<>\x05\x10\t\x02=<\x03\x02" +
563 | "\x02\x02>A\x03\x02\x02\x02?=\x03\x02\x02\x02?@\x03\x02\x02\x02@B\x03\x02" +
564 | "\x02\x02A?\x03\x02\x02\x02BL\x05\n\x06\x02CG\x07\x03\x02\x02DF\x05\n\x06" +
565 | "\x02ED\x03\x02\x02\x02FI\x03\x02\x02\x02GE\x03\x02\x02\x02GH\x03\x02\x02" +
566 | "\x02HK\x03\x02\x02\x02IG\x03\x02\x02\x02JC\x03\x02\x02\x02KN\x03\x02\x02" +
567 | "\x02LJ\x03\x02\x02\x02LM\x03\x02\x02\x02M\t\x03\x02\x02\x02NL\x03\x02" +
568 | "\x02\x02OV\x05\f\x07\x02PV\x05\x0E\b\x02QR\x07\x05\x02\x02RS\x05\x06\x04" +
569 | "\x02ST\x07\x06\x02\x02TV\x03\x02\x02\x02UO\x03\x02\x02\x02UP\x03\x02\x02" +
570 | "\x02UQ\x03\x02\x02\x02V\v\x03\x02\x02\x02WY\x07\b\x02\x02XW\x03\x02\x02" +
571 | "\x02XY\x03\x02\x02\x02YZ\x03\x02\x02\x02Z[\x07\f\x02\x02[^\x07\v\x02\x02" +
572 | "\\]\x07\t\x02\x02]_\x07\x07\x02\x02^\\\x03\x02\x02\x02^_\x03\x02\x02\x02" +
573 | "_\r\x03\x02\x02\x02`a\x07\n\x02\x02ab\x07\v\x02\x02b\x0F\x03\x02\x02\x02" +
574 | "cd\t\x02\x02\x02d\x11\x03\x02\x02\x02\x0F\x16\x1B!&,49?GLUX^";
	// Backing cache for the lazily-deserialized ATN shared by all parser instances.
	public static __ATN: ATN;
	// Deserializes the parser's ATN from _serializedATN on first access and
	// memoizes it in __ATN; later calls return the cached instance.
	public static get _ATN(): ATN {
		if (!PrerequisitesParser.__ATN) {
			PrerequisitesParser.__ATN = new ATNDeserializer().deserialize(Utils.toCharArray(PrerequisitesParser._serializedATN));
		}

		return PrerequisitesParser.__ATN;
	}
583 |
584 | }
585 |
586 | export class ParseContext extends ParserRuleContext {
587 | public expression(): ExpressionContext | undefined {
588 | return this.tryGetRuleContext(0, ExpressionContext);
589 | }
590 | public atom(): AtomContext | undefined {
591 | return this.tryGetRuleContext(0, AtomContext);
592 | }
593 | public empty(): EmptyContext | undefined {
594 | return this.tryGetRuleContext(0, EmptyContext);
595 | }
596 | public EOF(): TerminalNode | undefined { return this.tryGetToken(PrerequisitesParser.EOF, 0); }
597 | constructor(parent: ParserRuleContext | undefined, invokingState: number) {
598 | super(parent, invokingState);
599 | }
600 | // @Override
601 | public get ruleIndex(): number { return PrerequisitesParser.RULE_parse; }
602 | // @Override
603 | public enterRule(listener: PrerequisitesListener): void {
604 | if (listener.enterParse) {
605 | listener.enterParse(this);
606 | }
607 | }
608 | // @Override
609 | public exitRule(listener: PrerequisitesListener): void {
610 | if (listener.exitParse) {
611 | listener.exitParse(this);
612 | }
613 | }
614 | // @Override
615 | public accept(visitor: PrerequisitesVisitor): Result {
616 | if (visitor.visitParse) {
617 | return visitor.visitParse(this);
618 | } else {
619 | return visitor.visitChildren(this);
620 | }
621 | }
622 | }
623 |
624 |
625 | export class EmptyContext extends ParserRuleContext {
626 | public OPARENS(): TerminalNode[];
627 | public OPARENS(i: number): TerminalNode;
628 | public OPARENS(i?: number): TerminalNode | TerminalNode[] {
629 | if (i === undefined) {
630 | return this.getTokens(PrerequisitesParser.OPARENS);
631 | } else {
632 | return this.getToken(PrerequisitesParser.OPARENS, i);
633 | }
634 | }
635 | public operator(): OperatorContext[];
636 | public operator(i: number): OperatorContext;
637 | public operator(i?: number): OperatorContext | OperatorContext[] {
638 | if (i === undefined) {
639 | return this.getRuleContexts(OperatorContext);
640 | } else {
641 | return this.getRuleContext(i, OperatorContext);
642 | }
643 | }
644 | public empty(): EmptyContext[];
645 | public empty(i: number): EmptyContext;
646 | public empty(i?: number): EmptyContext | EmptyContext[] {
647 | if (i === undefined) {
648 | return this.getRuleContexts(EmptyContext);
649 | } else {
650 | return this.getRuleContext(i, EmptyContext);
651 | }
652 | }
653 | public CPARENS(): TerminalNode[];
654 | public CPARENS(i: number): TerminalNode;
655 | public CPARENS(i?: number): TerminalNode | TerminalNode[] {
656 | if (i === undefined) {
657 | return this.getTokens(PrerequisitesParser.CPARENS);
658 | } else {
659 | return this.getToken(PrerequisitesParser.CPARENS, i);
660 | }
661 | }
662 | constructor(parent: ParserRuleContext | undefined, invokingState: number) {
663 | super(parent, invokingState);
664 | }
665 | // @Override
666 | public get ruleIndex(): number { return PrerequisitesParser.RULE_empty; }
667 | // @Override
668 | public enterRule(listener: PrerequisitesListener): void {
669 | if (listener.enterEmpty) {
670 | listener.enterEmpty(this);
671 | }
672 | }
673 | // @Override
674 | public exitRule(listener: PrerequisitesListener): void {
675 | if (listener.exitEmpty) {
676 | listener.exitEmpty(this);
677 | }
678 | }
679 | // @Override
680 | public accept(visitor: PrerequisitesVisitor): Result {
681 | if (visitor.visitEmpty) {
682 | return visitor.visitEmpty(this);
683 | } else {
684 | return visitor.visitChildren(this);
685 | }
686 | }
687 | }
688 |
689 |
690 | export class ExpressionContext extends ParserRuleContext {
691 | public _left!: TermContext;
692 | public _right!: TermContext;
693 | public term(): TermContext[];
694 | public term(i: number): TermContext;
695 | public term(i?: number): TermContext | TermContext[] {
696 | if (i === undefined) {
697 | return this.getRuleContexts(TermContext);
698 | } else {
699 | return this.getRuleContext(i, TermContext);
700 | }
701 | }
702 | public OR(): TerminalNode[];
703 | public OR(i: number): TerminalNode;
704 | public OR(i?: number): TerminalNode | TerminalNode[] {
705 | if (i === undefined) {
706 | return this.getTokens(PrerequisitesParser.OR);
707 | } else {
708 | return this.getToken(PrerequisitesParser.OR, i);
709 | }
710 | }
711 | constructor(parent: ParserRuleContext | undefined, invokingState: number) {
712 | super(parent, invokingState);
713 | }
714 | // @Override
715 | public get ruleIndex(): number { return PrerequisitesParser.RULE_expression; }
716 | // @Override
717 | public enterRule(listener: PrerequisitesListener): void {
718 | if (listener.enterExpression) {
719 | listener.enterExpression(this);
720 | }
721 | }
722 | // @Override
723 | public exitRule(listener: PrerequisitesListener): void {
724 | if (listener.exitExpression) {
725 | listener.exitExpression(this);
726 | }
727 | }
728 | // @Override
729 | public accept(visitor: PrerequisitesVisitor): Result {
730 | if (visitor.visitExpression) {
731 | return visitor.visitExpression(this);
732 | } else {
733 | return visitor.visitChildren(this);
734 | }
735 | }
736 | }
737 |
738 |
739 | export class TermContext extends ParserRuleContext {
740 | public _left!: AtomContext;
741 | public _right!: AtomContext;
742 | public atom(): AtomContext[];
743 | public atom(i: number): AtomContext;
744 | public atom(i?: number): AtomContext | AtomContext[] {
745 | if (i === undefined) {
746 | return this.getRuleContexts(AtomContext);
747 | } else {
748 | return this.getRuleContext(i, AtomContext);
749 | }
750 | }
751 | public operator(): OperatorContext[];
752 | public operator(i: number): OperatorContext;
753 | public operator(i?: number): OperatorContext | OperatorContext[] {
754 | if (i === undefined) {
755 | return this.getRuleContexts(OperatorContext);
756 | } else {
757 | return this.getRuleContext(i, OperatorContext);
758 | }
759 | }
760 | public AND(): TerminalNode[];
761 | public AND(i: number): TerminalNode;
762 | public AND(i?: number): TerminalNode | TerminalNode[] {
763 | if (i === undefined) {
764 | return this.getTokens(PrerequisitesParser.AND);
765 | } else {
766 | return this.getToken(PrerequisitesParser.AND, i);
767 | }
768 | }
769 | constructor(parent: ParserRuleContext | undefined, invokingState: number) {
770 | super(parent, invokingState);
771 | }
772 | // @Override
773 | public get ruleIndex(): number { return PrerequisitesParser.RULE_term; }
774 | // @Override
775 | public enterRule(listener: PrerequisitesListener): void {
776 | if (listener.enterTerm) {
777 | listener.enterTerm(this);
778 | }
779 | }
780 | // @Override
781 | public exitRule(listener: PrerequisitesListener): void {
782 | if (listener.exitTerm) {
783 | listener.exitTerm(this);
784 | }
785 | }
786 | // @Override
787 | public accept(visitor: PrerequisitesVisitor): Result {
788 | if (visitor.visitTerm) {
789 | return visitor.visitTerm(this);
790 | } else {
791 | return visitor.visitChildren(this);
792 | }
793 | }
794 | }
795 |
796 |
797 | export class AtomContext extends ParserRuleContext {
798 | public course(): CourseContext | undefined {
799 | return this.tryGetRuleContext(0, CourseContext);
800 | }
801 | public test(): TestContext | undefined {
802 | return this.tryGetRuleContext(0, TestContext);
803 | }
804 | public OPARENS(): TerminalNode | undefined { return this.tryGetToken(PrerequisitesParser.OPARENS, 0); }
805 | public expression(): ExpressionContext | undefined {
806 | return this.tryGetRuleContext(0, ExpressionContext);
807 | }
808 | public CPARENS(): TerminalNode | undefined { return this.tryGetToken(PrerequisitesParser.CPARENS, 0); }
809 | constructor(parent: ParserRuleContext | undefined, invokingState: number) {
810 | super(parent, invokingState);
811 | }
812 | // @Override
813 | public get ruleIndex(): number { return PrerequisitesParser.RULE_atom; }
814 | // @Override
815 | public enterRule(listener: PrerequisitesListener): void {
816 | if (listener.enterAtom) {
817 | listener.enterAtom(this);
818 | }
819 | }
820 | // @Override
821 | public exitRule(listener: PrerequisitesListener): void {
822 | if (listener.exitAtom) {
823 | listener.exitAtom(this);
824 | }
825 | }
826 | // @Override
827 | public accept(visitor: PrerequisitesVisitor): Result {
828 | if (visitor.visitAtom) {
829 | return visitor.visitAtom(this);
830 | } else {
831 | return visitor.visitChildren(this);
832 | }
833 | }
834 | }
835 |
836 |
837 | export class CourseContext extends ParserRuleContext {
838 | public _subject!: Token;
839 | public _number!: Token;
840 | public _grade!: Token;
841 | public COURSE_SUBJECT(): TerminalNode { return this.getToken(PrerequisitesParser.COURSE_SUBJECT, 0); }
842 | public COURSE_NUMBER(): TerminalNode { return this.getToken(PrerequisitesParser.COURSE_NUMBER, 0); }
843 | public COURSE_PREFIX(): TerminalNode | undefined { return this.tryGetToken(PrerequisitesParser.COURSE_PREFIX, 0); }
844 | public GRADE_PREFIX(): TerminalNode | undefined { return this.tryGetToken(PrerequisitesParser.GRADE_PREFIX, 0); }
845 | public GRADE_LETTER(): TerminalNode | undefined { return this.tryGetToken(PrerequisitesParser.GRADE_LETTER, 0); }
846 | constructor(parent: ParserRuleContext | undefined, invokingState: number) {
847 | super(parent, invokingState);
848 | }
849 | // @Override
850 | public get ruleIndex(): number { return PrerequisitesParser.RULE_course; }
851 | // @Override
852 | public enterRule(listener: PrerequisitesListener): void {
853 | if (listener.enterCourse) {
854 | listener.enterCourse(this);
855 | }
856 | }
857 | // @Override
858 | public exitRule(listener: PrerequisitesListener): void {
859 | if (listener.exitCourse) {
860 | listener.exitCourse(this);
861 | }
862 | }
863 | // @Override
864 | public accept(visitor: PrerequisitesVisitor): Result {
865 | if (visitor.visitCourse) {
866 | return visitor.visitCourse(this);
867 | } else {
868 | return visitor.visitChildren(this);
869 | }
870 | }
871 | }
872 |
873 |
874 | export class TestContext extends ParserRuleContext {
875 | public _name!: Token;
876 | public _score!: Token;
877 | public TEST_NAME(): TerminalNode { return this.getToken(PrerequisitesParser.TEST_NAME, 0); }
878 | public COURSE_NUMBER(): TerminalNode { return this.getToken(PrerequisitesParser.COURSE_NUMBER, 0); }
879 | constructor(parent: ParserRuleContext | undefined, invokingState: number) {
880 | super(parent, invokingState);
881 | }
882 | // @Override
883 | public get ruleIndex(): number { return PrerequisitesParser.RULE_test; }
884 | // @Override
885 | public enterRule(listener: PrerequisitesListener): void {
886 | if (listener.enterTest) {
887 | listener.enterTest(this);
888 | }
889 | }
890 | // @Override
891 | public exitRule(listener: PrerequisitesListener): void {
892 | if (listener.exitTest) {
893 | listener.exitTest(this);
894 | }
895 | }
896 | // @Override
897 | public accept(visitor: PrerequisitesVisitor): Result {
898 | if (visitor.visitTest) {
899 | return visitor.visitTest(this);
900 | } else {
901 | return visitor.visitChildren(this);
902 | }
903 | }
904 | }
905 |
906 |
907 | export class OperatorContext extends ParserRuleContext {
908 | public AND(): TerminalNode | undefined { return this.tryGetToken(PrerequisitesParser.AND, 0); }
909 | public OR(): TerminalNode | undefined { return this.tryGetToken(PrerequisitesParser.OR, 0); }
910 | constructor(parent: ParserRuleContext | undefined, invokingState: number) {
911 | super(parent, invokingState);
912 | }
913 | // @Override
914 | public get ruleIndex(): number { return PrerequisitesParser.RULE_operator; }
915 | // @Override
916 | public enterRule(listener: PrerequisitesListener): void {
917 | if (listener.enterOperator) {
918 | listener.enterOperator(this);
919 | }
920 | }
921 | // @Override
922 | public exitRule(listener: PrerequisitesListener): void {
923 | if (listener.exitOperator) {
924 | listener.exitOperator(this);
925 | }
926 | }
927 | // @Override
928 | public accept(visitor: PrerequisitesVisitor): Result {
929 | if (visitor.visitOperator) {
930 | return visitor.visitOperator(this);
931 | } else {
932 | return visitor.visitChildren(this);
933 | }
934 | }
935 | }
936 |
937 |
938 |
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/PrerequisitesVisitor.ts:
--------------------------------------------------------------------------------
1 | // Generated from src/steps/prereqs/grammar/Prerequisites.g4 by ANTLR 4.9.0-SNAPSHOT
2 |
3 |
4 | import { ParseTreeVisitor } from "antlr4ts/tree/ParseTreeVisitor";
5 |
6 | import { ParseContext } from "./PrerequisitesParser";
7 | import { EmptyContext } from "./PrerequisitesParser";
8 | import { ExpressionContext } from "./PrerequisitesParser";
9 | import { TermContext } from "./PrerequisitesParser";
10 | import { AtomContext } from "./PrerequisitesParser";
11 | import { CourseContext } from "./PrerequisitesParser";
12 | import { TestContext } from "./PrerequisitesParser";
13 | import { OperatorContext } from "./PrerequisitesParser";
14 |
15 |
16 | /**
17 | * This interface defines a complete generic visitor for a parse tree produced
18 | * by `PrerequisitesParser`.
19 | *
20 | * @param The return type of the visit operation. Use `void` for
21 | * operations with no return type.
22 | */
23 | export interface PrerequisitesVisitor extends ParseTreeVisitor {
24 | /**
25 | * Visit a parse tree produced by `PrerequisitesParser.parse`.
26 | * @param ctx the parse tree
27 | * @return the visitor result
28 | */
29 | visitParse?: (ctx: ParseContext) => Result;
30 |
31 | /**
32 | * Visit a parse tree produced by `PrerequisitesParser.empty`.
33 | * @param ctx the parse tree
34 | * @return the visitor result
35 | */
36 | visitEmpty?: (ctx: EmptyContext) => Result;
37 |
38 | /**
39 | * Visit a parse tree produced by `PrerequisitesParser.expression`.
40 | * @param ctx the parse tree
41 | * @return the visitor result
42 | */
43 | visitExpression?: (ctx: ExpressionContext) => Result;
44 |
45 | /**
46 | * Visit a parse tree produced by `PrerequisitesParser.term`.
47 | * @param ctx the parse tree
48 | * @return the visitor result
49 | */
50 | visitTerm?: (ctx: TermContext) => Result;
51 |
52 | /**
53 | * Visit a parse tree produced by `PrerequisitesParser.atom`.
54 | * @param ctx the parse tree
55 | * @return the visitor result
56 | */
57 | visitAtom?: (ctx: AtomContext) => Result;
58 |
59 | /**
60 | * Visit a parse tree produced by `PrerequisitesParser.course`.
61 | * @param ctx the parse tree
62 | * @return the visitor result
63 | */
64 | visitCourse?: (ctx: CourseContext) => Result;
65 |
66 | /**
67 | * Visit a parse tree produced by `PrerequisitesParser.test`.
68 | * @param ctx the parse tree
69 | * @return the visitor result
70 | */
71 | visitTest?: (ctx: TestContext) => Result;
72 |
73 | /**
74 | * Visit a parse tree produced by `PrerequisitesParser.operator`.
75 | * @param ctx the parse tree
76 | * @return the visitor result
77 | */
78 | visitOperator?: (ctx: OperatorContext) => Result;
79 | }
80 |
81 |
--------------------------------------------------------------------------------
/src/steps/prereqs/grammar/README.md:
--------------------------------------------------------------------------------
1 | # ✍️ Grammar
2 |
This directory contains the ANTLR grammar source (`Prerequisites.g4`) and all derived files generated using [`antlr4ts`](https://github.com/tunnelvisionlabs/antlr4ts).
4 |
5 | They should be added to version control and only regenerated if the source changes. To recompile the grammar, run:
6 |
7 | ```sh
8 | yarn run gen-parser
9 | ```
10 |
11 | ## ℹ️ Note
12 |
Currently, the `antlr4ts` library is slightly broken (see [tunnelvisionlabs/antlr4ts#485](https://github.com/tunnelvisionlabs/antlr4ts/issues/485)), so we need to add the following comment at the top of **both** `PrerequisitesLexer.ts` and `PrerequisitesParser.ts`:
14 |
15 | ```
16 | // @ts-nocheck
17 | ```
18 |
--------------------------------------------------------------------------------
/src/steps/prereqs/parse.ts:
--------------------------------------------------------------------------------
1 | /* eslint-disable max-classes-per-file */
2 |
3 | import {
4 | ANTLRErrorListener,
5 | CharStreams,
6 | CommonTokenStream,
7 | Recognizer,
8 | } from "antlr4ts";
9 | import { AbstractParseTreeVisitor } from "antlr4ts/tree/AbstractParseTreeVisitor";
10 | import { ATNSimulator } from "antlr4ts/atn/ATNSimulator";
11 | import { load } from "cheerio";
12 |
13 | import { PrerequisitesLexer } from "./grammar/PrerequisitesLexer";
14 | import {
15 | AtomContext,
16 | CourseContext,
17 | ExpressionContext,
18 | PrerequisitesParser,
19 | TermContext,
20 | } from "./grammar/PrerequisitesParser";
21 | import { PrerequisitesVisitor } from "./grammar/PrerequisitesVisitor";
22 | import { error, warn } from "../../log";
23 | import {
24 | MinimumGrade,
25 | PrerequisiteClause,
26 | PrerequisiteCourse,
27 | PrerequisiteOperator,
28 | Prerequisites,
29 | PrerequisiteSet,
30 | } from "../../types";
31 |
/**
 * A map consisting of full course names and their corresponding abbreviations
 * from 1996-2023.
 *
 * Used to translate the subject column of Oscar's prereq HTML table
 * (full subject name) into the abbreviated subject code used in course IDs.
 * Subjects missing from this map cause their prereq to be skipped with a
 * warning (see `prereqHTMLToString`).
 */
const fullCourseNames = {
  "Vertically Integrated Project": "VIP",
  Wolof: "WOLO",
  "Electrical & Computer Engr": "ECE",
  "Computer Science": "CS",
  "Cooperative Work Assignment": "COOP",
  "Cross Enrollment": "UCGA",
  "Earth and Atmospheric Sciences": "EAS",
  English: "ENGL",
  "Foreign Studies": "FS",
  French: "FREN",
  "Georgia Tech": "GT",
  "Civil and Environmental Engr": "CEE",
  "College of Architecture": "COA",
  "College of Engineering": "COE",
  "Computational Mod, Sim, & Data": "CX",
  "Computational Science & Engr": "CSE",
  "Biological Sciences": "BIOS",
  Biology: "BIOL",
  "Biomed Engr/Joint Emory PKU": "BMEJ",
  "Biomedical Engineering": "BMED",
  "Industrial Design": "ID",
  "International Affairs": "INTA",
  "International Logistics": "IL",
  Internship: "INTN",
  "Intl Executive MBA": "IMBA",
  Japanese: "JAPN",
  Korean: "KOR",
  "Learning Support": "LS",
  Linguistics: "LING",
  "Literature, Media & Comm": "LMC",
  Management: "MGT",
  "Management of Technology": "MOT",
  "Manufacturing Leadership": "MLDR",
  "Materials Science & Engr": "MSE",
  Accounting: "ACCT",
  "Aerospace Engineering": "AE",
  "Air Force Aerospace Studies": "AS",
  "Applied Physiology": "APPH",
  "Mechanical Engineering": "ME",
  "Medical Physics": "MP",
  "Military Science & Leadership": "MSL",
  "Modern Languages": "ML",
  Music: "MUSI",
  "Naval Science": "NS",
  Neuroscience: "NEUR",
  Chemistry: "CHEM",
  Chinese: "CHIN",
  "City Planning": "CP",
  Economics: "ECON",
  "Elect & Comp Engr-Professional": "ECEP",
  Physics: "PHYS",
  "Political Science": "POL",
  "Polymer, Textile and Fiber Eng": "PTFE",
  Psychology: "PSYC",
  "Public Policy": "PUBP",
  "Public Policy/Joint GSU PhD": "PUBJ",
  Russian: "RUSS",
  Sociology: "SOC",
  Spanish: "SPAN",
  Mathematics: "MATH",
  "Center Enhancement-Teach/Learn": "CETL",
  "Chemical & Biomolecular Engr": "CHBE",
  "Biomedical Engr/Joint Emory": "BMEM",
  "Bldg Construction-Professional": "BCP",
  "Building Construction": "BC",
  Swahili: "SWAH",
  "Georgia Tech Lorraine": "GTL",
  German: "GRMN",
  "Global Media and Cultures": "GMC",
  "Health Systems": "HS",
  History: "HIST",
  "History, Technology & Society": "HTS",
  "Industrial & Systems Engr": "ISYE",
  "Nuclear & Radiological Engr": "NRE",
  Philosophy: "PHIL",
  "Applied Systems Engineering": "ASE",
  Arabic: "ARBC",
  Architecture: "ARCH",
  "Office of International Educ": "OIE",
  "College of Sciences": "COS",
  "Ivan Allen College": "IAC",
  "Serve, Learn, Sustain": "SLS",
  Persian: "PERS",
  Hebrew: "HEBW",
  Hindi: "HIN",
  "Int'l Plan Co-op Abroad": "IPCO",
  "Int'l Plan Intern Abroad": "IPIN",
  "Int'l Plan-Exchange Program": "IPFS",
  "Int'l Plan-Study Abroad": "IPSA",
  Portuguese: "PORT",
  "Professional Practice": "DOPP",
  "Lit, Communication & Culture": "LCC",
  "Health Performance Science": "HPS",
  "Philosophy of Science/Tech": "PST",
  "Health Physics": "HP",
  "Regents' Reading Skills": "RGTR",
  "Regents' Writing Skills": "RGTE",
  "Chemical Engineering": "CHE",
  "Textile & Fiber Engineering": "TFE",
  "Textile Engineering": "TEX",
  "Management Science": "MSCI",
  "Materials Engineering": "MATE",
  "Civil Engineering": "CE",
  "Electrical Engineering": "EE",
  "Computer Engineering": "CMPE",
  "Military Science": "MS",
  "Nuclear Engineering": "NE",
  "Physical Education": "PE",
  "Engineering Graphics": "EGR",
  "Engr Science and Mechanics": "ESM",
  "Technology & Science Policy": "TASP",
  "Foreign Language": "FL",
  "Studies Abroad": "SA",
};
// Lookup table (full subject name -> abbreviation) used by prereqHTMLToString
const courseMap = new Map(Object.entries(fullCourseNames));
152 |
// Column indices of the prereq HTML table scraped from Oscar;
// each row of the table is read positionally using these offsets
// (see `prereqHTMLToString` below).
const Headers = {
  Operator: 0,
  OParen: 1,
  Test: 2,
  Score: 3,
  Subject: 4,
  CourseNumber: 5,
  Level: 6,
  Grade: 7,
  CParen: 8,
};
165 |
166 | /**
167 | * Converts prerequisites in HTML table format to Banner 8's string format
168 | * but without semester level.
169 | * @param html - Source HTML for the page
170 | * @returns prereq string (e.g., "MATH 8305 Minimum Grade of D")
171 | *
172 | * NOTE: When a course name is missing from fullCourseNames map above,
173 | * the course is removed from the prerequisite list and a warning is logged.
174 | */
175 | function prereqHTMLToString(html: string, courseId: string) {
176 | const $ = load(html);
177 | const prereqTable = $(".basePreqTable").find("tr");
178 | const prereqs = Array();
179 |
180 | prereqTable.each((rowIndex, element) => {
181 | if (rowIndex === 0) return;
182 |
183 | const prereqRow = $(element).children();
184 | let prereq = "";
185 |
186 | let subjectCode: string | undefined;
187 |
188 | prereqRow.each((colIndex: number): void => {
189 | if (colIndex === Headers.Level) return;
190 | let value = prereqRow.eq(colIndex).text();
191 |
192 | if (value.length === 0) return;
193 | if (colIndex === Headers.Operator) value = value.toLowerCase();
194 | if (colIndex === Headers.Subject) {
195 | subjectCode = courseMap.get(value);
196 | if (!subjectCode) {
197 | warn(
198 | `Course has a prereq for ${value} whose abbreviation does not exist. Prereq skipped.`,
199 | {
200 | courseId,
201 | subject: value,
202 | }
203 | );
204 | return;
205 | }
206 | value = subjectCode;
207 | }
208 |
209 | // ignore course if course abbreviation not found
210 | if (
211 | (colIndex === Headers.CourseNumber || colIndex === Headers.Grade) &&
212 | !subjectCode
213 | )
214 | return;
215 |
216 | if (colIndex === Headers.Grade) value = `Minimum Grade of ${value}`;
217 |
218 | prereq += value;
219 |
220 | if (colIndex !== Headers.OParen && colIndex !== Headers.CParen)
221 | prereq += " ";
222 | });
223 |
224 | prereqs.push(prereq);
225 | });
226 |
227 | // Concatenate all prereqs and remove empty parantheses
228 | return prereqs.join("").trim();
229 | }
230 |
231 | /**
232 | * Parses the HTML of a single course to get its prerequisites
233 | * @param html - Source HTML for the page
234 | * @param courseId - The joined course id (SUBJECT NUMBER); i.e. `"CS 2340"`
235 | */
236 | export function parseCoursePrereqs(
237 | html: string,
238 | courseId: string
239 | ): Prerequisites {
240 | // Converts prereqs in HTML table form to Banner 8 (old Oscar system that crawler-v1 uses)'s string format
241 | const prereqString = prereqHTMLToString(html, courseId);
242 |
243 | // Create the lexer and parser using the ANTLR 4 grammar defined in ./grammar
244 | // (using antlr4ts: https://github.com/tunnelvisionlabs/antlr4ts)
245 | const charStream = CharStreams.fromString(prereqString, courseId);
246 | const lexer = new PrerequisitesLexer(charStream);
247 | lexer.removeErrorListeners();
248 | lexer.addErrorListener(new ErrorListener(courseId, prereqString));
249 | const tokenStream = new CommonTokenStream(lexer);
250 | const parser = new PrerequisitesParser(tokenStream);
251 | parser.removeErrorListeners();
252 | parser.addErrorListener(new ErrorListener(courseId, prereqString));
253 |
254 | // Get the top-level "parse" rule's tree
255 | // and pass it into our visitor to transform the parse tree
256 | // into the prefix-notation parsed version
257 | const tree = parser.parse();
258 | const visitor = new PrefixNotationVisitor();
259 | const prerequisiteClause = visitor.visit(tree);
260 |
261 | // No prerequisites
262 | if (prerequisiteClause == null) {
263 | return [];
264 | }
265 |
266 | // If there is only a single prereq, return as a prefix set with "and"
267 | if (isSingleCourse(prerequisiteClause)) {
268 | return ["and", prerequisiteClause];
269 | }
270 |
271 | // Finally, flatten the tree so that consecutive operands
272 | // for the same operator in a series of nested PrerequisiteSets
273 | // are put into a single PrerequisiteSet
274 | const flattened = flatten(prerequisiteClause);
275 | return flattened;
276 | }
277 |
278 | /**
279 | * Type guard to determine if a clause is a single course
280 | * @param clause - source clause (either single course or prereq set)
281 | */
282 | function isSingleCourse(
283 | clause: PrerequisiteClause
284 | ): clause is PrerequisiteCourse {
285 | return typeof clause === "object" && !Array.isArray(clause);
286 | }
287 |
288 | /**
289 | * Type guard to determine if a clause is a null prerequisite set
290 | * @param clause - source clause (either single course or prereq set)
291 | */
292 | function isNullSet(
293 | clause: PrerequisiteClause
294 | ): clause is [operator: PrerequisiteOperator] {
295 | return (
296 | typeof clause === "object" && Array.isArray(clause) && clause.length === 1
297 | );
298 | }
299 |
300 | /**
301 | * Flattens a prerequisite tree so that:
302 | * - singular `PrerequisiteSet`s like `['and', 'CS 2340 (C)']`
303 | * get turned into their string version (`'CS 2340 (C)'`)
304 | * - consecutive operands
305 | * for the same operator in a series of nested `PrerequisiteSet`s
306 | * are put into a single `PrerequisiteSet`
307 | * - null set PrerequisiteSet`s like `['and']` get removed
308 | * @param source - Source prerequisites tree using prefix boolean operators
309 | */
310 | function flatten(source: PrerequisiteSet): PrerequisiteSet {
311 | function flattenInner(clause: PrerequisiteClause): PrerequisiteClause {
312 | // If the clause is a single course, nothing can be done to flatten
313 | if (isSingleCourse(clause)) return clause;
314 |
315 | const [operator, ...children] = clause;
316 |
317 | // Check for singular `PrerequisiteSet`s
318 | if (children.length === 1) {
319 | return flattenInner(children[0]);
320 | }
321 |
322 | // Check for nested `PrerequisiteSet`s that have the same operator
323 | const newChildren = [];
324 | for (const child of children) {
325 | const flattened = flattenInner(child);
326 | if (!isNullSet(flattened)) {
327 | // If the child is an array and has the same operator,
328 | // append its children to the current children array
329 | if (
330 | typeof flattened === "object" &&
331 | Array.isArray(flattened) &&
332 | flattened[0] === operator
333 | ) {
334 | newChildren.push(...flattened.slice(1));
335 | } else {
336 | // Otherwise, just add the child
337 | newChildren.push(flattened);
338 | }
339 | }
340 | }
341 |
342 | return [operator, ...children.map(flattenInner)];
343 | }
344 |
345 | // Call the recursive traversal function on the root node's children
346 | const [operator, ...children] = source;
347 | const transformedChildren = children
348 | .map(flattenInner)
349 | .filter((c) => !isNullSet(c));
350 | return [operator, ...transformedChildren];
351 | }
352 |
353 | /**
354 | * Custom error listener class that lets us prepend the course ID
355 | * onto parsing errors so that they can be easier identified from logs
356 | */
357 | class ErrorListener implements ANTLRErrorListener {
358 | courseId: string;
359 |
360 | original: string;
361 |
362 | constructor(courseId: string, original: string) {
363 | this.courseId = courseId;
364 | this.original = original;
365 | }
366 |
367 | public syntaxError(
368 | _recognizer: Recognizer,
369 | _offendingSymbol: T,
370 | line: number,
371 | charPositionInLine: number,
372 | msg: string
373 | ): void {
374 | error("an error occurred while parsing prerequisites", new Error(msg), {
375 | line,
376 | charPositionInLine,
377 | courseId: this.courseId,
378 | originalTextFromOscar: this.original,
379 | });
380 | }
381 | }
382 |
383 | // Defines the class used to flatten the parse tree
384 | // into the prefix-notation parsed version
385 | class PrefixNotationVisitor
386 | extends AbstractParseTreeVisitor
387 | implements PrerequisitesVisitor
388 | {
389 | defaultResult(): PrerequisiteClause {
390 | return null as unknown as PrerequisiteClause;
391 | }
392 |
393 | // Expression: logical disjunction (OR)
394 | visitExpression(ctx: ExpressionContext): PrerequisiteClause {
395 | // Create the `PrerequisiteSet` using each child
396 | return [
397 | "or",
398 | ...ctx
399 | .term()
400 | .map((termCtx) => this.visit(termCtx))
401 | .filter((term) => term != null),
402 | ];
403 | }
404 |
405 | // Term: logical conjunction (AND)
406 | visitTerm(ctx: TermContext): PrerequisiteClause {
407 | // Create the `PrerequisiteSet` using each child
408 | return [
409 | "and",
410 | ...ctx
411 | .atom()
412 | .map((atomCtx) => this.visit(atomCtx))
413 | .filter((term) => term != null),
414 | ];
415 | }
416 |
417 | visitAtom(ctx: AtomContext): PrerequisiteClause {
418 | // Visit either the course or the expression inside the parentheses
419 | const course = ctx.course();
420 | const expression = ctx.expression();
421 | const test = ctx.test();
422 |
423 | if (course != null) {
424 | return this.visit(course);
425 | }
426 | if (expression != null) {
427 | return this.visit(expression);
428 | }
429 | if (test != null) {
430 | // Note: we ignore test atoms at the moment,
431 | // though this can be easily changed in the future
432 | return this.defaultResult();
433 | }
434 |
435 | throw new Error("Empty Atom received");
436 | }
437 |
438 | visitCourse(ctx: CourseContext): PrerequisiteClause {
439 | // Construct the base string for this course
440 | // using the format expected by the API
441 | const subject = ctx.COURSE_SUBJECT().toString();
442 | const number = ctx.COURSE_NUMBER().toString();
443 |
444 | // There might not be a grade
445 | let grade: MinimumGrade | undefined;
446 | const gradeCtx = ctx.GRADE_LETTER();
447 | if (gradeCtx != null) {
448 | grade = gradeCtx.toString() as MinimumGrade;
449 | }
450 |
451 | return { id: `${subject} ${number}`, grade };
452 | }
453 | }
454 |
--------------------------------------------------------------------------------
/src/steps/write.ts:
--------------------------------------------------------------------------------
1 | import path from "path";
2 | import fs from "fs";
3 | import { TermData } from "../types";
4 | import { writeFile } from "../utils";
5 |
6 | export const dataPath = path.resolve(__dirname, "..", "..", "data");
7 | if (!fs.existsSync(dataPath)) {
8 | fs.mkdirSync(dataPath);
9 | }
10 |
11 | export function write(term: string, termData: TermData): Promise {
12 | const termPath = path.resolve(dataPath, `${term}.json`);
13 | return writeFile(termPath, termData);
14 | }
15 |
--------------------------------------------------------------------------------
/src/steps/writeIndex.ts:
--------------------------------------------------------------------------------
1 | import path from "path";
2 | import fs from "fs/promises";
3 |
4 | import { writeFile } from "../utils";
5 | import { dataPath } from "./write";
6 | import { log } from "../log";
7 | import { Term } from "../types";
8 |
9 | export async function writeIndex(termsFinalized: string[]): Promise {
10 | // Find all term JSON files in the data directory
11 | const files = await fs.readdir(dataPath);
12 | const dataFileRegex = /20[0-9]{4}.json/;
13 | const allDataFiles = files.filter((f) => f.match(dataFileRegex) !== null);
14 | const allTerms = allDataFiles.map((f) => f.substring(0, f.indexOf(".")));
15 |
16 | log("identified term data files in output directory", {
17 | allDataFiles,
18 | allTerms,
19 | files,
20 | dataPath,
21 | });
22 |
23 | const termsInfo: Term[] = [];
24 |
25 | allTerms.forEach((element) => {
26 | const curr: Term = {
27 | term: element,
28 | finalized: false,
29 | };
30 | if (termsFinalized.includes(element)) {
31 | curr.finalized = true;
32 | }
33 | termsInfo.push(curr);
34 | });
35 |
36 | // Write the list of terms out to `index.json`
37 | const jsonData = {
38 | terms: termsInfo,
39 | };
40 |
41 | const termPath = path.resolve(dataPath, `index.json`);
42 | return writeFile(termPath, jsonData);
43 | }
44 |
--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Primary JSON object returned by the API.
3 | * See https://github.com/GTBitsOfGood/gt-scheduler/issues/1#issuecomment-694326220
4 | * for more info on the shape
5 | */
6 | export interface TermData {
7 | /**
8 | * Contains information about each class;
9 | * this makes up the vast bulk of the resultant JSON.
10 | * The course IDs are the keys (`"ACCT 2101"`, `"CS 2340"`, etc.)
11 | */
12 | courses: Record;
13 | /**
14 | * Contains data shared by multiple class descriptions
15 | */
16 | caches: Caches;
17 | /**
18 | * Contains the time this JSON file was retrieved
19 | */
20 | updatedAt: Date;
21 | /**
22 | * Version number for the term data
23 | */
24 | version: number;
25 | }
26 |
/**
 * Contains data shared by multiple class descriptions.
 * Storing repeated values once and referencing them by integer index
 * keeps the serialized term JSON small.
 */
export interface Caches {
  /**
   * List of all the different time ranges that classes can be offered at
   * (e.g. `"8:00 am - 8:50 am"`;
   * there is one `"TBA"` string for classes whose time is "To Be Announced")
   */
  periods: string[];
  /**
   * List of all possible start/ending dates that classes can be offered between
   * (e.g. `"Aug 17, 2020 - Dec 10, 2020"`)
   */
  dateRanges: string[];
  /**
   * List of the different types of classes for each time block
   * (e.g. `"Lecture*"`, `"Recitation*"`, or `"Internship/Practicum*"`)
   */
  scheduleTypes: string[];
  /**
   * List of the different GT campus locations that a class could take place at
   * (e.g. `"Georgia Tech-Atlanta *"` or `"Online"`)
   */
  campuses: string[];
  /**
   * List of other miscellaneous attributes that can be associated with a class
   * (e.g. `"Hybrid Course"`, `"Honors Program"`, or `"Capstone"`)
   */
  attributes: string[];
  /**
   * List of the different kinds of grading schemes a course can have
   */
  gradeBases: string[];
  /**
   * List of the different building locations a class can be at
   */
  locations: Location[];
  /**
   * List of all the dates on which finals are happening
   * (e.g. `Aug 02, 2022`)
   */
  finalDates: Date[];
  /**
   * List of the time blocks for finals
   * (e.g. `11:20 am - 2:10 pm`)
   */
  finalTimes: string[];
  /**
   * List of the full names of courses
   * (e.g. `Accounting` for `ACCT`)
   */
  // fullCourseNames: { [key: string]: string };
}
81 |
82 | /**
83 | * Contains information about a single class
84 | * (**Note** that this is an **array** (tuple), not an object)
85 | */
86 | export type Course = [
87 | /**
88 | * the full, human-readable name of the course (e.g. "Accounting I")
89 | */
90 | fullName: string,
91 | /**
92 | * a JSON object with information about each section of the course;
93 | * the section IDs are the keys (`"A"`, `"B"`, `"S2"`, etc.)
94 | */
95 | sections: Record,
96 | /**
97 | * a tree of prerequisite classes and the necessary grades in them
98 | * (using boolean expressions in prefix order)
99 | *
100 | * @example
101 | *
102 | * ```json
103 | [
104 | "and",
105 | [
106 | "or",
107 | {"id":"CS 3510", "grade":"C"},
108 | {"id":"CS 3511", "grade":"C"}
109 | ]
110 | ]
111 | * ```
112 | */
113 | prerequisites: Prerequisites,
114 | /**
115 | * Description pulled from Oscar
116 | */
117 | description: string | null
118 | ];
119 |
/**
 * Contains information about the course's section
 * (**Note** that this is an **array** (tuple), not an object)
 */
export type Section = [
  /**
   * the CRN number of this section of the course
   */
  crn: string,
  /**
   * array of information about the section's meeting times/places/professors/etc.;
   * in most cases, this array will only contain 1 item
   */
  meetings: Meeting[],
  /**
   * integer number of credit hours this course is worth
   */
  creditHours: number,
  /**
   * integer index into `caches.scheduleTypes`
   */
  scheduleTypeIndex: number,
  /**
   * integer index into `caches.campuses`,
   * specifying which campus the class is being taught at
   */
  campusIndex: number,
  /**
   * array of integer indices into `caches.attributes`,
   * specifying any additional attributes the course has
   */
  attributeIndices: number[],
  /**
   * integer index into `caches.gradeBases`,
   * specifying the grading scheme of the class
   */
  gradeBaseIndex: number
];
158 |
159 | /**
160 | * Contains meeting information about a class section
161 | * (**Note** that this is an **array** (tuple), not an object)
162 | */
163 | export type Meeting = [
164 | /**
165 | * an integer index into `caches.periods`,
166 | * specifying the class's start/end times
167 | */
168 | periodIndex: number,
169 | /**
170 | * a string specifying what days the class takes place on
171 | * (e.g. `"MW"` or `"TR"`)
172 | */
173 | days: string,
174 | /**
175 | * a string giving the room/location where the course will be held
176 | * (e.g. `"College of Business 224"`)
177 | */
178 | room: string,
179 | /**
180 | * an integer index into `caches.locations`,
181 | * containing the latitude and longitude for a given course
182 | */
183 | locationIndex: number,
184 | /**
185 | * an array of strings listing all the instructors for this section,
186 | * along with a 1-character code to mark the principal instructor
187 | * (e.g. `["Katarzyna Rubar (P)"]`)
188 | */
189 | instructors: string[],
190 | /**
191 | * an integer index into `caches.dateRanges`,
192 | * specifying the start/end date of the class this semester
193 | */
194 | dateRangeIndex: number,
195 | /**
196 | * integer index into caches.finalDates,
197 | * specifying the date at which the final is
198 | * -1 when no match could be found and
199 | * as a default value
200 | */
201 | finalDateIndex: number,
202 | /**
203 | * integer index into caches.finalTimes,
204 | * specifying the time at which the final is
205 | * -1 when no match could be found
206 | * and as a default value
207 | */
208 | finalTimeIdx: number
209 | ];
210 |
/**
 * Grade codes that can appear as the minimum required grade in a
 * prerequisite course.
 * NOTE(review): "T"/"S"/"U"/"V" look like non-letter grade-basis codes
 * (e.g. transfer/satisfactory) — confirm against Oscar's output.
 */
export type MinimumGrade = "A" | "B" | "C" | "D" | "T" | "S" | "U" | "V";
/**
 * A single prerequisite course (a leaf of the prerequisite tree),
 * optionally carrying a minimum required grade
 */
export type PrerequisiteCourse = { id: string; grade?: MinimumGrade };
/**
 * One node in the prerequisite tree: either a leaf course
 * or a nested and/or set
 */
export type PrerequisiteClause = PrerequisiteCourse | PrerequisiteSet;
/**
 * Boolean operator joining the clauses of a `PrerequisiteSet`
 */
export type PrerequisiteOperator = "and" | "or";
/**
 * An operator followed by one or more clauses, in prefix order
 * (e.g. `["or", {...}, {...}]`)
 */
export type PrerequisiteSet = [
  operator: PrerequisiteOperator,
  ...clauses: PrerequisiteClause[]
];
219 |
220 | /**
221 | * Location information about the building where a class takes place
222 | */
223 | export class Location {
224 | lat: number;
225 |
226 | long: number;
227 |
228 | constructor(lat: number, long: number) {
229 | this.lat = lat;
230 | this.long = long;
231 | }
232 | }
233 |
/**
 * Recursive data structure that is the sequence of all prerequisites in prefix notation,
 * parsed from the information on Oscar;
 * an empty array (`[]`) is used when there are no prerequisites
 *
 * @example
 *
 * ```json
 [
   "and",
   [
     "or",
     {"id":"CS 3510", "grade":"C"},
     {"id":"CS 3511", "grade":"C"}
   ]
 ]
 * ```
 */
export type Prerequisites = PrerequisiteSet | [];
252 |
253 | export type BannerResponse = {
254 | success: boolean;
255 | totalCount: number;
256 | data: SectionResponse[] | null;
257 | pageOffset: number;
258 | pageMaxSize: number;
259 | sectionsFetchedCount: 0;
260 | pathMode: string | null;
261 | searchResultsConfigs: SearchResultsConfigResponse[] | null;
262 | ztcEncodedImage: string | null;
263 | };
264 |
/**
 * One entry of `searchResultsConfigs` in the Banner API response
 * (NOTE(review): appears to describe a UI column config — field
 * meanings inferred from names)
 */
export type SearchResultsConfigResponse = {
  config: string;
  display: string;
  title: string;
  required: boolean;
  width: string;
};
272 |
/**
 * A single course-section object inside `BannerResponse.data`.
 * NOTE(review): field meanings are inferred from their names;
 * fields typed `unknown` presumably have shapes the crawler never
 * consumes — verify against a live Banner response before relying
 * on them.
 */
export type SectionResponse = {
  id: number;
  term: string;
  termDesc: string;
  // CRN, as a string (may have leading zeros)
  courseReferenceNumber: string;
  partOfTerm: string;
  subject: string;
  subjectDescription: string;
  sequenceNumber: string;
  campusDescription: string;
  scheduleTypeDescription: string;
  courseTitle: string;
  creditHours: number;
  maximumEnrollment: number;
  enrollment: number;
  seatsAvailable: number;
  waitCapacity: number;
  waitCount: number;
  crossList: unknown;
  crossListCapacity: unknown;
  crossListCount: unknown;
  crossListAvailable: unknown;
  creditHourHigh: number;
  creditHourLow: number;
  creditHourIndicator: unknown;
  openSection: boolean;
  linkIdentifier: unknown;
  isSectionLinked: boolean;
  subjectCourse: string;
  faculty: FacultyResponse[];
  meetingsFaculty: MeetingsFacultyResponse[];
  reservedSeatSummary: unknown;
  sectionAttributes: SectionAttributeResponse[];
  instructionalMethod: unknown;
  instructionalMethodDescription: unknown;
};
309 |
/**
 * One attribute attached to a section in the Banner API response
 * (an entry of `SectionResponse.sectionAttributes`)
 */
export interface SectionAttributeResponse {
  class: string;
  code: string;
  courseReferenceNumber: string;
  description: string;
  isZTCAttribute: boolean;
  termCode: string;
}
318 |
/**
 * One instructor record in the Banner API response
 * (referenced from both `SectionResponse.faculty` and
 * `MeetingsFacultyResponse.faculty`)
 */
export interface FacultyResponse {
  bannerId: string;
  category: unknown;
  class: string;
  courseReferenceNumber: string;
  displayName: string;
  emailAddress: string;
  // NOTE(review): presumably marks the principal instructor — confirm
  primaryIndicator: boolean;
  term: string;
}
329 |
/**
 * Pairs a meeting time with its instructors in the Banner API
 * response (an entry of `SectionResponse.meetingsFaculty`)
 */
export interface MeetingsFacultyResponse {
  category: string;
  class: string;
  courseReferenceNumber: string;
  faculty: FacultyResponse[];
  meetingTime: MeetingsResponse;
  term: string;
}
338 |
/**
 * Meeting time/place details in the Banner API response
 * (`MeetingsFacultyResponse.meetingTime`).
 * NOTE(review): the per-weekday booleans presumably flag which days
 * the section meets; begin/end times and dates are strings in
 * Banner's own formats — confirm formats against a live response.
 */
export interface MeetingsResponse {
  beginTime: string;
  building: string;
  buildingDescription: string;
  campus: string;
  campusDescription: string;
  category: string;
  class: string;
  courseReferenceNumber: string;
  creditHourSession: number;
  endDate: string;
  endTime: string;
  friday: boolean;
  hoursWeek: number;
  meetingScheduleType: string;
  meetingType: string;
  meetingTypeDescription: string;
  monday: boolean;
  room: string;
  saturday: boolean;
  startDate: string;
  sunday: boolean;
  term: string;
  thursday: boolean;
  tuesday: boolean;
  wednesday: boolean;
}
366 |
/**
 * A term (semester) identifier known to the crawler.
 * NOTE(review): `finalized` is presumably whether the term's data is
 * final (no longer changing) — confirm against usage in src/steps.
 */
export interface Term {
  term: string;
  finalized: boolean;
}
371 |
--------------------------------------------------------------------------------
/src/utils.ts:
--------------------------------------------------------------------------------
1 | import fs from "fs";
2 | import { error } from "./log";
3 | import { Location } from "./types";
4 |
5 | export function extract(
6 | text: string,
7 | regexp: RegExp,
8 | callback: (execArray: RegExpExecArray) => T
9 | ): T[] {
10 | const array: T[] = [];
11 | for (
12 | let results = regexp.exec(text);
13 | results != null;
14 | results = regexp.exec(text)
15 | ) {
16 | array.push(callback(results));
17 | }
18 | return array;
19 | }
20 |
21 | export function match(text: string, regexp: RegExp): string | null {
22 | const results = regexp.exec(text);
23 | return results && results[1];
24 | }
25 |
26 | export function cache(
27 | array: (Location | string | null)[],
28 | value: Location | string | null
29 | ): number {
30 | let index = array.indexOf(value);
31 | if (index === -1) {
32 | array.push(value);
33 | index = array.length - 1;
34 | }
35 | return index;
36 | }
37 |
38 | export function writeFile(path: string, json: unknown): Promise {
39 | return new Promise((resolve, reject) => {
40 | const content = JSON.stringify(json);
41 | fs.writeFile(path, content, (err) => {
42 | if (err) {
43 | reject(err);
44 | } else {
45 | resolve();
46 | }
47 | });
48 | });
49 | }
50 |
51 | export function concatParams(params: Record): string {
52 | return Object.entries(params)
53 | .map(([key, value]) => `${key}=${encodeURIComponent(value)}`)
54 | .join("&");
55 | }
56 |
57 | /**
58 | * Ensures a regular expression executes with a match,
59 | * or throws an exception
60 | * @param regex - Source regular expression
61 | * @param str - Target string
62 | */
63 | export function regexExec(regex: RegExp, str: string): RegExpExecArray {
64 | const result = regex.exec(str);
65 | if (result == null)
66 | throw new Error(
67 | "Regular expression '${}' failed to execute on string '${}'"
68 | );
69 | return result;
70 | }
71 |
72 | export function getIntConfig(key: string): number | null {
73 | const value = process.env[key];
74 | if (value == null) return null;
75 | try {
76 | return parseInt(value, 10);
77 | } catch (err) {
78 | error(`invalid integer config value provided`, err, { key, value });
79 | process.exit(1);
80 | // Unreachable
81 | return null;
82 | }
83 | }
84 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | /* Visit https://aka.ms/tsconfig.json to read more about this file */
4 |
5 | /* Basic Options */
6 | // "incremental": true, /* Enable incremental compilation */
7 | "target": "es6" /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */,
8 | "module": "commonjs" /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */,
9 | "lib": [
10 | "ES2018"
11 | ] /* Specify library files to be included in the compilation. */,
12 | // "allowJs": true, /* Allow javascript files to be compiled. */
13 | // "checkJs": true, /* Report errors in .js files. */
14 | // "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */
15 | // "declaration": true, /* Generates corresponding '.d.ts' file. */
16 | // "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */
17 | // "sourceMap": true, /* Generates corresponding '.map' file. */
18 | // "outFile": "./", /* Concatenate and emit output to single file. */
19 | // "outDir": "./", /* Redirect output structure to the directory. */
20 | // "rootDir": "./", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */
21 | // "composite": true, /* Enable project compilation */
22 | // "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */
23 | // "removeComments": true, /* Do not emit comments to output. */
24 | // "noEmit": true, /* Do not emit outputs. */
25 | // "importHelpers": true, /* Import emit helpers from 'tslib'. */
26 | "downlevelIteration": true /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */,
27 | // "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */
28 |
29 | /* Strict Type-Checking Options */
30 | "strict": true /* Enable all strict type-checking options. */,
31 | // "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */
32 | // "strictNullChecks": true, /* Enable strict null checks. */
33 | // "strictFunctionTypes": true, /* Enable strict checking of function types. */
34 | // "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */
35 | // "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */
36 | // "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */
37 | // "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */
38 |
39 | /* Additional Checks */
40 | // "noUnusedLocals": true, /* Report errors on unused locals. */
41 | // "noUnusedParameters": true, /* Report errors on unused parameters. */
42 | // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */
43 | // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */
44 |
45 | /* Module Resolution Options */
46 | // "moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */
47 | // "baseUrl": "./", /* Base directory to resolve non-absolute module names. */
48 | // "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */
49 | // "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */
50 | // "typeRoots": [], /* List of folders to include type definitions from. */
51 | // "types": [], /* Type declaration files to be included in compilation. */
52 | // "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */
53 | "esModuleInterop": true /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */,
54 | // "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */
55 | // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
56 |
57 | /* Source Map Options */
58 | // "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */
59 | // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
60 | // "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. */
61 | // "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */
62 |
63 | /* Experimental Options */
64 | // "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */
65 | // "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */
66 |
67 | /* Advanced Options */
68 | "skipLibCheck": true /* Skip type checking of declaration files. */,
69 | "forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. */
70 | }
71 | }
72 |
--------------------------------------------------------------------------------