├── .dockerignore
├── .github
├── PULL_REQUEST_TEMPLATE.md
├── dependabot.yml
└── workflows
│ └── ci.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CODE_OF_CONDUCT.md
├── Dockerfile
├── LICENSE
├── README.md
├── api
├── __init__.py
├── deprecated
│ ├── __init__.py
│ ├── ssr.py
│ ├── static
│ │ ├── css
│ │ │ └── styles.css
│ │ ├── images
│ │ │ ├── increase-16px.png
│ │ │ ├── increase-32px.png
│ │ │ └── increase-512px.png
│ │ └── js
│ │ │ ├── questions-finished-plot.js
│ │ │ └── user-rank-plot.js
│ └── templates
│ │ ├── base.html
│ │ ├── contest.html
│ │ └── index.html
├── entry.py
├── routers
│ ├── __init__.py
│ ├── contest_records.py
│ ├── contests.py
│ └── questions.py
└── utils.py
├── app
├── __init__.py
├── config.py
├── constants.py
├── core
│ ├── __init__.py
│ ├── elo.py
│ ├── fft.py
│ └── predictor.py
├── crawler
│ ├── __init__.py
│ ├── contest.py
│ ├── contest_record_and_submission.py
│ ├── question.py
│ ├── user.py
│ └── utils.py
├── db
│ ├── __init__.py
│ ├── components.py
│ ├── models.py
│ ├── mongodb.py
│ └── views.py
├── handler
│ ├── __init__.py
│ ├── contest.py
│ ├── contest_record.py
│ ├── question.py
│ ├── submission.py
│ └── user.py
├── schedulers.py
└── utils.py
├── client
├── .eslintrc.cjs
├── .gitignore
├── index.html
├── package-lock.json
├── package.json
├── postcss.config.js
├── public
│ └── favicon.ico
├── src
│ ├── App.jsx
│ ├── components
│ │ ├── Footer.jsx
│ │ ├── Navbar.jsx
│ │ ├── Pagination.jsx
│ │ └── charts
│ │ │ ├── ContestsUserNumStackedArea.jsx
│ │ │ ├── QuestionFinishedChart.jsx
│ │ │ └── RealTimeRankChart.jsx
│ ├── data
│ │ └── constants.js
│ ├── index.css
│ ├── main.jsx
│ ├── pages
│ │ ├── Contests
│ │ │ └── ContestsUserNum.jsx
│ │ └── Predicted
│ │ │ ├── PredictedContests.jsx
│ │ │ └── PredictedRecords.jsx
│ └── utils.js
├── tailwind.config.js
└── vite.config.js
├── config.yaml.template
├── main.py
├── requirements.txt
└── tests
├── __init__.py
├── app
├── __init__.py
└── core
│ ├── __init__.py
│ ├── test_elo.py
│ └── test_fft.py
├── tests_data
└── contest_prediction_1.npy
└── utils.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | # git
2 | .git
3 | .gitignore
4 |
5 | # python
6 | venv/
7 |
8 | # root directory config
9 | /config.yaml
10 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 |
6 |
7 | ## Checks
8 |
9 | - [ ] **Pre-Commit Checks:** Successfully ran and passed to ensure code quality and adherence to coding standards.
10 |
11 | - [ ] **Tests:** Executed and passed, validating the correctness and reliability of the implementation.
12 |
13 | ## Close Issue(s)
14 |
15 |
19 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |
4 | # Check for updates to GitHub Actions every week
5 | - package-ecosystem: "github-actions"
6 | directory: "/"
7 | schedule:
8 | interval: "weekly"
9 | commit-message:
10 | prefix: "⬆️ chore(deps):"
11 |
12 | # Check for updates to Python packages every month
13 | - package-ecosystem: "pip"
14 | directory: "/"
15 | schedule:
16 | interval: "monthly"
17 | commit-message:
18 | prefix: "⬆️ chore(deps):"
19 | groups:
20 | all:
21 | patterns:
22 | - "*"
23 |
24 | # Check for updates to JavaScript packages every month
25 | - package-ecosystem: "npm"
26 | directory: "/client"
27 | schedule:
28 | interval: "monthly"
29 | commit-message:
30 | prefix: "⬆️ chore(deps):"
31 | groups:
32 | all:
33 | patterns:
34 | - "*"
35 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Run Linters and Tests
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches: ["main"]
7 |
8 | jobs:
9 | precommit:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout code
13 | uses: actions/checkout@v4
14 |
15 | - name: Set up python
16 | uses: actions/setup-python@v5
17 | with:
18 | python-version: "3.10"
19 |
20 | - name: Install pre-commit
21 | run: python -m pip install pre-commit
22 |
23 | - name: Execute pre-commit
24 | run: pre-commit run --show-diff-on-failure --color=always --all-files
25 |
26 | test:
27 | runs-on: ubuntu-latest
28 |
29 | steps:
30 | - name: Checkout code
31 | uses: actions/checkout@v4
32 |
33 | - name: Set up python
34 | uses: actions/setup-python@v5
35 | with:
36 | python-version: "3.10"
37 |
38 | - name: Install dependencies
39 | run: |
40 | python -m pip install --upgrade pip
41 | pip install -r requirements.txt
42 |
43 | - name: Run tests
44 | run: pytest tests/
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # MacOS
2 | .DS_Store
3 |
4 | # root directory config
5 | /config.yaml
6 |
7 | # temporary directory
8 | /tmp
9 |
10 | # jetbrains
11 | .idea/
12 |
13 | # Byte-compiled / optimized / DLL files
14 | __pycache__/
15 | *.py[cod]
16 | *$py.class
17 |
18 | # C extensions
19 | *.so
20 |
21 | # Distribution / packaging
22 | .Python
23 | build/
24 | develop-eggs/
25 | dist/
26 | downloads/
27 | eggs/
28 | .eggs/
29 | lib/
30 | lib64/
31 | parts/
32 | sdist/
33 | var/
34 | wheels/
35 | pip-wheel-metadata/
36 | share/python-wheels/
37 | *.egg-info/
38 | .installed.cfg
39 | *.egg
40 | MANIFEST
41 |
42 | # PyInstaller
43 | # Usually these files are written by a python script from a template
44 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
45 | *.manifest
46 | *.spec
47 |
48 | # Installer logs
49 | pip-log.txt
50 | pip-delete-this-directory.txt
51 |
52 | # Unit test / coverage reports
53 | htmlcov/
54 | .tox/
55 | .nox/
56 | .coverage
57 | .coverage.*
58 | .cache
59 | nosetests.xml
60 | coverage.xml
61 | *.cover
62 | *.py,cover
63 | .hypothesis/
64 | .pytest_cache/
65 |
66 | # Translations
67 | *.mo
68 | *.pot
69 |
70 | # Django stuff:
71 | *.log
72 | local_settings.py
73 | db.sqlite3
74 | db.sqlite3-journal
75 |
76 | # Flask stuff:
77 | instance/
78 | .webassets-cache
79 |
80 | # Scrapy stuff:
81 | .scrapy
82 |
83 | # Sphinx documentation
84 | docs/_build/
85 |
86 | # PyBuilder
87 | target/
88 |
89 | # Jupyter Notebook
90 | .ipynb_checkpoints
91 |
92 | # IPython
93 | profile_default/
94 | ipython_config.py
95 |
96 | # pyenv
97 | .python-version
98 |
99 | # pipenv
100 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
103 | # install all needed dependencies.
104 | #Pipfile.lock
105 |
106 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
107 | __pypackages__/
108 |
109 | # Celery stuff
110 | celerybeat-schedule
111 | celerybeat.pid
112 |
113 | # SageMath parsed files
114 | *.sage.py
115 |
116 | # Environments
117 | .env
118 | .venv
119 | env/
120 | venv/
121 | ENV/
122 | env.bak/
123 | venv.bak/
124 |
125 | # Spyder project settings
126 | .spyderproject
127 | .spyproject
128 |
129 | # Rope project settings
130 | .ropeproject
131 |
132 | # mkdocs documentation
133 | /site
134 |
135 | # mypy
136 | .mypy_cache/
137 | .dmypy.json
138 | dmypy.json
139 |
140 | # Pyre type checker
141 | .pyre/
142 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.4.0
4 | hooks:
5 | - id: check-yaml
6 | - id: end-of-file-fixer
7 | - id: trailing-whitespace
8 | - repo: https://github.com/pycqa/isort
9 | rev: 5.12.0
10 | hooks:
11 | - id: isort
12 | args: ["--profile", "black"]
13 | - repo: https://github.com/psf/black
14 | rev: 22.10.0
15 | hooks:
16 | - id: black
17 | language_version: python3.10
18 | - repo: https://github.com/pycqa/flake8
19 | rev: 6.0.0
20 | hooks:
21 | - id: flake8
22 | args:
23 | - "--max-line-length=120"
24 | - "--max-complexity=10"
25 | - repo: https://github.com/pre-commit/mirrors-prettier
26 | rev: "v2.7.1"
27 | hooks:
28 | - id: prettier
29 | files: '\.(jsx?|tsx?|css)$'
30 | additional_dependencies:
31 | - prettier@2.8.2
32 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | https://github.com/baoliay2008.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# OS
FROM ubuntu:22.04

# Python 3.10
# `-y` is required on `apt upgrade` as well: without it apt prompts for
# confirmation, and a non-interactive `docker build` (no tty) aborts there.
RUN apt update && apt upgrade -y && \
    apt install software-properties-common -y && \
    add-apt-repository ppa:deadsnakes/ppa -y && \
    apt update && \
    apt install python3.10 -y && \
    apt install python3-pip python3.10-venv -y

# Copy project
ADD . /lccn_predictor

# Path
WORKDIR /lccn_predictor

# Install packages
# add `-i https://pypi.tuna.tsinghua.edu.cn/simple/` if you were in China mainland.
RUN pip install --no-cache-dir -r requirements.txt

# Run project
# make main.py a background task, although it's not a good idea to run two processes in a single container
CMD python3 main.py & uvicorn api.entry:app --host 0.0.0.0 --port 55555
25 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Li Bao
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Introduction
3 |
4 | This is a LeetCode weekly and biweekly contest rating predictor. The APP is available online at [🔗 lccn.lbao.site](https://lccn.lbao.site/)
5 |
6 | Hopefully, you can get the predicted result within **15-30 minutes** after the contest has finished.
7 |
8 | ## Features
9 |
10 | * ⚡️ Fast
11 | * The core Elo rating algorithm is significantly enhanced by a **JIT compiler** through [Numba](https://numba.pydata.org), reducing execution time to approximately 20 seconds on a dual-core *Intel(R) Xeon(R) Platinum 8255C CPU* (@ 2.50GHz).
12 | * In addition to the JIT implementation, this project incorporates a **FFT implementation**. The Elo rating system employed by LeetCode benefits significantly from the FFT algorithm, achieving speedups ranging from **65 to 1,000 times** for individual contest predictions. The most efficient FFT implementation (`EXPAND_SIZE=1`) completes predictions in under **0.25 seconds**, maintaining an impressively low MSE of approximately 0.027.
13 | * **Caching** the user's latest rating before initiating the prediction process leads to a substantial reduction in the time required for data retrieval.
14 | * Fully **asynchronous**, using non-blocking libraries.
15 | * 🎯 Accurate
16 | * If there were no significant rejudges (assuming everyone's global ranking remains unchanged), it is **ensured** that the prediction error for rating deltas for **EACH** participant is within the precision limit of 0.05. As a result, the rating difference should be negligible.
17 | * Please note that a normal case is that there would be some misconduct detection, so your global ranking will be slightly higher even if your submissions are not rejudged, which results in a slightly higher rating :)
18 | * 📱 Responsive web page
19 | * Tested on phones and tablets.
20 |
21 | # Underlying Mechanism
22 |
23 | ## Algorithm
24 |
25 | * [🔗 English official illustration on leetcode.com](https://leetcode.com/discuss/general-discussion/468851/New-Contest-Rating-Algorithm-%28Coming-Soon%29)
26 | * 🔗 Detailed post about FFT acceleration
27 | - ❤️ Special thanks to [@tiger2005](https://github.com/tiger2005) for proposing this idea in [issue #8](https://github.com/baoliay2008/lccn_predictor/issues/8)
28 |
29 | ## Database
30 |
31 | * [MongoDB](https://www.mongodb.com/): NoSQL database
32 | * [Beanie](https://beanie-odm.dev/): ODM for MongoDB
33 |
34 | ## Backend
35 |
36 | * [Numpy](https://numpy.org/) and [Numba](https://numba.pydata.org/): core prediction algorithm implementation and acceleration
37 | * [FastAPI](https://fastapi.tiangolo.com/): restful API
38 | * 🚮 ~~[Jinja](https://jinja.palletsprojects.com/): HTML templates for server-side rendering~~
39 |
40 | ## Frontend
41 |
42 | * [React](https://reactjs.org/): most popular front-end library
43 | * [TailwindCSS](https://tailwindcss.com/) and [DaisyUI](https://daisyui.com/): modern CSS framework and its component library
44 | * 🚮 ~~[Materialize](https://materializecss.com/): responsive front-end framework~~
45 | * [Echarts](https://echarts.apache.org/en/index.html): data visualization
46 |
47 | # Development
48 |
49 | ## Backend Deployment
50 |
51 | ### virtualenv
52 |
53 | ```shell
54 | git clone git@github.com:baoliay2008/lccn_predictor.git
55 | cd lccn_predictor
56 |
57 | # write your mongodb environment config
58 | cp config.yaml.template config.yaml
59 | vi config.yaml
60 |
61 | python3.10 -m virtualenv venv/
62 | source venv/bin/activate
63 |
64 | pip3 install -r requirements.txt
65 |
66 | python main.py
67 | uvicorn api.entry:app --host 0.0.0.0 --port 55555
68 | ```
69 |
70 | ### Docker
71 |
72 | ```shell
73 | git clone git@github.com:baoliay2008/lccn_predictor.git
74 | cd lccn_predictor
75 |
76 | # write production environment mongodb config
77 | cp config.yaml.template config.yaml
78 | vi config.yaml
79 |
80 | # build docker image
81 | docker image build -t lccn_predictor:0.2.4 .
82 |
83 | # create docker volume
84 | docker volume create lccn_predictor
85 |
86 | # run container
87 | docker run -d -v lccn_predictor:/lccn_predictor -p 55555:55555 --name lp lccn_predictor:0.2.4
88 |
89 | docker exec -it lp bash
90 |
91 | docker container stop lp
92 |
93 | docker container start lp
94 |
95 | ```
96 |
97 | ## Frontend Deployment
98 |
99 | ```shell
100 | cd client
101 |
102 | # install dependencies
103 | npm install
104 |
105 | # change `baseUrl` to your local backend process
106 | vi src/data/constants.js
107 | # if you followed instruction above
108 | # it should be "http://localhost:55555/api/v1"
109 |
110 | # local test
111 | npm run dev
112 |
113 | # publish
114 | npm run build
115 |
116 | ```
117 |
118 | ## More Information
119 |
120 | * [🔗 refined-leetcode](https://github.com/XYShaoKang/refined-leetcode): A Chrome extension for leetcode.cn, created by [@XYShaoKang](https://github.com/XYShaoKang)
121 |
122 |
123 | # License
124 |
125 | [MIT License](LICENSE)
126 |
127 | # Changelog
128 |
129 | * v0.0.1(2022/11/14)
130 | > make this repo public, first release.
131 | * v0.0.2(2022/11/25)
132 | > first version in production
133 | * v0.1.1(2023/02/14)
134 | > change frontend from server-side rendering([Jinja](https://jinja.palletsprojects.com/) + [Materialize](https://materializecss.com/)) to client-side rendering([React](https://reactjs.org/)).
135 | * v0.1.2(2023/10/04)
136 | > refine backend logic to enhance robustness and clean up deprecated static site rendering code
137 | * v0.1.3(2023/12/28)
138 | > last version prior to the rewrite of the Elo rating algorithm
139 | * v0.2.1(2023/12/29)
140 | > add FFT implementation
141 | * v0.2.2(2024/01/12)
142 | > refactor to improve backend code clarity
143 | * v0.2.3(2024/08/31)
144 | > add visualization for the number of contest entrants
145 | * v0.2.4(2024/09/16)
146 | > improvement(frontend): add pageNum URL parameter
147 | ---
148 |
149 | # Supported by
150 |
151 | [JetBrains Open Source Support](https://jb.gg/OpenSourceSupport)
152 |
--------------------------------------------------------------------------------
/api/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/api/__init__.py
--------------------------------------------------------------------------------
/api/deprecated/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/api/deprecated/__init__.py
--------------------------------------------------------------------------------
/api/deprecated/ssr.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import math
3 | from datetime import datetime
4 | from typing import Final, List, Literal, Optional
5 |
6 | from fastapi import Body, FastAPI, Form, HTTPException, Request
7 | from fastapi.responses import HTMLResponse
8 | from fastapi.staticfiles import StaticFiles
9 | from fastapi.templating import Jinja2Templates
10 | from loguru import logger
11 | from pydantic import BaseModel
12 |
13 | from app.db.models import Contest, ContestRecordArchive, ContestRecordPredict, Question
14 | from app.db.mongodb import start_async_mongodb
15 | from app.utils import start_loguru
16 |
17 |
class KeyUniqueContestRecord(BaseModel):
    """Composite key that uniquely identifies one contest record."""

    # Contest titleSlug, e.g. "weekly-contest-300".
    contest_name: str
    username: str
    # Which LeetCode site the user belongs to — presumably "CN" or "US",
    # matching DATA_REGION below; this model does not enforce it.
    data_region: str
22 |
23 |
# Deprecated server-side-rendered app: serves Jinja2 templates and static
# assets out of api/deprecated/.
app = FastAPI()
app.mount("/static", StaticFiles(directory="api/deprecated/static"), name="static")
templates = Jinja2Templates(directory="api/deprecated/templates")
27 |
28 |
@app.on_event("startup")
async def startup_event():
    """Initialize logging and the async MongoDB connection before serving."""
    start_loguru(process="api")
    await start_async_mongodb()
33 |
34 |
@app.get("/", response_class=HTMLResponse)
async def index_page_get(
    request: Request,
):
    """Render the index page listing every contest that has a prediction."""
    logger.info(f"index_page_get {request.client=}")
    # Sentinel filter: beanie cannot express `predict_time is not None`,
    # so compare against a date earlier than any real prediction time.
    query = Contest.find(
        Contest.predict_time > datetime(2000, 1, 1),
    )
    predict_contests = await query.sort(-Contest.startTime).to_list()
    logger.trace(f"{predict_contests=}")
    context = {
        "request": request,
        "predict_contests": predict_contests,
    }
    return templates.TemplateResponse("index.html", context)
56 |
57 |
@app.get("/{contest_name}/{page}", response_class=HTMLResponse)
async def contest_page_get(
    request: Request,
    contest_name: str,
    page: int = 1,
):
    """
    Render one page of predicted results for a contest.

    Records with score == 0 (registered but not participating) are
    excluded, and results are ordered by contest rank.

    :param request: incoming request (used for logging and the template).
    :param contest_name: contest titleSlug.
    :param page: 1-based page index.
    """
    # Single source of truth for the page size instead of repeating the
    # magic number 25 in the ceil / skip / limit expressions below.
    PAGE_SIZE: Final[int] = 25
    logger.info(f"{request.client=} {contest_name=}, {page=}")
    total_num = await ContestRecordPredict.find(
        ContestRecordPredict.contest_name == contest_name,
        ContestRecordPredict.score != 0,
    ).count()
    max_page = math.ceil(total_num / PAGE_SIZE)
    # Sliding window of up to 9 page links centered on the current page.
    pagination_list = [i for i in range(page - 4, page + 5) if 1 <= i <= max_page]
    records = (
        await ContestRecordPredict.find(
            ContestRecordPredict.contest_name == contest_name,
            ContestRecordPredict.score != 0,
        )
        .sort(ContestRecordPredict.rank)
        .skip(PAGE_SIZE * (page - 1))
        .limit(PAGE_SIZE)
        .to_list()
    )
    return templates.TemplateResponse(
        "contest.html",
        {
            "request": request,
            "contest_name": contest_name,
            "user_list": records,
            "current_page": page,
            "max_page": max_page,
            "pagination_list": pagination_list,
        },
    )
92 |
93 |
@app.post("/{contest_name}/query_user", response_class=HTMLResponse)
async def contest_user_post(
    request: Request,
    contest_name: str,
    username: Optional[str] = Form(None),
):
    """Look up a single user's predicted record within one contest."""
    logger.info(f"{request.client=}, {contest_name=}, {username=}")
    record = await ContestRecordPredict.find_one(
        ContestRecordPredict.contest_name == contest_name,
        ContestRecordPredict.username == username,
        ContestRecordPredict.score != 0,
    )
    # Reuse the contest page template; an empty user_list means "no result".
    matched = [record] if record else []
    return templates.TemplateResponse(
        "contest.html",
        {
            "request": request,
            "contest_name": contest_name,
            "user_list": matched,
            "current_page": None,
        },
    )
115 |
116 |
@app.post("/user_rank_list")
async def contest_user_rank_list(
    request: Request,
    unique_contest_record: KeyUniqueContestRecord,
):
    """Return a user's minute-by-minute rank series for plotting."""
    logger.info(f"{request.client=} {unique_contest_record=}")
    contest = await Contest.find_one(
        Contest.titleSlug == unique_contest_record.contest_name
    )
    if not contest:
        logger.error(f"contest not found for {unique_contest_record=}")
        return {}
    record = await ContestRecordArchive.find_one(
        ContestRecordArchive.contest_name == unique_contest_record.contest_name,
        ContestRecordArchive.username == unique_contest_record.username,
        ContestRecordArchive.data_region == unique_contest_record.data_region,
    )
    if not record:
        logger.error(f"user contest record not found for {unique_contest_record=}")
    # Header row first; fall back to an empty series when the record (or its
    # rank list) is missing so the response shape stays constant.
    rank_series = record.real_time_rank if record and record.real_time_rank else []
    data = [["Minute", "User", "Rank"]]
    for minute, rank in enumerate(rank_series):
        data.append([minute + 1, unique_contest_record.username, rank])
    logger.trace(f"{unique_contest_record=} {data=}")
    return {
        "real_time_rank": data,
        "start_time": contest.startTime,
    }
148 |
149 |
@app.post("/questions_finished_list")
async def contest_questions_finished_list(
    request: Request,
    contest_name: str = Body(embed=True),
):
    """Return the per-question finished-count time series for a contest."""
    logger.info(f"{request.client=} {contest_name=}")
    data = [["Minute", "Question", "Count"]]
    contest = await Contest.find_one(
        Contest.titleSlug == contest_name,
    )
    if not contest:
        logger.error(f"contest not found for {contest_name=}")
        return {}
    questions = await Question.find(Question.contest_name == contest_name).to_list()
    if not questions:
        logger.error(f"{questions=}, no data now")
        return {"real_time_count": data}
    # Label questions Q1..Qn in ascending credit order.
    questions = sorted(questions, key=lambda q: q.credit)
    logger.trace(f"{questions=}")
    for index, question in enumerate(questions, start=1):
        for minute, count in enumerate(question.real_time_count):
            data.append([minute + 1, f"Q{index}", count])
    logger.trace(f"{contest_name=} {data=}")
    return {"real_time_count": data}
178 |
179 |
# The two LeetCode data regions: mainland China ("CN") and global ("US").
DATA_REGION = Literal["CN", "US"]


class UniqueUser(BaseModel):
    """A user uniquely identified by (username, data_region)."""

    username: str
    data_region: DATA_REGION


class QueryPredictedRecords(BaseModel):
    """Request body: predicted records for `users` within `contest_name`."""

    contest_name: str
    users: List[UniqueUser]


class ProjectionPredictedResult(BaseModel):
    """Response projection containing only the rating fields of a record."""

    # All fields default to None, so a user without a matching record
    # serializes as an all-null object rather than failing.
    old_rating: Optional[float] = None
    new_rating: Optional[float] = None
    delta_rating: Optional[float] = None
197 |
198 |
@app.post("/predict_records")
async def contest_predict_records(
    request: Request,
    query: QueryPredictedRecords,
):
    """
    Query multiple predicted records in a contest.

    :param request: incoming request (used for logging only).
    :param query: contest name plus up to MAX_USERS unique users.
    :return: one ProjectionPredictedResult per queried user, in request
             order (all-None fields when no record matches).
    :raises HTTPException: 400 when too many users are requested or the
             contest does not exist.
    """
    logger.info(f"{request.client=} {query=}")
    MAX_USERS: Final[int] = 26
    # Validate the cheap size limit first so oversized requests are
    # rejected without a database round-trip.
    if (users_count := len(query.users)) > MAX_USERS:
        logger.error(f"{users_count=} per request, denied.")
        raise HTTPException(
            status_code=400,
            detail=f"request denied because {users_count=}, which is bigger than maximum value={MAX_USERS}",
        )
    contest = await Contest.find_one(Contest.titleSlug == query.contest_name)
    if not contest:
        logger.error(f"contest not found for {query.contest_name=}")
        raise HTTPException(
            status_code=400, detail=f"contest not found for {query.contest_name=}"
        )
    # Fan the per-user lookups out concurrently.
    tasks = (
        ContestRecordPredict.find_one(
            ContestRecordPredict.contest_name == query.contest_name,
            ContestRecordPredict.data_region == user.data_region,
            ContestRecordPredict.username == user.username,
            projection_model=ProjectionPredictedResult,
        )
        for user in query.users
    )
    return await asyncio.gather(*tasks)
234 |
--------------------------------------------------------------------------------
/api/deprecated/static/css/styles.css:
--------------------------------------------------------------------------------
/* use my favorite color (#0288d1 light-blue darken-2) for all buttons, icons and highlights */

/* spacing around the main content area */
main {
  margin-top: 3em;
  margin-bottom: 20em;
}

/* label focus color */
.input-field input:focus + label {
  color: #0288d1 !important;
}
/* label underline focus color */
.row .input-field input:focus {
  border-bottom: 1px solid #0288d1 !important;
  box-shadow: 0 1px 0 0 #0288d1 !important;
}
/* icon prefix focus color */
.row .input-field .prefix.active {
  color: #0288d1;
}
--------------------------------------------------------------------------------
/api/deprecated/static/images/increase-16px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/api/deprecated/static/images/increase-16px.png
--------------------------------------------------------------------------------
/api/deprecated/static/images/increase-32px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/api/deprecated/static/images/increase-32px.png
--------------------------------------------------------------------------------
/api/deprecated/static/images/increase-512px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/api/deprecated/static/images/increase-512px.png
--------------------------------------------------------------------------------
/api/deprecated/static/js/questions-finished-plot.js:
--------------------------------------------------------------------------------
// On page load: read the contest slug off the plot container and fetch the
// per-question finished counts, then hand them to trend_plot (defined below).
window.onload = function () {
  const contest_name = $("#question_plot").attr("contest_name");
  if (!contest_name) {
    console.log("not found");
    return;
  }
  $.ajax({
    url: "/questions_finished_list",
    type: "POST",
    contentType: "application/json",
    data: JSON.stringify({
      contest_name: contest_name,
    }),
    dataType: "json",
    success: function (data) {
      // Server returns rows under "real_time_count"; plot them directly.
      const real_time_count = data["real_time_count"];
      trend_plot(real_time_count);
    },
    error: function () {
      console.log("ajax to /questions_finished_list error.");
    },
  });
};
24 |
// Shared chart state: the echarts instance bound to the plot container and
// the last-built option object (assigned inside trend_plot).
let chartDom = document.getElementById("question_plot");
let myChart = echarts.init(chartDom);
let option;
28 |
// Build and render the "Question Finished Count" chart: one line series per
// question (Q1-Q4), each backed by a filtered view of the raw dataset.
function trend_plot(rank_list) {
  const questions = ["Q1", "Q2", "Q3", "Q4"];
  const filteredDatasets = [];
  const lineSeries = [];
  questions.forEach(function (question) {
    const datasetId = "dataset_" + question;
    // Derived dataset: rows of the raw data belonging to this question.
    filteredDatasets.push({
      id: datasetId,
      fromDatasetId: "dataset_raw",
      transform: {
        type: "filter",
        config: {
          and: [{ dimension: "Question", "=": question }],
        },
      },
    });
    // Matching line series; the end label renders "Qx: count".
    lineSeries.push({
      type: "line",
      datasetId: datasetId,
      showSymbol: false,
      name: question,
      endLabel: {
        show: true,
        formatter: function (params) {
          return params.value[1] + ": " + params.value[2];
        },
      },
      labelLayout: {
        moveOverlap: "shiftY",
      },
      emphasis: {
        focus: "series",
      },
      encode: {
        x: "Minute",
        y: "Count",
        label: ["Question", "Count"],
        itemName: "Minute",
        tooltip: ["Count"],
      },
    });
  });
  // Assign to the shared module-level `option`, then render.
  option = {
    animationDuration: 10000,
    dataset: [
      {
        id: "dataset_raw",
        source: rank_list,
      },
      ...filteredDatasets,
    ],
    title: {
      text: "Question Finished Count",
      x: "center",
    },
    tooltip: {
      order: "valueDesc",
      trigger: "axis",
    },
    xAxis: {
      type: "category",
      name: "Minute",
    },
    yAxis: {
      name: "Accepted",
    },
    grid: {
      right: 140,
    },
    series: lineSeries,
  };
  myChart.setOption(option);
}
102 |
103 | option && myChart.setOption(option, (notMerge = true));
104 |
--------------------------------------------------------------------------------
/api/deprecated/static/js/user-rank-plot.js:
--------------------------------------------------------------------------------
// Click on a rank-list entry: fetch that user's real-time rank trajectory
// for the contest and swap the question plot for the user-rank plot.
$(".rankListClick").on("click", function () {
  // Contest/user identity is carried on the clicked element's attributes.
  const contest_name = $(this).attr("contest_name");
  const username = $(this).attr("username");
  const data_region = $(this).attr("data_region");
  $.ajax({
    url: "/user_rank_list",
    type: "POST",
    contentType: "application/json",
    data: JSON.stringify({
      contest_name: contest_name,
      username: username,
      data_region: data_region,
    }),
    dataType: "json",
    success: function (data) {
      const real_time_rank = data["real_time_rank"];
      trend_plot(real_time_rank, username);
      // Show the user-rank chart in place of the question chart.
      $("#question_plot").hide();
      $("#user_rank_plot").show();
    },
    error: function () {
      console.log("ajax to /user_rank_list error.");
    },
  });
});
26 |
// Shared chart state for the user-rank plot: the echarts instance and the
// last-built option object (assigned inside trend_plot).
let chartDom = document.getElementById("user_rank_plot");
let myChart = echarts.init(chartDom);
let option;
30 |
// Build and render the "User Real Time Rank" chart for a single user: one
// line series backed by a filtered view of the raw rank rows.
function trend_plot(rank_list, username) {
  const users = [username];
  const filteredDatasets = [];
  const lineSeries = [];
  users.forEach(function (user) {
    const datasetId = "dataset_" + user;
    // Derived dataset: rows of the raw data belonging to this user.
    filteredDatasets.push({
      id: datasetId,
      fromDatasetId: "dataset_raw",
      transform: {
        type: "filter",
        config: {
          and: [{ dimension: "User", "=": user }],
        },
      },
    });
    // Matching line series; the end label renders "user: rank".
    lineSeries.push({
      type: "line",
      datasetId: datasetId,
      showSymbol: false,
      name: user,
      endLabel: {
        show: true,
        formatter: function (params) {
          return params.value[1] + ": " + params.value[2];
        },
      },
      labelLayout: {
        moveOverlap: "shiftY",
      },
      emphasis: {
        focus: "series",
      },
      encode: {
        x: "Minute",
        y: "Rank",
        label: ["User", "Rank"],
        itemName: "Minute",
        tooltip: ["Rank"],
      },
    });
  });
  // Assign to the shared module-level `option`, then render.
  option = {
    animationDuration: 10000,
    dataset: [
      {
        id: "dataset_raw",
        source: rank_list,
      },
      ...filteredDatasets,
    ],
    title: {
      text: "User Real Time Rank",
      x: "center",
    },
    tooltip: {
      order: "valueDesc",
      trigger: "axis",
    },
    xAxis: {
      type: "category",
      name: "Minute",
    },
    yAxis: {
      name: "Rank",
    },
    grid: {
      right: 140,
    },
    series: lineSeries,
  };
  myChart.setOption(option);
}
104 |
105 | option && myChart.setOption(option);
106 |
--------------------------------------------------------------------------------
/api/deprecated/templates/base.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {% block head %}
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | {% endblock %}
20 |
21 |
22 |
23 |
24 | {% block header %}
25 |
35 | {% endblock %}
36 |
37 |
38 |
39 | {% block main %}{% endblock %}
40 |
41 |
42 | {% block submain %}{% endblock %}
43 |
44 |
82 |
83 |
84 | {% block scripts %}
85 |
86 |
87 |
88 |
89 |
95 |
96 | {% endblock %}
97 |
98 |
99 |
--------------------------------------------------------------------------------
/api/deprecated/templates/contest.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 |
4 | {% block head %}
5 | {{ super() }}
6 | {{contest_name}}
7 |
8 | {% endblock %}
9 |
10 |
11 | {% block header %}
12 | {{ super() }}
13 | {% endblock %}
14 |
15 |
16 | {% block main %}
17 |
18 |
19 |
20 |
29 |
30 | {{' '.join(contest_name.split('-')) | title }}
31 |
32 | {% if current_page %}
33 |
38 | {% endif %}
39 |
40 |
41 |
42 |
58 |
59 | {% if current_page == 1 %}
60 |
61 | {% endif %}
62 |
63 |
64 |
65 |
66 |
67 | Rank |
68 | Username |
69 | Region |
70 | Old Rating |
71 | New Rating |
72 | Delta |
73 | Plot |
74 |
75 |
76 |
77 |
78 | {% for user in user_list %}
79 |
80 | #{{user.rank}} |
81 |
82 | {% if user.data_region == 'CN' %}
83 | {{user.username}}
84 | {% else %}
85 | {{user.username}}
86 | {% endif %}
87 | |
88 | {{user.country_name if user.country_name}} |
89 | {{user.old_rating | round(2)}} |
90 | {{user.new_rating | round(2)}} |
91 | {% if user.delta_rating > 0 %}
92 |
93 | {{"+{}".format(user.delta_rating | round(2))}}
94 | |
95 | {% else %}
96 |
97 | {{user.delta_rating | round(2)}}
98 | |
99 | {% endif %}
100 |
101 |
109 | |
110 |
111 | {% endfor %}
112 |
113 |
114 |
115 | {% if user_list.__len__() == 0 %}
Cannot find any records
{% endif %}
118 |
119 | {% if current_page %}
120 |
121 |
148 |
149 | {% endif %}
150 |
151 | {% endblock %}
152 |
153 |
154 | {% block footer %}
155 | {{ super() }}
156 | {% endblock %}
157 |
158 |
159 | {% block scripts %}
160 | {{ super() }}
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 |
170 | {% endblock %}
171 |
--------------------------------------------------------------------------------
/api/deprecated/templates/index.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 |
4 | {% block head %}
5 | {{ super() }}
6 | LCCN Predictor
7 | {% endblock %}
8 |
9 |
10 | {% block header %}
11 | {{ super() }}
12 | {% endblock %}
13 |
14 |
15 | {% block main %}
16 |
17 |
18 | Leetcode Contest Rating Predictor
19 |
20 |
21 |
22 |
23 |
24 |
25 | Predicted Contests |
26 | Date |
27 | Official Result |
28 |
29 |
30 |
31 |
32 | {% for contest in predict_contests %}
33 |
34 |
35 | {{contest.title}}
36 | |
37 | {{contest.startTime.strftime('%Y-%m-%d')}} |
38 |
39 |
40 | LCCN / LCUS
41 |
42 | |
43 |
44 | {% endfor %}
45 |
46 |
47 |
48 | {% endblock %}
49 |
50 |
51 | {% block submain %}
52 |
59 | {% endblock %}
60 |
61 |
62 | {% block footer %}
63 | {{ super() }}
64 | {% endblock %}
65 |
66 |
67 | {% block scripts %}
68 | {{ super() }}
69 | {% endblock %}
70 |
--------------------------------------------------------------------------------
/api/entry.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from fastapi import FastAPI, Request
4 | from fastapi.middleware.cors import CORSMiddleware
5 | from loguru import logger
6 |
7 | from app.config import get_yaml_config
8 | from app.db.mongodb import start_async_mongodb
9 | from app.utils import start_loguru
10 |
11 | from .routers import contest_records, contests, questions
12 |
# Application object and its config are created at import time so the ASGI
# server (e.g. uvicorn) can pick up `app` directly.
app = FastAPI()
yaml_config = get_yaml_config().get("fastapi")


# All public routes are versioned under /api/v1.
app.include_router(contests.router, prefix="/api/v1")
app.include_router(contest_records.router, prefix="/api/v1")
app.include_router(questions.router, prefix="/api/v1")
20 |
21 |
@app.on_event("startup")
async def startup_event():
    # Initialize logging sinks and the async MongoDB client before serving.
    # NOTE(review): `on_event` is deprecated in newer FastAPI in favor of
    # lifespan handlers — consider migrating when upgrading.
    start_loguru(process="api")
    await start_async_mongodb()
26 |
27 |
# CORS: allowed origins come from the `fastapi` section of config.yaml;
# all methods/headers are allowed and credentials are passed through.
app.add_middleware(
    CORSMiddleware,
    allow_origins=yaml_config.get("CORS_allow_origins"),
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
35 |
36 |
@app.middleware("http")
async def log_requests(request: Request, call_next):
    """
    Log every HTTP request with client, method, path, duration and status.
    :param request: the incoming request.
    :param call_next: next ASGI handler in the middleware chain.
    :return: the downstream response, unmodified.
    """
    # perf_counter is monotonic, so the measured duration can never go
    # negative or jump when the system clock is adjusted (time.time() could).
    t1 = time.perf_counter()
    response = await call_next(request)
    t2 = time.perf_counter()
    logger.info(
        f"Received request: {request.client.host} {request.method} {request.url.path} "
        f"Cost {(t2 - t1) * 1e3:.2f} ms {response.status_code=}"
    )
    return response
47 |
--------------------------------------------------------------------------------
/api/routers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/api/routers/__init__.py
--------------------------------------------------------------------------------
/api/routers/contest_records.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import List, Optional
3 |
4 | from beanie.operators import In
5 | from fastapi import APIRouter, Request
6 | from pydantic import BaseModel, NonNegativeInt, conint, conlist
7 |
8 | from api.utils import check_contest_name
9 | from app.db.models import ContestRecordArchive, ContestRecordPredict
10 | from app.db.views import UserKey
11 |
# All endpoints below are mounted under /contest-records.
router = APIRouter(
    prefix="/contest-records",
    tags=["contest_records"],
)
16 |
17 |
@router.get("/count")
async def contest_records_count(
    request: Request,
    contest_name: str,
    archived: Optional[bool] = False,
) -> int:
    """
    Count records of a given contest.
    By default, count predicted contests only.
    Count archived contests when setting `archived = True` explicitly.
    :param request:
    :param contest_name:
    :param archived:
    :return:
    """
    await check_contest_name(contest_name)
    # Both collections share the same field names, so the query shape is
    # identical; only the model (collection) differs.
    model = ContestRecordArchive if archived else ContestRecordPredict
    return await model.find(
        model.contest_name == contest_name,
        model.score != 0,
    ).count()
45 |
46 |
@router.get("/")
async def contest_records(
    request: Request,
    contest_name: str,
    archived: Optional[bool] = False,
    skip: Optional[NonNegativeInt] = 0,
    limit: Optional[conint(ge=1, le=100)] = 25,
) -> List[ContestRecordPredict | ContestRecordArchive]:
    """
    Query all records of a given contest.
    By default, query predicted contests only.
    Query archived contests when setting `archived = True` explicitly.
    :param request:
    :param contest_name:
    :param archived:
    :param skip:
    :param limit:
    :return:
    """
    await check_contest_name(contest_name)
    # Same query shape for both collections; pick the model, then page
    # through non-zero-score records ordered by rank.
    model = ContestRecordArchive if archived else ContestRecordPredict
    return (
        await model.find(
            model.contest_name == contest_name,
            model.score != 0,
        )
        .sort(model.rank)
        .skip(skip)
        .limit(limit)
        .to_list()
    )
90 |
91 |
@router.get("/user")
async def contest_records_user(
    request: Request,
    contest_name: str,
    username: str,
    archived: Optional[bool] = False,
) -> List[ContestRecordPredict | ContestRecordArchive]:
    """
    Query records of a given contest by username.
    By default, query predicted contests only.
    Query archived contests when setting `archived = True` explicitly.
    :param request:
    :param contest_name:
    :param username:
    :param archived:
    :return:
    """
    await check_contest_name(contest_name)
    model = ContestRecordArchive if archived else ContestRecordPredict
    # Match both the raw and lower-cased username instead of an exact match:
    # temporary workaround for the LCUS API user_slug change.
    return await model.find(
        model.contest_name == contest_name,
        In(model.username, [username, username.lower()]),
        model.score != 0,
    ).to_list()
127 |
128 |
class QueryOfPredictedRating(BaseModel):
    """Request body: one contest slug plus 1-26 user keys."""

    contest_name: str
    users: conlist(UserKey, min_length=1, max_length=26)
132 |
133 |
class ResultOfPredictedRating(BaseModel):
    """Projection of a predicted record's rating fields; all default to None."""

    old_rating: Optional[float] = None
    new_rating: Optional[float] = None
    delta_rating: Optional[float] = None
138 |
139 |
@router.post("/predicted-rating")  # formal route
async def predicted_rating(
    request: Request,
    query: QueryOfPredictedRating,
) -> List[Optional[ResultOfPredictedRating]]:
    """
    Query multiple predicted records in a contest.
    :param request:
    :param query:
    :return: one projected result (or None) per requested user, in order.
    """
    await check_contest_name(query.contest_name)
    # Run every single-user lookup concurrently; gather preserves input order.
    lookups = [
        ContestRecordPredict.find_one(
            ContestRecordPredict.contest_name == query.contest_name,
            ContestRecordPredict.data_region == user.data_region,
            ContestRecordPredict.username == user.username,
            projection_model=ResultOfPredictedRating,
        )
        for user in query.users
    ]
    return await asyncio.gather(*lookups)
162 |
163 |
class QueryOfRealTimeRank(BaseModel):
    """Request body: one contest slug plus a single (data_region, username) key."""

    contest_name: str
    user: UserKey
167 |
168 |
class ResultOfRealTimeRank(BaseModel):
    """Projection of an archived record's real-time rank list."""

    # Default to None for consistency with the other projection models in
    # this file, so validation still succeeds when the stored document
    # lacks this field.
    real_time_rank: Optional[list] = None
171 |
172 |
@router.post("/real-time-rank")
async def real_time_rank(
    request: Request,
    query: QueryOfRealTimeRank,
) -> ResultOfRealTimeRank:
    """
    Query user's realtime rank list of a given contest.
    :param request:
    :param query:
    :return:
    """
    await check_contest_name(query.contest_name)
    user = query.user
    return await ContestRecordArchive.find_one(
        ContestRecordArchive.contest_name == query.contest_name,
        ContestRecordArchive.data_region == user.data_region,
        ContestRecordArchive.username == user.username,
        projection_model=ResultOfRealTimeRank,
    )
191 |
--------------------------------------------------------------------------------
/api/routers/contests.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from typing import List, Optional
3 |
4 | from fastapi import APIRouter, Request
5 | from pydantic import BaseModel, NonNegativeInt, conint
6 |
7 | from app.db.models import Contest
8 |
# All endpoints below are mounted under /contests.
router = APIRouter(
    prefix="/contests",
    tags=["contests"],
)
13 |
14 |
class ResultOfContestsUserNum(BaseModel):
    """Projection of a contest with its per-region participant counts."""

    titleSlug: str
    title: str
    startTime: datetime
    # presumably None until the region's count has been crawled — confirm upstream
    user_num_us: Optional[int] = None
    user_num_cn: Optional[int] = None
21 |
22 |
@router.get("/user-num-last-ten")
async def contests_user_num_last_ten(
    request: Request,
) -> List[ResultOfContestsUserNum]:
    """
    Obtain user counts from both the US and CN regions for the last ten contests.
    :param request:
    :return:
    """
    # In any situation, there must have been more than 10 contests in the
    # last 60 days, so the date filter never starves the limit below.
    recent_cutoff = datetime.utcnow() - timedelta(days=60)
    return (
        await Contest.find(
            Contest.startTime > recent_cutoff,
            Contest.user_num_us >= 0,
            Contest.user_num_cn >= 0,
            projection_model=ResultOfContestsUserNum,
        )
        .sort(-Contest.startTime)
        .limit(10)
        .to_list()
    )
45 |
46 |
@router.get("/count")
async def contests_count(
    request: Request,
    archived: Optional[bool] = False,
) -> int:
    """
    Count total contests in database.
    By default, count predicted contests only.
    Count all archived contests when setting `archived = True` explicitly.
    :param request:
    :param archived:
    :return:
    """
    if archived:
        return await Contest.count()
    # A predicted contest is one whose predict_time has been set past epoch.
    return await Contest.find(
        Contest.predict_time > datetime(1970, 1, 1),
    ).count()
67 |
68 |
@router.get("/")
async def contests(
    request: Request,
    archived: Optional[bool] = False,
    skip: Optional[NonNegativeInt] = 0,
    limit: Optional[conint(ge=1, le=25)] = 10,
) -> List[Contest]:
    """
    Query contests in database.
    By default, Query predicted contests only.
    Query archived contests when setting `archived = True` explicitly.
    :param request:
    :param archived:
    :param skip:
    :param limit:
    :return:
    """
    # Pick the base query first (all contests vs. predicted-only), then apply
    # identical ordering and paging in one place.
    base_query = (
        Contest.find_all()
        if archived
        else Contest.find(Contest.predict_time > datetime(1970, 1, 1))
    )
    return (
        await base_query.sort(-Contest.startTime).skip(skip).limit(limit).to_list()
    )
105 |
--------------------------------------------------------------------------------
/api/routers/questions.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import List, Optional
3 |
4 | from fastapi import APIRouter, HTTPException, Request
5 | from loguru import logger
6 | from pydantic import BaseModel, NonNegativeInt, conlist
7 |
8 | from api.utils import check_contest_name
9 | from app.db.models import Question
10 |
# All endpoints below are mounted under /questions.
router = APIRouter(
    prefix="/questions",
    tags=["questions"],
)
15 |
16 |
class QueryOfQuestions(BaseModel):
    """Request body: a contest slug OR 1-4 question ids (exactly one of the two)."""

    contest_name: Optional[str] = None
    question_id_list: Optional[
        conlist(NonNegativeInt, min_length=1, max_length=4)
    ] = None
22 |
23 |
@router.post("/")
async def questions(
    request: Request,
    query: QueryOfQuestions,
) -> List[Question]:
    """
    Query questions for a given contest.
    Exactly one of `contest_name` and `question_id_list` must be provided
    (providing both, or neither, is rejected with HTTP 400).
    Questions number must between 1 and 4 inclusively.
    :param request:
    :param query:
    :return:
    """
    # `bool == bool` here is the negated XOR: true when neither or both given.
    if bool(query.contest_name) == bool(query.question_id_list):
        msg = "contest_name OR question_id_list must be given!"
        logger.error(msg)
        raise HTTPException(status_code=400, detail=msg)
    if query.contest_name:
        # Query by contest slug.
        await check_contest_name(query.contest_name)
        return await Question.find(
            Question.contest_name == query.contest_name
        ).to_list()
    # Otherwise query by the explicit question id list, concurrently.
    lookups = (
        Question.find_one(Question.question_id == qid)
        for qid in query.question_id_list
    )
    return await asyncio.gather(*lookups)
54 |
--------------------------------------------------------------------------------
/api/utils.py:
--------------------------------------------------------------------------------
1 | from fastapi import HTTPException
2 | from loguru import logger
3 |
4 | from app.db.models import Contest
5 |
6 |
async def check_contest_name(contest_name: str) -> None:
    """
    Validate that `contest_name` refers to an existing contest.
    - Valid: returns silently
    - Invalid: raises HTTPException(400) (fastapi will return error msg gracefully)
    :param contest_name:
    :return:
    """
    if await Contest.find_one(Contest.titleSlug == contest_name) is not None:
        return
    msg = f"contest not found for {contest_name=}"
    logger.error(msg)
    raise HTTPException(status_code=400, detail=msg)
20 |
--------------------------------------------------------------------------------
/app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/app/__init__.py
--------------------------------------------------------------------------------
/app/config.py:
--------------------------------------------------------------------------------
1 | import yaml
2 |
3 | yaml_config = None
4 |
5 |
def get_yaml_config():
    """
    Lazily parse `config.yaml` (relative to the working directory) and cache
    the result in the module-level `yaml_config` global.
    :return: the parsed configuration as returned by `yaml.safe_load`.
    """
    global yaml_config
    if yaml_config is None:
        # Pin the encoding: the platform default is locale-dependent and
        # could mis-decode non-ASCII values in the config file.
        with open("config.yaml", "r", encoding="utf-8") as yaml_file:
            yaml_config = yaml.safe_load(yaml_file)
    return yaml_config
16 |
--------------------------------------------------------------------------------
/app/constants.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import Final, NamedTuple
3 |
# Defaults applied to users who have never attended a contest before.
DEFAULT_NEW_USER_ATTENDED_CONTESTS_COUNT: Final[int] = 0
DEFAULT_NEW_USER_RATING: Final[float] = 1500.0
6 |
7 |
class CronTimePointWkdHrMin(NamedTuple):
    """A weekly-recurring time point for cron-style scheduling."""

    # 0 = Monday ... 6 = Sunday (matches the "# Sunday"/"# Saturday" usages below)
    weekday: int
    hour: int
    minute: int
12 |
13 |
# Observed that leetcode would update more user's result within 10 minutes after ending,
# so the minute field is set past the top of the hour (30 here; an earlier note
# said 15) in order to wait for the final result.
WEEKLY_CONTEST_START = CronTimePointWkdHrMin(
    6,  # Sunday
    2,  # hour
    30,  # minute
)
BIWEEKLY_CONTEST_START = CronTimePointWkdHrMin(
    5,  # Saturday
    14,  # hour
    30,  # minute
)
26 |
27 |
class SingleContestDatetime(NamedTuple):
    """A concrete contest: its sequence number and its start datetime."""

    num: int
    dt: datetime
31 |
32 |
# Take "weekly-contest-294" and "biweekly-contest-78" as two baselines.
# (Naive datetimes; their times match the CronTimePoint values above.)
WEEKLY_CONTEST_BASE = SingleContestDatetime(
    294,
    datetime(2022, 5, 22, 2, 30),
)
BIWEEKLY_CONTEST_BASE = SingleContestDatetime(
    78,
    datetime(2022, 5, 14, 14, 30),
)
42 |
--------------------------------------------------------------------------------
/app/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/app/core/__init__.py
--------------------------------------------------------------------------------
/app/core/elo.py:
--------------------------------------------------------------------------------
1 | from functools import lru_cache
2 | from typing import Final
3 |
4 | import numpy as np
5 | from numba import jit
6 |
7 |
@lru_cache
def pre_sum_of_sigma(k: int) -> float:
    """
    Cached prefix sum of the geometric series: sum of (5/7)**i for i in 0..k.
    Recursion plus lru_cache makes repeated queries O(1) after first computation.
    :param k: upper index of the series (must be >= 0).
    :return: the partial sum.
    :raises ValueError: if k is negative.
    """
    if k < 0:
        raise ValueError(f"{k=}, pre_sum's index less than zero!")
    # Guard-clause base case; the dead commented-out isinstance check was removed.
    if k == 0:
        return 1
    return (5 / 7) ** k + pre_sum_of_sigma(k - 1)
20 |
21 |
@lru_cache
def adjustment_for_delta_coefficient(k: int) -> float:
    """
    Damping coefficient 1 / (1 + sum((5/7)**i for i in range(k + 1))),
    computed via the cached `pre_sum_of_sigma` series (faster than the sum).
    For large k the value converges, so it is clamped to 2/9 beyond k=100.
    :param k:
    :return:
    """
    if k > 100:
        return 2 / 9
    return 1 / (1 + pre_sum_of_sigma(k))
32 |
33 |
def delta_coefficients(ks: np.ndarray) -> np.ndarray:
    """
    Vectorized wrapper: per-element delta coefficient for every k in `ks`.
    :param ks: array of attended-contest counts.
    :return: array of coefficients, same shape as `ks`.
    """
    as_coefficient = np.vectorize(adjustment_for_delta_coefficient)
    return as_coefficient(ks)
42 |
43 |
@jit(nopython=True, fastmath=True, parallel=True)
def expected_win_rate(vector: np.ndarray, scalar: float) -> np.ndarray:
    """
    Calculate the expected win rate based on the Elo rating system:
    elementwise 1 / (1 + 10 ** ((scalar - vector) / 400)), i.e. the expected
    score of each player in `vector` against a player rated `scalar`.
    Test result had shown this function has a quite decent performance.
    :param vector: array of player ratings.
    :param scalar: the single opponent's rating.
    :return: array of expected scores, same shape as `vector`.
    """
    return 1 / (1 + np.power(10, (scalar - vector) / 400))
54 |
55 |
@jit(nopython=True, fastmath=True, parallel=True)
def binary_search_expected_rating(mean_rank: int, all_rating: np.ndarray) -> float:
    """
    Perform binary search to find the rating corresponding to the given mean rank.
    Solves sum(expected_win_rate(all_rating, x)) == mean_rank - 1 for x in
    [0, 4000]; the sum is monotonically decreasing in x, so bisection applies.
    :param mean_rank: target (geometric-mean) rank.
    :param all_rating: ratings of all participants.
    :return: the rating found, to ~0.01 precision or at most 25 iterations.
    """
    target = mean_rank - 1
    lo, hi = 0, 4000
    max_iteration = 25
    precision: Final[float] = 0.01
    # Stop when the bracket is tighter than `precision` or the iteration
    # budget runs out, whichever happens first.
    while hi - lo > precision and max_iteration >= 0:
        mid = lo + (hi - lo) / 2
        if np.sum(expected_win_rate(all_rating, mid)) < target:
            # Expected-win sum too small -> candidate rating is too high.
            hi = mid
        else:
            lo = mid
        max_iteration -= 1
    return mid
76 |
77 |
@jit(nopython=True, fastmath=True, parallel=True)
def get_expected_rating(rank: int, rating: float, all_rating: np.ndarray) -> float:
    """
    Calculate the expected rating based on the given rank, player rating, and array of all ratings.
    Combines the seeded rank with the actual rank via geometric mean, then
    inverts that back to a rating with binary search.
    :param rank: actual contest rank.
    :param rating: the player's current rating.
    :param all_rating: ratings of all participants (includes this player).
    :return: the expected (performance) rating.
    """
    # +0.5 offsets the player's own entry in the sum (self vs self is 0.5).
    expected_rank = np.sum(expected_win_rate(all_rating, rating)) + 0.5
    mean_rank = np.sqrt(expected_rank * rank)
    return binary_search_expected_rating(mean_rank, all_rating)
90 |
91 |
def elo_delta(ranks: np.ndarray, ratings: np.ndarray, ks: np.ndarray) -> np.ndarray:
    """
    Calculate the Elo rating changes (delta) based on the given ranks, current ratings, and coefficients.
    :param ranks: per-contestant ranks, aligned with `ratings` and `ks`.
    :param ratings: per-contestant current ratings.
    :param ks: per-contestant attended-contest counts.
    :return: per-contestant rating deltas.
    """
    expected = np.array(
        [
            get_expected_rating(rank, rating, ratings)
            for rank, rating in zip(ranks, ratings)
        ]
    )
    # Damp the raw difference by each contestant's delta coefficient.
    return (expected - ratings) * delta_coefficients(ks)
107 |
--------------------------------------------------------------------------------
/app/core/fft.py:
--------------------------------------------------------------------------------
1 | from typing import Final
2 |
3 | import numpy as np
4 | from scipy.signal import fftconvolve
5 |
6 | from app.core.elo import delta_coefficients
7 |
EXPAND_SIZE: Final[int] = 100
MAX_RATING: Final[int] = 4000 * EXPAND_SIZE


def pre_calc_convolution(old_rating: np.ndarray) -> np.ndarray:
    """
    Pre-calculate the convolution table used by the FFT-based Elo update.
    :param old_rating: array of current (unscaled) ratings.
    :return: convolution values, length 2 * MAX_RATING + 1.
    """
    # Expected-score curve sampled on the scaled rating-difference grid.
    exponents = np.arange(-MAX_RATING, MAX_RATING + 1) / (400 * EXPAND_SIZE)
    win_probability = 1 / (1 + np.power(10, exponents))
    # Histogram of the ratings on the scaled integer grid.
    rating_histogram = np.bincount(np.round(old_rating * EXPAND_SIZE).astype(int))
    # Convolve and keep only the first 2 * MAX_RATING + 1 entries.
    full = fftconvolve(win_probability, rating_histogram, mode="full")
    return full[: 2 * MAX_RATING + 1]
25 |
26 |
def get_expected_rank(convolution: np.ndarray, x: int) -> float:
    """
    Get the expected rank based on pre-calculated convolution values.
    :param convolution: output of `pre_calc_convolution`.
    :param x: rating scaled by EXPAND_SIZE (may be negative; MAX_RATING shifts it).
    :return: expected rank.
    """
    return 0.5 + convolution[MAX_RATING + x]
35 |
36 |
def get_equation_left(convolution: np.ndarray, x: int) -> float:
    """
    Get the left side of the expected-rating equation from the pre-calculated
    convolution values (same lookup as `get_expected_rank`, but offset by 1).
    :param convolution: output of `pre_calc_convolution`.
    :param x: rating scaled by EXPAND_SIZE.
    :return: the equation's left-hand side value.
    """
    return 1 + convolution[MAX_RATING + x]
45 |
46 |
def binary_search_expected_rating(convolution: np.ndarray, mean_rank: float) -> int:
    """
    Perform binary search to find the expected rating for a given mean rank.
    `get_equation_left` is non-increasing in the scaled rating, so bisection
    finds the boundary where it first drops below `mean_rank`.
    :param convolution: output of `pre_calc_convolution`.
    :param mean_rank: target (geometric-mean) rank.
    :return: the expected rating scaled by EXPAND_SIZE.
    """
    lo, hi = 0, MAX_RATING
    while lo < hi:
        mid = (lo + hi) // 2
        if get_equation_left(convolution, mid) < mean_rank:
            # Value already below target -> candidate rating too high.
            hi = mid
        else:
            lo = mid + 1
    # NOTE(review): returns the last probed `mid`, not `lo`; at termination they
    # can differ by 1 — presumably acceptable at EXPAND_SIZE precision, confirm.
    return mid
62 |
63 |
def get_expected_rating(rank: int, rating: float, convolution: np.ndarray) -> float:
    """
    Calculate the expected rating based on current rank, rating, and pre-calculated convolution.
    :param rank: actual contest rank.
    :param rating: the player's current (unscaled) rating.
    :param convolution: output of `pre_calc_convolution`.
    :return: expected rating, scaled back down by EXPAND_SIZE.
    """
    scaled_rating = round(rating * EXPAND_SIZE)
    expected_rank = get_expected_rank(convolution, scaled_rating)
    mean_rank = np.sqrt(expected_rank * rank)
    return binary_search_expected_rating(convolution, mean_rank) / EXPAND_SIZE
75 |
76 |
def fft_delta(ranks: np.ndarray, ratings: np.ndarray, ks: np.ndarray) -> np.ndarray:
    """
    Calculate Elo rating changes using Fast Fourier Transform (FFT)
    :param ranks: per-contestant ranks, aligned with `ratings` and `ks`.
    :param ratings: per-contestant current ratings.
    :param ks: per-contestant attended-contest counts.
    :return: per-contestant rating deltas.
    """
    # One shared convolution table serves every contestant's lookup.
    convolution = pre_calc_convolution(ratings)
    expected = np.array(
        [
            get_expected_rating(rank, rating, convolution)
            for rank, rating in zip(ranks, ratings)
        ]
    )
    return (expected - ratings) * delta_coefficients(ks)
93 |
--------------------------------------------------------------------------------
/app/core/predictor.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import List
3 |
4 | import numpy as np
5 | from beanie.odm.operators.update.general import Set
6 | from loguru import logger
7 |
8 | from app.core.elo import elo_delta
9 | from app.db.models import Contest, ContestRecordPredict, User
10 | from app.utils import exception_logger_reraise, gather_with_limited_concurrency
11 |
12 |
async def update_rating_immediately(
    records: List[ContestRecordPredict],
) -> None:
    """
    Update users' rating and attendedContestsCount (if it's biweekly contest)
    :param records: predicted records whose new ratings are written back to User
    :return:
    """
    logger.info("immediately write predicted result back into User collection")
    tasks = list()
    for record in records:
        user_query = User.find_one(
            User.username == record.username,
            User.data_region == record.data_region,
        )
        tasks.append(
            user_query.update(
                Set(
                    {
                        User.rating: record.new_rating,
                        User.attendedContestsCount: record.attendedContestsCount + 1,
                        User.update_time: datetime.utcnow(),
                    }
                )
            )
        )
    await gather_with_limited_concurrency(tasks, max_con_num=50)
    logger.success("finished updating User using predicted result")
39 |
40 |
@exception_logger_reraise
async def predict_contest(
    contest_name: str,
) -> None:
    """
    Core predict function using official elo rating algorithm
    Pipeline: load nonzero-score records sorted by rank, compute deltas via
    elo_delta, persist per-record predictions, write ratings back to User for
    biweekly contests, then stamp Contest.predict_time.
    :param contest_name: titleSlug of the contest to predict
    :return:
    """
    # zero-score entrants are excluded from prediction entirely
    records = (
        await ContestRecordPredict.find(
            ContestRecordPredict.contest_name == contest_name,
            ContestRecordPredict.score != 0,
        )
        .sort(ContestRecordPredict.rank)
        .to_list()
    )

    rank_array = np.array([record.rank for record in records])
    rating_array = np.array([record.old_rating for record in records])
    k_array = np.array([record.attendedContestsCount for record in records])
    # core prediction
    delta_rating_array = elo_delta(rank_array, rating_array, k_array)
    new_rating_array = rating_array + delta_rating_array

    # update ContestRecordPredict collection
    # single timestamp shared by all records of this prediction run
    predict_time = datetime.utcnow()
    for i, record in enumerate(records):
        record.delta_rating = delta_rating_array[i]
        record.new_rating = new_rating_array[i]
        record.predict_time = predict_time
    tasks = [record.save() for record in records]
    await gather_with_limited_concurrency(tasks, max_con_num=50)
    logger.success("predict_contest finished updating ContestRecordPredict")

    if contest_name.lower().startswith("bi"):
        # for biweekly contests only, because next day's weekly contest needs the latest rating
        await update_rating_immediately(records)

    # update Contest collection to indicate that this contest has been predicted.
    # by design, predictions should only be run once.
    await Contest.find_one(Contest.titleSlug == contest_name).update(
        Set(
            {
                Contest.predict_time: datetime.utcnow(),
            }
        )
    )
    logger.info("finished updating predict_time in Contest database")
90 |
--------------------------------------------------------------------------------
/app/crawler/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/app/crawler/__init__.py
--------------------------------------------------------------------------------
/app/crawler/contest.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Dict, Final, List, Optional
3 |
4 | from loguru import logger
5 |
6 | from app.crawler.utils import multi_http_request
7 | from app.db.models import DATA_REGION
8 |
9 |
async def request_contest_user_num(
    contest_name: str,
    data_region: DATA_REGION,
) -> Optional[int]:
    """
    Fetch user_num in a given data region
    :param contest_name: titleSlug of the contest
    :param data_region: "US" or "CN"
    :return: participant count, or None when the request failed or the field is absent
    """
    url: Final[str] = (
        f"https://leetcode.com/contest/api/ranking/{contest_name}/?region=us"
        if data_region == "US"
        else f"https://leetcode.cn/contest/api/ranking/{contest_name}/?region=cn"
    )
    response = (
        await multi_http_request(
            {
                "req": {
                    "url": url,
                    "method": "GET",
                }
            }
        )
    )[0]
    if response is None:
        # multi_http_request returns None for a request that exhausted retries;
        # the previous code crashed on `.json()` in that case.
        logger.error(f"failed to fetch ranking page {contest_name=} {data_region=}")
        return None
    return response.json().get("user_num")
37 |
38 |
async def request_past_contests(
    max_page_num: int,
) -> List[Dict]:
    """
    Fetch past contests information
    Sends one GraphQL request per page (1..max_page_num) concurrently and
    flattens the per-page results into one list.
    :param max_page_num: number of pages to request
    :return: contest metadata dicts (title, titleSlug, startTime, duration)
    """
    response_list = await multi_http_request(
        {
            page_num: {
                "url": "https://leetcode.com/graphql/",
                "method": "POST",
                "json": {
                    "query": """
                    query pastContests($pageNo: Int) {
                        pastContests(pageNo: $pageNo) {
                            data { title titleSlug startTime duration }
                        }
                    }
                    """,
                    "variables": {"pageNo": page_num},
                },
            }
            for page_num in range(1, max_page_num + 1)
        },
        concurrent_num=10,
    )
    past_contests = list()
    for response in response_list:
        # multi_http_request yields None for pages that exhausted retries;
        # skip them instead of crashing on `.json()` (same guard as
        # request_contest_records).
        if response is None:
            continue
        past_contests.extend(
            response.json().get("data", {}).get("pastContests", {}).get("data", [])
        )
    logger.info(f"{max_page_num=} {len(past_contests)=}")
    return past_contests
74 |
75 |
async def request_contest_homepage_text() -> str:
    """
    Fetch the raw HTML text of the LeetCode contest homepage.
    Callers scrape buildId / pageNum out of this text.
    :return: response body as text
    """
    req = (
        await multi_http_request(
            {
                "req": {
                    "url": "https://leetcode.com/contest/",
                    "method": "GET",
                }
            }
        )
    )[0]
    # NOTE(review): req is None if the request exhausted its retries, so
    # `.text` would raise AttributeError here — confirm callers tolerate that.
    return req.text
88 |
89 |
async def request_next_two_contests() -> List[Dict]:
    """
    save two coming contests
    Scrapes the Next.js buildId from the contest homepage, then downloads the
    prebuilt contest.json and extracts `topTwoContests` from it.
    :return: up to two upcoming contests; empty list when scraping fails
    """
    contest_page_text = await request_contest_homepage_text()
    build_id_search = re.search(
        re.compile(r'"buildId":\s*"(.*?)",'),
        contest_page_text,
    )
    if not build_id_search:
        logger.error("cannot find buildId")
        return []
    build_id = build_id_search.groups()[0]
    next_data = (
        await multi_http_request(
            {
                "req": {
                    "url": f"https://leetcode.com/_next/data/{build_id}/contest.json",
                    "method": "GET",
                }
            }
        )
    )[0].json()
    top_two_contests = list()
    # the payload nests a dehydrated state object; scan each query entry for
    # the one carrying topTwoContests
    for queries in (
        next_data.get("pageProps", {}).get("dehydratedState", {}).get("queries", {})
    ):
        if "topTwoContests" in (data := queries.get("state", {}).get("data", {})):
            top_two_contests = data.get("topTwoContests")
            break
    if not top_two_contests:
        logger.error("cannot find topTwoContests")
        return []
    logger.info(f"{top_two_contests=}")
    return top_two_contests
126 |
127 |
async def request_all_past_contests() -> List[Dict]:
    """
    Save past contests
    :return: metadata of every past contest, across all pages
    """
    contest_page_text = await request_contest_homepage_text()
    # the homepage embeds the total page count; we need it to paginate fully
    max_page_num_search = re.search(r'"pageNum":\s*(\d+)', contest_page_text)
    if max_page_num_search is None:
        logger.error("cannot find pageNum")
        return []
    max_page_num = int(max_page_num_search.group(1))
    return await request_past_contests(max_page_num)
144 |
145 |
async def request_recent_contests() -> List[Dict]:
    """
    Save 10 past contests on the first page
    :return: the contests listed on the first page
    """
    # a single page holds 10 contests, which is plenty for a recent refresh
    return await request_past_contests(1)
154 |
--------------------------------------------------------------------------------
/app/crawler/contest_record_and_submission.py:
--------------------------------------------------------------------------------
1 | from math import ceil
2 | from typing import Dict, Final, List, Tuple
3 |
4 | from loguru import logger
5 |
6 | from app.crawler.utils import multi_http_request
7 | from app.db.models import DATA_REGION
8 |
9 |
async def request_contest_records(
    contest_name: str,
    data_region: DATA_REGION,
) -> Tuple[List[Dict], List[Dict]]:
    """
    Fetch all ranking records of a contest by sending http request per page concurrently
    :param contest_name: titleSlug of the contest
    :param data_region: "US" or "CN", selects which site to crawl
    :return: (ranking record dicts, nested submission dicts), flattened across pages
    """
    base_url: Final[str] = (
        "https://leetcode.com" if data_region == "US" else "https://leetcode.cn"
    )
    logger.info(f"start {base_url=}")
    req = (
        await multi_http_request(
            {
                "req": {
                    "url": f"{base_url}/contest/api/ranking/{contest_name}/",
                    "method": "GET",
                }
            }
        )
    )[0]
    data = req.json()
    # NOTE(review): user_num is assumed present; ceil(None / 25) would raise —
    # confirm the API contract before hardening.
    user_num = data.get("user_num")
    # the ranking API serves 25 records per page
    page_max = ceil(user_num / 25)
    contest_record_list = list()
    nested_submission_list = list()
    url_list = [
        # Fix: the separator was garbled as "®ion" (mojibake of "&reg" + "ion"),
        # producing an invalid query string; it must be "&region=global".
        f"{base_url}/contest/api/ranking/{contest_name}/?pagination={page}&region=global"
        for page in range(1, page_max + 1)
    ]
    responses = await multi_http_request(
        {url: {"url": url, "method": "GET"} for url in url_list},
        # crawl the US site more gently
        concurrent_num=5 if data_region == "US" else 10,
    )
    for res in responses:
        # multi_http_request returns None for pages that exhausted retries
        if res is None:
            continue
        res_dict = res.json()
        contest_record_list.extend(res_dict.get("total_rank"))
        nested_submission_list.extend(res_dict.get("submissions"))
    logger.success("finished")
    return contest_record_list, nested_submission_list
55 |
--------------------------------------------------------------------------------
/app/crawler/question.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional
2 |
3 | from app.crawler.utils import multi_http_request
4 | from app.db.models import DATA_REGION
5 |
6 |
async def request_question_list(
    contest_name: str,
    data_region: DATA_REGION = "CN",
) -> Optional[List[Dict]]:
    """
    Send HTTP request to get questions data of a given contest
    :param contest_name:
    :param data_region: "US" or "CN" (default)
    :return: list of question dicts from the contest info API
    :raises ValueError: for an unknown data_region
    """
    if data_region == "US":
        domain = "leetcode.com"
    elif data_region == "CN":
        domain = "leetcode.cn"
    else:
        raise ValueError(f"{data_region=}")
    responses = await multi_http_request(
        {
            "req": {
                "url": f"https://{domain}/contest/api/info/{contest_name}/",
                "method": "GET",
            }
        }
    )
    data = responses[0].json()
    question_list = data.get("questions")
    if data_region == "CN":
        # CN payloads localize `title`; expose the English title instead
        for question in question_list:
            question["title"] = question["english_title"]
    return question_list
38 |
--------------------------------------------------------------------------------
/app/crawler/user.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 |
3 | from app.crawler.utils import multi_http_request
4 | from app.db.models import DATA_REGION
5 |
6 |
async def request_user_rating_and_attended_contests_count(
    data_region: DATA_REGION,
    username: str,
) -> Tuple[float | None, int | None]:
    """
    request user's rating, attended contests count
    CN and US sites expose different GraphQL endpoints and argument names
    (userSlug vs username), hence the two near-identical branches.
    :param data_region: "CN" targets leetcode.cn; anything else targets leetcode.com
    :param username: user slug (CN) or username (US)
    :return: (rating, attendedContestsCount); (None, None) when the user has no ranking data
    :raises RuntimeError: when the HTTP request itself failed after retries
    """
    if data_region == "CN":
        req = (
            await multi_http_request(
                {
                    (data_region, username): {
                        "url": "https://leetcode.cn/graphql/noj-go/",
                        "method": "POST",
                        "json": {
                            "query": """
                            query userContestRankingInfo($userSlug: String!) {
                                userContestRanking(userSlug: $userSlug) {
                                    attendedContestsCount
                                    rating
                                }
                            }
                            """,
                            "variables": {"userSlug": username},
                        },
                    }
                }
            )
        )[0]
    else:
        req = (
            await multi_http_request(
                {
                    (data_region, username): {
                        "url": "https://leetcode.com/graphql/",
                        "method": "POST",
                        "json": {
                            "query": """
                            query getContestRankingData($username: String!) {
                                userContestRanking(username: $username) {
                                    attendedContestsCount
                                    rating
                                }
                            }
                            """,
                            "variables": {"username": username},
                        },
                    }
                }
            )
        )[0]
    if req is None:
        raise RuntimeError(f"HTTP request failed for {data_region=} {username=}")
    if (graphql_res := req.json().get("data", {}).get("userContestRanking")) is None:
        # Watch out: `None` means that it cannot request information about this user, it should be a new user.
        return None, None
    else:
        return graphql_res.get("rating"), graphql_res.get("attendedContestsCount")
68 |
--------------------------------------------------------------------------------
/app/crawler/utils.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from collections import defaultdict, deque
3 | from typing import Any, Dict, List, Optional
4 |
5 | import httpx
6 | from loguru import logger
7 |
8 | headers = {
9 | "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0",
10 | }
11 |
12 |
async def multi_http_request(
    multi_requests: Dict,
    concurrent_num: int = 5,
    retry_num: int = 10,
) -> List[Optional[httpx.Response]]:
    """
    Simple HTTP requests queue with speed control and retry automatically, hopefully can get corresponding response.
    Failed response would be `None` but not a `response` object, so invokers MUST verify for None values.
    Notice that `multi_requests` is `Dict` but not `Sequence` so that data accessing would be easier.
    Because all the stuff are in memory, so DO NOT pass a long `multi_requests` in especially when `response` is huge.
    :param multi_requests: mapping of arbitrary key -> kwargs for httpx.AsyncClient.request
    :param concurrent_num: max number of requests sent in one batch
    :param retry_num: max retries per request before it is given up (its result becomes None)
    :return: responses in the order keys were first processed; None for failed requests
    """
    # response_mapper value means: [int: retried times / Response: successful result]
    response_mapper: Dict[Any, int | httpx.Response] = defaultdict(int)
    crawler_queue = deque(multi_requests.items())
    total_num = len(crawler_queue)
    # gradually adjust wait_time by detect number of failed requests in the last round.
    wait_time = 0
    while crawler_queue:
        requests_list = list()
        # gradually increase wait_time according to max retry times
        # wait_time = response_mapper[job_queue[-1][0]]
        while len(requests_list) < concurrent_num and crawler_queue:
            key, request = crawler_queue.popleft()
            # reading response_mapper[key] inserts the key on first touch
            # (defaultdict), which fixes this request's slot in the result order
            if response_mapper[key] >= retry_num:
                logger.error(
                    f"request reached max retry_num. {key=}, req={multi_requests[key]}"
                )
                continue
            requests_list.append((key, request))
        if not requests_list:
            break
        logger.info(
            f"remaining={len(crawler_queue) / total_num * 100 :.2f}% wait_time={wait_time} "
            f"requests_list={[(key, response_mapper[key]) for key, request in requests_list]}"
        )
        # back off before each batch; wait_time grew with last round's failures
        await asyncio.sleep(wait_time)
        async with httpx.AsyncClient(headers=headers) as client:
            tasks = [client.request(**request) for key, request in requests_list]
            # return_exceptions=True so one failure doesn't cancel the whole batch
            response_list = await asyncio.gather(*tasks, return_exceptions=True)
        wait_time = 0
        for response, (key, request) in zip(response_list, requests_list):
            if isinstance(response, httpx.Response) and response.status_code == 200:
                # TODO: Very high memory usage here when saving response directly, say, if run 20000 requests.
                response_mapper[key] = response
            else:
                # response could be an Exception here
                logger.warning(
                    f"multi_http_request error: {request=} "
                    f"response.status_code: "
                    f"{response.status_code if isinstance(response, httpx.Response) else response}"
                )
                # bump retry count, slow the next batch down, and requeue
                response_mapper[key] += 1
                wait_time += 1
                crawler_queue.append((key, request))
    return [
        None if isinstance(response, int) else response
        for key, response in response_mapper.items()
    ]
75 |
--------------------------------------------------------------------------------
/app/db/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/app/db/__init__.py
--------------------------------------------------------------------------------
/app/db/components.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import List, Literal, Optional
3 |
4 | from pydantic import BaseModel, Field
5 |
6 |
class PredictionEvent(BaseModel):
    """A named, timestamped step with a status; stored in Contest.prediction_progress."""

    name: str
    # optional human-readable detail for the event
    description: Optional[str] = None
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    status: Literal["Ongoing", "Passed", "Failed"] = "Ongoing"
12 |
13 |
class UserContestHistoryRecord(BaseModel):
    """One entry of a user's contest history; stored in User.contest_history."""

    contest_title: str
    finishTimeInSeconds: int
    # Actually, `rating` here is `new_rating` in ContestRecord
    rating: float
    ranking: int
    solved_questions_id: Optional[List[int]] = None
21 |
--------------------------------------------------------------------------------
/app/db/models.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import Counter, List, Literal, Optional, Tuple
3 |
4 | from beanie import Document
5 | from pydantic import Field
6 | from pymongo import IndexModel
7 |
8 | from app.db.components import PredictionEvent, UserContestHistoryRecord
9 |
10 | DATA_REGION = Literal["CN", "US"]
11 |
12 |
class Contest(Document):
    """Contest metadata; one document per LeetCode contest, keyed by titleSlug."""

    titleSlug: str
    title: str
    startTime: datetime
    duration: int
    # derived as startTime + duration when upserting, so it can be indexed
    endTime: datetime
    # True once the contest has finished
    past: bool
    update_time: datetime = Field(default_factory=datetime.utcnow)
    # set exactly once, when the prediction for this contest has been run
    predict_time: Optional[datetime] = None
    user_num_us: Optional[int] = None
    user_num_cn: Optional[int] = None
    # NOTE(review): `int` looks wrong for a field named *_array — probably a
    # serialized list; confirm the stored shape before relying on this type.
    convolution_array: Optional[int] = None
    prediction_progress: Optional[List[PredictionEvent]] = None

    class Settings:
        indexes = [
            IndexModel("titleSlug", unique=True),
            "title",
            "startTime",
            "endTime",
            "predict_time",
        ]
35 |
36 |
class ContestRecord(Document):
    """Base document for one user's result in one contest."""

    contest_name: str
    contest_id: int
    username: str
    user_slug: str
    data_region: DATA_REGION
    country_code: Optional[str] = None
    country_name: Optional[str] = None
    rank: int
    score: int
    finish_time: datetime
    # rating fields start empty and are backfilled by the prediction pipeline
    attendedContestsCount: Optional[int] = None
    old_rating: Optional[float] = None
    new_rating: Optional[float] = None
    delta_rating: Optional[float] = None

    class Settings:
        indexes = [
            "contest_name",
            "username",
            "user_slug",
            "rank",
            "data_region",
        ]
61 |
62 |
class ContestRecordPredict(ContestRecord):
    """Contest records used for prediction; write-once by design."""

    # Predicted records will be inserted only once, won't update any fields.
    # Records in this collection can be used to calculate MSE directly even after a long time because it won't change.
    insert_time: datetime = Field(default_factory=datetime.utcnow)
    predict_time: Optional[datetime] = None
68 |
69 |
class ContestRecordArchive(ContestRecord):
    """Contest records kept long-term; unlike the predict collection, these get updated."""

    # Archived records will be updated.
    # LeetCode would rejudge some submissions(cheat detection, adding test cases, etc.)
    update_time: datetime = Field(default_factory=datetime.utcnow)
    real_time_rank: Optional[list] = None
75 |
76 |
class Question(Document):
    """Per-contest question metadata plus post-contest statistics."""

    question_id: int
    credit: int
    title: str
    title_slug: str
    update_time: datetime = Field(default_factory=datetime.utcnow)
    contest_name: str
    # question index within the contest — presumably positional; confirm base (0 or 1)
    qi: int
    real_time_count: Optional[List[int]] = None
    # For every question, save the quantiles of users' ratings who passed that question.
    user_ratings_quantiles: Optional[List[float]] = None
    # For every question, save the rating bins for users who passed each question.
    # Each tuple represents a rating range, and the second element is the count of users within that range.
    # For example, if 888 users have ratings in the range `[1100, 1150)`, the tuple would be `(1100, 888)`.
    # The default range is 50, maintaining consistency with the distribution chart on LeetCode user homepages.
    user_ratings_bins: Optional[List[Tuple[int, int]]] = None
    average_fail_count: Optional[int] = None
    lang_counter: Optional[Counter] = None
    difficulty: Optional[float] = None
    # For every question, save the first 10 users who finished this question
    first_ten_users: Optional[List[Tuple[str, datetime]]] = None
    topics: Optional[List[str]] = None

    class Settings:
        indexes = [
            "question_id",
            "title_slug",
            "contest_name",
        ]
106 |
107 |
class Submission(Document):
    """One accepted/attempted submission of one user on one question in a contest."""

    # these four can be used as compound Index
    contest_name: str
    username: str
    data_region: DATA_REGION
    question_id: int
    date: datetime
    fail_count: int
    credit: int
    submission_id: int
    status: int
    contest_id: int
    update_time: datetime = Field(default_factory=datetime.utcnow)
    # watch out: US data_region doesn't have `lang` field before weekly-contest-364
    lang: Optional[str] = None

    class Settings:
        indexes = [
            "contest_name",
            "username",
            "data_region",
            "question_id",
            "date",
        ]
132 |
133 |
class User(Document):
    """LeetCode user; uniquely identified by (username, data_region), see UserKey."""

    username: str
    user_slug: str
    data_region: DATA_REGION
    attendedContestsCount: int
    rating: float
    update_time: datetime = Field(default_factory=datetime.utcnow)
    contest_history: Optional[List[UserContestHistoryRecord]] = None
    avatar_url: Optional[str] = None

    class Settings:
        indexes = [
            "username",
            "user_slug",
            "data_region",
            "rating",
        ]
151 |
--------------------------------------------------------------------------------
/app/db/mongodb.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import urllib.parse
3 | from typing import Optional
4 |
5 | # just for temporary autocompleting, given that motor doesn't have type annotations yet,
6 | # see https://jira.mongodb.org/browse/MOTOR-331
7 | # and https://www.mongodb.com/community/forums/t/support-for-type-hint/107593
8 | from beanie import init_beanie
9 | from loguru import logger
10 | from motor.core import ( # bad idea to use these three here,
11 | AgnosticClient,
12 | AgnosticCollection,
13 | AgnosticDatabase,
14 | )
15 | from motor.motor_asyncio import AsyncIOMotorClient
16 |
17 | from app.config import get_yaml_config
18 | from app.db.models import (
19 | Contest,
20 | ContestRecordArchive,
21 | ContestRecordPredict,
22 | Question,
23 | Submission,
24 | User,
25 | )
26 |
27 | async_mongodb_client = None
28 |
29 |
def get_mongodb_config():
    """
    Get Mongodb config in `config.yaml`
    :return: the `mongodb` section of the parsed YAML config
    """
    return get_yaml_config().get("mongodb")
37 |
38 |
def get_async_mongodb_client() -> AgnosticClient:
    """
    Raw Motor client handler, use it when beanie cannot work.
    Lazily creates a process-wide singleton client on first call.
    :return: the shared AsyncIOMotorClient instance
    """
    global async_mongodb_client
    if async_mongodb_client is None:
        # read the config section once instead of once per field
        config = get_mongodb_config()
        ip = config.get("ip")
        port = config.get("port")
        # quote credentials so special characters survive URI composition
        username = urllib.parse.quote_plus(config.get("username"))
        password = urllib.parse.quote_plus(config.get("password"))
        db = config.get("db")
        async_mongodb_client = AsyncIOMotorClient(
            f"mongodb://{username}:{password}@{ip}:{port}/{db}",
        )
    return async_mongodb_client
56 |
57 |
def get_async_mongodb_database(db_name: Optional[str] = None) -> AgnosticDatabase:
    """
    Raw Motor database handler, use it when beanie cannot work
    :param db_name: database name; falls back to the configured `db` when None
    :return: async database handle
    """
    client = get_async_mongodb_client()
    # default to the database named in config.yaml
    target_name = get_mongodb_config().get("db") if db_name is None else db_name
    return client[target_name]
68 |
69 |
def get_async_mongodb_collection(col_name: str) -> AgnosticCollection:
    """
    Raw Motor collection handler, use it when beanie cannot work
    :param col_name: collection name
    :return: async collection handle in the default database
    """
    return get_async_mongodb_database()[col_name]
78 |
79 |
async def start_async_mongodb() -> None:
    """
    Start beanie when process started.
    Initializes beanie with all document models; exits the process on failure
    since nothing else can work without the database.
    :return:
    """
    try:
        async_mongodb_database = get_async_mongodb_database()
        await init_beanie(
            database=async_mongodb_database,
            document_models=[
                Contest,
                ContestRecordPredict,
                ContestRecordArchive,
                User,
                Submission,
                Question,
            ],
        )
        logger.success("started mongodb connection")
    except Exception as e:
        logger.exception(f"Failed to start mongodb. error={e}")
        # fail fast: a missing database makes the whole process useless
        sys.exit(1)
102 |
--------------------------------------------------------------------------------
/app/db/views.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 |
3 | from app.db.models import DATA_REGION
4 |
5 |
class UserKey(BaseModel):
    """Projection model holding only the unique key fields of the User collection."""

    # Unique key of User collection, DON'T miss `data_region` when dealing with User models
    username: str
    data_region: DATA_REGION
10 |
--------------------------------------------------------------------------------
/app/handler/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/app/handler/__init__.py
--------------------------------------------------------------------------------
/app/handler/contest.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Dict, List
3 |
4 | from beanie.odm.operators.update.general import Set
5 | from loguru import logger
6 |
7 | from app.crawler.contest import (
8 | request_contest_user_num,
9 | request_next_two_contests,
10 | request_recent_contests,
11 | )
12 | from app.crawler.utils import multi_http_request
13 | from app.db.models import Contest
14 | from app.utils import (
15 | exception_logger_reraise,
16 | exception_logger_silence,
17 | gather_with_limited_concurrency,
18 | )
19 |
20 |
async def multi_upsert_contests(
    contests: List[Dict],
    past: bool,
) -> None:
    """
    Save meta data of Contests into MongoDB
    :param contests: raw contest dicts from the LeetCode API
    :param past: whether these contests have already finished
    :return:
    """
    tasks = list()
    for contest_dict in contests:
        try:
            contest_dict["past"] = past
            # endTime is derived so it can be indexed and queried directly
            contest_dict["endTime"] = (
                contest_dict["startTime"] + contest_dict["duration"]
            )
            logger.debug(f"{contest_dict=}")
            contest = Contest.model_validate(contest_dict)
            logger.debug(f"{contest=}")
        except Exception as e:
            # a single malformed contest dict must not abort the whole batch
            logger.exception(
                f"parse contest_dict error {e}. skip upsert {contest_dict=}"
            )
            continue
        tasks.append(
            Contest.find_one(Contest.titleSlug == contest.titleSlug,).upsert(
                Set(
                    {
                        Contest.update_time: contest.update_time,
                        Contest.title: contest.title,
                        Contest.startTime: contest.startTime,
                        Contest.duration: contest.duration,
                        Contest.past: past,
                        Contest.endTime: contest.endTime,
                    }
                ),
                # full document is inserted only when no match exists
                on_insert=contest,
            )
        )
    await gather_with_limited_concurrency(tasks)
    logger.success("finished")
63 |
64 |
@exception_logger_reraise
async def save_recent_and_next_two_contests() -> None:
    """
    Save past contests and top two coming contests
    :return:
    """
    # Both requests hit the same server, so run them one after another.
    top_two_contests = await request_next_two_contests()
    ten_past_contests = await request_recent_contests()
    # Database writes are safe to run concurrently.
    upcoming_task = multi_upsert_contests(top_two_contests, past=False)
    past_task = multi_upsert_contests(ten_past_contests, past=True)
    await asyncio.gather(upcoming_task, past_task)
79 |
80 |
@exception_logger_silence
async def save_user_num(
    contest_name: str,
) -> None:
    """
    Save user_num of US and CN data_region to database
    Best-effort: the decorator silences failures instead of re-raising.
    :param contest_name: titleSlug of the contest
    :return:
    """
    # the two regions live on different hosts, so these can run concurrently
    user_num_us, user_num_cn = await asyncio.gather(
        request_contest_user_num(contest_name, "US"),
        request_contest_user_num(contest_name, "CN"),
    )
    logger.info(f"{user_num_us=} {user_num_cn=}")
    await Contest.find_one(Contest.titleSlug == contest_name,).update(
        Set(
            {
                Contest.user_num_us: user_num_us,
                Contest.user_num_cn: user_num_cn,
            }
        )
    )
103 |
104 |
async def is_cn_contest_data_ready(
    contest_name: str,
) -> bool:
    """
    Check data from CN region when contest finished, if it is ready then return True
    Also persists the per-region user counts as a side effect once satisfied.
    :param contest_name: titleSlug of the contest
    :return: True when the CN ranking data has caught up with the US one
    """
    try:
        cn_data = (
            await multi_http_request(
                {
                    "req": {
                        "url": f"https://leetcode.cn/contest/api/ranking/{contest_name}/",
                        "method": "GET",
                    }
                }
            )
        )[0].json()
        # a present fallback_local flag means CN is still serving stale data
        fallback_local = cn_data.get("fallback_local")
        if fallback_local is not None:
            logger.info(f"check {fallback_local=} unsatisfied")
            return False
        us_data = (
            await multi_http_request(
                {
                    "req": {
                        "url": f"https://leetcode.com/contest/api/ranking/{contest_name}/",
                        "method": "GET",
                    }
                }
            )
        )[0].json()
        # compare user_num across regions: CN is ready once its count has
        # caught up with (>=) the US count
        is_satisfied = (cn_user_num := cn_data.get("user_num")) >= (
            us_user_num := us_data.get("user_num")
        )
        logger.info(f"check {cn_user_num=} {us_user_num=} {is_satisfied=}")
        if is_satisfied:
            await save_user_num(contest_name)
        return is_satisfied
    except Exception as e:
        # deliberately best-effort: any failure just reports "not ready yet"
        logger.error(f"check fallback_local error={e}")
        return False
149 |
--------------------------------------------------------------------------------
/app/handler/contest_record.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 | from beanie.odm.operators.update.general import Set
4 | from loguru import logger
5 |
6 | from app.crawler.contest_record_and_submission import request_contest_records
7 | from app.db.models import DATA_REGION, ContestRecordArchive, ContestRecordPredict, User
8 | from app.handler.submission import save_submission
9 | from app.handler.user import save_users_of_contest
10 | from app.utils import exception_logger_reraise, gather_with_limited_concurrency
11 |
12 |
@exception_logger_reraise
async def save_predict_contest_records(
    contest_name: str,
    data_region: DATA_REGION,
) -> None:
    """
    Save fetched contest records into `ContestRecordPredict` collection for predicting new contest
    Steps: wipe old records for this contest, insert fresh deduplicated ones,
    refresh the User collection, then backfill each nonzero-score record's
    old_rating and attendedContestsCount from User.
    :param contest_name: titleSlug of the contest
    :param data_region: "US" or "CN", which site's ranking to fetch
    :return:
    """

    async def _fill_old_rating_and_count(_contest_record: ContestRecordPredict):
        # copy the user's current rating/count onto the record before prediction
        user = await User.find_one(
            User.username == _contest_record.username,
            User.data_region == _contest_record.data_region,
        )
        _contest_record.old_rating = user.rating
        _contest_record.attendedContestsCount = user.attendedContestsCount
        await _contest_record.save()

    contest_record_list, _ = await request_contest_records(contest_name, data_region)
    contest_records = list()
    # Full update, delete all old records
    await ContestRecordPredict.find(
        ContestRecordPredict.contest_name == contest_name,
    ).delete()
    unique_keys = set()
    for contest_record_dict in contest_record_list:
        # Only the API for the US site has changed. Now, `username` from LCCN is `user_slug` from LCUS.
        if data_region == "US":
            # TODO: LCUS changed API, now we have to use `user_slug`, not `username`
            contest_record_dict["username"] = contest_record_dict["user_slug"]
        key = (contest_record_dict["data_region"], contest_record_dict["username"])
        if key in unique_keys:
            # during the contest, request_contest_ranking may return duplicated records (user ranking is changing)
            logger.warning(f"duplicated user record. {contest_record_dict=}")
            continue
        unique_keys.add(key)
        contest_record_dict.update({"contest_name": contest_name})
        contest_record = ContestRecordPredict.model_validate(contest_record_dict)
        contest_records.append(contest_record)
    insert_tasks = [
        ContestRecordPredict.insert_one(contest_record)
        for contest_record in contest_records
    ]
    await gather_with_limited_concurrency(insert_tasks, max_con_num=50)
    await save_users_of_contest(contest_name=contest_name, predict=True)
    # fill rating and attended count, must be called after save_users_of_contest and before predict_contest,
    fill_tasks = [
        _fill_old_rating_and_count(contest_record)
        for contest_record in contest_records
        if contest_record.score != 0
    ]
    await gather_with_limited_concurrency(fill_tasks, max_con_num=50)
68 |
69 |
@exception_logger_reraise
async def save_archive_contest_records(
    contest_name: str,
    data_region: DATA_REGION = "US",
    save_users: bool = True,
) -> None:
    """
    Save fetched contest records into `ContestRecordArchive` collection for archiving old contests
    :param contest_name:
    :param data_region:
    :param save_users: when True, also refresh every participant's user document
    :return:
    """
    # Timestamp taken BEFORE the upserts: any document still carrying an older
    # update_time afterwards was absent from the fresh fetch and is purged below.
    time_point = datetime.utcnow()
    (contest_record_list, nested_submission_list) = await request_contest_records(
        contest_name, data_region
    )
    contest_records = list()
    for contest_record_dict in contest_record_list:
        # Only the API for the US site has changed. Now, `username` from LCCN is `user_slug` from LCUS.
        if data_region == "US":
            # TODO: LCUS changed API, now we have to use `user_slug`, not `username`
            contest_record_dict["username"] = contest_record_dict["user_slug"]
        contest_record_dict.update({"contest_name": contest_name})
        contest_record = ContestRecordArchive.model_validate(contest_record_dict)
        contest_records.append(contest_record)
    # Upsert keyed on (contest_name, username, data_region): existing documents get
    # their mutable fields refreshed, unseen participants are inserted as-is.
    tasks = [
        ContestRecordArchive.find_one(
            ContestRecordArchive.contest_name == contest_record.contest_name,
            ContestRecordArchive.username == contest_record.username,
            ContestRecordArchive.data_region == contest_record.data_region,
        ).upsert(
            Set(
                {
                    ContestRecordArchive.rank: contest_record.rank,
                    ContestRecordArchive.score: contest_record.score,
                    ContestRecordArchive.finish_time: contest_record.finish_time,
                    ContestRecordArchive.update_time: contest_record.update_time,
                }
            ),
            on_insert=contest_record,
        )
        for contest_record in contest_records
    ]
    await gather_with_limited_concurrency(tasks, max_con_num=50)
    # remove old records
    await ContestRecordArchive.find(
        ContestRecordArchive.contest_name == contest_name,
        ContestRecordArchive.update_time < time_point,
    ).delete()
    if save_users is True:
        await save_users_of_contest(contest_name=contest_name, predict=False)
    else:
        logger.info(f"{save_users=}, will not save users")
    await save_submission(contest_name, contest_record_list, nested_submission_list)
125 |
--------------------------------------------------------------------------------
/app/handler/question.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from datetime import datetime, timedelta
3 | from typing import List
4 |
5 | from beanie.odm.operators.update.general import Set
6 | from loguru import logger
7 |
8 | from app.crawler.question import request_question_list
9 | from app.db.models import Question, Submission
10 | from app.utils import gather_with_limited_concurrency, get_contest_start_time
11 |
12 |
async def real_time_count_at_time_point(
    contest_name: str,
    question_id: int,
    time_point: datetime,
) -> int:
    """
    Count how many accepted submissions a single question had accumulated
    by `time_point` (inclusive).
    :param contest_name:
    :param question_id:
    :param time_point:
    :return: number of matching submissions
    """
    query = Submission.find(
        Submission.contest_name == contest_name,
        Submission.question_id == question_id,
        Submission.date <= time_point,
    )
    return await query.count()
30 |
31 |
async def save_questions_real_time_count(
    contest_name: str,
    delta_minutes: int = 1,
) -> None:
    """
    Sample each question's accepted-submission count at fixed intervals
    across the 90-minute contest window and persist the resulting series
    on the Question documents.
    :param contest_name:
    :param delta_minutes: sampling interval in minutes
    :return:
    """
    base_time = get_contest_start_time(contest_name)
    # Sample points are base + k*delta for k = 1 .. floor(90/delta),
    # i.e. every point strictly after the start, up to the 90-minute mark.
    time_series = [
        base_time + timedelta(minutes=delta_minutes * step)
        for step in range(1, 90 // delta_minutes + 1)
    ]
    logger.info(f"{contest_name=} {time_series=}")
    questions = await Question.find(
        Question.contest_name == contest_name,
    ).to_list()
    for question in questions:
        counting_tasks = [
            real_time_count_at_time_point(
                contest_name, question.question_id, sample_point
            )
            for sample_point in time_series
        ]
        question.real_time_count = await gather_with_limited_concurrency(
            counting_tasks
        )
        await question.save()
    logger.success("finished")
61 |
62 |
async def save_questions(
    contest_name: str,
) -> List[Question]:
    """
    For the past contests, fetch questions list and fill into MongoDB
    :param contest_name:
    :return: the upserted Question objects; an empty list on failure.
             (Bug fix: the except branch previously fell through and returned
             None, violating the annotation and crashing callers such as
             `save_submission` that iterate over the result.)
    """
    try:
        question_list = await request_question_list(contest_name)
        # Taken before upserting so stale documents can be purged afterwards.
        time_point = datetime.utcnow()
        additional_fields = {
            "contest_name": contest_name,
        }
        questions = list()
        for idx, question in enumerate(question_list):
            # Drop the raw API `id`; `qi` is the 1-based question index within the contest.
            question.pop("id")
            question.update({"qi": idx + 1})
            questions.append(Question.model_validate(question | additional_fields))
        tasks = (
            Question.find_one(
                Question.question_id == question_obj.question_id,
                Question.contest_name == contest_name,
            ).upsert(
                Set(
                    {
                        Question.credit: question_obj.credit,
                        Question.title: question_obj.title,
                        Question.title_slug: question_obj.title_slug,
                        Question.update_time: question_obj.update_time,
                        Question.qi: question_obj.qi,
                    }
                ),
                on_insert=question_obj,
            )
            for question_obj in questions
        )
        await asyncio.gather(*tasks)
        # Old questions may change, could delete here.
        await Question.find(
            Question.contest_name == contest_name,
            Question.update_time < time_point,
        ).delete()
        logger.success("finished")
        return questions
    except Exception as e:
        logger.error(f"failed to fill questions fields for {contest_name=} {e=}")
        # Honor the declared return type so callers can safely iterate.
        return []
110 |
--------------------------------------------------------------------------------
/app/handler/submission.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from typing import Dict, List, Tuple
3 |
4 | from beanie.odm.operators.update.general import Set
5 | from loguru import logger
6 |
7 | from app.db.models import ContestRecordArchive, Submission
8 | from app.db.mongodb import get_async_mongodb_collection
9 | from app.db.views import UserKey
10 | from app.handler.question import save_questions, save_questions_real_time_count
11 | from app.utils import (
12 | exception_logger_reraise,
13 | gather_with_limited_concurrency,
14 | get_contest_start_time,
15 | )
16 |
17 |
async def aggregate_rank_at_time_point(
    contest_name: str,
    time_point: datetime,
) -> Tuple[Dict[Tuple[str, str], int], int]:
    """
    For a single time_point, rank all the participants.
    Be careful that every wrong submission should add a 5-minutes penalty.
    :param contest_name:
    :param time_point:
    :return: a tuple of
             - rank_map: (username, data_region) -> rank, where exact ties
               (same credit sum AND same penalized finish time) share a rank
             - raw_rank: total number of participants ranked
    """
    # hard to use beanie here, so use raw MongoDB aggregation
    col = get_async_mongodb_collection(Submission.__name__)
    rank_map = dict()
    last_credit_sum = None
    last_penalty_date = None
    tie_rank = raw_rank = 0
    pipeline = [
        # Only submissions made at or before the sampled time point count.
        {"$match": {"contest_name": contest_name, "date": {"$lte": time_point}}},
        {
            # One group per participant: total credit, total failed attempts,
            # and the time of their latest accepted submission.
            "$group": {
                "_id": {"username": "$username", "data_region": "$data_region"},
                "credit_sum": {"$sum": "$credit"},
                "fail_count_sum": {"$sum": "$fail_count"},
                "date_max": {"$max": "$date"},
            }
        },
        {
            # penalty_date = latest accepted submission + 5 minutes per failure.
            "$addFields": {
                "penalty_date": {
                    "$dateAdd": {
                        "startDate": "$date_max",
                        "unit": "minute",
                        "amount": {"$multiply": [5, "$fail_count_sum"]},
                    }
                },
                "username": "$_id.username",
                "data_region": "$_id.data_region",
            }
        },
        {"$unset": ["_id"]},
        # Higher score first; the earlier penalized finish time breaks ties.
        {"$sort": {"credit_sum": -1, "penalty_date": 1}},
        {
            "$project": {
                "_id": 0,
                "data_region": 1,
                "username": 1,
                "credit_sum": 1,
                "fail_count_sum": 1,
                "penalty_date": 1,
            }
        },
    ]
    async for doc in col.aggregate(pipeline):
        raw_rank += 1
        if (
            doc["credit_sum"] == last_credit_sum
            and doc["penalty_date"] == last_penalty_date
        ):
            # Exact tie with the previous participant: reuse the shared rank.
            rank_map[(doc["username"], doc["data_region"])] = tie_rank
        else:
            # New (score, penalty) value: this raw position starts a new tie group.
            tie_rank = raw_rank
            rank_map[(doc["username"], doc["data_region"])] = raw_rank
        last_credit_sum = doc["credit_sum"]
        last_penalty_date = doc["penalty_date"]
    return rank_map, raw_rank
84 |
85 |
async def save_real_time_rank(
    contest_name: str,
    delta_minutes: int = 1,
) -> None:
    """
    For every delta_minutes, invoke `aggregate_rank_at_time_point` function to get ranking on single time_point
    Then append every user's ranking to a list and save it
    :param contest_name:
    :param delta_minutes: sampling interval in minutes
    :return:
    """
    logger.info("started running real_time_rank update function")
    users = (
        await ContestRecordArchive.find(
            ContestRecordArchive.contest_name == contest_name,
            ContestRecordArchive.score != 0,  # No need to query users who have 0 score
        )
        .project(UserKey)
        .to_list()
    )
    real_time_rank_map = {(user.username, user.data_region): list() for user in users}
    start_time = get_contest_start_time(contest_name)
    end_time = start_time + timedelta(minutes=90)
    # i = number of time points processed so far (1-based); used below to spot
    # users who received no rank at the current time point.
    i = 1
    while (start_time := start_time + timedelta(minutes=delta_minutes)) <= end_time:
        rank_map, last_rank = await aggregate_rank_at_time_point(
            contest_name, start_time
        )
        # One past the worst real rank: assigned to users with no submissions yet.
        last_rank += 1
        for (username, data_region), rank in rank_map.items():
            real_time_rank_map[(username, data_region)].append(rank)
        for k in real_time_rank_map.keys():
            # Any list shorter than i belongs to a user absent from this time
            # point's rank_map; pad it with the sentinel bottom rank.
            if len(real_time_rank_map[k]) != i:
                real_time_rank_map[k].append(last_rank)
        i += 1
    tasks = [
        ContestRecordArchive.find_one(
            ContestRecordArchive.contest_name == contest_name,
            ContestRecordArchive.username == username,
            ContestRecordArchive.data_region == data_region,
        ).update(
            Set(
                {
                    ContestRecordArchive.real_time_rank: rank_list,
                }
            )
        )
        for (username, data_region), rank_list in real_time_rank_map.items()
    ]
    logger.info("updating real_time_rank field in ContestRecordArchive collection")
    await gather_with_limited_concurrency(tasks, max_con_num=5)
    logger.success(f"finished updating real_time_rank for {contest_name=}")
138 |
139 |
@exception_logger_reraise
async def save_submission(
    contest_name: str,
    contest_record_list: List[Dict],
    nested_submission_list: List[Dict],
) -> None:
    """
    Save all of submission-related data to MongoDB
    :param contest_name:
    :param contest_record_list: one record dict per participant, parallel to
           nested_submission_list (zipped pairwise below)
    :param nested_submission_list: per participant, a mapping of question_id -> submission dict
    :return:
    """
    # Taken before upserting so rejudged/stale submissions can be purged afterwards.
    time_point = datetime.utcnow()
    # NOTE(review): save_questions swallows exceptions and may return None on
    # failure, which would make the iteration below raise TypeError — confirm
    # the intended failure behavior.
    questions = await save_questions(contest_name)
    question_credit_mapper = {
        question.question_id: question.credit for question in questions
    }
    submissions = list()
    for contest_record_dict, nested_submission_dict in zip(
        contest_record_list, nested_submission_list
    ):
        for question_id, submission_dict in nested_submission_dict.items():
            # Drop the raw API `id` and denormalize contest/user/credit info
            # onto each submission document.
            submission_dict.pop("id")
            submission_dict |= {
                "contest_name": contest_name,
                "username": contest_record_dict["username"],
                "credit": question_credit_mapper[int(question_id)],
            }
        submissions.extend(
            [
                Submission.model_validate(submission_dict)
                for submission_dict in nested_submission_dict.values()
            ]
        )
    # Upsert keyed on (contest_name, username, data_region, question_id).
    tasks = [
        Submission.find_one(
            Submission.contest_name == submission.contest_name,
            Submission.username == submission.username,
            Submission.data_region == submission.data_region,
            Submission.question_id == submission.question_id,
        ).upsert(
            Set(
                {
                    Submission.date: submission.date,
                    Submission.fail_count: submission.fail_count,
                    Submission.credit: submission.credit,
                    Submission.update_time: submission.update_time,
                    Submission.lang: submission.lang,
                }
            ),
            on_insert=submission,
        )
        for submission in submissions
    ]
    logger.info("updating Submission collection")
    await gather_with_limited_concurrency(tasks, max_con_num=5)
    # Old submissions may be rejudged, must be deleted here, or will cause error when plotting.
    await Submission.find(
        Submission.contest_name == contest_name,
        Submission.update_time < time_point,
    ).delete()
    logger.success("finished updating submissions, begin to save real_time_rank")
    await save_questions_real_time_count(contest_name)
    await save_real_time_rank(contest_name)
205 |
--------------------------------------------------------------------------------
/app/handler/user.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from typing import List
3 |
4 | from beanie.odm.operators.update.general import Set
5 | from loguru import logger
6 |
7 | from app.constants import (
8 | DEFAULT_NEW_USER_ATTENDED_CONTESTS_COUNT,
9 | DEFAULT_NEW_USER_RATING,
10 | )
11 | from app.crawler.user import request_user_rating_and_attended_contests_count
12 | from app.db.models import DATA_REGION, ContestRecordArchive, ContestRecordPredict, User
13 | from app.db.mongodb import get_async_mongodb_collection
14 | from app.db.views import UserKey
15 | from app.utils import exception_logger_reraise, gather_with_limited_concurrency
16 |
17 |
async def upsert_users_rating_and_attended_contests_count(
    data_region: DATA_REGION,
    username: str,
    save_new_user: bool = True,
) -> None:
    """
    Fetch one user's latest rating and attendedContestsCount via graphql and
    upsert them into the User collection. Any failure is logged and swallowed
    so a single bad user cannot abort a whole batch.
    :param data_region:
    :param username:
    :param save_new_user: when False, users unknown to the graphql API are skipped
                          instead of being created with default values
    :return:
    """
    try:
        (
            rating,
            attended_contests_count,
        ) = await request_user_rating_and_attended_contests_count(data_region, username)
        if rating is None:
            # The graphql API returns nothing for users who never attended a contest.
            logger.info(
                f"graphql data is None, new user found, {data_region=} {username=}"
            )
            if not save_new_user:
                logger.info(f"{save_new_user=} do nothing.")
                return
            rating = DEFAULT_NEW_USER_RATING
            attended_contests_count = DEFAULT_NEW_USER_ATTENDED_CONTESTS_COUNT
        fresh_user = User(
            username=username,
            user_slug=username,
            data_region=data_region,
            attendedContestsCount=attended_contests_count,
            rating=rating,
        )
        upsert_query = User.find_one(
            User.username == fresh_user.username,
            User.data_region == fresh_user.data_region,
        )
        await upsert_query.upsert(
            Set(
                {
                    User.update_time: fresh_user.update_time,
                    User.attendedContestsCount: fresh_user.attendedContestsCount,
                    User.rating: fresh_user.rating,
                }
            ),
            on_insert=fresh_user,
        )
    except Exception as e:
        logger.exception(f"user update error. {data_region=} {username=} Exception={e}")
66 |
67 |
@exception_logger_reraise
async def update_all_users_in_database(
    batch_size: int = 100,
) -> None:
    """
    For all users in the User collection, update their rating and attended_contests_count.
    Users are processed in rating-descending batches; within each batch CN and US
    requests are throttled separately because the CN site has a strong rate limit.
    :param batch_size: number of users fetched and updated per batch
    :return:
    """
    total_count = await User.count()
    logger.info(f"User collection now has {total_count=}")
    if total_count == 0:
        # Bug fix: with an empty collection the progress log below raised
        # ZeroDivisionError; nothing to update anyway.
        return
    for i in range(0, total_count, batch_size):
        logger.info(f"progress = {i / total_count * 100 :.2f}%")
        docs: List[UserKey] = await (
            User.find_all()
            .sort(-User.rating)
            .skip(i)
            .limit(batch_size)
            .project(UserKey)
            .to_list()
        )
        # Group coroutines by region so each site gets its own concurrency cap
        # (previously duplicated if/else branches built the identical coroutine).
        region_tasks = {"CN": [], "US": []}
        for doc in docs:
            bucket = region_tasks["CN"] if doc.data_region == "CN" else region_tasks["US"]
            bucket.append(
                upsert_users_rating_and_attended_contests_count(
                    doc.data_region, doc.username, False
                )
            )
        await gather_with_limited_concurrency(
            [
                # CN site has a strong rate limit
                gather_with_limited_concurrency(region_tasks["CN"], 1),
                gather_with_limited_concurrency(region_tasks["US"], 5),
            ],
            30,
        )
112 |
113 |
@exception_logger_reraise
async def save_users_of_contest(
    contest_name: str,
    predict: bool,
) -> None:
    """
    Update all users' rating and attendedContestsCount.
    For the ContestRecordPredict collection, don't update users who have a zero score or were updated recently.
    :param contest_name:
    :param predict: True = select participants from ContestRecordPredict (skipping
                    zero-score users and users refreshed within the last 36 hours);
                    False = select every participant from ContestRecordArchive
    :return:
    """
    if predict:
        col = get_async_mongodb_collection(ContestRecordPredict.__name__)
        pipeline = [
            # Zero-score participants don't affect prediction: skip them.
            {"$match": {"contest_name": contest_name, "score": {"$ne": 0}}},
            {
                # Join against User to find a matching document refreshed recently.
                "$lookup": {
                    "from": "User",
                    "let": {"data_region": "$data_region", "username": "$username"},
                    "pipeline": [
                        {
                            "$match": {
                                "$expr": {
                                    "$and": [
                                        {"$eq": ["$data_region", "$$data_region"]},
                                        {"$eq": ["$username", "$$username"]},
                                        {
                                            # "Recently" = updated within the last 36 hours.
                                            "$gte": [
                                                "$update_time",
                                                datetime.utcnow() - timedelta(hours=36),
                                            ]
                                        },
                                    ]
                                }
                            }
                        },
                    ],
                    "as": "recent_updated_user",
                }
            },
            # Keep only participants WITHOUT a recently-updated user document.
            {"$match": {"recent_updated_user": {"$eq": []}}},
            {"$project": {"_id": 0, "data_region": 1, "username": 1}},
        ]
    else:
        col = get_async_mongodb_collection(ContestRecordArchive.__name__)
        pipeline = [
            {"$match": {"contest_name": contest_name}},
            {"$project": {"_id": 0, "data_region": 1, "username": 1}},
        ]
    cursor = col.aggregate(pipeline)
    docs = await cursor.to_list(length=None)
    logger.info(f"docs length = {len(docs)}")
    # Split the refresh coroutines by region so each site gets its own rate cap.
    cn_tasks = []
    us_tasks = []
    for doc in docs:
        if doc["data_region"] == "CN":
            cn_tasks.append(
                upsert_users_rating_and_attended_contests_count(
                    doc["data_region"], doc["username"]
                )
            )
        else:
            us_tasks.append(
                upsert_users_rating_and_attended_contests_count(
                    doc["data_region"], doc["username"]
                )
            )
    await gather_with_limited_concurrency(
        [
            # CN site has a strong rate limit
            gather_with_limited_concurrency(cn_tasks, 1),
            gather_with_limited_concurrency(us_tasks, 5),
        ],
        30,
    )
190 |
--------------------------------------------------------------------------------
/app/schedulers.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from datetime import datetime, timedelta
3 | from typing import Optional
4 |
5 | import pytz
6 | from apscheduler.schedulers.asyncio import AsyncIOScheduler
7 | from loguru import logger
8 |
9 | from app.constants import (
10 | BIWEEKLY_CONTEST_BASE,
11 | BIWEEKLY_CONTEST_START,
12 | WEEKLY_CONTEST_BASE,
13 | WEEKLY_CONTEST_START,
14 | CronTimePointWkdHrMin,
15 | )
16 | from app.core.predictor import predict_contest
17 | from app.handler.contest import (
18 | is_cn_contest_data_ready,
19 | save_recent_and_next_two_contests,
20 | )
21 | from app.handler.contest_record import (
22 | save_archive_contest_records,
23 | save_predict_contest_records,
24 | )
25 | from app.utils import exception_logger_reraise, get_passed_weeks
26 |
27 | global_scheduler: Optional[AsyncIOScheduler] = None
28 |
29 |
@exception_logger_reraise
async def save_last_two_contest_records() -> None:
    """
    Archive the most recent biweekly and weekly contests.
    Upserting into ContestRecordArchive also refreshes the participants'
    user documents (done inside save_archive_contest_records).
    :return:
    """
    utc = datetime.utcnow()

    # Biweekly contests run every other week, hence the floor division by 2.
    last_biweekly_contest_name = "biweekly-contest-{}".format(
        get_passed_weeks(utc, BIWEEKLY_CONTEST_BASE.dt) // 2 + BIWEEKLY_CONTEST_BASE.num
    )
    logger.info(f"{last_biweekly_contest_name=} update archive contests")
    await save_archive_contest_records(
        contest_name=last_biweekly_contest_name, data_region="CN"
    )

    # Weekly contests advance one number per week.
    last_weekly_contest_name = "weekly-contest-{}".format(
        get_passed_weeks(utc, WEEKLY_CONTEST_BASE.dt) + WEEKLY_CONTEST_BASE.num
    )
    logger.info(f"{last_weekly_contest_name=} update archive contests")
    await save_archive_contest_records(
        contest_name=last_weekly_contest_name, data_region="CN"
    )
56 |
57 |
@exception_logger_reraise
async def composed_predict_jobs(
    contest_name: str,
    max_try_times: int = 300,
) -> None:
    """
    All three steps which should be run when the contest is just over
    :param contest_name:
    :param max_try_times: maximum number of one-minute polls waiting for CN data
    :return:
    """
    tried_times = 1
    # Poll once a minute until the CN site has the contest data ready,
    # giving up after max_try_times attempts.
    while (
        not (cn_data_is_ready := await is_cn_contest_data_ready(contest_name))
        and tried_times < max_try_times
    ):
        await asyncio.sleep(60)
        tried_times += 1
    if not cn_data_is_ready:
        # Deliberate best-effort: log loudly but still run the pipeline
        # with whatever (possibly incomplete) data is available.
        logger.error(
            f"give up after failed {tried_times=} times. CONTINUE WITH INCOMPLETE DATA"
        )
    await save_recent_and_next_two_contests()
    await save_predict_contest_records(contest_name=contest_name, data_region="CN")
    await predict_contest(contest_name=contest_name)
    # Archive without re-saving users: save_predict_contest_records just refreshed them.
    await save_archive_contest_records(
        contest_name=contest_name, data_region="CN", save_users=False
    )
86 |
87 |
async def pre_save_predict_users(contest_name: str) -> None:
    """
    Warm the cache of participants' info for both regions during the contest,
    so the real prediction run spends less time fetching users.
    :param contest_name:
    :return:
    """
    for region in ("CN", "US"):
        await save_predict_contest_records(contest_name, region)
96 |
97 |
async def add_prediction_schedulers(contest_name: str) -> None:
    """
    Schedule the prediction pipeline for one contest.
    Two `pre_save_predict_users` jobs run during the contest to cache
    participants' info in advance (fetching users via `save_users_of_contest`
    dominates the calculation's runtime, so pre-caching speeds it up greatly),
    then a single `composed_predict_jobs` job does the real calculation.
    :param contest_name:
    :return:
    """
    global global_scheduler
    utc = datetime.utcnow()
    # Preparation for the prediction run: fetch users in advance,
    # once mid-contest and once near the end.
    for offset_minutes in (25, 70):
        global_scheduler.add_job(
            pre_save_predict_users,
            kwargs={"contest_name": contest_name},
            trigger="date",
            run_date=utc + timedelta(minutes=offset_minutes),
        )
    # Real prediction run: 5 minutes past the 90-minute contest end,
    # to wait for LeetCode updating the final result.
    global_scheduler.add_job(
        composed_predict_jobs,
        kwargs={"contest_name": contest_name},
        trigger="date",
        run_date=utc + timedelta(minutes=95),
    )
126 |
127 |
async def scheduler_entry() -> None:
    """
    Dispatch jobs at every minute.
    Runs as the scheduler's only permanent interval job; depending on the
    current UTC time it registers prediction jobs (at contest start) or
    low-priority maintenance jobs (early-morning, mid-week).
    :return:
    """
    global global_scheduler
    utc = datetime.utcnow()
    time_point = CronTimePointWkdHrMin(utc.weekday(), utc.hour, utc.minute)
    if time_point == WEEKLY_CONTEST_START:
        # A weekly contest is starting right now: derive its number from the baseline.
        passed_weeks = get_passed_weeks(utc, WEEKLY_CONTEST_BASE.dt)
        contest_name = f"weekly-contest-{passed_weeks + WEEKLY_CONTEST_BASE.num}"
        logger.info(f"parsed {contest_name=}")
        await add_prediction_schedulers(contest_name)
    elif time_point == BIWEEKLY_CONTEST_START:
        passed_weeks = get_passed_weeks(utc, BIWEEKLY_CONTEST_BASE.dt)
        # Biweekly contests only run on even weeks relative to the baseline.
        if passed_weeks % 2 != 0:
            logger.info(
                f"will not run biweekly prediction, passed_weeks={passed_weeks} is odd for now={utc}"
            )
            return
        contest_name = (
            f"biweekly-contest-{passed_weeks // 2 + BIWEEKLY_CONTEST_BASE.num}"
        )
        logger.info(f"parsed {contest_name=}")
        await add_prediction_schedulers(contest_name)
    elif (
        2 <= time_point.weekday <= 5  # Wednesday, Thursday, Friday and Saturday 00:00
        and time_point.hour == 0
        and time_point.minute == 0
    ):
        # do other low-priority jobs such as updating user's rating and participated contest count.
        global_scheduler.add_job(
            save_recent_and_next_two_contests,
            trigger="date",
            run_date=utc + timedelta(minutes=1),
        )
        global_scheduler.add_job(
            save_last_two_contest_records,
            trigger="date",
            run_date=utc + timedelta(minutes=10),
        )
    else:
        logger.trace(f"job_dispatcher nothing to do for {utc=} {time_point=}")
    if len(job_list := global_scheduler.get_jobs()) > 1:
        # logging when there are more schedulers besides scheduler_entry itself.
        logger.info(f"global_scheduler jobs={'; '.join(str(job) for job in job_list)}")
174 |
175 |
async def start_scheduler() -> None:
    """
    Create the global AsyncIOScheduler and add the `scheduler_entry` interval job.
    Must be called exactly once per process; a second call is rejected so jobs
    are not dispatched twice.
    :return:
    """
    global global_scheduler
    if global_scheduler is not None:
        logger.error("global_scheduler could only be started once.")
        # Bug fix: previously execution fell through, silently replacing the
        # running scheduler (leaking its jobs) and doubling future dispatches.
        return
    global_scheduler = AsyncIOScheduler(timezone=pytz.utc)
    global_scheduler.add_job(scheduler_entry, "interval", minutes=1)
    global_scheduler.start()
    logger.success("started schedulers")
188 |
--------------------------------------------------------------------------------
/app/utils.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import math
3 | import sys
4 | from asyncio import iscoroutinefunction
5 | from datetime import datetime, timedelta
6 | from functools import partial, wraps
7 | from typing import Any, Callable, Coroutine, List, Sequence, Union
8 |
9 | from loguru import logger
10 |
11 | from app.constants import BIWEEKLY_CONTEST_BASE, WEEKLY_CONTEST_BASE
12 |
13 |
async def gather_with_limited_concurrency(
    crts: Sequence[Coroutine], max_con_num: int = 10, return_exceptions: bool = False
) -> List[Union[Exception, Any]]:
    """
    Like `asyncio.gather`, but never runs more than `max_con_num` of the given
    coroutines simultaneously, enforced with an `asyncio.Semaphore`.
    :param crts: coroutines
    :param max_con_num: Maximum number of concurrent tasks
    :param return_exceptions: Whether to return exceptions in the result
    :return: List of results or exceptions, in the same order as `crts`
    """
    gate = asyncio.Semaphore(max_con_num)

    async def _run_gated(crt: Coroutine):
        # Each coroutine waits for a free slot before it starts executing.
        async with gate:
            return await crt

    return await asyncio.gather(
        *(_run_gated(crt) for crt in crts), return_exceptions=return_exceptions
    )
33 |
34 |
def get_passed_weeks(t: datetime, base_t: datetime) -> int:
    """
    Return the number of whole weeks elapsed from base_t to t.
    Since a week is exactly 7 whole days, flooring the elapsed seconds over one
    week equals integer-dividing the (already floored) `timedelta.days` by 7.
    :param t:
    :param base_t:
    :return:
    """
    return (t - base_t).days // 7
43 |
44 |
def get_contest_start_time(contest_name: str) -> datetime:
    """
    It's a simple, bold, but EFFECTIVE conjecture here, take two baselines separately,
    then from the expected `contest_name` calculate its start time, just let it run on server periodically, no bother.
    It's just unnecessary to use dynamic configuration things instead.
    This conjecture worked precisely in the past, hopefully will still work well in the future.
    :param contest_name:
    :return:
    """
    contest_num = int(contest_name.split("-")[-1])
    is_weekly = contest_name.lower().startswith("weekly")
    base = WEEKLY_CONTEST_BASE if is_weekly else BIWEEKLY_CONTEST_BASE
    # Weekly contests advance one week per number; biweekly ones advance two.
    weeks_since_base = (
        contest_num - base.num if is_weekly else (contest_num - base.num) * 2
    )
    start_time = base.dt + timedelta(weeks=weeks_since_base)
    logger.info(f"{contest_name=} {start_time=}")
    return start_time
65 |
66 |
def start_loguru(process: str = "main") -> None:
    """
    Configure a loguru sink from the `loguru.<process>` section of config.yaml.
    error-prone warning: misuse process parameter (for example, use main in fastapi process
    or reassign a different value) will mess up logs.
    TODO: could set a global singleton variable to make sure this function will only be called once in a single process.
    :param process: "main" for main.py backend process, "api" for fastapi http-server process.
    :return:
    """
    # Imported lazily to avoid a circular import at module load time.
    from app.config import get_yaml_config

    try:
        loguru_config = get_yaml_config().get("loguru").get(process)
        logger.add(
            sink=loguru_config["sink"],
            rotation=loguru_config["rotation"],
            level=loguru_config["level"],
        )
    except Exception as e:
        # Logging is considered essential: abort the process if it can't be set up.
        logger.exception(
            f"Failed to start loguru, check loguru config in config.yaml file. error={e}"
        )
        sys.exit(1)
89 |
90 |
def exception_logger(func: Callable[..., Any], reraise: bool) -> Callable[..., Any]:
    """
    A decorator to write logs and try-catch for the key functions you want to keep eyes on.
    Transparently wraps both sync and async callables.
    :param func: the function to wrap
    :param reraise: when True, the caught exception is re-raised after logging;
                    when False it is swallowed and the wrapper returns None.
    :return: the wrapped function
    """

    @wraps(func)
    async def async_wrapper(*args, **kwargs):
        try:
            logger.info(f"{func.__name__} is about to run.")
            res = await func(*args, **kwargs)
            logger.success(f"{func.__name__} is finished.")
            return res
        except Exception as e:
            # Consistency fix: include args/kwargs like the sync wrapper does.
            logger.exception(f"{func.__name__} args={args} kwargs={kwargs} error={e}.")
            if reraise:
                # Bare raise preserves the original traceback unchanged.
                raise

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            logger.info(f"{func.__name__} is about to run.")
            res = func(*args, **kwargs)
            logger.success(f"{func.__name__} is finished.")
            return res
        except Exception as e:
            logger.exception(f"{func.__name__} args={args} kwargs={kwargs} error={e}.")
            if reraise:
                raise

    return async_wrapper if iscoroutinefunction(func) else wrapper
124 |
125 |
# Ready-made decorator variants: `reraise` propagates the exception after
# logging (for critical jobs); `silence` logs and swallows it (best-effort jobs).
exception_logger_reraise = partial(exception_logger, reraise=True)
exception_logger_silence = partial(exception_logger, reraise=False)
128 |
--------------------------------------------------------------------------------
/client/.eslintrc.cjs:
--------------------------------------------------------------------------------
// ESLint configuration for the React (Vite) client.
module.exports = {
  root: true, // do not search parent directories for other configs
  env: { browser: true, es2020: true },
  extends: [
    'eslint:recommended',
    'plugin:react/recommended',
    'plugin:react/jsx-runtime',
    'plugin:react-hooks/recommended',
  ],
  ignorePatterns: ['dist', '.eslintrc.cjs'],
  parserOptions: { ecmaVersion: 'latest', sourceType: 'module' },
  settings: { react: { version: '18.2' } },
  plugins: ['react-refresh'],
  rules: {
    // Warn when a module exports things other than components, which breaks Fast Refresh.
    'react-refresh/only-export-components': [
      'warn',
      { allowConstantExport: true },
    ],
  },
}
--------------------------------------------------------------------------------
/client/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 |
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 |
--------------------------------------------------------------------------------
/client/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | LCCN - Loading React App ⏰
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
21 |
22 |
29 |
30 |
32 |
33 |
34 |
35 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/client/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "lccn-client",
3 | "private": true,
4 | "version": "0.1.1",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "vite build",
9 | "lint": "eslint . --ext js,jsx --report-unused-disable-directives --max-warnings 0",
10 | "preview": "vite preview"
11 | },
12 | "dependencies": {
13 | "@fortawesome/fontawesome-svg-core": "^6.6.0",
14 | "@fortawesome/free-solid-svg-icons": "^6.6.0",
15 | "@fortawesome/react-fontawesome": "^0.2.2",
16 | "echarts": "^5.5.1",
17 | "echarts-for-react": "^3.0.2",
18 | "react": "^18.2.0",
19 | "react-dom": "^18.2.0",
20 | "react-router-dom": "^6.26.2",
21 | "swr": "^2.2.4"
22 | },
23 | "devDependencies": {
24 | "@types/react": "^18.3.6",
25 | "@types/react-dom": "^18.2.17",
26 | "@vitejs/plugin-react": "^4.3.1",
27 | "autoprefixer": "^10.4.20",
28 | "daisyui": "^4.12.10",
29 | "eslint": "^8.57.1",
30 | "eslint-plugin-react": "^7.36.1",
31 | "eslint-plugin-react-hooks": "^4.6.0",
32 | "eslint-plugin-react-refresh": "^0.4.12",
33 | "postcss": "^8.4.47",
34 | "tailwindcss": "^3.4.11",
35 | "vite": "^5.4.6"
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/client/postcss.config.js:
--------------------------------------------------------------------------------
// PostCSS pipeline: run Tailwind first, then add vendor prefixes via Autoprefixer.
export default {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
};
7 |
--------------------------------------------------------------------------------
/client/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/client/public/favicon.ico
--------------------------------------------------------------------------------
/client/src/App.jsx:
--------------------------------------------------------------------------------
1 | import { useState, useEffect, createContext } from "react";
2 | import { BrowserRouter, Route, Routes } from "react-router-dom";
3 |
4 | import Navbar from "./components/Navbar";
5 | import PredictedContest from "./pages/Predicted/PredictedContests";
6 | import PredictedRecords from "./pages/Predicted/PredictedRecords";
7 | import Footer from "./components/Footer";
8 |
9 | export const DataThemeContext = createContext("light");
10 |
11 | function App() {
12 | const initializeDataTheme = () =>
13 | JSON.parse(localStorage.getItem("dataTheme")) ?? "light";
14 | const [dataTheme, setDataTheme] = useState(initializeDataTheme());
15 | useEffect(() => {
16 | localStorage.setItem("dataTheme", JSON.stringify(dataTheme));
17 | }, [dataTheme]);
18 |
19 | // useEffect(() => {
20 | // document.title = "Home";
21 | // }, []);
22 |
23 | return (
24 |
25 |
26 |
27 |
28 |
29 |
30 |
🤞 LeetCode contest rating predictor 👀
31 |
37 |
41 |
42 |
43 |
44 |
45 | } />
46 | }
49 | />
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 | );
58 | }
59 |
60 | export default App;
61 |
--------------------------------------------------------------------------------
/client/src/components/Footer.jsx:
--------------------------------------------------------------------------------
1 | import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
2 | import { faCopyright, faPen } from "@fortawesome/free-solid-svg-icons";
3 |
4 | const Footer = () => {
5 | return (
6 |
88 | );
89 | };
90 |
91 | export default Footer;
92 |
--------------------------------------------------------------------------------
/client/src/components/Navbar.jsx:
--------------------------------------------------------------------------------
1 | import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
2 | import { faCheck, faHouse, faPalette } from "@fortawesome/free-solid-svg-icons";
3 | import { themes } from "../data/constants";
4 | import { DataThemeContext } from "../App";
5 | import { Link } from "react-router-dom";
6 | import { useContext, useState, useEffect, useRef } from "react";
7 |
8 | const ThemeButton = () => {
9 | const { dataTheme, setDataTheme } = useContext(DataThemeContext);
10 | const [showThemeList, setShowThemeList] = useState(false);
11 | const dropdownRef = useRef(null);
12 |
13 | const handleClickOutside = (event) => {
14 | if (dropdownRef.current && !dropdownRef.current.contains(event.target)) {
15 | setShowThemeList(false);
16 | }
17 | };
18 | useEffect(() => {
19 | // Add event listener for clicks when the dropdown is open
20 | if (showThemeList) {
21 | document.addEventListener("mousedown", handleClickOutside);
22 | } else {
23 | document.removeEventListener("mousedown", handleClickOutside);
24 | }
25 | // Clean up event listener on component unmount
26 | return () => {
27 | document.removeEventListener("mousedown", handleClickOutside);
28 | };
29 | }, [showThemeList]);
30 |
31 | return (
32 |
33 |
41 |
42 | {showThemeList && (
43 |
63 | )}
64 |
65 | );
66 | };
67 |
68 | const Navbar = () => {
69 | return (
70 |
71 |
72 |
73 | Home
74 |
75 |
76 |
77 |
78 | {/*
*/}
98 |
99 |
100 |
101 |
102 |
103 | );
104 | };
105 |
106 | export default Navbar;
107 |
--------------------------------------------------------------------------------
/client/src/components/Pagination.jsx:
--------------------------------------------------------------------------------
1 | import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
2 | import { faAnglesLeft, faAnglesRight } from "@fortawesome/free-solid-svg-icons";
3 | import { Link } from "react-router-dom";
4 |
5 | const Pagination = ({ totalCount, pageNum, pageURL, pageSize }) => {
6 | const maxPageNum = Math.ceil(totalCount / pageSize);
7 | return (
8 |
15 |
16 |
20 |
21 |
22 | {pageNum - 4 >= 1 && pageNum >= maxPageNum && (
23 |
24 | {pageNum - 4}
25 |
26 | )}
27 | {pageNum - 3 >= 1 && pageNum >= maxPageNum - 1 && (
28 |
29 | {pageNum - 3}
30 |
31 | )}
32 | {pageNum - 2 >= 1 && (
33 |
34 | {pageNum - 2}
35 |
36 | )}
37 | {pageNum - 1 >= 1 && (
38 |
39 | {pageNum - 1}
40 |
41 | )}
42 |
43 | {pageNum}
44 |
45 | {pageNum + 1 <= maxPageNum && (
46 |
47 | {pageNum + 1}
48 |
49 | )}
50 | {pageNum + 2 <= maxPageNum && (
51 |
52 | {pageNum + 2}
53 |
54 | )}
55 | {pageNum + 3 <= maxPageNum && pageNum <= 2 && (
56 |
57 | {pageNum + 3}
58 |
59 | )}
60 | {pageNum + 4 <= maxPageNum && pageNum <= 1 && (
61 |
62 | {pageNum + 4}
63 |
64 | )}
65 |
71 |
72 |
73 |
74 |
75 | );
76 | };
77 |
78 | export default Pagination;
79 |
--------------------------------------------------------------------------------
/client/src/components/charts/ContestsUserNumStackedArea.jsx:
--------------------------------------------------------------------------------
1 | import ReactEcharts from "echarts-for-react";
2 |
3 | const ContestsUserNumStackedArea = ({ contests }) => {
4 | contests.sort((a, b) => new Date(a.startTime) - new Date(b.startTime));
5 | // console.log(contests);
6 | const titles = contests.map((contest) =>
7 | contest.title.replace(/eekly Contest /g, "")
8 | );
9 | const usNums = contests.map((contest) => contest.user_num_us);
10 | const cnNums = contests.map((contest) => contest.user_num_cn);
11 | const option = {
12 | title: {
13 | text: "Number of Contest Entrants",
14 | x: "center",
15 | },
16 | color: ["#ee6666", "#5470c6"],
17 | tooltip: {
18 | trigger: "axis",
19 | axisPointer: {
20 | type: "cross",
21 | label: {
22 | backgroundColor: "#6a7985",
23 | },
24 | },
25 | },
26 | legend: {
27 | data: ["US", "CN"],
28 | left: "80%",
29 | },
30 | toolbox: {
31 | feature: {
32 | saveAsImage: {},
33 | },
34 | },
35 | grid: {
36 | left: "3%",
37 | right: "4%",
38 | bottom: "3%",
39 | containLabel: true,
40 | },
41 | xAxis: [
42 | {
43 | type: "category",
44 | boundaryGap: false,
45 | name: "Contest",
46 | // axisLabel: {
47 | // rotate: 75,
48 | // },
49 | data: titles,
50 | },
51 | ],
52 | yAxis: [
53 | {
54 | type: "value",
55 | name: "User Count",
56 | },
57 | ],
58 | series: [
59 | {
60 | name: "CN",
61 | type: "line",
62 | stack: "Total",
63 | label: {
64 | show: true,
65 | position: "top",
66 | },
67 | areaStyle: {},
68 | emphasis: {
69 | focus: "series",
70 | },
71 | data: cnNums,
72 | },
73 | {
74 | name: "US",
75 | type: "line",
76 | stack: "Total",
77 | label: {
78 | show: true,
79 | position: "top",
80 | },
81 | areaStyle: {},
82 | emphasis: {
83 | focus: "series",
84 | },
85 | data: usNums,
86 | },
87 | ],
88 | };
89 |
90 | return (
91 |
100 | );
101 | };
102 |
103 | export default ContestsUserNumStackedArea;
104 |
--------------------------------------------------------------------------------
/client/src/components/charts/QuestionFinishedChart.jsx:
--------------------------------------------------------------------------------
1 | import ReactEcharts from "echarts-for-react";
2 |
3 | const QuestionFinishedChart = ({ questionsRaw }) => {
4 | // console.log("QuestionFinishedChart questionsRaw=", questionsRaw);
5 | const questions = [...questionsRaw].sort((a, b) =>
6 | a.credit === b.credit ? a.qi - b.qi : a.credit - b.credit
7 | );
8 |
9 | const real_time_count = [["Minute", "Question", "Count"]];
10 | for (let i = 1; i <= questions.length; i++) {
11 | for (let j = 1; j <= questions[0].real_time_count?.length; j++) {
12 | real_time_count.push([
13 | j,
14 | `Q${i}`,
15 | questions[i - 1].real_time_count[j - 1],
16 | ]);
17 | }
18 | }
19 |
20 | const questionsId = ["Q1", "Q2", "Q3", "Q4"];
21 | const datasetWithFilters = [];
22 | const seriesList = [];
23 |
24 | questionsId.forEach((id) => {
25 | const datasetId = "dataset_" + id;
26 | datasetWithFilters.push({
27 | id: datasetId,
28 | fromDatasetId: "dataset_raw",
29 | transform: {
30 | type: "filter",
31 | config: {
32 | and: [{ dimension: "Question", "=": id }],
33 | },
34 | },
35 | });
36 | seriesList.push({
37 | type: "line",
38 | datasetId: datasetId,
39 | showSymbol: false,
40 | name: id,
41 | endLabel: {
42 | show: true,
43 | formatter: function (params) {
44 | return params.value[1] + ": " + params.value[2];
45 | },
46 | },
47 | labelLayout: {
48 | moveOverlap: "shiftY",
49 | },
50 | emphasis: {
51 | focus: "series",
52 | },
53 | encode: {
54 | x: "Minute",
55 | y: "Count",
56 | label: ["Question", "Count"],
57 | itemName: "Minute",
58 | tooltip: ["Count"],
59 | },
60 | });
61 | });
62 |
63 | const option = {
64 | animation: true,
65 | animationDuration: 10000,
66 | dataset: [
67 | {
68 | id: "dataset_raw",
69 | source: real_time_count,
70 | },
71 | ...datasetWithFilters,
72 | ],
73 | title: {
74 | text: "Question Finished Count",
75 | x: "center",
76 | },
77 | tooltip: {
78 | order: "valueDesc",
79 | trigger: "axis",
80 | },
81 | xAxis: {
82 | type: "category",
83 | name: "Minute",
84 | },
85 | yAxis: {
86 | name: "Accepted",
87 | // axisLabel: {
88 | // rotate: 45,
89 | // margin: 1
90 | // }
91 | },
92 | grid: {
93 | left: "70em",
94 | right: "70em",
95 | },
96 | series: seriesList,
97 | };
98 |
99 | // console.log("question option= ", option);
100 | return (
101 |
110 | );
111 | };
112 |
113 | export default QuestionFinishedChart;
114 |
--------------------------------------------------------------------------------
/client/src/components/charts/RealTimeRankChart.jsx:
--------------------------------------------------------------------------------
1 | import ReactEcharts from "echarts-for-react";
2 |
3 | const RealTimeRankChart = ({ user, rankList }) => {
4 | const realTimeRank = [["Minute", "Username", "Rank"]];
5 | for (let j = 1; j <= rankList.length; j++) {
6 | realTimeRank.push([j, user.username, rankList[j - 1]]);
7 | }
8 |
9 | const users = [user.username];
10 | const datasetWithFilters = [];
11 | const seriesList = [];
12 |
13 | // console.log("users", users);
14 | // console.log("realTimeRank", realTimeRank);
15 |
16 | users.forEach((username) => {
17 | const datasetId = "dataset_" + username;
18 | datasetWithFilters.push({
19 | id: datasetId,
20 | fromDatasetId: "dataset_raw",
21 | transform: {
22 | type: "filter",
23 | config: {
24 | and: [{ dimension: "Username", "=": username }],
25 | },
26 | },
27 | });
28 | seriesList.push({
29 | type: "line",
30 | datasetId: datasetId,
31 | showSymbol: false,
32 | name: username,
33 | endLabel: {
34 | show: true,
35 | formatter: function (params) {
36 | return params.value[1] + ": " + params.value[2];
37 | },
38 | },
39 | labelLayout: {
40 | moveOverlap: "shiftY",
41 | },
42 | emphasis: {
43 | focus: "series",
44 | },
45 | encode: {
46 | x: "Minute",
47 | y: "Rank",
48 | label: ["Username", "Rank"],
49 | itemName: "Minute",
50 | tooltip: ["Rank"],
51 | },
52 | });
53 | });
54 |
55 | const option = {
56 | animationDuration: 10000,
57 | dataset: [
58 | {
59 | id: "dataset_raw",
60 | source: realTimeRank,
61 | },
62 | ...datasetWithFilters,
63 | ],
64 | title: {
65 | text: "User Real Time Rank",
66 | x: "center",
67 | },
68 | tooltip: {
69 | order: "valueDesc",
70 | trigger: "axis",
71 | },
72 | xAxis: {
73 | type: "category",
74 | name: "Minute",
75 | },
76 | yAxis: {
77 | name: "Rank",
78 | axisLabel: {
79 | rotate: 45,
80 | margin: 1,
81 | },
82 | },
83 | grid: {
84 | // left:"70em",
85 | right: "70em",
86 | },
87 | series: seriesList,
88 | };
89 |
90 | // console.log("realTimeRank option=", option);
91 |
92 | return (
93 |
100 | );
101 | };
102 |
103 | export default RealTimeRankChart;
104 |
--------------------------------------------------------------------------------
/client/src/data/constants.js:
--------------------------------------------------------------------------------
// Base URL of the backend REST API (version v1).
export const baseUrl = "https://lccn.lbao.site/api/v1";

// daisyui themes
// NOTE: keep this list in sync with `daisyui.themes` in tailwind.config.js.
export const themes = [
  "light",
  "wireframe",
  "acid",
  "corporate",
  "nord",
  "fantasy",
  "pastel",
  "winter",
  "cyberpunk",
  "valentine",
  "dark",
  "business",
  "dracula",
  "halloween",
  "dim",
  "sunset",
];

// trend color config of different regions
// reference: https://graphicdesign.stackexchange.com/a/118989
// Each value is an "H, S%, L%" fragment meant to be interpolated into hsla(...);
// note the `up`/`down` hues are swapped for some regions relative to `default`.
export const trendColorsHSLConfig = {
  default: {
    up: "120, 100%, 50%", // green
    down: "0, 100%, 50%", // red
  },
  China: {
    up: "0, 100%, 50%", // red
    down: "120, 100%, 50%", // green
  },
  Korea: {
    up: "0, 100%, 50%", // red
    down: "240, 100%, 50%", // blue
  },
  Japan: {
    up: "0, 100%, 50%", // red
    down: "120, 100%, 50%", // green
  },
};
43 |
--------------------------------------------------------------------------------
/client/src/index.css:
--------------------------------------------------------------------------------
1 | body {
2 | margin: 0;
3 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen",
4 | "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue",
5 | sans-serif;
6 | -webkit-font-smoothing: antialiased;
7 | -moz-osx-font-smoothing: grayscale;
8 | }
9 |
10 | code {
11 | font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New",
12 | monospace;
13 | }
14 |
15 | /* Add the Tailwind directives to your CSS */
16 | @tailwind base;
17 | @tailwind components;
18 | @tailwind utilities;
19 |
20 | /* To make the footer sticky at the bottom of the page
21 | when the content height is less than the browser window height,
22 | one workaround is to set the minimum height of major-content
23 | to 100% of the viewport height.
24 | */
25 | .major-content {
26 | min-height: 100vh; /* 100% of the viewport height */
27 | }
28 |
29 | /* a thinner scrollbar for the scrollable theme list on navbar */
30 | .theme-list-scrollbar::-webkit-scrollbar {
31 | width: 1px;
32 | }
33 |
34 | .theme-list-scrollbar::-webkit-scrollbar-track {
35 | background: #f1f1f1;
36 | }
37 |
38 | .theme-list-scrollbar::-webkit-scrollbar-thumb {
39 | background: #888;
40 | border-radius: 1px;
41 | }
42 |
43 | .theme-list-scrollbar::-webkit-scrollbar-thumb:hover {
44 | background: #555;
45 | }
46 |
47 | .theme-list-scrollbar {
48 | scrollbar-width: thin;
49 | scrollbar-color: #888 #f1f1f1;
50 | }
51 |
52 | .theme-list-scrollbar::-webkit-scrollbar-corner {
53 | display: none;
54 | }
55 |
--------------------------------------------------------------------------------
/client/src/main.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import ReactDOM from "react-dom/client";
3 | import App from "./App.jsx";
4 | import "./index.css";
5 |
6 | ReactDOM.createRoot(document.getElementById("root")).render(
7 |
8 |
9 |
10 | );
11 |
--------------------------------------------------------------------------------
/client/src/pages/Contests/ContestsUserNum.jsx:
--------------------------------------------------------------------------------
1 | import ContestsUserNumStackedArea from "../../components/charts/ContestsUserNumStackedArea.jsx";
2 | import useSWR from "swr";
3 | import { baseUrl } from "../../data/constants.js";
4 |
5 | const ContestsUserNum = () => {
6 | const { data: contests } = useSWR(
7 | `${baseUrl}/contests/user-num-last-ten`,
8 | (url) => fetch(url).then((r) => r.json()),
9 | { revalidateOnFocus: false }
10 | );
11 | return (
12 | contests && (
13 |
14 |
15 |
16 | )
17 | );
18 | };
19 |
20 | export default ContestsUserNum;
21 |
--------------------------------------------------------------------------------
/client/src/pages/Predicted/PredictedContests.jsx:
--------------------------------------------------------------------------------
1 | import { useEffect } from "react";
2 | import { Link, useParams } from "react-router-dom";
3 |
4 | import useSWR from "swr";
5 | import Pagination from "../../components/Pagination";
6 | import { baseUrl } from "../../data/constants";
7 | import ContestsUserNum from "../Contests/ContestsUserNum";
8 |
9 | const ContestsTable = ({ contests }) => {
10 | return (
11 |
12 |
13 |
14 |
15 | |
16 | Predicted Contest |
17 | Started Time |
18 | Predicted Time |
19 | Official Result |
20 |
21 |
22 |
23 | {contests.map((contest, i) => (
24 |
30 | {i + 1} |
31 |
32 |
36 | {contest.title}
37 |
38 | |
39 |
40 | {new Date(contest.startTime + "Z").toLocaleString()}
41 | |
42 |
43 | {new Date(contest.predict_time + "Z").toLocaleString()}
44 | |
45 |
46 |
56 | /
57 |
67 | |
68 |
69 | ))}
70 |
71 |
72 |
73 | );
74 | };
75 |
76 | const PredictedContest = () => {
77 | useEffect(() => {
78 | document.title = "Predicted Contests";
79 | }, []);
80 |
81 | const pageSize = 10; // hard code `pageSize` temporarily
82 | const { pageNum: pageNumStr } = useParams();
83 | const pageNum = parseInt(pageNumStr) || 1;
84 | const skipNum = pageSize * (pageNum - 1);
85 |
86 | const {
87 | data: contests,
88 | isLoading,
89 | error,
90 | } = useSWR(
91 | `${baseUrl}/contests/?skip=${skipNum}&limit=${pageSize}`,
92 | (url) => fetch(url).then((r) => r.json()),
93 | { revalidateOnFocus: false }
94 | );
95 |
96 | // TODO: totalCount could +1 but won't refetch currently, a potential bug here.
97 | const { data: totalCount } = useSWR(
98 | `${baseUrl}/contests/count`,
99 | (url) => fetch(url).then((r) => r.json()),
100 | { revalidateOnFocus: false }
101 | );
102 | // console.log(`totalCount=${totalCount} pageNum=${pageNum}`);
103 |
104 | if (!contests || isLoading)
105 | return (
106 |
107 |
108 |
109 |
Loading Contests
110 |
111 |
112 | );
113 |
114 | if (error)
115 | return (
116 |
117 |
118 |
119 |
Error: {error.message}
120 |
Try Refresh
121 |
122 |
123 | );
124 |
125 | return (
126 | <>
127 |
128 | {contests ? : undefined}
129 |
135 | >
136 | );
137 | };
138 |
139 | export default PredictedContest;
140 |
--------------------------------------------------------------------------------
/client/src/pages/Predicted/PredictedRecords.jsx:
--------------------------------------------------------------------------------
1 | import { useState, useEffect } from "react";
2 | import { useParams, useNavigate } from "react-router-dom";
3 |
4 | import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
5 | import {
6 | faUser,
7 | faSearch,
8 | faLeftLong,
9 | } from "@fortawesome/free-solid-svg-icons";
10 |
11 | import useSWR from "swr";
12 | import Pagination from "../../components/Pagination";
13 | import QuestionFinishedChart from "../../components/charts/QuestionFinishedChart";
14 | import RealTimeRankChart from "../../components/charts/RealTimeRankChart";
15 | import { baseUrl } from "../../data/constants";
16 | import { trendColorsHSL } from "../../utils";
17 |
18 | const PredictedRecordsSearch = ({
19 | titleSlug,
20 | setPredictedRecordsURL,
21 | isSearching,
22 | setIsSearching,
23 | }) => {
24 | const [userName, setUserName] = useState("");
25 |
26 | const handleSubmit = (e) => {
27 | e.preventDefault();
28 | setPredictedRecordsURL(
29 | `${baseUrl}/contest-records/user?contest_name=${titleSlug}&username=${userName}&archived=false`
30 | );
31 | setIsSearching(true);
32 | };
33 |
34 | const navigate = useNavigate();
35 | const handleBack = (e) => {
36 | if (isSearching) {
37 | e.preventDefault();
38 | setIsSearching(false);
39 | } else {
40 | navigate("/");
41 | }
42 | };
43 |
44 | return (
45 |
85 | );
86 | };
87 |
88 | const PredictedRecordsTable = ({ predictedRecords, setUser }) => {
89 | return (
90 |
91 |
92 |
93 |
94 | Rank |
95 | Username |
96 | Region |
97 | Old Rating |
98 | Delta |
99 | New Rating |
100 | Trend |
101 |
102 |
103 |
104 | {predictedRecords.map((record, i) => (
105 |
106 | #{record.rank} |
107 |
108 | {record.data_region === "CN" ? (
109 |
115 | {record.username}
116 |
117 | ) : (
118 |
124 | {record.username}
125 |
126 | )}
127 | |
128 | {record.country_name} |
129 |
130 | {record.old_rating.toFixed(2)}
131 | |
132 | 0
136 | ? `hsla(${trendColorsHSL.up}, ${
137 | 0.1 + record.delta_rating / 100
138 | })`
139 | : `hsla(${trendColorsHSL.down}, ${
140 | 0.2 - record.delta_rating / 100
141 | })`,
142 | }}
143 | >
144 | {record.delta_rating > 0
145 | ? "+" + record.delta_rating.toFixed(2)
146 | : record.delta_rating.toFixed(2)}
147 | |
148 | {record.new_rating.toFixed(2)} |
149 |
150 |
162 | |
163 |
164 | ))}
165 |
166 |
167 |
168 | );
169 | };
170 |
171 | const PredictedRecords = () => {
172 | const pageSize = 25; // hard code `pageSize` temporarily
173 |
174 | const { titleSlug, pageNum: pageNumStr } = useParams();
175 | const pageNum = parseInt(pageNumStr) || 1;
176 | const skipNum = pageSize * (pageNum - 1);
177 |
178 | const [predictedRecordsURL, setPredictedRecordsURL] = useState(null);
179 | const [isSearching, setIsSearching] = useState(false);
180 | const [user, setUser] = useState(null);
181 |
182 | useEffect(() => {
183 | document.title = `${titleSlug} 🔮`;
184 | }, [titleSlug]);
185 |
186 | useEffect(() => {
187 | if (!isSearching) {
188 | setPredictedRecordsURL(
189 | `${baseUrl}/contest-records/?contest_name=${titleSlug}&archived=false&skip=${skipNum}&limit=${pageSize}`
190 | );
191 | }
192 | setUser(null);
193 | }, [pageNum, isSearching]);
194 |
195 | const { data: totalCount } = useSWR(
196 | `${baseUrl}/contest-records/count?contest_name=${titleSlug}&archived=false`,
197 | (url) => fetch(url).then((r) => r.json()),
198 | { revalidateOnFocus: false }
199 | );
200 |
201 | const { data: questionsRaw } = useSWR(
202 | [
203 | `${baseUrl}/questions/`,
204 | JSON.stringify({
205 | contest_name: titleSlug,
206 | }),
207 | ],
208 | ([url, body]) =>
209 | fetch(url, {
210 | method: "POST",
211 | headers: { "Content-Type": "application/json" },
212 | body: body,
213 | }).then((r) => r.json()),
214 | { revalidateOnFocus: false }
215 | );
216 | // console.log(`questionsRaw=${questionsRaw}`);
217 |
218 | // console.log(`predictedRecordsURL=${predictedRecordsURL}`);
219 | const {
220 | data: predictedRecords,
221 | isLoading,
222 | error,
223 | } = useSWR(predictedRecordsURL, (url) => fetch(url).then((r) => r.json()), {
224 | revalidateOnFocus: false,
225 | });
226 |
227 | // if (predictedRecordsURL === null) return;
228 | // console.log(`predictedRecords=${predictedRecords}`);
229 |
230 | // console.log(`user=${user} ${user?.username} ${user?.data_region}`);
231 |
232 | const { data: rankData } = useSWR(
233 | user
234 | ? [
235 | `${baseUrl}/contest-records/real-time-rank`,
236 | JSON.stringify({
237 | contest_name: titleSlug,
238 | user: user,
239 | }),
240 | ]
241 | : null,
242 | ([url, body]) =>
243 | fetch(url, {
244 | method: "POST",
245 | headers: { "Content-Type": "application/json" },
246 | body: body,
247 | }).then((r) => r.json()),
248 | { revalidateOnFocus: false }
249 | );
250 | const rankList = rankData?.real_time_rank;
251 | // console.log(`rankData=${rankData} rankList=${rankList} ${rankList?.length} ${!rankList}`)
252 |
253 | if (!predictedRecords || isLoading)
254 | return (
255 |
256 |
257 |
258 |
Loading Records
259 |
260 |
261 | );
262 |
263 | if (error)
264 | return (
265 |
266 |
267 |
272 |
Error! Try to revisit later!
273 |
274 |
275 | );
276 |
277 | return (
278 | <>
279 | {questionsRaw && (
280 |
281 |
282 |
283 | )}
284 |
285 | {titleSlug.split("-").join(" ")}
286 |
287 |
293 |
297 | {!isSearching && (
298 |
304 | )}
305 |
306 |
307 |
320 | >
321 | );
322 | };
323 |
324 | export default PredictedRecords;
325 |
--------------------------------------------------------------------------------
/client/src/utils.js:
--------------------------------------------------------------------------------
1 | import { trendColorsHSLConfig } from "./data/constants";
2 |
// Ordered lookup: primary language sub-tag prefix -> region key in
// `trendColorsHSLConfig`. List of common primary language sub-tags:
// https://en.wikipedia.org/wiki/IETF_language_tag#List_of_common_primary_language_subtags
const regionByLanguagePrefix = [
  ["zh", "China"],
  ["ko", "Korea"],
  ["ja", "Japan"],
];

function getTrendColorsHSL() {
  // get browser language config
  const language = navigator.language;
  const match = regionByLanguagePrefix.find(([prefix]) =>
    language.startsWith(prefix)
  );
  return trendColorsHSLConfig[match ? match[1] : "default"];
}

export const trendColorsHSL = getTrendColorsHSL();
15 |
--------------------------------------------------------------------------------
/client/tailwind.config.js:
--------------------------------------------------------------------------------
/** @type {import('tailwindcss').Config} */
export default {
  // Files scanned for class names when generating the CSS bundle.
  content: ["./index.html", "./src/**/*.{js,ts,jsx,tsx}"],
  theme: {
    extend: {},
  },
  plugins: [require("daisyui")],
  daisyui: {
    // NOTE: keep this list in sync with `themes` in src/data/constants.js.
    themes: [
      "light",
      "wireframe",
      "acid",
      "corporate",
      "nord",
      "fantasy",
      "pastel",
      "winter",
      "cyberpunk",
      "valentine",
      "dark",
      "business",
      "dracula",
      "halloween",
      "dim",
      "sunset",
    ],
  },
};
29 |
--------------------------------------------------------------------------------
/client/vite.config.js:
--------------------------------------------------------------------------------
import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";

// https://vitejs.dev/config/
export default defineConfig({
  plugins: [react()],
  server: {
    // Listen on all interfaces so the dev server is reachable from
    // other hosts/containers, not just localhost.
    host: "0.0.0.0",
    port: 12345,
  },
});
12 |
--------------------------------------------------------------------------------
/config.yaml.template:
--------------------------------------------------------------------------------
# Template configuration — copy to `config.yaml` and fill in real values.
loguru:
  # Log sink for the predictor process.
  main:
    sink: './log/lccn_predictor_main.log'
    level: INFO
    rotation: 'saturday at 12:00'
  # Log sink for the API process.
  api:
    sink: './log/api/lccn_predictor_api.log'
    level: INFO
    rotation: '00:00'
mongodb:
  ip: 127.0.0.1
  port: 27017
  username: 'username'
  password: 'password'
  db: lccn
fastapi:
  # Origins allowed by the CORS middleware.
  CORS_allow_origins:
    - "http://localhost:3000"
    - "https://lccn.lbao.site"
20 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from loguru import logger
4 |
5 | from app.db.mongodb import start_async_mongodb
6 | from app.schedulers import start_scheduler
7 | from app.utils import start_loguru
8 |
9 |
async def start() -> None:
    """Bootstrap the predictor: configure logging, connect MongoDB, start the scheduler."""
    # Configure loguru first so the remaining startup steps are captured by the sinks.
    start_loguru()
    await start_async_mongodb()
    await start_scheduler()
    logger.success("started all entry functions")
15 |
16 |
if __name__ == "__main__":
    # Schedule `start()` on a fresh event loop, then keep the loop running
    # forever so background tasks continue executing after startup finishes.
    loop = asyncio.new_event_loop()
    loop.create_task(start())
    try:
        loop.run_forever()
    except (KeyboardInterrupt, SystemExit) as e:
        logger.critical(f"Closing loop. {e=}")
    finally:
        # Always release the loop's resources, even on interrupt.
        loop.close()
        logger.critical("Closed loop.")
27 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | annotated-types==0.7.0
2 | anyio==4.4.0
3 | APScheduler==3.10.4
4 | beanie==1.26.0
5 | certifi==2024.8.30
6 | click==8.1.7
7 | dnspython==2.6.1
8 | exceptiongroup==1.2.2
9 | fastapi==0.114.2
10 | h11==0.14.0
11 | httpcore==1.0.5
12 | httpx==0.27.2
13 | idna==3.10
14 | iniconfig==2.0.0
15 | lazy-model==0.2.0
16 | llvmlite==0.43.0
17 | loguru==0.7.2
18 | motor==3.5.1
19 | numba==0.60.0
20 | numpy==2.0.2
21 | packaging==24.1
22 | pluggy==1.5.0
23 | pydantic==2.9.1
24 | pydantic_core==2.23.3
25 | pymongo==4.8.0
26 | pytest==8.3.3
27 | pytz==2024.2
28 | PyYAML==6.0.2
29 | scipy==1.14.1
30 | six==1.16.0
31 | sniffio==1.3.1
32 | starlette==0.38.5
33 | toml==0.10.2
34 | tomli==2.0.1
35 | typing_extensions==4.12.2
36 | tzlocal==5.2
37 | uvicorn==0.30.6
38 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/tests/__init__.py
--------------------------------------------------------------------------------
/tests/app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/tests/app/__init__.py
--------------------------------------------------------------------------------
/tests/app/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/tests/app/core/__init__.py
--------------------------------------------------------------------------------
/tests/app/core/test_elo.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from app.core.elo import elo_delta
5 | from tests.utils import RATING_DELTA_PRECISION, read_data_contest_prediction_first
6 |
7 |
@pytest.fixture
def data_contest_prediction_first():
    """Provide the recorded (ks, ranks, old_ratings, new_ratings) arrays for the first contest prediction."""
    return read_data_contest_prediction_first()
11 |
12 |
def test_elo_delta(data_contest_prediction_first):
    """
    Check that elo_delta reproduces the recorded post-contest ratings.

    Raises:
        AssertionError: If any participant's error exceeds the precision limit.
    """
    ks, ranks, old_ratings, expected_new_ratings = data_contest_prediction_first

    # Apply the predicted deltas to the pre-contest ratings and compare
    # against the recorded post-contest ratings, per participant.
    predicted_new_ratings = old_ratings + elo_delta(ranks, old_ratings, ks)
    deviations = np.abs(expected_new_ratings - predicted_new_ratings)

    assert (
        deviations < RATING_DELTA_PRECISION
    ).all(), f"Elo delta test failed. Some errors are not within {RATING_DELTA_PRECISION=}."
30 |
--------------------------------------------------------------------------------
/tests/app/core/test_fft.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from app.core.fft import fft_delta
5 | from tests.utils import RATING_DELTA_PRECISION, read_data_contest_prediction_first
6 |
7 |
@pytest.fixture
def data_contest_prediction_first():
    """Return the recorded (ks, ranks, old_ratings, new_ratings) fixture arrays."""
    return read_data_contest_prediction_first()
11 |
12 |
def test_fft_delta(data_contest_prediction_first):
    """
    Check that fft_delta reproduces the recorded post-contest ratings.

    Raises:
        AssertionError: If any participant's error exceeds the precision limit.
    """
    ks, ranks, old_ratings, expected_new_ratings = data_contest_prediction_first

    # Apply the predicted deltas to the pre-contest ratings and compare
    # against the recorded post-contest ratings, per participant.
    predicted_new_ratings = old_ratings + fft_delta(ranks, old_ratings, ks)
    deviations = np.abs(expected_new_ratings - predicted_new_ratings)

    assert (
        deviations < RATING_DELTA_PRECISION
    ).all(), f"FFT delta test failed. Some errors are not within {RATING_DELTA_PRECISION=}."
30 |
--------------------------------------------------------------------------------
/tests/tests_data/contest_prediction_1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baoliay2008/lccn_predictor/db5332a3b145720bf902df3d366edbaf2c478159/tests/tests_data/contest_prediction_1.npy
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | from typing import Final
2 |
3 | import numpy as np
4 |
# Maximum tolerated absolute error, per participant, between a predicted
# rating delta and the recorded ground-truth delta.
RATING_DELTA_PRECISION: Final[float] = 0.05
7 |
8 |
def read_data_contest_prediction_first(
    path: str = "tests/tests_data/contest_prediction_1.npy",
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Load a recorded contest-prediction dataset from a ``.npy`` fixture file.

    The file stores one row per participant with four columns:
    k-factor, contest rank, rating before the contest, and rating after.

    Args:
        path: Location of the ``.npy`` fixture. Defaults to the bundled
            first-contest dataset, so existing zero-argument callers are
            unchanged.

    Returns:
        Tuple of four 1-D arrays: (ks, ranks, old_ratings, new_ratings).
    """
    # np.load accepts a filename directly; no need to manage a file handle.
    data = np.load(path)
    return data[:, 0], data[:, 1], data[:, 2], data[:, 3]
17 |
--------------------------------------------------------------------------------