├── utils
│   ├── __init__.py
│   └── utils.py
├── runtime.txt
├── requirements.txt
├── vercel.json
├── docker-compose.yml
├── api
│   ├── scrapers
│   │   ├── __init__.py
│   │   ├── health.py
│   │   ├── news.py
│   │   ├── stats.py
│   │   ├── rankings.py
│   │   ├── events.py
│   │   └── matches.py
│   └── scrape.py
├── .github
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   └── bug-report.yml
│   └── workflows
│       ├── docker_dev.yml
│       └── docker.yml
├── Dockerfile
├── main.py
├── LICENSE
├── .gitignore
├── routers
│   └── vlr_router.py
└── README.md

/utils/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/runtime.txt:
--------------------------------------------------------------------------------
python-3.9.5
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
requests==2.32.4
uvicorn==0.34.3
fastapi==0.115.13
lxml==5.4.0
slowapi==0.1.9
selectolax==0.3.29
--------------------------------------------------------------------------------
/vercel.json:
--------------------------------------------------------------------------------
{
  "builds": [{ "src": "main.py", "use": "@vercel/python" }],
  "routes": [{ "src": "/(.*)", "dest": "main.py" }]
}
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
version: "3"

services:
  bot:
    container_name: "vlrggapi"
    ports:
      - "3001:3001"
    build: .
--------------------------------------------------------------------------------
/api/scrapers/__init__.py:
--------------------------------------------------------------------------------
from .news import vlr_news
from .rankings import vlr_rankings
from .stats import vlr_stats
from .matches import vlr_upcoming_matches, vlr_live_score, vlr_match_results
from .events import vlr_events
from .health import check_health
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
version: 2
updates:
  - package-ecosystem: pip
    directory: "/"
    schedule:
      interval: weekly
    open-pull-requests-limit: 3
    allow:
      - dependency-type: "production"
    target-branch: "dev"
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: weekly
    open-pull-requests-limit: 2
    target-branch: "dev"
--------------------------------------------------------------------------------
/utils/utils.py:
--------------------------------------------------------------------------------
headers = {
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
}


# Maps the API's region shortnames to the slugs used in vlr.gg URLs.
region = {
    "na": "north-america",
    "eu": "europe",
    "ap": "asia-pacific",
    "la": "latin-america",
    "la-s": "la-s",
    "la-n": "la-n",
    "oce": "oceania",
    "kr": "korea",
    "mn": "mena",
    "gc": "gc",
    "br": "brazil",
    "cn": "china",
    "jp": "japan",
    "col": "collegiate",
}
--------------------------------------------------------------------------------
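The region table above is consumed by the rankings scraper further down, which appends the slug to a vlr.gg URL. A minimal sketch of that lookup ("na" is just an illustrative key):

    from utils.utils import region

    shortname = "na"  # any key from the table above
    url = "https://www.vlr.gg/rankings/" + region[shortname]
    # -> "https://www.vlr.gg/rankings/north-america"
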
/Dockerfile:
--------------------------------------------------------------------------------
FROM tiangolo/uvicorn-gunicorn:python3.9-alpine3.14 AS base

RUN mkdir -p /vlrggapi

WORKDIR /vlrggapi

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt


FROM tiangolo/uvicorn-gunicorn:python3.9-alpine3.14 AS final

WORKDIR /vlrggapi
COPY --from=base /usr/local/lib/python3.9/site-packages /usr/local/lib/python3.9/site-packages
COPY . .

RUN apk add --no-cache curl

CMD ["python", "main.py"]
HEALTHCHECK --interval=5s --timeout=3s CMD curl --fail http://127.0.0.1:3001/health || exit 1
--------------------------------------------------------------------------------
/api/scrapers/health.py:
--------------------------------------------------------------------------------
import requests


def check_health():
    sites = ["https://vlrggapi.vercel.app", "https://vlr.gg"]
    results = {}
    for site in sites:
        try:
            response = requests.get(site, timeout=5)
            results[site] = {
                "status": "Healthy" if response.status_code == 200 else "Unhealthy",
                "status_code": response.status_code,
            }
        except requests.RequestException:
            results[site] = {"status": "Unhealthy", "status_code": None}
    return results
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import logging

import uvicorn
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address

from routers.vlr_router import router as vlr_router

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="vlrggapi",
    description="An unofficial REST API for [vlr.gg](https://www.vlr.gg/), a site for Valorant esports match and news coverage. Made by [axsddlr](https://github.com/axsddlr)",
    docs_url="/",
    redoc_url=None,
)


limiter = Limiter(key_func=get_remote_address)
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
app.include_router(vlr_router)


@app.get("/", include_in_schema=False)
def root():
    return RedirectResponse(url="/docs")


if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=3001)
--------------------------------------------------------------------------------
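With the app bound to port 3001, a quick local smoke test can be run against the health route (a sketch; it assumes an instance is already running locally via `python main.py` or docker-compose):

    import requests

    # /health is defined in routers/vlr_router.py; it pings vlr.gg and the
    # hosted instance and reports a status dict per site.
    resp = requests.get("http://127.0.0.1:3001/health", timeout=10)
    print(resp.json())
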
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021-2024 Andre Saddler

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/api/scrapers/news.py:
--------------------------------------------------------------------------------
import requests
from selectolax.parser import HTMLParser

from utils.utils import headers


def vlr_news():
    url = "https://www.vlr.gg/news"
    resp = requests.get(url, headers=headers)
    html = HTMLParser(resp.text)
    status = resp.status_code

    result = []
    for item in html.css("a.wf-module-item"):
        date_author = item.css_first("div.ge-text-light").text()
        date, author = date_author.split("by")

        desc = item.css_first("div").css_first("div:nth-child(2)").text().strip()

        title = item.css_first("div:nth-child(1)").text().strip().split("\n")[0]
        title = title.replace("\t", "")

        # The matched node is the anchor itself, so read its href directly
        # (this also avoids shadowing the `url` variable above).
        url_path = item.attributes["href"]

        result.append(
            {
                "title": title,
                "description": desc,
                "date": date.split("\u2022")[1].strip(),
                "author": author.strip(),
                "url_path": "https://vlr.gg" + url_path,
            }
        )

    data = {"data": {"status": status, "segments": result}}

    if status != 200:
        raise Exception("API response: {}".format(status))
    return data
--------------------------------------------------------------------------------
/api/scrape.py:
--------------------------------------------------------------------------------
from api.scrapers import (
    check_health,
    vlr_events,
    vlr_live_score,
    vlr_match_results,
    vlr_news,
    vlr_rankings,
    vlr_stats,
    vlr_upcoming_matches,
)


class Vlr:
    @staticmethod
    def vlr_news():
        return vlr_news()

    @staticmethod
    def vlr_rankings(region):
        return vlr_rankings(region)

    @staticmethod
    def vlr_stats(region: str, timespan: str):
        return vlr_stats(region, timespan)

    @staticmethod
    def vlr_upcoming_matches(num_pages=1, from_page=None, to_page=None):
        return vlr_upcoming_matches(num_pages, from_page, to_page)

    @staticmethod
    def vlr_live_score(num_pages=1, from_page=None, to_page=None):
        return vlr_live_score(num_pages, from_page, to_page)

    @staticmethod
    def vlr_match_results(num_pages=1, from_page=None, to_page=None, max_retries=3, request_delay=1.0, timeout=30):
        return vlr_match_results(num_pages, from_page, to_page, max_retries, request_delay, timeout)

    @staticmethod
    def vlr_events(upcoming=True, completed=True, page=1):
        return vlr_events(upcoming, completed, page)

    @staticmethod
    def check_health():
        return check_health()


if __name__ == "__main__":
    print(Vlr.vlr_live_score())
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.yml:
--------------------------------------------------------------------------------
name: 🐞 Bug report
description: Create a report to help us improve
labels: [bug]
body:
  - type: input
    id: describe-the-bug
    attributes:
      label: Describe the bug
      description: |
        A clear and concise description of what the bug is.
      placeholder: |
        Example: "This endpoint is not working..."
    validations:
      required: true

  - type: textarea
    id: reproduce-steps
    attributes:
      label: Steps to reproduce
      description: Provide an example of the issue.
      placeholder: |
        Example:
          1. First step
          2. Second step
          3. Issue here
    validations:
      required: true

  - type: textarea
    id: expected-behavior
    attributes:
      label: Expected behavior
      placeholder: |
        Example:
          "This should happen..."
    validations:
      required: true

  - type: textarea
    id: actual-behavior
    attributes:
      label: Actual behavior
      placeholder: |
        Example:
          "This happened instead..."
    validations:
      required: true

  - type: textarea
    id: additional-context
    attributes:
      label: Additional context
      description: |
        Add any other context about the problem here.
      placeholder: |
        Example:
          "Also ..."
--------------------------------------------------------------------------------
/.github/workflows/docker_dev.yml:
--------------------------------------------------------------------------------
name: vlrggapi_dev

on:
  # run it on push to the default repository branch
  push:
    branches: [dev]
  # run it during pull request
  pull_request:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  # define job to build and publish docker image
  build-and-push-docker-image:
    name: Build Docker image and push to repositories
    # run only when code is compiling and tests are passing
    runs-on: ubuntu-latest

    # steps to perform in job
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      # setup Docker build action
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to GitHub Packages
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GHCR_PAT }}

      - name: Build image and push to Docker Hub and GitHub Container Registry
        id: docker_build
        uses: docker/build-push-action@v6
        with:
          # linux/arm64 already covers ARMv8, so it is not listed separately
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          # relative path to the place where source code with Dockerfile is located
          context: ./
          # Note: tags has to be all lower-case
          tags: ghcr.io/axsddlr/vlrggapi:dev
          # build on feature branches, push only on the dev branch
          push: ${{ github.ref == 'refs/heads/dev' }}

      - name: Image digest
        run: echo ${{ steps.docker_build.outputs.digest }}
--------------------------------------------------------------------------------
/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
name: vlrggapi_latest

on:
  # run it on push to the default repository branch
  push:
    branches: [master]
  # run it during pull request
  pull_request:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  # define job to build and publish docker image
  build-and-push-docker-image:
    name: Build Docker image and push to repositories
    # run only when code is compiling and tests are passing
    runs-on: ubuntu-latest

    # steps to perform in job
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      # setup Docker build action
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to GitHub Packages
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GHCR_PAT }}

      - name: Build image and push to Docker Hub and GitHub Container Registry
        id: docker_build
        uses: docker/build-push-action@v6
        with:
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          # relative path to the place where source code with Dockerfile is located
          context: ./
          # Note: tags has to be all lower-case
          tags: ${{ env.REGISTRY }}/${{ github.repository }}:latest
          # build on feature branches, push only on the master branch
          push: ${{ github.ref == 'refs/heads/master' }}

      # 24-July-2023 Update: Use the new environment files for state and output
      - name: Save state
        run: echo "name=value" >> $GITHUB_STATE

      - name: Set output
        run: echo "name=value" >> $GITHUB_OUTPUT

      - name: Image digest
        run: echo ${{ steps.docker_build.outputs.digest }}
--------------------------------------------------------------------------------
/api/scrapers/stats.py:
--------------------------------------------------------------------------------
import requests
from selectolax.parser import HTMLParser

from utils.utils import headers


def vlr_stats(region: str, timespan: str):
    base_url = f"https://www.vlr.gg/stats/?event_group_id=all&event_id=all&region={region}&country=all&min_rounds=200&min_rating=1550&agent=all&map_id=all"
    url = (
        f"{base_url}&timespan=all"
        if timespan.lower() == "all"
        else f"{base_url}&timespan={timespan}d"
    )

    resp = requests.get(url, headers=headers)
    html = HTMLParser(resp.text)
    status = resp.status_code

    result = []
    for item in html.css("tbody tr"):
        player = item.text().replace("\t", "").replace("\n", " ").strip().split()
        player_name = player[0]
        org = player[1] if len(player) > 1 else "N/A"

        agents = [
            agent.attributes["src"].split("/")[-1].split(".")[0]
            for agent in item.css("td.mod-agents img")
        ]
        color_sq = [stat.text() for stat in item.css("td.mod-color-sq")]
        rnd = item.css_first("td.mod-rnd").text()

        result.append(
            {
                "player": player_name,
                "org": org,
                "agents": agents,
                "rounds_played": rnd,
                "rating": color_sq[0],
                "average_combat_score": color_sq[1],
                "kill_deaths": color_sq[2],
                "kill_assists_survived_traded": color_sq[3],
                "average_damage_per_round": color_sq[4],
                "kills_per_round": color_sq[5],
                "assists_per_round": color_sq[6],
                "first_kills_per_round": color_sq[7],
                "first_deaths_per_round": color_sq[8],
                "headshot_percentage": color_sq[9],
                "clutch_success_percentage": color_sq[10],
            }
        )

    segments = {"status": status, "segments": result}
    data = {"data": segments}

    if status != 200:
        raise Exception("API response: {}".format(status))
    return data
--------------------------------------------------------------------------------
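A quick way to exercise this scraper directly (a sketch; it assumes the script is run from the repository root with the dependencies installed, and that vlr.gg is reachable):

    from api.scrapers.stats import vlr_stats

    # "na" and "30" are illustrative inputs: North America, last 30 days
    data = vlr_stats("na", "30")
    print(data["data"]["status"], len(data["data"]["segments"]))
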
/api/scrapers/rankings.py:
--------------------------------------------------------------------------------
import re

import requests
from selectolax.parser import HTMLParser

from utils.utils import headers, region


def vlr_rankings(region_key):
    url = "https://www.vlr.gg/rankings/" + region[str(region_key)]
    resp = requests.get(url, headers=headers)
    html = HTMLParser(resp.text)
    status = resp.status_code

    result = []
    for item in html.css("div.rank-item"):
        rank = item.css_first("div.rank-item-rank-num").text().strip()
        team = item.css_first("div.ge-text").text().split("#")[0]
        logo = item.css_first("a.rank-item-team").css_first("img").attributes["src"]
        logo = re.sub(r"\/img\/vlr\/tmp\/vlr.png", "", logo)
        country = item.css_first("div.rank-item-team-country").text()
        last_played = (
            item.css_first("a.rank-item-last")
            .text()
            .replace("\n", "")
            .replace("\t", "")
            .split("v")[0]
        )
        last_played_team = (
            item.css_first("a.rank-item-last")
            .text()
            .replace("\t", "")
            .replace("\n", "")
            .split("o")[1]
            .replace(".", ". ")
        )
        last_played_team_logo = (
            item.css_first("a.rank-item-last").css_first("img").attributes["src"]
        )
        record = (
            item.css_first("div.rank-item-record")
            .text()
            .replace("\t", "")
            .replace("\n", "")
        )
        earnings = (
            item.css_first("div.rank-item-earnings")
            .text()
            .replace("\t", "")
            .replace("\n", "")
        )

        result.append(
            {
                "rank": rank,
                "team": team.strip(),
                "country": country,
                "last_played": last_played.strip(),
                "last_played_team": last_played_team.strip(),
                "last_played_team_logo": last_played_team_logo,
                "record": record,
                "earnings": earnings,
                "logo": logo,
            }
        )

    data = {"status": status, "data": result}

    if status != 200:
        raise Exception("API response: {}".format(status))
    return data
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Config
config.py
config.json
info.json
headers.json
graph.png
config.ini
lock.lock
app.db
*.code-workspace

# pycharm
.idea

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
--------------------------------------------------------------------------------
/routers/vlr_router.py:
--------------------------------------------------------------------------------
from fastapi import APIRouter, Query, Request
from slowapi import Limiter
from slowapi.util import get_remote_address

from api.scrape import Vlr

router = APIRouter()
limiter = Limiter(key_func=get_remote_address)
vlr = Vlr()


@router.get("/news")
@limiter.limit("600/minute")
async def VLR_news(request: Request):
    return vlr.vlr_news()


@router.get("/stats")
@limiter.limit("600/minute")
async def VLR_stats(
    request: Request,
    region: str = Query(..., description="Region shortname"),
    timespan: str = Query(..., description="Timespan (30, 60, 90, or all)"),
):
    """
    Get VLR stats with query parameters.

    region shortnames:\n
    "na": "north-america",\n
    "eu": "europe",\n
    "ap": "asia-pacific",\n
    "sa": "latin-america",\n
    "jp": "japan",\n
    "oce": "oceania",\n
    "mn": "mena"\n
    """
    return vlr.vlr_stats(region, timespan)


@router.get("/rankings")
@limiter.limit("600/minute")
async def VLR_ranks(
    request: Request, region: str = Query(..., description="Region shortname")
):
    """
    Get VLR rankings for a specific region.

    region shortnames:\n
    "na": "north-america",\n
    "eu": "europe",\n
    "ap": "asia-pacific",\n
    "la": "latin-america",\n
    "la-s": "la-s",\n
    "la-n": "la-n",\n
    "oce": "oceania",\n
    "kr": "korea",\n
    "mn": "mena",\n
    "gc": "game-changers",\n
    "br": "brazil",\n
    "cn": "china",\n
    "jp": "japan",\n
    "col": "collegiate"\n
    """
    return vlr.vlr_rankings(region)


@router.get("/match")
@limiter.limit("600/minute")
async def VLR_match(
    request: Request,
    q: str,
    num_pages: int = Query(1, description="Number of pages to scrape (default: 1)", ge=1, le=600),
    from_page: int = Query(None, description="Starting page number (1-based, optional)", ge=1, le=600),
    to_page: int = Query(None, description="Ending page number (1-based, inclusive, optional)", ge=1, le=600),
    max_retries: int = Query(3, description="Maximum retry attempts per page (default: 3)", ge=1, le=5),
    request_delay: float = Query(1.0, description="Delay between requests in seconds (default: 1.0)", ge=0.5, le=5.0),
    timeout: int = Query(30, description="Request timeout in seconds (default: 30)", ge=10, le=120),
):
    """
    query parameters:\n
    "upcoming": upcoming matches,\n
    "live_score": live match scores,\n
    "results": match results\n

    Page Range Options:
    - num_pages: Number of pages from page 1 (ignored if from_page/to_page is specified)
    - from_page: Starting page number (1-based, optional)
    - to_page: Ending page number (1-based, inclusive, optional)

    Additional parameters for robust scraping:
    - max_retries: Maximum retry attempts per failed page (1-5, default: 3)
    - request_delay: Delay between requests in seconds (0.5-5.0, default: 1.0)
    - timeout: Request timeout in seconds (10-120, default: 30)

    Examples:
    - /match?q=results&num_pages=5 (scrapes pages 1-5)
    - /match?q=results&from_page=10&to_page=15 (scrapes pages 10-15)
    - /match?q=results&from_page=5&num_pages=3 (scrapes pages 5-7)
    """
    if q == "upcoming":
        return vlr.vlr_upcoming_matches(num_pages, from_page, to_page)
    elif q == "live_score":
        return vlr.vlr_live_score(num_pages, from_page, to_page)
    elif q == "results":
        return vlr.vlr_match_results(num_pages, from_page, to_page, max_retries, request_delay, timeout)
    else:
        return {"error": "Invalid query parameter"}


@router.get("/events")
@limiter.limit("600/minute")
async def VLR_events(
    request: Request,
    q: str = Query(
        None,
        description="Event type filter",
        example="completed",
        enum=["upcoming", "completed"],
    ),
    page: int = Query(
        1,
        description="Page number for pagination (only applies to completed events)",
        example=1,
        ge=1,
        le=100,
    ),
):
    """
    Get Valorant events from VLR.GG with optional filtering and pagination.

    ## Event Types:
    - **upcoming**: Currently active or scheduled future events
    - **completed**: Historical events that have finished
    - **default**: Both upcoming and completed events (when the q parameter is omitted)

    ## Pagination:
    - Only applies to **completed events**
    - Upcoming events are always taken from the first page
    - Page numbers range from 1 to 100
    - Each page contains approximately 25-30 events

    ## Usage Examples:
    - `GET /events` - All events (upcoming + completed page 1)
    - `GET /events?q=upcoming` - Only upcoming events
    - `GET /events?q=completed` - Only completed events (page 1)
    - `GET /events?q=completed&page=3` - Completed events from page 3
    - `GET /events?page=2` - All events (upcoming + completed page 2)

    ## Response Format:
    Returns event details including title, status, prize pool, dates, region, thumbnail, and event URL.
    """
    if q == "upcoming":
        return vlr.vlr_events(upcoming=True, completed=False, page=page)
    elif q == "completed":
        return vlr.vlr_events(upcoming=False, completed=True, page=page)
    else:
        return vlr.vlr_events(upcoming=True, completed=True, page=page)


@router.get("/health")
def health():
    return vlr.check_health()
--------------------------------------------------------------------------------
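Taken together, the router can be exercised end to end with a small client script (a sketch; it assumes a local instance on port 3001 and reuses parameter values from the docstrings above):

    import requests

    BASE = "http://127.0.0.1:3001"

    news = requests.get(f"{BASE}/news", timeout=30).json()
    ranks = requests.get(f"{BASE}/rankings", params={"region": "na"}, timeout=30).json()
    # Mirrors the /match docstring example: scrape results pages 10-15
    results = requests.get(
        f"{BASE}/match",
        params={"q": "results", "from_page": 10, "to_page": 15},
        timeout=120,
    ).json()
    print(len(news["data"]["segments"]), len(ranks["data"]))
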
/api/scrapers/events.py:
--------------------------------------------------------------------------------
import re
import requests
from selectolax.parser import HTMLParser

from utils.utils import headers


def vlr_events(upcoming=True, completed=True, page=1):
    """
    Get Valorant events from VLR.GG

    Args:
        upcoming (bool): If True, include upcoming events
        completed (bool): If True, include completed events
        page (int): Page number for pagination (only applies to completed events)

    Returns:
        dict: Response with status code and events data
    """
    # Build URL with pagination for completed events
    if completed and page > 1:
        url = f"https://www.vlr.gg/events/?page={page}"
    else:
        url = "https://www.vlr.gg/events"

    resp = requests.get(url, headers=headers)
    html = HTMLParser(resp.text)
    status = resp.status_code

    # If both are False, show both (default behavior)
    if not upcoming and not completed:
        upcoming = True
        completed = True

    events = []

    def parse_events(container):
        """Helper function to parse event cards"""
        for event_item in container.css("a.event-item"):
            title = event_item.css_first(".event-item-title")
            title = title.text(strip=True) if title else ""

            status_elem = event_item.css_first(".event-item-desc-item-status")
            event_status = status_elem.text(strip=True) if status_elem else ""

            # Prize - extract monetary value or TBD (before the nested label div)
            prize_elem = event_item.css_first(".event-item-desc-item.mod-prize")
            prize = ""
            if prize_elem:
                # Get the element's flattened text; the value precedes the nested label div
                full_text = prize_elem.text(strip=True)

                # Split by common separators and take the first meaningful part
                # The structure is: "$250,000