├── src
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── scanvf.cpython-312.pyc
│   │   ├── __init__.cpython-312.pyc
│   │   ├── asurascans.cpython-312.pyc
│   │   ├── flamescans.cpython-312.pyc
│   │   ├── manganato.cpython-312.pyc
│   │   ├── mangapark.cpython-312.pyc
│   │   ├── mangapill.cpython-312.pyc
│   │   ├── mangareader.cpython-312.pyc
│   │   └── mangaworld.cpython-312.pyc
│   ├── scanvf.py
│   ├── mangareader.py
│   ├── flamescans.py
│   ├── mangapark.py
│   ├── mangapill.py
│   ├── mangaworld.py
│   ├── asurascans.py
│   └── manganato.py
├── .gitignore
├── __pycache__
│   ├── main.cpython-312.pyc
│   └── test_main.cpython-312-pytest-8.2.2.pyc
├── requirements.txt
├── vercel.json
├── render.yaml
├── .dockerignore
├── LICENSE
├── compose.yaml
├── Dockerfile
├── README.md
└── main.py

--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vercel
2 | test.txt
3 | /temp
4 | __pycache__/
5 | /.pytest_cache
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | beautifulsoup4==4.12.3
2 | fastapi==0.111.0
3 | Requests==2.32.3
4 | uvicorn==0.30.1
5 | gunicorn==22.0.0
6 | pytest==8.2.2
7 | httpx==0.27.0
8 | 
9 | 
--------------------------------------------------------------------------------
/vercel.json:
--------------------------------------------------------------------------------
1 | {
2 |   "builds": [
3 |     {"src": "main.py", "use": "@vercel/python"}
4 |   ],
5 |   "routes": [
6 |     {"src": "/(.*)", "dest": "main.py"}
7 |   ]
8 | }
--------------------------------------------------------------------------------
/render.yaml:
--------------------------------------------------------------------------------
1 | services:
2 |   - type: web
3 |     name: Manga-Scraper
4 |     runtime: python
5 |     repo: https://github.com/real-zephex/manga-scrapers
6 |     branch: main
7 |     plan: free
8 |     region: singapore
9 |     buildCommand: pip install -r requirements.txt
10 |     startCommand: gunicorn -k uvicorn.workers.UvicornWorker main:app
11 |     version: "1"
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Include any files or directories that you don't want to be copied to your
2 | # container here (e.g., local build artifacts, temporary files, etc.).
3 | # 4 | # For more help, visit the .dockerignore file reference guide at 5 | # https://docs.docker.com/go/build-context-dockerignore/ 6 | 7 | **/.DS_Store 8 | **/__pycache__ 9 | **/.venv 10 | **/.classpath 11 | **/.dockerignore 12 | **/.env 13 | **/.git 14 | **/.gitignore 15 | **/.project 16 | **/.settings 17 | **/.toolstarget 18 | **/.vs 19 | **/.vscode 20 | **/*.*proj.user 21 | **/*.dbmdl 22 | **/*.jfm 23 | **/bin 24 | **/charts 25 | **/docker-compose* 26 | **/compose.y*ml 27 | **/Dockerfile* 28 | **/node_modules 29 | **/npm-debug.log 30 | **/obj 31 | **/secrets.dev.yaml 32 | **/values.dev.yaml 33 | LICENSE 34 | README.md 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 real-zephex 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /compose.yaml: -------------------------------------------------------------------------------- 1 | # Comments are provided throughout this file to help you get started. 2 | # If you need more help, visit the Docker Compose reference guide at 3 | # https://docs.docker.com/go/compose-spec-reference/ 4 | 5 | # Here the instructions define your application as a service called "server". 6 | # This service is built from the Dockerfile in the current directory. 7 | # You can add other services your application may depend on here, such as a 8 | # database or a cache. For examples, see the Awesome Compose repository: 9 | # https://github.com/docker/awesome-compose 10 | services: 11 | server: 12 | build: 13 | context: . 14 | ports: 15 | - 8000:8000 16 | 17 | # The commented out section below is an example of how to define a PostgreSQL 18 | # database that your application can use. `depends_on` tells Docker Compose to 19 | # start the database before your application. The `db-data` volume persists the 20 | # database data between container restarts. The `db-password` secret is used 21 | # to set the database password. You must create `db/password.txt` and add 22 | # a password of your choosing to it before running `docker compose up`. 
23 | #     depends_on:
24 | #       db:
25 | #         condition: service_healthy
26 | #   db:
27 | #     image: postgres
28 | #     restart: always
29 | #     user: postgres
30 | #     secrets:
31 | #       - db-password
32 | #     volumes:
33 | #       - db-data:/var/lib/postgresql/data
34 | #     environment:
35 | #       - POSTGRES_DB=example
36 | #       - POSTGRES_PASSWORD_FILE=/run/secrets/db-password
37 | #     expose:
38 | #       - 5432
39 | #     healthcheck:
40 | #       test: [ "CMD", "pg_isready" ]
41 | #       interval: 10s
42 | #       timeout: 5s
43 | #       retries: 5
44 | # volumes:
45 | #   db-data:
46 | # secrets:
47 | #   db-password:
48 | #     file: db/password.txt
49 | 
50 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax=docker/dockerfile:1
2 | 
3 | # Comments are provided throughout this file to help you get started.
4 | # If you need more help, visit the Dockerfile reference guide at
5 | # https://docs.docker.com/go/dockerfile-reference/
6 | 
7 | # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7
8 | 
9 | ARG PYTHON_VERSION=3.11
10 | FROM python:${PYTHON_VERSION}-slim as base
11 | 
12 | # Prevents Python from writing pyc files.
13 | ENV PYTHONDONTWRITEBYTECODE=1
14 | 
15 | # Keeps Python from buffering stdout and stderr to avoid situations where
16 | # the application crashes without emitting any logs due to buffering.
17 | ENV PYTHONUNBUFFERED=1
18 | 
19 | WORKDIR /app
20 | 
21 | # Create a non-privileged user that the app will run under.
22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/
23 | ARG UID=10001
24 | RUN adduser \
25 |     --disabled-password \
26 |     --gecos "" \
27 |     --home "/nonexistent" \
28 |     --shell "/sbin/nologin" \
29 |     --no-create-home \
30 |     --uid "${UID}" \
31 |     appuser
32 | 
33 | # Download dependencies as a separate step to take advantage of Docker's caching.
34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
35 | # Leverage a bind mount to requirements.txt to avoid having to copy them
36 | # into this layer.
37 | RUN --mount=type=cache,target=/root/.cache/pip \
38 |     --mount=type=bind,source=requirements.txt,target=requirements.txt \
39 |     python -m pip install -r requirements.txt
40 | 
41 | # Switch to the non-privileged user to run the application.
42 | USER appuser
43 | 
44 | # Copy the source code into the container.
45 | COPY . .
46 | 
47 | # Expose the port that the application listens on.
48 | EXPOSE 8000
49 | 
50 | # Run the application.
51 | CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Manga Scraper API [under development]
2 | 
3 | This is a FastAPI-based web application for scraping manga information from various sources including Manganato, Mangareader, Mangapill, and Asurascans.
4 | 
5 | ## Installation
6 | 
7 | 1. Clone the repository:
8 | ```bash
9 | git clone https://github.com/real-zephex/manga-scrapers.git
10 | cd manga-scrapers
11 | ```
12 | 
13 | 2. Create and activate a virtual environment (optional but recommended):
14 | ```bash
15 | python -m venv env
16 | source env/bin/activate # On Windows use `env\Scripts\activate`
17 | ```
18 | 
19 | 3. Install the required packages:
20 | ```bash
21 | pip install -r requirements.txt
22 | ```
23 | 
24 | ## Usage
25 | 
26 | 1. Run the FastAPI application:
27 | ```bash
28 | uvicorn main:app --reload
29 | ```
30 | 
31 | 2. Access the API documentation at `http://127.0.0.1:8000/docs`.
32 | 
33 | ## API Endpoints
34 | 
35 | ### Homepage
36 | - **GET /**: Welcome message.
37 | ```json
38 | {
39 |   "message": "Welcome to the manganato scraper"
40 | }
41 | ```
42 | ## Sources Supported
43 | 1. [Manganato](https://manganato.com/)
44 | 2. [Mangareader](https://mangareader.tv/)
45 | 3. [Mangapill](https://mangapill.com/)
46 | 4. [Asurascans](https://asurascans.io/) - not working on demo site, works when hosted locally (help needed)
47 | 5. [Flamecomics](https://flamecomics.me/)
48 | 
49 | 
50 | ### Manganato
51 | - **GET /manganato/search/{path}**: Search manga by query.
52 | - **GET /manganato/info/{path}**: Get manga info by ID.
53 | - **GET /manganato/pages/{path}**: Get manga pages by ID.
54 | - **GET /manganato/latest/{path}**: Get the latest manga (with optional page number).
55 | - **GET /manganato/newest/{path}**: Get the newest manga (with optional page number).
56 | - **GET /manganato/hottest/{path}**: Get the hottest manga (with optional page number).
57 | - **GET /manganato/images/{path}**: Get the manga image by URL.
58 | 
59 | ### Mangareader
60 | - **GET /mangareader/search/{path}**: Search manga by query.
61 | - **GET /mangareader/info/{path}**: Get manga info by ID.
62 | - **GET /mangareader/pages/{path}**: Get manga pages by ID.
63 | - **GET /mangareader/genre-list**: Get the list of genres.
64 | - **GET /mangareader/latest/{path}**: Get the latest manga by genre.
65 | 
66 | ### Mangapill
67 | - **GET /mangapill/search/{path}**: Search manga by query.
68 | - **GET /mangapill/info/{path}**: Get manga info by ID.
69 | - **GET /mangapill/pages/{path}**: Get manga pages by ID.
70 | - **GET /mangapill/newest**: Get the newest manga.
71 | - **GET /mangapill/images/{path}**: Get the manga image by URL.
72 | 
73 | ### Asurascans
74 | - **GET /asurascans/search/{path}**: Search manga by query.
75 | - **GET /asurascans/info/{path}**: Get manga info by ID.
76 | - **GET /asurascans/pages/{path}**: Get manga pages by ID.
77 | - **GET /asurascans/popular**: Get the popular manga.
78 | - **GET /asurascans/latest/{path}**: Get the latest manga (with optional page number).
79 | - **GET /asurascans/genres/{path}**: Get manga by genre type.
80 | - **GET /asurascans/genre-list**: Get the list of genres.
81 | 
82 | ### Flamecomics
83 | - **GET /flamescans/search/{path}**: Search manga by title.
84 | - **GET /flamescans/info/{path}**: Get manga info by ID.
85 | - **GET /flamescans/pages/{path}**: Get manga pages by ID.
86 | - **GET /flamescans/sort/{path}**: List series in the given order. Accepts `title`, `titlereverse`, `update`, `popular`, `added`.
87 | 
88 | ## Example Queries
89 | These can be issued from a browser, `curl`, or any HTTP client; a minimal Python example is included at the end of this README.
90 | - **Manganato Search**: `GET /manganato/search/one_piece`
91 | - **Mangareader Latest by Genre**: `GET /mangareader/latest/Action`
92 | - **Mangapill Newest**: `GET /mangapill/newest`
93 | - **Asurascans Popular**: `GET /asurascans/popular`
94 | 
95 | ## Note
96 | The image endpoints proxy the requested image and set the Referer header expected by the source site, which avoids access issues when loading images directly.
97 | 
98 | ## License
99 | This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
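## Quick client example

The sketch below is an illustration of calling the API from Python with `requests`; it is not part of the project itself. It assumes the server is running locally on port 8000 (as in the Usage section) and uses the documented Manganato search route; each scraper responds with a `{"status": ..., "results": [...]}` object.

```python
import requests

BASE_URL = "http://127.0.0.1:8000"  # local dev server started with `uvicorn main:app --reload`

# Search Manganato through the API (GET /manganato/search/{query}); words are
# joined with underscores, as in the example query above.
response = requests.get(f"{BASE_URL}/manganato/search/one_piece", timeout=30)
response.raise_for_status()

data = response.json()  # {"status": <upstream status code>, "results": [list of matches]}
for entry in data["results"][:5]:
    # Each match carries at least an id, title and cover image URL.
    print(entry["title"], "->", entry["id"])
```

The returned `id` can then be passed to the corresponding `/manganato/info/{id}` and `/manganato/pages/{id}` endpoints.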
100 | -------------------------------------------------------------------------------- /src/scanvf.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | import html 4 | 5 | class Scanvf: 6 | def __init__(self) -> None: 7 | self.proxy_url = "https://sup-proxy.zephex0-f6c.workers.dev/api-text?url=" 8 | self.parent_url = "https://scanvf.org" 9 | self.results = { 10 | "status": "", 11 | "results": [] 12 | } 13 | 14 | def search(self, query:str): 15 | try: 16 | url = f"{self.proxy_url}{self.parent_url}/search?q={query}" 17 | response = requests.get(url) 18 | content = html.unescape(response.json()) # search page didn't have a separate page 19 | self.results["status"] = response.status_code 20 | soup = BeautifulSoup(content, "html.parser") 21 | 22 | cards = soup.select("div.container-fluid > div.row > div > div.series") 23 | 24 | for card in cards: 25 | tempContent = {} 26 | tempContent["id"] = card.find("div", class_="last-series-details").find("a").get("href").split("/")[2] 27 | tempContent["image"] = card.find("div", class_="last-series-details").find("a").find("div", class_="position-relative").find("div", class_="series-img-wrapper").find("img").get("data-src") 28 | tempContent["title"] = card.find("div", class_="justify-content-center").find("a", class_="link-series").find("h3").get_text() 29 | 30 | self.results["results"].append(tempContent) 31 | 32 | return self.results 33 | 34 | except Exception as e: 35 | self.results["results"] = e 36 | return self.results 37 | 38 | def info(self, id:str): 39 | try: 40 | url = f"{self.proxy_url}{self.parent_url}/manga/{id}" 41 | response = requests.get(url) 42 | self.results["status"] = response.status_code 43 | soup = BeautifulSoup(response.content, "html.parser") 44 | 45 | content = {} 46 | content["image"] = soup.select_one("body > main > div > div > div > div:nth-child(1) > div.col-12.col-md-auto > div > img").get("src") 47 | 48 | infoSelector = soup.select_one("body > main > div > div > div > div:nth-child(1) > div.col-12.col-md > div > div") 49 | content["title"] = infoSelector.find("div", class_="col-12 mb-4 align-self-center").find("div", class_="d-flex justify-content-between").find("h1").get_text() 50 | content["description"] = infoSelector.find("div", class_="col-12 mb-4").find("p").get_text() 51 | 52 | endContentSelector = soup.select_one("body > main > div > div > div > div:nth-child(1) > div.col-12.col-lg-3.mt-4.mt-lg-0 > div") 53 | content["author"] = ", ".join(i.get_text() for i in endContentSelector.find("div", class_="col-6 col-md-12 mb-4").find_all("div")) 54 | content["genres"] = ", ".join(i.get_text() for i in endContentSelector.find_all("div", class_="col-6 col-md-12 mb-4")[1].find_all("div")) 55 | 56 | chapterSelector = soup.select("body > main > div > div > div > div.row.list-books > div > div > div") 57 | chapters = [] 58 | 59 | for chapter in chapterSelector: 60 | tempChapter = {} 61 | tempChapter["id"] = chapter.find("a").get("href").split("/")[2] 62 | tempChapter["title"] = chapter.find("a").find("div").find("h5").get_text().replace("\n", " ") 63 | chapters.append(tempChapter) 64 | content["chapters"] = chapters 65 | 66 | self.results["results"] = content 67 | return self.results 68 | 69 | except Exception as e: 70 | self.results["results"] = e 71 | return self.results 72 | 73 | def pages(self, id:str): 74 | try: 75 | for i in range(1, 1000): 76 | 77 | url = f"{self.proxy_url}{self.parent_url}/scan/{id}/{str(i)}" 78 | response = 
requests.get(url) 79 | self.results["status"] = response.status_code 80 | soup = BeautifulSoup(response.content, "html.parser") 81 | 82 | imageSelector = soup.select_one("body > main > div > div > div > div > div.col.text-center.book-page > img") 83 | infoPageChecker = soup.select_one("body > main > div > div > div > div:nth-child(1) > div.col-12.col-md > div > div > div:nth-child(3) > p") 84 | 85 | if imageSelector: 86 | self.results["results"].append(imageSelector.get("src")) 87 | elif infoPageChecker: 88 | break 89 | 90 | return self.results 91 | 92 | except Exception as e: 93 | self.results["results"] = e 94 | return self.results -------------------------------------------------------------------------------- /src/mangareader.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | 4 | class Mangareader: 5 | def __init__(self) -> None: 6 | self.parent_url = "https://mangareader.tv" 7 | self.proxy_url = "https://sup-proxy.zephex0-f6c.workers.dev/api-text?url=" 8 | self.results = { 9 | "status": None, 10 | "results": [] 11 | } 12 | def search(self, query:str): 13 | try: 14 | formattedQuery = query.replace(" ", "+") 15 | url = f"{self.proxy_url}{self.parent_url}/search/?w={formattedQuery}" 16 | response = requests.get(url) 17 | self.results["status"] = response.status_code 18 | soup = BeautifulSoup(response.content, "html.parser") 19 | 20 | cards = soup.select("#ares > div > table > tbody > tr") 21 | 22 | for items in cards: 23 | tempContent = {} 24 | tempContent["title"] = items.find("a").get_text() 25 | tempContent["id"] = items.find("a").get("href").split("/")[2] 26 | tempContent["image"] = f"{self.parent_url}{items.find('div', class_='d56').get('data-src')}" 27 | tempContent["chapters"] = items.find("div", class_="d58").get_text().split(" ")[0] 28 | tempContent["status"] = items.find("div", class_="d58").get_text().rsplit(" ")[3] 29 | tempContent["genres"] = items.find("div", class_="d60").get_text().replace("\n", "").replace(" ", "").split(",")[:-1] 30 | self.results["results"].append(tempContent) 31 | 32 | return self.results 33 | except Exception as e: 34 | self.results["results"] = e 35 | return self.results 36 | 37 | def info(self, id:str): 38 | try: 39 | url = f"{self.proxy_url}{self.parent_url}/manga/{id}" 40 | response = requests.get(url) 41 | self.results["status"] = response.status_code 42 | soup = BeautifulSoup(response.content, "html.parser") 43 | 44 | tempContent = {} 45 | tempContent["image"] = f"{self.parent_url}{soup.select_one('#main > div.d14 > div > div.d37 > div.d38 > img').get('src')}" 46 | tempContent["title"] = soup.select_one("#main > div.d14 > div > div.d37 > div.d39 > div.d40").get_text() 47 | tempContent["status"] = soup.select_one("#main > div.d14 > div > div.d37 > div.d39 > table > tbody > tr:nth-child(4) > td:nth-child(2)").get_text() 48 | tempContent["author"] = soup.select_one("#main > div.d14 > div > div.d37 > div.d39 > table > tbody > tr:nth-child(5) > td:nth-child(2)").get_text().strip().split(",")[0] 49 | 50 | genresSelector = soup.select("#main > div.d14 > div > div.d37 > div.d39 > table > tbody > tr:nth-child(7) > td:nth-child(2) > a") 51 | tempContent["genres"] = ", ".join(i.get_text() for i in genresSelector) 52 | 53 | chapterList = [] 54 | chapterSelector = soup.select("#main > div.d14 > div > table > tbody > tr > td > a") 55 | for items in chapterSelector: 56 | tempChapter = {} 57 | tempChapter["title"] = items.get_text().strip() 58 | tempChapter["id"] = 
items.get("href").split("/", 1)[1] 59 | chapterList.append(tempChapter) 60 | 61 | tempContent["chapters"] = chapterList 62 | self.results["results"] = tempContent 63 | return self.results 64 | 65 | except Exception as e: 66 | self.results["results"] = e 67 | return self.results 68 | 69 | def pages(self, id: str): 70 | try: 71 | url = f"{self.parent_url}/{id}" 72 | response = requests.get(url) 73 | self.results["status"] = response.status_code 74 | soup = BeautifulSoup(response.content, "html.parser") 75 | 76 | imgSelectors = soup.select("#ib > div > img") 77 | images = [i.get("data-src") for i in imgSelectors] 78 | 79 | self.results["results"] = images 80 | return self.results 81 | 82 | except Exception as e: 83 | self.results["results"] = e 84 | return self.results 85 | 86 | def latest(self, genre: str = ""): 87 | try: 88 | url = f"{self.parent_url}/genre/{genre}" 89 | response = requests.get(url) 90 | self.results["status"] = response.status_code 91 | soup = BeautifulSoup(response.content, "html.parser") 92 | 93 | cards = soup.select("#main > div.d14 > div > div.d38 > div.d39 > table > tbody > tr") 94 | 95 | for card in cards: 96 | tempContent = {} 97 | tempContent["title"] = card.find("div", class_="d42").find("a").get_text() 98 | tempContent["id"] = card.find("div", class_="d42").find("a").get("href").split("/")[2] 99 | tempContent["image"] = f"{self.parent_url}{card.find('div', class_='d41').get('data-src')}" 100 | tempContent["author"] = card.find("div", class_="d43").get_text().strip().replace("\n", "").split(",")[:-1] 101 | tempContent["chapters"] = card.find("div", class_="d44").get_text().strip().split(" ")[0].replace("\xa0", " ") 102 | tempContent["status"] = card.find("div", class_="d44").get_text().strip().split(" ")[-1][1:-1] 103 | tempContent["genres"] = card.find("div", class_="d46").get_text().strip().replace(" ", "").replace("\n", " ").split(",")[:-1] 104 | self.results["results"].append(tempContent) 105 | return self.results 106 | 107 | except Exception as e: 108 | self.results["results"] = e 109 | return self.results 110 | -------------------------------------------------------------------------------- /src/flamescans.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | 4 | class Flamescans: 5 | def __init__(self) -> None: 6 | self.proxy_url = "https://sup-proxy.zephex0-f6c.workers.dev/api-text?url=" 7 | self.parent_url = "https://flamecomics.me" 8 | self.results = { 9 | "status": "", 10 | "results": [] 11 | } 12 | 13 | def search(self, query:str): 14 | try: 15 | newQuery = query.replace(" ", "+") 16 | url = f"{self.proxy_url}{self.parent_url}/?s={newQuery}" 17 | response = requests.get(url) 18 | self.results["status"] = response.status_code 19 | soup = BeautifulSoup(response.content, "html.parser") 20 | 21 | cards = soup.select("div.wrapper > div.postbody > div > div.listupd > div > div") 22 | 23 | for items in cards: 24 | tempContent = {} 25 | tempContent["title"] = items.find("a").get("title") 26 | tempContent["id"] = items.find("a").get("href").rsplit("/", 2)[-2] 27 | tempContent["image"] = items.find("img", class_="ts-post-image wp-post-image attachment-medium size-medium").get("src") 28 | tempContent["status"] = items.find("a").find("div", class_="bigor").find("div", class_="extra-info").find("div", class_="imptdt").find("div", class_="status").find("i").get_text() 29 | self.results["results"].append(tempContent) 30 | 31 | return self.results 32 | except Exception as e: 33 | 
self.results["results"] = e 34 | return self.results 35 | 36 | def info(self, id:str): 37 | try: 38 | url = f"{self.proxy_url}{self.parent_url}/series/{id}" 39 | response = requests.get(url) 40 | self.results["status"] = response.status_code 41 | soup = BeautifulSoup(response.content, "html.parser") 42 | 43 | content = {} 44 | content["image"] = soup.select_one("div.main-info > div.first-half > div.thumb-half > div.thumb > img").get("src") 45 | 46 | infoSelector = soup.select_one("div.main-info > div.first-half > div.info-half") 47 | content["title"] = infoSelector.find("div", class_="titles").find("h1", class_="entry-title").get_text() 48 | 49 | genreSelector = soup.select("div.main-info > div.first-half > div.info-half > div.genres-container > div > span > a") 50 | content["genres"] = ", ".join(i.get_text() for i in genreSelector).split(", ") 51 | 52 | content["description"] = infoSelector.find("div", class_="summary").find("div", class_="wd-full").find("div", class_="entry-content entry-content-single").get_text().strip() 53 | 54 | moreInfoSelector = soup.select_one("div.main-info > div.second-half > div.left-side > div") 55 | content["type"] = moreInfoSelector.select_one("div:nth-child(1) > i").get_text() 56 | content["status"] = moreInfoSelector.select_one("div:nth-child(2) > i").get_text() 57 | content["year"] = moreInfoSelector.select_one("div:nth-child(3) > i").get_text() 58 | content["author"] = moreInfoSelector.select_one("div:nth-child(4) > i").get_text() 59 | content["artist"] = moreInfoSelector.select_one("div:nth-child(5) > i").get_text() 60 | content["serialization"] = moreInfoSelector.select_one("div:nth-child(6) > i").get_text() 61 | 62 | chapterSelector = soup.select("#chapterlist > ul > li") 63 | chapter = [] 64 | for chap in chapterSelector: 65 | tempChapter = {} 66 | tempChapter["id"] = chap.find("a").get("href").rsplit("/", 2)[-2] 67 | tempChapter["title"] = chap.find("div", class_="chbox").find("div", class_="eph-num").find("span", class_="chapternum").get_text().strip().replace("\n", " ") 68 | chapter.append(tempChapter) 69 | content["chapters"] = chapter[::-1] 70 | 71 | self.results["results"] = content 72 | return self.results 73 | 74 | except Exception as e: 75 | self.results["results"] = e 76 | return self.results 77 | 78 | def pages(self, id:str): 79 | try: 80 | url = f"{self.proxy_url}{self.parent_url}/{id}" 81 | response = requests.get(url) 82 | self.results["status"] = response.status_code 83 | soup = BeautifulSoup(response.content, "html.parser") 84 | 85 | imageSelector = soup.select("#readerarea > p > img") 86 | self.results["results"] = [i.get("src") for i in imageSelector] 87 | 88 | return self.results 89 | except Exception as e: 90 | self.results["results"] = e 91 | return self.results 92 | 93 | def sort(self, type:str = ""): 94 | try: 95 | url = f"{self.proxy_url}{self.parent_url}/series/?order={type}" 96 | response = requests.get(url) 97 | self.results["status"] = response.status_code 98 | soup = BeautifulSoup(response.content, "html.parser") 99 | 100 | cardsSelector = soup.select("div.wrapper > div.postbody > div.bixbox.seriesearch > div.mrgn > div.listupd > div > div.bsx") 101 | 102 | for items in cardsSelector: 103 | tempContent = {} 104 | tempContent["title"] = items.find("a").get("title") 105 | tempContent["id"] = items.find("a").get("href").rsplit("/", 2)[-2] 106 | tempContent["image"] = items.find("img", class_="ts-post-image wp-post-image attachment-medium size-medium").get("src") 107 | tempContent["status"] = items.find("a").find("div", 
class_="bigor").find("div", class_="extra-info").find("div", class_="imptdt").find("div", class_="status").find("i").get_text() 108 | self.results["results"].append(tempContent) 109 | 110 | return self.results 111 | 112 | except Exception as e: 113 | self.results["results"] = e 114 | return self.results -------------------------------------------------------------------------------- /src/mangapark.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | import json 4 | import re 5 | 6 | class Mangapark: 7 | def __init__(self) -> None: 8 | self.parent_url = "https://mangapark.net" 9 | self.proxy_url = "https://sup-proxy.zephex0-f6c.workers.dev/api-text?url=" 10 | self.results = { 11 | "status": None, 12 | "results": [] 13 | } 14 | self.pattern = r"https:\/\/xfs-n\d+\.xfspp\.com\/comic\/\d+\/[a-zA-Z0-9]+\/[a-f0-9]+\/\d+_\d+_\d+_\d+\.(?:webp|jpeg)" 15 | self.pattern_two = r"https:\/\/xfs-n\d+\.xfspp\.com\/comic\/\d+\/[a-zA-Z0-9]+\/[a-zA-Z0-9]+\/[a-zA-Z0-9]+\/\d+_[a-zA-Z0-9]+_\d+_\d+\.(?:webp|jpeg)" 16 | self.pattern_three = r"https:\/\/xfs-n\d+\.xfspp\.com\/comic\/\d+\/images+\/[a-zA-Z0-9]+\/[a-zA-Z0-9]+\/[a-zA-Z0-9]+_\d+_\d+_\d+\.(?:webp|jpeg|jpg)" 17 | 18 | 19 | def search(self, query:str): 20 | try: 21 | url = f"{self.proxy_url}{self.parent_url}/search?word={query}" 22 | response = requests.get(url) 23 | self.results["status"] = response.status_code 24 | soup = BeautifulSoup(response.content, "html.parser") 25 | 26 | cardSelector = soup.select("#app-wrapper > main > div.grid.gap-5.grid-cols-1.border-t.border-t-base-200.pt-5 > div") 27 | 28 | for card in cardSelector: 29 | tempContent = {} 30 | tempContent["title"] = card.find("h3", class_="font-bold space-x-1").get_text() 31 | tempContent["image"] = card.find("div", class_="shrink-0 basis-20 md:basis-24").find("div", class_="group relative w-full").find("a").find("img").get("src") 32 | tempContent["id"] = card.find("h3", class_="font-bold space-x-1").find("a", class_="link-hover link-pri").get("href").split("/")[2] 33 | try: 34 | tempContent["authors"] = card.find("div", attrs={"q:key": "6N_0"}).get_text() 35 | except: 36 | tempContent["authors"] = "?" 37 | self.results["results"].append(tempContent) 38 | 39 | return self.results 40 | 41 | except Exception as e: 42 | self.results["results"] = e 43 | return self.results 44 | 45 | def info(self, id:str): 46 | try: 47 | url = f"{self.proxy_url}{self.parent_url}/title/{id}" 48 | response = requests.get(url) 49 | self.results["status"] = response.status_code 50 | soup = BeautifulSoup(response.content, "html.parser") 51 | 52 | content = {} 53 | content["image"] = soup.select_one("#app-wrapper > main > div.flex.flex-col > div.flex > div.w-24 > img").get("src") 54 | 55 | headerSection = soup.select_one("#app-wrapper > main > div.flex.flex-col > div.mt-3 > div.space-y-2.hidden") 56 | content["title"] = headerSection.find("h3").get_text() 57 | try: 58 | content["altTitle"] = ", ".join(i.get_text() for i in headerSection.find("div", attrs={"q:key": "tz_2"}).find_all("span") if i.get_text() != " / ") 59 | except: 60 | content["altTitle"] = "?" 
61 | content["author"] = ", ".join(i.get_text() for i in headerSection.find("div", attrs={"q:key": "tz_4"}).find_all("a")) 62 | 63 | middleSection = soup.select_one("#app-wrapper > main > div.flex.flex-col > div.mt-3 > div:nth-child(2)") 64 | content["genres"] = " ".join(i.get_text() for i in middleSection.find("div", attrs={"q:key": "30_2"}).find_all("span")) 65 | content["status"] = middleSection.find("div", attrs={"q:key": "Yn_8"}).find("span", attrs={"q:key": "Yn_5"}).get_text() 66 | 67 | content["description"] = " ".join(i.get_text() for i in soup.select_one("#app-wrapper > main > div.flex.flex-col > div.mt-3 > div > div > div.overflow-y-hidden.max-h-28 > div:nth-child(1) > react-island > div").find_all("div")) 68 | 69 | chapterSelector = soup.select("#app-wrapper > main > div:nth-child(5) > div:nth-child(2) > div > div > div > div.space-x-1") 70 | chapters = [] 71 | 72 | for chapter in chapterSelector: 73 | tempChapter = {} 74 | tempChapter["id"] = chapter.find("a").get("href").split("/", 2)[2] 75 | tempChapter["title"] = chapter.find("a").get_text() 76 | chapters.append(tempChapter) 77 | 78 | content["chapters"] = chapters[::-1] 79 | 80 | self.results["results"] = content 81 | return self.results 82 | 83 | 84 | except Exception as e: 85 | self.results["results"] = e 86 | return self.results 87 | 88 | def pages(self, id:str): 89 | try: 90 | url = f"{self.proxy_url}{self.parent_url}/title/{id}" 91 | response = requests.get(url) 92 | self.results["status"] = response.status_code 93 | soup = BeautifulSoup(response.content, "html.parser") 94 | 95 | scriptTags = soup.find_all("script") 96 | jsonify = json.loads(scriptTags[-4].text) 97 | pages = [] 98 | 99 | for i in jsonify["objs"]: 100 | try: 101 | if re.match(self.pattern, i) or re.match(self.pattern_two, i) or re.match(self.pattern_three, i): 102 | pages.append(i) 103 | except: 104 | pass 105 | self.results["results"] = pages 106 | return self.results 107 | 108 | except Exception as e: 109 | self.results["results"] = e 110 | return self.results 111 | 112 | def latest(self, page:str = "1"): 113 | try: 114 | url = f"{self.proxy_url}{self.parent_url}/latest/{page}" 115 | response = requests.get(url) 116 | self.results["status"] = response.status_code 117 | soup = BeautifulSoup(response.content, "html.parser") 118 | 119 | cardSelector = soup.select("#app-wrapper > main > div > div.space-y-5 > div.grid.gap-5.grid-cols-1.border-t.border-t-base-200.pt-3 > div") 120 | 121 | for card in cardSelector: 122 | content = {} 123 | content["image"] = card.find("img").get("src") 124 | content["title"] = card.find("div", class_="pl-3 grow flex flex-col space-y-1 group").find("h3").get_text() 125 | content["id"] = card.find("div", class_="pl-3 grow flex flex-col space-y-1 group").find("h3").find("a").get("href").split("/")[2] 126 | content["chapterReleased"] = card.find("div", attrs={"q:key": "R7_8"}).find("span").find("a").get_text() 127 | self.results["results"].append(content) 128 | return self.results 129 | except Exception as e: 130 | self.results["results"] = e 131 | return self.results 132 | 133 | -------------------------------------------------------------------------------- /src/mangapill.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | 4 | class Mangapill: 5 | def __init__(self) -> None: 6 | self.proxy_url = "https://sup-proxy.zephex0-f6c.workers.dev/api-text?url=" 7 | self.parent_url = "https://mangapill.com" 8 | self.results = { 9 | "status": "", 10 
| "results": [] 11 | } 12 | 13 | def search(self, query: str): 14 | try: 15 | newQuery = query.replace(" ", "+") 16 | url = f"{self.proxy_url}{self.parent_url}/search?q={newQuery}" 17 | response = requests.get(url) 18 | self.results["status"] = response.status_code 19 | soup = BeautifulSoup(response.content, "html.parser") 20 | 21 | cards = soup.select("body > div.container.py-3 > div.my-3.grid.justify-end.gap-3.grid-cols-2 > div") 22 | 23 | for items in cards: 24 | tempContent = {} 25 | tempContent["id"] = items.find("a", class_="relative block").get("href").split("/", 1)[1] 26 | tempContent["title"] = items.find("div", class_="mt-3 font-black leading-tight line-clamp-2").get_text() 27 | try: 28 | tempContent["subheading"] = items.find("div", class_="line-clamp-2 text-xs text-secondary mt-1").get_text() 29 | except: 30 | tempContent["subheading"] = "?" 31 | tempContent["image"] = items.find("a", class_="relative block").find("figure").find("img").get("data-src") # MARK: Referer is required 32 | genresSelector = items.find("div", class_="flex flex-wrap gap-1 mt-1").find_all("div") 33 | tempContent["type"] = genresSelector[0].get_text() 34 | tempContent["year"] = genresSelector[1].get_text() 35 | tempContent["status"] = genresSelector[2].get_text() 36 | self.results["results"].append(tempContent) 37 | 38 | return self.results 39 | except Exception as e: 40 | self.results["results"] = e 41 | return self.results 42 | 43 | def info(self, id:str): 44 | try: 45 | url = f"{self.proxy_url}{self.parent_url}/{id}" 46 | response = requests.get(url) 47 | self.results["status"] = response.status_code 48 | soup = BeautifulSoup(response.content, "html.parser") 49 | 50 | tempContent = {} 51 | tempContent["image"] = soup.select_one("body > div.container > div.flex.flex-col > div.text-transparent.flex-shrink-0.w-60.h-80.relative.rounded.bg-card.mr-3.mb-3 > img").get("data-src") 52 | tempContent["title"] = soup.select_one("body > div.container > div.flex.flex-col > div.flex.flex-col > div:nth-child(1) > h1").get_text() 53 | tempContent["description"] = soup.select_one("body > div.container > div.flex.flex-col > div.flex.flex-col > div:nth-child(2) > p").get_text() 54 | tempContent["type"] = soup.select_one("body > div.container > div.flex.flex-col > div.flex.flex-col > div.grid.grid-cols-1 > div:nth-child(1) > div").get_text() 55 | tempContent["status"] = soup.select_one("body > div.container > div.flex.flex-col > div.flex.flex-col > div.grid.grid-cols-1 > div:nth-child(2) > div").get_text() 56 | tempContent["year"] = soup.select_one("body > div.container > div.flex.flex-col > div.flex.flex-col > div.grid.grid-cols-1 > div:nth-child(3) > div").get_text() 57 | 58 | genresSelector = soup.select("body > div.container > div.flex.flex-col > div.flex.flex-col > div:nth-child(4) > a") 59 | tempContent["genres"] = [i.get_text() for i in genresSelector] 60 | 61 | chapterSelector = soup.select("#chapters > div > a") 62 | chapters = [] 63 | for items in chapterSelector: 64 | tempChapters = {} 65 | tempChapters["title"] = items.get_text() 66 | tempChapters["id"] = items.get("href").split("/", 1)[1] 67 | chapters.append(tempChapters) 68 | tempContent["chapters"] = chapters[::-1] 69 | 70 | self.results["results"] = tempContent 71 | return self.results 72 | except Exception as e: 73 | self.results["results"] = e 74 | return self.results 75 | 76 | def pages(self, id:str): 77 | try: 78 | url = f"{self.proxy_url}{self.parent_url}/{id}" 79 | response = requests.get(url) 80 | self.results["status"] = response.status_code 81 | 
soup = BeautifulSoup(response.content, "html.parser") 82 | 83 | imageSelector = soup.select("body > div > chapter-page > div > div.relative.bg-card.flex.justify-center.items-center > picture > img") 84 | self.results["results"] = [i.get("data-src") for i in imageSelector] 85 | return self.results 86 | 87 | except Exception as e: 88 | self.results["results"] = e 89 | return self.results 90 | 91 | def new(self, type:str): # Same as search 92 | try: 93 | url = f"{self.proxy_url}{self.parent_url}/mangas/new" 94 | response = requests.get(url) 95 | self.results["status"] = response.status_code 96 | soup = BeautifulSoup(response.content, "html.parser") 97 | 98 | cards = soup.select("body > div.container.py-3 > div.grid.justify-end.gap-3.grid-cols-2 > div") 99 | 100 | for items in cards: 101 | tempContent = {} 102 | tempContent["id"] = items.find("a", class_="relative block").get("href").split("/", 1)[1] 103 | tempContent["title"] = items.find("div", class_="mt-3 font-black leading-tight line-clamp-2").get_text() 104 | try: 105 | tempContent["subheading"] = items.find("div", class_="line-clamp-2 text-xs text-secondary mt-1").get_text() 106 | except: 107 | tempContent["subheading"] = "?" 108 | tempContent["image"] = items.find("a", class_="relative block").find("figure").find("img").get("data-src") # MARK: Referer is required 109 | genresSelector = items.find("div", class_="flex flex-wrap gap-1 mt-1").find_all("div") 110 | tempContent["type"] = genresSelector[0].get_text() 111 | tempContent["year"] = genresSelector[1].get_text() 112 | tempContent["status"] = genresSelector[2].get_text() 113 | self.results["results"].append(tempContent) 114 | 115 | return self.results 116 | except Exception as e: 117 | self.results["results"] = e 118 | return self.results 119 | 120 | def recent(self): # Same as search 121 | try: 122 | url = f"{self.proxy_url}{self.parent_url}/chapters" 123 | response = requests.get(url) 124 | self.results["status"] = response.status_code 125 | soup = BeautifulSoup(response.content, "html.parser") 126 | 127 | cards = soup.select("body > div.container.py-3 > div.grid.grid-cols-2 > div") 128 | 129 | for items in cards: 130 | tempContent = {} 131 | tempContent["id"] = items.find("div", class_="px-1").find("a", class_="mt-1.5 leading-tight text-secondary").get("href").split("/", 1)[1] 132 | tempContent["image"] = items.find("a").find("figure").find("img").get("data-src") 133 | tempContent["title"] = items.find("div", class_="px-1").find("a", class_="mt-1.5 leading-tight text-secondary").find("div", class_="line-clamp-2 text-sm font-bold").get_text() 134 | self.results["results"].append(tempContent) 135 | 136 | return self.results 137 | except Exception as e: 138 | self.results["results"] = e 139 | return self.results -------------------------------------------------------------------------------- /src/mangaworld.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | 4 | class Mangaworld: 5 | def __init__(self) -> None: 6 | self.parent_url = "https://www.mangaworld.ac" 7 | self.proxy_url = "https://sup-proxy.zephex0-f6c.workers.dev/api-text?url=" 8 | self.results = { 9 | "status": None, 10 | "results": [] 11 | } 12 | 13 | def search(self, query:str): 14 | try: 15 | url = f"{self.proxy_url}{self.parent_url}/archive?keyword={query}" 16 | response = requests.get(url) 17 | self.results["status"] = response.status_code 18 | soup = BeautifulSoup(response.content, "html.parser") 19 | 20 | cardSelector = 
soup.select("body > div.container > div > div > div.comics-grid > div.entry") 21 | 22 | for card in cardSelector: 23 | tempContent = {} 24 | tempContent["title"] = card.find("div", class_="content").find("p", class_="name").get_text() 25 | tempContent["id"] =card.find("a", class_="thumb position-relative").get("href").split("/", 3)[3] 26 | tempContent["image"] = card.find("a", class_="thumb position-relative").find("img").get("src") 27 | tempContent["type"] = card.find("div", class_="content").find("div", class_="genre").find("a").get_text() 28 | tempContent["author"] = card.find("div", class_="content").find("div", class_="author").find("a").get_text() 29 | tempContent["status"] = card.find("div", class_="content").find("div", class_="status").find("a").get_text() 30 | tempContent["artist"] = card.find("div", class_="content").find("div", class_="artist").find("a").get_text() 31 | tempContent["genres"] = ", ".join(i.get_text() for i in card.find("div", class_="content").find("div", class_="genres").find_all("a")) 32 | 33 | self.results["results"].append(tempContent) 34 | 35 | return self.results 36 | except Exception as e: 37 | self.results["results"] = e 38 | return self.results 39 | 40 | def info(self, id:str): 41 | try: 42 | url = f"{self.proxy_url}{self.parent_url}/{id}" 43 | response = requests.get(url) 44 | self.results["status"] = response.status_code 45 | soup = BeautifulSoup(response.content, "html.parser") 46 | 47 | content = {} 48 | infoPaneSelector = soup.select_one("#manga-page > div > div > div.col-sm-12.col-md-8.col-xl-9 > div > div:nth-child(1) > div.has-shadow.comic-info.d-block.d-sm-flex > div.info") 49 | content["title"] = infoPaneSelector.find("h1", class_="name bigger").get_text() 50 | content["alt-titles"] = infoPaneSelector.find("div", class_="meta-data").find("div", class_="col-12").get_text().split(": ", 1)[1].strip() 51 | content["image"] = soup.select_one("#manga-page > div > div > div.col-sm-12.col-md-8.col-xl-9 > div > div:nth-child(1) > div.has-shadow.comic-info.d-block.d-sm-flex > div.thumb.mb-3.text-center > img").get("src") 52 | content["type"] = infoPaneSelector.find("div", class_="meta-data").find_all("div", class_="col-12 col-md-6")[2].find("a").get_text() 53 | content["description"] = soup.select_one("#noidungm").get_text() 54 | content["status"] = infoPaneSelector.find("div", class_="meta-data").find_all("div", class_="col-12 col-md-6")[3].find("a").get_text() 55 | content["author"] = infoPaneSelector.find("div", class_="meta-data").find("div", class_="col-12 col-md-6").find("a").get_text() 56 | content["artist"] = infoPaneSelector.find("div", class_="meta-data").find_all("div", class_="col-12 col-md-6")[1].find("a").get_text() 57 | content["genres"] = ", ".join(i.get_text() for i in infoPaneSelector.find("div", class_="meta-data").find_all("div", class_="col-12")[1].find_all("a")) 58 | 59 | chapterSelector = soup.select("#chapterList > div.chapters-wrapper.py-2.pl-0 > div > div.volume-chapters.pl-2 > div.chapter") 60 | if len(chapterSelector) == 0: 61 | chapterSelector = soup.select("#chapterList > div.chapters-wrapper.py-2.pl-0 > div") 62 | chapter = [] 63 | 64 | for item in chapterSelector: 65 | tempChapter = {} 66 | tempChapter["id"] = item.find("a", class_="chap").get("href").split("/", 3)[3] 67 | tempChapter["title"] = item.find("a", class_="chap").get("title") 68 | chapter.append(tempChapter) 69 | content["chapters"] = chapter[::-1] 70 | 71 | self.results["results"] = content 72 | return self.results 73 | 74 | except Exception as e: 75 | 
self.results["results"] = e 76 | return self.results 77 | 78 | def pages(self, id:str): 79 | try: 80 | url = f"{self.proxy_url}{self.parent_url}/{id}" 81 | response = requests.get(url) 82 | self.results["status"] = response.status_code 83 | soup = BeautifulSoup(response.content, "html.parser") 84 | 85 | imagesSelector = soup.select("#page > img") 86 | for i in imagesSelector: 87 | self.results["results"].append(i.get("src")) 88 | return self.results 89 | except Exception as e: 90 | self.results["results"] = e 91 | return self.results 92 | 93 | def trending(self): 94 | try: 95 | url = f"{self.proxy_url}{self.parent_url}" 96 | response = requests.get(url) 97 | self.results["status"] = response.status_code 98 | soup = BeautifulSoup(response.content, "html.parser") 99 | 100 | trendingCardsSelector = soup.select("#popular > div.row > div.col-12 > div.comics-flex > div.vertical") 101 | 102 | for card in trendingCardsSelector: 103 | tempContent = {} 104 | tempContent["title"] = card.find("a", class_="thumb").get("title") 105 | tempContent["id"] = card.find("a", class_="thumb").get("href").split("/", 3)[3] 106 | tempContent["image"] = card.find("a", class_="thumb").find("img").get("src") 107 | tempContent["chapterReleased"] = card.find("a", class_="thumb").find("div", class_="chapter").get_text() 108 | self.results["results"].append(tempContent) 109 | 110 | return self.results 111 | except Exception as e: 112 | self.results["results"] = e 113 | return self.results 114 | 115 | def popular(self, page:str = "1"): 116 | try: 117 | url = f"{self.proxy_url}{self.parent_url}/?page={page}" 118 | response = requests.get(url) 119 | self.results["status"] = response.status_code 120 | soup = BeautifulSoup(response.content, "html.parser") 121 | 122 | cardsSelector = soup.select("body > div.container > div > div.col-sm-12.col-md-8.col-xl-9 > div.comics-grid > div") 123 | 124 | for card in cardsSelector: 125 | tempContent = {} 126 | tempContent["title"] = card.find("a", class_="thumb").get("title") 127 | tempContent["id"] = card.find("a", class_="thumb").get("href").split("/", 3)[3] 128 | tempContent["image"] = card.find("a", class_="thumb").find("img").get("src") 129 | tempContent["type"] = card.find("div", class_="content").find("div", class_="genre").find("a").get_text() 130 | tempContent["status"] = card.find("div", class_="content").find("div", class_="status").find("a").get_text() 131 | 132 | 133 | self.results["results"].append(tempContent) 134 | 135 | return self.results 136 | except Exception as e: 137 | self.results["results"] = e 138 | return self.results -------------------------------------------------------------------------------- /src/asurascans.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | 4 | class Asurascans: 5 | def __init__(self) -> None: 6 | self.proxy_url = "https://sup-proxy.zephex0-f6c.workers.dev/api-text?url=" 7 | self.parent_url = "https://asurascans.io" 8 | self.results = { 9 | "status": "", 10 | "results": [] 11 | } 12 | 13 | def search(self, query:str): 14 | try: 15 | url = f"{self.proxy_url}{self.parent_url}/?s={query}" 16 | response = requests.get(url) 17 | self.results["status"] = response.status_code 18 | soup = BeautifulSoup(response.content, "html.parser") 19 | 20 | cards = soup.select("#content > div > div.postbody > div > div.listupd > div > div.bsx") 21 | content = [] 22 | 23 | for items in cards: 24 | tempContent = {} 25 | tempContent["title"] = items.find("a").get("title") 26 
| tempContent["id"] = items.find("a").get("href").rsplit("/", 2)[-2] 27 | tempContent["image"] = items.find("img", class_="ts-post-image wp-post-image attachment-medium size-medium").get("src") 28 | tempContent["chapters"] = items.find("div", class_="epxs").get_text() 29 | content.append(tempContent) 30 | 31 | self.results["results"].append(content) 32 | return self.results 33 | except Exception as e: 34 | self.results["results"] = e 35 | return self.results 36 | 37 | def info(self, id:str): 38 | try: 39 | url = f"{self.proxy_url}{self.parent_url}/manga/{id}" 40 | response = requests.get(url) 41 | self.results["status"] = response.status_code 42 | soup = BeautifulSoup(response.content, "html.parser") 43 | 44 | content = {} 45 | content["images"] = soup.select_one("div.seriestucon > div.seriestucontent > div.seriestucontl > div.thumb > img").get("data-src") 46 | content["description"] = soup.select_one("div.seriestucon > div.seriestucontent > div.seriestucontentr > div.seriestuhead > div.entry-content.entry-content-single > p").get_text() 47 | 48 | infoSelector = soup.select_one("div.seriestucon > div.seriestucontent > div.seriestucontentr > div.seriestucont > div > table > tbody") 49 | content["status"] = infoSelector.select_one("tr:nth-child(1) > td:nth-child(2)").get_text() 50 | content["type"] = soup.select_one("tr:nth-child(2) > td:nth-child(2)").get_text() 51 | content["year"] = soup.select_one("tr:nth-child(3) > td:nth-child(2)").get_text() 52 | content["author"] = soup.select_one("tr:nth-child(4) > td:nth-child(2)").get_text().split(",") 53 | content["artists"] = soup.select_one("tr:nth-child(5) > td:nth-child(2)").get_text().split(",") 54 | content["serialization"] = soup.select_one("tr:nth-child(6) > td:nth-child(2)").get_text().split(",") 55 | 56 | genresSelector = soup.select("div.seriestucon > div.seriestucontent > div.seriestucontentr > div.seriestucont > div > div > a") 57 | content["genres"] = ", ".join(i.get_text() for i in genresSelector) 58 | 59 | chapterSelector = soup.select("#chapterlist > ul > li > div > div") 60 | chapters = [] 61 | for items in chapterSelector: 62 | tempChapter = {} 63 | tempChapter["title"] = items.find("span", class_="chapternum").get_text() 64 | tempChapter["date"] = items.find("span", class_="chapterdate").get_text() 65 | tempChapter["id"] = items.find("a").get("href").rsplit("/", 2)[-2] 66 | chapters.append(tempChapter) 67 | content["chapters"] = chapters 68 | 69 | self.results["results"].append(content) 70 | return self.results 71 | except Exception as e: 72 | self.results["results"] = e 73 | return self.results 74 | 75 | def pages(self, id:str): 76 | try: 77 | url = f"{self.proxy_url}{self.parent_url}/{id}" 78 | response = requests.get(url) 79 | self.results["status"] = response.status_code 80 | soup = BeautifulSoup(response.content, "html.parser") 81 | 82 | imgSelector = soup.select("#readerarea > p > img") 83 | self.results["results"] = [i.get("data-src") for i in imgSelector] 84 | return self.results 85 | 86 | except Exception as e: 87 | self.results["results"] = e 88 | return self.results 89 | 90 | def popular(self): 91 | try: 92 | url = f"{self.proxy_url}{self.parent_url}" 93 | response = requests.get(url) 94 | self.results["status"] = response.status_code 95 | soup = BeautifulSoup(response.content, "html.parser") 96 | 97 | cards = soup.select("#content > div > div.hotslid > div > div.listupd.popularslider > div > div > div.bsx") 98 | content = [] 99 | 100 | for items in cards: 101 | tempContent = {} 102 | tempContent["title"] = 
items.find("a").get("title") 103 | tempContent["id"] = items.find("a").get("href").rsplit("/", 2)[-2] 104 | tempContent["image"] = items.find("img", class_="ts-post-image wp-post-image attachment-medium size-medium").get("data-src") 105 | tempContent["chapters"] = items.find("div", class_="epxs").get_text() 106 | content.append(tempContent) 107 | 108 | self.results["results"].append(content) 109 | return self.results 110 | except Exception as e: 111 | self.results["results"] = e 112 | return self.results 113 | 114 | def latest(self, page:str = "1"): 115 | try: 116 | url = f"{self.proxy_url}{self.parent_url}/manga/?page={page}&order=update" 117 | response = requests.get(url) 118 | self.results["status"] = response.status_code 119 | soup = BeautifulSoup(response.content, "html.parser") 120 | 121 | cards = soup.select("#content > div > div.postbody > div.bixbox.seriesearch > div.mrgn > div.listupd > div > div.bsx") 122 | content = [] 123 | 124 | for items in cards: 125 | tempContent = {} 126 | tempContent["title"] = items.find("a").get("title") 127 | tempContent["id"] = items.find("a").get("href").rsplit("/", 2)[-2] 128 | tempContent["image"] = items.find("img", class_="ts-post-image wp-post-image attachment-medium size-medium").get("data-src") 129 | tempContent["chapters"] = items.find("div", class_="epxs").get_text() 130 | content.append(tempContent) 131 | 132 | self.results["results"].append(content) 133 | return self.results 134 | except Exception as e: 135 | self.results["results"] = e 136 | return self.results 137 | 138 | def genres(self, type:str): 139 | try: 140 | url = f"{self.proxy_url}{self.parent_url}/genres/{type}" 141 | response = requests.get(url) 142 | self.results["status"] = response.status_code 143 | soup = BeautifulSoup(response.content, "html.parser") 144 | 145 | cards = soup.select("#content > div > div > div > div.listupd > div > div.bsx") 146 | content = [] 147 | 148 | for items in cards: 149 | tempContent = {} 150 | tempContent["title"] = items.find("a").get("title") 151 | tempContent["id"] = items.find("a").get("href").rsplit("/", 2)[-2] 152 | tempContent["image"] = items.find("img", class_="ts-post-image wp-post-image attachment-medium size-medium").get("data-src") 153 | tempContent["chapters"] = items.find("div", class_="epxs").get_text() 154 | content.append(tempContent) 155 | 156 | self.results["results"].append(content) 157 | return self.results 158 | except Exception as e: 159 | self.results["results"] = e 160 | return self.results -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, Response 2 | import requests 3 | 4 | from src.manganato import Manganato 5 | from src.mangareader import Mangareader 6 | from src.mangapill import Mangapill 7 | from src.asurascans import Asurascans 8 | from src.flamescans import Flamescans 9 | from src.mangaworld import Mangaworld 10 | from src.mangapark import Mangapark 11 | from src.scanvf import Scanvf 12 | 13 | app = FastAPI() 14 | 15 | mangareader_genres = ["Action, Adventure, Comedy, Cooking, Doujinshi, Drama, Erotica, Fantasy, Gender Bender, Harem, Historical, Horror, Isekai, Josei, Manhua, Manhwa, Martial arts, Mature, Mecha, Medical, Mystery, One shot, Pornographic, Pschological, Romance, School life, Sci fi, Seinen, Shoujo, Shounen ai, Slice of life, Smut, Sports, Supernatural, Tragedy, Webtoons, Yaoi, Yuri"] 16 | 17 | @app.get("/") 18 | def homepage(): 19 | return ( 20 | 
{ 21 | "message": "Welcome to the manganato scraper" 22 | } 23 | ) 24 | 25 | @app.head("/") 26 | async def read_root_head(): 27 | return Response(headers={"Custom-Header": "Value"}) 28 | 29 | # Manganato 30 | @app.get("/manganato/{category}/{path:path}") 31 | def manganato(category: str, path: str = None): 32 | if category == "search": 33 | if path: 34 | newQuery = path.replace(" ", "_") 35 | return Manganato().search(query=newQuery) 36 | elif category == "info": 37 | if path: 38 | return Manganato().info(id=path) 39 | elif category == "pages": 40 | if path: 41 | return Manganato().pages(id=path) 42 | elif category == "latest": 43 | if path: 44 | return Manganato().latest(page=path) 45 | else: 46 | return Manganato().latest() 47 | elif category == "newest": 48 | if path: 49 | return Manganato().newest(page=path) 50 | else: 51 | return Manganato().newest() 52 | elif category == "hottest": 53 | if path: 54 | return Manganato().hotest(page=path) 55 | else: 56 | return Manganato().hotest() 57 | elif category == "images": 58 | if path: 59 | headers = { 60 | "Referer": "https://chapmanganato.to/" 61 | } 62 | content = requests.get(url=path, headers=headers).content 63 | return Response(content=content, media_type="image/jpg") 64 | else: 65 | return { 66 | "detail": "Invalid parameter" 67 | } 68 | 69 | # Mangareader 70 | @app.get("/mangareader/{category}/{path:path}") 71 | def mangareader(category: str, path: str): 72 | if category == "search": 73 | return Mangareader().search(query=path) 74 | elif category == "info": 75 | return Mangareader().info(id=path) 76 | elif category == "pages": 77 | return Mangareader().pages(id=path) 78 | elif category == "genre-list": 79 | return { 80 | "endpoint": "mangareader", 81 | "genres": mangareader_genres 82 | } 83 | elif category == "latest": 84 | return Mangareader().latest(genre=path) 85 | else: 86 | return { 87 | "detail": "Invalid parameter" 88 | } 89 | 90 | # Mangapill 91 | @app.get("/mangapill/{category}/{path:path}") 92 | def mangapill(category:str, path:str): 93 | if category == "search": 94 | return Mangapill().search(query=path) 95 | elif category == "info": 96 | return Mangapill().info(id=path) 97 | elif category == "pages": 98 | return Mangapill().pages(id=path) 99 | elif category == "newest": 100 | return Mangapill().new() 101 | elif category == "recent": 102 | return Mangapill().recent() 103 | elif category == "images": 104 | if path: 105 | headers = { 106 | "Referer": "https://mangapill.com/" 107 | } 108 | content = requests.get(url=path, headers=headers).content 109 | return Response(content=content, media_type="image/jpg") 110 | else: 111 | return { 112 | "detail": "image url is required" 113 | } 114 | else: 115 | return { 116 | "detail": "Invalid parameter" 117 | } 118 | 119 | # Asurascans 120 | @app.get("/asurascans/{category}/{path:path}") 121 | def asurascans(category:str, path:str): 122 | if category == "search": 123 | if path: 124 | newQuery = path.replace(" ", "+") 125 | return Asurascans().search(query=newQuery) 126 | elif category == "info": 127 | return Asurascans().info(id=path) 128 | elif category == "pages": 129 | return Asurascans().pages(id=path) 130 | elif category == "popular": 131 | return Asurascans().popular() 132 | elif category == "latest": 133 | return Asurascans().latest(page=path) 134 | elif category == "genres": 135 | return Asurascans().genres(type=path) 136 | elif category == "genre-list": 137 | return { 138 | "endpoint": "asurascans", 139 | "genres": "action, adventure, comedy, romance" 140 | } 141 | else: 142 | 
return { 143 | "detail": "Invalid parameter" 144 | } 145 | 146 | # Flamescans 147 | @app.get("/flamescans/{category}/{path:path}") 148 | def flamescans(category:str, path:str): 149 | if category == "search": 150 | return Flamescans().search(query=path) 151 | elif category == "info": 152 | return Flamescans().info(id=path) 153 | elif category == "pages": 154 | return Flamescans().pages(id=path) 155 | elif category == "sort": 156 | return Flamescans().sort(type=path) 157 | # accepts: title, titlereverse, update, popular, added 158 | else: 159 | return { 160 | "detail": "Invalid parameter" 161 | } 162 | 163 | @app.get("/mangaworld/{category}/{path:path}") 164 | def mangaworld(category:str, path:str): 165 | if category == "search": 166 | return Mangaworld().search(query=path) 167 | elif category == "info": 168 | return Mangaworld().info(id=path) 169 | elif category == "pages": 170 | return Mangaworld().pages(id=path) 171 | elif category == "trending": 172 | return Mangaworld().trending() 173 | elif category == "popular": 174 | return Mangaworld().popular(page=path) 175 | else: 176 | return { 177 | "detail": "Invalid parameter" 178 | } 179 | 180 | @app.get("/mangapark/{category}/{path:path}") 181 | def mangapark(category:str, path:str): 182 | if category == "search": 183 | return Mangapark().search(query=path) 184 | elif category == "info": 185 | return Mangapark().info(id=path) 186 | elif category == "pages": 187 | return Mangapark().pages(id=path) 188 | elif category == "latest": 189 | return Mangapark().latest(page=path) 190 | else: 191 | return { 192 | "detail": "Invalid parameter" 193 | } 194 | 195 | @app.get("/scanvf/{category}/{path:path}") 196 | def scanvf(category:str, path:str): 197 | if category == "search": 198 | return Scanvf().search(query=path) 199 | elif category == "info": 200 | return Scanvf().info(id=path) 201 | elif category == "pages": 202 | return Scanvf().pages(id=path) 203 | else: 204 | return { 205 | "detail": "Invalid parameter" 206 | } 207 | -------------------------------------------------------------------------------- /src/manganato.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | 4 | class Manganato: 5 | def __init__(self) -> None: 6 | self.proxy_url = "https://sup-proxy.zephex0-f6c.workers.dev/api-text?url=" 7 | self.parent_url = "https://manganato.com" 8 | self.chapter_url = "https://chapmanganato.to" 9 | self.results = { 10 | "status": "", 11 | "results": [] 12 | } 13 | 14 | def search(self, query): 15 | try: 16 | url = f"{self.proxy_url}{self.parent_url}/search/story/{query}" 17 | response = requests.get(url) 18 | self.results["status"] = response.status_code 19 | soup = BeautifulSoup(response.content, "html.parser") 20 | 21 | cards = soup.select("body > div.body-site > div.container.container-main > div.container-main-left > div.panel-search-story > div") 22 | 23 | for items in cards: 24 | tempContent = {} 25 | tempContent["id"] = items.find("a", class_="item-img").get("href").rsplit("/", 1)[1] 26 | tempContent["title"] = items.find("div", class_="item-right").find("h3").find("a", class_="a-h text-nowrap item-title").get_text() 27 | tempContent["image"] = items.find("img", class_="img-loading").get("src") 28 | tempContent["author"] = items.find("span", class_="item-author").get("title") 29 | tempContent["heading"] = items.find("a")["title"] 30 | tempContent["updated"] = items.find("span", class_="item-time").get_text().split(":", 1)[1].strip().split(" - ") 31 | 
self.results["results"].append(tempContent) 32 | 33 | return self.results 34 | except Exception as e: 35 | self.results["results"] = e 36 | return self.results 37 | 38 | def info(self, id): 39 | try: 40 | url = f"{self.proxy_url}{self.chapter_url}/{id}" 41 | response = requests.get(url) 42 | self.results["status"] = response.status_code 43 | soup = BeautifulSoup(response.content, "html.parser") 44 | 45 | tempContent = {} 46 | tempContent["image"] = soup.select_one("body > div.body-site > div.container.container-main > div.container-main-left > div.panel-story-info > div.story-info-left > span.info-image > img").get("src") 47 | infoPanel = soup.select_one("body > div.body-site > div.container.container-main > div.container-main-left > div.panel-story-info > div.story-info-right") 48 | tempContent["title"] = infoPanel.find("h1").get_text() 49 | tempContent["author"] = infoPanel.find("a", class_="a-h").get_text() 50 | tempContent["status"] = soup.select_one("body > div.body-site > div.container.container-main > div.container-main-left > div.panel-story-info > div.story-info-right > table > tbody > tr:nth-child(3) > td.table-value").get_text() 51 | genres = soup.select_one("body > div.body-site > div.container.container-main > div.container-main-left > div.panel-story-info > div.story-info-right > table > tbody > tr:nth-child(4) > td.table-value").find_all("a", class_="a-h") 52 | tempContent["genres"] = ", ".join(i.get_text() for i in genres) 53 | tempContent["description"] = soup.select_one("#panel-story-info-description").get_text().strip().removeprefix("Description :\r\n ") 54 | 55 | chapters = soup.select("body > div.body-site > div.container.container-main > div.container-main-left > div.panel-story-chapter-list > ul > li") 56 | chapDic = [] 57 | for items in chapters: 58 | tempChap = {} 59 | tempChap["title"] = items.find("a", class_="chapter-name").get_text() 60 | tempChap["id"] = items.find("a", class_="chapter-name").get("href").split("https://chapmanganato.to/")[1] 61 | chapDic.append(tempChap) 62 | 63 | tempContent["chapters"] = chapDic[::-1] 64 | 65 | self.results["results"] = tempContent 66 | return self.results 67 | except Exception as e: 68 | self.results["results"] = e 69 | return self.results 70 | 71 | def pages(self, id): 72 | try: 73 | url = f"{self.proxy_url}{self.chapter_url}/{id}" 74 | response = requests.get(url) 75 | self.results["status"] = response.status_code 76 | soup = BeautifulSoup(response.content, "html.parser") 77 | 78 | images_selector = soup.select("body > div.body-site > div.container-chapter-reader > img") 79 | images_url = [i.get("src") for i in images_selector] 80 | 81 | self.results["results"] = images_url 82 | return self.results 83 | except Exception as e: 84 | self.results["results"] = e 85 | return self.results 86 | 87 | def latest(self, page: str = 1): 88 | try: 89 | url = f"{self.proxy_url}{self.parent_url}/genre-all/{page}" 90 | response = requests.get(url) 91 | self.results["status"] = response.status_code 92 | soup = BeautifulSoup(response.content, "html.parser") 93 | 94 | cards = soup.select("body > div.body-site > div.container.container-main > div.panel-content-genres > div") 95 | 96 | for items in cards: 97 | tempContent = {} 98 | tempContent["img"] = items.find("img", class_="img-loading").get("src") 99 | tempContent["title"] = items.find("div", class_="genres-item-info").find("h3").find("a", class_="genres-item-name").get_text() 100 | tempContent["id"] = items.find("div", class_="genres-item-info").find("h3").find("a", 
class_="genres-item-name").get("href").rsplit("/", 1)[1] 101 | infoSelector = items.select_one("body > div.body-site > div.container.container-main > div.panel-content-genres > div > div > p") 102 | tempContent["date"] = infoSelector.find("span", class_="genres-item-time").get_text() 103 | tempContent["author"] = infoSelector.find("span", class_="genres-item-author").get_text() 104 | tempContent["description"] = items.select_one("body > div.body-site > div.container.container-main > div.panel-content-genres > div > div > div").get_text().strip() 105 | self.results["results"].append(tempContent) 106 | return self.results 107 | except Exception as e: 108 | self.results["results"] = e 109 | return self.results 110 | 111 | def newest(self, page: str = 1): 112 | try: 113 | url = f"{self.proxy_url}{self.parent_url}/genre-all/{page}?type=newest" 114 | response = requests.get(url) 115 | self.results["status"] = response.status_code 116 | soup = BeautifulSoup(response.content, "html.parser") 117 | 118 | cards = soup.select("body > div.body-site > div.container.container-main > div.panel-content-genres > div") 119 | 120 | for items in cards: 121 | tempContent = {} 122 | tempContent["img"] = items.find("img", class_="img-loading").get("src") 123 | tempContent["title"] = items.find("div", class_="genres-item-info").find("h3").find("a", class_="genres-item-name").get_text() 124 | tempContent["id"] = items.find("div", class_="genres-item-info").find("h3").find("a", class_="genres-item-name").get("href").rsplit("/", 1)[1] 125 | infoSelector = items.select_one("body > div.body-site > div.container.container-main > div.panel-content-genres > div > div > p") 126 | tempContent["date"] = infoSelector.find("span", class_="genres-item-time").get_text() 127 | tempContent["author"] = infoSelector.find("span", class_="genres-item-author").get_text() 128 | tempContent["description"] = items.select_one("body > div.body-site > div.container.container-main > div.panel-content-genres > div > div > div").get_text().strip() 129 | self.results["results"].append(tempContent) 130 | 131 | return self.results 132 | except Exception as e: 133 | self.results["results"] = e 134 | return self.results 135 | 136 | def hotest(self, page:str = 1): 137 | try: 138 | url = f"{self.proxy_url}{self.parent_url}/genre-all/{page}?type=topview" 139 | response = requests.get(url) 140 | self.results["status"] = response.status_code 141 | soup = BeautifulSoup(response.content, "html.parser") 142 | 143 | cards = soup.select("body > div.body-site > div.container.container-main > div.panel-content-genres > div") 144 | 145 | for items in cards: 146 | tempContent = {} 147 | tempContent["img"] = items.find("img", class_="img-loading").get("src") 148 | tempContent["title"] = items.find("div", class_="genres-item-info").find("h3").find("a", class_="genres-item-name").get_text() 149 | tempContent["id"] = items.find("div", class_="genres-item-info").find("h3").find("a", class_="genres-item-name").get("href").rsplit("/", 1)[1] 150 | infoSelector = items.select_one("body > div.body-site > div.container.container-main > div.panel-content-genres > div > div > p") 151 | tempContent["date"] = infoSelector.find("span", class_="genres-item-time").get_text() 152 | tempContent["author"] = infoSelector.find("span", class_="genres-item-author").get_text() 153 | tempContent["description"] = items.select_one("body > div.body-site > div.container.container-main > div.panel-content-genres > div > div > div").get_text().strip() 154 | 
self.results["results"].append(tempContent) 155 | 156 | return self.results 157 | except Exception as e: 158 | self.results["results"] = e 159 | return self.results 160 | 161 | 162 | print(Manganato().search("solo leveling")) --------------------------------------------------------------------------------