├── runtime.txt
├── Procfile
├── requirements.txt
├── anibot
│   ├── __main__.py
│   ├── utils
│   │   ├── db.py
│   │   ├── google_trans_new.py
│   │   ├── helper.py
│   │   └── data_parser.py
│   ├── plugins
│   │   ├── jikan.py
│   │   ├── animequotes.py
│   │   ├── animefillerslist.py
│   │   ├── watch.py
│   │   ├── tracemoepy.py
│   │   ├── livechartme.py
│   │   └── bot.py
│   └── __init__.py
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   └── workflows
│       └── codeql-analysis.yml
├── LICENSE
├── app.json
├── .gitignore
└── README.md
/runtime.txt:
--------------------------------------------------------------------------------
1 | python-3.10.1
--------------------------------------------------------------------------------
/Procfile:
--------------------------------------------------------------------------------
1 | worker: python3 -m anibot
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pyrogram
2 | tgcrypto
3 | requests
4 | aiofiles>=0.6.0
5 | aiohttp[speedups]>=3.7.3
6 | tracemoepy
7 | lottie
8 | bs4
9 | motor
10 | dnspython
11 | natsort
12 | apscheduler
13 | lxml
14 | html5lib
--------------------------------------------------------------------------------
/anibot/__main__.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from pyrogram import idle
3 | from . import anibot, has_user, session
4 | from .utils.db import _close_db
5 |
6 | user = None
7 | if has_user:
8 |     from . import user
9 |
10 | async def main():
11 |     await anibot.start()
12 |     if user is not None:
13 |         await user.start()
14 |     await idle()
15 |     await anibot.stop()
16 |     if user is not None:
17 |         await user.stop()
18 |     _close_db()
19 |     await session.close()
20 |
21 |
22 | asyncio.get_event_loop().run_until_complete(main())
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Additional context**
27 | Add any other context about the problem here.
28 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/anibot/utils/db.py:
--------------------------------------------------------------------------------
1 | # The code below is taken from the USERGE-X repo.
2 | # All credits go to the respective author (original author unknown;
3 | # to be credited once identified).
4 |
5 |
6 | __all__ = ['get_collection']
7 |
8 | import asyncio
9 | from motor.motor_asyncio import AsyncIOMotorClient
10 | from motor.core import AgnosticClient, AgnosticDatabase, AgnosticCollection
11 | from .. import DB_URL
12 |
13 | print("Connecting to Database ...")
14 |
15 | _MGCLIENT: AgnosticClient = AsyncIOMotorClient(DB_URL)
16 | _RUN = asyncio.get_event_loop().run_until_complete
17 |
18 | if "anibot" in _RUN(_MGCLIENT.list_database_names()):
19 | print("anibot Database Found :) => Now Logging to it...")
20 | else:
21 | print("anibot Database Not Found :( => Creating New Database...")
22 |
23 | _DATABASE: AgnosticDatabase = _MGCLIENT["anibot"]
24 |
25 |
26 | def get_collection(name: str) -> AgnosticCollection:
27 |     """ Create or Get Collection from your database """
28 |     return _DATABASE[name]
29 |
30 |
31 | def _close_db() -> None:
32 |     _MGCLIENT.close()
--------------------------------------------------------------------------------
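The helper above returns a Motor (async MongoDB) collection, so every call on it must be awaited; the plugins later in this repo use it exactly that way (for example `DC = get_collection('DISABLED_CMDS')` followed by `await DC.find_one(...)`). A minimal usage sketch under that assumption; the `USERS` collection name and `toggle_user` helper are illustrative only:

```python
# Sketch: typical async usage of get_collection() with Motor.
from anibot.utils.db import get_collection

USERS = get_collection("USERS")  # collection is created lazily on first write

async def toggle_user(user_id: int) -> bool:
    """Add the user if absent, remove them if present; return the new state."""
    if await USERS.find_one({"_id": user_id}):
        await USERS.delete_one({"_id": user_id})
        return False
    await USERS.insert_one({"_id": user_id})
    return True
```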
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Lucky Jain
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/anibot/plugins/jikan.py:
--------------------------------------------------------------------------------
1 | # uses jikanpy (Jikan API)
2 | from pyrogram import filters, Client
3 | from pyrogram.types import Message, CallbackQuery
4 | from .. import BOT_NAME, TRIGGERS as trg, anibot
5 | from ..utils.data_parser import get_scheduled
6 | from ..utils.helper import (
7 |     control_user,
8 |     get_btns,
9 |     check_user,
10 |     get_user_from_channel as gcc
11 | )
12 | from ..utils.db import get_collection
13 |
14 | DC = get_collection('DISABLED_CMDS')
15 |
16 |
17 | @anibot.on_message(
18 |     filters.command(["schedule", f"schedule{BOT_NAME}"], prefixes=trg)
19 | )
20 | @control_user
21 | async def get_schuled(client: Client, message: Message, mdata: dict):
22 |     """Get List of Scheduled Anime"""
23 |     gid = mdata['chat']['id']
24 |     find_gc = await DC.find_one({'_id': gid})
25 |     if find_gc is not None and 'schedule' in find_gc['cmd_list'].split():
26 |         return
27 |     x = await client.send_message(
28 |         gid, "Fetching Scheduled Animes"
29 |     )
30 |     try:
31 |         user = mdata['from_user']['id']
32 |     except KeyError:
33 |         user = mdata['sender_chat']['id']
34 |     msg = await get_scheduled()
35 |     buttons = get_btns("SCHEDULED", result=[msg[1]], user=user)
36 |     await x.edit_text(msg[0], reply_markup=buttons)
37 |
38 |
39 | @anibot.on_callback_query(filters.regex(pattern=r"sched_(.*)"))
40 | @check_user
41 | async def ns_(client: anibot, cq: CallbackQuery, cdata: dict):
42 |     kek, day, user = cdata['data'].split("_")
43 |     msg = await get_scheduled(int(day))
44 |     buttons = get_btns("SCHEDULED", result=[int(day)], user=user)
45 |     await cq.edit_message_text(msg[0], reply_markup=buttons)
46 |
47 |
48 | @anibot.on_edited_message(
49 |     filters.command(["schedule", f"schedule{BOT_NAME}"], prefixes=trg)
50 | )
51 | async def get_schuled_edit(client: Client, message: Message):
52 |     await get_schuled(client, message)
--------------------------------------------------------------------------------
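The callback handlers above recover their state by packing it into `callback_data` as underscore-separated fields (a prefix such as `sched`, the payload, and the requesting user's id) and splitting it back apart in the `@check_user`-decorated handler. A small sketch of that round trip using plain string handling; the helper name is illustrative:

```python
# Sketch: the underscore-packed callback_data convention used by the plugins.
def pack_cb(prefix: str, *fields) -> str:
    data = "_".join([prefix, *map(str, fields)])
    # Telegram caps callback_data at 64 bytes, so payloads have to stay short.
    assert len(data.encode()) <= 64, "callback_data too long"
    return data

cb = pack_cb("sched", 3, 123456789)   # -> 'sched_3_123456789'
prefix, day, user = cb.split("_")
print(prefix, int(day), int(user))    # sched 3 123456789
```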
/anibot/plugins/animequotes.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from pyrogram import filters
3 | from pyrogram.types import (
4 |     Message,
5 |     CallbackQuery,
6 |     InlineKeyboardMarkup as IKM,
7 |     InlineKeyboardButton as IKB
8 | )
9 | from .. import BOT_NAME, TRIGGERS as trg, anibot
10 | from ..utils.helper import control_user, check_user
11 | from ..utils.db import get_collection
12 |
13 | DC = get_collection('DISABLED_CMDS')
14 |
15 | @anibot.on_message(
16 |     filters.command(
17 |         ["quote", f"quote{BOT_NAME}"],
18 |         prefixes=trg
19 |     )
20 | )
21 | @control_user
22 | async def quote(_, message: Message, mdata: dict):
23 |     gid = mdata['chat']['id']
24 |     try:
25 |         user = mdata['from_user']['id']
26 |     except KeyError:
27 |         user = mdata['sender_chat']['id']
28 |     find_gc = await DC.find_one({'_id': gid})
29 |     if find_gc is not None and 'quote' in find_gc['cmd_list'].split():
30 |         return
31 |     q = requests.get("https://animechan.vercel.app/api/random").json()
32 |     btn = IKM([[IKB("Refresh", callback_data=f"quoteref_{user}")]])
33 |     await message.reply_text(
34 |         '`'+q['quote']+'`\n\n— **'+q['character']
35 |         +'** (From __'+q['anime']+'__)',
36 |         reply_markup=btn
37 |     )
38 |
39 |
40 | @anibot.on_callback_query(filters.regex(pattern=r"quoteref_(.*)"))
41 | @check_user
42 | async def quote_btn(client: anibot, cq: CallbackQuery, cdata: dict):
43 |     kek, user = cdata['data'].split("_")
44 |     await cq.answer()
45 |     q = requests.get("https://animechan.vercel.app/api/random").json()
46 |     btn = IKM([[IKB("Refresh", callback_data=f"quoteref_{user}")]])
47 |     await cq.edit_message_text(
48 |         '`'+q['quote']+'`\n\n— **'+q['character']
49 |         +'** (From __'+q['anime']+'__)',
50 |         reply_markup=btn
51 |     )
52 |
53 |
54 | @anibot.on_edited_message(
55 |     filters.command(
56 |         ["quote", f"quote{BOT_NAME}"],
57 |         prefixes=trg
58 |     )
59 | )
60 | async def quote_edit(_, message: Message):
61 |     await quote(_, message)
62 |
--------------------------------------------------------------------------------
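Both handlers above call `requests.get()` from inside async callbacks, which blocks the event loop while the HTTP request is in flight. A hedged sketch of a non-blocking alternative that reuses the shared `aiohttp.ClientSession` created in `anibot/__init__.py`; the `fetch_quote` helper name is illustrative:

```python
# Sketch: the same animechan endpoint fetched without blocking the event loop.
from .. import session  # module-level aiohttp.ClientSession from anibot/__init__.py

async def fetch_quote() -> dict:
    async with session.get("https://animechan.vercel.app/api/random") as resp:
        resp.raise_for_status()
        return await resp.json()
```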
/app.json:
--------------------------------------------------------------------------------
1 | {
2 |     "name":"Ani-Info",
3 |     "description":"Telegram anime info provider bot",
4 |     "keywords":[
5 |         "Pyrogram",
6 |         "Telegram",
7 |         "Bot",
8 |         "Anime"
9 |     ],
10 |     "repository":"https://github.com/lostb053/anibot",
11 |     "website":"https://github.com/lostb053/anibot",
12 |     "success_url":"https://t.me/lostb053",
13 |     "env":{
14 |         "API_ID":{
15 |             "description":"Get this value from https://my.telegram.org"
16 |         },
17 |         "API_HASH":{
18 |             "description":"Get this value from https://my.telegram.org"
19 |         },
20 |         "DATABASE_URL":{
21 |             "description":"MongoDB URL from https://cloud.mongodb.com/; guide: https://del.dog/mongodb_guide"
22 |         },
23 |         "LOG_CHANNEL_ID":{
24 |             "description":"Private Telegram log channel ID. Note: also add your bot to the log channel!"
25 |         },
26 |         "BOT_TOKEN":{
27 |             "description":"Get this from https://t.me/botfather and enable Inline Mode"
28 |         },
29 |         "BOT_NAME":{
30 |             "description":"Your bot username with @, like @hanabi_robot. Note: enter the exact username with matching capitalization"
31 |         },
32 |         "OWNER_ID":{
33 |             "description":"Your user_id, e.g. 123456789; for multiple ids just add a space between them"
34 |         },
35 |         "ANILIST_CLIENT": {
36 |             "description":"Get from https://anilist.co/settings/developer"
37 |         },
38 |         "ANILIST_SECRET": {
39 |             "description":"Get from https://anilist.co/settings/developer"
40 |         },
41 |         "ANILIST_REDIRECT_URL": {
42 |             "description":"If you don't wish to change the auth method, just leave this as is",
43 |             "required":false
44 |         },
45 |         "TRIGGERS":{
46 |             "description":"Custom command triggers; for multiple, separate them with a space, like '/ ?'. Defaults are / and !",
47 |             "required":false
48 |         },
49 |         "PREFERRED_LANGUAGE":{
50 |             "description":"Sets a custom description language (for example, all descriptions in Malay). Pick the appropriate language code from http://telegra.ph/Supported-Languages-01-05-2",
51 |             "required":false
52 |         }
53 |     },
54 |     "buildpacks":[
55 |         {
56 |             "url":"https://github.com/jonathanong/heroku-buildpack-ffmpeg-latest.git"
57 |         },
58 |         {
59 |             "url":"heroku/python"
60 |         }
61 |     ],
62 |     "formation":{
63 |         "worker":{
64 |             "quantity":1,
65 |             "size":"free"
66 |         }
67 |     }
68 | }
69 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 |   push:
16 |     branches: [ main ]
17 |   pull_request:
18 |     # The branches below must be a subset of the branches above
19 |     branches: [ main ]
20 |   schedule:
21 |     - cron: '39 13 * * 6'
22 |
23 | jobs:
24 |   analyze:
25 |     name: Analyze
26 |     runs-on: ubuntu-latest
27 |     permissions:
28 |       actions: read
29 |       contents: read
30 |       security-events: write
31 |
32 |     strategy:
33 |       fail-fast: false
34 |       matrix:
35 |         language: [ 'python' ]
36 |         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
37 |         # Learn more:
38 |         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
39 |
40 |     steps:
41 |     - name: Checkout repository
42 |       uses: actions/checkout@v2
43 |
44 |     # Initializes the CodeQL tools for scanning.
45 |     - name: Initialize CodeQL
46 |       uses: github/codeql-action/init@v1
47 |       with:
48 |         languages: ${{ matrix.language }}
49 |         # If you wish to specify custom queries, you can do so here or in a config file.
50 |         # By default, queries listed here will override any specified in a config file.
51 |         # Prefix the list here with "+" to use these queries and those in the config file.
52 |         # queries: ./path/to/local/query, your-org/your-repo/queries@main
53 |
54 |     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
55 |     # If this step fails, then you should remove it and run the build manually (see below)
56 |     - name: Autobuild
57 |       uses: github/codeql-action/autobuild@v1
58 |
59 |     # ℹ️ Command-line programs to run using the OS shell.
60 |     # 📚 https://git.io/JvXDl
61 |
62 |     # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
63 |     #    and modify them (or add more) to build your code if your project
64 |     #    uses a compiled language
65 |
66 |     #- run: |
67 |     #   make bootstrap
68 |     #   make release
69 |
70 |     - name: Perform CodeQL Analysis
71 |       uses: github/codeql-action/analyze@v1
72 |
--------------------------------------------------------------------------------
/anibot/plugins/animefillerslist.py:
--------------------------------------------------------------------------------
1 | from pyrogram import filters
2 | from pyrogram.types import (
3 |     InlineKeyboardButton,
4 |     InlineKeyboardMarkup,
5 |     CallbackQuery,
6 |     Message
7 | )
8 | from ..utils.data_parser import search_filler, parse_filler
9 | from ..utils.helper import (
10 |     check_user,
11 |     control_user,
12 |     rand_key,
13 |     get_user_from_channel as gcc
14 | )
15 | from ..utils.db import get_collection
16 | from .. import BOT_NAME, TRIGGERS as trg, anibot
17 |
18 | FILLERS = {}
19 | DC = get_collection('DISABLED_CMDS')
20 |
21 |
22 | @anibot.on_message(
23 |     filters.command(['fillers', f"fillers{BOT_NAME}"], prefixes=trg)
24 | )
25 | @control_user
26 | async def fillers_cmd(client: anibot, message: Message, mdata: dict):
27 |     find_gc = await DC.find_one({'_id': mdata['chat']['id']})
28 |     try:
29 |         user = mdata['from_user']['id']
30 |     except KeyError:
31 |         user = mdata['sender_chat']['id']
32 |     if find_gc is not None and 'fillers' in find_gc['cmd_list'].split():
33 |         return
34 |     qry = mdata['text'].split(" ", 1)
35 |     if len(qry)==1:
36 |         return await message.reply_text(
37 |             """Give some anime name to search fillers for
38 | example: /fillers Detective Conan"""
39 |         )
40 |     k = search_filler(qry[1])
41 |     if k == {}:
42 |         await message.reply_text("No fillers found for the given anime...")
43 |         return
44 |     button = []
45 |     list_ = list(k.keys())
46 |     if len(list_)==1:
47 |         result = parse_filler(k.get(list_[0]))
48 |         msg = ""
49 |         msg += f"Fillers for anime `{list_[0]}`\n\nManga Canon episodes:\n"
50 |         msg += str(result.get("total_ep"))
51 |         msg += "\n\nMixed/Canon fillers:\n"
52 |         msg += str(result.get("mixed_ep"))
53 |         msg += "\n\nFillers:\n"
54 |         msg += str(result.get("filler_ep"))
55 |         if result.get("ac_ep") is not None:
56 |             msg += "\n\nAnime Canon episodes:\n"
57 |             msg += str(result.get("ac_ep"))
58 |         await message.reply_text(msg)
59 |         return
60 |     for i in list_:
61 |         fl_js = rand_key()
62 |         FILLERS[fl_js] = [k.get(i), i]
63 |         button.append(
64 |             [InlineKeyboardButton(i, callback_data=f"fill_{fl_js}_{user}")]
65 |         )
66 |     await message.reply_text(
67 |         "Pick anime you want to see fillers list for:",
68 |         reply_markup=InlineKeyboardMarkup(button)
69 |     )
70 |
71 |
72 | @anibot.on_callback_query(filters.regex(pattern=r"fill_(.*)"))
73 | @check_user
74 | async def filler_btn(client: anibot, cq: CallbackQuery, cdata: dict):
75 |     kek, req, user = cdata['data'].split("_")
76 |     result = parse_filler((FILLERS.get(req))[0])
77 |     msg = ""
78 |     msg += f"**Fillers for anime** `{(FILLERS.get(req))[1]}`"
79 |     msg += "\n\n**Manga Canon episodes:**\n"
80 |     msg += str(result.get("total_ep"))
81 |     msg += "\n\n**Mixed/Canon fillers:**\n"
82 |     msg += str(result.get("mixed_ep"))
83 |     msg += "\n\n**Fillers:**\n"
84 |     msg += str(result.get("filler_ep"))
85 |     if result.get("ac_ep") is not None:
86 |         msg += "\n\n**Anime Canon episodes:**\n"
87 |         msg += str(result.get("ac_ep"))
88 |     await cq.edit_message_text(msg)
89 |
90 |
91 | @anibot.on_edited_message(
92 |     filters.command(['fillers', f"fillers{BOT_NAME}"], prefixes=trg)
93 | )
94 | async def fillers_cmd_edit(client: anibot, message: Message):
95 |     await fillers_cmd(client, message)
--------------------------------------------------------------------------------
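Because `callback_data` is limited to 64 bytes, `fillers_cmd` does not put the scraped result into the button payload; it caches it in the in-memory `FILLERS` dict under a short random key from `rand_key()` (defined in `utils/helper.py`, which is not part of this dump) and sends only the key. A minimal sketch of that token-plus-cache idea with a hypothetical token helper:

```python
# Sketch: cache a large payload under a short token that fits in callback_data.
import secrets

CACHE: dict[str, dict] = {}

def make_token(payload: dict) -> str:
    # Hypothetical stand-in for rand_key(): short, URL-safe, collision-unlikely.
    token = secrets.token_hex(4)          # 8 hex characters
    CACHE[token] = payload
    return token

token = make_token({"anime": "Detective Conan", "fillers": "1, 2, 5-7"})
print(token, CACHE[token]["anime"])
```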
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
162 | *.session*
163 | .vscode/
--------------------------------------------------------------------------------
/anibot/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pyrogram import Client
3 | from aiohttp import ClientSession
4 |
5 | TRIGGERS = os.environ.get("TRIGGERS", "/ !").split()
6 | API_HASH = os.environ.get("API_HASH")
7 | BOT_TOKEN = os.environ.get("BOT_TOKEN")
8 | BOT_NAME = os.environ.get("BOT_NAME")
9 | DB_URL = os.environ.get("DATABASE_URL")
10 | ANILIST_CLIENT = os.environ.get("ANILIST_CLIENT")
11 | ANILIST_SECRET = os.environ.get("ANILIST_SECRET")
12 | ANILIST_REDIRECT_URL = os.environ.get("ANILIST_REDIRECT_URL", "https://anilist.co/api/v2/oauth/pin")
13 | API_ID = int(os.environ.get("API_ID"))
14 | LOG_CHANNEL_ID = int(os.environ.get("LOG_CHANNEL_ID"))
15 | OWNER = list(filter(lambda x: x, map(int, os.environ.get("OWNER_ID", "1005170481 804248372 1993696756").split()))) ## sudos can be included
16 |
17 | DOWN_PATH = "anibot/downloads/"
18 | HELP_DICT = dict()
19 |
20 | session = ClientSession()
21 | plugins = dict(root="anibot/plugins")
22 | anibot = Client("anibot", bot_token=BOT_TOKEN, api_id=API_ID, api_hash=API_HASH, plugins=plugins)
23 |
24 | has_user: bool = False
25 | if os.environ.get('USER_SESSION'):
26 |     has_user: bool = True
27 |     user = Client(os.environ.get('USER_SESSION'), api_id=API_ID, api_hash=API_HASH)
28 |
29 | HELP_DICT['Group'] = '''
30 | Group based commands:
31 |
32 | /settings - Toggle options like whether to allow 18+ content in the group or whether to notify about aired anime, etc., and change the UI
33 |
34 | /disable - Disable use of a cmd in the group (Disable multiple cmds by adding space between them)
35 | `/disable anime anilist me user`
36 |
37 | /enable - Enable use of a cmd in the group (Enable multiple cmds by adding space between them)
38 | `/enable anime anilist me user`
39 |
40 | /disabled - List out disabled cmds
41 | '''
42 |
43 | HELP_DICT["Additional"] = """Use /reverse cmd to get reverse search via tracemoepy API
44 | __Note: This works best on uncropped anime pic,
45 | when used on cropped media, you may get result but it might not be too reliable__
46 |
47 | Use /schedule cmd to get scheduled animes based on weekdays
48 |
49 | Use /watch cmd to get watch order of searched anime
50 |
51 | Use /fillers cmd to get a list of fillers for an anime
52 |
53 | Use /quote cmd to get a random quote
54 | """
55 |
56 | HELP_DICT["Anilist"] = """
57 | Below is the list of basic anilist cmds for info on anime, character, manga, etc.
58 |
59 | /anime - Use this cmd to get info on specific anime using keywords (anime name) or Anilist ID
60 | (Can lookup info on sequels and prequels)
61 |
62 | /anilist - Use this cmd to choose between multiple animes with similar names related to searched query
63 | (Doesn't include buttons for prequel and sequel)
64 |
65 | /character - Use this cmd to get info on character
66 |
67 | /manga - Use this cmd to get info on manga
68 |
69 | /airing - Use this cmd to get info on airing status of anime
70 |
71 | /top - Use this cmd to lookup top animes of a genre/tag or from all animes
72 | (To get a list of available tags or genres send /gettags or /getgenres
73 | '/gettags nsfw' for nsfw tags)
74 |
75 | /user - Use this cmd to get info on an anilist user
76 |
77 | /browse - Use this cmd to get updates about latest animes
78 | """
79 |
80 | HELP_DICT["Oauth"] = """
81 | This includes advanced anilist features
82 |
83 | Use /auth or !auth cmd to get details on how to authorize your Anilist account with the bot
84 | Authorising yourself unlocks advanced features of the bot, like:
85 | - adding anime/character/manga to favourites
86 | - viewing your anilist data related to anime/manga in your searches which includes score, status, and favourites
87 | - unlock /flex, /me, /activity and /favourites commands
88 | - adding/updating anilist entry like completed or plan to watch/read
89 | - deleting anilist entry
90 |
91 | Use /flex or !flex cmd to get your anilist stats
92 |
93 | Use /logout or !logout cmd to disconnect your Anilist account
94 |
95 | Use /me or !me cmd to get your anilist recent activity
96 | Can also use /activity or !activity
97 |
98 | Use /favourites or !favourites cmd to get your anilist favourites
99 | """
100 |
--------------------------------------------------------------------------------
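Most of the configuration above is parsed straight out of environment variables: `TRIGGERS` is split on whitespace into a list of command prefixes, and `OWNER_ID` is split, converted to `int`, and filtered. A quick sketch of that parsing with illustrative values, mirroring the expressions used in the module:

```python
# Sketch: how the env-driven settings above behave for sample values.
import os

os.environ["TRIGGERS"] = "/ !"
os.environ["OWNER_ID"] = "123456789 987654321"

triggers = os.environ.get("TRIGGERS", "/ !").split()
owners = list(filter(lambda x: x, map(int, os.environ.get("OWNER_ID", "").split())))

print(triggers)  # ['/', '!']
print(owners)    # [123456789, 987654321]
```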
/anibot/plugins/watch.py:
--------------------------------------------------------------------------------
1 | # Credits to @NotThatMF on Telegram for the Chiaki fast API;
2 | # the base code here is also borrowed from him.
3 |
4 | from pyrogram import filters, Client
5 | from pyrogram.types import (
6 |     CallbackQuery,
7 |     InlineKeyboardButton,
8 |     InlineKeyboardMarkup,
9 |     Message
10 | )
11 | from .. import BOT_NAME, TRIGGERS as trg, anibot
12 | from ..utils.data_parser import get_wo, get_wols
13 | from ..utils.helper import (
14 |     check_user,
15 |     control_user,
16 |     get_user_from_channel as gcc
17 | )
18 | from ..utils.db import get_collection
19 |
20 | DC = get_collection('DISABLED_CMDS')
21 |
22 |
23 | @anibot.on_message(
24 |     filters.command(["watch", f"watch{BOT_NAME}"], prefixes=trg)
25 | )
26 | @control_user
27 | async def get_watch_order(client: Client, message: Message, mdata: dict):
28 |     """Get watch order of the searched anime"""
29 |     gid = mdata['chat']['id']
30 |     find_gc = await DC.find_one({'_id': gid})
31 |     if find_gc is not None and 'watch' in find_gc['cmd_list'].split():
32 |         return
33 |     x = message.text.split(" ", 1)
34 |     if len(x)==1:
35 |         await message.reply_text("Nothing given to search for!!!")
36 |         return
37 |     try:
38 |         user = mdata['from_user']['id']
39 |     except KeyError:
40 |         user = mdata['sender_chat']['id']
41 |     data = get_wols(x[1])
42 |     msg = f"Found related animes for the query {x[1]}"
43 |     buttons = []
44 |     if data == []:
45 |         await client.send_message(gid, 'No results found!!!')
46 |         return
47 |     for i in data:
48 |         buttons.append(
49 |             [
50 |                 InlineKeyboardButton(
51 |                     str(i[1]),
52 |                     callback_data=f"watch_{i[0]}_{x[1]}_0_{user}"
53 |                 )
54 |             ]
55 |         )
56 |     await client.send_message(
57 |         gid, msg, reply_markup=InlineKeyboardMarkup(buttons)
58 |     )
59 |
60 |
61 | @anibot.on_callback_query(filters.regex(pattern=r"watch_(.*)"))
62 | @check_user
63 | async def watch_(client: anibot, cq: CallbackQuery, cdata: dict):
64 |     kek, id_, qry, req, user = cdata['data'].split("_")
65 |     msg, total = get_wo(int(id_), int(req))
66 |     totalpg, lol = divmod(total, 50)
67 |     button = []
68 |     if lol!=0:
69 |         totalpg += 1
70 |     if total>50:
71 |         if int(req)==0:
72 |             button.append(
73 |                 [
74 |                     InlineKeyboardButton(
75 |                         text="Next",
76 |                         callback_data=f"{kek}_{id_}_{qry}_{int(req)+1}_{user}"
77 |                     )
78 |                 ]
79 |             )
80 |         elif int(req)==totalpg:
81 |             button.append(
82 |                 [
83 |                     InlineKeyboardButton(
84 |                         text="Prev",
85 |                         callback_data=f"{kek}_{id_}_{qry}_{int(req)-1}_{user}"
86 |                     )
87 |                 ]
88 |             )
89 |         else:
90 |             button.append(
91 |                 [
92 |                     InlineKeyboardButton(
93 |                         text="Prev",
94 |                         callback_data=f"{kek}_{id_}_{qry}_{int(req)-1}_{user}"
95 |                     ),
96 |                     InlineKeyboardButton(
97 |                         text="Next",
98 |                         callback_data=f"{kek}_{id_}_{qry}_{int(req)+1}_{user}"
99 |                     )
100 |                 ]
101 |             )
102 |     button.append([
103 |         InlineKeyboardButton("Back", callback_data=f"wol_{qry}_{user}")
104 |     ])
105 |     await cq.edit_message_text(msg, reply_markup=InlineKeyboardMarkup(button))
106 |
107 |
108 | @anibot.on_callback_query(filters.regex(pattern=r"wol_(.*)"))
109 | @check_user
110 | async def wls(client: anibot, cq: CallbackQuery, cdata: dict):
111 |     kek, qry, user = cdata['data'].split("_")
112 |     data = get_wols(qry)
113 |     msg = f"Found related animes for the query {qry}"
114 |     buttons = []
115 |     for i in data:
116 |         buttons.append(
117 |             [
118 |                 InlineKeyboardButton(
119 |                     str(i[1]),
120 |                     callback_data=f"watch_{i[0]}_{qry}_0_{user}"
121 |                 )
122 |             ]
123 |         )
124 |     await cq.edit_message_text(msg, reply_markup=InlineKeyboardMarkup(buttons))
125 |
126 |
127 | @anibot.on_edited_message(
128 |     filters.command(["watch", f"watch{BOT_NAME}"], prefixes=trg)
129 | )
130 | async def get_watch_order_edit(client: Client, message: Message):
131 |     await get_watch_order(client, message)
--------------------------------------------------------------------------------
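The pager in `watch_` shows 50 entries per page and derives a page count from `divmod(total, 50)` (with the `totalpg += 1` correction when there is a remainder). A small worked sketch of that ceiling-division arithmetic; exactly how page indices map onto `get_wo()` depends on `utils/data_parser.py`, which defines it outside the part of that file shown in this dump:

```python
# Sketch: pages needed for `total` entries at 50 entries per page.
def page_count(total: int, per_page: int = 50) -> int:
    pages, remainder = divmod(total, per_page)
    if remainder != 0:
        pages += 1
    return pages

print(page_count(50))   # 1
print(page_count(51))   # 2
print(page_count(120))  # 3
```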
/README.md:
--------------------------------------------------------------------------------
1 | Telegram bot capable of fetching the following info via the AniList API, inspired by [AniFluid](https://t.me/anifluidbot) and [Nepgear](https://t.me/nepgearbot)
2 | * Anime
3 | * Airing
4 | * Manga
5 | * Character
6 | * Studio
7 | * Scheduled
8 | * Top animes
9 | * Favourites
10 | * Anilist Activity
11 | * Update Anilist entry using bot
12 | * Popular, trending and upcoming animes for a season
13 | * Random anime quotes
14 | * Anime fillers from [animefillerslist](https://www.animefillerlist.com)
15 | * Anime Airing notifications from [LiveChart](https://livechart.me)
16 | * Anime Headlines from [LiveChart](https://livechart.me)
17 | * Anime Headlines from [MyAnimeList](https://myanimelist.net)
18 | * Anime release notifications for [Crunchyroll](https://crunchyroll.com)
19 | * Anime release notifications for [Subsplease](https://subsplease.org)
20 | * Anime Reverse Search Powered by [tracemoepy](https://github.com/dragsama/tracemoepy)
21 | * Watch Order from [Chiaki](https://chiaki.site/) using [web api](https://chiaki.vercel.app)
22 | * Supports custom UI to be set for all results shown by /anime and /anilist in a group
23 |
{cmd}\n\n"
774 | final_output += "OUTPUT:\n"
775 | final_output += f"{evaluation.strip()} \n"
776 | if len(final_output) > 4096:
777 | with io.BytesIO(str.encode(final_output)) as out_file:
778 | out_file.name = "eval.txt"
779 | await reply_to_.reply_document(
780 | document=out_file,
781 | caption=cmd[:1000],
782 | disable_notification=True
783 | )
784 | else:
785 | await reply_to_.reply_text(final_output)
786 | await status_message.delete()
787 |
788 |
789 | async def aexec(code, client, message):
790 | exec(
791 | "async def __aexec(client, message): "
792 | + "".join(f"\n {l_}" for l_ in code.split("\n"))
793 | )
794 | return await locals()["__aexec"](client, message)
795 |
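`aexec()` above makes `await` usable inside `/eval` snippets by indenting the user code into a generated `async def` body, `exec`-ing it, and awaiting the resulting coroutine. A self-contained sketch of the same technique (the demo names are illustrative):

```python
# Sketch: the aexec() technique in isolation.
import asyncio

async def aexec_demo(code: str, **kwargs):
    # Wrap the snippet in a generated async function so `await` works inside it.
    exec(
        "async def __aexec(" + ", ".join(kwargs) + "): "
        + "".join(f"\n    {line}" for line in code.split("\n"))
    )
    return await locals()["__aexec"](**kwargs)

async def main():
    out = await aexec_demo("await asyncio.sleep(0)\nreturn x * 2", x=21)
    print(out)  # 42

asyncio.run(main())
```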
796 |
797 | @anibot.on_message(
798 | filters.user(OWNER) & filters.command(
799 | ["term", f"term{BOT_NAME}"], prefixes=trg
800 | )
801 | )
802 | @control_user
803 | async def terminal(client: Client, message: Message, mdata: dict):
804 | if len(message.text.split()) == 1:
805 | await message.reply_text("Usage: `/term echo owo`")
806 | return
807 | args = message.text.split(None, 1)
808 | teks = args[1]
809 | if "\n" in teks:
810 | code = teks.split("\n")
811 | output = ""
812 | for x in code:
813 | shell = re.split(""" (?=(?:[^'"]|'[^']*'|"[^"]*")*$)""", x)
814 | try:
815 | process = subprocess.Popen(
816 | shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE
817 | )
818 | except Exception as err:
819 | print(err)
820 | await message.reply_text(
821 | """
822 | **Error:**
823 | ```{}```
824 | """.format(
825 | err
826 | ),
827 | parse_mode=enums.ParseMode.MARKDOWN,
828 | )
829 | output += "**{}**\n".format(code)
830 | output += process.stdout.read()[:-1].decode("utf-8")
831 | output += "\n"
832 | else:
833 | shell = re.split(""" (?=(?:[^'"]|'[^']*'|"[^"]*")*$)""", teks)
834 | for a in range(len(shell)):
835 | shell[a] = shell[a].replace('"', "")
836 | try:
837 | process = subprocess.Popen(
838 | shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE
839 | )
840 | except Exception as err:
841 | exc_type, exc_obj, exc_tb = sys.exc_info()
842 | errors = traceback.format_exception(
843 | exc_type, exc_obj, exc_tb
844 | )
845 | await message.reply_text(
846 | """**Error:**\n```{}```""".format("".join(errors)),
847 | parse_mode=enums.ParseMode.MARKDOWN
848 | )
849 | return
850 | output = process.stdout.read()[:-1].decode("utf-8")
851 | if str(output) == "\n":
852 | output = None
853 | if output:
854 | if len(output) > 4096:
855 | filename = "output.txt"
856 | with open(filename, "w+") as file:
857 | file.write(output)
858 | await client.send_document(
859 | message.chat.id,
860 | filename,
861 | reply_to_message_id=message.id,
862 | caption="`Output file`",
863 | )
864 | os.remove(filename)
865 | return
866 | await message.reply_text(
867 | f"**Output:**\n```{output}```",
868 | parse_mode=enums.ParseMode.MARKDOWN
869 | )
870 | else:
871 | await message.reply_text("**Output:**\n`No Output`")
872 |
873 |
874 | ##########################################################################
875 |
876 | @anibot.on_edited_message(
877 | ~filters.private & filters.command(
878 | ['disable', f'disable{BOT_NAME}', 'enable', f'enable{BOT_NAME}'],
879 | prefixes=trg
880 | )
881 | )
882 | @control_user
883 | async def en_dis__able_cmd_edit(client: Client, message: Message, mdata: dict):
884 | await en_dis__able_cmd(client, message)
885 |
886 |
887 | @anibot.on_edited_message(
888 | ~filters.private & filters.command(
889 | ['disabled', f'disabled{BOT_NAME}'],
890 | prefixes=trg
891 | )
892 | )
893 | @control_user
894 | async def list_disabled_edit(client: Client, message: Message, mdata: dict):
895 | await list_disabled(client, message)
896 |
897 | @anibot.on_edited_message(
898 | filters.user(OWNER) & filters.command(
899 | ['dbcleanup', f'dbcleanup{BOT_NAME}'], prefixes=trg
900 | )
901 | )
902 | @control_user
903 | async def db_cleanup_edit(client: Client, message: Message, mdata: dict):
904 | await db_cleanup(client, message)
905 |
906 | @anibot.on_edited_message(
907 | filters.command(['start', f'start{BOT_NAME}'], prefixes=trg)
908 | )
909 | @control_user
910 | async def start_edit(client: Client, message: Message, mdata: dict):
911 | await start_(client, message)
912 |
913 | @anibot.on_edited_message(
914 | filters.command(['help', f'help{BOT_NAME}'], prefixes=trg)
915 | )
916 | @control_user
917 | async def help_edit(client: Client, message: Message, mdata: dict):
918 | await help_(client, message)
919 |
920 | @anibot.on_edited_message(
921 | filters.command(
922 | [
923 | 'connect',
924 | f'connect{BOT_NAME}',
925 | 'disconnect',
926 | f'disconnect{BOT_NAME}'
927 | ],
928 | prefixes=trg
929 | )
930 | )
931 | @control_user
932 | async def connect_edit(client: Client, message: Message, mdata: dict):
933 | await connect_(client, message)
934 |
935 | @anibot.on_edited_message(
936 | filters.user(OWNER) & filters.command(
937 | ['stats', f'stats{BOT_NAME}'], prefixes=trg
938 | )
939 | )
940 | @control_user
941 | async def stats_edit(client: Client, message: Message, mdata: dict):
942 | await stats_(client, message)
943 |
944 | @anibot.on_edited_message(
945 | filters.command(['ping', f'ping{BOT_NAME}'], prefixes=trg)
946 | )
947 | @control_user
948 | async def pong_edit(client: Client, message: Message, mdata: dict):
949 | await pong_(client, message)
950 |
951 | @anibot.on_edited_message(
952 | filters.private & filters.command(
953 | ['feedback', f'feedback{BOT_NAME}'], prefixes=trg
954 | )
955 | )
956 | @control_user
957 | async def feed_edit(client: Client, message: Message, mdata: dict):
958 | await feed_(client, message)
959 |
960 | @anibot.on_edited_message(
961 | filters.command(
962 | ['eval', f'eval{BOT_NAME}'], prefixes=trg
963 | ) & filters.user(OWNER)
964 | )
965 | @control_user
966 | async def eval_edit(client: Client, message: Message, mdata: dict):
967 | await eval_(client, message)
968 |
969 | @anibot.on_edited_message(
970 | filters.user(OWNER) & filters.command(
971 | ["term", f"term{BOT_NAME}"], prefixes=trg
972 | )
973 | )
974 | @control_user
975 | async def terminal_edit(client: Client, message: Message, mdata: dict):
976 | await terminal(client, message)
--------------------------------------------------------------------------------
/anibot/utils/data_parser.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import time
3 | import os
4 | from bs4 import BeautifulSoup
5 | from .db import get_collection
6 | from .google_trans_new import google_translator
7 | from .helper import (
8 | cflag,
9 | make_it_rw,
10 | pos_no,
11 | return_json_senpai,
12 | day_,
13 | season_
14 | )
15 | from .. import BOT_NAME
16 | from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
17 | from datetime import datetime
18 |
19 | tr = google_translator()
20 | ANIME_DB, MANGA_DB, CHAR_DB, STUDIO_DB, AIRING_DB = {}, {}, {}, {}, {}
21 | GUI = get_collection('GROUP_UI')
22 |
23 | async def uidata(id_):
24 | data = await GUI.find_one({'_id': str(id_)})
25 | if data is not None:
26 | bullet = str(data['bl'])+" "
27 | if data['bl'] is None:
28 | bullet = ""
29 | return bullet, data['cs']
30 | return ["➤ ", "UPPER"]
31 |
32 |
33 | async def get_ui_text(case):
34 | if case=="UPPER":
35 | return [
36 | "SOURCE",
37 | "TYPE",
38 | "SCORE",
39 | "DURATION",
40 | "USER DATA",
41 | "ADULT RATED",
42 | "STATUS",
43 | "GENRES",
44 | "TAGS",
45 | "SEQUEL",
46 | "PREQUEL",
47 | "NEXT AIRING",
48 | "DESCRIPTION",
49 | "VOLUMES",
50 | "CHAPTERS"
51 | ]
52 | else:
53 | return [
54 | "Source",
55 | "Type",
56 | "Score",
57 | "Duration",
58 | "User Data",
59 | "Adult Rated",
60 | "Status",
61 | "Genres",
62 | "Tags",
63 | "Sequel",
64 | "Prequel",
65 | "Next Airing",
66 | "Description",
67 | "Volumes",
68 | "Chapters"
69 | ]
70 |
71 |
72 |
73 | #### Anilist part ####
74 |
75 | ANIME_TEMPLATE = """{name}
76 |
77 | **ID | MAL ID:** `{idm}` | `{idmal}`
78 | {bl}**{psrc}:** `{source}`
79 | {bl}**{ptype}:** `{formats}`{avscd}{dura}{user_data}
80 | {status_air}{gnrs_}{tags_}
81 |
82 | 🎬 {trailer_link}
83 | 📖 Synopsis
84 | 📖 Official Site
85 | Recommendations
86 |
87 | {additional}"""
88 |
89 |
90 | # GraphQL Queries.
91 | ANIME_QUERY = """
92 | query ($id: Int, $idMal:Int, $search: String) {
93 | Media (id: $id, idMal: $idMal, search: $search, type: ANIME) {
94 | id
95 | idMal
96 | title {
97 | romaji
98 | english
99 | native
100 | }
101 | format
102 | status
103 | episodes
104 | duration
105 | countryOfOrigin
106 | source (version: 2)
107 | trailer {
108 | id
109 | site
110 | }
111 | genres
112 | tags {
113 | name
114 | }
115 | averageScore
116 | relations {
117 | edges {
118 | node {
119 | title {
120 | romaji
121 | english
122 | }
123 | id
124 | type
125 | }
126 | relationType
127 | }
128 | }
129 | nextAiringEpisode {
130 | timeUntilAiring
131 | episode
132 | }
133 | isAdult
134 | isFavourite
135 | mediaListEntry {
136 | status
137 | score
138 | id
139 | }
140 | siteUrl
141 | }
142 | }
143 | """
144 |
145 | ISADULT = """
146 | query ($id: Int) {
147 | Media (id: $id) {
148 | isAdult
149 | }
150 | }
151 | """
152 |
153 | BROWSE_QUERY = """
154 | query ($s: MediaSeason, $y: Int, $sort: [MediaSort]) {
155 | Page {
156 | media (season: $s, seasonYear: $y, sort: $sort) {
157 | title {
158 | romaji
159 | }
160 | format
161 | }
162 | }
163 | }
164 | """
165 |
166 | FAV_ANI_QUERY = """
167 | query ($id: Int, $page: Int) {
168 | User (id: $id) {
169 | favourites {
170 | anime (page: $page, perPage: 10) {
171 | pageInfo {
172 | lastPage
173 | hasNextPage
174 | }
175 | edges {
176 | node {
177 | title {
178 | romaji
179 | }
180 | siteUrl
181 | }
182 | }
183 | }
184 | }
185 | }
186 | }
187 | """
188 |
189 | FAV_MANGA_QUERY = """
190 | query ($id: Int, $page: Int) {
191 | User (id: $id) {
192 | favourites {
193 | manga (page: $page, perPage: 10) {
194 | pageInfo {
195 | lastPage
196 | hasNextPage
197 | }
198 | edges {
199 | node {
200 | title {
201 | romaji
202 | }
203 | siteUrl
204 | }
205 | }
206 | }
207 | }
208 | }
209 | }
210 | """
211 |
212 | FAV_CHAR_QUERY = """
213 | query ($id: Int, $page: Int) {
214 | User (id: $id) {
215 | favourites {
216 | characters (page: $page, perPage: 10) {
217 | pageInfo {
218 | lastPage
219 | hasNextPage
220 | }
221 | edges {
222 | node {
223 | name {
224 | full
225 | }
226 | siteUrl
227 | }
228 | }
229 | }
230 | }
231 | }
232 | }
233 | """
234 |
235 | VIEWER_QRY = """
236 | query {
237 | Viewer {
238 | id
239 | name
240 | siteUrl
241 | statistics {
242 | anime {
243 | count
244 | minutesWatched
245 | episodesWatched
246 | meanScore
247 | }
248 | manga {
249 | count
250 | chaptersRead
251 | volumesRead
252 | meanScore
253 | }
254 | }
255 | }
256 | }
257 | """
258 |
259 | USER_QRY = """
260 | query ($search: String) {
261 | User (name: $search) {
262 | id
263 | name
264 | siteUrl
265 | statistics {
266 | anime {
267 | count
268 | minutesWatched
269 | episodesWatched
270 | meanScore
271 | }
272 | manga {
273 | count
274 | chaptersRead
275 | volumesRead
276 | meanScore
277 | }
278 | }
279 | }
280 | }
281 | """
282 |
283 | ANIME_MUTATION = """
284 | mutation ($id: Int) {
285 | ToggleFavourite (animeId: $id) {
286 | anime {
287 | pageInfo {
288 | total
289 | }
290 | }
291 | }
292 | }
293 | """
294 |
295 | MANGA_MUTATION = """
296 | mutation ($id: Int) {
297 | ToggleFavourite (mangaId: $id) {
298 | manga {
299 | pageInfo {
300 | total
301 | }
302 | }
303 | }
304 | }
305 | """
306 |
307 | STUDIO_MUTATION = """
308 | mutation ($id: Int) {
309 | ToggleFavourite (studioId: $id) {
310 | studios {
311 | pageInfo {
312 | total
313 | }
314 | }
315 | }
316 | }
317 | """
318 |
319 | CHAR_MUTATION = """
320 | mutation ($id: Int) {
321 | ToggleFavourite (characterId: $id) {
322 | characters {
323 | pageInfo {
324 | total
325 | }
326 | }
327 | }
328 | }
329 | """
330 |
331 | ANILIST_MUTATION = """
332 | mutation ($id: Int, $status: MediaListStatus) {
333 | SaveMediaListEntry (mediaId: $id, status: $status) {
334 | media {
335 | title {
336 | romaji
337 | }
338 | }
339 | }
340 | }
341 | """
342 |
343 | ANILIST_MUTATION_UP = """
344 | mutation ($id: [Int], $status: MediaListStatus) {
345 | UpdateMediaListEntries (ids: $id, status: $status) {
346 | media {
347 | title {
348 | romaji
349 | }
350 | }
351 | }
352 | }
353 | """
354 |
355 | ANILIST_MUTATION_DEL = """
356 | mutation ($id: Int) {
357 | DeleteMediaListEntry (id: $id) {
358 | deleted
359 | }
360 | }
361 | """
362 |
363 | AIR_QUERY = """
364 | query ($search: String, $page: Int) {
365 | Page (perPage: 1, page: $page) {
366 | pageInfo {
367 | total
368 | hasNextPage
369 | }
370 | media (search: $search, type: ANIME) {
371 | id
372 | title {
373 | romaji
374 | english
375 | }
376 | status
377 | countryOfOrigin
378 | nextAiringEpisode {
379 | timeUntilAiring
380 | episode
381 | }
382 | siteUrl
383 | isFavourite
384 | isAdult
385 | mediaListEntry {
386 | status
387 | id
388 | }
389 | }
390 | }
391 | }
392 | """
393 |
394 | DES_INFO_QUERY = """
395 | query ($id: Int) {
396 | Media (id: $id) {
397 | id
398 | description (asHtml: false)
399 | }
400 | }
401 | """
402 |
403 | CHA_INFO_QUERY = """
404 | query ($id: Int, $page: Int) {
405 | Media (id: $id, type: ANIME) {
406 | id
407 | characters (page: $page, perPage: 25, sort: ROLE) {
408 | pageInfo {
409 | hasNextPage
410 | lastPage
411 | total
412 | }
413 | edges {
414 | node {
415 | name {
416 | full
417 | }
418 | }
419 | role
420 | }
421 | }
422 | }
423 | }
424 | """
425 |
426 | REL_INFO_QUERY = """
427 | query ($id: Int) {
428 | Media (id: $id, type: ANIME) {
429 | id
430 | relations {
431 | edges {
432 | node {
433 | title {
434 | romaji
435 | }
436 | type
437 | }
438 | relationType
439 | }
440 | }
441 | }
442 | }
443 | """
444 |
445 | PAGE_QUERY = """
446 | query ($search: String, $page: Int) {
447 | Page (perPage: 1, page: $page) {
448 | pageInfo {
449 | total
450 | hasNextPage
451 | }
452 | media (search: $search, type: ANIME) {
453 | id
454 | idMal
455 | title {
456 | romaji
457 | english
458 | native
459 | }
460 | format
461 | status
462 | episodes
463 | duration
464 | countryOfOrigin
465 | source (version: 2)
466 | trailer {
467 | id
468 | site
469 | }
470 | genres
471 | tags {
472 | name
473 | }
474 | averageScore
475 | relations {
476 | edges {
477 | node {
478 | title {
479 | romaji
480 | english
481 | }
482 | type
483 | }
484 | relationType
485 | }
486 | }
487 | nextAiringEpisode {
488 | timeUntilAiring
489 | episode
490 | }
491 | isAdult
492 | isFavourite
493 | mediaListEntry {
494 | status
495 | score
496 | id
497 | }
498 | siteUrl
499 | }
500 | }
501 | }
502 | """
503 |
504 | CHARACTER_QUERY = """
505 | query ($id: Int, $search: String, $page: Int) {
506 | Page (perPage: 1, page: $page) {
507 | pageInfo {
508 | total
509 | hasNextPage
510 | }
511 | characters (id: $id, search: $search) {
512 | id
513 | name {
514 | full
515 | native
516 | }
517 | image {
518 | large
519 | }
520 | media (type: ANIME) {
521 | edges {
522 | node {
523 | title {
524 | romaji
525 | }
526 | type
527 | }
528 | voiceActors (language: JAPANESE) {
529 | name {
530 | full
531 | }
532 | siteUrl
533 | }
534 | }
535 | }
536 | isFavourite
537 | siteUrl
538 | }
539 | }
540 | }
541 | """
542 |
543 | MANGA_QUERY = """
544 | query ($search: String, $page: Int) {
545 | Page (perPage: 1, page: $page) {
546 | pageInfo {
547 | total
548 | hasNextPage
549 | }
550 | media (search: $search, type: MANGA) {
551 | id
552 | title {
553 | romaji
554 | english
555 | native
556 | }
557 | format
558 | countryOfOrigin
559 | source (version: 2)
560 | status
561 | description(asHtml: true)
562 | chapters
563 | isFavourite
564 | mediaListEntry {
565 | status
566 | score
567 | id
568 | }
569 | volumes
570 | averageScore
571 | siteUrl
572 | isAdult
573 | }
574 | }
575 | }
576 | """
577 |
578 |
579 | DESC_INFO_QUERY = """
580 | query ($id: Int) {
581 | Character (id: $id) {
582 | image {
583 | large
584 | }
585 | description(asHtml: false)
586 | }
587 | }
588 | """
589 |
590 | LS_INFO_QUERY = """
591 | query ($id: Int) {
592 | Character (id: $id) {
593 | image {
594 | large
595 | }
596 | media (page: 1, perPage: 25) {
597 | nodes {
598 | title {
599 | romaji
600 | english
601 | }
602 | type
603 | }
604 | }
605 | }
606 | }
607 | """
608 |
609 | ACTIVITY_QUERY = """
610 | query ($id: Int) {
611 | Page (perPage: 12) {
612 | activities (userId: $id, type: MEDIA_LIST, sort: ID_DESC) {
613 | ...kek
614 | }
615 | }
616 | }
617 | fragment kek on ListActivity {
618 | type
619 | media {
620 | title {
621 | romaji
622 | }
623 | siteUrl
624 | }
625 | progress
626 | status
627 | }
628 | """
629 |
630 | TOP_QUERY = """
631 | query ($gnr: String, $page: Int) {
632 | Page (perPage: 15, page: $page) {
633 | pageInfo {
634 | lastPage
635 | total
636 | hasNextPage
637 | }
638 | media (genre: $gnr, sort: SCORE_DESC, type: ANIME) {
639 | title {
640 | romaji
641 | }
642 | }
643 | }
644 | }
645 | """
646 |
647 | TOPT_QUERY = """
648 | query ($gnr: String, $page: Int) {
649 | Page (perPage: 15, page: $page) {
650 | pageInfo {
651 | lastPage
652 | total
653 | hasNextPage
654 | }
655 | media (tag: $gnr, sort: SCORE_DESC, type: ANIME) {
656 | title {
657 | romaji
658 | }
659 | }
660 | }
661 | }
662 | """
663 |
664 | ALLTOP_QUERY = """
665 | query ($page: Int) {
666 | Page (perPage: 15, page: $page) {
667 | pageInfo {
668 | lastPage
669 | total
670 | hasNextPage
671 | }
672 | media (sort: SCORE_DESC, type: ANIME) {
673 | title {
674 | romaji
675 | }
676 | }
677 | }
678 | }
679 | """
680 |
681 | GET_GENRES = """
682 | query {
683 | GenreCollection
684 | }
685 | """
686 |
687 | GET_TAGS = """
688 | query{
689 | MediaTagCollection {
690 | name
691 | isAdult
692 | }
693 | }
694 | """
695 |
696 | RECOMMENDTIONS_QUERY = '''
697 | query ($id: Int) {
698 | Media (id: $id) {
699 | recommendations (perPage: 25) {
700 | edges {
701 | node {
702 | mediaRecommendation {
703 | title {
704 | romaji
705 | }
706 | id
707 | siteUrl
708 | }
709 | }
710 | }
711 | }
712 | }
713 | }
714 | '''
715 |
716 | STUDIO_QUERY = '''
717 | query ($search: String, $page: Int) {
718 | Page (page: $page, perPage: 1) {
719 | pageInfo {
720 | total
721 | hasNextPage
722 | }
723 | studios (search: $search) {
724 | id
725 | name
726 | siteUrl
727 | isFavourite
728 | }
729 | }
730 | }
731 | '''
732 |
733 | STUDIO_ANI_QUERY = '''
734 | query ($id: Int, $page: Int) {
735 | Studio (id: $id) {
736 | name
737 | media (page: $page) {
738 | pageInfo {
739 | total
740 | lastPage
741 | hasNextPage
742 | }
743 | edges {
744 | node {
745 | title {
746 | romaji
747 | }
748 | seasonYear
749 | }
750 | }
751 | }
752 | }
753 | }
754 | '''
755 |
756 |
757 | async def get_studios(qry, page, user, duser = None, auth: bool = False):
758 | page = int(page)
759 | vars_ = {'search': STUDIO_DB[qry], 'page': int(page)}
760 | result = await return_json_senpai(STUDIO_QUERY, vars_, auth, user)
761 | if result["data"]['Page']['studios']==[]:
762 | return ["Not Found"]
763 | data = result["data"]['Page']['studios'][0]
764 | isFav = data['isFavourite']
765 | msg = (
766 | f"**{data['name']}**{', ♥️' if isFav is True else ''}"
767 | +f"\n\n**ID:** {data['id']}\n[Website]({data['siteUrl']})"
768 | )
769 | if not duser:
770 | duser = user
771 | btns = []
772 | btns.append([
773 | InlineKeyboardButton(
774 | "List Animes",
775 | callback_data=f"stuani_1_{data['id']}_{page}_{qry}_{auth}_{duser}"
776 | )
777 | ])
778 | if auth:
779 | btns.append([
780 | InlineKeyboardButton(
781 | "Remove from Favs" if isFav else "Add To Favs",
782 | callback_data=f"fav_STUDIO_{data['id']}_{qry}_{page}_{duser}"
783 | )
784 | ])
785 | pi = result["data"]['Page']['pageInfo']['hasNextPage']
786 | if pi is False:
787 | if int(page)==1:
788 | return msg, btns
789 | else:
790 | btns.append([
791 | InlineKeyboardButton(
792 | "Prev",
793 | callback_data=f"pgstudio_{page-1}_{qry}_{auth}_{duser}"
794 | )
795 | ])
796 | else:
797 | if int(page)==1:
798 | btns.append([
799 | InlineKeyboardButton(
800 | "Next", callback_data=f"pgstudio_2_{qry}_{auth}_{duser}"
801 | )
802 | ])
803 | else:
804 | btns.append(
805 | [
806 | InlineKeyboardButton(
807 | "Prev", callback_data=f"pgstudio_{page-1}_{qry}_{auth}_{duser}"
808 | ),
809 | InlineKeyboardButton(
810 | "Next", callback_data=f"pgstudio_{page+1}_{qry}_{auth}_{duser}"
811 | )
812 | ]
813 | )
814 | return msg, InlineKeyboardMarkup(btns)
815 |
816 |
817 | async def get_studio_animes(id_, page, qry, rp, user, duser = None, auth: bool = False):
818 | vars_ = {'id': id_, 'page': int(page)}
819 | result = await return_json_senpai(STUDIO_ANI_QUERY, vars_, auth, user)
820 | data = result['data']['Studio']['media']['edges']
821 | if data==[]:
822 | return ["No results found"]
823 | msg = f"List of animes by {result['data']['Studio']['name']} studio\n"
824 | for i in data:
825 | msg += (
826 | f"\n⚬ `{i['node']['title']['romaji']}`"
827 | +f" __({i['node']['seasonYear']})__"
828 | )
829 | btns = []
830 | if not duser:
831 | duser = user
832 | pi = result["data"]['Studio']['media']['pageInfo']
833 | if pi['hasNextPage'] is False:
834 | if int(page)==1:
835 | btns.append([
836 | InlineKeyboardButton(
837 | "Back", callback_data=f"pgstudio_{rp}_{qry}_{auth}_{duser}"
838 | )
839 | ])
840 | return msg, btns
841 | else:
842 | btns.append([
843 | InlineKeyboardButton(
844 | "Prev",
845 | callback_data=f"stuani_{int(page)-1}_{id_}_{rp}_{qry}_{auth}_{duser}"
846 | )
847 | ])
848 | else:
849 | if int(page)==1:
850 | btns.append([
851 | InlineKeyboardButton(
852 | "Next", callback_data=f"stuani_2_{id_}_{rp}_{qry}_{auth}_{duser}"
853 | )
854 | ])
855 | else:
856 | btns.append([
857 | InlineKeyboardButton(
858 | "Prev",
859 | callback_data=f"stuani_{int(page)-1}_{id_}_{rp}_{qry}_{auth}_{duser}"
860 | ),
861 | InlineKeyboardButton(
862 | "Next",
863 | callback_data=f"stuani_{int(page)+1}_{id_}_{rp}_{qry}_{auth}_{duser}"
864 | )
865 | ])
866 | btns.append([
867 | InlineKeyboardButton(
868 | "Back", callback_data=f"pgstudio_{rp}_{qry}_{auth}_{duser}"
869 | )
870 | ])
871 | return msg, InlineKeyboardMarkup(btns)
872 |
873 |
874 | async def get_all_tags(text: str = None):
875 | vars_ = {}
876 | result = await return_json_senpai(GET_TAGS, vars_, auth=False, user=None)
877 | msg = "**Tags List:**\n\n`"
878 | kek = []
879 | for i in result['data']['MediaTagCollection']:
880 | if text is not None and 'nsfw' in text:
881 | if str(i['isAdult'])!='False':
882 | kek.append(i['name'])
883 | else:
884 | if str(i['isAdult'])=='False':
885 | kek.append(i['name'])
886 | msg += ", ".join(kek)
887 | msg += "`"
888 | return msg
889 |
890 |
891 | async def get_all_genres():
892 | vars_ = {}
893 | result = await return_json_senpai(GET_GENRES, vars_, auth=False)
894 | msg = "**Genres List:**\n\n"
895 | for i in result['data']['GenreCollection']:
896 | msg += f"`{i}`\n"
897 | return msg
898 |
899 |
900 | async def get_user_activity(id_, user, duser = None):
901 | vars_ = {"id": id_}
902 | result = await return_json_senpai(
903 | ACTIVITY_QUERY, vars_, auth=True, user=user
904 | )
905 | data = result["data"]["Page"]["activities"]
906 | msg = ""
907 | for i in data:
908 | try:
909 | name = (
910 | f"[{i['media']['title']['romaji']}]"
911 | +f"({i['media']['siteUrl']})"
912 | )
913 | if i['status'] in ["watched episode", "read chapter"]:
914 | msg += (
915 | f"⚬ {str(i['status']).capitalize()} "
916 | +f"{i['progress']} of {name}\n"
917 | )
918 | else:
919 | progress = i['progress']
920 | of = "of"
921 | if i['status'] == "dropped":
922 | of = "at"
923 | msg += (
924 | f"⚬ {str(i['status']).capitalize()}"
925 | +f"{f'{progress} {of} ' if progress is not None else ' '}"
926 | +f"{name}\n"
927 | )
928 | except KeyError:
929 | pass
930 | if duser is None:
931 | duser = user
932 | btn = [[InlineKeyboardButton("Back", callback_data=f"getusrbc_{duser}")]]
933 | return [
934 | f"https://img.anili.st/user/{id_}?a={time.time()}",
935 | msg,
936 | InlineKeyboardMarkup(btn)
937 | ]
938 |
939 |
940 | async def get_recommendations(id_):
941 | vars_ = {'id': int(id_)}
942 | result = await return_json_senpai(RECOMMENDTIONS_QUERY, vars_)
943 | data = result['data']['Media']['recommendations']['edges']
944 | rc_ls = []
945 | for i in data:
946 | ii = i['node']['mediaRecommendation']
947 | rc_ls.append([ii['title']['romaji'], ii['id'], ii['siteUrl']])
948 | if rc_ls == []:
949 | return "No Recommendations available related to given anime!!!"
950 | outstr = "Recommended animes:\n\n"
951 | for i in rc_ls:
952 | outstr += (
953 | f"**{i[0]}**\n ➥[Synopsis]"
954 | +f"(https://t.me/{BOT_NAME.replace('@', '')}?start=anime_{i[1]})"
955 | +f"\n ➥[Official Site]({i[2]})\n\n"
956 | )
957 | return outstr
958 |
959 |
960 | async def get_top_animes(gnr: str, page, user):
961 | vars_ = {"gnr": gnr.lower(), "page": int(page)}
962 | query = TOP_QUERY
963 | msg = f"Top animes for genre `{gnr.capitalize()}`:\n\n"
964 | if gnr=="None":
965 | query = ALLTOP_QUERY
966 | vars_ = {"page": int(page)}
967 | msg = f"Top animes:\n\n"
968 | nsfw = False
969 | result = await return_json_senpai(query, vars_, auth=False, user=user)
970 | if len(result['data']['Page']['media'])==0:
971 | query = TOPT_QUERY
972 | msg = f"Top animes for tag `{gnr.capitalize()}`:\n\n"
973 | result = await return_json_senpai(query, vars_, auth=False, user=user)
974 | if len(result['data']['Page']['media'])==0:
975 |             return ["No results found"]
976 | nsls = await get_all_tags('nsfw')
977 |         nsfw = gnr.lower() in nsls.lower()
978 | data = result["data"]["Page"]
979 | for i in data['media']:
980 | msg += f"⚬ `{i['title']['romaji']}`\n"
981 | msg += f"\nTotal available animes: `{data['pageInfo']['total']}`"
982 | btn = []
983 | if int(page)==1:
984 | if int(data['pageInfo']['lastPage'])!=1:
985 | btn.append([
986 | InlineKeyboardButton(
987 | "Next",
988 | callback_data=f"topanimu_{gnr}_{int(page)+1}_{user}"
989 | )
990 | ])
991 | elif int(page) == int(data['pageInfo']['lastPage']):
992 | btn.append([
993 | InlineKeyboardButton(
994 | "Prev",
995 | callback_data=f"topanimu_{gnr}_{int(page)-1}_{user}"
996 | )
997 | ])
998 | else:
999 | btn.append([
1000 | InlineKeyboardButton(
1001 | "Prev",
1002 | callback_data=f"topanimu_{gnr}_{int(page)-1}_{user}"
1003 | ),
1004 | InlineKeyboardButton(
1005 | "Next",
1006 | callback_data=f"topanimu_{gnr}_{int(page)+1}_{user}"
1007 | )
1008 | ])
1009 |     return [msg, nsfw], (InlineKeyboardMarkup(btn) if btn else "")
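
# Illustrative sketch (not part of the original module): on success the helper
# returns a [text, is_nsfw] pair plus either an InlineKeyboardMarkup or an
# empty string when there is a single page; a one-element list means no
# results. `message` and the genre below are hypothetical.
async def _example_top_animes(message):
    res = await get_top_animes("action", 1, message.from_user.id)
    if len(res) == 1:
        return await message.reply_text(res[0])
    (text, is_nsfw), markup = res
    if not is_nsfw:
        await message.reply_text(text, reply_markup=markup or None)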
1010 |
1011 |
1012 | async def get_user_favourites(id_, user, req, page, sighs, duser = None):
1013 | vars_ = {"id": int(id_), "page": int(page)}
1014 | result = await return_json_senpai(
1015 | FAV_ANI_QUERY if req=="ANIME"
1016 | else FAV_CHAR_QUERY if req=="CHAR"
1017 | else FAV_MANGA_QUERY,
1018 | vars_,
1019 | auth=True,
1020 | user=int(user)
1021 | )
1022 | data = (
1023 | result["data"]["User"]["favourites"][
1024 | "anime" if req=="ANIME"
1025 | else "characters" if req=="CHAR"
1026 | else "manga"
1027 | ]
1028 | )
1029 | msg = (
1030 | "Favourite Animes:\n\n" if req=="ANIME"
1031 | else "Favourite Characters:\n\n" if req=="CHAR"
1032 | else "Favourite Manga:\n\n"
1033 | )
1034 | for i in data["edges"]:
1035 | node_name = (
1036 | i['node']['title']['romaji'] if req!='CHAR'
1037 | else i['node']['name']['full']
1038 | )
1039 | msg += (
1040 | f"⚬ [{node_name}]({i['node']['siteUrl']})\n"
1041 | )
1042 | btn = []
1043 | if duser is None:
1044 | duser = user
1045 | if int(page)==1:
1046 | if int(data['pageInfo']['lastPage'])!=1:
1047 | btn.append([
1048 | InlineKeyboardButton(
1049 | "Next",
1050 | callback_data=(
1051 | f"myfavqry_{req}_{id_}_{str(int(page)+1)}"
1052 | +f"_{sighs}_{duser}"
1053 | )
1054 | )
1055 | ])
1056 | elif int(page) == int(data['pageInfo']['lastPage']):
1057 | btn.append([
1058 | InlineKeyboardButton(
1059 | "Prev",
1060 | callback_data=(
1061 | f"myfavqry_{req}_{id_}_{str(int(page)-1)}_{sighs}_{duser}"
1062 | )
1063 | )
1064 | ])
1065 | else:
1066 | btn.append([
1067 | InlineKeyboardButton(
1068 | "Prev",
1069 | callback_data=(
1070 | f"myfavqry_{req}_{id_}_{str(int(page)-1)}_{sighs}_{duser}"
1071 | )
1072 | ),
1073 | InlineKeyboardButton(
1074 | "Next",
1075 | callback_data=(
1076 | f"myfavqry_{req}_{id_}_{str(int(page)+1)}_{sighs}_{duser}"
1077 | )
1078 | )
1079 | ])
1080 | btn.append([
1081 | InlineKeyboardButton(
1082 | "Back", callback_data=f"myfavs_{id_}_{sighs}_{user}"
1083 | )
1084 | ])
1085 | return [
1086 |         f"https://img.anili.st/user/{id_}?a={time.time()}",
1087 | msg,
1088 | InlineKeyboardMarkup(btn)
1089 | ]
1090 |
1091 |
1092 | async def get_featured_in_lists(
1093 | idm,
1094 | req,
1095 | auth: bool = False,
1096 | user: int = None,
1097 | page: int = 0
1098 | ):
1099 | vars_ = {"id": int(idm)}
1100 | result = await return_json_senpai(
1101 | LS_INFO_QUERY, vars_, auth=auth, user=user
1102 | )
1103 | data = result["data"]["Character"]["media"]["nodes"]
1104 | if req == "ANI":
1105 | out = "ANIMES:\n\n"
1106 | out_ = []
1107 | for ani in data:
1108 | k = ani["title"]["english"] or ani["title"]["romaji"]
1109 | kk = ani["type"]
1110 | if kk == "ANIME":
1111 | out_.append(f"• __{k}__\n")
1112 | else:
1113 | out = "MANGAS:\n\n"
1114 | out_ = []
1115 | for ani in data:
1116 | k = ani["title"]["english"] or ani["title"]["romaji"]
1117 | kk = ani["type"]
1118 | if kk == "MANGA":
1119 | out_.append(f"• __{k}__\n")
1120 |     total = len(out_)
1121 |     # paginate the list, showing 15 entries per page
1122 |     start = 15 * page
1123 |     out_ = "".join(out_[start:start + 15])
1124 | return (
1125 | [out+out_, total] if len(out_) != 0 else False
1126 | ), result["data"]["Character"]["image"]["large"]
1127 |
1128 |
1129 | async def get_additional_info(
1130 | idm,
1131 | ctgry,
1132 | req = None,
1133 | auth: bool = False,
1134 | user: int = None,
1135 | page: int = 0
1136 | ):
1137 | vars_ = {"id": int(idm)}
1138 | if req=='char':
1139 | vars_['page'] = page
1140 | result = await return_json_senpai(
1141 | (
1142 | (
1143 | DES_INFO_QUERY
1144 | if req == "desc"
1145 | else CHA_INFO_QUERY
1146 | if req == "char"
1147 | else REL_INFO_QUERY
1148 | )
1149 | if ctgry == "ANI"
1150 | else DESC_INFO_QUERY
1151 | ),
1152 | vars_,
1153 | )
1154 | data = (
1155 | result["data"]["Media"] if ctgry == "ANI"
1156 | else result["data"]["Character"]
1157 | )
1158 | pic = f"https://img.anili.st/media/{idm}"
1159 | if req == "desc":
1160 | synopsis = data.get("description")
1161 | if os.environ.get("PREFERRED_LANGUAGE"):
1162 | synopsis = tr.translate(
1163 | synopsis, lang_tgt=os.environ.get("PREFERRED_LANGUAGE")
1164 | )
1165 | return (pic if ctgry == "ANI" else data["image"]["large"]), synopsis
1166 | elif req == "char":
1167 | charlist = []
1168 | for char in data["characters"]['edges']:
1169 | charlist.append(
1170 | f"• `{char['node']['name']['full']}` ({char['role']})"
1171 | )
1172 | chrctrs = ("\n").join(charlist)
1173 | charls = f"{chrctrs}" if len(charlist) != 0 else ""
1174 | return pic, charls, data["characters"]['pageInfo']
1175 | else:
1176 | prqlsql = data.get("relations").get("edges")
1177 | ps = ""
1178 | for i in prqlsql:
1179 | ps += (
1180 | f'• {i["node"]["title"]["romaji"]} '
1181 | +f'({i["node"]["type"]}) `{i["relationType"]}`\n'
1182 | )
1183 | return pic, ps
1184 |
1185 |
1186 | async def get_anime(
1187 | vars_,
1188 | auth: bool = False,
1189 | user: int = None,
1190 | cid: int = None
1191 | ):
1192 | result = await return_json_senpai(
1193 | ANIME_QUERY, vars_, auth=auth, user=user
1194 | )
1195 |
1196 | error = result.get("errors")
1197 | if error:
1198 | error_sts = error[0].get("message")
1199 | return [f"[{error_sts}]"]
1200 |
1201 | data = result["data"]["Media"]
1202 |
1203 | # Data of all fields in returned json
1204 | # pylint: disable=possibly-unused-variable
1205 | idm = data.get("id")
1206 | idmal = data.get("idMal")
1207 | romaji = data["title"]["romaji"]
1208 | english = data["title"]["english"]
1209 | native = data["title"]["native"]
1210 | formats = data.get("format")
1211 | status = data.get("status")
1212 | episodes = data.get("episodes")
1213 | duration = data.get("duration")
1214 | country = data.get("countryOfOrigin")
1215 | c_flag = cflag(country)
1216 | source = data.get("source")
1217 | prqlsql = data.get("relations").get("edges")
1218 | adult = data.get("isAdult")
1219 | url = data.get("siteUrl")
1220 | trailer_link = "N/A"
1221 | gnrs = ", ".join(data['genres'])
1222 | score = data['averageScore']
1223 | bl, cs = await uidata(cid)
1224 | text = await get_ui_text(cs)
1225 | psrc, ptype = text[0], text[1]
1226 | avscd = (
1227 | f"\n{bl}**{text[2]}:** `{score}%` 🌟" if score is not None
1228 | else ""
1229 | )
1230 | tags = []
1231 | for i in data['tags']:
1232 | tags.append(i["name"])
1233 | tags_ = (
1234 | f"\n{bl}**{text[8]}:** `{', '.join(tags[:5])}`" if tags != []
1235 | else ""
1236 | )
1237 | bot = BOT_NAME.replace("@", "")
1238 | gnrs_ = ""
1239 | if len(gnrs)!=0:
1240 | gnrs_ = f"\n{bl}**{text[7]}:** `{gnrs}`"
1241 | isfav = data.get("isFavourite")
1242 | fav = ", in Favourites" if isfav is True else ""
1243 | user_data = ""
1244 | in_ls = False
1245 | in_ls_id = ""
1246 | if auth is True:
1247 | in_list = data.get("mediaListEntry")
1248 | if in_list is not None:
1249 | in_ls = True
1250 | in_ls_id = in_list['id']
1251 | in_ls_stts = in_list['status']
1252 | in_ls_score = (
1253 | f" and scored {in_list['score']}" if in_list['score']!=0
1254 | else ""
1255 | )
1256 | user_data = (
1257 | f"\n{bl}**{text[4]}:** `{in_ls_stts}{fav}{in_ls_score}`"
1258 | )
1259 | if data["title"]["english"] is not None:
1260 | name = f"""[{c_flag}]**{romaji}**
1261 | __{english}__
1262 | {native}"""
1263 | else:
1264 | name = f"""[{c_flag}]**{romaji}**
1265 | {native}"""
1266 | prql, prql_id, sql, sql_id = "", "None", "", "None"
1267 | for i in prqlsql:
1268 | if i["relationType"] == "PREQUEL" and i["node"]["type"]=="ANIME":
1269 | pname = (
1270 | i["node"]["title"]["english"]
1271 | if i["node"]["title"]["english"] is not None
1272 | else i["node"]["title"]["romaji"]
1273 | )
1274 | prql += f"**{text[10]}:** `{pname}`\n"
1275 | prql_id = i["node"]["id"]
1276 | break
1277 | for i in prqlsql:
1278 | if i["relationType"] == "SEQUEL" and i["node"]["type"]=="ANIME":
1279 | sname = (
1280 | i["node"]["title"]["english"]
1281 | if i["node"]["title"]["english"] is not None
1282 | else i["node"]["title"]["romaji"]
1283 | )
1284 | sql += f"**{text[9]}:** `{sname}`\n"
1285 | sql_id = i["node"]["id"]
1286 | break
1287 | additional = f"{prql}{sql}"
1288 | surl = f"https://t.me/{bot}/?start=des_ANI_{idm}_desc"
1289 | dura = (
1290 | f"\n{bl}**{text[3]}:** `{duration} min/ep`"
1291 | if duration is not None
1292 | else ""
1293 | )
1294 | air_on = None
1295 | if data["nextAiringEpisode"]:
1296 | nextAir = data["nextAiringEpisode"]["timeUntilAiring"]
1297 | air_on = make_it_rw(nextAir*1000)
1298 | eps = data["nextAiringEpisode"]["episode"]
1299 | th = pos_no(str(eps))
1300 | air_on += f" | {eps}{th} eps"
1301 | if air_on is None:
1302 | eps_ = f"` | `{episodes} eps" if episodes is not None else ""
1303 | status_air = f"{bl}**{text[6]}:** `{status}{eps_}`"
1304 | else:
1305 | status_air = (
1306 | f"{bl}**{text[6]}:** `{status}`\n{bl}**{text[11]}:** `{air_on}`"
1307 | )
1308 | if data["trailer"] and data["trailer"]["site"] == "youtube":
1309 | trailer_link = (
1310 |             f"<a href='https://youtu.be/{data['trailer']['id']}'>Trailer</a>"
1311 | )
1312 | title_img = f"https://img.anili.st/media/{idm}"
1313 | try:
1314 | finals_ = ANIME_TEMPLATE.format(**locals())
1315 | except KeyError as kys:
1316 | return [f"{kys}"]
1317 | return title_img, finals_, [
1318 | idm, in_ls, in_ls_id, isfav, str(adult)
1319 | ], prql_id, sql_id
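
# Illustrative sketch (not part of the original module): get_anime() returns
# the cover-image URL, the rendered caption, the button state (media id,
# list/favourite flags, adult flag) and the prequel/sequel ids, or a
# one-element list carrying the API error. The "search" variable key and the
# use of the chat id as `cid` are assumptions for illustration only.
async def _example_get_anime(message):
    res = await get_anime({"search": "one piece"}, cid=message.chat.id)
    if len(res) == 1:
        return await message.reply_text(res[0])
    title_img, caption, state, prql_id, sql_id = res
    await message.reply_photo(title_img, caption=caption)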
1320 |
1321 |
1322 | async def get_anilist(
1323 | qdb, page, auth: bool = False, user: int = None, cid: int = None
1324 | ):
1325 | vars_ = {"search": ANIME_DB[qdb], "page": page}
1326 | result = await return_json_senpai(PAGE_QUERY, vars_, auth=auth, user=user)
1327 |
1328 | if len(result['data']['Page']['media'])==0:
1329 |         return ["No results found"]
1330 |
1331 | data = result["data"]["Page"]["media"][0]
1332 | # Data of all fields in returned json
1333 | # pylint: disable=possibly-unused-variable
1334 | idm = data.get("id")
1335 | bot = BOT_NAME.replace("@", "")
1336 | idmal = data.get("idMal")
1337 | romaji = data["title"]["romaji"]
1338 | english = data["title"]["english"]
1339 | native = data["title"]["native"]
1340 | formats = data.get("format")
1341 | status = data.get("status")
1342 | episodes = data.get("episodes")
1343 | duration = data.get("duration")
1344 | country = data.get("countryOfOrigin")
1345 | c_flag = cflag(country)
1346 | source = data.get("source")
1347 | prqlsql = data.get("relations").get("edges")
1348 | adult = data.get("isAdult")
1349 | trailer_link = "N/A"
1350 | isfav = data.get("isFavourite")
1351 | gnrs = ", ".join(data['genres'])
1352 | gnrs_ = ""
1353 | bl, cs = await uidata(cid)
1354 | text = await get_ui_text(cs)
1355 | psrc, ptype = text[0], text[1]
1356 | if len(gnrs)!=0:
1357 | gnrs_ = f"\n{bl}**{text[7]}:** `{gnrs}`"
1358 | fav = ", in Favourites" if isfav is True else ""
1359 | score = data['averageScore']
1360 | avscd = (
1361 | f"\n{bl}**{text[2]}:** `{score}%` 🌟" if score is not None else ""
1362 | )
1363 | tags = []
1364 | for i in data['tags']:
1365 | tags.append(i["name"])
1366 | tags_ = (
1367 | f"\n{bl}**{text[8]}:** `{', '.join(tags[:5])}`" if tags != [] else ""
1368 | )
1369 | in_ls = False
1370 | in_ls_id = ""
1371 | user_data = ""
1372 | if auth is True:
1373 | in_list = data.get("mediaListEntry")
1374 | if in_list is not None:
1375 | in_ls = True
1376 | in_ls_id = in_list['id']
1377 | in_ls_stts = in_list['status']
1378 | in_ls_score = (
1379 | f" and scored {in_list['score']}" if in_list['score']!=0
1380 | else ""
1381 | )
1382 | user_data = (
1383 | f"\n{bl}**{text[4]}:** `{in_ls_stts}{fav}{in_ls_score}`"
1384 | )
1385 | if data["title"]["english"] is not None:
1386 | name = f"[{c_flag}]**{english}** (`{native}`)"
1387 | else:
1388 | name = f"[{c_flag}]**{romaji}** (`{native}`)"
1389 | prql, sql = "", ""
1390 | for i in prqlsql:
1391 | if i["relationType"] == "PREQUEL" and i["node"]["type"]=="ANIME":
1392 | pname = (
1393 | i["node"]["title"]["english"]
1394 | if i["node"]["title"]["english"] is not None
1395 | else i["node"]["title"]["romaji"]
1396 | )
1397 | prql += f"**{text[10]}:** `{pname}`\n"
1398 | break
1399 | for i in prqlsql:
1400 | if i["relationType"] == "SEQUEL" and i["node"]["type"]=="ANIME":
1401 | sname = (
1402 | i["node"]["title"]["english"]
1403 | if i["node"]["title"]["english"] is not None
1404 | else i["node"]["title"]["romaji"]
1405 | )
1406 | sql += f"**{text[9]}:** `{sname}`\n"
1407 | break
1408 | additional = f"{prql}{sql}"
1409 |     additional = additional.replace("-", "")
1410 | dura = (
1411 | f"\n{bl}**{text[3]}:** `{duration} min/ep`"
1412 | if duration is not None
1413 | else ""
1414 | )
1415 | air_on = None
1416 | if data["nextAiringEpisode"]:
1417 | nextAir = data["nextAiringEpisode"]["timeUntilAiring"]
1418 | air_on = make_it_rw(nextAir*1000)
1419 | eps = data["nextAiringEpisode"]["episode"]
1420 | th = pos_no(str(eps))
1421 | air_on += f" | {eps}{th} eps"
1422 | if air_on is None:
1423 | eps_ = f"` | `{episodes} eps" if episodes is not None else ""
1424 | status_air = f"{bl}**{text[6]}:** `{status}{eps_}`"
1425 | else:
1426 | status_air = (
1427 | f"{bl}**{text[6]}:** `{status}`\n{bl}**{text[11]}:** `{air_on}`"
1428 | )
1429 | if data["trailer"] and data["trailer"]["site"] == "youtube":
1430 | trailer_link = (
1431 |             f"<a href='https://youtu.be/{data['trailer']['id']}'>Trailer</a>"
1432 | )
1433 | url = data.get("siteUrl")
1434 | title_img = f"https://img.anili.st/media/{idm}"
1435 | surl = f"https://t.me/{bot}/?start=des_ANI_{idm}_desc"
1436 | hasNextPage = result["data"]["Page"]["pageInfo"]["hasNextPage"]
1437 | try:
1438 | finals_ = ANIME_TEMPLATE.format(**locals())
1439 | except KeyError as kys:
1440 | return [f"{kys}"]
1441 | return title_img, [
1442 | finals_, hasNextPage
1443 | ], [
1444 | idm, in_ls, in_ls_id, isfav, str(adult)
1445 | ]
1446 |
1447 |
1448 | async def get_character(query, page, auth: bool = False, user: int = None):
1449 | var = {"search": CHAR_DB[query], "page": int(page)}
1450 | result = await return_json_senpai(
1451 | CHARACTER_QUERY, var, auth=auth, user=user
1452 | )
1453 | if len(result['data']['Page']['characters'])==0:
1454 |         return ["No results found"]
1455 | data = result["data"]["Page"]["characters"][0]
1456 | # Character Data
1457 | id_ = data["id"]
1458 | name = data["name"]["full"]
1459 | native = data["name"]["native"]
1460 | img = data["image"]["large"]
1461 | site_url = data["siteUrl"]
1462 | isfav = data.get("isFavourite")
1463 | va = []
1464 | for i in data['media']['edges']:
1465 | for ii in i['voiceActors']:
1466 | if f"[{ii['name']['full']}]({ii['siteUrl']})" not in va:
1467 | va.append(f"[{ii['name']['full']}]({ii['siteUrl']})")
1468 | lva = None
1469 | if len(va)>1:
1470 | lva = va.pop()
1471 | sva = (
1472 | f"\n**Voice Actors:** {', '.join(va)}"
1473 | +f"{' and '+lva if lva is not None else ''}\n" if va!= []
1474 | else ""
1475 | )
1476 | cap_text = f"""
1477 | __{native}__
1478 | (`{name}`)
1479 | **ID:** {id_}
1480 | {sva}
1481 | <a href='{site_url}'>Visit Website</a>"""
1482 | hasNextPage = result["data"]["Page"]["pageInfo"]["hasNextPage"]
1483 | return img, [cap_text, hasNextPage], [id_, isfav]
1484 |
1485 |
1486 | async def browse_(qry: str):
1487 | s, y = season_()
1488 | sort = "POPULARITY_DESC"
1489 | if qry == 'upcoming':
1490 | s, y = season_(True)
1491 | if qry == 'trending':
1492 | sort = "TRENDING_DESC"
1493 | vars_ = {"s": s, "y": y, "sort": sort}
1494 | result = await return_json_senpai(BROWSE_QUERY, vars_)
1495 | data = result["data"]["Page"]["media"]
1496 | ls = []
1497 | for i in data:
1498 | if i['format'] in ['TV', 'MOVIE', 'ONA']:
1499 | ls.append('• `' + i['title']['romaji'] + '`')
1500 | out = f'{qry.capitalize()} animes in {s} {y}:\n\n'
1501 | return out + "\n".join(ls[:20])
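
# Illustrative sketch (not part of the original module): browse_() accepts a
# keyword such as "trending" or "upcoming"; any other value falls back to the
# current season sorted by popularity. `message` is hypothetical.
async def _example_browse(message):
    await message.reply_text(await browse_("trending"))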
1502 |
1503 |
1504 | async def get_manga(
1505 | qdb, page, auth: bool = False, user: int = None, cid: int = None
1506 | ):
1507 | vars_ = {"search": MANGA_DB[qdb], "asHtml": True, "page": page}
1508 | result = await return_json_senpai(
1509 | MANGA_QUERY, vars_, auth=auth, user=user
1510 | )
1511 | if len(result['data']['Page']['media'])==0:
1512 |         return ["No results found"]
1513 | data = result["data"]["Page"]["media"][0]
1514 |
1515 | # Data of all fields in returned json
1516 | # pylint: disable=possibly-unused-variable
1517 | idm = data.get("id")
1518 | romaji = data["title"]["romaji"]
1519 | english = data["title"]["english"]
1520 | native = data["title"]["native"]
1521 | status = data.get("status")
1522 |     synopsis = data.get("description") or ""
1523 | description = synopsis[:500]
1524 | description_s = ""
1525 | if len(synopsis) > 500:
1526 |         description += "..."
1527 | description_s = (
1528 | f"[Click for more info](https://t.me/{BOT_NAME.replace('@', '')}"
1529 | +f"/?start=des_ANI_{idm}_desc)"
1530 | )
1531 | volumes = data.get("volumes")
1532 | chapters = data.get("chapters")
1533 | score = data.get("averageScore")
1534 | url = data.get("siteUrl")
1535 | format_ = data.get("format")
1536 | country = data.get("countryOfOrigin")
1537 | source = data.get("source")
1538 | c_flag = cflag(country)
1539 | isfav = data.get("isFavourite")
1540 | adult = data.get("isAdult")
1541 | fav = ", in Favourites" if isfav is True else ""
1542 | in_ls = False
1543 | in_ls_id = ""
1544 | bl, cs = await uidata(cid)
1545 | text = await get_ui_text(cs)
1546 | user_data = ""
1547 | if auth is True:
1548 | in_list = data.get("mediaListEntry")
1549 | if in_list is not None:
1550 | in_ls = True
1551 | in_ls_id = in_list['id']
1552 | in_ls_stts = in_list['status']
1553 | in_ls_score = (
1554 | f" and scored {in_list['score']}" if in_list['score']!=0
1555 | else ""
1556 | )
1557 | user_data = (
1558 | f"{bl}**{text[4]}:** `{in_ls_stts}{fav}{in_ls_score}`\n"
1559 | )
1560 | name = f"""[{c_flag}]**{romaji}**
1561 | __{english}__
1562 | {native}"""
1563 | if english is None:
1564 | name = f"""[{c_flag}]**{romaji}**
1565 | {native}"""
1566 | finals_ = f"{name}\n\n"
1567 | finals_ += f"{bl}**ID:** `{idm}`\n"
1568 | finals_ += f"{bl}**{text[6]}:** `{status}`\n"
1569 | finals_ += f"{bl}**{text[13]}:** `{volumes}`\n"
1570 | finals_ += f"{bl}**{text[14]}:** `{chapters}`\n"
1571 | finals_ += f"{bl}**{text[2]}:** `{score}`\n"
1572 | finals_ += f"{bl}**{text[1]}:** `{format_}`\n"
1573 | finals_ += f"{bl}**{text[0]}:** `{source}`\n"
1574 | finals_ += user_data
1575 | if os.environ.get("PREFERRED_LANGUAGE"):
1576 | description = tr.translate(
1577 | description, lang_tgt=os.environ.get("PREFERRED_LANGUAGE")
1578 | )
1579 | findesc = '' if description == '' else f'`{description}`'
1580 | finals_ += f"\n**{text[12]}**: {findesc}\n\n{description_s}"
1581 | pic = f"https://img.anili.st/media/{idm}"
1582 | return pic, [
1583 | finals_, result["data"]["Page"]["pageInfo"]["hasNextPage"], url
1584 | ], [
1585 | idm, in_ls, in_ls_id, isfav, str(adult)
1586 | ]
1587 |
1588 |
1589 | async def get_airing(qry, ind: int, auth: bool = False, user: int = None):
1590 | vars_ = {"search": AIRING_DB[qry], "page": int(ind)}
1591 | result = await return_json_senpai(AIR_QUERY, vars_, auth=auth, user=user)
1592 | error = result.get("errors")
1593 | if error:
1594 | error_sts = error[0].get("message")
1595 | return [f"{error_sts}"]
1596 | try:
1597 | data = result["data"]["Page"]["media"][0]
1598 | except IndexError:
1599 |         return ["No results found"]
1600 | # Airing Details
1601 | mid = data.get("id")
1602 | romaji = data["title"]["romaji"]
1603 | english = data["title"]["english"]
1604 | status = data.get("status")
1605 | country = data.get("countryOfOrigin")
1606 | c_flag = cflag(country)
1607 | coverImg = f"https://img.anili.st/media/{mid}"
1608 | isfav = data.get("isFavourite")
1609 | adult = data.get("isAdult")
1610 | in_ls = False
1611 | in_ls_id = ""
1612 | user_data = ""
1613 | if auth is True:
1614 | in_list = data.get("mediaListEntry")
1615 | if in_list is not None:
1616 | in_ls = True
1617 | in_ls_id = in_list['id']
1618 | in_ls_stts = in_list['status']
1619 | user_data = f"**USER DATA:** `{in_ls_stts}`\n"
1620 | air_on = None
1621 | if data["nextAiringEpisode"]:
1622 | nextAir = data["nextAiringEpisode"]["timeUntilAiring"]
1623 | episode = data["nextAiringEpisode"]["episode"]
1624 | th = pos_no(episode)
1625 | air_on = make_it_rw(nextAir*1000)
1626 | title_ = english or romaji
1627 | out = f"[{c_flag}] **{title_}**"
1628 | out += f"\n\n**ID:** `{mid}`"
1629 | out += f"\n**Status:** `{status}`\n"
1630 | out += user_data
1631 | if air_on:
1632 | out += f"Airing Episode `{episode}{th}` in `{air_on}`"
1633 | site = data["siteUrl"]
1634 | return [
1635 | coverImg, out
1636 | ], [
1637 | site, result["data"]["Page"]["pageInfo"]["hasNextPage"]
1638 | ], [
1639 | mid, in_ls, in_ls_id, isfav, str(adult)
1640 | ]
1641 |
1642 |
1643 | async def toggle_favourites(id_: int, media: str, user: int):
1644 | vars_ = {"id": int(id_)}
1645 | query = (
1646 | ANIME_MUTATION if media=="ANIME" or media=="AIRING"
1647 | else CHAR_MUTATION if media=="CHARACTER"
1648 | else MANGA_MUTATION if media=="MANGA"
1649 | else STUDIO_MUTATION
1650 | )
1651 | k = await return_json_senpai(
1652 | query=query, vars_=vars_, auth=True, user=int(user)
1653 | )
1654 | try:
1655 | kek = k['data']['ToggleFavourite']
1656 | return "ok"
1657 | except KeyError:
1658 | return "failed"
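
# Illustrative sketch (not part of the original module): toggling a favourite
# needs an authorised AniList user and reports "ok" or "failed" instead of
# raising. The media id is a hypothetical example value.
async def _example_toggle_favourite(callback_query):
    status = await toggle_favourites(1, "ANIME", callback_query.from_user.id)
    await callback_query.answer(
        "Favourite toggled" if status == "ok" else "AniList request failed"
    )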
1659 |
1660 |
1661 | async def get_user(vars_, req, user, display_user = None):
1662 | query = USER_QRY if "user" in req else VIEWER_QRY
1663 | k = await return_json_senpai(
1664 | query=query,
1665 | vars_=vars_,
1666 | auth=False if "user" in req else True,
1667 | user=int(user)
1668 | )
1669 | error = k.get("errors")
1670 | if error:
1671 | error_sts = error[0].get("message")
1672 | return [f"{error_sts}"]
1673 |
1674 | data = k['data']['User' if "user" in req else 'Viewer']
1675 | anime = data['statistics']['anime']
1676 | manga = data['statistics']['manga']
1677 | stats = f"""
1678 | **Anime Stats**:
1679 |
1680 | Total Anime Watched: `{anime['count']}`
1681 | Total Episode Watched: `{anime['episodesWatched']}`
1682 | Total Time Spent: `{anime['minutesWatched']}`
1683 | Average Score: `{anime['meanScore']}`
1684 |
1685 | **Manga Stats**:
1686 |
1687 | Total Manga Read: `{manga['count']}`
1688 | Total Chapters Read: `{manga['chaptersRead']}`
1689 | Total Volumes Read: `{manga['volumesRead']}`
1690 | Average Score: `{manga['meanScore']}`
1691 | """
1692 | btn = []
1693 |     if "user" not in req:
1694 | btn.append([
1695 | InlineKeyboardButton(
1696 | "Favourites",
1697 | callback_data=f"myfavs_{data['id']}_yes_{display_user}"
1698 | ),
1699 | InlineKeyboardButton(
1700 | "Activity",
1701 | callback_data=f"myacc_{data['id']}_{display_user}"
1702 | )
1703 | ])
1704 | btn.append([
1705 | InlineKeyboardButton(
1706 | "Profile", url=str(data['siteUrl'])
1707 | )
1708 | ])
1709 | return [
1710 | f'https://img.anili.st/user/{data["id"]}?a={time.time()}',
1711 | stats,
1712 | InlineKeyboardMarkup(btn)
1713 | ]
1714 |
1715 |
1716 | async def update_anilist(id_, req, user, eid: int = None, status: str = None):
1717 | vars_ = {"id": int(id_), "status": status}
1718 | if req=="lsus":
1719 | vars_ = {"id": int(eid), "status": status}
1720 | if req=="dlt":
1721 | vars_ = {"id": int(eid)}
1722 | k = await return_json_senpai(
1723 | query=(
1724 | ANILIST_MUTATION if req=="lsas"
1725 | else ANILIST_MUTATION_UP if req=="lsus"
1726 | else ANILIST_MUTATION_DEL
1727 | ),
1728 | vars_=vars_,
1729 | auth=True,
1730 | user=int(user)
1731 | )
1732 | try:
1733 | (
1734 | k['data']['SaveMediaListEntry'] if req=="lsas"
1735 | else k['data']['UpdateMediaListEntries'] if req=="lsus"
1736 | else k["data"]['DeleteMediaListEntry']
1737 | )
1738 | return "ok"
1739 | except KeyError:
1740 | return "failed"
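
# Illustrative sketch (not part of the original module): req selects the
# mutation: "lsas" adds a media id to the list, "lsus" updates an existing
# entry (eid) and "dlt" deletes it; status values follow AniList's
# MediaListStatus enum. All concrete values below are hypothetical.
async def _example_update_anilist(user_id):
    added = await update_anilist(1, "lsas", user_id, status="CURRENT")
    return added == "ok"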
1741 |
1742 |
1743 | async def check_if_adult(id_):
1744 | vars_ = {"id": int(id_)}
1745 | k = await return_json_senpai(query=ISADULT, vars_=vars_, auth=False)
1746 | if str(k['data']['Media']['isAdult'])=="True":
1747 | return "True"
1748 | else:
1749 | return "False"
1750 |
1751 | #### END ####
1752 |
1753 | #### Jikanpy part ####
1754 |
1755 | async def get_scheduled(x: int = 9):
1756 | base_url = "https://api.jikan.moe/v4/schedules/"
1757 | day = str(day_(x if x!=9 else datetime.now().weekday())).lower()
1758 | out = f"Scheduled animes for {day.capitalize()}\n\n"
1759 | data = requests.get(base_url+day).json()
1760 | sched_ls = data["data"]
1761 | for i in sched_ls:
1762 | try:
1763 | title = i['titles'][0]['title']
1764 | except IndexError:
1765 | title = i['title']
1766 | out += f"• `{title}`\n"
1767 | return out, x if x!=9 else datetime.now().weekday()
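
# Illustrative sketch (not part of the original module): with no argument the
# helper uses today's weekday; passing 0-6 (Monday to Sunday, matching
# datetime.weekday()) picks a specific day. It returns the text together with
# the weekday index actually used, handy for paging buttons.
async def _example_schedule(message):
    text, day_index = await get_scheduled()
    await message.reply_text(text)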
1768 |
1769 | #### END ####
1770 |
1771 | #### chiaki part ####
1772 |
1773 | def get_wols(x: str):
1774 | data = requests.get(
1775 | f"https://chiaki.vercel.app/search2?query={x}"
1776 | ).json()
1777 | ls = []
1778 | for i in data:
1779 | sls = [data[i], i]
1780 | ls.append(sls)
1781 | return ls
1782 |
1783 |
1784 | def get_wo(x: int, page: int):
1785 | data = requests.get(
1786 | f"https://chiaki.vercel.app/get2?group_id={x}"
1787 | ).json()
1788 | msg = "Watch order for the given query is:\n\n"
1789 | out = []
1790 | for i in data:
1791 | out.append(f"{i['index']}. `{i['name']}`\n")
1792 |     total = len(out)
1793 |     # paginate the list, showing 50 entries per page
1794 |     start = 50 * page
1795 |     out_ = "".join(out[start:start + 50])
1796 | return msg+out_, total
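
# Illustrative sketch (not part of the original module): get_wols() returns
# [label, group_id]-style pairs from the chiaki search endpoint (assuming the
# second element is the id get_wo() expects), and get_wo() takes that id plus
# a zero-based page of 50 entries. Both use blocking requests, so callers may
# want to run them off the event loop.
def _example_watch_order(query):
    results = get_wols(query)
    if not results:
        return "No watch order found"
    label, group_id = results[0]   # assumption: pair is [label, group_id]
    text, total = get_wo(group_id, 0)
    return text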
1797 |
1798 | #### END ####
1799 |
1800 | ##### Anime Fillers Part #####
1801 |
1802 | def search_filler(query):
1803 | html = requests.get("https://www.animefillerlist.com/shows").text
1804 | soup = BeautifulSoup(html, "html.parser")
1805 | div = soup.findAll("div", attrs={"class": "Group"})
1806 | index = {}
1807 | for i in div:
1808 | li = i.findAll("li")
1809 | for jk in li:
1810 |             slug = jk.a["href"].split("/")[-1]
1811 |             name = jk.text
1812 |             index[name] = slug
1813 |     ret = {}
1814 |     # keep shows whose name contains the query (case-insensitive)
1815 |     for name, slug in index.items():
1816 |         if query.lower() in name.lower():
1817 |             ret[name] = slug
1818 | return ret
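
# Illustrative sketch (not part of the original module): search_filler() maps
# matching show names to their animefillerlist.com slugs, and a slug is what
# parse_filler() below expects.
def _example_fillers(query):
    shows = search_filler(query)
    if not shows:
        return None
    first_slug = next(iter(shows.values()))
    return parse_filler(first_slug)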
1819 |
1820 |
1821 | def parse_filler(filler_id):
1822 |     url = "https://www.animefillerlist.com/shows/" + filler_id
1823 |     html = requests.get(url).text
1824 |     soup = BeautifulSoup(html, "html.parser")
1825 |     div = soup.find("div", attrs={"id": "Condensed"})
1826 |     all_ep = div.find_all("span", attrs={"class": "Episodes"})
1827 |     # The condensed list renders one to four episode groups; which groups
1828 |     # are present depends on how many spans the page shows.
1829 |     group_keys = {
1830 |         1: ["total_ep"],
1831 |         2: ["total_ep", "filler_ep"],
1832 |         3: ["total_ep", "mixed_ep", "filler_ep"],
1833 |         4: ["total_ep", "mixed_ep", "filler_ep", "ac_ep"],
1834 |     }
1835 |     keys = group_keys.get(len(all_ep))
1836 |     if keys is None:
1837 |         return None
1838 |     dict_ = {
1839 |         "filler_id": filler_id,
1840 |         "total_ep": None,
1841 |         "mixed_ep": None,
1842 |         "filler_ep": None,
1843 |         "ac_ep": None,
1844 |     }
1845 |     for key, span in zip(keys, all_ep):
1846 |         dict_[key] = ", ".join(a.text for a in span.findAll("a"))
1847 |     return dict_
1909 |
1910 |
1911 | ##### END #####
--------------------------------------------------------------------------------