├── readme.txt ├── animdl ├── core │ ├── cli │ │ ├── __init__.py │ │ ├── commands │ │ │ ├── __init__.py │ │ │ ├── grab.py │ │ │ ├── search.py │ │ │ ├── schedule.py │ │ │ ├── update.py │ │ │ └── download.py │ │ ├── exit_codes.py │ │ ├── helpers │ │ │ ├── constants.py │ │ │ ├── players │ │ │ │ ├── __init__.py │ │ │ │ ├── ffplay.py │ │ │ │ ├── android.py │ │ │ │ ├── vlc.py │ │ │ │ ├── base_player.py │ │ │ │ └── mpv.py │ │ │ ├── special.py │ │ │ ├── fuzzysearch.py │ │ │ ├── processors.py │ │ │ ├── player.py │ │ │ ├── aniskip.py │ │ │ ├── stream_handlers.py │ │ │ ├── banner.py │ │ │ ├── logger.py │ │ │ ├── rpc.py │ │ │ ├── decorators.py │ │ │ ├── __init__.py │ │ │ └── prompts.py │ │ └── http_client.py │ ├── codebase │ │ ├── __init__.py │ │ ├── extractors │ │ │ ├── mycloud │ │ │ │ └── __init__.py │ │ │ ├── videobin │ │ │ │ └── __init__.py │ │ │ ├── __init__.py │ │ │ ├── streamtape │ │ │ │ └── __init__.py │ │ │ ├── vidstream │ │ │ │ └── __init__.py │ │ │ ├── doodstream │ │ │ │ └── __init__.py │ │ │ ├── streamlare │ │ │ │ └── __init__.py │ │ │ ├── okru │ │ │ │ └── __init__.py │ │ │ ├── rapidvideo │ │ │ │ ├── utils.py │ │ │ │ ├── __init__.py │ │ │ │ └── polling.py │ │ │ ├── streamsb │ │ │ │ └── __init__.py │ │ │ ├── mp4upload │ │ │ │ └── __init__.py │ │ │ ├── dailymotion │ │ │ │ └── __init__.py │ │ │ └── gogoplay │ │ │ │ └── __init__.py │ │ ├── downloader │ │ │ ├── __init__.py │ │ │ ├── idmanlib.py │ │ │ └── hls.py │ │ ├── providers │ │ │ ├── animixplay │ │ │ │ ├── __init__.py │ │ │ │ ├── hardstream.py │ │ │ │ └── stream_url.py │ │ │ ├── __init__.py │ │ │ ├── animepahe │ │ │ │ ├── inner │ │ │ │ │ ├── archive.py │ │ │ │ │ └── __init__.py │ │ │ │ └── __init__.py │ │ │ ├── twistmoe │ │ │ │ ├── __init__.py │ │ │ │ └── stream_url.py │ │ │ ├── hahomoe │ │ │ │ └── __init__.py │ │ │ ├── nineanime │ │ │ │ ├── decipher.py │ │ │ │ └── __init__.py │ │ │ ├── animeout │ │ │ │ └── __init__.py │ │ │ ├── kawaiifu │ │ │ │ └── __init__.py │ │ │ ├── animtime │ │ │ │ └── __init__.py │ │ │ ├── yugen │ │ │ │ └── __init__.py │ │ │ ├── hentaistream │ │ │ │ └── __init__.py │ │ │ ├── gogoanime │ │ │ │ └── __init__.py │ │ │ ├── animekaizoku │ │ │ │ └── __init__.py │ │ │ ├── marinmoe │ │ │ │ └── __init__.py │ │ │ ├── animeonsen │ │ │ │ └── __init__.py │ │ │ ├── zoro │ │ │ │ └── __init__.py │ │ │ ├── animexin │ │ │ │ └── __init__.py │ │ │ ├── crunchyroll │ │ │ │ └── __init__.py │ │ │ ├── kamyroll │ │ │ │ ├── __init__.py │ │ │ │ └── api.py │ │ │ └── allanime │ │ │ │ ├── gql_api.py │ │ │ │ └── __init__.py │ │ └── helpers │ │ │ ├── __init__.py │ │ │ ├── superscrapers.py │ │ │ └── uwu.py │ ├── __init__.py │ ├── __version__.py │ ├── package_resolver.py │ └── config │ │ └── __init__.py ├── __init__.py ├── utils │ ├── __init__.py │ ├── optopt.py │ ├── serverfiles.py │ ├── searching.py │ ├── powertools.py │ ├── http_client.py │ └── media_downloader.py └── __main__.py ├── animdl_demo.gif ├── .github ├── workflows │ ├── pypi.yml │ └── codeql-analysis.yml ├── ISSUE_TEMPLATE │ ├── inquiry.md │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml └── FUNDING.yml ├── pyproject.toml ├── disclaimer.md └── .gitignore /readme.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /animdl/core/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /animdl/core/cli/commands/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /animdl/__init__.py: -------------------------------------------------------------------------------- 1 | from . import utils 2 | -------------------------------------------------------------------------------- /animdl/core/codebase/__init__.py: -------------------------------------------------------------------------------- 1 | from .downloader import * 2 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/mycloud/__init__.py: -------------------------------------------------------------------------------- 1 | from ..vidstream import extract 2 | -------------------------------------------------------------------------------- /animdl_demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/justfoolingaround/animdl/HEAD/animdl_demo.gif -------------------------------------------------------------------------------- /animdl/core/codebase/downloader/__init__.py: -------------------------------------------------------------------------------- 1 | from .handle import handle_download, sanitize_filename 2 | -------------------------------------------------------------------------------- /animdl/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .package_resolver import resolve 2 | 3 | resolve((("lxml", "~=4.9.1"),)) 4 | -------------------------------------------------------------------------------- /animdl/core/cli/exit_codes.py: -------------------------------------------------------------------------------- 1 | """ 2 | Exit code specifications. 3 | """ 4 | 5 | NO_CONTENT_FOUND = 2 6 | STREAMER_CONFIGURATION_REQUIRED = 3 7 | 8 | INTERNET_ISSUE = 4 9 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/constants.py: -------------------------------------------------------------------------------- 1 | LABELS = { 2 | "storage.googleapis.com": "Google API Storage", 3 | "v.vrv.co": "VRV", 4 | "pl.crunchyroll.com": "Crunchyroll", 5 | } 6 | 7 | SOURCE_REPOSITORY = "justfoolingaround", "animdl" 8 | MODULE_NAME = "animdl" 9 | VERSION_FILE_PATH = "master", "pyproject.toml" 10 | -------------------------------------------------------------------------------- /animdl/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities for animdl 3 | 4 | Scraping is often quite messy and might require 5 | a lot of complicated algorithms. Keeping these 6 | complicated algorithms scattered across the codebase 7 | is not good for maintainability. 8 | """ 9 | 10 | from . 
import http_caching, http_client, optopt, powertools, searching 11 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/players/__init__.py: -------------------------------------------------------------------------------- 1 | from .android import AndroidIntentVIEW 2 | from .ffplay import FFPlay 3 | from .mpv import CelluloidPlayer, IINAPlayer, MPVDefaultPlayer 4 | from .vlc import VLCPlayer 5 | 6 | player_mapping = { 7 | "mpv": MPVDefaultPlayer, 8 | "iina": IINAPlayer, 9 | "vlc": VLCPlayer, 10 | "celluloid": CelluloidPlayer, 11 | "ffplay": FFPlay, 12 | "android": AndroidIntentVIEW, 13 | } 14 | -------------------------------------------------------------------------------- /animdl/core/__version__.py: -------------------------------------------------------------------------------- 1 | import importlib.metadata 2 | 3 | try: 4 | __core__ = importlib.metadata.version("animdl") 5 | except importlib.metadata.PackageNotFoundError: 6 | import pathlib 7 | 8 | from animdl.utils.optopt import regexlib 9 | 10 | __core__ = regexlib.search( 11 | r'name = "animdl"\nversion = "(.+?)"', 12 | (pathlib.Path(__file__).parent.parent / "pyproject.toml").read_text(), 13 | ).group(1) 14 | -------------------------------------------------------------------------------- /animdl/core/codebase/providers/animixplay/__init__.py: -------------------------------------------------------------------------------- 1 | import regex 2 | 3 | from ....config import ANIMIXPLAY 4 | from ...helpers import construct_site_based_regex 5 | from .stream_url import fetcher 6 | 7 | REGEX = construct_site_based_regex(ANIMIXPLAY, extra_regex=r"/v\d+/([^?&/]+)") 8 | 9 | TITLES_REGEX = regex.compile(r'(.+?)') 10 | 11 | 12 | def metadata_fetcher(session, url, match): 13 | return {"titles": TITLES_REGEX.findall(session.get(url).text)} 14 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/videobin/__init__.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import regex 4 | 5 | SOURCES_REGEX = regex.compile(r"sources: (\[.+?\])") 6 | 7 | 8 | def extract(session, url, **opts): 9 | response = session.get(url) 10 | if response.status_code >= 400: 11 | return [] 12 | 13 | sources = SOURCES_REGEX.search(response.text) 14 | 15 | if not sources: 16 | return [] 17 | 18 | return [{"stream_url": stream} for stream in json.loads(sources.group(1))] 19 | -------------------------------------------------------------------------------- /animdl/utils/optopt.py: -------------------------------------------------------------------------------- 1 | """ 2 | animdl: Optional optimisations for the project. 
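Callers import regexlib and jsonlib from here: the faster third-party
implementations (regex, orjson) are preferred when installed, and the
standard library (re, json) is the fallback otherwise. A minimal usage
sketch:

    from animdl.utils.optopt import jsonlib, regexlib

    regexlib.search("[0-9]+", "episode 12")
    jsonlib.dumps({"episode": 12})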
3 | """ 4 | 5 | try: 6 | import regex as regexlib 7 | except ImportError: 8 | import re as regexlib 9 | 10 | try: 11 | import orjson as jsonlib 12 | 13 | dumps_function = jsonlib.dumps 14 | 15 | def patched_dumps(*args, **kwargs): 16 | return dumps_function(*args, **kwargs).decode("utf-8") 17 | 18 | jsonlib.dumps = patched_dumps 19 | 20 | except ImportError: 21 | import json as jsonlib 22 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import pathlib 3 | 4 | EXEMPT = ["__init__.py", "__pycache__"] 5 | 6 | if "__path__" in globals(): 7 | __this_path__ = pathlib.Path(__path__[0]) 8 | else: 9 | __this_path__ = pathlib.Path() 10 | 11 | 12 | def iter_extractors(*, exempt=EXEMPT): 13 | for path in __this_path__.glob("*/"): 14 | if path.name not in exempt: 15 | yield importlib.import_module( 16 | ".{.name}".format(path), package=__name__ 17 | ), path.name 18 | -------------------------------------------------------------------------------- /.github/workflows/pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish version changes to PyPI 2 | on: 3 | push: 4 | branches: 5 | - "master" 6 | paths: 7 | - "pyproject.toml" 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | - run: python3 -m pip install --upgrade build && python3 -m build 18 | - name: Publish package 19 | uses: pypa/gh-action-pypi-publish@release/v1 20 | with: 21 | password: ${{ secrets.PYPI_API_TOKEN }} -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/inquiry.md: -------------------------------------------------------------------------------- 1 | *** 2 | 3 | name: Inquiry 4 | about: What does this do? How to do this? 5 | title: '' 6 | labels: 'inquiry: api, inquiry: cli, inquiry: code reason' 7 | assignees: '' 8 | 9 | *** 10 | 11 | From maintainers: All 3 labels have been handed to this issue, remove the inapplicable ones. Your title should answer at least 3 of 5W1H questions for fastest responses. 12 | 13 | ### The inquiry 14 | 15 | Be clear and elaborate, do back read. If you feel your English may not be the best for inquiring, talk in your own language given that it is supported by Google Translate. 16 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "pip" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "github-actions" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | -------------------------------------------------------------------------------- /animdl/core/cli/http_client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import httpx 4 | 5 | from animdl import utils 6 | 7 | from .exit_codes import INTERNET_ISSUE 8 | 9 | http_logger = logging.getLogger("http-client") 10 | 11 | client = httpx.Client( 12 | headers={"user-agent": "animdl/1.0.0"}, 13 | timeout=30, 14 | follow_redirects=True, 15 | ) 16 | 17 | utils.http_client.integrate_ddg_bypassing( 18 | client, 19 | ".marin.moe", 20 | ) 21 | 22 | utils.http_client.setup_global_http_exception_hook( 23 | exit_code=INTERNET_ISSUE, 24 | http_error_baseclass=httpx.HTTPError, 25 | logger=http_logger, 26 | ) 27 | 28 | setattr(client, "cf_request", utils.http_client.cors_proxify) 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | *** 2 | 3 | name: Bug report 4 | about: Create a report to help us improve 5 | title: '' 6 | labels: 'bug: site error' 7 | assignees: '' 8 | 9 | *** 10 | 11 | ### Describe the bug 12 | 13 | A clear and concise description of what the bug is. This includes whether the bug wasn't there in previous versions (in which case you're expected to provide a version number.) 14 | 15 | ### Did the error occur in between an active task (`download` / `stream` / `grab`)? 16 | 17 | This indicates if the scraper or program is broken totally or the scraper or program may have a few issues on the inside. 18 | 19 | ### Screenshots \[Optional] 20 | 21 | If applicable, add screenshots to help explain your problem. 22 | 23 | ### Additional information 24 | 25 | * animdl version\* 26 | * The site and provider this issue is related to. 
27 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | # github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | # patreon: # Replace with a single Patreon username 5 | # open_collective: # Replace with a single Open Collective username 6 | ko_fi: justfoolingaroundkr 7 | # tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | # community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | # liberapay: # Replace with a single Liberapay username 10 | # issuehunt: # Replace with a single IssueHunt username 11 | # otechie: # Replace with a single Otechie username 12 | # lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | # custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 14 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/players/ffplay.py: -------------------------------------------------------------------------------- 1 | from .base_player import BasePlayer 2 | 3 | 4 | class FFPlay(BasePlayer): 5 | 6 | opts_spec = { 7 | "headers": "-headers", 8 | "metadata": "-metadata", 9 | "title_field": "title", 10 | } 11 | 12 | def play(self, stream_url, headers=None, title=None, opts=None, **kwargs): 13 | args = (self.executable, *self.args, stream_url) 14 | 15 | if opts is not None: 16 | args += tuple(opts) 17 | 18 | if headers is not None: 19 | args += ( 20 | f"{self.opts_spec['headers']}={self.headers_joiner.join(f'{key}: {value}' for key, value in headers.items())}", 21 | ) 22 | 23 | if title is not None: 24 | args += (self.opts_spec["metadata"], f"{self.opts_spec['title']}={title}") 25 | 26 | self.spawn(args) 27 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/players/android.py: -------------------------------------------------------------------------------- 1 | from warnings import warn 2 | 3 | from .base_player import BasePlayer 4 | 5 | 6 | class AndroidIntentVIEW(BasePlayer): 7 | 8 | intent = "android.intent.action.VIEW" 9 | 10 | warnable_opts = {"headers", "subtitles"} 11 | 12 | def play(self, stream_url, **kwargs): 13 | 14 | args = ( 15 | "am", 16 | "start", 17 | "-a", 18 | self.intent, 19 | "-d", 20 | stream_url, 21 | *self.args, 22 | ) 23 | 24 | keys = set(kwargs.keys()) 25 | 26 | if self.warnable_opts & keys: 27 | warn( 28 | f"Android does not support {', '.join(map(repr, self.warnable_opts & keys))} options. " 29 | "These options may cause the stream to fail to load or one or more stream attributes to fail." 30 | ) 31 | 32 | self.spawn(args) 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | *** 2 | 3 | name: Feature request 4 | about: Suggest an idea for this project 5 | title: '' 6 | labels: 'request: feature, request: site' 7 | assignees: '' 8 | 9 | *** 10 | 11 | From maintainers: Both labels have been handed to this issue, remove the inapplicable one. Your title should answer at least 3 of 5W1H questions for fastest responses. 12 | 13 | ### Is your feature request related to a problem? Please describe. 
14 | 15 | A clear and concise description of what the problem is. Ex. I'm always frustrated when \[...] 16 | 17 | ### Describe the solution you'd like 18 | 19 | A clear and concise description of what you want to happen. 20 | 21 | ### Describe alternatives you've considered 22 | 23 | A clear and concise description of any alternative solutions or features you've considered. 24 | 25 | ### Additional context 26 | 27 | Add any other context or screenshots about the feature request here. 28 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/streamtape/__init__.py: -------------------------------------------------------------------------------- 1 | import regex 2 | import logging 3 | 4 | STREAMTAPE_REGEX = regex.compile(r"'robotlink'\)\.innerHTML = '(.+?)'\+ \('xcd(.+?)'\)") 5 | 6 | 7 | def extract(session, url, **opts): 8 | """ 9 | A safe extraction for Streamtape. 10 | """ 11 | logger = logging.getLogger("streamtape-extractor") 12 | streamtape_embed_page = session.get(url) 13 | regex_match = STREAMTAPE_REGEX.search(streamtape_embed_page.text) 14 | if not regex_match: 15 | logger.warning( 16 | "Could not find stream links. {}".format( 17 | "The file was deleted." 18 | if streamtape_embed_page.status_code == 404 19 | else "Failed to extract from: {}".format(url) 20 | ) 21 | ) 22 | return [] 23 | 24 | content_get_uri = "https:{}".format("".join(regex_match.groups())) 25 | 26 | streamtape_redirect = session.get(content_get_uri, follow_redirects=False) 27 | return [{"stream_url": streamtape_redirect.headers.get("location")}] 28 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/vidstream/__init__.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import regex 4 | 5 | from ...providers.nineanime.decipher import vrf_futoken 6 | 7 | EMBED_URL_REGEX = regex.compile(r"(.+?)/(?:e(?:mbed)?)/([a-zA-Z0-9]+)\?t=(.+?)(?:&|$)") 8 | 9 | 10 | @functools.lru_cache() 11 | def futoken_resolve(session, url): 12 | return session.get(url).text 13 | 14 | 15 | def extract(session, url, **opts): 16 | match = EMBED_URL_REGEX.search(url) 17 | host = match.group(1) 18 | 19 | futoken = futoken_resolve(session, host + "/futoken") 20 | data = vrf_futoken( 21 | session, 22 | futoken, 23 | "rawvizcloud" if host != "https://mcloud.to" else "rawmcloud", 24 | match.group(2), 25 | ) 26 | vidstream_info = session.get( 27 | data + f"?t={match.group(3)}", 28 | headers={"referer": url}, 29 | ) 30 | 31 | return [ 32 | {"stream_url": content.get("file", ""), "headers": {"referer": url}} 33 | for content in vidstream_info.json()["result"].get("sources", []) 34 | ] 35 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/doodstream/__init__.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import regex 4 | 5 | DOODSTREAM = "https://dood.la/" 6 | 7 | PASS_MD5_RE = regex.compile(r"/(pass_md5/.+?)'") 8 | TOKEN_RE = regex.compile(r"\?token=([^&]+)") 9 | 10 | 11 | def extract(session, url): 12 | response = session.get(url) 13 | 14 | if not response.status_code < 400: 15 | return [] 16 | 17 | embed_page = session.get(url).text 18 | has_md5 = PASS_MD5_RE.search(embed_page) 19 | 20 | if not has_md5: 21 | return [] 22 | 23 | has_token = TOKEN_RE.search(embed_page) 24 | if not has_token: 25 | return [] 26 | 27 | return [ 28 | { 29 | 
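            # The body of the pass_md5 response is the base of the direct
            # URL; the token scraped from the embed page and a millisecond
            # expiry are appended as query parameters below, and the same
            # referer is required when fetching the final stream.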
"stream_url": "{}doodstream?token={}&expiry={}".format( 30 | session.get( 31 | DOODSTREAM + has_md5.group(1), headers={"referer": DOODSTREAM} 32 | ).text, 33 | has_token.group(1), 34 | int(time.time() * 1000), 35 | ), 36 | "headers": {"referer": DOODSTREAM}, 37 | } 38 | ] 39 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/streamlare/__init__.py: -------------------------------------------------------------------------------- 1 | import lxml.html as htmlparser 2 | import regex 3 | 4 | STREAMLARE = "https://streamlare.com/" 5 | 6 | CONTENT_ID = regex.compile(r"/[ve]/([^?#&/]+)") 7 | 8 | 9 | def extract(session, url, **opts): 10 | 11 | csrf_token = ( 12 | htmlparser.fromstring(session.get(url).text) 13 | .cssselect("meta[name='csrf-token']")[0] 14 | .get("content") 15 | ) 16 | content_id = CONTENT_ID.search(url).group(1) 17 | 18 | def fast_yield(): 19 | for _, streams in ( 20 | session.post( 21 | STREAMLARE + "api/video/get", 22 | headers={ 23 | "x-requested-with": "XMLHttpRequest", 24 | "x-csrf-token": csrf_token, 25 | }, 26 | json={"id": content_id}, 27 | ) 28 | .json() 29 | .get("result") 30 | .items() 31 | ): 32 | yield {"stream_url": streams.get("src"), "headers": {"referer": STREAMLARE}} 33 | 34 | return list(fast_yield()) 35 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ "poetry-core>=1.0.0",] 3 | build-backend = "poetry.core.masonry.api" 4 | 5 | [tool.poetry] 6 | name = "animdl" 7 | version = "1.7.27" 8 | description = "A highly efficient, fast, powerful and light-weight anime downloader and streamer for your favorite anime." 
9 | readme = "readme.txt" 10 | authors = [ "justfoolingaround ",] 11 | license = "GPL-3.0-only" 12 | 13 | [tool.poetry.dependencies] 14 | python = ">=3.7,<4.0" 15 | anchor-kr = "~=0.1.3" 16 | anitopy = "~=2.1.0" 17 | click = ">=8.0.4,<8.2.0" 18 | comtypes = "~=1.1.11" 19 | cssselect = ">=1.1,<1.3" 20 | httpx = "~=0.23.0" 21 | tqdm = ">=4.62.3,<4.66.0" 22 | pycryptodomex = "~=3.14.1" 23 | regex = "~=2022.10.31" 24 | yarl = "~=1.8.1" 25 | pyyaml = "~=6.0" 26 | packaging = ">=22,<24" 27 | pkginfo = "^1.9.2" 28 | rich = ">=13.3.1,<13.3.4" 29 | 30 | [tool.poetry.dev-dependencies] 31 | 32 | [tool.poetry.scripts] 33 | animdl = "animdl.__main__:__animdl_cli__" 34 | 35 | [tool.poetry.dependencies.lxml] 36 | version = "4.9.1" 37 | markers = "sys_platform != 'win32'" 38 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/okru/__init__.py: -------------------------------------------------------------------------------- 1 | import json 2 | from html import unescape 3 | 4 | import lxml.html as htmlparser 5 | 6 | OKRU = "https://ok.ru/" 7 | 8 | QUALITY = {"mobile": 144, "lowest": 240, "low": 360, "sd": 480, "hd": 720} 9 | 10 | 11 | def extract(session, url, **opts): 12 | response = session.get(url) 13 | 14 | if response.status_code >= 400: 15 | return [] 16 | 17 | metadata = htmlparser.fromstring(response.text).cssselect( 18 | 'div[data-module="OKVideo"]' 19 | ) 20 | 21 | if not metadata: 22 | return [] 23 | 24 | data_opts = json.loads( 25 | json.loads(unescape(metadata[0].get("data-options"))) 26 | .get("flashvars", {}) 27 | .get("metadata") 28 | ) 29 | 30 | def fast_yield(): 31 | for videos in data_opts.get("videos", []): 32 | yield { 33 | "quality": QUALITY.get(videos.get("name"), 1080), 34 | "stream_url": videos.get("url"), 35 | "headers": {"referer": OKRU}, 36 | } 37 | 38 | return list(fast_yield()) + [ 39 | {"stream_url": data_opts.get("hlsManifestUrl"), "headers": {"referer": OKRU}} 40 | ] 41 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/special.py: -------------------------------------------------------------------------------- 1 | """ 2 | Special arguments parser 3 | """ 4 | 5 | import regex 6 | 7 | SPECIAL_PARSER = regex.compile(r"(?P\D+)(-?\d*)", regex.IGNORECASE) 8 | 9 | strings = {"last": ["l", "latest"], "last*": ["l*", "latest*", "latest-all"]} 10 | 11 | 12 | def get_qualified_name(special): 13 | for key, values in strings.items(): 14 | if special.lower() in (*values, key): 15 | return key 16 | 17 | 18 | def special_parser(streams, string): 19 | 20 | returnee = [] 21 | 22 | for match in SPECIAL_PARSER.finditer(string): 23 | special, index = match.group("special", 2) 24 | index = None if not index else int(index) 25 | 26 | special = get_qualified_name(special) 27 | 28 | if special is None: 29 | continue 30 | 31 | if special == "last": 32 | returnee.extend([streams[-(index or 1)]]) 33 | else: 34 | if special == "last*": 35 | returnee.extend(streams[-(index or 1) :]) 36 | returnee.extend(streams[: -(index or 1)]) 37 | """ 38 | More specials to be thought about. 
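        Currently supported: "last" / "l" / "latest", optionally suffixed
        with a count (e.g. "last3" selects the third-newest stream), and
        "last*" / "latest-all", which yields the newest stream(s) first
        and then wraps around to the remaining ones.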
39 | """ 40 | 41 | yield from returnee 42 | -------------------------------------------------------------------------------- /animdl/core/codebase/providers/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import pathlib 3 | 4 | from ..helpers import append_protocol 5 | 6 | EXEMPT = ["__init__.py", "__pycache__"] 7 | 8 | if "__path__" in globals(): 9 | __this_path__ = pathlib.Path(__path__[0]) 10 | else: 11 | __this_path__ = pathlib.Path() 12 | 13 | 14 | def iter_providers(*, exempt=EXEMPT): 15 | for path in __this_path__.glob("*/"): 16 | if path.name not in exempt: 17 | yield importlib.import_module( 18 | ".{.name}".format(path), package=__name__ 19 | ), path.name 20 | 21 | 22 | def get_provider(url, *, raise_on_failure=True): 23 | for provider_module, name in iter_providers(): 24 | match = provider_module.REGEX.match(url) 25 | if match is not None: 26 | return match, provider_module, name 27 | 28 | if raise_on_failure: 29 | raise Exception("Can't find a provider for the url {!r}.") 30 | 31 | return None, None, None 32 | 33 | 34 | def get_appropriate(session, url, check=lambda *args: True): 35 | regex_match, provider_module, _ = get_provider(append_protocol(url)) 36 | return provider_module.fetcher(session, url, check, match=regex_match) 37 | -------------------------------------------------------------------------------- /animdl/core/codebase/providers/animepahe/inner/archive.py: -------------------------------------------------------------------------------- 1 | from base64 import b64decode 2 | 3 | import regex 4 | 5 | YTSM = regex.compile(r"ysmm = '([^']+)") 6 | 7 | 8 | def decode_adfly(coded_key): 9 | r, j = "", "" 10 | for n, l in enumerate(coded_key): 11 | if n & 1: 12 | r += l 13 | else: 14 | j = l + j 15 | 16 | encoded_uri = list(r + j) 17 | numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n)) 18 | for first, second in zip(numbers, numbers): 19 | xor = int(first[1]) ^ int(second[1]) 20 | if xor < 10: 21 | encoded_uri[first[0]] = str(xor) 22 | 23 | return b64decode(("".join(encoded_uri)).encode("utf-8"))[16:-16].decode( 24 | "utf-8", errors="ignore" 25 | ) 26 | 27 | 28 | def bypass_adfly(session, adfly_url): 29 | 30 | response_code = 302 31 | 32 | while response_code != 200: 33 | adfly_content = session.get( 34 | session.get(adfly_url, follow_redirects=False).headers.get("location"), 35 | follow_redirects=False, 36 | ) 37 | response_code = adfly_content.status_code 38 | return decode_adfly(YTSM.search(adfly_content.text).group(1)) 39 | -------------------------------------------------------------------------------- /animdl/utils/serverfiles.py: -------------------------------------------------------------------------------- 1 | import mimetypes 2 | from pathlib import Path 3 | from typing import TYPE_CHECKING 4 | 5 | from .optopt import regexlib 6 | 7 | if TYPE_CHECKING: 8 | from typing import Optional 9 | 10 | CONTENT_DISP_RE = regexlib.compile(r'filename=(?:"(.+?)"|([^;]+))') 11 | 12 | 13 | def guess_from_path(path: str) -> "Optional[str]": 14 | """ 15 | Attempts to guess the filename from the path. 16 | """ 17 | return Path(path).name 18 | 19 | 20 | def guess_from_content_disposition(content_disposition: str) -> "Optional[str]": 21 | """ 22 | Attempts to guess the filename from the content disposition header. 
23 | """ 24 | match = CONTENT_DISP_RE.search(content_disposition) 25 | 26 | if match is None: 27 | return None 28 | 29 | return guess_from_path(match.group(1) or match.group(2)) 30 | 31 | 32 | def guess_from_content_type(filename, content_type): 33 | """ 34 | Attempts to guess the filename from the content type header. 35 | """ 36 | 37 | guessed_extension = mimetypes.guess_extension(content_type) 38 | 39 | if guessed_extension is None or guessed_extension == ".bin": 40 | return filename 41 | 42 | return (filename or "file") + guessed_extension 43 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/rapidvideo/utils.py: -------------------------------------------------------------------------------- 1 | from base64 import b64decode 2 | from hashlib import md5 3 | 4 | from Cryptodome.Cipher import AES 5 | from Cryptodome.Util.Padding import unpad 6 | 7 | 8 | def pkcs5_bytes_to_key(salt: bytes, secret, *, output=48, hash_module=md5): 9 | key = hash_module(secret + salt).digest() 10 | current_key = key 11 | 12 | while len(current_key) < output: 13 | key = hash_module(key + secret + salt).digest() 14 | current_key += key 15 | 16 | return current_key[:output] 17 | 18 | 19 | def decipher_salted_aes(encoded_url: str, key_finders, *, aes_mode=AES.MODE_CBC): 20 | pwd_from_url = "" 21 | untouched_url = encoded_url 22 | 23 | for start, end in key_finders: 24 | pwd_from_url += untouched_url[start:end] 25 | encoded_url = encoded_url.replace(untouched_url[start:end], "") 26 | 27 | raw_value = b64decode(encoded_url.encode("utf-8")) 28 | 29 | assert raw_value.startswith(b"Salted__"), "Not a salt." 30 | 31 | key = pkcs5_bytes_to_key(raw_value[8:16], pwd_from_url.encode("utf-8")) 32 | return ( 33 | unpad(AES.new(key[:32], aes_mode, key[32:]).decrypt(raw_value[16:]), 16) 34 | .decode("utf-8", "ignore") 35 | .lstrip(" ") 36 | ) 37 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/streamsb/__init__.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | 3 | import regex 4 | import yarl 5 | 6 | PAYLOAD = "616e696d646c616e696d646c7c7c{}7c7c616e696d646c616e696d646c7c7c73747265616d7362/616e696d646c616e696d646c7c7c363136653639366436343663363136653639366436343663376337633631366536393664363436633631366536393664363436633763376336313665363936643634366336313665363936643634366337633763373337343732363536313664373336327c7c616e696d646c616e696d646c7c7c73747265616d7362" 7 | CONTENT_ID_REGEX = regex.compile(r"/e/([^?#&/.]+)") 8 | 9 | 10 | def extract(session, url, **opts): 11 | 12 | content_id = CONTENT_ID_REGEX.search(url).group(1) 13 | content_url = "https://{}/".format(yarl.URL(url).host) 14 | 15 | sources = ( 16 | session.get( 17 | content_url 18 | + "sources41/{}".format( 19 | PAYLOAD.format(binascii.hexlify(content_id.encode()).decode()) 20 | ), 21 | headers={"watchsb": "streamsb"}, 22 | ) 23 | .json() 24 | .get("stream_data", {}) 25 | ) 26 | 27 | return [ 28 | {"stream_url": sources.get("file"), "headers": {"referer": content_url}}, 29 | {"stream_url": sources.get("backup"), "headers": {"referer": content_url}}, 30 | ] 31 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/fuzzysearch.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Iterable, TypeVar 2 | 3 | import regex 4 | 5 | search_type = TypeVar("search_type") 6 | 
7 | options = { 8 | "ignore_whitespace": lambda x: regex.sub(r"\s+", "", x), 9 | "ignore_punctuation": lambda x: regex.sub(r"\p{P}+", "", x), 10 | } 11 | 12 | 13 | def search( 14 | query: "str", 15 | possibilities: "Iterable[search_type]", 16 | *, 17 | processor: "Callable[[search_type], str]" = lambda r: r, 18 | search_options: tuple = ("ignore_punctuation",), 19 | ): 20 | 21 | pattern = regex.compile( 22 | r"(.*?)".join(map(regex.escape, query.strip())), 23 | flags=regex.IGNORECASE, 24 | ) 25 | 26 | def genexp(): 27 | for search_value in possibilities: 28 | processed_search_value = processor(search_value) 29 | 30 | for option in search_options: 31 | if option in options: 32 | processed_search_value = options[option](processed_search_value) 33 | 34 | match = pattern.search(processed_search_value) 35 | if match: 36 | yield len(processed_search_value), search_value 37 | 38 | for _, search_value in sorted(genexp(), key=lambda x: x[0]): 39 | yield search_value 40 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/mp4upload/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import regex 4 | 5 | MP4UPLOAD_REGEX = regex.compile(r"player\|(.*)\|videojs") 6 | 7 | 8 | def extract_480(splitted_values): 9 | return { 10 | "stream_url": "{4}://{18}.mp4upload.{1}:{70}/d/{69}/{68}.{67}".format( 11 | *splitted_values 12 | ) 13 | } 14 | 15 | 16 | def extract_any(splitted_values): 17 | return { 18 | "stream_url": "{3}://{18}.mp4upload.{0}:{73}/d/{72}/{71}.{70}".format( 19 | *splitted_values 20 | ) 21 | } 22 | 23 | 24 | def extract(session, url, **opts): 25 | logger = logging.getLogger("mp4upload-extractor") 26 | 27 | mp4upload_embed_page = session.get(url) 28 | if mp4upload_embed_page.text == "File was deleted": 29 | return [] 30 | 31 | content = MP4UPLOAD_REGEX.search(mp4upload_embed_page.text).group(1).split("|") 32 | 33 | try: 34 | return [ 35 | { 36 | **(extract_480 if "480" in content else extract_any)(content), 37 | "headers": {"referer": url, "ssl_verification": False}, 38 | } 39 | ] 40 | except Exception as e: 41 | return logger.error("'%s' occurred when extracting from '%s'." 
% (e, url)) or [] 42 | 43 | 44 | extract.disabled = True 45 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/rapidvideo/__init__.py: -------------------------------------------------------------------------------- 1 | import json 2 | from functools import lru_cache 3 | 4 | import regex 5 | import yarl 6 | 7 | from .utils import decipher_salted_aes 8 | 9 | CONTENT_ID_REGEX = regex.compile(r"embed-6/([^?#&/.]+)") 10 | 11 | SALT_SECRET_ENDPOINT = "https://github.com/enimax-anime/key/raw/e6/key.txt" 12 | 13 | 14 | @lru_cache() 15 | def get_associative_key(session, endpoint): 16 | return session.get(endpoint).json() 17 | 18 | 19 | def extract(session, url, **opts): 20 | url = yarl.URL(url) 21 | 22 | ajax_response = session.get( 23 | f"https://{url.host}/embed-2/ajax/e-1/getSources", 24 | params={"id": url.name}, 25 | ) 26 | 27 | sources = ajax_response.json() 28 | 29 | key_finders = None 30 | encrypted: bool = sources["encrypted"] 31 | 32 | if encrypted: 33 | key_finders = get_associative_key(session, SALT_SECRET_ENDPOINT) 34 | 35 | subtitles = [ 36 | _.get("file") for _ in sources.get("tracks") if _.get("kind") == "captions" 37 | ] 38 | 39 | if encrypted: 40 | retval = json.loads(decipher_salted_aes(sources["sources"], key_finders)) 41 | 42 | else: 43 | retval = sources["sources"] 44 | 45 | def yielder(): 46 | for _ in retval: 47 | yield { 48 | "stream_url": _["file"], 49 | "subtitle": subtitles, 50 | } 51 | 52 | return list(yielder()) 53 | -------------------------------------------------------------------------------- /animdl/core/codebase/providers/animixplay/hardstream.py: -------------------------------------------------------------------------------- 1 | """ 2 | For when animixplay does not know 3 | but the project knows. 
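In practice: a table of hard-coded stream URLs, keyed by the show's URL
slug, for titles that animixplay itself does not serve.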
4 | """ 5 | 6 | from functools import partial 7 | 8 | import regex 9 | 10 | REPL_REGEX = regex.compile(r"'(.+?)'") 11 | 12 | 13 | hard_urls = { 14 | "overflow": { 15 | "provider": "bestcdn", 16 | "streams": [(f"Overflow/Overflow {_:02d}.mp4", _) for _ in range(1, 9)], 17 | }, 18 | "jimihen-jimiko-wo-kaechau-jun-isei-kouyuu": { 19 | "provider": "bestcdn", 20 | "streams": [(f"Jimihen/Jimihen {_:02d}.mp4", _) for _ in range(1, 9)], 21 | }, 22 | } 23 | 24 | 25 | def yield_from_bestcdn(session, attributes): 26 | 27 | repl_string = REPL_REGEX.search( 28 | session.get("https://anfruete.github.io/play/env.js").text 29 | ).group(1) 30 | 31 | for stream, episode in attributes["streams"]: 32 | yield partial( 33 | lambda stream: [{"stream_url": f"https://{repl_string}/{stream}"}], stream 34 | ), episode 35 | 36 | 37 | yielder_mapping = { 38 | "bestcdn": yield_from_bestcdn, 39 | } 40 | 41 | 42 | def get_hardstream_generator(session, hard_stream): 43 | 44 | if hard_stream not in hard_urls: 45 | return 46 | 47 | attrs = hard_urls[hard_stream] 48 | provider = attrs["provider"] 49 | 50 | if provider not in yielder_mapping: 51 | return 52 | 53 | yielder = yielder_mapping[provider] 54 | return yielder(session, attrs) 55 | -------------------------------------------------------------------------------- /animdl/core/codebase/providers/twistmoe/__init__.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | 3 | import yarl 4 | 5 | from ....config import TWIST 6 | from ...helpers import construct_site_based_regex, parse_from_content 7 | from .stream_url import * 8 | 9 | REGEX = construct_site_based_regex(TWIST, extra_regex=r"/a/([^?&/]+)") 10 | 11 | 12 | def fetcher(session, url, check, match): 13 | anime_name = match.group(1) 14 | 15 | for episode, stream in sorted( 16 | iter_episodes(session, anime_name), key=lambda k: k[0] 17 | ): 18 | if check(episode): 19 | yield partial( 20 | lambda s: [ 21 | parse_from_content( 22 | yarl.URL(s), 23 | name_processor=lambda u: u.name, 24 | stream_url_processor=lambda u: u.human_repr(), 25 | overrides={"headers": {"referer": "https://twist.moe/"}}, 26 | episode_parsed=True, 27 | ) 28 | ], 29 | stream, 30 | ), episode 31 | 32 | 33 | def metadata_fetcher(session, url, match): 34 | return { 35 | "titles": [ 36 | session.get( 37 | "https://api.twist.moe/api/anime/{}".format(match.group(1)), 38 | headers={"x-access-token": "0df14814b9e590a1f26d3071a4ed7974"}, 39 | ) 40 | .json() 41 | .get("title") 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /animdl/core/cli/commands/grab.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import click 4 | 5 | from ...__version__ import __core__ 6 | from ...codebase import providers 7 | from ...config import CHECK_FOR_UPDATES, DEFAULT_PROVIDER 8 | from .. import helpers 9 | from ..http_client import client 10 | 11 | 12 | @click.command( 13 | name="grab", help="Stream the stream links to the stdout stream for external usage." 
14 | ) 15 | @helpers.decorators.content_fetch_options( 16 | include_quality_options=False, 17 | include_special_options=False, 18 | ) 19 | @helpers.decorators.automatic_selection_options() 20 | @helpers.decorators.logging_options() 21 | @helpers.decorators.setup_loggers() 22 | @helpers.decorators.banner_gift_wrapper( 23 | client, __core__, check_for_updates=CHECK_FOR_UPDATES 24 | ) 25 | def animdl_grab(query, index, **kwargs): 26 | 27 | console = helpers.stream_handlers.get_console() 28 | console.print( 29 | "The content is outputted to [green]stdout[/] while these messages are outputted to [red]stderr[/]." 30 | ) 31 | 32 | anime, provider = helpers.process_query( 33 | client, query, console, auto_index=index, provider=DEFAULT_PROVIDER 34 | ) 35 | 36 | if not anime: 37 | return 38 | 39 | for stream_url_caller, episode in providers.get_appropriate( 40 | client, anime.get("anime_url"), check=kwargs.get("range") 41 | ): 42 | stream_url = list(helpers.ensure_extraction(client, stream_url_caller)) 43 | click.echo(json.dumps({"episode": episode, "streams": stream_url})) 44 | -------------------------------------------------------------------------------- /animdl/core/codebase/extractors/dailymotion/__init__.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import regex 4 | 5 | DAILYMOTION_VIDEO = regex.compile(r"/embed/video/([^&?/]+)") 6 | HLS_STREAM_REGEX = regex.compile(r'NAME="(\d+?)",PROGRESSIVE-URI="(.+?)"') 7 | 8 | 9 | def get_mp4s(session, m3u8_playlist): 10 | for match in HLS_STREAM_REGEX.finditer( 11 | session.get( 12 | m3u8_playlist, headers={"Referer": "https://www.dailymotion.com/"} 13 | ).text 14 | ): 15 | yield { 16 | "quality": int(match.group(1)), 17 | "stream_url": match.group(2), 18 | } 19 | 20 | 21 | def extract(session, url, **opts): 22 | match = DAILYMOTION_VIDEO.search(url) 23 | 24 | if not match: 25 | return [] 26 | 27 | metadata_uri = "https://www.dailymotion.com/player/metadata/video/{}".format( 28 | match.group(1) 29 | ) 30 | 31 | metadata = session.get(metadata_uri).json() 32 | 33 | subtitles = functools.reduce( 34 | list.__add__, 35 | (list(data.values()) for _, data in metadata.get("subtitles", {}).get("data")), 36 | [], 37 | ) 38 | 39 | def genexp(): 40 | 41 | for _, streams in metadata.get("qualities", {}).items(): 42 | for stream in streams: 43 | yield from ( 44 | list(get_mp4s(session, stream.get("url"))) 45 | or [{"stream_url": stream.get("url")}] 46 | ) 47 | 48 | return [ 49 | { 50 | **stream, 51 | **({"subtitles": subtitles} if subtitles else {}), 52 | } 53 | for stream in genexp() 54 | ] 55 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/processors.py: -------------------------------------------------------------------------------- 1 | from ...codebase.providers import get_provider 2 | from .prompts import get_prompt_manager 3 | from .searcher import provider_searcher_mapping 4 | 5 | 6 | def prompt_user(logger, anime_list_genexp, provider): 7 | 8 | manager = get_prompt_manager() 9 | 10 | return manager( 11 | logger, 12 | anime_list_genexp, 13 | processor=lambda component: (component, provider), 14 | component_name="search result", 15 | fallback=({}, None), 16 | error_message=f"Failed to find anything of that query on {provider!r}. 
Try searching on other providers.", 17 | stdout_processor=lambda component: f"{component[0]['name']} / {component[0]['anime_url']}", 18 | ) 19 | 20 | 21 | def process_query(session, query: str, console, provider: str, *, auto_index=1): 22 | 23 | match, module, provider_name = get_provider(query, raise_on_failure=False) 24 | 25 | if module: 26 | return { 27 | "anime_url": query, 28 | "name": module.metadata_fetcher(session, query, match).get( 29 | "titles", [None] 30 | )[0], 31 | }, provider_name 32 | 33 | provider_name, *custom_query = query.split(":", 1) 34 | 35 | if provider_name in provider_searcher_mapping: 36 | provider, query = provider_name, ":".join(custom_query) 37 | 38 | genexp = provider_searcher_mapping[provider](session, query) 39 | 40 | if auto_index is None: 41 | return prompt_user(console, genexp, provider) 42 | 43 | expanded = list(genexp) 44 | 45 | if not expanded: 46 | return {}, None 47 | 48 | return expanded[(auto_index - 1) % len(expanded)], provider 49 | -------------------------------------------------------------------------------- /animdl/core/codebase/providers/hahomoe/__init__.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import lxml.html as htmlparser 4 | 5 | from ....config import HAHO 6 | from ...helpers import construct_site_based_regex, optopt 7 | 8 | REGEX = construct_site_based_regex(HAHO, extra_regex=r"/anime/([^?&/]+)") 9 | 10 | TITLES_REGEX = optopt.regexlib.compile(r'
(.+?)
') 11 | SORUCES_REGEX = optopt.regexlib.compile( 12 | r'', 13 | flags=optopt.regexlib.I, 14 | ) 15 | 16 | 17 | def iter_stream_urls(session, episode_page): 18 | episode_page_content = session.get(episode_page) 19 | 20 | iframes = htmlparser.fromstring(episode_page_content.text).cssselect("iframe") 21 | 22 | if not iframes: 23 | return 24 | 25 | embed_source = session.get( 26 | iframes[0].get("src"), headers={"referer": episode_page} 27 | ).text 28 | 29 | for source in SORUCES_REGEX.finditer(embed_source): 30 | yield { 31 | "stream_url": source.group(1), 32 | "quality": int(source.group(2)), 33 | } 34 | 35 | 36 | def fetcher(session, url, check, match): 37 | url = match.group(0) 38 | 39 | episode_list_page = session.get(url) 40 | count = int( 41 | htmlparser.fromstring(episode_list_page.text) 42 | .cssselect("span.badge")[0] 43 | .text_content() 44 | ) 45 | 46 | for episode in range(1, count + 1): 47 | if check(episode): 48 | yield functools.partial( 49 | (lambda url: list(iter_stream_urls(session, url))), f"{url}/{episode}" 50 | ), episode 51 | 52 | 53 | def metadata_fetcher(session, url, match): 54 | 55 | url = match.group(0) 56 | 57 | return {"titles": TITLES_REGEX.findall(session.get(url).text)} 58 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/player.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | from warnings import warn 3 | 4 | from ...config import PLAYERS 5 | from .players import player_mapping 6 | 7 | 8 | def iter_available_players(): 9 | for player, player_info in PLAYERS.items(): 10 | 11 | executable = player_info.get("executable") 12 | 13 | if executable is None or shutil.which(executable) is None: 14 | continue 15 | 16 | yield player, player_info 17 | 18 | 19 | def get_player(default_player, user_selected_player=None): 20 | 21 | players = tuple(iter_available_players()) 22 | 23 | if not players: 24 | raise RuntimeError( 25 | "No players found on the system. " 26 | "Make sure the player is installed and in PATH or in the current directory or has a configured executable." 27 | ) 28 | 29 | if user_selected_player is None: 30 | user_selected_player = default_player 31 | 32 | for player, player_info in players: 33 | if player == user_selected_player: 34 | return player, player_info 35 | 36 | fallback_player, _ = players[0] 37 | 38 | warn( 39 | f"Could not find {user_selected_player!r} in the system. " 40 | f"Falling back to the first available player {fallback_player!r}." 41 | ) 42 | 43 | return fallback_player, _ 44 | 45 | 46 | def handle_player(default_player: str, player_opts: tuple, user_selected_player=None): 47 | if default_player is None: 48 | raise RuntimeError("No default player set in the configuration.") 49 | 50 | player, player_info = get_player(default_player, user_selected_player) 51 | 52 | cls = player_mapping[player] 53 | 54 | return cls( 55 | executable=player_info["executable"], 56 | args=tuple(player_info.get("opts", ())) + player_opts, 57 | ) 58 | -------------------------------------------------------------------------------- /animdl/core/cli/commands/search.py: -------------------------------------------------------------------------------- 1 | import click 2 | 3 | from ...__version__ import __core__ 4 | from ...codebase.helpers import optopt 5 | from ...codebase.providers import get_provider 6 | from ...config import CHECK_FOR_UPDATES, DEFAULT_PROVIDER 7 | from .. 
import helpers 8 | from ..http_client import client 9 | 10 | 11 | @click.command(name="search", help="Search for an anime in the provider.") 12 | @click.argument("query", required=True) 13 | @click.option( 14 | "-p", 15 | "--provider", 16 | help="Provider to search in.", 17 | default=DEFAULT_PROVIDER, 18 | type=click.Choice(helpers.provider_searcher_mapping.keys(), case_sensitive=False), 19 | ) 20 | @helpers.decorators.logging_options() 21 | @click.option( 22 | "-j", 23 | "--json", 24 | help="Output as json.", 25 | is_flag=True, 26 | flag_value=True, 27 | ) 28 | @helpers.decorators.setup_loggers() 29 | @helpers.decorators.banner_gift_wrapper( 30 | client, __core__, check_for_updates=CHECK_FOR_UPDATES 31 | ) 32 | def animdl_search(query, json, provider, **kwargs): 33 | 34 | console = helpers.stream_handlers.get_console() 35 | 36 | match, module, _ = get_provider(query, raise_on_failure=False) 37 | 38 | if module is not None: 39 | genexp = ( 40 | { 41 | "name": ( 42 | module.metadata_fetcher(client, query, match)["titles"] or [None] 43 | )[0] 44 | or "", 45 | "anime_url": query, 46 | }, 47 | ) 48 | else: 49 | genexp = helpers.provider_searcher_mapping.get(provider)(client, query) 50 | 51 | for count, search_data in enumerate(genexp, 1): 52 | if json: 53 | print(optopt.jsonlib.dumps(search_data)) 54 | else: 55 | console.print( 56 | f"{count}. {search_data['name']} / {search_data['anime_url']}" 57 | ) 58 | -------------------------------------------------------------------------------- /animdl/core/codebase/providers/twistmoe/stream_url.py: -------------------------------------------------------------------------------- 1 | from base64 import b64decode 2 | from hashlib import md5 3 | 4 | from Cryptodome.Cipher import AES 5 | 6 | TWISTMOE_SECRET = b"267041df55ca2b36f2e322d05ee2c9cf" 7 | 8 | TWISTMOE_CDN = "https://{}cdn.twist.moe" 9 | TWISTMOE_API = "https://api.twist.moe/api/anime/" 10 | 11 | 12 | def unpad_content(content): 13 | return content[ 14 | : -(content[-1] if isinstance(content[-1], int) else ord(content[-1])) 15 | ] 16 | 17 | 18 | def generate_key(salt: bytes, *, output=48): 19 | 20 | key = md5(TWISTMOE_SECRET + salt).digest() 21 | current_key = key 22 | 23 | while len(current_key) < output: 24 | key = md5(key + TWISTMOE_SECRET + salt).digest() 25 | current_key += key 26 | 27 | return current_key[:output] 28 | 29 | 30 | def decipher(encoded_url: str): 31 | 32 | s1 = b64decode(encoded_url.encode("utf-8")) 33 | assert s1.startswith(b"Salted__"), "Not a salt." 
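    # OpenSSL-style salted blob: bytes 0-7 hold the b"Salted__" magic,
    # 8-15 the salt and the rest the ciphertext. generate_key() mirrors
    # EVP_BytesToKey with MD5, producing 48 bytes: a 32-byte AES-256 key
    # followed by a 16-byte CBC IV.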
34 | key = generate_key(s1[8:16]) 35 | return ( 36 | unpad_content(AES.new(key[:32], AES.MODE_CBC, key[32:]).decrypt(s1[16:])) 37 | .decode("utf-8", "ignore") 38 | .lstrip(" ") 39 | ) 40 | 41 | 42 | def api(session, endpoint, content_slug): 43 | return session.get( 44 | TWISTMOE_API + content_slug + endpoint, 45 | headers={"x-access-token": "0df14814b9e590a1f26d3071a4ed7974"}, 46 | ) 47 | 48 | 49 | def iter_episodes(session, content_slug): 50 | 51 | ongoing = api(session, "/", content_slug).json().get("ongoing") 52 | 53 | source_base = TWISTMOE_CDN.format("air-" if ongoing else "") 54 | 55 | episodes_page = api(session, "/sources", content_slug) 56 | 57 | if episodes_page.status_code >= 400: 58 | return 59 | 60 | for episode in api(session, "/sources", content_slug).json(): 61 | yield episode.get("number", 0), source_base + decipher(episode.get("source")) 62 | -------------------------------------------------------------------------------- /animdl/core/codebase/providers/nineanime/decipher.py: -------------------------------------------------------------------------------- 1 | import base64 2 | from urllib.parse import quote, unquote 3 | 4 | from Cryptodome.Cipher import ARC4 5 | 6 | VRF_KEY = b"iECwVsmW38Qe94KN" 7 | URL_KEY = b"hlPeNwkncH0fq9so" 8 | CHAR_SUBST_OFFSETS = (-4, -4, 3, 3, 6, -4, 3, -6, -2, -4) 9 | 10 | 11 | VRF_ENDPOINT = "https://9anime.eltik.net" 12 | VRF_API_KEY = "jerry" # Remind me to get my own key someday 13 | 14 | 15 | def vrf_api(session, content_id: str, api_key=VRF_API_KEY, *, endpoint="vrf"): 16 | return session.get( 17 | VRF_ENDPOINT + f"/{endpoint}", 18 | params={"query": content_id, "apikey": api_key}, 19 | ).json()["url"] 20 | 21 | 22 | def vrf_futoken(session, futoken, endpoint, query): 23 | return session.post( 24 | VRF_ENDPOINT + f"/{endpoint}", 25 | params={ 26 | "query": query, 27 | "apikey": VRF_API_KEY, 28 | }, 29 | data={ 30 | "futoken": futoken, 31 | "query": query, 32 | }, 33 | ).json()["rawURL"] 34 | 35 | 36 | def char_subst(content: bytes, *, offsets=CHAR_SUBST_OFFSETS): 37 | for n, value in enumerate(content): 38 | yield (value + offsets[n % len(offsets)]) 39 | 40 | 41 | def generate_vrf_from_content_id( 42 | content_id: str, 43 | key=VRF_KEY, 44 | *, 45 | offsets=CHAR_SUBST_OFFSETS, 46 | encoding="utf-8", 47 | reverse_later=True, 48 | ): 49 | encoded_id = base64.b64encode( 50 | ARC4.new(key).encrypt(quote(content_id).encode(encoding)) 51 | ) 52 | 53 | if reverse_later: 54 | encoded_id = bytes(char_subst(encoded_id, offsets=offsets))[::-1] 55 | else: 56 | encoded_id = bytes(char_subst(encoded_id[::-1], offsets=offsets)) 57 | 58 | return base64.b64encode(encoded_id).decode(encoding) 59 | 60 | 61 | def decrypt_url(encrypted_url: str, key=URL_KEY, *, encoding="utf-8"): 62 | return unquote( 63 | ARC4.new(key).decrypt(base64.b64decode(encrypted_url)).decode(encoding) 64 | ) 65 | -------------------------------------------------------------------------------- /animdl/utils/searching.py: -------------------------------------------------------------------------------- 1 | """ 2 | animdl: Utilities for the internal searching system. 
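The central helper, iter_search_results, builds a subsequence-style regex
from the query (any amount of text may separate the query's characters)
and yields matches from shortest candidate to longest, e.g.:

    >>> list(iter_search_results("naruto", ["Bleach", "Naruto"]))
    ['Naruto']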
3 | """ 4 | import string 5 | from typing import Callable, Iterable, Optional, TypeVar 6 | 7 | from .optopt import regexlib 8 | 9 | search_type = TypeVar("search_type") 10 | 11 | 12 | options = { 13 | "ignore_whitespace": lambda x: regexlib.sub(r"\s+", "", x), 14 | "ignore_punctuation": lambda x: regexlib.sub( 15 | rf"[{regexlib.escape(string.punctuation)}]+", "", x 16 | ), 17 | } 18 | 19 | 20 | def iter_search_results( 21 | query: str, 22 | possibilities: Iterable[search_type], 23 | *, 24 | processor: Optional[Callable[[search_type], str]] = None, 25 | search_options: Iterable[str] = ("ignore_punctuation",), 26 | ): 27 | """ 28 | Powerful searching function that uses regex building 29 | for matching the query with the possibilities. 30 | 31 | :param query: The query to search for. 32 | :param possibilities: The possibilities to search in. 33 | :param processor: A function that processes the possibilities. 34 | :param search_options: The options to use for searching. 35 | 36 | :return: A generator that yields the search results. 37 | """ 38 | pattern = regexlib.compile( 39 | r"(.*?)".join(map(regexlib.escape, query.strip())), 40 | flags=regexlib.IGNORECASE, 41 | ) 42 | 43 | def genexp(): 44 | for search_value in possibilities: 45 | if processor is not None: 46 | processed_value = processor(search_value) 47 | else: 48 | processed_value: str = search_value # type: ignore 49 | 50 | for option in search_options: 51 | if option in options: 52 | search_value = options[option](search_value) 53 | 54 | match = pattern.search(processed_value) 55 | 56 | if match: 57 | yield len(processed_value), search_value 58 | 59 | for _, search_value in sorted(genexp(), key=lambda x: x[0]): 60 | yield search_value 61 | -------------------------------------------------------------------------------- /animdl/core/cli/helpers/players/vlc.py: -------------------------------------------------------------------------------- 1 | from warnings import warn 2 | 3 | from .base_player import BasePlayer 4 | 5 | 6 | class VLCPlayer(BasePlayer): 7 | optimisation_args = ("--http-forward-cookies",) 8 | 9 | opts_spec = { 10 | "http_referrer": "--http-referrer", 11 | "user_agent": "--user-agent", 12 | "subtitle": "--sub-file", 13 | "audio": "--audio-file", 14 | } 15 | 16 | def play( 17 | self, 18 | stream_url, 19 | subtitles=None, 20 | headers=None, 21 | title=None, 22 | opts=None, 23 | audio_tracks=None, 24 | **kwargs, 25 | ): 26 | args = (self.executable, *self.args, stream_url) 27 | 28 | if opts is not None: 29 | args += tuple(opts) 30 | 31 | if headers is not None: 32 | lowercased_headers = {key.lower(): value for key, value in headers.items()} 33 | 34 | keys = set(lowercased_headers.keys()) 35 | 36 | if "user-agent" in keys: 37 | args += ( 38 | f"{self.opts_spec['user_agent']}={lowercased_headers['user-agent']}", 39 | ) 40 | 41 | if "referer" in keys: 42 | args += ( 43 | f"{self.opts_spec['http_referrer']}={lowercased_headers['referer']}", 44 | ) 45 | 46 | extra = keys - {"user-agent", "referer"} 47 | 48 | if extra: 49 | warn( 50 | f"VLC does not support {', '.join(map(repr, extra))} headers. " 51 | "This may result in stream loading failure." 
/animdl/core/cli/helpers/players/vlc.py:
--------------------------------------------------------------------------------
 1 | from warnings import warn
 2 | 
 3 | from .base_player import BasePlayer
 4 | 
 5 | 
 6 | class VLCPlayer(BasePlayer):
 7 |     optimisation_args = ("--http-forward-cookies",)
 8 | 
 9 |     opts_spec = {
10 |         "http_referrer": "--http-referrer",
11 |         "user_agent": "--user-agent",
12 |         "subtitle": "--sub-file",
13 |         "audio": "--audio-file",
14 |     }
15 | 
16 |     def play(
17 |         self,
18 |         stream_url,
19 |         subtitles=None,
20 |         headers=None,
21 |         title=None,
22 |         opts=None,
23 |         audio_tracks=None,
24 |         **kwargs,
25 |     ):
26 |         args = (self.executable, *self.args, stream_url)
27 | 
28 |         if opts is not None:
29 |             args += tuple(opts)
30 | 
31 |         if headers is not None:
32 |             lowercased_headers = {key.lower(): value for key, value in headers.items()}
33 | 
34 |             keys = set(lowercased_headers.keys())
35 | 
36 |             if "user-agent" in keys:
37 |                 args += (
38 |                     f"{self.opts_spec['user_agent']}={lowercased_headers['user-agent']}",
39 |                 )
40 | 
41 |             if "referer" in keys:
42 |                 args += (
43 |                     f"{self.opts_spec['http_referrer']}={lowercased_headers['referer']}",
44 |                 )
45 | 
46 |             extra = keys - {"user-agent", "referer"}
47 | 
48 |             if extra:
49 |                 warn(
50 |                     f"VLC does not support {', '.join(map(repr, extra))} headers. "
51 |                     "This may result in stream loading failure."
52 |                 )
53 | 
54 |         if title is not None:
55 |             args += (f"--meta-title={title}",)
56 | 
57 |         if subtitles is not None:
58 |             args += tuple(f"--sub-file={subtitle}" for subtitle in subtitles)
59 | 
60 |         if audio_tracks is not None:
61 |             args += tuple(f"--audio-file={audio_track}" for audio_track in audio_tracks)
62 | 
63 |         self.spawn(args)
64 | 
--------------------------------------------------------------------------------
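For reference, the argument vector `play` builds for a typical call looks roughly like this; every concrete value below (executable path, URL, header values, filenames) is invented:

```python
# Hypothetical result of VLCPlayer.play with a user agent, a referer,
# a title and one subtitle file; all concrete values are invented.
args = (
    "/usr/bin/vlc",                # self.executable
    "--http-forward-cookies",      # optimisation/self.args (merge assumed
                                   # to happen in BasePlayer)
    "https://example.com/stream.m3u8",
    "--user-agent=Mozilla/5.0",
    "--http-referrer=https://example.com/",
    "--meta-title=Some Anime - Episode 01",
    "--sub-file=episode-01.en.srt",
)
```

Note that only the user agent and referer survive; VLC has no generic flag for arbitrary HTTP headers, which is why the remainder is reduced to a warning.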
/animdl/core/codebase/providers/animeout/__init__.py:
--------------------------------------------------------------------------------
 1 | from functools import partial
 2 | 
 3 | import lxml.html as htmlparser
 4 | import regex
 5 | import yarl
 6 | 
 7 | from ....config import ANIMEOUT
 8 | from ...helpers import construct_site_based_regex, group_episodes, parse_from_content
 9 | 
10 | REGEX = construct_site_based_regex(ANIMEOUT, extra_regex=r"/([^?&/]+)")
11 | 
12 | # NOTE: The markup inside this pattern did not survive rendering; the tag
13 | # used here is an assumption, only the (.+?) capture is original.
14 | TITLES_REGEX = regex.compile(r"<h1[^>]*>(.+?)</h1>")
15 | 
16 | public_domains = {
17 |     "nimbus": "pub9",
18 |     "chomusuke": "pub8",
19 |     "chunchunmaru": "pub7",
20 |     "ains": "pub6",
21 |     "yotsuba": "pub5",
22 |     "slaine": "pub4",
23 |     "jibril": "pub3",
24 |     "sv1": "pub2",
25 |     "sv4": "pub2",
26 |     "download": "pub1",
27 | }
28 | 
29 | 
30 | def animeout_stream_url(url: "yarl.URL") -> str:
31 | 
32 |     host_prefix, rest = url.host.split(".", 1)
33 | 
34 |     if host_prefix in public_domains:
35 |         url = url.with_host(f"{public_domains[host_prefix]}.{rest}")
36 | 
37 |     return url.human_repr()
38 | 
39 | 
40 | def fetcher(session, url, check, match):
41 |     animeout_page = session.cf_request("GET", url)
42 |     parsed = htmlparser.fromstring(animeout_page.text)
43 | 
44 |     downloadables = (
45 |         yarl.URL(anchor.get("href"))
46 |         for anchor in parsed.cssselect('.article-content a[href$="mkv"]')
47 |         if "Download" in anchor.text_content()
48 |     )
49 | 
50 |     for episode, content in sorted(
51 |         group_episodes(
52 |             parse_from_content(
53 |                 content,
54 |                 name_processor=lambda c: c.name,
55 |                 stream_url_processor=animeout_stream_url,
56 |             )
57 |             for content in downloadables
58 |         ).items(),
59 |         key=lambda x: x[0],
60 |     ):
61 |         if check(episode):
62 |             yield partial(list, content), episode
63 | 
64 | 
65 | def metadata_fetcher(session, url, match):
66 |     return {"titles": TITLES_REGEX.findall(session.cf_request("GET", url).text)}
67 | 
--------------------------------------------------------------------------------
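The `public_domains` table maps known host prefixes to their public `pubN` counterparts, and `animeout_stream_url` swaps the prefix when it recognises one. A toy check of that rewrite (the domain and path are invented):

```python
# Illustrative: the host-prefix rewrite done by animeout_stream_url;
# the domain and path below are invented.
import yarl

url = yarl.URL("https://chunchunmaru.example-animeout.org/releases/episode-01.mkv")
host_prefix, rest = url.host.split(".", 1)

# "chunchunmaru" maps to "pub7", so the stream would be fetched from
# pub7.example-animeout.org instead.
print(url.with_host(f"pub7.{rest}").human_repr())
```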
/animdl/core/cli/helpers/aniskip.py:
--------------------------------------------------------------------------------
 1 | from .fuzzysearch import search
 2 | 
 3 | ENDPOINT = "https://api.aniskip.com/v1"
 4 | 
 5 | MAL_XHR_SEARCH = "https://myanimelist.net/search/prefix.json"
 6 | 
 7 | 
 8 | type_keywords = {
 9 |     "op": "Opening",
10 |     "ed": "Ending",
11 | }
12 | 
13 | 
14 | def iter_general_timestamps(aniskip_data):
15 | 
16 |     op_end = None
17 |     ed_start = None
18 | 
19 |     for item in aniskip_data:
20 | 
21 |         skip_type = item["skip_type"]
22 | 
23 |         if skip_type == "op":
24 |             op_end = item["interval"]["end_time"]
25 | 
26 |         if skip_type == "ed":
27 |             ed_start = item["interval"]["start_time"]
28 | 
29 |         yield {
30 |             "chapter": type_keywords.get(skip_type, skip_type),
31 |             "start": item["interval"]["start_time"],
32 |             "end": item["interval"]["end_time"],
33 |         }
34 | 
35 |     if op_end is not None:
36 |         # NOTE: This is a continuation fix; mpv does not seem to like
37 |         # unmarked chapters between two chapters, so the episode body
38 |         # is marked explicitly.
39 |         yield {
40 |             "chapter": "Episode",
41 |             "start": op_end,
42 |             "end": ed_start or op_end,
43 |         }
44 | 
45 | 
46 | def get_timestamps(session, anime_name, anime_episode):
47 | 
48 |     data = (
49 |         session.get(
50 |             MAL_XHR_SEARCH,
51 |             params={"type": "anime", "keyword": anime_name},
52 |         )
53 |         .json()
54 |         .get("categories", [{}])[0]
55 |         .get("items", [])
56 |     )
57 | 
58 |     if not data:
59 |         return
60 | 
61 |     top_result = (
62 |         tuple(search(anime_name, data, processor=lambda _: _["name"])) or data
63 |     )[0]
64 | 
65 |     ani_skip_response = session.get(
66 |         f"{ENDPOINT}/skip-times/{top_result['id']}/{anime_episode}",
67 |         params={
68 |             "types[]": ("op", "ed"),
69 |         },
70 |     )
71 | 
72 |     if ani_skip_response.status_code < 400:
73 |         json_data = ani_skip_response.json()
74 |         if json_data["found"]:
75 |             return list(iter_general_timestamps(json_data["results"]))
76 | 
--------------------------------------------------------------------------------
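Concretely, given one opening and one ending interval, the generator above emits three chapters, with the episode body marked explicitly. A sketch with invented timestamps:

```python
# Illustrative: invented skip intervals in the shape aniskip returns.
from animdl.core.cli.helpers.aniskip import iter_general_timestamps

aniskip_results = [
    {"skip_type": "op", "interval": {"start_time": 0.0, "end_time": 90.0}},
    {"skip_type": "ed", "interval": {"start_time": 1320.0, "end_time": 1410.0}},
]

# Yields Opening (0-90) and Ending (1320-1410), then the explicit
# "Episode" chapter spanning 90-1320 so mpv sees no unmarked gap.
for chapter in iter_general_timestamps(aniskip_results):
    print(chapter)
```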

/animdl/core/codebase/providers/kawaiifu/__init__.py:
--------------------------------------------------------------------------------
 1 | from collections import defaultdict
 2 | from functools import partial
 3 | 
 4 | import lxml.html as htmlparser
 5 | import regex
 6 | import yarl
 7 | 
 8 | # NOTE: The named-group names were lost in rendering (the "<name>" parts
 9 | # were stripped as markup); the names below are assumptions, the patterns
10 | # themselves are original.
11 | REGEX = regex.compile(
12 |     r"(?:https?://)?(?:\S+\.)?(?P<host>domdom\.stream|bestanime3\.xyz|kawaiifu\.com)"
13 |     r"/(?P<prefix>anime/)?(?P<category>season/[^/]+|.+)/(?P<slug>[^?&#]+)"
14 | )
15 | TITLES_REGEX = regex.compile(r"<h1[^>]*>(.+?)</h1>")  # tag is an assumption, see above
16 | 
17 | 
18 | def get_int(content):
19 |     d = regex.search(r"[0-9]+", content)
20 |     if d:
21 |         return int(d.group(0))
22 | 
23 | 
24 | def extract_stream_urls(session, urls):
25 |     for url in urls:
26 |         html_element = htmlparser.fromstring(session.get(url).text)
27 |         for source in html_element.cssselect("source"):
28 |             yield {
29 |                 "quality": get_int(source.get("data-quality")),
30 |                 "stream_url": source.get("src"),
31 |                 "headers": {"referer": url},
32 |             }
33 | 
34 | 
35 | def get_from_url(session, url):
36 |     episodes = defaultdict(list)
37 |     html_element = htmlparser.fromstring(session.get(url).text)
38 | 
39 |     for servers in html_element.cssselect(".list-server"):
40 |         for element in servers.cssselect(".list-ep a"):
41 |             episodes[get_int(element.text_content()) or 0].append(element.get("href"))
42 |     return episodes
43 | 
44 | 
45 | def fetcher(session, url, check, match):
46 | 
47 |     url = yarl.URL(url).with_host("bestanime3.xyz").human_repr()
48 | 
49 |     for episode, episode_urls in sorted(
50 |         get_from_url(session, url).items(), key=lambda x: x[0]
51 |     ):
52 |         if check(episode):
53 |             yield partial(
54 |                 lambda episode_urls: list(extract_stream_urls(session, episode_urls)),
55 |                 episode_urls,
56 |             ), episode
57 | 
58 | 
59 | def metadata_fetcher(session, url, match):
60 |     return {
61 |         "titles": TITLES_REGEX.findall(
62 |             session.get(yarl.URL(url).with_host("bestanime3.xyz").human_repr()).text
63 |         )
64 |     }
65 | 
--------------------------------------------------------------------------------
/animdl/core/codebase/providers/animepahe/inner/__init__.py:
--------------------------------------------------------------------------------
 1 | import regex
 2 | 
 3 | KWIK_PARAMS_RE = regex.compile(r'\("(\w+)",\d+,"(\w+)",(\d+),(\d+),\d+\)')
 4 | KWIK_D_URL = regex.compile(r'action="(.+?)"')
 5 | KWIK_D_TOKEN = regex.compile(r'value="(.+?)"')
 6 | 
 7 | # NOTE: The anchor-tag markup around "Redirect me" was stripped in
 8 | # rendering; the pattern is reconstructed so .group(1) still yields
 9 | # the href, but the exact original markup is an assumption.
10 | KWIK_REDIRECTION_RE = regex.compile(r'<a href="(.+?)"[^>]*>Redirect me</a>')
11 | 
12 | CHARACTER_MAP = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/"
13 | 
14 | 
15 | def get_string(content, s1, s2):
16 | 
17 |     slice_2 = CHARACTER_MAP[0:s2]
18 | 
19 |     acc = 0
20 |     for n, i in enumerate(content[::-1]):
21 |         acc += (int(i) if i.isdigit() else 0) * s1**n
22 | 
23 |     k = ""
24 |     while acc > 0:
25 |         k = slice_2[acc % s2] + k
26 |         # Integer division keeps acc exact; true division would degrade
27 |         # to floats and lose precision on large accumulators.
28 |         acc = (acc - (acc % s2)) // s2
29 | 
30 |     return k or "0"
31 | 
32 | 
33 | def decrypt(full_string, key, v1, v2):
34 |     v1, v2 = int(v1), int(v2)
35 |     r = ""
36 |     i = 0
37 |     while i < len(full_string):
38 |         s = ""
39 |         while full_string[i] != key[v2]:
40 |             s += full_string[i]
41 |             i += 1
42 |         j = 0
43 |         while j < len(key):
44 |             s = s.replace(key[j], str(j))
45 |             j += 1
46 |         r += chr(int(get_string(s, v2, 10)) - v1)
47 |         i += 1
48 |     return r
49 | 
50 | 
51 | def get_animepahe_url(session, pahe_win_url):
52 | 
53 |     response = session.get(pahe_win_url)
54 |     response.raise_for_status()
55 | 
56 |     url = KWIK_REDIRECTION_RE.search(response.text).group(1)
57 | 
58 |     download_page = session.get(url)
59 | 
60 |     full_key, key, v1, v2 = KWIK_PARAMS_RE.search(download_page.text).group(1, 2, 3, 4)
61 | 
62 |     decrypted = decrypt(full_key, key, v1, v2)
63 | 
64 |     content = session.post(
65 |         KWIK_D_URL.search(decrypted).group(1),
66 |         follow_redirects=False,
67 |         data={"_token": KWIK_D_TOKEN.search(decrypted).group(1)},
68 |         headers={
69 |             "Referer": "https://kwik.cx/",
70 |         },
71 |     )
72 | 
73 |     return content.headers["Location"]
74 | 
--------------------------------------------------------------------------------
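The Kwik obfuscation bottoms out in `get_string`, which simply reinterprets a digit string from base `s1` into base `s2` over the character map; `decrypt` then subtracts an offset and takes the code point. A toy check of the arithmetic:

```python
# Toy check of get_string's base conversion; inputs are invented.
from animdl.core.codebase.providers.animepahe.inner import get_string

# "1010" read in base 2 is ten, re-encoded in base 10 as "10".
assert get_string("1010", 2, 10) == "10"

# "255" read in base 10 is 255, re-encoded in base 16 as "ff".
assert get_string("255", 10, 16) == "ff"
```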
/disclaimer.md:
--------------------------------------------------------------------------------
 1 | # Disclaimer
 2 | 
 3 | ## This project: animdl
 4 | 
 5 | The core aim of this project is to pair automation with efficiency when extracting what the internet already serves to its users. All content available through the project is hosted by external, non-affiliated sources.
 6 | 
 7 | All content served through this project is publicly accessible. If your site is listed in this project, your content delivery is effectively public; take the necessary measures to counter the techniques used to extract content from your site.
 8 | 
 9 | Think of this project as a normal browser, only more straightforward and specific. While an average browser makes hundreds of requests to fetch everything on a site, this project only makes the requests needed to fetch the content the site serves.
10 | 
11 | This project is to be used at the user's own risk, subject to their government and laws.
12 | 
13 | This project has no control over the content it serves; use of copyrighted content from the providers will not be answered for by the developer and is at the user's own risk.
14 | 
15 | ## DMCA and Copyright Infringements
16 | 
17 | A browser is a tool, and the maliciousness of a tool is determined entirely by its user.
18 | 
19 | This project uses client-side content access mechanisms. Hence, any copyright infringement or DMCA claim concerning this project should be forwarded by the notifier to the associated site. This is one of the main reasons the sites are listed in this project.
20 | 
21 | Do not harass the developer. Any personal information about the developer is intentionally not made public. Exploiting such information without consent in regards to this topic will lead to legal action by the developer.
22 | 
23 | ## Contacting the developer
24 | 
25 | Begin by making a GitHub issue or sending an email to kr.justfoolingaround@gmail.com.
26 | 
--------------------------------------------------------------------------------
/animdl/core/codebase/providers/animtime/__init__.py:
--------------------------------------------------------------------------------
 1 | import functools
 2 | from functools import partial
 3 | 
 4 | from animdl.utils.optopt import regexlib
 5 | 
 6 | from ....config import ANIMTIME
 7 | from ...helpers import construct_site_based_regex
 8 | 
 9 | # NOTE: The named-group names in this pattern were stripped as markup in
10 | # rendering; "title" and "index" are assumptions, the rest is original.
11 | CONTENT_RE = regexlib.compile(r't\[t\.(?P<title>.+?)=(?P<index>\d+)\]="\1"')
12 | REGEX = construct_site_based_regex(ANIMTIME, extra_regex=r"/title/([^/?&]+)")
13 | 
14 | MAIN_JS_RE = regexlib.compile(r'