├── .github ├── dependabot.yml └── workflows │ ├── python-lint.yml │ └── test.yml ├── .gitignore ├── .readthedocs.yaml ├── .ruff.toml ├── CHANGELOG.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── docs ├── code_of_conduct.md ├── img │ └── eagle.png ├── index.md ├── overrides │ └── partials │ │ └── nav.html ├── parameter_parsing.md ├── quickstart.md ├── requirements.txt ├── third_party_packages.md └── video_quality_options.md ├── example ├── fetch_douyin_stream.py ├── fetch_rednote_stream.py └── fetch_soop_stream.py ├── mkdocs.yml ├── poetry.toml ├── pyproject.toml ├── requirements.txt ├── setup.py └── streamget ├── __init__.py ├── __version__.py ├── cli.py ├── data.py ├── help.py ├── js ├── crypto-js.min.js ├── haixiu.js ├── liveme.js ├── taobao-sign.js └── x-bogus.js ├── platforms ├── __init__.py ├── acfun │ ├── __init__.py │ └── live_stream.py ├── baidu │ ├── __init__.py │ └── live_stream.py ├── base.py ├── bigo │ ├── __init__.py │ └── live_stream.py ├── bilibili │ ├── __init__.py │ └── live_stream.py ├── blued │ ├── __init__.py │ └── live_stream.py ├── chzzk │ ├── __init__.py │ └── live_stream.py ├── douyin │ ├── __init__.py │ ├── live_stream.py │ └── utils.py ├── douyu │ ├── __init__.py │ └── live_stream.py ├── faceit │ ├── __init__.py │ └── live_stream.py ├── flextv │ ├── __init__.py │ └── live_stream.py ├── haixiu │ ├── __init__.py │ └── live_stream.py ├── huajiao │ ├── __init__.py │ └── live_stream.py ├── huamao │ ├── __init__.py │ └── live_stream.py ├── huya │ ├── __init__.py │ └── live_stream.py ├── inke │ ├── __init__.py │ └── live_stream.py ├── jd │ ├── __init__.py │ └── live_stream.py ├── kuaishou │ ├── __init__.py │ └── live_stream.py ├── kugou │ ├── __init__.py │ └── live_stream.py ├── langlive │ ├── __init__.py │ └── live_stream.py ├── lehai │ ├── __init__.py │ └── live_stream.py ├── liveme │ ├── __init__.py │ └── live_stream.py ├── look │ ├── __init__.py │ └── live_stream.py ├── maoer │ ├── __init__.py │ └── live_stream.py ├── netease │ ├── __init__.py │ └── live_stream.py ├── pandatv │ ├── __init__.py │ └── live_stream.py ├── piaopiao │ ├── __init__.py │ └── live_stream.py ├── popkontv │ ├── __init__.py │ └── live_stream.py ├── qiandurebo │ ├── __init__.py │ └── live_stream.py ├── rednote │ ├── __init__.py │ └── live_stream.py ├── shopee │ ├── __init__.py │ └── live_stream.py ├── showroom │ ├── __init__.py │ └── live_stream.py ├── sixroom │ ├── __init__.py │ └── live_stream.py ├── soop │ ├── __init__.py │ └── live_stream.py ├── taobao │ ├── __init__.py │ └── live_stream.py ├── tiktok │ ├── __init__.py │ └── live_stream.py ├── twitcasting │ ├── __init__.py │ └── live_stream.py ├── twitch │ ├── __init__.py │ └── live_stream.py ├── vvxq │ ├── __init__.py │ └── live_stream.py ├── weibo │ ├── __init__.py │ └── live_stream.py ├── winktv │ ├── __init__.py │ └── live_stream.py ├── yinbo │ ├── __init__.py │ └── live_stream.py ├── yiqilive │ ├── __init__.py │ └── live_stream.py ├── youtube │ ├── __init__.py │ └── live_stream.py ├── yy │ ├── __init__.py │ └── live_stream.py └── zhihu │ ├── __init__.py │ └── live_stream.py ├── requests ├── __init__.py └── async_http.py ├── scripts ├── node_installer.py └── node_setup.py └── utils.py /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/" 5 | schedule: 6 | interval: "monthly" 7 | open-pull-requests-limit: 5 8 | versioning-strategy: "increase" 9 | allow: 10 | - dependency-type: "direct" 11 | - dependency-type: 
"indirect" -------------------------------------------------------------------------------- /.github/workflows/python-lint.yml: -------------------------------------------------------------------------------- 1 | name: Run Python Lint Checks 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - 'streamget/**' 9 | - 'requirements.txt' 10 | - '.ruff.toml' 11 | 12 | pull_request: 13 | types: 14 | - opened 15 | - synchronize 16 | paths: 17 | - 'streamget/**' 18 | - 'requirements.txt' 19 | - '.ruff.toml' 20 | 21 | jobs: 22 | lint-python: 23 | runs-on: ubuntu-latest 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v4 27 | 28 | - name: Cache dependencies 29 | uses: actions/cache@v3 30 | with: 31 | path: ~/.cache/pip 32 | key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} 33 | restore-keys: | 34 | ${{ runner.os }}-pip- 35 | 36 | - name: Set up Python 37 | uses: actions/setup-python@v5 38 | with: 39 | python-version: "3.10" 40 | 41 | - name: Install dependencies 42 | run: | 43 | python -m pip install --upgrade pip 44 | pip install -r requirements.txt 45 | pip install ruff 46 | 47 | - name: Run ruff lint check 48 | run: ruff check streamget --config .ruff.toml 49 | working-directory: . 50 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | pull_request: 5 | branches: [main] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Set up Python 14 | uses: actions/setup-python@v2 15 | with: 16 | python-version: '3.10' 17 | - name: Install dependencies 18 | run: | 19 | python -m pip install --upgrade pip 20 | pip install -r requirements.txt 21 | - name: Run tests 22 | run: | 23 | python -m unittest discover -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | db.sqlite3-journal 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | .pybuilder/ 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # custom 85 | backup_config/ 86 | logs/ 87 | node/ 88 | node-v*.zip 89 | 90 | # pyenv 91 | # For a library or package, you might want to ignore these files since the code is 92 | # intended to run in multiple environments; otherwise, check them in: 93 | # .python-version 94 | 95 | # pipenv 96 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 97 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 98 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 99 | # install all needed dependencies. 100 | #Pipfile.lock 101 | 102 | # poetry 103 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 104 | # This is especially recommended for binary packages to ensure reproducibility, and is more 105 | # commonly ignored for libraries. 106 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 107 | poetry.lock 108 | 109 | # pdm 110 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 111 | #pdm.lock 112 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 113 | # in version control. 114 | # https://pdm.fming.dev/#use-with-ide 115 | .pdm.toml 116 | 117 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 118 | __pypackages__/ 119 | 120 | # Celery stuff 121 | celerybeat-schedule 122 | celerybeat.pid 123 | 124 | # SageMath parsed files 125 | *.sage.py 126 | 127 | # Environments 128 | .env 129 | .venv 130 | env/ 131 | venv/ 132 | ENV/ 133 | env.bak/ 134 | venv.bak/ 135 | 136 | # Spyder project settings 137 | .spyderproject 138 | .spyproject 139 | 140 | # Rope project settings 141 | .ropeproject 142 | 143 | # mkdocs documentation 144 | /site 145 | 146 | # mypy 147 | .mypy_cache/ 148 | .dmypy.json 149 | dmypy.json 150 | 151 | # Pyre type checker 152 | .pyre/ 153 | 154 | # pytype static type analyzer 155 | .pytype/ 156 | 157 | # Cython debug symbols 158 | cython_debug/ 159 | 160 | # PyCharm 161 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 162 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 163 | # and can be added to the global gitignore or merged into this file. For a more nuclear 164 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
165 | #.idea/ 166 | 167 | backup_config/ 168 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version, and other tools you might need 8 | build: 9 | os: ubuntu-24.04 10 | tools: 11 | python: "3.13" 12 | 13 | # Build documentation with Mkdocs 14 | mkdocs: 15 | configuration: mkdocs.yml 16 | 17 | # Optionally, but recommended, 18 | # declare the Python requirements required to build your documentation 19 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 20 | python: 21 | install: 22 | - requirements: docs/requirements.txt 23 | -------------------------------------------------------------------------------- /.ruff.toml: -------------------------------------------------------------------------------- 1 | line-length = 120 2 | 3 | [format] 4 | quote-style = "double" 5 | 6 | 7 | [lint] 8 | preview = false 9 | select = [ 10 | "B", # flake8-bugbear rules 11 | "C4", # flake8-comprehensions 12 | "E", # pycodestyle E rules 13 | "F", # pyflakes rules 14 | "FURB", # refurb rules 15 | "I", # isort rules 16 | "N", # pep8-naming 17 | "PT", # flake8-pytest-style rules 18 | "PLC0208", # iteration-over-set 19 | "PLC0414", # useless-import-alias 20 | "PLE0604", # invalid-all-object 21 | "PLE0605", # invalid-all-format 22 | "PLR0402", # manual-from-import 23 | "PLR1711", # useless-return 24 | "PLR1714", # repeated-equality-comparison 25 | "RUF013", # implicit-optional 26 | "RUF019", # unnecessary-key-check 27 | "RUF100", # unused-noqa 28 | "RUF101", # redirected-noqa 29 | "RUF200", # invalid-pyproject-toml 30 | "RUF022", # unsorted-dunder-all 31 | "S506", # unsafe-yaml-load 32 | "SIM", # flake8-simplify rules 33 | "TRY400", # error-instead-of-exception 34 | "TRY401", # verbose-log-message 35 | "UP", # pyupgrade rules 36 | "W191", # tab-indentation 37 | "W605", # invalid-escape-sequence 38 | ] 39 | 40 | ignore = [ 41 | "E402", # module-import-not-at-top-of-file 42 | "E711", # none-comparison 43 | "E712", # true-false-comparison 44 | "E721", # type-comparison 45 | "E722", # bare-except 46 | "F821", # undefined-name 47 | "F841", # unused-variable 48 | "FURB113", # repeated-append 49 | "FURB152", # math-constant 50 | "UP007", # non-pep604-annotation 51 | "UP032", # f-string 52 | "UP045", # non-pep604-annotation-optional 53 | "B005", # strip-with-multi-characters 54 | "B006", # mutable-argument-default 55 | "B007", # unused-loop-control-variable 56 | "B026", # star-arg-unpacking-after-keyword-arg 57 | "B903", # class-as-data-structure 58 | "B904", # raise-without-from-inside-except 59 | "B905", # zip-without-explicit-strict 60 | "N806", # non-lowercase-variable-in-function 61 | "N815", # mixed-case-variable-in-class-scope 62 | "PT011", # pytest-raises-too-broad 63 | "SIM102", # collapsible-if 64 | "SIM103", # needless-bool 65 | "SIM105", # suppressible-exception 66 | "SIM107", # return-in-try-except-finally 67 | "SIM108", # if-else-block-instead-of-if-exp 68 | "SIM113", # enumerate-for-loop 69 | "SIM117", # multiple-with-statements 70 | "SIM210", # if-expr-with-true-false 71 | ] 72 | 73 | 74 | [lint.per-file-ignores] 75 | "__init__.py" = [ 76 | "F401", # unused-import 77 | "F811", # redefined-while-unused 78 | ] 
-------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 6 | 7 | 8 | ## 4.0.5 (22nd May, 2025) 9 | 10 | ### Fixed 11 | 12 | Fix blued and taobao live stream URL fetch. 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Hmily 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include streamget/js/*.js -------------------------------------------------------------------------------- /docs/code_of_conduct.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | We expect contributors to our projects and online spaces to follow [the Python Software Foundation’s Code of Conduct](https://www.python.org/psf/conduct/). 4 | 5 | The Python community is made up of members from around the globe with a diverse set of skills, personalities, and experiences. It is through these differences that our community experiences great successes and continued growth. When you're working with members of the community, this Code of Conduct will help steer your interactions and keep Python a positive, successful, and growing community. 6 | 7 | ## Our Community 8 | 9 | Members of the Python community are **open, considerate, and respectful**. Behaviours that reinforce these values contribute to a positive environment, and include: 10 | 11 | * **Being open.** Members of the community are open to collaboration, whether it's on PEPs, patches, problems, or otherwise. 12 | * **Focusing on what is best for the community.** We're respectful of the processes set forth in the community, and we work within them. 13 | * **Acknowledging time and effort.** We're respectful of the volunteer efforts that permeate the Python community. We're thoughtful when addressing the efforts of others, keeping in mind that often times the labor was completed simply for the good of the community. 
14 | * **Being respectful of differing viewpoints and experiences.** We're receptive to constructive comments and criticism, as the experiences and skill sets of other members contribute to the whole of our efforts. 15 | * **Showing empathy towards other community members.** We're attentive in our communications, whether in person or online, and we're tactful when approaching differing views. 16 | * **Being considerate.** Members of the community are considerate of their peers -- other Python users. 17 | * **Being respectful.** We're respectful of others, their positions, their skills, their commitments, and their efforts. 18 | * **Gracefully accepting constructive criticism.** When we disagree, we are courteous in raising our issues. 19 | * **Using welcoming and inclusive language.** We're accepting of all who wish to take part in our activities, fostering an environment where anyone can participate and everyone can make a difference. 20 | 21 | ## Our Standards 22 | 23 | Every member of our community has the right to have their identity respected. The Python community is dedicated to providing a positive experience for everyone, regardless of age, gender identity and expression, sexual orientation, disability, physical appearance, body size, ethnicity, nationality, race, or religion (or lack thereof), education, or socio-economic status. 24 | 25 | ## Inappropriate Behavior 26 | 27 | Examples of unacceptable behavior by participants include: 28 | 29 | * Harassment of any participants in any form 30 | * Deliberate intimidation, stalking, or following 31 | * Logging or taking screenshots of online activity for harassment purposes 32 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 33 | * Violent threats or language directed against another person 34 | * Incitement of violence or harassment towards any individual, including encouraging a person to commit suicide or to engage in self-harm 35 | * Creating additional online accounts in order to harass another person or circumvent a ban 36 | * Sexual language and imagery in online communities or in any conference venue, including talks 37 | * Insults, put downs, or jokes that are based upon stereotypes, that are exclusionary, or that hold others up for ridicule 38 | * Excessive swearing 39 | * Unwelcome sexual attention or advances 40 | * Unwelcome physical contact, including simulated physical contact (eg, textual descriptions like "hug" or "backrub") without consent or after a request to stop 41 | * Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others 42 | * Sustained disruption of online community discussions, in-person presentations, or other in-person events 43 | * Continued one-on-one communication after requests to cease 44 | * Other conduct that is inappropriate for a professional audience including people of many different backgrounds 45 | 46 | Community members asked to stop any inappropriate behavior are expected to comply immediately. 47 | 48 | ## Enforcement 49 | 50 | We take Code of Conduct violations seriously, and will act to ensure our spaces are welcoming, inclusive, and professional environments to communicate in. 51 | 52 | you may [make a report to the Python Software Foundation](https://www.python.org/psf/conduct/reporting/). 
-------------------------------------------------------------------------------- /docs/img/eagle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/docs/img/eagle.png -------------------------------------------------------------------------------- /docs/overrides/partials/nav.html: -------------------------------------------------------------------------------- 1 | {% import "partials/nav-item.html" as item with context %} 2 | 3 | 4 | {% set class = "md-nav md-nav--primary" %} 5 | {% if "navigation.tabs" in features %} 6 | {% set class = class ~ " md-nav--lifted" %} 7 | {% endif %} 8 | {% if "toc.integrate" in features %} 9 | {% set class = class ~ " md-nav--integrated" %} 10 | {% endif %} 11 | 12 | 13 | 48 | -------------------------------------------------------------------------------- /docs/parameter_parsing.md: -------------------------------------------------------------------------------- 1 | # Parameter Parsing 2 | 3 | ## Overview 4 | 5 | This guide provides a comprehensive introduction to using the `StreamGet` library to fetch and process live streaming data from various platforms. It covers the essential request parameters, methods, and attributes you need to know to get started. 6 | 7 | ## Instantiating an Object 8 | 9 | To begin, you need to instantiate an object for the specific live streaming platform you are interested in. For example, to work with Douyin Live, you would use: 10 | 11 | ```python 12 | >>> from streamget import DouyinLiveStream 13 | >>> live = DouyinLiveStream() 14 | ``` 15 | 16 | You can also pass additional parameters during instantiation, such as cookies or proxy settings, which will be discussed later in this guide. 17 | 18 | ## fetch_web_stream_data Method 19 | 20 | The `fetch_web_stream_data` method is used to fetch data from a live streaming webpage. It has two main parameters: 21 | 22 | - **url**: The URL of the live streaming webpage. 23 | - **process_data**: A boolean parameter that determines whether the fetched data should be processed or returned in its raw form. 24 | 25 | ### Example Usage 26 | 27 | ```python 28 | >>> url = "https://example.com/live" 29 | >>> data = asyncio.run(live.fetch_web_stream_data(url, process_data=True)) 30 | ``` 31 | 32 | ### Parameters 33 | 34 | - **url**: The URL of the live streaming webpage. 35 | - **process_data**: If `True`, the data will be processed and returned in a structured format. If `False`, the raw data from the official API will be returned. 36 | 37 | ### Return Value 38 | 39 | The method returns a dictionary containing the processed data. If `process_data` is `True`, the dictionary might look like this: 40 | 41 | ```json 42 | { 43 | "anchor_name": "xxxxx", 44 | "is_live": True, 45 | "title": "xxxx", 46 | ... 47 | } 48 | ``` 49 | 50 | If `process_data` is `False`, the raw data from the official API will be returned. 51 | 52 | ## fetch_stream_url Method 53 | 54 | The `fetch_stream_url` method is used to fetch the streaming URL from the processed data obtained from `fetch_web_stream_data`. It has two main parameters: 55 | 56 | - **data**: The processed data returned by `fetch_web_stream_data`. 57 | - **video_quality**: The desired video quality of the stream. 
58 | 59 | ### Example Usage 60 | 61 | ```python 62 | >>> stream_obj = asyncio.run(live.fetch_stream_url(data, video_quality="OD")) 63 | ``` 64 | 65 | ### Parameters 66 | 67 | - **data**: The processed data returned by `fetch_web_stream_data`. 68 | - **video_quality**: The desired video quality of the stream (e.g., "OD" for original definition). please refer to the [Video Quality Options](https://streamget.readthedocs.io/video_quality_options/). 69 | 70 | ### Return Value 71 | 72 | The method returns a `Stream` object containing the streaming URL and other relevant information. 73 | 74 | ## Stream Object 75 | 76 | The `Stream` object returned by `fetch_stream_url` has the following attributes: 77 | 78 | - **platform**: The name of the live streaming platform. 79 | - **anchor_name**: The name of the live stream anchor. 80 | - **is_live**: A boolean indicating whether the stream is live. 81 | - **title**: The title of the live stream. 82 | - **quality**: The quality of the stream. 83 | - **m3u8_url**: The URL of the stream in M3U8 format. 84 | - **flv_url**: The URL of the stream in FLV format. 85 | - **record_url**: The URL for recording the stream. 86 | - **new_cookies**: Any new cookies obtained during the request. 87 | - **new_token**: Any new token obtained during the request. 88 | - **extra**: Any additional information. 89 | 90 | ### Example Stream Object 91 | 92 | ```python 93 | StreamData( 94 | platform='抖音', 95 | anchor_name='Jack', 96 | is_live=True, 97 | title='Hello everyone ~', 98 | quality='OD', 99 | m3u8_url='https://example.com/xxxx.m3u8', 100 | flv_url='https://example.com/xxxx.flv', 101 | record_url='https://example.com/xxxx.flv', 102 | new_cookies=None, 103 | new_token=None, 104 | extra=None 105 | ) 106 | ``` 107 | 108 | ## Converting to JSON 109 | 110 | The `Stream` object provides a `.to_json()` method that converts the object's attributes to a JSON string. 111 | 112 | ### Example Usage 113 | 114 | ```python 115 | >>> json_str = stream_obj.to_json() 116 | >>> print(json_str) 117 | '{"platform": "抖音", "anchor_name": "Jack", "is_live": True, "flv_url": "https://example.com/xxxx.flv", "m3u8_url": "https://example.com/xxxx.m3u8" ...}' 118 | ``` 119 | 120 | ## Using Cookies 121 | 122 | To include additional cookies in the outgoing request, you can pass a string of cookies during object instantiation: 123 | 124 | ```python 125 | >>> cookies = 'key1=value1;key2=value2;' 126 | >>> live = DouyinLiveStream(cookies=cookies) 127 | ``` 128 | 129 | The final `Stream` object returned will also contain a `new_cookies` attribute if any new cookies are obtained during the request. 130 | 131 | ## Using Proxy 132 | 133 | For platforms that require proxy access, you can pass a proxy URL during object instantiation: 134 | 135 | ```python 136 | >>> proxy = "http://127.0.0.1:7890" 137 | >>> live = DouyinLiveStream(proxy=proxy) 138 | ``` 139 | 140 | 141 | 142 | ## Troubleshooting 143 | 144 | If you encounter issues with parsing URLs, it might be due to network issues or an invalid URL: 145 | 146 | In such cases, please check the following: 147 | 148 | 1. **URL Validity**: Ensure that the URL is correct and accessible. 149 | 2. **Network Connection**: Verify that your network connection is stable. 150 | 3. **Retry the Request**: Sometimes, retrying the request can resolve transient issues. 151 | 152 | If the problem persists, you may need to manually configure your environment or seek further assistance. 
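The "Retry the Request" suggestion above can be wrapped in a small helper. Below is a minimal sketch, assuming the `DouyinLiveStream` API described in this guide; the helper name `fetch_with_retry`, the attempt count, the delay, and the room URL are illustrative only and not part of the StreamGet API.

```python
import asyncio

from streamget import DouyinLiveStream


async def fetch_with_retry(live, url: str, attempts: int = 3, delay: float = 2.0) -> dict:
    """Retry fetch_web_stream_data a few times before giving up (illustrative values)."""
    last_error = None
    for attempt in range(1, attempts + 1):
        try:
            return await live.fetch_web_stream_data(url, process_data=True)
        except Exception as e:  # network hiccups or transient API errors
            last_error = e
            print(f"Attempt {attempt}/{attempts} failed: {e}")
            await asyncio.sleep(delay)
    raise last_error


async def main():
    live = DouyinLiveStream()
    data = await fetch_with_retry(live, "https://live.douyin.com/xxxxxx")
    print(data)

# asyncio.run(main())
```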
153 | 
154 | 
-------------------------------------------------------------------------------- /docs/quickstart.md: --------------------------------------------------------------------------------
1 | # QuickStart
2 | 
3 | First, start by importing StreamGet:
4 | 
5 | ```python
6 | >>> import asyncio
7 | >>> import streamget
8 | ```
9 | 
10 | Now, let's try to fetch data from a live room webpage.
11 | 
12 | ```python
13 | >>> live = streamget.DouyinLiveStream()
14 | >>> data = asyncio.run(live.fetch_web_stream_data(url))
15 | >>> data
16 | {'anchor_name': 'xxxxx', 'is_live': False}
17 | or
18 | {'anchor_name': 'xxxxx', 'is_live': True, 'title': 'xxxx', ...}
19 | ```
20 | 
21 | Similarly, to get the official API raw data:
22 | 
23 | ```python
24 | >>> data = asyncio.run(live.fetch_web_stream_data(url, process_data=False))
25 | >>> data
26 | The official API raw data will be returned
27 | ```
28 | 
29 | Or, you can also directly obtain the live streaming source data.
30 | 
31 | ```python
32 | >>> import asyncio
33 | >>> from streamget import DouyinLiveStream
34 | >>> live = DouyinLiveStream()
35 | >>> data = asyncio.run(live.fetch_web_stream_data(url, process_data=True))
36 | 
37 | >>> stream_obj = asyncio.run(live.fetch_stream_url(data, "OD"))
38 | StreamData(platform='xxxx', anchor_name='xxxx', is_live=True, m3u8_url="xxx"...)
39 | 
40 | ```
41 | 
42 | Note that the `process_data` parameter must be `True` in this case.
43 | 
44 | ## Install NodeJS
45 | 
46 | Some live streaming platforms require Node.js dependencies to obtain data.
47 | 
48 | You can install Node.js using the built-in command:
49 | 
50 | ```bash
51 | streamget install-node
52 | ```
53 | 
54 | You can also view the Node.js installation help info:
55 | 
56 | ```bash
57 | streamget install-node -h
58 | ```
59 | 
60 | If the installation fails, please download Node.js manually and configure the environment variables yourself.
61 | 
62 | ## JSON Response Content
63 | 
64 | Use the `to_json` method to encode the stream data as a JSON string.
65 | 
66 | ```python
67 | >>> stream_obj = asyncio.run(live.fetch_stream_url(data, "OD"))
68 | StreamData(platform='xxxx', anchor_name='xxxx', is_live=True, m3u8_url="xxx"...)
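# stream_obj is a StreamData instance (see streamget/data.py); to_json() below serializes it to a JSON string.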
69 | >>> json_str = stream_obj.to_json()
70 | '{"anchor_name": "xxxx", "is_live": true, "flv_url": "...", "m3u8_url": "..."}'
71 | ```
72 | 
73 | ## Use Cookies
74 | 
75 | To include additional cookies in the outgoing request, use the `cookies` keyword argument:
76 | 
77 | ```python
78 | >>> cookies = 'key1=value1;key2=value2;' # string
79 | >>> live = streamget.DouyinLiveStream(cookies=cookies)
80 | ```
81 | 
82 | By initiating the request in this way, the final `Stream` object returned will also contain a `new_cookies` attribute.
83 | 
84 | ## Use Proxy
85 | 
86 | For platforms that require proxy access, you can use the proxy parameter when instantiating objects:
87 | 
88 | ```python
89 | >>> proxy = 'http://127.0.0.1:7890'
90 | >>> live = streamget.DouyinLiveStream(proxy=proxy)
91 | ```
92 | 
93 | ## Supported Platforms
94 | 
95 | The currently supported platforms are as follows:
96 | 
97 | ```markdown
98 | 抖音 -> DouyinLiveStream
99 | TikTok -> TikTokLiveStream
100 | 快手 -> KwaiLiveStream
101 | 虎牙 -> HuyaLiveStream
102 | 斗鱼 -> DouyuLiveStream
103 | YY -> YYLiveStream
104 | B站 -> BilibiliLiveStream
105 | 小红书 -> RedNoteLiveStream
106 | Bigo -> BigoLiveStream
107 | Blued -> BluedLiveStream
108 | SOOP -> SoopLiveStream
109 | 网易CC -> NeteaseLiveStream
110 | 千度热播 -> QiandureboLiveStream
111 | PandaTV -> PandaLiveStream
112 | 猫耳FM -> MaoerLiveStream
113 | Look -> LookLiveStream
114 | WinkTV -> WinkTVLiveStream
115 | FlexTV -> FlexTVLiveStream
116 | PopkonTV -> PopkonTVLiveStream
117 | TwitCasting -> TwitCastingLiveStream
118 | 百度直播 -> BaiduLiveStream
119 | 微博直播 -> WeiboLiveStream
120 | 酷狗直播 -> KugouLiveStream
121 | TwitchTV -> TwitchLiveStream
122 | LiveMe -> LiveMeLiveStream
123 | 花椒直播 -> HuajiaoLiveStream
124 | ShowRoom -> ShowRoomLiveStream
125 | Acfun -> AcfunLiveStream
126 | 映客直播 -> InkeLiveStream
127 | 音播直播 -> YinboLiveStream
128 | 知乎直播 -> ZhihuLiveStream
129 | CHZZK -> ChzzkLiveStream
130 | 嗨秀直播 -> HaixiuLiveStream
131 | VV星球直播 -> VVXQLiveStream
132 | 17Live -> YiqiLiveStream
133 | 浪Live -> LangLiveStream
134 | 飘飘直播 -> PiaopaioLiveStream
135 | 六间房直播 -> SixRoomLiveStream
136 | 乐嗨直播 -> LehaiLiveStream
137 | 花猫直播 -> HuamaoLiveStream
138 | Shopee -> ShopeeLiveStream
139 | Youtube -> YoutubeLiveStream
140 | 淘宝 -> TaobaoLiveStream
141 | 京东 -> JDLiveStream
142 | Faceit -> FaceitLiveStream
143 | ```
144 | 
145 | You can use the command line to view the supported live streaming platforms:
146 | 
147 | ```bash
148 | streamget -h
149 | ```
150 | 
151 | It will return the help info, which includes a list of supported platforms.
152 | 
-------------------------------------------------------------------------------- /docs/requirements.txt: --------------------------------------------------------------------------------
1 | # Documentation
2 | mkdocs==1.6.1
3 | mkautodoc==0.2.0
4 | mkdocs-material==9.6.14
-------------------------------------------------------------------------------- /docs/third_party_packages.md: --------------------------------------------------------------------------------
1 | # Third Party Packages
2 | 
3 | As StreamGet usage grows, there is an expanding community of developers building tools and libraries that integrate with StreamGet, or depend on StreamGet. Here are some of them.
4 | 
5 | ## Plugins
6 | 
7 | ### StreamCap
8 | 
9 | [GitHub](https://github.com/ihmily/StreamCap/) - [Documentation](https://github.com/ihmily/StreamCap/)
10 | 
11 | A multi-platform live stream auto-recording tool · based on FFmpeg · supports monitoring / scheduled recording / transcoding
-------------------------------------------------------------------------------- /docs/video_quality_options.md: --------------------------------------------------------------------------------
1 | # Video Quality Options
2 | 
3 | When using the `fetch_stream_url` method, the `video_quality` parameter allows you to specify the desired video quality of the stream. The supported video qualities and their corresponding values are listed below:
4 | 
5 | ## Supported Video Qualities
6 | 
7 | | Quality Index | Quality Name | Description |
8 | | ------------- | ------------ | ------------------------------ |
9 | | 0 | OD | Original Definition (最高画质) |
10 | | 1 | UHD | Ultra High Definition (超高清) |
11 | | 2 | HD | High Definition (高清) |
12 | | 3 | SD | Standard Definition (标清) |
13 | | 4 | LD | Low Definition (流畅) |
14 | 
15 | ## Usage
16 | 
17 | You can specify the video quality using either the quality name (e.g., "OD", "UHD") or the corresponding index (e.g., 0, 1, 2, 3, 4); the sketch at the end of this page shows how the two forms map to each other.
18 | 
19 | If the `video_quality` parameter is not provided or set to `None`, the default quality will be **OD (Original Definition)**, which is the highest available quality. For example:
20 | 
21 | ```python
22 | # Using the default quality (OD)
23 | stream_obj = asyncio.run(live.fetch_stream_url(data))
24 | 
25 | # Explicitly setting the quality to OD
26 | stream_obj = asyncio.run(live.fetch_stream_url(data, video_quality="OD"))
27 | 
28 | # Using quality index for OD
29 | stream_obj = asyncio.run(live.fetch_stream_url(data, video_quality=0))
30 | ```
31 | 
32 | ---
33 | 
34 | ### Important Notes
35 | 
36 | - **Default Quality**: If the `video_quality` parameter is omitted or set to `None`, the method will automatically select the highest available quality, which is **OD (Original Definition)**.
37 | - **Fallback Behavior**: If the live broadcast room does not support the selected video quality, the method will automatically fall back to the next supported quality in descending order (e.g., if "UHD" is not supported, it will try "HD", then "SD", and so on).
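As a small illustration of the table above, the mapping between quality indices and names can be expressed directly in Python. The `QUALITY_NAMES` dict and `normalize_quality` helper below are purely illustrative sketches (the values come from the table); they are not part of the StreamGet API.

```python
# Quality index -> quality name, mirroring the table above (illustrative helper, not part of StreamGet).
QUALITY_NAMES = {0: "OD", 1: "UHD", 2: "HD", 3: "SD", 4: "LD"}


def normalize_quality(quality: int | str) -> str:
    """Accept either an index (0-4) or a name ("OD".."LD") and return the quality name."""
    if isinstance(quality, int):
        return QUALITY_NAMES[quality]
    name = str(quality).upper()
    if name not in QUALITY_NAMES.values():
        raise ValueError(f"Unsupported video quality: {quality!r}")
    return name


# Per the usage notes above, these two calls are equivalent:
# stream_obj = asyncio.run(live.fetch_stream_url(data, video_quality=2))
# stream_obj = asyncio.run(live.fetch_stream_url(data, video_quality="HD"))
```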
38 | -------------------------------------------------------------------------------- /example/fetch_douyin_stream.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from streamget import DouyinLiveStream 4 | 5 | 6 | async def main(): 7 | # URL of the Douyin live stream 8 | url = "https://live.douyin.com/991562466558" 9 | 10 | # Initialize the DouyinLiveStream object 11 | douyin_stream = DouyinLiveStream() 12 | 13 | try: 14 | # Fetch the live stream data from the provided URL 15 | data = await douyin_stream.fetch_web_stream_data(url) 16 | 17 | # Fetch the stream URL 18 | stream_data = await douyin_stream.fetch_stream_url(data, "OD") 19 | print(stream_data) 20 | 21 | # Convert to json string 22 | json_str = stream_data.to_json() 23 | print(json_str) 24 | except Exception as e: 25 | print(f"An error occurred: {e}") 26 | 27 | 28 | if __name__ == "__main__": 29 | asyncio.run(main()) 30 | -------------------------------------------------------------------------------- /example/fetch_rednote_stream.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from streamget import RedNoteLiveStream 4 | 5 | 6 | async def main(): 7 | # URL of the RedNote live stream 8 | url = "https://www.xiaohongshu.com/user/profile/5ac7123ce8ac2b1d1503e920" 9 | 10 | # Initialize the RedNote object 11 | rednote_stream = RedNoteLiveStream() 12 | 13 | try: 14 | # Fetch the live stream data from the provided URL 15 | data = await rednote_stream.fetch_app_stream_data(url) 16 | 17 | # Fetch the stream URL and convert it to JSON format 18 | stream_data = await rednote_stream.fetch_stream_url(data) 19 | print(stream_data) 20 | 21 | # Convert to json string 22 | json_str = stream_data.to_json() 23 | print(json_str) 24 | except Exception as e: 25 | print(f"An error occurred: {e}") 26 | 27 | 28 | if __name__ == "__main__": 29 | asyncio.run(main()) 30 | -------------------------------------------------------------------------------- /example/fetch_soop_stream.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from streamget import SoopLiveStream 4 | 5 | 6 | async def main(): 7 | # URL of the SOOP Live 8 | url = "https://play.sooplive.co.kr/alswl2208/281343812" 9 | 10 | # Initialize the SoopLiveStream object 11 | soop_stream = SoopLiveStream( 12 | # Use proxy, Ensure that your agent can access it normally 13 | proxy_addr='http://127.0.0.1:7890' 14 | ) 15 | 16 | try: 17 | # Fetch the live stream data from the provided URL 18 | data = await soop_stream.fetch_web_stream_data(url) 19 | 20 | # Fetch the stream data object 21 | stream_data = await soop_stream.fetch_stream_url(data, "OD") 22 | print(stream_data) 23 | 24 | # Convert object to json string 25 | json_str = stream_data.to_json() 26 | print(json_str) 27 | except Exception as e: 28 | print(f"An error occurred: {e}") 29 | 30 | 31 | # Run the asynchronous main function 32 | if __name__ == "__main__": 33 | asyncio.run(main()) 34 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: StreamGet 2 | site_description: A Multi-Platform Live Stream Parser Library for Python. 
3 | site_url: https://streamget.readthedocs.io/ 4 | 5 | theme: 6 | name: 'material' 7 | custom_dir: 'docs/overrides' 8 | palette: 9 | - scheme: 'default' 10 | media: '(prefers-color-scheme: light)' 11 | toggle: 12 | icon: 'material/lightbulb' 13 | name: "Switch to dark mode" 14 | - scheme: 'slate' 15 | media: '(prefers-color-scheme: dark)' 16 | primary: 'blue' 17 | toggle: 18 | icon: 'material/lightbulb-outline' 19 | name: 'Switch to light mode' 20 | 21 | repo_name: ihmily/streamget 22 | repo_url: https://github.com/ihmily/streamget/ 23 | edit_uri: "" 24 | 25 | nav: 26 | - Introduction: 'index.md' 27 | - QuickStart: 'quickstart.md' 28 | - Guides: 29 | - Parameter Parsing: 'parameter_parsing.md' 30 | - Video Quality Options: 'video_quality_options.md' 31 | - Community: 32 | - Third Party Packages: 'third_party_packages.md' 33 | - Code of Conduct: 'code_of_conduct.md' 34 | 35 | markdown_extensions: 36 | - admonition 37 | - codehilite: 38 | css_class: highlight 39 | - mkautodoc -------------------------------------------------------------------------------- /poetry.toml: -------------------------------------------------------------------------------- 1 | [virtualenvs] 2 | in-project = true 3 | create = true 4 | prefer-active-python = true -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "streamget" 3 | version = "4.0.5" 4 | description = "A Multi-Platform Live Stream Parser Library." 5 | authors = [{ name = "Hmily" }] 6 | license = {text = "MIT"} 7 | readme = "README.md" 8 | url='https://github.com/ihmily/streamget' 9 | keywords = ["live", "stream"] 10 | requires-python = ">=3.10,<4.0" 11 | 12 | dependencies = [ 13 | "requests>=2.31.0", 14 | "loguru>=0.7.3", 15 | "pycryptodome>=3.20.0", 16 | "distro>=1.9.0", 17 | "tqdm>=4.67.1", 18 | "httpx[http2]>=0.28.1", 19 | "PyExecJS>=1.5.1" 20 | ] 21 | 22 | [project.urls] 23 | Changelog = "https://github.com/ihmily/streamget/blob/main/CHANGELOG.md" 24 | Documentation = "https://streamget.readthedocs.io" 25 | Homepage = "https://github.com/ihmily/streamget" 26 | Source = "https://github.com/ihmily/streamget" 27 | 28 | [build-system] 29 | requires = ["poetry-core>=1.0.0"] 30 | build-backend = "poetry.core.masonry.api" 31 | 32 | [project.scripts] 33 | streamget = "streamget.cli:main" -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests>=2.31.0 2 | loguru>=0.7.3 3 | pycryptodome>=3.20.0 4 | distro>=1.9.0 5 | tqdm>=4.67.1 6 | httpx[http2]>=0.28.1 7 | PyExecJS>=1.5.1 8 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | 3 | with open('README.md', encoding='utf-8') as f: 4 | readme = f.read() 5 | 6 | setup( 7 | name='streamget', 8 | version='4.0.5', 9 | author='Hmily', 10 | description='A Multi-Platform Live Stream Parser Library.', 11 | long_description=readme, 12 | long_description_content_type='text/markdown', 13 | url='https://github.com/ihmily/streamget', 14 | project_urls={ 15 | "Documentation": "https://streamget.readthedocs.io", 16 | "Source": "https://github.com/ihmily/streamget" 17 | }, 18 | include_package_data=True, 19 | package_data={ 20 | 'streamget': ['js/*.js'], 21 | }, 22 | 
packages=find_packages(), 23 | install_requires=[ 24 | 'requests>=2.31.0', 25 | 'loguru>=0.7.3', 26 | 'pycryptodome>=3.20.0', 27 | 'distro>=1.9.0', 28 | 'tqdm>=4.67.1', 29 | 'httpx[http2]>=0.28.1', 30 | 'PyExecJS>=1.5.1', 31 | ], 32 | classifiers=[ 33 | 'Development Status :: 3 - Alpha', 34 | 'Intended Audience :: Developers', 35 | 'Programming Language :: Python :: 3', 36 | 'Programming Language :: Python :: 3 :: Only', 37 | 'Programming Language :: Python :: 3.10', 38 | 'Programming Language :: Python :: 3.11', 39 | 'Programming Language :: Python :: 3.12', 40 | 'Programming Language :: Python :: 3.13', 41 | ], 42 | entry_points={ 43 | 'console_scripts': [ 44 | 'streamget=streamget.cli:main' 45 | ] 46 | } 47 | ) 48 | -------------------------------------------------------------------------------- /streamget/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from pathlib import Path 4 | 5 | from .__version__ import __description__, __title__, __version__ 6 | 7 | current_file_path = Path(__file__).resolve() 8 | current_dir = current_file_path.parent 9 | JS_SCRIPT_PATH = current_dir / 'js' 10 | 11 | execute_dir = os.path.split(os.path.realpath(sys.argv[0]))[0] 12 | node_execute_dir = Path(execute_dir) / 'node' 13 | current_env_path = os.environ.get('PATH') 14 | os.environ['PATH'] = str(node_execute_dir) + os.pathsep + current_env_path 15 | 16 | from .data import StreamData 17 | from .platforms.acfun.live_stream import AcfunLiveStream 18 | from .platforms.baidu.live_stream import BaiduLiveStream 19 | from .platforms.bigo.live_stream import BigoLiveStream 20 | from .platforms.bilibili.live_stream import BilibiliLiveStream 21 | from .platforms.blued.live_stream import BluedLiveStream 22 | from .platforms.chzzk.live_stream import ChzzkLiveStream 23 | from .platforms.douyin.live_stream import DouyinLiveStream 24 | from .platforms.douyu.live_stream import DouyuLiveStream 25 | from .platforms.faceit.live_stream import FaceitLiveStream 26 | from .platforms.flextv.live_stream import FlexTVLiveStream 27 | from .platforms.haixiu.live_stream import HaixiuLiveStream 28 | from .platforms.huajiao.live_stream import HuajiaoLiveStream 29 | from .platforms.huamao.live_stream import HuamaoLiveStream 30 | from .platforms.huya.live_stream import HuyaLiveStream 31 | from .platforms.inke.live_stream import InkeLiveStream 32 | from .platforms.jd.live_stream import JDLiveStream 33 | from .platforms.kuaishou.live_stream import KwaiLiveStream 34 | from .platforms.kugou.live_stream import KugouLiveStream 35 | from .platforms.langlive.live_stream import LangLiveStream 36 | from .platforms.lehai.live_stream import LehaiLiveStream 37 | from .platforms.liveme.live_stream import LiveMeLiveStream 38 | from .platforms.look.live_stream import LookLiveStream 39 | from .platforms.maoer.live_stream import MaoerLiveStream 40 | from .platforms.netease.live_stream import NeteaseLiveStream 41 | from .platforms.pandatv.live_stream import PandaLiveStream 42 | from .platforms.piaopiao.live_stream import PiaopaioLiveStream 43 | from .platforms.popkontv.live_stream import PopkonTVLiveStream 44 | from .platforms.qiandurebo.live_stream import QiandureboLiveStream 45 | from .platforms.rednote.live_stream import RedNoteLiveStream 46 | from .platforms.shopee.live_stream import ShopeeLiveStream 47 | from .platforms.showroom.live_stream import ShowRoomLiveStream 48 | from .platforms.sixroom.live_stream import SixRoomLiveStream 49 | from .platforms.soop.live_stream 
import SoopLiveStream 50 | from .platforms.taobao.live_stream import TaobaoLiveStream 51 | from .platforms.tiktok.live_stream import TikTokLiveStream 52 | from .platforms.twitcasting.live_stream import TwitCastingLiveStream 53 | from .platforms.twitch.live_stream import TwitchLiveStream 54 | from .platforms.vvxq.live_stream import VVXQLiveStream 55 | from .platforms.weibo.live_stream import WeiboLiveStream 56 | from .platforms.winktv.live_stream import WinkTVLiveStream 57 | from .platforms.yinbo.live_stream import YinboLiveStream 58 | from .platforms.yiqilive.live_stream import YiqiLiveStream 59 | from .platforms.youtube.live_stream import YoutubeLiveStream 60 | from .platforms.yy.live_stream import YYLiveStream 61 | from .platforms.zhihu.live_stream import ZhihuLiveStream 62 | 63 | __all__ = [ 64 | "AcfunLiveStream", 65 | "BaiduLiveStream", 66 | "BigoLiveStream", 67 | "BilibiliLiveStream", 68 | "BluedLiveStream", 69 | "ChzzkLiveStream", 70 | "DouyinLiveStream", 71 | "DouyuLiveStream", 72 | "FaceitLiveStream", 73 | "FlexTVLiveStream", 74 | "HaixiuLiveStream", 75 | "HuajiaoLiveStream", 76 | "HuamaoLiveStream", 77 | "HuyaLiveStream", 78 | "InkeLiveStream", 79 | "JDLiveStream", 80 | "KugouLiveStream", 81 | "KwaiLiveStream", 82 | "LangLiveStream", 83 | "LehaiLiveStream", 84 | "LiveMeLiveStream", 85 | "LookLiveStream", 86 | "MaoerLiveStream", 87 | "NeteaseLiveStream", 88 | "PandaLiveStream", 89 | "PiaopaioLiveStream", 90 | "PopkonTVLiveStream", 91 | "QiandureboLiveStream", 92 | "RedNoteLiveStream", 93 | "ShopeeLiveStream", 94 | "ShowRoomLiveStream", 95 | "SixRoomLiveStream", 96 | "SoopLiveStream", 97 | "StreamData", 98 | "TaobaoLiveStream", 99 | "TikTokLiveStream", 100 | "TwitCastingLiveStream", 101 | "TwitchLiveStream", 102 | "VVXQLiveStream", 103 | "WeiboLiveStream", 104 | "WinkTVLiveStream", 105 | "YYLiveStream", 106 | "YinboLiveStream", 107 | "YiqiLiveStream", 108 | "YoutubeLiveStream", 109 | "ZhihuLiveStream", 110 | "__description__", 111 | "__title__", 112 | "__version__", 113 | ] 114 | 115 | __locals = locals() 116 | for __name in __all__: 117 | if not __name.startswith("__"): 118 | __locals[__name].__module__ = "streamget" 119 | 120 | # from .scripts.node_setup import check_node 121 | # check_node() 122 | -------------------------------------------------------------------------------- /streamget/__version__.py: -------------------------------------------------------------------------------- 1 | __title__ = "streamget" 2 | __description__ = "A Multi-Platform Live Stream Parser Library." 
3 | __version__ = "4.0.5" 4 | __author__ = "Hmily" 5 | __license__ = "MIT" 6 | __copyright__ = "Copyright Hmily" 7 | __url__ = "https://github.com/ihmily/streamget" 8 | -------------------------------------------------------------------------------- /streamget/cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import platform 3 | import sys 4 | from pathlib import Path 5 | 6 | from .help import show_welcome_help 7 | from .scripts.node_installer import install_node 8 | 9 | 10 | def main(): 11 | """Main command entry (supports subcommands like streamget install-node)""" 12 | if is_main_help_request(): 13 | show_welcome_help() 14 | sys.exit(0) 15 | 16 | parser = argparse.ArgumentParser( 17 | prog='streamget', 18 | add_help=False 19 | ) 20 | 21 | # Manually add help option 22 | parser.add_argument( 23 | '-h', '--help', 24 | action='help', 25 | default=argparse.SUPPRESS, 26 | help='Show help message' 27 | ) 28 | 29 | subparsers = parser.add_subparsers( 30 | title="Available Commands", 31 | dest="command", 32 | required=True, 33 | help="Node.js runtime installation" 34 | ) 35 | 36 | # install-node subcommand 37 | node_parser = subparsers.add_parser( 38 | 'install-node', 39 | description='Install specific Node.js version to custom path', 40 | formatter_class=argparse.RawTextHelpFormatter, 41 | epilog='''Example usage: 42 | streamget install-node # Install default version 43 | streamget install-node --version 20.0.0 # Specify version 44 | streamget install-node --path ./node_dir # Custom install path 45 | 46 | Version reference: https://nodejs.org/dist/ 47 | ''', 48 | add_help=False 49 | ) 50 | node_parser.add_argument( 51 | '--version', 52 | default='22.14.0', 53 | help='Node.js version (default: %(default)s)' 54 | ) 55 | node_parser.add_argument( 56 | '--path', 57 | type=Path, 58 | default=None, 59 | help='Custom installation path (default: ~/.streamget_node)' 60 | ) 61 | node_parser.add_argument( 62 | '-h', '--help', 63 | action='help', 64 | help='Show this help message' 65 | ) 66 | node_parser.set_defaults(func=handle_install_node) 67 | 68 | args = parser.parse_args() 69 | if hasattr(args, 'func'): 70 | args.func(args) 71 | else: 72 | show_welcome_help() 73 | 74 | 75 | def is_main_help_request(): 76 | """Check if it's a global help request (not subcommand level)""" 77 | # Case 1: streamget -h/--help 78 | if len(sys.argv) == 2 and sys.argv[1] in ('-h', '--help'): 79 | return True 80 | # Case 2: streamget (no arguments) 81 | if len(sys.argv) == 1: 82 | return True 83 | return False 84 | 85 | 86 | def handle_install_node(args): 87 | """Handle install-node subcommand""" 88 | try: 89 | # Parameter validation 90 | if args.path and not args.path.parent.exists(): 91 | raise ValueError(f"Path {args.path.parent} does not exist") 92 | 93 | if args.path and not args.path.parent.is_dir(): 94 | raise ValueError(f"{args.path.parent} is not a valid directory") 95 | 96 | # Version format validation 97 | if not all(c.isdigit() or c == '.' 
for c in args.version): 98 | raise ValueError("Invalid version format, should be like 20.0.0") 99 | 100 | # Execute installation 101 | install_node( 102 | version=args.version, 103 | install_path=args.path.expanduser() if args.path else None 104 | ) 105 | 106 | print("✅ Node.js installed successfully!\n") 107 | 108 | except Exception as e: 109 | print(f"❌ Installation failed: {str(e)}\n") 110 | print("💡 Try adding --help for usage\n") 111 | sys.exit(1) 112 | 113 | 114 | def get_bin_path(version, custom_path): 115 | """Generate Node.js binary path""" 116 | system = platform.system().lower() 117 | base_dir = custom_path or Path.home() / ".streamget_node" 118 | return base_dir / f"node-v{version}-{system}-x64" / ("bin" if system != 'windows' else "") 119 | -------------------------------------------------------------------------------- /streamget/data.py: -------------------------------------------------------------------------------- 1 | import json 2 | from dataclasses import dataclass 3 | 4 | 5 | @dataclass 6 | class StreamData: 7 | """ 8 | Represents metadata and URLs associated with a streaming session. 9 | 10 | This class encapsulates essential information about a stream, including platform details, 11 | streamer information, stream status, and URLs for different stream formats. 12 | It also provides a method to convert the object to a JSON string. 13 | 14 | Attributes: 15 | platform (str): The streaming platform (e.g., "Twitch", "SOOP", "TikTok"). 16 | anchor_name (str): The name of the streamer. 17 | is_live (bool): Indicates whether the stream is currently live. 18 | title (str): The title of the stream. 19 | quality (str): The quality of the stream (e.g., "OD", "BD", "UHD", "HD"). 20 | m3u8_url (str): The URL for the m3u8 stream format. 21 | flv_url (str): The URL for the FLV stream format. 22 | record_url (str): The URL for recording the stream. 23 | new_cookies (str): Updated cookies required for accessing the stream. 24 | new_token (str): Updated token required for accessing the stream. 25 | extra (dict): Additional metadata or custom fields. 26 | 27 | Example: 28 | >>> stream_data = StreamData(platform="Twitch", anchor_name="StreamerName", is_live=True, title="Live Title") 29 | >>> json_data = stream_data.to_json() 30 | >>> print(json_data) 31 | JSON representation of the stream data 32 | 33 | Note: 34 | The `extra` attribute can be used to store any additional metadata that is not explicitly defined in the class. 35 | """ 36 | platform: str = None 37 | anchor_name: str = None 38 | is_live: bool = None 39 | title: str = None 40 | quality: str = None 41 | m3u8_url: str = None 42 | flv_url: str = None 43 | record_url: str = None 44 | new_cookies: str = None 45 | new_token: str = None 46 | extra: dict = None 47 | 48 | def to_json(self) -> str: 49 | """ 50 | Converts the StreamData object to a JSON string. 51 | 52 | This method serializes the object's attributes into a JSON format, making it easy to 53 | transmit or store the stream data. 54 | 55 | Returns: 56 | str: A JSON representation of the StreamData object. 57 | 58 | Example: 59 | >>> stream_data = StreamData(platform="Twitch", anchor_name="StreamerName") 60 | >>> json_data = stream_data.to_json() 61 | >>> print(json_data) 62 | { 63 | "platform": "Twitch", 64 | "anchor_name": "StreamerName", 65 | ... 
66 | } 67 | """ 68 | return json.dumps(self.__dict__, ensure_ascii=False, indent=4) 69 | 70 | 71 | def wrap_stream(data: dict) -> StreamData: 72 | """ 73 | Wraps a dictionary into a StreamData object with default values for missing fields. 74 | 75 | This function ensures that all required and optional fields are present in the input dictionary. 76 | If a field is missing, it is set to `None`. 77 | 78 | Args: 79 | data (dict): A dictionary containing stream data. 80 | 81 | Returns: 82 | StreamData: An instance of StreamData with default values for missing fields. 83 | 84 | Raises: 85 | TypeError: If the input is not a dictionary. 86 | 87 | Example: 88 | >>> json_data = {"platform": "Bilibili", "anchor_name": "StreamerName"} 89 | >>> stream_data = wrap_stream(json_data) 90 | >>> print(stream_data) 91 | StreamData(platform='Bilibili', anchor_name='StreamerName', ...) 92 | 93 | Note: 94 | The function assumes that the input dictionary contains valid data types for each field. 95 | """ 96 | if not isinstance(data, dict): 97 | raise TypeError("Input must be a dictionary") 98 | 99 | required_fields = ["platform", "anchor_name", "is_live", "title", "quality", "m3u8_url", "flv_url", "record_url"] 100 | optional_fields = ["new_cookies", "new_token"] 101 | 102 | for field in required_fields + optional_fields: 103 | if field not in data: 104 | data[field] = None 105 | 106 | return StreamData(**data) 107 | -------------------------------------------------------------------------------- /streamget/help.py: -------------------------------------------------------------------------------- 1 | from . import __all__, __version__ 2 | 3 | 4 | def show_welcome_help(): 5 | """ 6 | Print help information for the streamget package. 7 | """ 8 | print("Welcome to streamget!") 9 | print(f"Version: {__version__}") 10 | print("Description: A Multi-Platform Live Stream Parser Library.") 11 | 12 | print("\nCommand Line Tools:") 13 | print(" streamget [-h] [-help] -- help info") 14 | print(" Install Node.js runtime:") 15 | print(" streamget install-node [--version] [--path] [--help]") 16 | print(" Example:") 17 | print(" streamget install-node") 18 | print(" streamget install-node --version 20.0.0") 19 | print(" streamget install-node --version 20.0.0 --path ./node") 20 | 21 | print("\nSupported Platforms:") 22 | print(__all__[4:]) 23 | print("\nUsage:") 24 | print(" import asyncio") 25 | print(" from streamget import DouyinLiveStream") 26 | print(" stream = DouyinLiveStream()") 27 | print(" data = asyncio.run(stream.fetch_web_stream_data('https://live.douyin.com/xxxxxx'))") 28 | print(" stream_obj = asyncio.run(stream.fetch_stream_url(data))") 29 | print(" stream_json_str = stream_obj.to_json()") 30 | print("\nFor more information, visit the GitHub repository: https://github.com/ihmily/streamget\n") 31 | 32 | 33 | if __name__ == '__main__': 34 | show_welcome_help() 35 | -------------------------------------------------------------------------------- /streamget/js/taobao-sign.js: -------------------------------------------------------------------------------- 1 | function sign(e) { 2 | function t(e, t) { 3 | return e << t | e >>> 32 - t 4 | } 5 | function o(e, t) { 6 | var o, n, r, i, a; 7 | return r = 2147483648 & e, 8 | i = 2147483648 & t, 9 | a = (1073741823 & e) + (1073741823 & t), 10 | (o = 1073741824 & e) & (n = 1073741824 & t) ? 2147483648 ^ a ^ r ^ i : o | n ? 1073741824 & a ? 
3221225472 ^ a ^ r ^ i : 1073741824 ^ a ^ r ^ i : a ^ r ^ i 11 | } 12 | function n(e, n, r, i, a, s, u) { 13 | return o(t(e = o(e, o(o(function(e, t, o) { 14 | return e & t | ~e & o 15 | }(n, r, i), a), u)), s), n) 16 | } 17 | function r(e, n, r, i, a, s, u) { 18 | return o(t(e = o(e, o(o(function(e, t, o) { 19 | return e & o | t & ~o 20 | }(n, r, i), a), u)), s), n) 21 | } 22 | function i(e, n, r, i, a, s, u) { 23 | return o(t(e = o(e, o(o(function(e, t, o) { 24 | return e ^ t ^ o 25 | }(n, r, i), a), u)), s), n) 26 | } 27 | function a(e, n, r, i, a, s, u) { 28 | return o(t(e = o(e, o(o(function(e, t, o) { 29 | return t ^ (e | ~o) 30 | }(n, r, i), a), u)), s), n) 31 | } 32 | function s(e) { 33 | var t, o = "", n = ""; 34 | for (t = 0; 3 >= t; t++) 35 | o += (n = "0" + (e >>> 8 * t & 255).toString(16)).substr(n.length - 2, 2); 36 | return o 37 | } 38 | var u, l, d, c, p, f, h, m, y, g; 39 | for (g = function(e) { 40 | for (var t = e.length, o = t + 8, n = 16 * ((o - o % 64) / 64 + 1), r = Array(n - 1), i = 0, a = 0; t > a; ) 41 | i = a % 4 * 8, 42 | r[(a - a % 4) / 4] |= e.charCodeAt(a) << i, 43 | a++; 44 | return i = a % 4 * 8, 45 | r[(a - a % 4) / 4] |= 128 << i, 46 | r[n - 2] = t << 3, 47 | r[n - 1] = t >>> 29, 48 | r 49 | }(e = function(e) { 50 | var t = String.fromCharCode; 51 | e = e.replace(/\r\n/g, "\n"); 52 | for (var o, n = "", r = 0; r < e.length; r++) 53 | 128 > (o = e.charCodeAt(r)) ? n += t(o) : o > 127 && 2048 > o ? (n += t(o >> 6 | 192), 54 | n += t(63 & o | 128)) : (n += t(o >> 12 | 224), 55 | n += t(o >> 6 & 63 | 128), 56 | n += t(63 & o | 128)); 57 | return n 58 | }(e)), 59 | f = 1732584193, 60 | h = 4023233417, 61 | m = 2562383102, 62 | y = 271733878, 63 | u = 0; u < g.length; u += 16) 64 | l = f, 65 | d = h, 66 | c = m, 67 | p = y, 68 | h = a(h = a(h = a(h = a(h = i(h = i(h = i(h = i(h = r(h = r(h = r(h = r(h = n(h = n(h = n(h = n(h, m = n(m, y = n(y, f = n(f, h, m, y, g[u + 0], 7, 3614090360), h, m, g[u + 1], 12, 3905402710), f, h, g[u + 2], 17, 606105819), y, f, g[u + 3], 22, 3250441966), m = n(m, y = n(y, f = n(f, h, m, y, g[u + 4], 7, 4118548399), h, m, g[u + 5], 12, 1200080426), f, h, g[u + 6], 17, 2821735955), y, f, g[u + 7], 22, 4249261313), m = n(m, y = n(y, f = n(f, h, m, y, g[u + 8], 7, 1770035416), h, m, g[u + 9], 12, 2336552879), f, h, g[u + 10], 17, 4294925233), y, f, g[u + 11], 22, 2304563134), m = n(m, y = n(y, f = n(f, h, m, y, g[u + 12], 7, 1804603682), h, m, g[u + 13], 12, 4254626195), f, h, g[u + 14], 17, 2792965006), y, f, g[u + 15], 22, 1236535329), m = r(m, y = r(y, f = r(f, h, m, y, g[u + 1], 5, 4129170786), h, m, g[u + 6], 9, 3225465664), f, h, g[u + 11], 14, 643717713), y, f, g[u + 0], 20, 3921069994), m = r(m, y = r(y, f = r(f, h, m, y, g[u + 5], 5, 3593408605), h, m, g[u + 10], 9, 38016083), f, h, g[u + 15], 14, 3634488961), y, f, g[u + 4], 20, 3889429448), m = r(m, y = r(y, f = r(f, h, m, y, g[u + 9], 5, 568446438), h, m, g[u + 14], 9, 3275163606), f, h, g[u + 3], 14, 4107603335), y, f, g[u + 8], 20, 1163531501), m = r(m, y = r(y, f = r(f, h, m, y, g[u + 13], 5, 2850285829), h, m, g[u + 2], 9, 4243563512), f, h, g[u + 7], 14, 1735328473), y, f, g[u + 12], 20, 2368359562), m = i(m, y = i(y, f = i(f, h, m, y, g[u + 5], 4, 4294588738), h, m, g[u + 8], 11, 2272392833), f, h, g[u + 11], 16, 1839030562), y, f, g[u + 14], 23, 4259657740), m = i(m, y = i(y, f = i(f, h, m, y, g[u + 1], 4, 2763975236), h, m, g[u + 4], 11, 1272893353), f, h, g[u + 7], 16, 4139469664), y, f, g[u + 10], 23, 3200236656), m = i(m, y = i(y, f = i(f, h, m, y, g[u + 13], 4, 
681279174), h, m, g[u + 0], 11, 3936430074), f, h, g[u + 3], 16, 3572445317), y, f, g[u + 6], 23, 76029189), m = i(m, y = i(y, f = i(f, h, m, y, g[u + 9], 4, 3654602809), h, m, g[u + 12], 11, 3873151461), f, h, g[u + 15], 16, 530742520), y, f, g[u + 2], 23, 3299628645), m = a(m, y = a(y, f = a(f, h, m, y, g[u + 0], 6, 4096336452), h, m, g[u + 7], 10, 1126891415), f, h, g[u + 14], 15, 2878612391), y, f, g[u + 5], 21, 4237533241), m = a(m, y = a(y, f = a(f, h, m, y, g[u + 12], 6, 1700485571), h, m, g[u + 3], 10, 2399980690), f, h, g[u + 10], 15, 4293915773), y, f, g[u + 1], 21, 2240044497), m = a(m, y = a(y, f = a(f, h, m, y, g[u + 8], 6, 1873313359), h, m, g[u + 15], 10, 4264355552), f, h, g[u + 6], 15, 2734768916), y, f, g[u + 13], 21, 1309151649), m = a(m, y = a(y, f = a(f, h, m, y, g[u + 4], 6, 4149444226), h, m, g[u + 11], 10, 3174756917), f, h, g[u + 2], 15, 718787259), y, f, g[u + 9], 21, 3951481745), 69 | f = o(f, l), 70 | h = o(h, d), 71 | m = o(m, c), 72 | y = o(y, p); 73 | return (s(f) + s(h) + s(m) + s(y)).toLowerCase() 74 | } 75 | 76 | // 正确sign值:05748e8359cd3e6deaab02d15caafc11 77 | // var sg =sign('5655b7041ca049730330701082886efd&1719411639403&12574478&{"componentKey":"wp_pc_shop_basic_info","params":"{\\"memberId\\":\\"b2b-22133374292418351a\\"}"}') 78 | // console.log(sg) -------------------------------------------------------------------------------- /streamget/platforms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/acfun/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/acfun/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/acfun/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import urllib.parse 3 | from operator import itemgetter 4 | 5 | from ... import utils 6 | from ...data import StreamData, wrap_stream 7 | from ...requests.async_http import async_req 8 | from ..base import BaseLiveStream 9 | 10 | 11 | class AcfunLiveStream(BaseLiveStream): 12 | """ 13 | A class for fetching and processing Acfun live stream information. 14 | """ 15 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 16 | super().__init__(proxy_addr, cookies) 17 | self.pc_headers = self._get_pc_headers() 18 | 19 | async def _get_acfun_sign_params(self) -> tuple: 20 | did = f'web_{utils.generate_random_string(16)}' 21 | headers = { 22 | 'referer': 'https://live.acfun.cn/', 23 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0', 24 | 'cookie': f'_did={did};', 25 | } 26 | data = { 27 | 'sid': 'acfun.api.visitor', 28 | } 29 | api = 'https://id.app.acfun.cn/rest/app/visitor/login' 30 | json_str = await async_req(api, data=data, proxy_addr=self.proxy_addr, headers=headers) 31 | json_data = json.loads(json_str) 32 | user_id = json_data["userId"] 33 | visitor_st = json_data["acfun.api.visitor_st"] 34 | return user_id, did, visitor_st 35 | 36 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 37 | """ 38 | Fetches web stream data for a live room. 
39 | 40 | Args: 41 | url (str): The room URL. 42 | process_data (bool): Whether to process the data. Defaults to True. 43 | 44 | Returns: 45 | dict: A dictionary containing anchor name, live status, room URL, and title. 46 | """ 47 | author_id = url.split('?')[0].rsplit('/', maxsplit=1)[1] 48 | user_info_api = f'https://live.acfun.cn/rest/pc-direct/user/userInfo?userId={author_id}' 49 | json_str = await async_req(user_info_api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 50 | json_data = json.loads(json_str) 51 | anchor_name = json_data['profile']['name'] 52 | status = 'liveId' in json_data['profile'] 53 | result = {"anchor_name": anchor_name, "is_live": False} 54 | if status: 55 | result["is_live"] = True 56 | user_id, did, visitor_st = await self._get_acfun_sign_params() 57 | params = { 58 | 'subBiz': 'mainApp', 59 | 'kpn': 'ACFUN_APP', 60 | 'kpf': 'PC_WEB', 61 | 'userId': user_id, 62 | 'did': did, 63 | 'acfun.api.visitor_st': visitor_st, 64 | } 65 | 66 | data = { 67 | 'authorId': author_id, 68 | 'pullStreamType': 'FLV', 69 | } 70 | play_api = f'https://api.kuaishouzt.com/rest/zt/live/web/startPlay?{urllib.parse.urlencode(params)}' 71 | json_str = await async_req(play_api, data=data, proxy_addr=self.proxy_addr, headers=self.pc_headers) 72 | json_data = json.loads(json_str) 73 | live_title = json_data['data']['caption'] 74 | videoPlayRes = json_data['data']['videoPlayRes'] 75 | play_url_list = json.loads(videoPlayRes)['liveAdaptiveManifest'][0]['adaptationSet']['representation'] 76 | play_url_list = sorted(play_url_list, key=itemgetter('bitrate'), reverse=True) 77 | result |= {'play_url_list': play_url_list, 'title': live_title} 78 | return result 79 | 80 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 81 | """ 82 | Fetches the stream URL for a live room and wraps it into a StreamData object. 83 | """ 84 | data = await self.get_stream_url( 85 | json_data, video_quality, url_type='flv', flv_extra_key='url', platform='Acfun') 86 | return wrap_stream(data) 87 | -------------------------------------------------------------------------------- /streamget/platforms/baidu/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/baidu/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/baidu/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import random 3 | import re 4 | import time 5 | import urllib.parse 6 | 7 | from ...data import StreamData, wrap_stream 8 | from ...requests.async_http import async_req 9 | from ..base import BaseLiveStream 10 | 11 | 12 | class BaiduLiveStream(BaseLiveStream): 13 | """ 14 | A class for fetching and processing Baidu live stream information. 15 | """ 16 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 17 | super().__init__(proxy_addr, cookies) 18 | self.pc_headers = self._get_pc_headers() 19 | 20 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 21 | """ 22 | Fetches web stream data for a live room. 23 | 24 | Args: 25 | url (str): The room URL. 26 | process_data (bool): Whether to process the data. Defaults to True. 27 | 28 | Returns: 29 | dict: A dictionary containing anchor name, live status, room URL, and title. 
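        Example:
            A minimal usage sketch. The import path mirrors this repository's layout and the
            room URL is a placeholder (only the room_id query parameter is parsed), so
            substitute a real room link copied from the browser:
            >>> import asyncio
            >>> from streamget.platforms.baidu.live_stream import BaiduLiveStream
            >>> stream = BaiduLiveStream()
            >>> info = asyncio.run(stream.fetch_web_stream_data(
            ...     'https://live.baidu.com/m/media/pclive/pchome/live.html?room_id=9175031377&source=h5pre'))
            >>> stream_obj = asyncio.run(stream.fetch_stream_url(info, video_quality='OD'))
            >>> print(stream_obj.to_json())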
30 | """ 31 | uid = random.choice([ 32 | 'h5-683e85bdf741bf2492586f7ca39bf465', 33 | 'h5-c7c6dc14064a136be4215b452fab9eea', 34 | 'h5-4581281f80bb8968bd9a9dfba6050d3a' 35 | ]) 36 | room_id = re.search('room_id=(.*?)&', url).group(1) 37 | params = { 38 | 'cmd': '371', 39 | 'action': 'star', 40 | 'service': 'bdbox', 41 | 'osname': 'baiduboxapp', 42 | 'data': '{"data":{"room_id":"' + room_id + '","device_id":"h5-683e85bdf741bf2492586f7ca39bf465",' 43 | '"source_type":0,"osname":"baiduboxapp"},"replay_slice":0,' 44 | '"nid":"","schemeParams":{"src_pre":"pc","src_suf":"other",' 45 | '"bd_vid":"","share_uid":"","share_cuk":"","share_ecid":"",' 46 | '"zb_tag":"","shareTaskInfo":"{\\"room_id\\":\\"9175031377\\"}",' 47 | '"share_from":"","ext_params":"","nid":""}}', 48 | 'ua': '360_740_ANDROID_0', 49 | 'bd_vid': '', 50 | 'uid': uid, 51 | '_': str(int(time.time() * 1000)), 52 | } 53 | app_api = f'https://mbd.baidu.com/searchbox?{urllib.parse.urlencode(params)}' 54 | json_str = await async_req(url=app_api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 55 | json_data = json.loads(json_str) 56 | if not process_data: 57 | return json_data 58 | key = list(json_data['data'].keys())[0] 59 | data = json_data['data'][key] 60 | anchor_name = data['host']['name'] 61 | result = {"anchor_name": anchor_name, "is_live": False} 62 | if data['status'] == "0": 63 | result["is_live"] = True 64 | live_title = data['video']['title'] 65 | play_url_list = data['video']['url_clarity_list'] 66 | url_list = [] 67 | prefix = 'https://hls.liveshow.bdstatic.com/live/' 68 | if play_url_list: 69 | for i in play_url_list: 70 | url_list.append( 71 | prefix + i['urls']['flv'].rsplit('.', maxsplit=1)[0].rsplit('/', maxsplit=1)[1] + '.m3u8') 72 | else: 73 | play_url_list = data['video']['url_list'] 74 | for i in play_url_list: 75 | url_list.append(prefix + i['urls'][0]['hls'].rsplit('?', maxsplit=1)[0].rsplit('/', maxsplit=1)[1]) 76 | 77 | if url_list: 78 | result |= {"is_live": True, "title": live_title, 'play_url_list': url_list} 79 | return result 80 | 81 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 82 | """ 83 | Fetches the stream URL for a live room and wraps it into a StreamData object. 84 | """ 85 | data = await self.get_stream_url(json_data, video_quality, platform='百度') 86 | return wrap_stream(data) 87 | -------------------------------------------------------------------------------- /streamget/platforms/bigo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/bigo/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/bigo/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | 8 | 9 | class BigoLiveStream(BaseLiveStream): 10 | """ 11 | A class for fetching and processing Bigo live stream information. 
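    Example:
        An illustrative sketch, not an authoritative API reference: the import path mirrors
        this repository's layout and 'xxxxxx' stands in for a real bigo.tv room id.
        >>> import asyncio
        >>> from streamget.platforms.bigo.live_stream import BigoLiveStream
        >>> stream = BigoLiveStream()
        >>> info = asyncio.run(stream.fetch_web_stream_data('https://www.bigo.tv/cn/xxxxxx'))
        >>> stream_obj = asyncio.run(BigoLiveStream.fetch_stream_url(info))
        >>> print(stream_obj.to_json())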
12 | """ 13 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 14 | super().__init__(proxy_addr, cookies) 15 | self.pc_headers = self._get_pc_headers() 16 | 17 | def _get_pc_headers(self) -> dict: 18 | return { 19 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0', 20 | 'accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2', 21 | 'cookie': self.cookies or '', 22 | } 23 | 24 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 25 | """ 26 | Fetches web stream data for a live room. 27 | 28 | Args: 29 | url (str): The room URL. 30 | process_data (bool): Whether to process the data. Defaults to True. 31 | 32 | Returns: 33 | dict: A dictionary containing anchor name, live status, room URL, and title. 34 | """ 35 | if 'bigo.tv' not in url: 36 | html_str = await async_req(url, proxy_addr=self.proxy_addr, headers=self.pc_headers) 37 | web_url = re.search( 38 | '', 39 | html_str).group(1) 40 | room_id = web_url.split('&h=')[-1] 41 | else: 42 | if '&h=' in url: 43 | room_id = url.split('&h=')[-1] 44 | else: 45 | room_id = re.search('www.bigo.tv/cn/(\\w+)', url).group(1) 46 | 47 | data = {'siteId': room_id} # roomId 48 | url2 = 'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo' 49 | json_str = await async_req(url=url2, proxy_addr=self.proxy_addr, headers=self.pc_headers, data=data) 50 | json_data = json.loads(json_str) 51 | if not process_data: 52 | return json_data 53 | anchor_name = json_data['data']['nick_name'] 54 | live_status = json_data['data']['alive'] 55 | result = {"anchor_name": anchor_name, "is_live": False} 56 | 57 | if live_status == 1: 58 | live_title = json_data['data']['roomTopic'] 59 | m3u8_url = json_data['data']['hls_src'] 60 | result['m3u8_url'] = m3u8_url 61 | result['record_url'] = m3u8_url 62 | result |= {"title": live_title, "is_live": True, "m3u8_url": m3u8_url, 'record_url': m3u8_url} 63 | elif result['anchor_name'] == '': 64 | html_str = await async_req(url=f'https://www.bigo.tv/cn/{room_id}', 65 | proxy_addr=self.proxy_addr, headers=self.pc_headers) 66 | result['anchor_name'] = re.search('欢迎来到(.*?)的直播间', html_str, re.DOTALL).group(1) 67 | 68 | return result 69 | 70 | @staticmethod 71 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 72 | """ 73 | Fetches the stream URL for a live room and wraps it into a StreamData object. 
74 | """ 75 | json_data |= {"platform": 'Bigo'} 76 | return wrap_stream(json_data) 77 | -------------------------------------------------------------------------------- /streamget/platforms/bilibili/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/bilibili/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/blued/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/blued/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/blued/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | import urllib.parse 4 | 5 | from ...data import StreamData, wrap_stream 6 | from ...requests.async_http import async_req 7 | from ..base import BaseLiveStream 8 | 9 | 10 | class BluedLiveStream(BaseLiveStream): 11 | """ 12 | A class for fetching and processing Blued live stream information. 13 | """ 14 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 15 | super().__init__(proxy_addr, cookies) 16 | self.mobile_headers = self._get_mobile_headers() 17 | 18 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 19 | """ 20 | Fetches web stream data for a live room. 21 | 22 | Args: 23 | url (str): The room URL. 24 | process_data (bool): Whether to process the data. Defaults to True. 25 | 26 | Returns: 27 | dict: A dictionary containing anchor name, live status, room URL, and title. 28 | """ 29 | html_str = await async_req(url=url, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 30 | json_str = re.search('decodeURIComponent\\(\"(.*?)\"\\)\\),window\\.Promise', html_str, re.DOTALL).group(1) 31 | json_str = urllib.parse.unquote(json_str) 32 | json_data = json.loads(json_str) 33 | if not process_data: 34 | return json_data 35 | anchor_name = json_data['userInfo']['name'] 36 | live_status = json_data['userInfo']['onLive'] 37 | result = {"anchor_name": anchor_name, "is_live": False} 38 | 39 | if live_status: 40 | m3u8_url = json_data['liveInfo']['liveUrl'] 41 | result |= {"is_live": True, "m3u8_url": m3u8_url, 'record_url': m3u8_url} 42 | return result 43 | 44 | @staticmethod 45 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 46 | """ 47 | Fetches the stream URL for a live room and wraps it into a StreamData object. 
48 | """ 49 | json_data |= {"platform": 'Blued'} 50 | return wrap_stream(json_data) 51 | -------------------------------------------------------------------------------- /streamget/platforms/chzzk/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/chzzk/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/chzzk/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ...data import StreamData, wrap_stream 4 | from ...requests.async_http import async_req 5 | from ..base import BaseLiveStream 6 | 7 | 8 | class ChzzkLiveStream(BaseLiveStream): 9 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 10 | super().__init__(proxy_addr, cookies) 11 | self.pc_headers = self._get_pc_headers() 12 | 13 | def _get_pc_headers(self) -> dict: 14 | return { 15 | 'accept': 'application/json, text/plain, */*', 16 | 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 17 | 'origin': 'https://chzzk.naver.com', 18 | 'referer': 'https://chzzk.naver.com/live/458f6ec20b034f49e0fc6d03921646d2', 19 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0', 20 | 'cookie': self.cookies or '', 21 | } 22 | 23 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 24 | """ 25 | Fetches web stream data for a live room. 26 | 27 | Args: 28 | url (str): The room URL. 29 | process_data (bool): Whether to process the data. Defaults to True. 30 | 31 | Returns: 32 | dict: A dictionary containing anchor name, live status, room URL, and title. 
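        Example:
            Illustrative only; the channel id below is the one already used in this module's
            referer header and may not be live, so replace it with a real channel URL:
            >>> import asyncio
            >>> from streamget.platforms.chzzk.live_stream import ChzzkLiveStream
            >>> stream = ChzzkLiveStream()
            >>> info = asyncio.run(stream.fetch_web_stream_data(
            ...     'https://chzzk.naver.com/live/458f6ec20b034f49e0fc6d03921646d2'))
            >>> stream_obj = asyncio.run(stream.fetch_stream_url(info, video_quality='OD'))
            >>> print(stream_obj.to_json())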
33 | """ 34 | room_id = url.split('?')[0].rsplit('/', maxsplit=1)[-1] 35 | play_api = f'https://api.chzzk.naver.com/service/v3/channels/{room_id}/live-detail' 36 | json_str = await async_req(play_api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 37 | json_data = json.loads(json_str) 38 | if not process_data: 39 | return json_data 40 | live_data = json_data['content'] 41 | anchor_name = live_data['channel']['channelName'] 42 | live_status = live_data['status'] 43 | 44 | result = {"anchor_name": anchor_name, "is_live": False} 45 | if live_status == 'OPEN': 46 | play_data = json.loads(live_data['livePlaybackJson']) 47 | m3u8_url = play_data['media'][0]['path'] 48 | m3u8_url_list = await self.get_play_url_list(m3u8_url, proxy=self.proxy_addr, headers=self.pc_headers) 49 | prefix = m3u8_url.split('?')[0].rsplit('/', maxsplit=1)[0] 50 | m3u8_url_list = [prefix + '/' + i for i in m3u8_url_list] 51 | result |= {"is_live": True, "m3u8_url": m3u8_url, "play_url_list": m3u8_url_list} 52 | return result 53 | 54 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 55 | data = await self.get_stream_url(json_data, video_quality, spec=True, platform='CHZZK') 56 | return wrap_stream(data) 57 | -------------------------------------------------------------------------------- /streamget/platforms/douyin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/douyin/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/douyu/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/douyu/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/douyu/live_stream.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import json 3 | import re 4 | import time 5 | 6 | import execjs 7 | 8 | from ...data import StreamData, wrap_stream 9 | from ...requests.async_http import async_req 10 | from ..base import BaseLiveStream 11 | 12 | 13 | class DouyuLiveStream(BaseLiveStream): 14 | """ 15 | A class for fetching and processing Douyu live stream information. 
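    Note:
        Stream-URL signing evaluates page JavaScript through execjs, so a Node.js runtime
        must be available; the package's `streamget install-node` helper is one way to
        provide it.

    Example:
        A usage sketch; the room id below is the one already referenced in this module's
        mobile headers and may be offline:
        >>> import asyncio
        >>> from streamget.platforms.douyu.live_stream import DouyuLiveStream
        >>> stream = DouyuLiveStream()
        >>> info = asyncio.run(stream.fetch_web_stream_data('https://www.douyu.com/3125893'))
        >>> stream_obj = asyncio.run(stream.fetch_stream_url(info, video_quality='OD'))
        >>> print(stream_obj.to_json())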
16 | """ 17 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 18 | super().__init__(proxy_addr, cookies) 19 | self.mobile_headers = self._get_mobile_headers() 20 | self.pc_headers = self._get_pc_headers() 21 | 22 | def _get_mobile_headers(self) -> dict: 23 | return { 24 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 25 | 'cookie': self.cookies or '', 26 | 'referer': 'https://m.douyu.com/3125893?rid=3125893&dyshid=0-96003918aa5365bc6dcb4933000316p1&dyshci=181', 27 | } 28 | 29 | @staticmethod 30 | def _get_md5(data) -> str: 31 | return hashlib.md5(data.encode('utf-8')).hexdigest() 32 | 33 | async def _get_token_js(self, rid: str, did: str) -> list[str]: 34 | url = f'https://www.douyu.com/{rid}' 35 | html_str = await async_req(url=url, proxy_addr=self.proxy_addr) 36 | result = re.search(r'(vdwdae325w_64we[\s\S]*function ub98484234[\s\S]*?)function', html_str).group(1) 37 | func_ub9 = re.sub(r'eval.*?;}', 'strc;}', result) 38 | js = execjs.compile(func_ub9) 39 | res = js.call('ub98484234') 40 | 41 | t10 = str(int(time.time())) 42 | v = re.search(r'v=(\d+)', res).group(1) 43 | rb = self._get_md5(str(rid) + str(did) + str(t10) + str(v)) 44 | 45 | func_sign = re.sub(r'return rt;}\);?', 'return rt;}', res) 46 | func_sign = func_sign.replace('(function (', 'function sign(') 47 | func_sign = func_sign.replace('CryptoJS.MD5(cb).toString()', '"' + rb + '"') 48 | 49 | try: 50 | js = execjs.compile(func_sign) 51 | params = js.call('sign', rid, did, t10) 52 | params_list = re.findall('=(.*?)(?=&|$)', params) 53 | return params_list 54 | except execjs.ProgramError: 55 | raise execjs.ProgramError('Failed to execute JS code. Please check that a Node.js runtime is installed and available.') 56 | 57 | async def _fetch_web_stream_url(self, rid: str, rate: str = '-1') -> dict: 58 | 59 | did = '10000000000000000000000000003306' 60 | params_list = await self._get_token_js(rid, did) 61 | data = { 62 | 'v': params_list[0], 63 | 'did': params_list[1], 64 | 'tt': params_list[2], 65 | 'sign': params_list[3], # signature is valid for 10 minutes 66 | 'ver': '22011191', 67 | 'rid': rid, 68 | 'rate': rate, # 0: Blu-ray, 3: UHD, 2: HD, -1: default 69 | } 70 | 71 | # app_api = 'https://m.douyu.com/hgapi/livenc/room/getStreamUrl' 72 | app_api = f'https://www.douyu.com/lapi/live/getH5Play/{rid}' 73 | json_str = await async_req(url=app_api, proxy_addr=self.proxy_addr, headers=self.mobile_headers, data=data) 74 | json_data = json.loads(json_str) 75 | return json_data 76 | 77 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 78 | """ 79 | Fetches web stream data for a live room. 80 | 81 | Args: 82 | url (str): The room URL. 83 | process_data (bool): Whether to process the data. Defaults to True. 84 | 85 | Returns: 86 | dict: A dictionary containing anchor name, live status, room URL, and title.
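        Example:
            A sketch of the dictionary returned for a live room (values are placeholders;
            room_id is kept so that fetch_stream_url can later request the signed play URL):
            >>> info = asyncio.run(stream.fetch_web_stream_data('https://www.douyu.com/3125893'))
            >>> info
            {'anchor_name': '...', 'is_live': True, 'title': '...', 'room_id': '...'}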
87 | """ 88 | match_rid = re.search('rid=(.*?)(?=&|$)', url) 89 | if match_rid: 90 | rid = match_rid.group(1) 91 | else: 92 | rid = re.search('douyu.com/(.*?)(?=\\?|$)', url).group(1) 93 | html_str = await async_req(url=f'https://m.douyu.com/{rid}', proxy_addr=self.proxy_addr, 94 | headers=self.pc_headers) 95 | json_str = re.findall('', html_str)[0] 96 | json_data = json.loads(json_str) 97 | rid = json_data['pageProps']['room']['roomInfo']['roomInfo']['rid'] 98 | 99 | url2 = f'https://www.douyu.com/betard/{rid}' 100 | json_str = await async_req(url2, proxy_addr=self.proxy_addr, headers=self.pc_headers) 101 | json_data = json.loads(json_str) 102 | if not process_data: 103 | return json_data 104 | result = { 105 | "anchor_name": json_data['room']['nickname'], 106 | "is_live": False 107 | } 108 | if json_data['room']['videoLoop'] == 0 and json_data['room']['show_status'] == 1: 109 | result["title"] = json_data['room']['room_name'].replace(' ', '') 110 | result["is_live"] = True 111 | result["room_id"] = json_data['room']['room_id'] 112 | return result 113 | 114 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 115 | """ 116 | Fetches the stream URL for a live room and wraps it into a StreamData object. 117 | """ 118 | platform = '斗鱼直播' 119 | if not json_data["is_live"]: 120 | json_data |= {"platform": platform} 121 | return wrap_stream(json_data) 122 | video_quality_options = { 123 | "OD": '0', 124 | "BD": '0', 125 | "UHD": '3', 126 | "HD": '2', 127 | "SD": '1', 128 | "LD": '1' 129 | } 130 | rid = str(json_data["room_id"]) 131 | json_data.pop("room_id") 132 | 133 | if not video_quality: 134 | video_quality = "OD" 135 | else: 136 | if str(video_quality).isdigit(): 137 | video_quality = list(video_quality_options.keys())[int(video_quality)] 138 | else: 139 | video_quality = video_quality.upper() 140 | 141 | rate = video_quality_options.get(video_quality, '0') 142 | flv_data = await self._fetch_web_stream_url(rid=rid, rate=rate) 143 | rtmp_url = flv_data['data'].get('rtmp_url') 144 | rtmp_live = flv_data['data'].get('rtmp_live') 145 | if rtmp_live: 146 | flv_url = f'{rtmp_url}/{rtmp_live}' 147 | json_data |= {"platform": platform, 'quality': video_quality, 'flv_url': flv_url, 'record_url': flv_url} 148 | return wrap_stream(json_data) 149 | 150 | -------------------------------------------------------------------------------- /streamget/platforms/faceit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/faceit/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/faceit/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | from ..twitch.live_stream import TwitchLiveStream 8 | 9 | 10 | class FaceitLiveStream(BaseLiveStream): 11 | """ 12 | A class for fetching and processing Faceit live stream information. 
13 | """ 14 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 15 | super().__init__(proxy_addr, cookies) 16 | self.pc_headers = self._get_pc_headers() 17 | 18 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 19 | """ 20 | Fetches web stream data for a live room. 21 | 22 | Args: 23 | url (str): The room URL. 24 | process_data (bool): Whether to process the data. Defaults to True. 25 | 26 | Returns: 27 | dict: A dictionary containing anchor name, live status, room URL, and title. 28 | """ 29 | nickname = re.findall('/players/(.*?)/stream', url)[0] 30 | api = f'https://www.faceit.com/api/users/v1/nicknames/{nickname}' 31 | json_str = await async_req(api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 32 | json_data = json.loads(json_str) 33 | user_id = json_data['payload']['id'] 34 | api2 = f'https://www.faceit.com/api/stream/v1/streamings?userId={user_id}' 35 | json_str2 = await async_req(api2, proxy_addr=self.proxy_addr, headers=self.pc_headers) 36 | json_data2 = json.loads(json_str2) 37 | if not json_data2.get('payload'): 38 | return {'anchor_name': nickname, 'is_live': False} 39 | platform_info = json_data2['payload'][0] 40 | anchor_name = platform_info.get('userNickname') 41 | anchor_id = platform_info.get('platformId') 42 | platform = platform_info.get('platform') 43 | if platform == 'twitch': 44 | twitch_stream = TwitchLiveStream(proxy_addr=self.proxy_addr) 45 | result = await twitch_stream.fetch_web_stream_data(f'https://www.twitch.tv/{anchor_id}') 46 | result['anchor_name'] = anchor_name 47 | else: 48 | result = {'anchor_name': anchor_name, 'is_live': False} 49 | return result 50 | 51 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 52 | """ 53 | Fetches the stream URL for a live room and wraps it into a StreamData object. 54 | """ 55 | data = await self.get_stream_url(json_data, video_quality, spec=True, platform='Faceit') 56 | return wrap_stream(data) -------------------------------------------------------------------------------- /streamget/platforms/flextv/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/flextv/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/haixiu/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/haixiu/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/haixiu/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | import urllib.parse 4 | 5 | import execjs 6 | 7 | from ... import JS_SCRIPT_PATH 8 | from ...data import StreamData, wrap_stream 9 | from ...requests.async_http import async_req 10 | from ..base import BaseLiveStream 11 | 12 | 13 | class HaixiuLiveStream(BaseLiveStream): 14 | """ 15 | A class for fetching and processing Haixiu live stream information. 
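    Note:
        Request signing runs the bundled js/haixiu.js (together with crypto-js.min.js)
        through execjs, so a Node.js runtime must be installed; the package's
        `streamget install-node` command is one way to set it up.

    Example:
        A usage sketch with a placeholder room id:
        >>> import asyncio
        >>> from streamget.platforms.haixiu.live_stream import HaixiuLiveStream
        >>> stream = HaixiuLiveStream()
        >>> info = asyncio.run(stream.fetch_web_stream_data('https://www.haixiutv.com/xxxxxx'))
        >>> stream_obj = asyncio.run(HaixiuLiveStream.fetch_stream_url(info))
        >>> print(stream_obj.to_json())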
16 | """ 17 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 18 | super().__init__(proxy_addr, cookies) 19 | self.mobile_headers = self._get_mobile_headers() 20 | 21 | def _get_mobile_headers(self) -> dict: 22 | return { 23 | 'origin': 'https://www.haixiutv.com', 24 | 'referer': 'https://www.haixiutv.com/', 25 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 26 | 'cookie': self.cookies or '', 27 | } 28 | 29 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 30 | """ 31 | Fetches web stream data for a live room. 32 | 33 | Args: 34 | url (str): The room URL. 35 | process_data (bool): Whether to process the data. Defaults to True. 36 | 37 | Returns: 38 | dict: A dictionary containing anchor name, live status, room URL, and title. 39 | """ 40 | room_id = url.split("?")[0].rsplit('/', maxsplit=1)[-1] 41 | if 'haixiutv' in url: 42 | access_token = "pLXSC%252FXJ0asc1I21tVL5FYZhNJn2Zg6d7m94umCnpgL%252BuVm31GQvyw%253D%253D" 43 | else: 44 | access_token = "s7FUbTJ%252BjILrR7kicJUg8qr025ZVjd07DAnUQd8c7g%252Fo4OH9pdSX6w%253D%253D" 45 | 46 | params = { 47 | "accessToken": access_token, 48 | "tku": "3000006", 49 | "c": "10138100100000", 50 | "_st1": int(time.time() * 1000) 51 | } 52 | try: 53 | with open(f'{JS_SCRIPT_PATH}/haixiu.js') as f: 54 | js_code = f.read() 55 | ajax_data = execjs.compile(js_code).call('sign', params, f'{JS_SCRIPT_PATH}/crypto-js.min.js') 56 | except execjs.ProgramError: 57 | raise execjs.ProgramError('Failed to execute JS code. Please check if the Node.js environment') 58 | 59 | params["accessToken"] = urllib.parse.unquote(urllib.parse.unquote(access_token)) 60 | params['_ajaxData1'] = ajax_data 61 | params['_'] = int(time.time() * 1000) 62 | 63 | encode_params = urllib.parse.urlencode(params) 64 | if 'haixiutv' in url: 65 | api = f'https://service.haixiutv.com/v2/room/{room_id}/media/advanceInfoRoom?{encode_params}' 66 | else: 67 | self.mobile_headers['origin'] = 'https://www.lehaitv.com' 68 | self.mobile_headers['referer'] = 'https://www.lehaitv.com' 69 | api = f'https://service.lehaitv.com/v2/room/{room_id}/media/advanceInfoRoom?{encode_params}' 70 | 71 | json_str = await async_req(api, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 72 | json_data = json.loads(json_str) 73 | if not process_data: 74 | return json_data 75 | stream_data = json_data['data'] 76 | anchor_name = stream_data['nickname'] 77 | live_status = stream_data['live_status'] 78 | result = {"anchor_name": anchor_name, "is_live": False} 79 | if live_status == 1: 80 | flv_url = stream_data['media_url_web'] 81 | result |= {'is_live': True, 'flv_url': flv_url, 'record_url': flv_url} 82 | return result 83 | 84 | @staticmethod 85 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 86 | """ 87 | Fetches the stream URL for a live room and wraps it into a StreamData object. 
88 | """ 89 | json_data |= {"platform": "嗨秀直播"} 90 | return wrap_stream(json_data) 91 | -------------------------------------------------------------------------------- /streamget/platforms/huajiao/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/huajiao/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/huamao/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/huamao/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/huamao/live_stream.py: -------------------------------------------------------------------------------- 1 | from ...data import StreamData 2 | from ..base import BaseLiveStream 3 | from ..piaopiao.live_stream import PiaopaioLiveStream 4 | 5 | 6 | class HuamaoLiveStream(BaseLiveStream): 7 | """ 8 | A class for fetching and processing Huamao live stream information. 9 | """ 10 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 11 | super().__init__(proxy_addr, cookies) 12 | self.stream = PiaopaioLiveStream(proxy_addr=self.proxy_addr, cookies=self.cookies) 13 | 14 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 15 | """ 16 | Fetches web stream data for a live room. 17 | 18 | Args: 19 | url (str): The room URL. 20 | process_data (bool): Whether to process the data. Defaults to True. 21 | 22 | Returns: 23 | dict: A dictionary containing anchor name, live status, room URL, and title. 24 | """ 25 | return await self.stream.fetch_web_stream_data(url) 26 | 27 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 28 | """ 29 | Fetches the stream URL for a live room and wraps it into a StreamData object. 30 | """ 31 | json_data |= {"platform": "花猫直播"} 32 | return await self.stream.fetch_stream_url(json_data) 33 | -------------------------------------------------------------------------------- /streamget/platforms/huya/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/huya/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/inke/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/inke/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/inke/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | import urllib.parse 4 | 5 | from ...data import StreamData, wrap_stream 6 | from ...requests.async_http import async_req 7 | from ..base import BaseLiveStream 8 | 9 | 10 | class InkeLiveStream(BaseLiveStream): 11 | """ 12 | A class for fetching and processing Inke live stream information. 
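    Example:
        A usage sketch; only the uid and id query parameters are read from the URL, and the
        link shape below is an assumption, so paste a real share link instead:
        >>> import asyncio
        >>> from streamget.platforms.inke.live_stream import InkeLiveStream
        >>> stream = InkeLiveStream()
        >>> info = asyncio.run(stream.fetch_web_stream_data(
        ...     'https://www.inke.cn/liveroom/index.html?uid=xxxxxx&id=xxxxxx'))
        >>> stream_obj = asyncio.run(InkeLiveStream.fetch_stream_url(info))
        >>> print(stream_obj.to_json())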
13 | """ 14 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 15 | super().__init__(proxy_addr, cookies) 16 | self.pc_headers = self._get_pc_headers() 17 | 18 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 19 | """ 20 | Fetches web stream data for a live room. 21 | 22 | Args: 23 | url (str): The room URL. 24 | process_data (bool): Whether to process the data. Defaults to True. 25 | 26 | Returns: 27 | dict: A dictionary containing anchor name, live status, room URL, and title. 28 | """ 29 | parsed_url = urllib.parse.urlparse(url) 30 | query_params = urllib.parse.parse_qs(parsed_url.query) 31 | uid = query_params['uid'][0] 32 | live_id = query_params['id'][0] 33 | params = { 34 | 'uid': uid, 35 | 'id': live_id, 36 | '_t': str(int(time.time())), 37 | } 38 | 39 | api = f'https://webapi.busi.inke.cn/web/live_share_pc?{urllib.parse.urlencode(params)}' 40 | json_str = await async_req(api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 41 | json_data = json.loads(json_str) 42 | if not process_data: 43 | return json_data 44 | anchor_name = json_data['data']['media_info']['nick'] 45 | live_status = json_data['data']['status'] 46 | 47 | result = {"anchor_name": anchor_name, "is_live": False} 48 | if live_status == 1: 49 | m3u8_url = json_data['data']['live_addr'][0]['hls_stream_addr'] 50 | flv_url = json_data['data']['live_addr'][0]['stream_addr'] 51 | result |= {'is_live': True, 'm3u8_url': m3u8_url, 'flv_url': flv_url, 'record_url': m3u8_url} 52 | return result 53 | 54 | @staticmethod 55 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 56 | """ 57 | Fetches the stream URL for a live room and wraps it into a StreamData object. 58 | """ 59 | json_data |= {"platform": "映客直播"} 60 | return wrap_stream(json_data) 61 | -------------------------------------------------------------------------------- /streamget/platforms/jd/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/jd/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/jd/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | import urllib.parse 4 | 5 | from ...data import StreamData, wrap_stream 6 | from ...requests.async_http import async_req 7 | from ..base import BaseLiveStream 8 | 9 | 10 | class JDLiveStream(BaseLiveStream): 11 | """ 12 | A class for fetching and processing JD live stream information. 13 | """ 14 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 15 | super().__init__(proxy_addr, cookies) 16 | self.mobile_headers = self._get_mobile_headers() 17 | 18 | def _get_mobile_headers(self) -> dict: 19 | return { 20 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 21 | 'origin': 'https://lives.jd.com', 22 | 'referer': 'https://lives.jd.com/', 23 | 'x-referer-page': 'https://lives.jd.com/', 24 | 'cookie': self.cookies or '', 25 | } 26 | 27 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 28 | """ 29 | Fetches web stream data for a live room. 30 | 31 | Args: 32 | url (str): The room URL. 33 | process_data (bool): Whether to process the data. Defaults to True. 
34 | 35 | Returns: 36 | dict: A dictionary containing anchor name, live status, room URL, and title. 37 | """ 38 | redirect_url = await async_req(url, proxy_addr=self.proxy_addr, headers=self.mobile_headers, redirect_url=True) 39 | author_id = self.get_params(redirect_url, 'authorId') 40 | result = {"anchor_name": '', "is_live": False} 41 | if not author_id: 42 | live_id = re.search('#/(.*?)\\?origin', redirect_url) 43 | if not live_id: 44 | return result 45 | live_id = live_id.group(1) 46 | result['anchor_name'] = f'jd_{live_id}' 47 | else: 48 | data = { 49 | 'functionId': 'talent_head_findTalentMsg', 50 | 'appid': 'dr_detail', 51 | 'body': '{"authorId":"' + author_id + '","monitorSource":"1","userId":""}', 52 | } 53 | info_api = 'https://api.m.jd.com/talent_head_findTalentMsg' 54 | json_str = await async_req(info_api, data=data, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 55 | json_data = json.loads(json_str) 56 | anchor_name = json_data['result']['talentName'] 57 | result['anchor_name'] = anchor_name 58 | if 'livingRoomJump' not in json_data['result']: 59 | return result 60 | live_id = json_data['result']['livingRoomJump']['params']['id'] 61 | params = { 62 | "body": '{"liveId": "' + live_id + '"}', 63 | "functionId": "getImmediatePlayToM", 64 | "appid": "h5-live" 65 | } 66 | 67 | api = f'https://api.m.jd.com/client.action?{urllib.parse.urlencode(params)}' 68 | # backup_api: https://api.m.jd.com/api 69 | json_str = await async_req(api, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 70 | json_data = json.loads(json_str) 71 | live_status = json_data['data']['status'] 72 | if live_status == 1: 73 | if author_id: 74 | data = { 75 | 'functionId': 'jdTalentContentList', 76 | 'appid': 'dr_detail', 77 | 'body': '{"authorId":"' + author_id + '","type":1,"userId":"","page":1,"offset":"-1",' 78 | '"monitorSource":"1","pageSize":1}', 79 | } 80 | json_str2 = await async_req('https://api.m.jd.com/jdTalentContentList', data=data, 81 | proxy_addr=self.proxy_addr, headers=self.mobile_headers) 82 | json_data2 = json.loads(json_str2) 83 | result['title'] = json_data2['result']['content'][0]['title'] 84 | 85 | flv_url = json_data['data']['videoUrl'] 86 | m3u8_url = json_data['data']['h5VideoUrl'] 87 | result |= {"is_live": True, "m3u8_url": m3u8_url, "flv_url": flv_url, "record_url": m3u8_url} 88 | return result 89 | 90 | @staticmethod 91 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 92 | """ 93 | Fetches the stream URL for a live room and wraps it into a StreamData object. 94 | """ 95 | json_data |= {"platform": "京东直播"} 96 | return wrap_stream(json_data) 97 | -------------------------------------------------------------------------------- /streamget/platforms/kuaishou/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/kuaishou/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/kuaishou/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | 8 | 9 | class KwaiLiveStream(BaseLiveStream): 10 | """ 11 | A class for fetching and processing Kuaishou live stream information. 
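    Example:
        A usage sketch; the profile URL below is a placeholder and login cookies, if
        required, can be supplied through the cookies argument:
        >>> import asyncio
        >>> from streamget.platforms.kuaishou.live_stream import KwaiLiveStream
        >>> stream = KwaiLiveStream(cookies='your_cookie_string')
        >>> info = asyncio.run(stream.fetch_web_stream_data('https://live.kuaishou.com/u/xxxxxx'))
        >>> print(info.get('is_live'))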
12 | """ 13 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 14 | super().__init__(proxy_addr, cookies) 15 | self.pc_headers = self._get_pc_headers() 16 | 17 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict | None: 18 | """ 19 | Fetches web stream data for a live room. 20 | 21 | Args: 22 | url (str): The room URL. 23 | process_data (bool): Whether to process the data. Defaults to True. 24 | 25 | Returns: 26 | dict: A dictionary containing anchor name, live status, room URL, and title. 27 | """ 28 | try: 29 | html_str = await async_req(url=url, proxy_addr=self.proxy_addr, headers=self.pc_headers) 30 | except Exception as e: 31 | raise Exception(f"Failed to fetch data from {url}.{e}") 32 | 33 | try: 34 | json_str = re.search('', 31 | html_str, re.DOTALL).group(1) 32 | json_data = json.loads(json_str) 33 | if not process_data: 34 | return json_data 35 | room_data = json_data['props']['pageProps']['roomInfoInitData'] 36 | live_data = room_data['live'] 37 | result = {"is_live": False} 38 | live_status = live_data.get('status') == 1 39 | result["anchor_name"] = live_data.get('nickname', room_data.get('nickname')) 40 | if live_status: 41 | result |= { 42 | 'is_live': True, 43 | 'title': live_data['title'], 44 | 'stream_list': live_data.get('quickplay'), 45 | 'm3u8_url': live_data.get('sharefile') 46 | } 47 | return result 48 | 49 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 50 | """ 51 | Fetches the stream URL for a live room and wraps it into a StreamData object. 52 | """ 53 | if not json_data['is_live']: 54 | json_data |= {"platform": "网易CC直播"} 55 | return wrap_stream(json_data) 56 | 57 | m3u8_url = json_data['m3u8_url'] 58 | flv_url = None 59 | if json_data.get('stream_list'): 60 | stream_list = json_data['stream_list']['resolution'] 61 | order = ['blueray', 'ultra', 'high', 'standard'] 62 | sorted_keys = [key for key in order if key in stream_list] 63 | while len(sorted_keys) < 5: 64 | sorted_keys.append(sorted_keys[-1]) 65 | video_quality, quality_index = self.get_quality_index(video_quality) 66 | selected_quality = sorted_keys[quality_index] 67 | flv_url_list = stream_list[selected_quality]['cdn'] 68 | selected_cdn = list(flv_url_list.keys())[0] 69 | flv_url = flv_url_list[selected_cdn] 70 | 71 | data = { 72 | "platform": "网易CC直播", 73 | "anchor_name": json_data['anchor_name'], 74 | "is_live": True, 75 | "title": json_data['title'], 76 | 'quality': video_quality, 77 | "m3u8_url": m3u8_url, 78 | "flv_url": flv_url, 79 | "record_url": flv_url or m3u8_url 80 | } 81 | return wrap_stream(data) 82 | -------------------------------------------------------------------------------- /streamget/platforms/pandatv/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/pandatv/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/pandatv/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ...data import StreamData, wrap_stream 4 | from ...requests.async_http import async_req 5 | from ..base import BaseLiveStream 6 | 7 | 8 | class PandaLiveStream(BaseLiveStream): 9 | """ 10 | A class for fetching and processing PandaLive live stream information. 
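    Example:
        A usage sketch; the URL path is an assumption (only the last path segment is used as
        the streamer id), and adult-only rooms additionally need valid login cookies:
        >>> import asyncio
        >>> from streamget.platforms.pandatv.live_stream import PandaLiveStream
        >>> stream = PandaLiveStream()
        >>> info = asyncio.run(stream.fetch_web_stream_data('https://www.pandalive.co.kr/live/play/xxxxxx'))
        >>> stream_obj = asyncio.run(stream.fetch_stream_url(info, video_quality='OD'))
        >>> print(stream_obj.to_json())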
11 | """ 12 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 13 | super().__init__(proxy_addr, cookies) 14 | self.pc_headers = self._get_pc_headers() 15 | 16 | def _get_pc_headers(self) -> dict: 17 | return { 18 | 'origin': 'https://www.pandalive.co.kr', 19 | 'referer': 'https://www.pandalive.co.kr/', 20 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:124.0) Gecko/20100101 Firefox/124.0', 21 | 'cookie': self.cookies or '', 22 | } 23 | 24 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 25 | """ 26 | Fetches web stream data for a live room. 27 | 28 | Args: 29 | url (str): The room URL. 30 | process_data (bool): Whether to process the data. Defaults to True. 31 | 32 | Returns: 33 | dict: A dictionary containing anchor name, live status, room URL, and title. 34 | """ 35 | user_id = url.split('?')[0].rsplit('/', maxsplit=1)[1] 36 | url2 = 'https://api.pandalive.co.kr/v1/live/play' 37 | data = { 38 | 'userId': user_id, 39 | 'info': 'media fanGrade', 40 | } 41 | room_password = self.get_params(url, "pwd") 42 | if not room_password: 43 | room_password = '' 44 | data2 = { 45 | 'action': 'watch', 46 | 'userId': user_id, 47 | 'password': room_password, 48 | 'shareLinkType': '', 49 | } 50 | 51 | result = {"anchor_name": "", "is_live": False} 52 | json_str = await async_req('https://api.pandalive.co.kr/v1/member/bj', 53 | proxy_addr=self.proxy_addr, headers=self.pc_headers, data=data) 54 | json_data = json.loads(json_str) 55 | if not process_data: 56 | return json_data 57 | anchor_id = json_data['bjInfo']['id'] 58 | anchor_name = f"{json_data['bjInfo']['nick']}-{anchor_id}" 59 | result['anchor_name'] = anchor_name 60 | live_status = 'media' in json_data 61 | 62 | if live_status: 63 | json_str = await async_req(url2, proxy_addr=self.proxy_addr, headers=self.pc_headers, data=data2) 64 | json_data = json.loads(json_str) 65 | if 'errorData' in json_data: 66 | if json_data['errorData']['code'] == 'needAdult': 67 | raise RuntimeError(f"{url} The live room requires login and is only accessible to adults. Please " 68 | f"correctly fill in the login cookie in the configuration file.") 69 | else: 70 | raise RuntimeError(json_data['errorData']['code'], json_data['message']) 71 | play_url = json_data['PlayList']['hls'][0]['url'] 72 | play_url_list = await self.get_play_url_list(m3u8=play_url, proxy=self.proxy_addr, headers=self.pc_headers) 73 | result |= {'is_live': True, 'm3u8_url': play_url, 'play_url_list': play_url_list} 74 | return result 75 | 76 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 77 | """ 78 | Fetches the stream URL for a live room and wraps it into a StreamData object. 
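        Example:
            Illustrative: json_data is the dictionary returned by fetch_web_stream_data, and
            video_quality accepts a label such as 'OD' or 'HD' (or an index into this
            package's OD/BD/UHD/HD/SD/LD order):
            >>> stream_obj = asyncio.run(stream.fetch_stream_url(info, video_quality='HD'))
            >>> print(stream_obj.m3u8_url)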
79 | """ 80 | data = await self.get_stream_url(json_data, video_quality, spec=True, platform='PandaTV') 81 | return wrap_stream(data) 82 | 83 | -------------------------------------------------------------------------------- /streamget/platforms/piaopiao/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/piaopiao/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/piaopiao/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ...data import StreamData, wrap_stream 4 | from ...requests.async_http import async_req 5 | from ..base import BaseLiveStream 6 | 7 | 8 | class PiaopaioLiveStream(BaseLiveStream): 9 | """ 10 | A class for fetching and processing Piaopiao live stream information. 11 | """ 12 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 13 | super().__init__(proxy_addr, cookies) 14 | self.pc_headers = self._get_pc_headers() 15 | 16 | def _get_pc_headers(self) -> dict: 17 | return { 18 | 'content-type': 'application/json', 19 | 'origin': 'https://m.pp.weimipopo.com', 20 | 'referer': 'https://m.pp.weimipopo.com/', 21 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 22 | 'cookie': self.cookies or '', 23 | } 24 | 25 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 26 | """ 27 | Fetches web stream data for a live room. 28 | 29 | Args: 30 | url (str): The room URL. 31 | process_data (bool): Whether to process the data. Defaults to True. 32 | 33 | Returns: 34 | dict: A dictionary containing anchor name, live status, room URL, and title. 35 | """ 36 | room_id = self.get_params(url, 'anchorUid') 37 | json_data = { 38 | 'inviteUuid': '', 39 | 'anchorUuid': room_id, 40 | } 41 | 42 | if 'catshow' in url: 43 | api = 'https://api.catshow168.com/live/preview' 44 | self.pc_headers['Origin'] = 'https://h.catshow168.com' 45 | self.pc_headers['Referer'] = 'https://h.catshow168.com' 46 | else: 47 | api = 'https://api.pp.weimipopo.com/live/preview' 48 | json_str = await async_req(api, json_data=json_data, proxy_addr=self.proxy_addr, headers=self.pc_headers) 49 | json_data = json.loads(json_str) 50 | if not process_data: 51 | return json_data 52 | live_info = json_data['data'] 53 | anchor_name = live_info['name'] 54 | live_status = live_info['living'] 55 | result = {"anchor_name": anchor_name, "is_live": False} 56 | if live_status: 57 | m3u8_url = live_info['pullUrl'] 58 | result |= {'is_live': True, 'm3u8_url': m3u8_url, 'record_url': m3u8_url} 59 | return result 60 | 61 | @staticmethod 62 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 63 | """ 64 | Fetches the stream URL for a live room and wraps it into a StreamData object. 
65 | """ 66 | json_data |= {"platform": "飘飘直播"} 67 | return wrap_stream(json_data) 68 | -------------------------------------------------------------------------------- /streamget/platforms/popkontv/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/popkontv/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/qiandurebo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/qiandurebo/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/qiandurebo/live_stream.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from ...data import StreamData, wrap_stream 4 | from ...requests.async_http import async_req 5 | from ..base import BaseLiveStream 6 | 7 | 8 | class QiandureboLiveStream(BaseLiveStream): 9 | """ 10 | A class for fetching and processing Qiandurebo live stream information. 11 | """ 12 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 13 | super().__init__(proxy_addr, cookies) 14 | self.pc_headers = self._get_pc_headers() 15 | 16 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 17 | """ 18 | Fetches web stream data for a live room. 19 | 20 | Args: 21 | url (str): The room URL. 22 | process_data (bool): Whether to process the data. Defaults to True. 23 | 24 | Returns: 25 | dict: A dictionary containing anchor name, live status, room URL, and title. 26 | """ 27 | html_str = await async_req(url=url, proxy_addr=self.proxy_addr, headers=self.pc_headers) 28 | data = re.search('var user = (.*?)\r\n\\s+user\\.play_url', html_str, re.DOTALL).group(1) 29 | anchor_name = re.findall('"zb_nickname": "(.*?)",\r\n', data) 30 | 31 | result = {"anchor_name": "", "is_live": False} 32 | if len(anchor_name) > 0: 33 | result['anchor_name'] = anchor_name[0] 34 | play_url = re.findall('"play_url": "(.*?)",\r\n', data) 35 | 36 | if len(play_url) > 0 and 'common-text-center" style="display:block' not in html_str: 37 | result |= { 38 | 'anchor_name': anchor_name[0], 39 | 'is_live': True, 40 | 'flv_url': play_url[0], 41 | 'record_url': play_url[0] 42 | } 43 | return result 44 | 45 | @staticmethod 46 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 47 | """ 48 | Fetches the stream URL for a live room and wraps it into a StreamData object. 
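        Example:
            Illustrative only; json_data would normally come from fetch_web_stream_data, and
            the URLs below are placeholders:
            >>> import asyncio
            >>> info = {'anchor_name': 'name', 'is_live': True,
            ...         'flv_url': 'http://example.com/live.flv',
            ...         'record_url': 'http://example.com/live.flv'}
            >>> stream_obj = asyncio.run(QiandureboLiveStream.fetch_stream_url(info))
            >>> stream_obj.platform
            '千度热播直播'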
49 | """ 50 | json_data |= {"platform": "千度热播直播"} 51 | return wrap_stream(json_data) 52 | -------------------------------------------------------------------------------- /streamget/platforms/rednote/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/rednote/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/rednote/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | 8 | 9 | class RedNoteLiveStream(BaseLiveStream): 10 | """ 11 | A class for fetching and processing RedNote live stream information. 12 | """ 13 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 14 | super().__init__(proxy_addr, cookies) 15 | self.mobile_headers = self._get_mobile_headers() 16 | 17 | def _get_mobile_headers(self) -> dict: 18 | return { 19 | "user-agent": "ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))", 20 | "xy-common-params": "platform=iOS&sid=session.1722166379345546829388", 21 | "referer": "https://app.xhs.cn/", 22 | } 23 | 24 | async def fetch_app_stream_data(self, url: str, process_data: bool = True) -> dict: 25 | """ 26 | Fetches app stream data for a live room. 27 | 28 | Args: 29 | url (str): The room URL. 30 | process_data (bool): Whether to process the data. Defaults to True. 31 | 32 | Returns: 33 | dict: A dictionary containing anchor name, live status, room URL, and title. 34 | """ 35 | if "xhslink.com" in url: 36 | url = await async_req(url, proxy_addr=self.proxy_addr, headers=self.mobile_headers, redirect_url=True) 37 | 38 | host_id = self.get_params(url, "host_id") 39 | user_id = re.search("/user/profile/(.*?)(?=/|\\?|$)", url) 40 | user_id = user_id.group(1) if user_id else host_id 41 | result = {"anchor_name": '', "is_live": False} 42 | html_str = await async_req(url, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 43 | match_data = re.search("", html_str) 44 | 45 | if match_data: 46 | json_str = match_data.group(1).replace("undefined", "null") 47 | json_data = json.loads(json_str) 48 | if not process_data: 49 | return json_data 50 | 51 | if json_data.get("liveStream"): 52 | stream_data = json_data["liveStream"] 53 | if stream_data.get("liveStatus") == "success": 54 | room_info = stream_data["roomData"]["roomInfo"] 55 | title = room_info.get("roomTitle") 56 | if title and "回放" not in title: 57 | live_link = room_info["deeplink"] 58 | anchor_name = self.get_params(live_link, "host_nickname") 59 | flv_url = self.get_params(live_link, "flvUrl") 60 | room_id = flv_url.split('live/')[1].split('.')[0] 61 | flv_url = f"http://live-source-play.xhscdn.com/live/{room_id}.flv" 62 | m3u8_url = flv_url.replace('.flv', '.m3u8') 63 | result |= { 64 | "anchor_name": anchor_name, 65 | "is_live": True, 66 | "title": title, 67 | "flv_url": flv_url, 68 | "m3u8_url": m3u8_url, 69 | 'record_url': flv_url 70 | } 71 | return result 72 | 73 | profile_url = f"https://www.xiaohongshu.com/user/profile/{user_id}" 74 | html_str = await async_req(profile_url, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 75 | anchor_name = re.search("@(.*?) 
的个人主页", html_str) 76 | if anchor_name: 77 | result["anchor_name"] = anchor_name.group(1) 78 | 79 | return result 80 | 81 | @staticmethod 82 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 83 | """ 84 | Fetches the stream URL for a live room and wraps it into a StreamData object. 85 | """ 86 | json_data |= {"platform": "小红书"} 87 | return wrap_stream(json_data) 88 | 89 | -------------------------------------------------------------------------------- /streamget/platforms/shopee/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/shopee/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/shopee/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ...data import StreamData, wrap_stream 4 | from ...requests.async_http import async_req 5 | from ..base import BaseLiveStream 6 | 7 | 8 | class ShopeeLiveStream(BaseLiveStream): 9 | """ 10 | A class for fetching and processing Shopee live stream information. 11 | """ 12 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 13 | super().__init__(proxy_addr, cookies) 14 | self.mobile_headers = self._get_mobile_headers() 15 | 16 | def _get_mobile_headers(self) -> dict: 17 | 18 | return { 19 | 'accept': 'application/json, text/plain, */*', 20 | 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 21 | 'referer': 'https://live.shopee.sg/share?from=live&session=802458&share_user_id=', 22 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 23 | 'cookie': self.cookies or '', 24 | } 25 | 26 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 27 | """ 28 | Fetches web stream data for a live room. 29 | 30 | Args: 31 | url (str): The room URL. 32 | process_data (bool): Whether to process the data. Defaults to True. 33 | 34 | Returns: 35 | dict: A dictionary containing anchor name, live status, room URL, and title. 
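        Example:
            A minimal usage sketch showing the typical fetch_web_stream_data -> fetch_stream_url flow;
            the share URL below is a placeholder, not a real live session:

                import asyncio

                from streamget.platforms.shopee.live_stream import ShopeeLiveStream

                async def main() -> None:
                    live = ShopeeLiveStream()  # optionally pass proxy_addr and cookies
                    info = await live.fetch_web_stream_data("https://live.shopee.sg/share?from=live&session=802458")
                    stream = await live.fetch_stream_url(info)  # wraps the dict into a StreamData object
                    print(stream)

                asyncio.run(main())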
36 | """ 37 | result = {"anchor_name": "", "is_live": False} 38 | is_living = False 39 | 40 | if 'live.shopee' not in url and 'uid' not in url: 41 | url = await async_req(url, proxy_addr=self.proxy_addr, headers=self.mobile_headers, redirect_url=True) 42 | 43 | if 'live.shopee' in url: 44 | host_suffix = url.split('/')[2].rsplit('.', maxsplit=1)[1] 45 | is_living = self.get_params(url, 'uid') is None 46 | else: 47 | host_suffix = url.split('/')[2].split('.', maxsplit=1)[0] 48 | 49 | uid = self.get_params(url, 'uid') 50 | api_host = f'https://live.shopee.{host_suffix}' 51 | session_id = self.get_params(url, 'session') 52 | if uid: 53 | json_str = await async_req(f'{api_host}/api/v1/shop_page/live/ongoing?uid={uid}', 54 | proxy_addr=self.proxy_addr, headers=self.mobile_headers) 55 | json_data = json.loads(json_str) 56 | if json_data['data']['ongoing_live']: 57 | session_id = json_data['data']['ongoing_live']['session_id'] 58 | is_living = True 59 | else: 60 | json_str = await async_req(f'{api_host}/api/v1/shop_page/live/replay_list?offset=0&limit=1&uid={uid}', 61 | proxy_addr=self.proxy_addr, headers=self.mobile_headers) 62 | json_data = json.loads(json_str) 63 | if json_data['data']['replay']: 64 | result['anchor_name'] = json_data['data']['replay'][0]['nick_name'] 65 | return result 66 | 67 | json_str = await async_req(f'{api_host}/api/v1/session/{session_id}', 68 | proxy_addr=self.proxy_addr, headers=self.mobile_headers) 69 | json_data = json.loads(json_str) 70 | if not json_data.get('data'): 71 | raise Exception( 72 | "Fetch shopee live data failed, please update the address of the live broadcast room and try again.") 73 | uid = json_data['data']['session']['uid'] 74 | anchor_name = json_data['data']['session']['nickname'] 75 | live_status = json_data['data']['session']['status'] 76 | result["anchor_name"] = anchor_name 77 | result['extra'] = {'uid': f'uid={uid}&session={session_id}'} 78 | if live_status == 1 and is_living: 79 | flv_url = json_data['data']['session']['play_url'] 80 | title = json_data['data']['session']['title'] 81 | result |= {'is_live': True, 'title': title, 'flv_url': flv_url, 'record_url': flv_url} 82 | return result 83 | 84 | @staticmethod 85 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 86 | """ 87 | Fetches the stream URL for a live room and wraps it into a StreamData object. 88 | """ 89 | json_data |= {"platform": "Shopee"} 90 | return wrap_stream(json_data) 91 | -------------------------------------------------------------------------------- /streamget/platforms/showroom/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/showroom/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/showroom/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | 8 | 9 | class ShowRoomLiveStream(BaseLiveStream): 10 | """ 11 | A class for fetching and processing ShowRoom live stream information. 
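    Example:
        A minimal usage sketch; the room URL is a placeholder and the quality label is an
        assumption (quality handling lives in BaseLiveStream.get_stream_url, not shown here):

            import asyncio

            from streamget.platforms.showroom.live_stream import ShowRoomLiveStream

            async def main() -> None:
                live = ShowRoomLiveStream()
                info = await live.fetch_web_stream_data("https://www.showroom-live.com/room/profile?room_id=123456")
                # video_quality picks an entry from play_url_list; "OD" (original quality) is assumed here
                stream = await live.fetch_stream_url(info, video_quality="OD")
                print(stream)

            asyncio.run(main())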
12 | """ 13 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 14 | super().__init__(proxy_addr, cookies) 15 | self.pc_headers = self._get_pc_headers() 16 | 17 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 18 | """ 19 | Fetches web stream data for a live room. 20 | 21 | Args: 22 | url (str): The room URL. 23 | process_data (bool): Whether to process the data. Defaults to True. 24 | 25 | Returns: 26 | dict: A dictionary containing anchor name, live status, room URL, and title. 27 | """ 28 | url = url.strip() 29 | if '/room/profile' in url: 30 | room_id = url.split('room_id=')[-1] 31 | else: 32 | html_str = await async_req(url, proxy_addr=self.proxy_addr, headers=self.pc_headers) 33 | room_id = re.search('href="/room/profile\\?room_id=(.*?)"', html_str).group(1) 34 | info_api = f'https://www.showroom-live.com/api/live/live_info?room_id={room_id}' 35 | json_str = await async_req(info_api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 36 | json_data = json.loads(json_str) 37 | anchor_name = json_data['room_name'] 38 | result = {"anchor_name": anchor_name, "is_live": False} 39 | live_status = json_data['live_status'] 40 | if live_status == 2: 41 | result["is_live"] = True 42 | web_api = f'https://www.showroom-live.com/api/live/streaming_url?room_id={room_id}&abr_available=1' 43 | json_str = await async_req(web_api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 44 | if json_str: 45 | json_data = json.loads(json_str) 46 | streaming_url_list = json_data['streaming_url_list'] 47 | 48 | for i in streaming_url_list: 49 | if i['type'] == 'hls_all': 50 | m3u8_url = i['url'] 51 | result['m3u8_url'] = m3u8_url 52 | if m3u8_url: 53 | m3u8_url_list = await self.get_play_url_list( 54 | m3u8_url, proxy=self.proxy_addr, headers=self.pc_headers) 55 | if m3u8_url_list: 56 | result['play_url_list'] = [f"{m3u8_url.rsplit('/', maxsplit=1)[0]}/{i}" for i in 57 | m3u8_url_list] 58 | else: 59 | result['play_url_list'] = [m3u8_url] 60 | result['play_url_list'] = [i.replace('https://', 'http://') for i in 61 | result['play_url_list']] 62 | break 63 | return result 64 | 65 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 66 | """ 67 | Fetches the stream URL for a live room and wraps it into a StreamData object. 68 | """ 69 | data = await self.get_stream_url(json_data, video_quality, spec=True, platform="ShowRoom") 70 | return wrap_stream(data) 71 | 72 | -------------------------------------------------------------------------------- /streamget/platforms/sixroom/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/sixroom/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/sixroom/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | 8 | 9 | class SixRoomLiveStream(BaseLiveStream): 10 | """ 11 | A class for fetching and processing SixRoom live stream information. 
12 | """ 13 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 14 | super().__init__(proxy_addr, cookies) 15 | self.mobile_headers = self._get_mobile_headers() 16 | 17 | def _get_mobile_headers(self) -> dict: 18 | 19 | return { 20 | 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 21 | 'referer': 'https://ios.6.cn/?ver=8.0.3&build=4', 22 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 23 | 'cookie': self.cookies or '', 24 | } 25 | 26 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 27 | """ 28 | Fetches web stream data for a live room. 29 | 30 | Args: 31 | url (str): The room URL. 32 | process_data (bool): Whether to process the data. Defaults to True. 33 | 34 | Returns: 35 | dict: A dictionary containing anchor name, live status, room URL, and title. 36 | """ 37 | room_id = url.split('?')[0].rsplit('/', maxsplit=1)[1] 38 | html_str = await async_req(f'https://v.6.cn/{room_id}', proxy_addr=self.proxy_addr, headers=self.mobile_headers) 39 | room_id = re.search('rid: \'(.*?)\',\n\\s+roomid', html_str).group(1) 40 | data = { 41 | 'av': '3.1', 42 | 'encpass': '', 43 | 'logiuid': '', 44 | 'project': 'v6iphone', 45 | 'rate': '1', 46 | 'rid': '', 47 | 'ruid': room_id, 48 | } 49 | 50 | api = 'https://v.6.cn/coop/mobile/index.php?padapi=coop-mobile-inroom.php' 51 | json_str = await async_req(api, data=data, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 52 | json_data = json.loads(json_str) 53 | if not process_data: 54 | return json_data 55 | flv_title = json_data['content']['liveinfo']['flvtitle'] 56 | anchor_name = json_data['content']['roominfo']['alias'] 57 | result = {"anchor_name": anchor_name, "is_live": False} 58 | if flv_title: 59 | flv_url = f'https://wlive.6rooms.com/httpflv/{flv_title}.flv' 60 | result |= {'is_live': True, 'flv_url': flv_url, 'record_url': flv_url} 61 | return result 62 | 63 | @staticmethod 64 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 65 | """ 66 | Fetches the stream URL for a live room and wraps it into a StreamData object. 67 | """ 68 | json_data |= {"platform": "六间房直播"} 69 | return wrap_stream(json_data) 70 | -------------------------------------------------------------------------------- /streamget/platforms/soop/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/soop/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/taobao/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/taobao/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/taobao/live_stream.py: -------------------------------------------------------------------------------- 1 | import re 2 | import time 3 | import urllib.parse 4 | 5 | import execjs 6 | 7 | from ... import JS_SCRIPT_PATH, utils 8 | from ...data import StreamData, wrap_stream 9 | from ...requests.async_http import async_req 10 | from ..base import BaseLiveStream 11 | 12 | 13 | class TaobaoLiveStream(BaseLiveStream): 14 | """ 15 | A class for fetching and processing Taobao live stream information. 
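    Example:
        A minimal usage sketch; the cookie value and live URL are placeholders. A logged-in
        cookie containing _m_h5_tk is required, and a Node.js runtime must be available because
        the request signature is computed via execjs and js/taobao-sign.js:

            import asyncio

            from streamget.platforms.taobao.live_stream import TaobaoLiveStream

            async def main() -> None:
                live = TaobaoLiveStream(cookies="_m_h5_tk=placeholder_token; other=cookies")
                info = await live.fetch_web_stream_data("https://huodong.m.taobao.com/live?id=123456789")
                stream = await live.fetch_stream_url(info)
                print(stream)

            asyncio.run(main())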
16 | """ 17 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 18 | super().__init__(proxy_addr, cookies) 19 | self.pc_headers = self._get_pc_headers() 20 | 21 | def _get_pc_headers(self) -> dict: 22 | return { 23 | 'referer': 'https://huodong.m.taobao.com/', 24 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0', 25 | 'cookie': self.cookies or '', 26 | } 27 | 28 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 29 | """ 30 | Fetches web stream data for a live room. 31 | 32 | Args: 33 | url (str): The room URL. 34 | process_data (bool): Whether to process the data. Defaults to True. 35 | 36 | Returns: 37 | dict: A dictionary containing anchor name, live status, room URL, and title. 38 | """ 39 | if '_m_h5_tk' not in self.pc_headers['cookie']: 40 | raise Exception('Error: Cookies is empty! please input correct cookies') 41 | 42 | live_id = self.get_params(url, 'id') 43 | if not live_id: 44 | html_str = await async_req(url, proxy_addr=self.proxy_addr, headers=self.pc_headers) 45 | redirect_url = re.findall("var url = '(.*?)';", html_str)[0] 46 | live_id = self.get_params(redirect_url, 'id') 47 | 48 | params = { 49 | 'jsv': '2.7.0', 50 | 'appKey': '12574478', 51 | 't': '1733104933120', 52 | 'sign': '', 53 | 'AntiFlood': 'true', 54 | 'AntiCreep': 'true', 55 | 'api': 'mtop.mediaplatform.live.livedetail', 56 | 'v': '4.0', 57 | 'preventFallback': 'true', 58 | 'type': 'jsonp', 59 | 'dataType': 'jsonp', 60 | 'callback': 'mtopjsonp1', 61 | 'data': '{"liveId":"' + live_id + '","creatorId":null}', 62 | } 63 | 64 | for i in range(2): 65 | app_key = '12574478' 66 | _m_h5_tk = re.findall('_m_h5_tk=(.*?);', self.pc_headers['cookie'])[0] 67 | t13 = int(time.time() * 1000) 68 | pre_sign_str = f'{_m_h5_tk.split("_")[0]}&{t13}&{app_key}&' + params['data'] 69 | try: 70 | with open(f'{JS_SCRIPT_PATH}/taobao-sign.js') as f: 71 | js_code = f.read() 72 | sign = execjs.compile(js_code).call('sign', pre_sign_str) 73 | except execjs.ProgramError: 74 | raise execjs.ProgramError('Failed to execute JS code. 
Please check if the Node.js environment') 75 | params |= {'sign': sign, 't': t13} 76 | api = f'https://h5api.m.taobao.com/h5/mtop.mediaplatform.live.livedetail/4.0/?{urllib.parse.urlencode(params)}' 77 | jsonp_str, new_cookie = await async_req(url=api, proxy_addr=self.proxy_addr, headers=self.pc_headers, 78 | timeout=20, return_cookies=True, include_cookies=True) 79 | json_data = utils.jsonp_to_json(jsonp_str) 80 | if not process_data: 81 | return json_data 82 | ret_msg = json_data['ret'] 83 | if ret_msg == ['SUCCESS::调用成功']: 84 | anchor_name = json_data['data']['broadCaster']['accountName'] 85 | result = {"anchor_name": anchor_name, "is_live": False} 86 | live_status = json_data['data']['streamStatus'] 87 | if live_status == '1': 88 | live_title = json_data['data']['title'] 89 | play_url_list = json_data['data']['liveUrlList'] 90 | 91 | def get_sort_key(item): 92 | definition_priority = { 93 | "lld": 0, "ld": 1, "md": 2, "hd": 3, "ud": 4 94 | } 95 | def_value = item.get('definition') or item.get('newDefinition') 96 | priority = definition_priority.get(def_value, -1) 97 | return priority 98 | 99 | play_url_list = sorted(play_url_list, key=get_sort_key, reverse=True) 100 | result |= {"is_live": True, "title": live_title, "play_url_list": play_url_list, 'live_id': live_id} 101 | 102 | return result 103 | else: 104 | raise Exception(f'Error: Taobao live data fetch failed, {ret_msg[0]}') 105 | 106 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 107 | """ 108 | Fetches the stream URL for a live room and wraps it into a StreamData object. 109 | """ 110 | data = await self.get_stream_url( 111 | json_data, video_quality, url_type='all', hls_extra_key='hlsUrl', 112 | flv_extra_key='flvUrl', platform='淘宝直播') 113 | return wrap_stream(data) 114 | -------------------------------------------------------------------------------- /streamget/platforms/tiktok/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/tiktok/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/tiktok/live_stream.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import re 4 | from operator import itemgetter 5 | 6 | from ...data import StreamData, wrap_stream 7 | from ...requests.async_http import async_req 8 | from ..base import BaseLiveStream 9 | 10 | 11 | class TikTokLiveStream(BaseLiveStream): 12 | """ 13 | A class for fetching and processing TikTok live stream information. 14 | """ 15 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 16 | super().__init__(proxy_addr, cookies) 17 | self.pc_headers = self._get_pc_headers() 18 | 19 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 20 | """ 21 | Fetches web stream data for a live room. 22 | 23 | Args: 24 | url (str): The room URL. 25 | process_data (bool): Whether to process the data. Defaults to True. 26 | 27 | Returns: 28 | dict: A dictionary containing anchor name, live status, room URL, and title. 29 | """ 30 | for i in range(3): 31 | html_str = await async_req(url=url, proxy_addr=self.proxy_addr, headers=self.pc_headers) 32 | await asyncio.sleep(1) 33 | if "We regret to inform you that we have discontinued operating TikTok" in html_str: 34 | msg = re.search('
\n\\s+(We regret to inform you that we have discontinu.*?)\\.\n\\s+
', html_str) 35 | raise ConnectionError( 36 | f"Your proxy node's regional network is blocked from accessing TikTok; please switch to a node in " 37 | f"another region to access. {msg.group(1) if msg else ''}" 38 | ) 39 | if 'UNEXPECTED_EOF_WHILE_READING' not in html_str: 40 | try: 41 | json_str = re.findall( 42 | '', 43 | html_str, re.DOTALL)[0] 44 | except Exception: 45 | raise ConnectionError("Please check if your network can access the TikTok website normally") 46 | json_data = json.loads(json_str) 47 | return json_data 48 | 49 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 50 | """ 51 | Fetches the stream URL for a live room and wraps it into a StreamData object. 52 | """ 53 | if not json_data: 54 | return wrap_stream({"platform": "TikTok", "anchor_name": None, "is_live": False}) 55 | 56 | def get_video_quality_url(stream, q_key) -> list: 57 | play_list = [] 58 | for key in stream: 59 | url_info = stream[key]['main'] 60 | play_url = url_info[q_key] 61 | sdk_params = url_info['sdk_params'] 62 | sdk_params = json.loads(sdk_params) 63 | vbitrate = int(sdk_params['vbitrate']) 64 | resolution = sdk_params['resolution'] 65 | if vbitrate != 0 and resolution: 66 | width, height = map(int, resolution.split('x')) 67 | play_list.append({'url': play_url, 'vbitrate': vbitrate, 'resolution': (width, height)}) 68 | 69 | play_list.sort(key=itemgetter('vbitrate'), reverse=True) 70 | play_list.sort(key=lambda x: (-x['vbitrate'], -x['resolution'][0], -x['resolution'][1])) 71 | return play_list 72 | 73 | live_room = json_data['LiveRoom']['liveRoomUserInfo'] 74 | user = live_room['user'] 75 | anchor_name = f"{user['nickname']}-{user['uniqueId']}" 76 | status = user.get("status", 4) 77 | 78 | result = { 79 | "platform": "TikTok", 80 | "anchor_name": anchor_name, 81 | "is_live": False, 82 | } 83 | 84 | if status == 2: 85 | if 'streamData' not in live_room['liveRoom']: 86 | raise Exception("This live stream may be uncomfortable for some viewers. 
Log in to confirm your age") 87 | data = live_room['liveRoom']['streamData']['pull_data']['stream_data'] 88 | data = json.loads(data).get('data', {}) 89 | flv_url_list = get_video_quality_url(data, 'flv') 90 | m3u8_url_list = get_video_quality_url(data, 'hls') 91 | 92 | while len(flv_url_list) < 5: 93 | flv_url_list.append(flv_url_list[-1]) 94 | while len(m3u8_url_list) < 5: 95 | m3u8_url_list.append(m3u8_url_list[-1]) 96 | video_quality, quality_index = self.get_quality_index(video_quality) 97 | flv_dict: dict = flv_url_list[quality_index] 98 | m3u8_dict: dict = m3u8_url_list[quality_index] 99 | flv_url = flv_dict['url'].replace("https://", "http://") 100 | m3u8_url = m3u8_dict['url'].replace("https://", "http://") 101 | result |= { 102 | 'is_live': True, 103 | 'title': live_room['liveRoom']['title'], 104 | 'quality': video_quality, 105 | 'm3u8_url': m3u8_url, 106 | 'flv_url': flv_url, 107 | 'record_url': m3u8_url or flv_url, 108 | } 109 | return wrap_stream(result) 110 | -------------------------------------------------------------------------------- /streamget/platforms/twitcasting/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/twitcasting/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/twitcasting/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ... import utils 5 | from ...data import StreamData, wrap_stream 6 | from ...requests.async_http import async_req 7 | from ..base import BaseLiveStream 8 | 9 | 10 | class TwitCastingLiveStream(BaseLiveStream): 11 | """ 12 | A class for fetching and processing TwitCasting live stream information. 
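    Example:
        A minimal usage sketch; the account credentials and channel URL are placeholders.
        Appending ?login=true makes fetch_web_stream_data log in with the supplied
        username/password (account_type may be "twitter" for Twitter-based accounts):

            import asyncio

            from streamget.platforms.twitcasting.live_stream import TwitCastingLiveStream

            async def main() -> None:
                live = TwitCastingLiveStream(username="user", password="pass", account_type="twitter")
                info = await live.fetch_web_stream_data("https://twitcasting.tv/some_channel?login=true")
                stream = await live.fetch_stream_url(info)
                print(stream)

            asyncio.run(main())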
13 | """ 14 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None, username: str | None = None, 15 | password: str | None = None, account_type: str | None = None): 16 | super().__init__(proxy_addr, cookies) 17 | self.username = username 18 | self.password = password 19 | self.account_type = account_type 20 | self.mobile_headers = self._get_mobile_headers() 21 | 22 | def _get_mobile_headers(self) -> dict: 23 | return { 24 | 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,' 25 | '*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', 26 | 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 27 | 'content-type': 'application/x-www-form-urlencoded', 28 | 'referer': 'https://twitcasting.tv/indexcaslogin.php?redir=%2Findexloginwindow.php%3Fnext%3D%252F&keep=1', 29 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 30 | 'cookie': self.cookies or '', 31 | } 32 | 33 | async def login_twitcasting(self) -> str | None: 34 | if self.account_type == "twitter": 35 | login_url = 'https://twitcasting.tv/indexpasswordlogin.php' 36 | login_api = 'https://twitcasting.tv/indexpasswordlogin.php?redir=/indexloginwindow.php?next=%2F&keep=1' 37 | else: 38 | login_url = 'https://twitcasting.tv/indexcaslogin.php?redir=%2F&keep=1' 39 | login_api = 'https://twitcasting.tv/indexcaslogin.php?redir=/indexloginwindow.php?next=%2F&keep=1' 40 | 41 | html_str = await async_req(login_url, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 42 | cs_session_id = re.search('', html_str).group(1) 43 | 44 | data = { 45 | 'username': self.username, 46 | 'password': self.password, 47 | 'action': 'login', 48 | 'cs_session_id': cs_session_id, 49 | } 50 | try: 51 | _, cookie_dict = await async_req( 52 | login_api, proxy_addr=self.proxy_addr, headers=self.mobile_headers, 53 | json_data=data, return_cookies=True, timeout=20) 54 | if 'tc_ss' in cookie_dict: 55 | self.cookies = utils.dict_to_cookie_str(cookie_dict) 56 | self.mobile_headers['cookie'] = self.cookies 57 | return self.cookies 58 | except Exception as e: 59 | raise Exception("TwitCasting login error,", e) 60 | 61 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 62 | """ 63 | Fetches web stream data for a live room. 64 | 65 | Args: 66 | url (str): The room URL. 67 | process_data (bool): Whether to process the data. Defaults to True. 68 | 69 | Returns: 70 | dict: A dictionary containing anchor name, live status, room URL, and title. 71 | """ 72 | anchor_id = url.split('/')[3] 73 | 74 | async def get_data() -> tuple: 75 | html_str = await async_req(url, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 76 | anchor = re.search("(.*?) 
\\(@(.*?)\\) 的直播 - Twit", html_str) 77 | title = re.search('<meta name="twitter:title" content="(.*?)">\n\\s+<meta', html_str) 78 | status = re.search('data-is-onlive="(.*?)"\n\\s+data-view-mode', html_str) 79 | movie_id = re.search('data-movie-id="(.*?)" data-audience-id', html_str) 80 | return f'{anchor.group(1).strip()}-{anchor.group(2)}-{movie_id.group(1)}', status.group(1), title.group(1) 81 | 82 | result = {"anchor_name": '', "is_live": False} 83 | new_cookie = None 84 | try: 85 | to_login = self.get_params(url, "login") 86 | if to_login == 'true': 87 | # print("Attempting to log in to TwitCasting...") 88 | new_cookie = await self.login_twitcasting() 89 | if not new_cookie: 90 | raise RuntimeError("TwitCasting login failed, please check if the account password in the " 91 | "configuration file is correct") 92 | # print("TwitCasting login successful! Starting to fetch data...") 93 | self.mobile_headers['Cookie'] = new_cookie 94 | anchor_name, live_status, live_title = await get_data() 95 | except AttributeError: 96 | # print("Failed to retrieve TwitCasting data, attempting to log in...") 97 | new_cookie = await self.login_twitcasting() 98 | if not new_cookie: 99 | raise RuntimeError("TwitCasting login failed, please check if the account and password in the " 100 | "configuration file are correct") 101 | # print("TwitCasting login successful! Starting to fetch data...") 102 | self.mobile_headers['Cookie'] = new_cookie 103 | anchor_name, live_status, live_title = await get_data() 104 | 105 | result["anchor_name"] = anchor_name 106 | if live_status == 'true': 107 | url_streamserver = f"https://twitcasting.tv/streamserver.php?target={anchor_id}&mode=client&player=pc_web" 108 | Twitcasting_str = await async_req(url_streamserver, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 109 | json_data = json.loads(Twitcasting_str) 110 | if not json_data.get('tc-hls') or not json_data['tc-hls'].get("streams"): 111 | raise RuntimeError("No m3u8_url,please check the url") 112 | 113 | stream_dict = json_data['tc-hls']["streams"] 114 | quality_order = {"high": 0, "medium": 1, "low": 2} 115 | sorted_streams = sorted(stream_dict.items(), key=lambda item: quality_order[item[0]]) 116 | play_url_list = [url for quality, url in sorted_streams] 117 | result |= {'title': live_title, 'is_live': True, "play_url_list": play_url_list} 118 | result['new_cookies'] = new_cookie 119 | return result 120 | 121 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 122 | """ 123 | Fetches the stream URL for a live room and wraps it into a StreamData object. 
124 | """ 125 | data = await self.get_stream_url(json_data, video_quality, spec=False, platform='TwitCasting') 126 | return wrap_stream(data) 127 | -------------------------------------------------------------------------------- /streamget/platforms/twitch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/twitch/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/twitch/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import random 3 | import urllib.parse 4 | 5 | from ...data import StreamData, wrap_stream 6 | from ...requests.async_http import async_req 7 | from ..base import BaseLiveStream 8 | 9 | 10 | class TwitchLiveStream(BaseLiveStream): 11 | """ 12 | A class for fetching and processing Twitch live stream information. 13 | """ 14 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None, access_token: str | None = None): 15 | super().__init__(proxy_addr, cookies) 16 | self.access_token = access_token 17 | self.pc_headers = self._get_pc_headers() 18 | 19 | def _get_pc_headers(self) -> dict: 20 | return { 21 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0', 22 | 'accept-language': 'en-US', 23 | 'referer': 'https://www.twitch.tv/', 24 | 'client-id': 'kimne78kx3ncx6brgo4mv6wki5h1ko', 25 | 'client-integrity': self.access_token or '', 26 | 'content-type': 'text/plain;charset=UTF-8', 27 | 'cookie': self.cookies or '', 28 | } 29 | 30 | async def get_twitchtv_room_info(self, url: str) -> tuple: 31 | 32 | uid = url.split('?')[0].rsplit('/', maxsplit=1)[-1] 33 | 34 | data = [ 35 | { 36 | "operationName": "ChannelShell", 37 | "variables": { 38 | "login": uid 39 | }, 40 | "extensions": { 41 | "persistedQuery": { 42 | "version": 1, 43 | "sha256Hash": "580ab410bcd0c1ad194224957ae2241e5d252b2c5173d8e0cce9d32d5bb14efe" 44 | } 45 | } 46 | }, 47 | ] 48 | 49 | json_str = await async_req('https://gql.twitch.tv/gql', proxy_addr=self.proxy_addr, headers=self.pc_headers, 50 | json_data=data) 51 | json_data = json.loads(json_str) 52 | user_data = json_data[0]['data']['userOrError'] 53 | login_name = user_data["login"] 54 | nickname = f"{user_data['displayName']}-{login_name}" 55 | status = True if user_data['stream'] else False 56 | return nickname, status 57 | 58 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 59 | """ 60 | Fetches web stream data for a live room. 61 | 62 | Args: 63 | url (str): The room URL. 64 | process_data (bool): Whether to process the data. Defaults to True. 65 | 66 | Returns: 67 | dict: A dictionary containing anchor name, live status, room URL, and title. 68 | """ 69 | 70 | uid = url.split('?')[0].rsplit('/', maxsplit=1)[-1] 71 | 72 | data = { 73 | "operationName": "PlaybackAccessToken_Template", 74 | "query": "query PlaybackAccessToken_Template($login: String!, $isLive: Boolean!, $vodID: ID!, " 75 | "$isVod: Boolean!, $playerType: String!) 
{ streamPlaybackAccessToken(channelName: $login, " 76 | "params: {platform: \"web\", playerBackend: \"mediaplayer\", playerType: $playerType}) " 77 | "@include(if: $isLive) { value signature authorization { isForbidden forbiddenReasonCode }" 78 | " __typename } videoPlaybackAccessToken(id: $vodID, params: {platform: \"web\", " 79 | "playerBackend: \"mediaplayer\", playerType: $playerType}) @include(if: $isVod) { value " 80 | " signature __typename }}", 81 | "variables": { 82 | "isLive": True, 83 | "login": uid, 84 | "isVod": False, 85 | "vodID": "", 86 | "playerType": "site" 87 | } 88 | } 89 | 90 | json_str = await async_req('https://gql.twitch.tv/gql', proxy_addr=self.proxy_addr, headers=self.pc_headers, 91 | json_data=data) 92 | json_data = json.loads(json_str) 93 | token = json_data['data']['streamPlaybackAccessToken']['value'] 94 | sign = json_data['data']['streamPlaybackAccessToken']['signature'] 95 | 96 | anchor_name, live_status = await self.get_twitchtv_room_info(url.strip()) 97 | result = {"anchor_name": anchor_name, "is_live": live_status} 98 | if live_status: 99 | play_session_id = random.choice(["bdd22331a986c7f1073628f2fc5b19da", "064bc3ff1722b6f53b0b5b8c01e46ca5"]) 100 | params = { 101 | "acmb": "e30=", 102 | "allow_source": "true", 103 | "browser_family": "firefox", 104 | "browser_version": "124.0", 105 | "cdm": "wv", 106 | "fast_bread": "true", 107 | "os_name": "Windows", 108 | "os_version": "NT%2010.0", 109 | "p": "3553732", 110 | "platform": "web", 111 | "play_session_id": play_session_id, 112 | "player_backend": "mediaplayer", 113 | "player_version": "1.28.0-rc.1", 114 | "playlist_include_framerate": "true", 115 | "reassignments_supported": "true", 116 | "sig": sign, 117 | "token": token, 118 | "transcode_mode": "cbr_v1" 119 | } 120 | access_key = urllib.parse.urlencode(params) 121 | m3u8_url = f'https://usher.ttvnw.net/api/channel/hls/{uid}.m3u8?{access_key}' 122 | play_url_list = await self.get_play_url_list(m3u8=m3u8_url, proxy=self.proxy_addr, 123 | headers=self.pc_headers) 124 | result |= {'m3u8_url': m3u8_url, 'play_url_list': play_url_list} 125 | return result 126 | 127 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 128 | """ 129 | Fetches the stream URL for a live room and wraps it into a StreamData object. 130 | """ 131 | data = await self.get_stream_url(json_data, video_quality, spec=True, platform='Twitch') 132 | return wrap_stream(data) 133 | 134 | -------------------------------------------------------------------------------- /streamget/platforms/vvxq/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/vvxq/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/vvxq/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import urllib.parse 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | 8 | 9 | class VVXQLiveStream(BaseLiveStream): 10 | """ 11 | A class for fetching and processing VVXQ live stream information. 
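    Example:
        A minimal usage sketch; the room URL is a placeholder. The room id is read from the
        roomId query parameter, and liveness is probed by requesting the room's m3u8 address:

            import asyncio

            from streamget.platforms.vvxq.live_stream import VVXQLiveStream

            async def main() -> None:
                live = VVXQLiveStream()
                info = await live.fetch_web_stream_data("https://h5webcdn-pro.vvxqiu.com/live?roomId=M123456")
                stream = await live.fetch_stream_url(info)
                print(stream)

            asyncio.run(main())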
12 | """ 13 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 14 | super().__init__(proxy_addr, cookies) 15 | self.mobile_headers = self._get_mobile_headers() 16 | 17 | def _get_mobile_headers(self) -> dict: 18 | return { 19 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 20 | 'access-control-request-method': 'GET', 21 | 'origin': 'https://h5webcdn-pro.vvxqiu.com', 22 | 'referer': 'https://h5webcdn-pro.vvxqiu.com/', 23 | 'cookie': self.cookies or '', 24 | } 25 | 26 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 27 | """ 28 | Fetches web stream data for a live room. 29 | 30 | Args: 31 | url (str): The room URL. 32 | process_data (bool): Whether to process the data. Defaults to True. 33 | 34 | Returns: 35 | dict: A dictionary containing anchor name, live status, room URL, and title. 36 | """ 37 | room_id = self.get_params(url, "roomId") 38 | api_1 = f'https://h5p.vvxqiu.com/activity-center/fanclub/activity/captain/banner?roomId={room_id}&product=vvstar' 39 | json_str = await async_req(api_1, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 40 | json_data = json.loads(json_str) 41 | anchor_name = json_data['data']['anchorName'] 42 | if not anchor_name: 43 | params = { 44 | 'sessionId': '', 45 | 'userId': '', 46 | 'product': 'vvstar', 47 | 'tickToken': '', 48 | 'roomId': room_id, 49 | } 50 | json_str = await async_req( 51 | f'https://h5p.vvxqiu.com/activity-center/halloween2023/banner?{urllib.parse.urlencode(params)}', 52 | proxy_addr=self.proxy_addr, headers=self.mobile_headers 53 | ) 54 | json_data = json.loads(json_str) 55 | anchor_name = json_data['data']['memberVO']['memberName'] 56 | 57 | result = {"anchor_name": anchor_name, "is_live": False} 58 | m3u8_url = f'https://liveplay-pro.wasaixiu.com/live/1400442770_{room_id}_{room_id[2:]}_single.m3u8' 59 | resp = await async_req(m3u8_url, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 60 | if 'Not Found' not in resp: 61 | result |= {'is_live': True, 'm3u8_url': m3u8_url, 'record_url': m3u8_url} 62 | return result 63 | 64 | @staticmethod 65 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 66 | """ 67 | Fetches the stream URL for a live room and wraps it into a StreamData object. 68 | """ 69 | json_data |= {"platform": "VV星球直播"} 70 | return wrap_stream(json_data) 71 | -------------------------------------------------------------------------------- /streamget/platforms/weibo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/weibo/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/weibo/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ...data import StreamData, wrap_stream 4 | from ...requests.async_http import async_req 5 | from ..base import BaseLiveStream 6 | 7 | 8 | class WeiboLiveStream(BaseLiveStream): 9 | """ 10 | A class for fetching and processing Blued live stream information. 
11 | """ 12 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 13 | super().__init__(proxy_addr, cookies) 14 | self.pc_headers = self._get_pc_headers() 15 | 16 | def _get_pc_headers(self) -> dict: 17 | default_cookie = ('SUB=_2AkMRNMCwf8NxqwFRmfwWymPrbI9-' 18 | 'zgzEieKnaDFrJRMxHRl-yT9kqmkhtRB6OrTuX5z9N_7qk9C3xxEmNR-8WLcyo2PM; ' 19 | 'SUBP=0033WrSXqPxfM72-Ws9jqgMF55529P9D9WWemwcqkukCduUO11o9sBqA;') 20 | return { 21 | 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 22 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:124.0) Gecko/20100101 Firefox/124.0', 23 | 'cookie': self.cookies or default_cookie, 24 | } 25 | 26 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 27 | """ 28 | Fetches web stream data for a live room. 29 | 30 | Args: 31 | url (str): The room URL. 32 | process_data (bool): Whether to process the data. Defaults to True. 33 | 34 | Returns: 35 | dict: A dictionary containing anchor name, live status, room URL, and title. 36 | """ 37 | room_id = '' 38 | result = {"anchor_name": '', "is_live": False} 39 | if 'show/' in url: 40 | room_id = url.split('?')[0].split('show/')[1] 41 | else: 42 | uid = url.split('?')[0].rsplit('/u/', maxsplit=1)[1] 43 | web_api = f'https://weibo.com/ajax/statuses/mymblog?uid={uid}&page=1&feature=0' 44 | json_str = await async_req(web_api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 45 | json_data = json.loads(json_str) 46 | for i in json_data['data']['list']: 47 | if 'page_info' in i and i['page_info']['object_type'] == 'live': 48 | room_id = i['page_info']['object_id'] 49 | break 50 | result['anchor_name'] = json_data['data']['list'][0]['user']['screen_name'] 51 | 52 | if room_id: 53 | app_api = f'https://weibo.com/l/pc/anchor/live?live_id={room_id}' 54 | # app_api = f'https://weibo.com/l/!/2/wblive/room/show_pc_live.json?live_id={room_id}' 55 | json_str = await async_req(url=app_api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 56 | json_data = json.loads(json_str) 57 | if not process_data: 58 | return json_data 59 | anchor_name = json_data['data']['user_info']['name'] 60 | result["anchor_name"] = anchor_name 61 | live_status = json_data['data']['item']['status'] 62 | if live_status == 1: 63 | result["is_live"] = True 64 | live_title = json_data['data']['item']['desc'] 65 | play_url_list = json_data['data']['item']['stream_info']['pull'] 66 | m3u8_url = play_url_list['live_origin_hls_url'] 67 | flv_url = play_url_list['live_origin_flv_url'] 68 | result['title'] = live_title 69 | result['play_url_list'] = [ 70 | {"m3u8_url": m3u8_url, "flv_url": flv_url}, 71 | {"m3u8_url": m3u8_url.split('_')[0] + '.m3u8', "flv_url": flv_url.split('_')[0] + '.flv'} 72 | ] 73 | return result 74 | 75 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 76 | """ 77 | Fetches the stream URL for a live room and wraps it into a StreamData object. 
78 | """ 79 | data = await self.get_stream_url(json_data, video_quality, spec=True, platform='微博直播') 80 | return wrap_stream(data) 81 | 82 | -------------------------------------------------------------------------------- /streamget/platforms/winktv/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/winktv/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/winktv/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ...data import StreamData, wrap_stream 4 | from ...requests.async_http import async_req 5 | from ..base import BaseLiveStream 6 | 7 | 8 | class WinkTVLiveStream(BaseLiveStream): 9 | """ 10 | A class for fetching and processing WinkTV live stream information. 11 | """ 12 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 13 | super().__init__(proxy_addr, cookies) 14 | self.pc_headers = self._get_pc_headers() 15 | 16 | def _get_pc_headers(self) -> dict: 17 | return { 18 | 'accept': 'application/json, text/plain, */*', 19 | 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 20 | 'content-type': 'application/x-www-form-urlencoded', 21 | 'referer': 'https://www.winktv.co.kr', 22 | 'origin': 'https://www.winktv.co.kr', 23 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:124.0) Gecko/20100101 Firefox/124.0', 24 | 'cookie': self.cookies or '', 25 | } 26 | 27 | async def get_winktv_bj_info(self, url: str) -> tuple: 28 | 29 | user_id = url.split('?')[0].rsplit('/', maxsplit=1)[-1] 30 | data = { 31 | 'userId': user_id, 32 | 'info': 'media', 33 | } 34 | 35 | info_api = 'https://api.winktv.co.kr/v1/member/bj' 36 | json_str = await async_req(url=info_api, proxy_addr=self.proxy_addr, headers=self.pc_headers, data=data) 37 | json_data = json.loads(json_str) 38 | live_status = 'media' in json_data 39 | anchor_id = json_data['bjInfo']['id'] 40 | anchor_name = f"{json_data['bjInfo']['nick']}-{anchor_id}" 41 | return anchor_name, live_status 42 | 43 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 44 | """ 45 | Fetches web stream data for a live room. 46 | 47 | Args: 48 | url (str): The room URL. 49 | process_data (bool): Whether to process the data. Defaults to True. 50 | 51 | Returns: 52 | dict: A dictionary containing anchor name, live status, room URL, and title. 
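        Example:
            A minimal usage sketch; the channel URL and password are placeholders. Password-protected
            rooms take the password from the pwd query parameter, and adult-only streams additionally
            require logged-in cookies:

                import asyncio

                from streamget.platforms.winktv.live_stream import WinkTVLiveStream

                async def main() -> None:
                    live = WinkTVLiveStream()
                    info = await live.fetch_web_stream_data("https://www.winktv.co.kr/live/play/some_bj?pwd=0000")
                    stream = await live.fetch_stream_url(info)
                    print(stream)

                asyncio.run(main())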
53 | """ 54 | user_id = url.split('?')[0].rsplit('/', maxsplit=1)[-1] 55 | room_password = self.get_params(url, "pwd") 56 | if not room_password: 57 | room_password = '' 58 | data = { 59 | 'action': 'watch', 60 | 'userId': user_id, 61 | 'password': room_password, 62 | 'shareLinkType': '', 63 | } 64 | 65 | anchor_name, live_status = await self.get_winktv_bj_info(url) 66 | result = {"anchor_name": anchor_name, "is_live": live_status} 67 | if live_status: 68 | play_api = 'https://api.winktv.co.kr/v1/live/play' 69 | json_str = await async_req(url=play_api, proxy_addr=self.proxy_addr, headers=self.pc_headers, data=data) 70 | if '403: Forbidden' in json_str: 71 | raise ConnectionError(f"Your network has been banned from accessing WinkTV ({json_str})") 72 | json_data = json.loads(json_str) 73 | if 'errorData' in json_data: 74 | if json_data['errorData']['code'] == 'needAdult': 75 | raise RuntimeError( 76 | f"{url} The live stream is only accessible to logged-in adults. Please ensure that " 77 | f"the cookie is correctly filled in the configuration file after logging in.") 78 | else: 79 | raise RuntimeError(json_data['errorData']['code'], json_data['message']) 80 | if not process_data: 81 | return json_data 82 | m3u8_url = json_data['PlayList']['hls'][0]['url'] 83 | play_url_list = await self.get_play_url_list(m3u8=m3u8_url, proxy=self.proxy_addr, headers=self.pc_headers) 84 | result['m3u8_url'] = m3u8_url 85 | result['play_url_list'] = play_url_list 86 | return result 87 | 88 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 89 | """ 90 | Fetches the stream URL for a live room and wraps it into a StreamData object. 91 | """ 92 | data = await self.get_stream_url(json_data, video_quality, spec=True, platform='WinkTV') 93 | return wrap_stream(data) 94 | 95 | -------------------------------------------------------------------------------- /streamget/platforms/yinbo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/yinbo/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/yinbo/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | import urllib.parse 4 | 5 | from ...data import StreamData, wrap_stream 6 | from ...requests.async_http import async_req 7 | from ..base import BaseLiveStream 8 | 9 | 10 | class YinboLiveStream(BaseLiveStream): 11 | """ 12 | A class for fetching and processing Yinbo live stream information. 13 | """ 14 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 15 | super().__init__(proxy_addr, cookies) 16 | self.mobile_headers = self._get_mobile_headers() 17 | 18 | def _get_mobile_headers(self) -> dict: 19 | return { 20 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 21 | 'accept': 'application/json, text/plain, */*', 22 | 'accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2', 23 | 'referer': 'https://live.ybw1666.com/800005143?promoters=0', 24 | 'cookie': self.cookies or '', 25 | } 26 | 27 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 28 | """ 29 | Fetches web stream data for a live room. 30 | 31 | Args: 32 | url (str): The room URL. 33 | process_data (bool): Whether to process the data. Defaults to True. 
34 | 35 | Returns: 36 | dict: A dictionary containing anchor name, live status, room URL, and title. 37 | """ 38 | room_id = url.split('?')[0].rsplit('/', maxsplit=1)[-1] 39 | params = { 40 | 'roomidx': room_id, 41 | 'currentUrl': f'https://wap.ybw1666.com/{room_id}', 42 | } 43 | play_api = f'https://wap.ybw1666.com/api/ui/room/v1.0.0/live.ashx?{urllib.parse.urlencode(params)}' 44 | json_str = await async_req(play_api, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 45 | json_data = json.loads(json_str) 46 | room_data = json_data['data']['roomInfo'] 47 | anchor_name = room_data['nickname'] 48 | live_status = room_data['live_stat'] 49 | 50 | async def get_live_domain(page_url): 51 | html_str = await async_req(page_url, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 52 | config_json_str = re.findall("var config = (.*?)config.webskins", 53 | html_str, re.DOTALL)[0].rsplit(";", maxsplit=1)[0].strip() 54 | config_json_data = json.loads(config_json_str) 55 | stream_flv_domain = config_json_data['domainpullstream_flv'] 56 | stream_hls_domain = config_json_data['domainpullstream_hls'] 57 | return stream_flv_domain, stream_hls_domain 58 | 59 | result = {"anchor_name": anchor_name, "is_live": False} 60 | if live_status == 1: 61 | flv_domain, hls_domain = await get_live_domain(url) 62 | live_id = room_data['liveID'] 63 | flv_url = f'{flv_domain}/{live_id}.flv' 64 | m3u8_url = f'{hls_domain}/{live_id}.m3u8' 65 | result |= {'is_live': True, 'm3u8_url': m3u8_url, 'flv_url': flv_url, 'record_url': flv_url} 66 | return result 67 | 68 | @staticmethod 69 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 70 | """ 71 | Fetches the stream URL for a live room and wraps it into a StreamData object. 72 | """ 73 | json_data |= {"platform": "音播直播"} 74 | return wrap_stream(json_data) 75 | -------------------------------------------------------------------------------- /streamget/platforms/yiqilive/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/yiqilive/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/yiqilive/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ...data import StreamData, wrap_stream 4 | from ...requests.async_http import async_req 5 | from ..base import BaseLiveStream 6 | 7 | 8 | class YiqiLiveStream(BaseLiveStream): 9 | """ 10 | A class for fetching and processing YiqiLive live stream information. 11 | """ 12 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 13 | super().__init__(proxy_addr, cookies) 14 | self.pc_headers = self._get_pc_headers() 15 | 16 | def _get_pc_headers(self) -> dict: 17 | return { 18 | 'origin': 'https://17.live', 19 | 'referer': 'https://17.live/', 20 | 'user-agent': 'ios/7.830 (ios 17.0; ; iPhone 15 (A2846/A3089/A3090/A3092))', 21 | 'cookie': self.cookies or '', 22 | } 23 | 24 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 25 | """ 26 | Fetches web stream data for a live room. 27 | 28 | Args: 29 | url (str): The room URL. 30 | process_data (bool): Whether to process the data. Defaults to True. 31 | 32 | Returns: 33 | dict: A dictionary containing anchor name, live status, room URL, and title. 
34 | """ 35 | room_id = url.split('?')[0].rsplit('/', maxsplit=1)[-1] 36 | api_1 = f'https://wap-api.17app.co/api/v1/user/room/{room_id}' 37 | json_str = await async_req(api_1, proxy_addr=self.proxy_addr, headers=self.pc_headers) 38 | json_data = json.loads(json_str) 39 | anchor_name = json_data["displayName"] 40 | result = {"anchor_name": anchor_name, "is_live": False} 41 | json_data = { 42 | 'liveStreamID': room_id, 43 | } 44 | api_1 = f'https://wap-api.17app.co/api/v1/lives/{room_id}/viewers/alive' 45 | json_str = await async_req(api_1, json_data=json_data, proxy_addr=self.proxy_addr, headers=self.pc_headers) 46 | json_data = json.loads(json_str) 47 | live_status = json_data.get("status") 48 | if live_status and live_status == 2: 49 | flv_url = json_data['pullURLsInfo']['rtmpURLs'][0]['urlHighQuality'] 50 | result |= {'is_live': True, 'flv_url': flv_url, 'record_url': flv_url} 51 | return result 52 | 53 | @staticmethod 54 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 55 | """ 56 | Fetches the stream URL for a live room and wraps it into a StreamData object. 57 | """ 58 | json_data |= {"platform": "17直播"} 59 | return wrap_stream(json_data) 60 | 61 | -------------------------------------------------------------------------------- /streamget/platforms/youtube/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/youtube/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/youtube/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | 8 | 9 | class YoutubeLiveStream(BaseLiveStream): 10 | """ 11 | A class for fetching and processing Youtube live stream information. 12 | """ 13 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 14 | super().__init__(proxy_addr, cookies) 15 | self.pc_headers = self._get_pc_headers() 16 | 17 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 18 | """ 19 | Fetches web stream data for a live room. 20 | 21 | Args: 22 | url (str): The room URL. 23 | process_data (bool): Whether to process the data. Defaults to True. 24 | 25 | Returns: 26 | dict: A dictionary containing anchor name, live status, room URL, and title. 
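        Example:
            A minimal usage sketch; the watch URL, cookie string, and proxy address are placeholders.
            Logged-in cookies may be required (an error asking for cookies is raised when videoDetails
            is missing), and a proxy can be passed when YouTube is not directly reachable:

                import asyncio

                from streamget.platforms.youtube.live_stream import YoutubeLiveStream

                async def main() -> None:
                    live = YoutubeLiveStream(proxy_addr="http://127.0.0.1:7890", cookies="SID=placeholder")
                    info = await live.fetch_web_stream_data("https://www.youtube.com/watch?v=xxxxxxxxxxx")
                    stream = await live.fetch_stream_url(info)
                    print(stream)

                asyncio.run(main())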
27 | """ 28 | html_str = await async_req(url, proxy_addr=self.proxy_addr, headers=self.pc_headers) 29 | json_str = re.search('var ytInitialPlayerResponse = (.*?);var meta = document\\.createElement', html_str).group( 30 | 1) 31 | json_data = json.loads(json_str) 32 | if not process_data: 33 | return json_data 34 | result = {"anchor_name": "", "is_live": False} 35 | if 'videoDetails' not in json_data: 36 | raise Exception( 37 | "Error: Please log in to YouTube on your device's webpage and configure cookies in the config.ini") 38 | result['anchor_name'] = json_data['videoDetails']['author'] 39 | live_status = json_data['videoDetails'].get('isLive') 40 | if live_status: 41 | live_title = json_data['videoDetails']['title'] 42 | m3u8_url = json_data['streamingData']["hlsManifestUrl"] 43 | play_url_list = await self.get_play_url_list(m3u8_url, proxy=self.proxy_addr, headers=self.pc_headers) 44 | result |= {"is_live": True, "title": live_title, "m3u8_url": m3u8_url, "play_url_list": play_url_list} 45 | return result 46 | 47 | async def fetch_stream_url(self, json_data: dict, video_quality: str | int | None = None) -> StreamData: 48 | """ 49 | Fetches the stream URL for a live room and wraps it into a StreamData object. 50 | """ 51 | data = await self.get_stream_url(json_data, video_quality, spec=True, platform='Youtube') 52 | return wrap_stream(data) 53 | 54 | -------------------------------------------------------------------------------- /streamget/platforms/yy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/yy/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/yy/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | import time 4 | import urllib.parse 5 | 6 | from ...data import StreamData, wrap_stream 7 | from ...requests.async_http import async_req 8 | from ..base import BaseLiveStream 9 | 10 | 11 | class YYLiveStream(BaseLiveStream): 12 | """ 13 | A class for fetching and processing YY live stream information. 14 | """ 15 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 16 | super().__init__(proxy_addr, cookies) 17 | self.pc_headers = self._get_pc_headers() 18 | 19 | def _get_pc_headers(self) -> dict: 20 | return { 21 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0', 22 | 'accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2', 23 | 'cookie': self.cookies or '', 24 | } 25 | 26 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 27 | """ 28 | Fetches web stream data for a live room. 29 | 30 | Args: 31 | url (str): The room URL. 32 | process_data (bool): Whether to process the data. Defaults to True. 33 | 34 | Returns: 35 | dict: A dictionary containing anchor name, live status, room URL, and title. 
36 | """ 37 | html_str = await async_req(url.strip(), proxy_addr=self.proxy_addr, headers=self.pc_headers) 38 | anchor_name = re.search('nick: "(.*?)",\n\\s+logo', html_str).group(1) 39 | cid = re.search('sid : "(.*?)",\n\\s+ssid', html_str, re.DOTALL).group(1) 40 | 41 | data = ('{"head":{"seq":1701869217590,"appidstr":"0","bidstr":"121","cidstr":"' + cid + '","sidstr":"' + cid + 42 | '","uid64":0,"client_type":108,"client_ver":"5.17.0","stream_sys_ver":1,"app":"yylive_web",' 43 | '"playersdk_ver":"5.17.0","thundersdk_ver":"0","streamsdk_ver":"5.17.0"},' 44 | '"client_attribute":{"client":"web","model":"web0","cpu":"","graphics_card":"",' 45 | '"os":"chrome","osversion":"0","vsdk_version":"","app_identify":"","app_version":"",' 46 | '"business":"","width":"1920","height":"1080","scale":"","client_type":8,"h265":0},' 47 | '"avp_parameter":{"version":1,"client_type":8,"service_type":0,"imsi":0,"send_time":1701869217,' 48 | '"line_seq":-1,"gear":4,"ssl":1,"stream_format":0}}') 49 | data_bytes = data.encode('utf-8') 50 | params = { 51 | "uid": "0", 52 | "cid": cid, 53 | "sid": cid, 54 | "appid": "0", 55 | "sequence": "1701869217590", 56 | "encode": "json" 57 | } 58 | api = f'https://stream-manager.yy.com/v3/channel/streams?{urllib.parse.urlencode(params)}' 59 | json_str = await async_req(api, data=data_bytes, proxy_addr=self.proxy_addr, headers=self.pc_headers) 60 | json_data = json.loads(json_str) 61 | json_data['anchor_name'] = anchor_name 62 | 63 | params = { 64 | 'uid': '', 65 | 'sid': cid, 66 | 'ssid': cid, 67 | '_': int(time.time() * 1000), 68 | } 69 | detail_api = f'https://www.yy.com/live/detail?{urllib.parse.urlencode(params)}' 70 | json_str2 = await async_req(detail_api, proxy_addr=self.proxy_addr, headers=self.pc_headers) 71 | json_data2 = json.loads(json_str2) 72 | json_data['title'] = json_data2['data']['roomName'] 73 | return json_data 74 | 75 | @staticmethod 76 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 77 | """ 78 | Fetches the stream URL for a live room and wraps it into a StreamData object. 79 | """ 80 | anchor_name = json_data.get('anchor_name', '') 81 | result = { 82 | "platform": "YY直播", 83 | "anchor_name": anchor_name, 84 | "is_live": False, 85 | } 86 | if 'avp_info_res' in json_data: 87 | stream_line_addr = json_data['avp_info_res']['stream_line_addr'] 88 | cdn_info = list(stream_line_addr.values())[0] 89 | flv_url = cdn_info['cdn_info']['url'] 90 | result |= { 91 | 'is_live': True, 92 | 'title': json_data['title'], 93 | 'quality': 'OD', 94 | 'flv_url': flv_url, 95 | 'record_url': flv_url 96 | } 97 | return wrap_stream(result) 98 | -------------------------------------------------------------------------------- /streamget/platforms/zhihu/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/platforms/zhihu/__init__.py -------------------------------------------------------------------------------- /streamget/platforms/zhihu/live_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from ...data import StreamData, wrap_stream 5 | from ...requests.async_http import async_req 6 | from ..base import BaseLiveStream 7 | 8 | 9 | class ZhihuLiveStream(BaseLiveStream): 10 | """ 11 | A class for fetching and processing Zhihu live stream information. 
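    Example:
        A minimal usage sketch; the theater URL below is a placeholder for a real Zhihu live link.

        >>> import asyncio
        >>> async def main():
        ...     stream = ZhihuLiveStream(proxy_addr=None, cookies=None)
        ...     data = await stream.fetch_web_stream_data("https://www.zhihu.com/theater/xxxxxx")  # placeholder URL
        ...     info = await ZhihuLiveStream.fetch_stream_url(data)
        ...     print(info)
        >>> asyncio.run(main())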
12 | """ 13 | def __init__(self, proxy_addr: str | None = None, cookies: str | None = None): 14 | super().__init__(proxy_addr, cookies) 15 | self.mobile_headers = self._get_mobile_headers() 16 | 17 | def _get_mobile_headers(self) -> dict: 18 | return { 19 | 'user-agent': 'osee2unifiedRelease/21914 osee2unifiedReleaseVersion/10.39.0 Mozilla/5.0 (iPhone; CPU ' 20 | 'iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148', 21 | 'accept': 'application/json, text/plain, */*', 22 | 'accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2', 23 | 'referer': 'https://live.ybw1666.com/800005143?promoters=0', 24 | 'cookie': self.cookies or '', 25 | } 26 | 27 | async def fetch_web_stream_data(self, url: str, process_data: bool = True) -> dict: 28 | """ 29 | Fetches web stream data for a live room. 30 | 31 | Args: 32 | url (str): The room URL. 33 | process_data (bool): Whether to process the data. Defaults to True. 34 | 35 | Returns: 36 | dict: A dictionary containing anchor name, live status, room URL, and title. 37 | """ 38 | 39 | result = {"anchor_name": '', "is_live": False} 40 | if 'people/' in url: 41 | user_id = url.split('people/')[1] 42 | api = f'https://api.zhihu.com/people/{user_id}/profile?profile_new_version=' 43 | json_str = await async_req(api, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 44 | json_data = json.loads(json_str) 45 | result['anchor_name'] = json_data['name'] 46 | live_page_url = json_data['drama']['living_theater']['theater_url'] 47 | else: 48 | live_page_url = url 49 | 50 | web_id = live_page_url.split('?')[0].rsplit('/', maxsplit=1)[-1] 51 | html_str = await async_req(live_page_url, proxy_addr=self.proxy_addr, headers=self.mobile_headers) 52 | json_str2 = re.search('<script id="js-initialData" type="text/json">(.*?)</script>', html_str) 53 | if not json_str2: 54 | return result 55 | json_data2 = json.loads(json_str2.group(1)) 56 | if not process_data: 57 | return json_data2 58 | live_data = json_data2['initialState']['theater']['theaters'][web_id] 59 | anchor_name = live_data['actor']['name'] 60 | result['anchor_name'] = anchor_name 61 | live_status = live_data['drama']['status'] 62 | if live_status == 1: 63 | live_title = live_data['theme'] 64 | play_url = live_data['drama']['playInfo'] 65 | result |= { 66 | 'is_live': True, 67 | 'title': live_title, 68 | 'm3u8_url': play_url['hlsUrl'], 69 | 'flv_url': play_url['playUrl'], 70 | 'record_url': play_url['hlsUrl'] 71 | } 72 | return result 73 | 74 | @staticmethod 75 | async def fetch_stream_url(json_data: dict, video_quality: str | int | None = None) -> StreamData: 76 | """ 77 | Fetches the stream URL for a live room and wraps it into a StreamData object. 78 | """ 79 | json_data |= {"platform": "知乎直播"} 80 | return wrap_stream(json_data) 81 | -------------------------------------------------------------------------------- /streamget/requests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ihmily/streamget/2e44fccbb6cfb99077e728c34bb5c96f618bc0bf/streamget/requests/__init__.py -------------------------------------------------------------------------------- /streamget/requests/async_http.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | import httpx 4 | 5 | from .. 
import utils 6 | 7 | OptionalStr = str | None 8 | OptionalDict = dict[str, Any] | None 9 | 10 | 11 | async def async_req( 12 | url: str, 13 | proxy_addr: OptionalStr = None, 14 | headers: OptionalDict = None, 15 | data: dict | bytes | None = None, 16 | json_data: dict | list | None = None, 17 | timeout: int = 20, 18 | redirect_url: bool = False, 19 | return_cookies: bool = False, 20 | include_cookies: bool = False, 21 | verify: bool = False, 22 | http2: bool = True 23 | ) -> OptionalDict | OptionalStr | tuple: 24 | """ 25 | Sends an asynchronous HTTP request to the specified URL. 26 | 27 | This function supports both GET and POST requests. It allows for customization of headers, 28 | data, and other request parameters. It also handles proxy addresses, SSL verification, 29 | and HTTP/2 support. 30 | 31 | Args: 32 | url (str): The URL to send the request to. 33 | proxy_addr (OptionalStr): The proxy address to use. Defaults to None. 34 | headers (OptionalDict): Custom headers to include in the request. Defaults to None. 35 | data (dict | bytes | None): Data to send in the request body. Defaults to None. 36 | json_data (dict | list | None): JSON data to send in the request body. Defaults to None. 37 | timeout (int): The request timeout in seconds. Defaults to 20. 38 | redirect_url (bool): If True, returns the final URL after redirects. Defaults to False. 39 | return_cookies (bool): If True, returns the response cookies. Defaults to False. 40 | include_cookies (bool): If True, includes cookies in the response tuple. Defaults to False. 41 | verify (bool): If True, verifies the SSL certificate. Defaults to False. 42 | http2 (bool): If True, enables HTTP/2 support. Defaults to True. 43 | 44 | Returns: 45 | OptionalDict | OptionalStr | tuple: The response text, the final URL, the cookies dict, 46 | or a tuple containing the response text and cookies, depending on the flags set. 47 | 48 | Raises: 49 | Exception: If an error occurs during the request. 50 | 51 | Example: 52 | >>> import asyncio 53 | >>> async def main(): 54 | ... result = await async_req("https://example.com", proxy_addr="http://proxy.example.com") 55 | ... print(result) 56 | >>> asyncio.run(main()) 57 | Response text 58 | 59 | Note: 60 | - If `data` or `json_data` is provided, a POST request is sent; otherwise, a GET request is sent. 61 | - The `redirect_url` parameter only returns the final URL after following redirects. 62 | - If `return_cookies` is True, the function returns a tuple containing the response text and cookies.
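        Example (POST):
            A minimal sketch of a POST request; the URL and JSON payload below are placeholders.
            Passing `data` or `json_data` switches the request from GET to POST, as noted above.

            >>> async def post_example():
            ...     resp = await async_req(
            ...         "https://example.com/api",  # placeholder URL
            ...         json_data={"key": "value"},  # placeholder payload
            ...         headers={"content-type": "application/json"},
            ...     )
            ...     print(resp)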
63 | """ 64 | if headers is None: 65 | headers = {} 66 | try: 67 | proxy_addr = utils.handle_proxy_addr(proxy_addr) 68 | if data or json_data: 69 | async with httpx.AsyncClient(proxy=proxy_addr, timeout=timeout, verify=verify, http2=http2) as client: 70 | response = await client.post(url, data=data, json=json_data, headers=headers) 71 | else: 72 | async with httpx.AsyncClient(proxy=proxy_addr, timeout=timeout, verify=verify, http2=http2) as client: 73 | response = await client.get(url, headers=headers, follow_redirects=True) 74 | 75 | if redirect_url: 76 | return str(response.url) 77 | elif return_cookies: 78 | cookies_dict = dict(response.cookies.items()) 79 | return (response.text, cookies_dict) if include_cookies else cookies_dict 80 | else: 81 | resp_str = response.text 82 | except Exception as e: 83 | resp_str = str(e) 84 | 85 | return resp_str 86 | 87 | 88 | async def get_response_status( 89 | url: str, 90 | proxy_addr: OptionalStr = None, 91 | headers: OptionalDict = None, 92 | timeout: int = 10, 93 | verify: bool = False, 94 | http2: bool = True 95 | ) -> int: 96 | """ 97 | Retrieves the HTTP status code returned by a URL. 98 | 99 | This function sends a HEAD request to the specified URL and checks the response status code. 100 | It supports custom headers, proxy addresses, and SSL verification. 101 | 102 | Args: 103 | url (str): The URL to check. 104 | proxy_addr (OptionalStr): The proxy address to use. Defaults to None. 105 | headers (OptionalDict): Custom headers to include in the request. Defaults to None. 106 | timeout (int): The request timeout in seconds. Defaults to 10. 107 | verify (bool): If True, verifies the SSL certificate. Defaults to False. 108 | http2 (bool): If True, enables HTTP/2 support. Defaults to True. 109 | 110 | Returns: 111 | int: The HTTP status code, such as 200, 304, or 403. 112 | 113 | Raises: 114 | Exception: Request errors are caught internally; the error is printed rather than raised. 115 | 116 | Example: 117 | >>> import asyncio 118 | >>> async def main(): 119 | ... status = await get_response_status("https://example.com") 120 | ... print(status) 121 | >>> asyncio.run(main()) 122 | 200 123 | 124 | Note: 125 | - This function uses the HEAD request method, which is lightweight and suitable for checking status codes. 126 | - If the request fails, the error is printed and False is returned instead of a status code.
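        Example (with proxy):
            A minimal sketch that probes a stream URL through a proxy; both addresses below are
            placeholders. A proxy address given without a scheme gets "http://" prepended internally.

            >>> import asyncio
            >>> asyncio.run(get_response_status("https://example.com/live.m3u8", proxy_addr="127.0.0.1:7890"))  # placeholders
            200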
127 | """ 128 | try: 129 | proxy_addr = utils.handle_proxy_addr(proxy_addr) 130 | async with httpx.AsyncClient(proxy=proxy_addr, timeout=timeout, verify=verify, http2=http2) as client: 131 | response = await client.head(url, headers=headers, follow_redirects=True) 132 | return response.status_code 133 | except Exception as e: 134 | print(e) 135 | return False 136 | -------------------------------------------------------------------------------- /streamget/scripts/node_installer.py: -------------------------------------------------------------------------------- 1 | import platform 2 | import sys 3 | import tarfile 4 | import zipfile 5 | from pathlib import Path 6 | 7 | import httpx 8 | from tqdm import tqdm 9 | 10 | 11 | def install_node(version="22.14.0", install_path=None): 12 | """Cross-platform installation of Node.js to specified path""" 13 | system = platform.system().lower() 14 | node_dist_urls = { 15 | 'windows': f"https://nodejs.org/dist/v{version}/node-v{version}-win-x64.zip", 16 | 'darwin': f"https://nodejs.org/dist/v{version}/node-v{version}-darwin-x64.tar.gz", 17 | 'linux': f"https://nodejs.org/dist/v{version}/node-v{version}-linux-x64.tar.xz" 18 | } 19 | 20 | url = node_dist_urls.get(system) 21 | if not url: 22 | raise NotImplementedError(f"Unsupported platform: {system}") 23 | 24 | # Setup paths 25 | default_path = Path.home() / ".streamget_node" 26 | install_path = Path(install_path) if install_path else default_path 27 | install_path.mkdir(parents=True, exist_ok=True) 28 | archive_path = install_path / f"node-v{version}.{'zip' if system == 'windows' else 'tar.gz'}" 29 | 30 | success = False 31 | try: 32 | print(f"📥 Downloading Node.js v{version}...") 33 | with httpx.Client(timeout=30) as client: 34 | with client.stream("GET", url) as response: 35 | response.raise_for_status() 36 | 37 | total_size = int(response.headers.get('content-length', 0)) 38 | progress_bar = tqdm( 39 | total=total_size, 40 | ncols=100, 41 | unit='iB', 42 | unit_scale=True, 43 | desc="Downloading", 44 | dynamic_ncols=False, 45 | bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]" 46 | ) 47 | 48 | with open(archive_path, 'wb') as f: 49 | for chunk in response.iter_bytes(chunk_size=8192): 50 | f.write(chunk) 51 | progress_bar.update(len(chunk)) 52 | progress_bar.close() 53 | 54 | print("\n📦 Extracting files...") 55 | if system == 'windows': 56 | with zipfile.ZipFile(archive_path) as zf: 57 | file_list = zf.infolist() 58 | with tqdm( 59 | total=len(file_list), 60 | ncols=100, 61 | desc="Extracting", 62 | unit="files", 63 | dynamic_ncols=False 64 | ) as pbar: 65 | for file in file_list: 66 | zf.extract(file, install_path) 67 | pbar.update(1) 68 | else: 69 | with tarfile.open(archive_path) as tf: 70 | members = tf.getmembers() 71 | with tqdm( 72 | total=len(members), 73 | ncols=100, 74 | desc="Extracting", 75 | unit="files", 76 | dynamic_ncols=False 77 | ) as pbar: 78 | for member in members: 79 | tf.extract(member, install_path) 80 | pbar.update(1) 81 | 82 | success = True 83 | 84 | except Exception as e: 85 | print(f"\n❌ Installation failed: {str(e)}") 86 | if archive_path.exists(): 87 | print(f"⚠️ Archive retained for debugging: {archive_path}") 88 | sys.exit(1) 89 | 90 | finally: 91 | if success and archive_path.exists(): 92 | try: 93 | archive_path.unlink() 94 | print(f"♻️ Cleaned up: {archive_path.name}") 95 | except Exception as cleanup_error: 96 | print(f"⚠️ Failed to clean up archive: {str(cleanup_error)}") 97 | 98 | bin_path = install_path / 
f"node-v{version}-{system}-x64" / ("bin" if system != 'windows' else "") 99 | print(f"\n🎉 Installation complete! Please add to PATH:\n👉 {bin_path}\n") 100 | -------------------------------------------------------------------------------- /streamget/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import random 3 | import re 4 | import string 5 | 6 | 7 | class Color: 8 | RED = "\033[31m" 9 | GREEN = "\033[32m" 10 | YELLOW = "\033[33m" 11 | BLUE = "\033[34m" 12 | MAGENTA = "\033[35m" 13 | CYAN = "\033[36m" 14 | WHITE = "\033[37m" 15 | RESET = "\033[0m" 16 | 17 | @staticmethod 18 | def print_colored(text, color): 19 | print(f"{color}{text}{Color.RESET}") 20 | 21 | 22 | def dict_to_cookie_str(cookies_dict: dict) -> str: 23 | cookie_str = '; '.join([f"{key}={value}" for key, value in cookies_dict.items()]) 24 | return cookie_str 25 | 26 | 27 | def remove_emojis(text: str, replace_text: str = '') -> str: 28 | emoji_pattern = re.compile( 29 | "[" 30 | "\U0001F1E0-\U0001F1FF" # flags (iOS) 31 | "\U0001F300-\U0001F5FF" # symbols & pictographs 32 | "\U0001F600-\U0001F64F" # emoticons 33 | "\U0001F680-\U0001F6FF" # transport & map symbols 34 | "\U0001F700-\U0001F77F" # alchemical symbols 35 | "\U0001F780-\U0001F7FF" # Geometric Shapes Extended 36 | "\U0001F800-\U0001F8FF" # Supplemental Arrows-C 37 | "\U0001F900-\U0001F9FF" # Supplemental Symbols and Pictographs 38 | "\U0001FA00-\U0001FA6F" # Chess Symbols 39 | "\U0001FA70-\U0001FAFF" # Symbols and Pictographs Extended-A 40 | "\U00002702-\U000027B0" # Dingbats 41 | "]+", 42 | flags=re.UNICODE 43 | ) 44 | return emoji_pattern.sub(replace_text, text) 45 | 46 | 47 | def handle_proxy_addr(proxy_addr): 48 | if proxy_addr: 49 | if not proxy_addr.startswith('http'): 50 | proxy_addr = 'http://' + proxy_addr 51 | else: 52 | proxy_addr = None 53 | return proxy_addr 54 | 55 | 56 | def generate_random_string(length: int) -> str: 57 | characters = string.ascii_uppercase + string.digits 58 | random_string = ''.join(random.choices(characters, k=length)) 59 | return random_string 60 | 61 | 62 | def jsonp_to_json(jsonp_str: str) -> dict | None: 63 | pattern = r'(\w+)\((.*)\);?$' 64 | match = re.search(pattern, jsonp_str) 65 | 66 | if match: 67 | _, json_str = match.groups() 68 | json_obj = json.loads(json_str) 69 | return json_obj 70 | else: 71 | raise Exception("No JSON data found in JSONP response.") 72 | 73 | --------------------------------------------------------------------------------