├── Recorder ├── README.md ├── SU_Model.txt └── Showup.bat ├── bongacams.py ├── cam4.py ├── camsoda.py ├── chaturbate.py ├── generic.py ├── myfreecams.py ├── showup.py ├── stripchat.py └── zbiornik.py /Recorder/README.md: -------------------------------------------------------------------------------- 1 | Edit the bat file and customize the path of your download folder. 2 | Edit the bat file and text file and adapt it to the site supported by Streamlink eg Chaturbate, MyFreeCams etc. 3 | -------------------------------------------------------------------------------- /Recorder/SU_Model.txt: -------------------------------------------------------------------------------- 1 | Model1 2 | Model2 3 | Model3 -------------------------------------------------------------------------------- /Recorder/Showup.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | SETLOCAL EnableDelayedExpansion 3 | :START 4 | ECHO. 5 | SET n=0 6 | FOR /F "tokens=*" %%A IN (D:\Videos\Showup\SU_Model.txt) DO ( 7 | SET /A n=n+1 8 | SET _fav!n!=%%A 9 | ECHO !n! %%A 10 | ) 11 | ECHO. 12 | SET /P MODEL=Select Model (%M% %MODEL%): 13 | FOR /L %%f IN (1,1,!n!) DO ( 14 | IF /I '%MODEL%'=='%%f' SET M=%%f 15 | ) 16 | SET n=0 17 | FOR /F "tokens=*" %%A IN (D:\Videos\Showup\SU_Model.txt) DO ( 18 | SET /A n=n+1 19 | IF !n!==%M% SET MODEL=%%A 20 | ) 21 | :main 22 | ECHO. 23 | ECHO ##################################################### 24 | ECHO ### SHOWUP ### R E C O R D I N G - 2 4 / 7 ###### 25 | SET hour=%time:~0,2% 26 | IF "%hour:~0,1%" == " " SET hour=0%hour:~1,1% 27 | SET NOW=%date:~0,4%%date:~4,2%%date:~6,4%-%hour%%time:~3,2%%time:~6,2% 28 | FOR /f "tokens=1-2 delims=/:" %%a IN ('time /t') DO (set mytime=%%a%%b) 29 | SET OUT_DIR=D:\Videos\Showup\captures\ 30 | SET FILENAME=%MODEL%_SU_%NOW%.flv 31 | SET OUTPUT=%OUT_DIR%%FILENAME% 32 | SET FNAME=######## %FILENAME% ### %M% ############################## 33 | SET _FNAME_=%FNAME:~5,53% 34 | IF EXIST "%OUT_DIR%" (ECHO %_FNAME_%) ELSE (MD "%OUT_DIR%" 35 | ECHO %_FNAME_%) 36 | ECHO ##################################################### 37 | ECHO. 38 | STREAMLINK "https://showup.tv/%MODEL%" best -o "%OUT_DIR%%FILENAME%" 39 | 40 | TIMEOUT 30 41 | GOTO main 42 | ENDLOCAL 43 | -------------------------------------------------------------------------------- /bongacams.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | import requests 5 | 6 | from urllib.parse import urljoin, urlparse, urlunparse 7 | from streamlink.exceptions import PluginError, NoStreamsError 8 | from streamlink.plugin.api import validate, useragents 9 | from streamlink.plugin import Plugin 10 | from streamlink.stream import HLSStream 11 | from streamlink.utils import update_scheme 12 | 13 | 14 | CONST_HEADERS = {} 15 | CONST_HEADERS['User-Agent'] = useragents.CHROME 16 | CONST_HEADERS['X-Requested-With'] = 'XMLHttpRequest' 17 | 18 | url_re = re.compile(r"(http(s)?://)?(\w{2}.)?(bongacams\d*?\.com)/([\w\d_-]+)") 19 | 20 | schema = validate.Schema({ 21 | "status": "success" 22 | }) 23 | 24 | 25 | class bongacams(Plugin): 26 | @classmethod 27 | def can_handle_url(self, url): 28 | return url_re.match(url) 29 | 30 | def _get_streams(self): 31 | match = url_re.match(self.url) 32 | 33 | LISTING_PATH = 'tools/listing_v3.php' 34 | 35 | stream_page_scheme = 'https' 36 | stream_page_domain = match.group(4) 37 | model_name = match.group(5) 38 | 39 | listing_url = urlunparse((stream_page_scheme, stream_page_domain, LISTING_PATH, '', '', '')) 40 | 41 | # create http session and set headers 42 | http_session = self.session.http 43 | http_session.headers.update(CONST_HEADERS) 44 | 45 | params = { 46 | "livetab": None, 47 | "online_only": True, 48 | "offset": 0, 49 | "model_search[display_name][text]": model_name, 50 | "_online_filter": 0, 51 | "can_pin_models": False, 52 | "limit": 1 53 | } 54 | 55 | response = http_session.get(listing_url, params=params) 56 | 57 | self.logger.debug(response.text) 58 | 59 | if len(http_session.cookies) == 0: 60 | raise PluginError("Can't get a cookies") 61 | if response.status_code != 200: 62 | self.logger.debug("response for {0}:\n{1}".format(response.request.url, response.text)) 63 | raise PluginError("unexpected status code for {0}: {1}".format(response.url, response.status_code)) 64 | 65 | http_session.close() 66 | response = response.json() 67 | schema.validate(response) 68 | 69 | if not model_name.lower() in list([model['username'].lower() for model in response['models']]): 70 | raise NoStreamsError(self.url) 71 | if str(response['online_count']) == '0': 72 | raise NoStreamsError(self.url) 73 | 74 | esid = None 75 | for model in response['models']: 76 | if model['username'].lower() == model_name.lower(): 77 | #if model['room'] not in ('public', 'private', 'fullprivate'): 78 | # raise NoStreamsError(self.url) 79 | esid = model.get('esid') 80 | model_name = model['username'] 81 | 82 | if not esid: 83 | raise PluginError("unknown error, esid={0} for {1}.\nResponse: {2}".format(esid, model_name, response['models'])) 84 | 85 | hls_url = f'https://{esid}.bcvcdn.com/hls/stream_{model_name}/playlist.m3u8' 86 | 87 | if hls_url: 88 | self.logger.debug('HLS URL: {0}'.format(hls_url)) 89 | try: 90 | for s in HLSStream.parse_variant_playlist(self.session, hls_url).items(): 91 | yield s 92 | except Exception as e: 93 | if '404' in str(e): 94 | self.logger.debug(str(e)) 95 | self.logger.debug('Stream is currently offline/private/away') 96 | else: 97 | self.logger.error(str(e)) 98 | return 99 | 100 | 101 | __plugin__ = bongacams 102 | 103 | 104 | -------------------------------------------------------------------------------- /cam4.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from streamlink.plugin import Plugin 4 | from streamlink.plugin.api import validate 5 | from streamlink.stream import HLSStream 6 | from datetime import datetime 7 | 8 | STREAM_INFO = "https://www.cam4.com/rest/v1.0/profile/{0}/streamInfo" 9 | INFO_URL = "https://www.cam4.com/rest/v1.0/search/performer/{0}" 10 | PROFILE_URL = "https://www.cam4.com/rest/v1.0/profile/{0}/info" 11 | 12 | _url_re = re.compile(r"https?://(\w+\.)?cam4\.com/(?P\w+)") 13 | 14 | class Cam4(Plugin): 15 | @classmethod 16 | def can_handle_url(cls, url): 17 | return _url_re.match(url) 18 | 19 | def _get_streams(self): 20 | match = _url_re.match(self.url) 21 | username = match.group("username") 22 | 23 | res = self.session.http.get(INFO_URL.format(username)) 24 | data = self.session.http.json(res) 25 | 26 | online = data["online"] 27 | self.logger.info("Stream status: {0}".format("online" if online else "offline")) 28 | if online: 29 | self.logger.info("Country: {0}".format(data["country"])) 30 | res = self.session.http.get(PROFILE_URL.format(username)) 31 | data = self.session.http.json(res) 32 | self.logger.info("City: {0}".format(data["city"])) 33 | self.logger.info("Body Hair: {0}".format(data["bodyHair"])) 34 | self.logger.info("Main Language: {0}".format(data["mainLanguage"])) 35 | self.logger.info("Breast Size: {0}".format(data["breastSize"])) 36 | self.logger.info("Birthdate: {0}".format(data["birthdate"])) 37 | self.logger.info("Age: {0}".format(int((datetime.now() - datetime.strptime(data["birthdate"], "%Y-%m-%d")).days / 365))) 38 | 39 | res = self.session.http.get(STREAM_INFO.format(username)) 40 | data = self.session.http.json(res) 41 | if data["canUseCDN"]: 42 | sStreamURL = data["cdnURL"] 43 | self.logger.debug("Playlist URL : {0}".format(sStreamURL)) 44 | for s in HLSStream.parse_variant_playlist(self.session, sStreamURL).items(): 45 | self.logger.debug("HLS Stream: {0}".format(s)) 46 | yield s 47 | else: 48 | self.logger.info("Access: private") 49 | 50 | __plugin__ = Cam4 51 | -------------------------------------------------------------------------------- /camsoda.py: -------------------------------------------------------------------------------- 1 | import random 2 | import re 3 | import json 4 | 5 | from streamlink.plugin import Plugin 6 | from streamlink.plugin.api import http 7 | from streamlink.plugin.api import validate 8 | from streamlink.plugin.api import useragents 9 | from streamlink.stream import HLSStream 10 | 11 | _url_re = re.compile(r"http(s)?://(www\.)?camsoda\.com/(?P[^\"\']+)") 12 | 13 | _api_user_schema = validate.Schema( 14 | { 15 | "status": validate.any(int, validate.text), 16 | validate.optional("user"): validate.Schema({ 17 | "chat": validate.Schema ({ 18 | "status": validate.any(int, validate.text) 19 | }) 20 | }) 21 | } 22 | ) 23 | 24 | _api_video_schema = validate.Schema( 25 | { 26 | "token": validate.text, 27 | "edge_servers": [validate.text], 28 | "stream_name": validate.text 29 | } 30 | ) 31 | 32 | 33 | class Camsoda(Plugin): 34 | API_URL_VIDEO = "https://www.camsoda.com/api/v1/video/vtoken/{0}?username=guest_{1}" 35 | HLS_URL_VIDEO_EDGE = "https://{server}/{stream_name}_v1/index.m3u8?token={token}" 36 | HLS_URL_VIDEO = "https://{server}/mp4:{stream_name}_aac/playlist.m3u8?token={token}" 37 | headers = { 38 | "User-Agent": useragents.FIREFOX 39 | } 40 | 41 | @classmethod 42 | def can_handle_url(cls, url): 43 | return _url_re.match(url) 44 | 45 | def _stream_status(self, data_user): 46 | 47 | invalid_username = data_user["status"] is False 48 | if invalid_username: 49 | self.logger.info("No validate username found for {0}".format(self.url)) 50 | return 51 | 52 | is_online = data_user["status"] is True and data_user["user"]["chat"]["status"] == "online" 53 | if is_online is False: 54 | self.logger.info("Stream is currently offline or private") 55 | return 56 | 57 | return True 58 | 59 | def _get_api_video(self, username): 60 | res = http.get(self.API_URL_VIDEO.format(username, str(random.randint(1000, 99999))), headers=self.headers, verify=False) 61 | data_video = http.json(res, schema=_api_video_schema) 62 | return data_video 63 | 64 | def _get_streams(self): 65 | match = _url_re.match(self.url) 66 | username = match.group("username") 67 | username = username.replace("/", "") 68 | 69 | data_video = self._get_api_video(username) 70 | 71 | if data_video["edge_servers"]: 72 | hls_url = self.HLS_URL_VIDEO.format( 73 | server=data_video["edge_servers"][0], 74 | stream_name=data_video["stream_name"], 75 | token=data_video["token"] 76 | ) 77 | if "edge" in data_video["edge_servers"][0]: 78 | self.session.http.verify = False 79 | hls_url = self.HLS_URL_VIDEO_EDGE.format( 80 | server=data_video["edge_servers"][0], 81 | stream_name=data_video["stream_name"], 82 | token=data_video["token"] 83 | ) 84 | 85 | for s in HLSStream.parse_variant_playlist(self.session, hls_url).items(): 86 | yield s 87 | 88 | 89 | __plugin__ = Camsoda 90 | -------------------------------------------------------------------------------- /chaturbate.py: -------------------------------------------------------------------------------- 1 | import re 2 | import uuid 3 | 4 | from streamlink.plugin import Plugin 5 | from streamlink.plugin.api import validate 6 | from streamlink.stream import HLSStream 7 | 8 | API_HLS = "https://chaturbate.com/get_edge_hls_url_ajax/" 9 | 10 | _url_re = re.compile(r"https?://(\w+\.)?chaturbate\.com/(?P\w+)") 11 | 12 | _post_schema = validate.Schema( 13 | { 14 | "url": validate.text, 15 | "room_status": validate.text, 16 | "success": int 17 | } 18 | ) 19 | 20 | 21 | class Chaturbate(Plugin): 22 | @classmethod 23 | def can_handle_url(cls, url): 24 | return _url_re.match(url) 25 | 26 | def _get_streams(self): 27 | match = _url_re.match(self.url) 28 | username = match.group("username") 29 | 30 | CSRFToken = str(uuid.uuid4().hex.upper()[0:32]) 31 | 32 | headers = { 33 | "Content-Type": "application/x-www-form-urlencoded", 34 | "X-CSRFToken": CSRFToken, 35 | "X-Requested-With": "XMLHttpRequest", 36 | "Referer": self.url, 37 | } 38 | 39 | cookies = { 40 | "csrftoken": CSRFToken, 41 | } 42 | 43 | post_data = "room_slug={0}&bandwidth=high".format(username) 44 | 45 | res = self.session.http.post(API_HLS, headers=headers, cookies=cookies, data=post_data) 46 | data = self.session.http.json(res, schema=_post_schema) 47 | 48 | self.logger.info("Stream status: {0}".format(data["room_status"])) 49 | if (data["success"] is True and data["room_status"] == "public" and data["url"]): 50 | for s in HLSStream.parse_variant_playlist(self.session, data["url"]).items(): 51 | yield s 52 | 53 | 54 | __plugin__ = Chaturbate 55 | 56 | 57 | -------------------------------------------------------------------------------- /generic.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | generic streamlink plugin 4 | 5 | source: https://github.com/back-to/generic 6 | issues: https://github.com/back-to/generic/issues 7 | """ 8 | import base64 9 | import codecs 10 | import logging 11 | import os.path 12 | import re 13 | 14 | from html import unescape as html_unescape 15 | from pathlib import Path 16 | from urllib.parse import parse_qsl, unquote, urljoin, urlparse 17 | 18 | from streamlink.exceptions import ( 19 | FatalPluginError, 20 | NoPluginError, 21 | NoStreamsError, 22 | ) 23 | from streamlink.plugin import Plugin, PluginArgument, PluginArguments 24 | from streamlink.plugin.api import useragents 25 | from streamlink.plugin.plugin import HIGH_PRIORITY, NO_PRIORITY 26 | from streamlink.stream import HLSStream, HTTPStream, DASHStream 27 | from streamlink.stream.ffmpegmux import MuxedStream 28 | from streamlink.utils.args import comma_list, num 29 | from streamlink.utils.url import update_scheme 30 | 31 | try: 32 | import youtube_dl 33 | HAS_YTDL = True 34 | except ImportError: 35 | HAS_YTDL = False 36 | 37 | GENERIC_VERSION = '2020-11-17' 38 | 39 | log = logging.getLogger(__name__) 40 | 41 | obfuscatorhtml_chunk_re = re.compile(r'''["'](?P[A-z0-9+/=]+)["']''') 42 | obfuscatorhtml_re = re.compile( 43 | r']*>[^<>]*var\s*(\w+)\s*=\s*\[(?P[^\[\]]+)\];\s*\1\.forEach.*-\s*(?P\d+)[^<>]*', 44 | ) 45 | unpack_packer_re = re.compile( 46 | r'''(?Peval\(function\(p,a,c,k,e,(?:d|r)\).*\))''') 47 | unpack_unescape_re = re.compile(r""" 48 | ]*>[^>]* 49 | document.write\(unescape\(\s* 50 | ["']((?=[^<>"']*%\w{2})[^<>"']+)["'] 51 | \)\);?[^<]*""", re.VERBOSE) 52 | 53 | unpack_source_url_re_1 = re.compile(r'''(?x)source:\s*(?Pwindow\.atob\( 54 | (?P["'])(?P[A-z0-9+/=]+)(?P=q)\)),\s* 55 | mimeType:\s*["']application/vnd\.apple\.mpegurl["'] 56 | ''') 57 | unpack_source_url_re_2 = re.compile(r'''(?x)var\s\w+url=(?Patob\( 58 | (?P["'])(?P[A-z0-9+/=]+)(?P=q)\));''') 59 | unpack_source_url_re_3 = re.compile(r'''(?x)Clappr\.Player\({\s* 60 | source:\s*(?Patob\((?P["'])(?P[A-z0-9+/=]+)(?P=q)\))''') 61 | unpack_u_m3u8_re = re.compile(r'(\\u0022[^\s,]+m3u8[^\s,]*\\u0022)') 62 | 63 | 64 | class UnpackingError(Exception): 65 | """Badly packed source or general error.""" 66 | 67 | 68 | class Packer(object): 69 | """ 70 | Unpacker for Dean Edward's p.a.c.k.e.r 71 | 72 | source: https://github.com/beautify-web/js-beautify/ 73 | version: commit - b0e5f23a2d04db233f428349eb59e63bdefa78bb 74 | 75 | """ 76 | 77 | def __init__(self): 78 | self.beginstr = '' 79 | self.endstr = '' 80 | 81 | def detect(self, source): 82 | """Detects whether `source` is P.A.C.K.E.R. coded.""" 83 | mystr = source.replace(' ', '').find('eval(function(p,a,c,k,e,') 84 | if(mystr > 0): 85 | self.beginstr = source[:mystr] 86 | if(mystr != -1): 87 | """ Find endstr""" 88 | if(source.split("')))", 1)[0] == source): 89 | try: 90 | self.endstr = source.split("}))", 1)[1] 91 | except IndexError: 92 | self.endstr = '' 93 | else: 94 | self.endstr = source.split("')))", 1)[1] 95 | return (mystr != -1) 96 | 97 | def unpack(self, source): 98 | """Unpacks P.A.C.K.E.R. packed js code.""" 99 | payload, symtab, radix, count = self._filterargs(source) 100 | 101 | if count != len(symtab): 102 | raise UnpackingError('Malformed p.a.c.k.e.r. symtab.') 103 | 104 | try: 105 | if radix == 1: 106 | unbase = int 107 | else: 108 | unbase = Unbaser(radix) 109 | except TypeError: 110 | raise UnpackingError('Unknown p.a.c.k.e.r. encoding.') 111 | 112 | def lookup(match): 113 | """Look up symbols in the synthetic symtab.""" 114 | word = match.group(0) 115 | return symtab[unbase(word)] or word 116 | 117 | source = re.sub(r'\b\w+\b', lookup, payload) 118 | return self._replacestrings(source) 119 | 120 | def _filterargs(self, source): 121 | """Juice from a source file the four args needed by decoder.""" 122 | juicers = [(r"}\('(.*)', *(\d+|\[\]), *(\d+), *'(.*)'\.split\('\|'\), *(\d+), *(.*)\)\)"), 123 | (r"}\('(.*)', *(\d+|\[\]), *(\d+), *'(.*)'\.split\('\|'\)"), 124 | ] 125 | for juicer in juicers: 126 | args = re.search(juicer, source, re.DOTALL) 127 | if args: 128 | a = args.groups() 129 | if a[1] == "[]": 130 | a = list(a) 131 | a[1] = 62 132 | a = tuple(a) 133 | try: 134 | return a[0], a[3].split('|'), int(a[1]), int(a[2]) 135 | except ValueError: 136 | raise UnpackingError('Corrupted p.a.c.k.e.r. data.') 137 | 138 | # could not find a satisfying regex 139 | raise UnpackingError('Could not make sense of p.a.c.k.e.r data (unexpected code structure)') 140 | 141 | def _replacestrings(self, source): 142 | """Strip string lookup table (list) and replace values in source.""" 143 | match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL) 144 | 145 | if match: 146 | varname, strings = match.groups() 147 | startpoint = len(match.group(0)) 148 | lookup = strings.split('","') 149 | variable = '%s[%%d]' % varname 150 | for index, value in enumerate(lookup): 151 | source = source.replace(variable % index, '"%s"' % value) 152 | return source[startpoint:] 153 | return self.beginstr + source + self.endstr 154 | 155 | 156 | class Unbaser(object): 157 | """Functor for a given base. Will efficiently convert 158 | strings to natural numbers.""" 159 | ALPHABET = { 160 | 62: '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', 161 | 95: (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ' 162 | '[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~') 163 | } 164 | 165 | def __init__(self, base): 166 | self.base = base 167 | # fill elements 37...61, if necessary 168 | if 36 < base < 62: 169 | if not hasattr(self.ALPHABET, 170 | self.ALPHABET[62][:base]): 171 | self.ALPHABET[base] = self.ALPHABET[62][:base] 172 | # attrs = self.ALPHABET 173 | # print ', '.join("%s: %s" % item for item in attrs.items()) 174 | # If base can be handled by int() builtin, let it do it for us 175 | if 2 <= base <= 36: 176 | self.unbase = lambda s: int(s, base) 177 | else: 178 | # Build conversion dictionary cache 179 | try: 180 | self.dictionary = dict( 181 | (cipher, index) for index, cipher in enumerate(self.ALPHABET[base])) 182 | except KeyError: 183 | raise TypeError('Unsupported base encoding.') 184 | self.unbase = self._dictunbaser 185 | 186 | def __call__(self, s): 187 | return self.unbase(s) 188 | 189 | def _dictunbaser(self, s): 190 | """Decodes a value to an integer.""" 191 | ret = 0 192 | for index, cipher in enumerate(s[::-1]): 193 | ret += (self.base ** index) * self.dictionary[cipher] 194 | return ret 195 | 196 | 197 | def unpack_packer(text): 198 | """unpack p.a.c.k.e.r""" 199 | packer = Packer() 200 | packer_list = unpack_packer_re.findall(text) 201 | if packer_list: 202 | for data in packer_list: 203 | if packer.detect(data): 204 | try: 205 | unpacked = packer.unpack(data).replace('\\', '') 206 | text = text.replace(data, unpacked) 207 | except UnpackingError: 208 | pass 209 | return text 210 | 211 | 212 | def unpack_obfuscatorhtml(text): 213 | """ 214 | Unpacker for Obfuscator HTML https://github.com/BlueEyesHF/Obfuscator-HTML 215 | """ 216 | while True: 217 | m = obfuscatorhtml_re.search(text) 218 | if m: 219 | unpacked = "" 220 | chunks = obfuscatorhtml_chunk_re.findall(m.group('chunks')) 221 | minus = int(m.group('minus')) 222 | for chunk in chunks: 223 | int_chunk = int(re.sub(r'\D', '', str(base64.b64decode(chunk)))) 224 | unpacked += chr(int_chunk - int(minus)) 225 | text = text.replace(m.group(0), unpacked) 226 | else: 227 | break 228 | return text 229 | 230 | 231 | def unpack_unescape(text): 232 | while True: 233 | m = unpack_unescape_re.search(text) 234 | if m: 235 | text = text.replace(m.group(0), unquote(m.group(1))) 236 | else: 237 | break 238 | return text 239 | 240 | 241 | def unpack_source_url(text, _unpack_source_url_re): 242 | while True: 243 | m1 = _unpack_source_url_re.search(text) 244 | if m1: 245 | try: 246 | atob = base64.b64decode(m1.group("atob")).decode("utf-8") 247 | except Exception: 248 | atob = 'INVALID unpack_source_url' 249 | 250 | try: 251 | atob = "{q}{atob}{q}".format(q=m1.group("q"), atob=atob) 252 | text = text.replace(m1.group("replace"), atob) 253 | except Exception: 254 | pass 255 | else: 256 | break 257 | return text 258 | 259 | 260 | def unpack_u_m3u8(text): 261 | def _unicode_escape(s): 262 | unicode_escape = codecs.getdecoder('unicode_escape') 263 | return re.sub(r'\\u[0-9a-fA-F]{4}', lambda m: unicode_escape(m.group(0))[0], s) 264 | 265 | while True: 266 | m = unpack_u_m3u8_re.search(text) 267 | if m: 268 | text = text.replace(m.group(0), _unicode_escape(m.group(0))) 269 | else: 270 | break 271 | return text 272 | 273 | 274 | def unpack(text): 275 | """ unpack html source code """ 276 | text = unpack_packer(text) 277 | text = unpack_obfuscatorhtml(text) 278 | text = unpack_unescape(text) 279 | text = unpack_source_url(text, unpack_source_url_re_1) 280 | text = unpack_source_url(text, unpack_source_url_re_2) 281 | text = unpack_source_url(text, unpack_source_url_re_3) 282 | text = unpack_u_m3u8(text) 283 | return text 284 | 285 | 286 | class GenericCache(object): 287 | '''GenericCache is useded as a temporary session cache 288 | - GenericCache.blacklist_path 289 | - GenericCache.cache_url_list 290 | - GenericCache.whitelist_path 291 | ''' 292 | pass 293 | 294 | 295 | class Generic(Plugin): 296 | pattern_re = re.compile(r'((?:generic|resolve)://)?(?P.+)') 297 | 298 | # iframes 299 | _iframe_re = re.compile(r'''(?isx) 300 | [^"'\s<>]+)\s?["'] 303 | [^<>]*?> 304 | ''') 305 | # playlists 306 | _playlist_re = re.compile(r'''(?sx) 307 | (?:["']|=|")(?P 308 | (?\s\;{}]+\.(?:m3u8|mp3|mp4|mpd) 311 | (?:\?[^"'<>\s\\{}]+)?)/? 312 | (?:\\?["']|(?|\\") 313 | ''') 314 | # mp3 and mp4 files 315 | _httpstream_bitrate_re = re.compile(r'''(?x) 316 | (?:_|\.|/|-) 317 | (?: 318 | (?P\d{1,4})(?:k)? 319 | | 320 | (?P\d{1,4}p) 321 | (?:\.h26(?:4|5))? 322 | ) 323 | \.mp(?:3|4) 324 | ''') 325 | _httpstream_common_resolution_list = [ 326 | '2160', '1440', '1080', '720', '576', '480', '360', '240', 327 | ] 328 | # javascript redirection 329 | _window_location_re = re.compile(r'''(?sx) 330 | [^"']+)["'];[^<>]+ 332 | ''') 333 | # obviously ad paths 334 | _ads_path_re = re.compile(r'''(?x) 335 | /ads?/?(?:\w+)? 336 | (?:\d+x\d+)? 337 | (?:_\w+)?\.(?:html?|php)$ 338 | ''') 339 | 340 | # START - _make_url_list 341 | # Not allowed at the end of the parsed url path 342 | blacklist_endswith = ( 343 | '.gif', 344 | '.jpg', 345 | '.png', 346 | '.svg', 347 | '.vtt', 348 | '/chat.html', 349 | '/chat', 350 | '/novideo.mp4', 351 | '/vidthumb.mp4', 352 | '/ads-iframe-display.php', 353 | ) 354 | # Not allowed at the end of the parsed url netloc 355 | blacklist_netloc = ( 356 | '127.0.0.1', 357 | 'a.adtng.com', 358 | 'about:blank', 359 | 'abv.bg', 360 | 'adfox.ru', 361 | 'cbox.ws', 362 | 'googletagmanager.com', 363 | 'javascript:false', 364 | 'accounts.google.com', 365 | ) 366 | # END - _make_url_list 367 | 368 | arguments = PluginArguments( 369 | PluginArgument( 370 | 'playlist-max', 371 | metavar='NUMBER', 372 | type=num(int, min=0, max=25), 373 | default=5, 374 | help=''' 375 | Number of how many playlist URLs of the same type 376 | are allowed to be resolved with this plugin. 377 | 378 | Default is 5 379 | ''' 380 | ), 381 | PluginArgument( 382 | 'playlist-referer', 383 | metavar='URL', 384 | help=''' 385 | Set a custom referer URL for the playlist URLs. 386 | 387 | This only affects playlist URLs of this plugin. 388 | 389 | Default is the URL of the last website. 390 | ''' 391 | ), 392 | PluginArgument( 393 | 'blacklist-netloc', 394 | metavar='NETLOC', 395 | type=comma_list, 396 | help=''' 397 | Blacklist domains that should not be used, 398 | by using a comma-separated list: 399 | 400 | 'example.com,localhost,google.com' 401 | 402 | Useful for websites with a lot of iframes. 403 | ''' 404 | ), 405 | PluginArgument( 406 | 'blacklist-path', 407 | metavar='PATH', 408 | type=comma_list, 409 | help=''' 410 | Blacklist the path of a domain that should not be used, 411 | by using a comma-separated list: 412 | 413 | 'example.com/mypath,localhost/example,google.com/folder' 414 | 415 | Useful for websites with different iframes of the same domain. 416 | ''' 417 | ), 418 | PluginArgument( 419 | 'blacklist-filepath', 420 | metavar='FILEPATH', 421 | type=comma_list, 422 | help=''' 423 | Blacklist file names for iframes and playlists 424 | by using a comma-separated list: 425 | 426 | 'index.html,ignore.m3u8,/ad/master.m3u8' 427 | 428 | Sometimes there are invalid URLs in the result list, 429 | this can be used to remove them. 430 | ''' 431 | ), 432 | PluginArgument( 433 | 'whitelist-netloc', 434 | metavar='NETLOC', 435 | type=comma_list, 436 | help=''' 437 | Whitelist domains that should only be searched for iframes, 438 | by using a comma-separated list: 439 | 440 | 'example.com,localhost,google.com' 441 | 442 | Useful for websites with lots of iframes, 443 | where the main iframe always has the same hosting domain. 444 | ''' 445 | ), 446 | PluginArgument( 447 | 'whitelist-path', 448 | metavar='PATH', 449 | type=comma_list, 450 | help=''' 451 | Whitelist the path of a domain that should only be searched 452 | for iframes, by using a comma-separated list: 453 | 454 | 'example.com/mypath,localhost/example,google.com/folder' 455 | 456 | Useful for websites with different iframes of the same domain, 457 | where the main iframe always has the same path. 458 | ''' 459 | ), 460 | PluginArgument( 461 | 'ignore-same-url', 462 | action='store_true', 463 | help=''' 464 | Do not remove URLs from the valid list if they were already used. 465 | 466 | Sometimes needed as a workaround for --player-external-http issues. 467 | 468 | Be careful this might result in an infinity loop. 469 | ''' 470 | ), 471 | PluginArgument( 472 | 'ytdl-disable', 473 | action='store_true', 474 | help=''' 475 | Disable youtube-dl fallback. 476 | ''' 477 | ), 478 | PluginArgument( 479 | 'ytdl-only', 480 | action='store_true', 481 | help=''' 482 | Disable generic plugin and use only youtube-dl. 483 | ''' 484 | ), 485 | PluginArgument( 486 | 'debug', 487 | action='store_true', 488 | help=''' 489 | Developer Command! 490 | 491 | Saves unpacked HTML code of all opened URLs to the local hard drive for easier debugging. 492 | ''' 493 | ), 494 | ) 495 | 496 | def __init__(self, url): 497 | super(Generic, self).__init__(url) 498 | self.url = update_scheme( 499 | 'http://', self.pattern_re.match(self.url).group('url')) 500 | 501 | self.html_text = '' 502 | self.title = None 503 | 504 | # START - cache every used url and set a referer 505 | if hasattr(GenericCache, 'cache_url_list'): 506 | GenericCache.cache_url_list += [self.url] 507 | # set the last url as a referer 508 | self.referer = GenericCache.cache_url_list[-2] 509 | else: 510 | GenericCache.cache_url_list = [self.url] 511 | self.referer = self.url 512 | self.session.http.headers.update({'Referer': self.referer}) 513 | # END 514 | 515 | # START - how often _get_streams already run 516 | self._run = len(GenericCache.cache_url_list) 517 | # END 518 | 519 | @classmethod 520 | def priority(cls, url): 521 | m = cls.pattern_re.match(url) 522 | if m: 523 | prefix, url = cls.pattern_re.match(url).groups() 524 | if prefix is not None: 525 | return HIGH_PRIORITY 526 | return NO_PRIORITY 527 | 528 | @classmethod 529 | def can_handle_url(cls, url): 530 | m = cls.pattern_re.match(url) 531 | if m: 532 | return m.group('url') is not None 533 | 534 | def compare_url_path(self, parsed_url, check_list, 535 | path_status='startswith'): 536 | status = False 537 | for netloc, path in check_list: 538 | if path_status == '==': 539 | if (parsed_url.netloc.endswith(netloc) and parsed_url.path == path): 540 | status = True 541 | break 542 | elif path_status == 'startswith': 543 | if (parsed_url.netloc.endswith(netloc) and parsed_url.path.startswith(path)): 544 | status = True 545 | break 546 | 547 | return status 548 | 549 | def merge_path_list(self, static, user): 550 | for _path_url in user: 551 | if not _path_url.startswith(('http', '//')): 552 | _path_url = update_scheme('http://', _path_url) 553 | _parsed_path_url = urlparse(_path_url) 554 | if _parsed_path_url.netloc and _parsed_path_url.path: 555 | static += [(_parsed_path_url.netloc, _parsed_path_url.path)] 556 | return static 557 | 558 | def repair_url(self, url, base_url, stream_base=''): 559 | # remove \ 560 | new_url = url.replace('\\', '') 561 | # repairs broken scheme 562 | if new_url.startswith('http://'): 563 | new_url = 'http:' + new_url[9:] 564 | elif new_url.startswith('https://'): 565 | new_url = 'https:' + new_url[10:] 566 | new_url = unquote(new_url) 567 | # creates a valid url from path only urls 568 | # and adds missing scheme for // urls 569 | if stream_base and new_url[1] != '/': 570 | if new_url[0] == '/': 571 | new_url = new_url[1:] 572 | new_url = urljoin(stream_base, new_url) 573 | else: 574 | new_url = urljoin(base_url, new_url) 575 | return new_url 576 | 577 | def _make_url_list(self, old_list, base_url, url_type=''): 578 | # START - List for not allowed URL Paths 579 | # --generic-blacklist-path 580 | if not hasattr(GenericCache, 'blacklist_path'): 581 | 582 | # static list 583 | blacklist_path = [ 584 | ('bigo.tv', '/show.mp4'), 585 | ('expressen.se', '/_livetvpreview/'), 586 | ('facebook.com', '/connect'), 587 | ('facebook.com', '/plugins'), 588 | ('google.com', '/recaptcha/'), 589 | ('haber7.com', '/radyohome/station-widget/'), 590 | ('static.tvr.by', '/upload/video/atn/promo'), 591 | ('twitter.com', '/widgets'), 592 | ('vesti.ru', '/native_widget.html'), 593 | ('www.blogger.com', '/static'), 594 | ('youtube.com', '/['), 595 | ] 596 | 597 | # merge user and static list 598 | blacklist_path_user = self.get_option('blacklist_path') 599 | if blacklist_path_user is not None: 600 | blacklist_path = self.merge_path_list( 601 | blacklist_path, blacklist_path_user) 602 | 603 | GenericCache.blacklist_path = blacklist_path 604 | # END 605 | 606 | blacklist_path_same = [ 607 | ('player.vimeo.com', '/video/'), 608 | ('youtube.com', '/embed/'), 609 | ] 610 | 611 | # START - List of only allowed URL Paths for Iframes 612 | # --generic-whitelist-path 613 | if not hasattr(GenericCache, 'whitelist_path'): 614 | whitelist_path = [] 615 | whitelist_path_user = self.get_option('whitelist_path') 616 | if whitelist_path_user is not None: 617 | whitelist_path = self.merge_path_list( 618 | [], whitelist_path_user) 619 | GenericCache.whitelist_path = whitelist_path 620 | # END 621 | 622 | allow_same_url = (self.get_option('ignore_same_url')) 623 | 624 | new_list = [] 625 | for url in old_list: 626 | new_url = self.repair_url(url, base_url) 627 | # parse the url 628 | parse_new_url = urlparse(new_url) 629 | 630 | # START 631 | REMOVE = False 632 | if new_url in GenericCache.cache_url_list and not allow_same_url: 633 | # Removes an already used url 634 | # ignored if --hls-session-reload is used 635 | REMOVE = 'SAME-URL' 636 | elif (not parse_new_url.scheme.startswith(('http'))): 637 | # Allow only an url with a valid scheme 638 | REMOVE = 'SCHEME' 639 | elif (url_type == 'iframe' 640 | and self.get_option('whitelist_netloc') 641 | and parse_new_url.netloc.endswith(tuple(self.get_option('whitelist_netloc'))) is False): 642 | # Allow only whitelisted domains for iFrames 643 | # --generic-whitelist-netloc 644 | REMOVE = 'WL-netloc' 645 | elif (url_type == 'iframe' 646 | and GenericCache.whitelist_path 647 | and self.compare_url_path(parse_new_url, GenericCache.whitelist_path) is False): 648 | # Allow only whitelisted paths from a domain for iFrames 649 | # --generic-whitelist-path 650 | REMOVE = 'WL-path' 651 | elif (parse_new_url.netloc.endswith(self.blacklist_netloc)): 652 | # Removes blacklisted domains from a static list 653 | # self.blacklist_netloc 654 | REMOVE = 'BL-static' 655 | elif (self.get_option('blacklist_netloc') 656 | and parse_new_url.netloc.endswith(tuple(self.get_option('blacklist_netloc')))): 657 | # Removes blacklisted domains 658 | # --generic-blacklist-netloc 659 | REMOVE = 'BL-netloc' 660 | elif (self.compare_url_path(parse_new_url, GenericCache.blacklist_path) is True): 661 | # Removes blacklisted paths from a domain 662 | # --generic-blacklist-path 663 | REMOVE = 'BL-path' 664 | elif (parse_new_url.path.endswith(self.blacklist_endswith)): 665 | # Removes unwanted endswith images and chatrooms 666 | REMOVE = 'BL-ew' 667 | elif (self.get_option('blacklist_filepath') 668 | and parse_new_url.path.endswith(tuple(self.get_option('blacklist_filepath')))): 669 | # Removes blacklisted file paths 670 | # --generic-blacklist-filepath 671 | REMOVE = 'BL-filepath' 672 | elif (self._ads_path_re.search(parse_new_url.path) or parse_new_url.netloc.startswith(('ads.'))): 673 | # Removes obviously AD URL 674 | REMOVE = 'ADS' 675 | elif (self.compare_url_path(parse_new_url, blacklist_path_same, path_status='==') is True): 676 | # Removes blacklisted same paths from a domain 677 | REMOVE = 'BL-path-same' 678 | elif parse_new_url.netloc == 'cdn.embedly.com' and parse_new_url.path == '/widgets/media.html': 679 | # do not use the direct URL for 'cdn.embedly.com', search the query for a new URL 680 | params = dict(parse_qsl(parse_new_url.query)) 681 | embedly_new_url = params.get('url') or params.get('src') 682 | if embedly_new_url: 683 | new_list += [embedly_new_url] 684 | else: 685 | log.error('Missing params URL or SRC for {0}'.format(new_url)) 686 | continue 687 | else: 688 | # valid URL 689 | new_list += [new_url] 690 | continue 691 | 692 | log.debug('{0} - Removed: {1}'.format(REMOVE, new_url)) 693 | # END 694 | 695 | # Remove duplicates 696 | log.debug('List length: {0} (with duplicates)'.format(len(new_list))) 697 | new_list = sorted(list(set(new_list))) 698 | return new_list 699 | 700 | def _window_location(self): 701 | match = self._window_location_re.search(self.html_text) 702 | if match: 703 | temp_url = urljoin(self.url, match.group('url')) 704 | if temp_url not in GenericCache.cache_url_list: 705 | log.debug('Found window_location: {0}'.format(temp_url)) 706 | return temp_url 707 | 708 | log.trace('No window_location') 709 | return False 710 | 711 | def _resolve_playlist(self, playlist_all): 712 | playlist_referer = self.get_option('playlist_referer') or self.url 713 | self.session.http.headers.update({'Referer': playlist_referer}) 714 | 715 | playlist_max = self.get_option('playlist_max') or 5 716 | count_playlist = { 717 | 'dash': 0, 718 | 'hls': 0, 719 | 'http': 0, 720 | } 721 | 722 | o = urlparse(self.url) 723 | origin_tuple = ( 724 | '.cloudfront.net', 725 | '.metube.id', 726 | ) 727 | 728 | for url in playlist_all: 729 | parsed_url = urlparse(url) 730 | if parsed_url.netloc.endswith(origin_tuple): 731 | self.session.http.headers.update({ 732 | 'Origin': '{0}://{1}'.format(o.scheme, o.netloc), 733 | }) 734 | 735 | if (parsed_url.path.endswith(('.m3u8')) 736 | or parsed_url.query.endswith(('.m3u8'))): 737 | if count_playlist['hls'] >= playlist_max: 738 | log.debug('Skip - {0}'.format(url)) 739 | continue 740 | try: 741 | streams = HLSStream.parse_variant_playlist(self.session, url).items() 742 | if not streams: 743 | yield 'live', HLSStream(self.session, url) 744 | for s in streams: 745 | yield s 746 | log.debug('HLS URL - {0}'.format(url)) 747 | count_playlist['hls'] += 1 748 | except Exception as e: 749 | log.error('Skip HLS with error {0}'.format(str(e))) 750 | elif (parsed_url.path.endswith(('.mp3', '.mp4')) 751 | or parsed_url.query.endswith(('.mp3', '.mp4'))): 752 | if count_playlist['http'] >= playlist_max: 753 | log.debug('Skip - {0}'.format(url)) 754 | continue 755 | try: 756 | name = 'vod' 757 | m = self._httpstream_bitrate_re.search(url) 758 | if m: 759 | bitrate = m.group('bitrate') 760 | resolution = m.group('resolution') 761 | if bitrate: 762 | if bitrate in self._httpstream_common_resolution_list: 763 | name = '{0}p'.format(m.group('bitrate')) 764 | else: 765 | name = '{0}k'.format(m.group('bitrate')) 766 | elif resolution: 767 | name = resolution 768 | yield name, HTTPStream(self.session, url) 769 | log.debug('HTTP URL - {0}'.format(url)) 770 | count_playlist['http'] += 1 771 | except Exception as e: 772 | log.error('Skip HTTP with error {0}'.format(str(e))) 773 | elif (parsed_url.path.endswith(('.mpd')) 774 | or parsed_url.query.endswith(('.mpd'))): 775 | if count_playlist['dash'] >= playlist_max: 776 | log.debug('Skip - {0}'.format(url)) 777 | continue 778 | try: 779 | for s in DASHStream.parse_manifest(self.session, 780 | url).items(): 781 | yield s 782 | log.debug('DASH URL - {0}'.format(url)) 783 | count_playlist['dash'] += 1 784 | except Exception as e: 785 | log.error('Skip DASH with error {0}'.format(str(e))) 786 | else: 787 | log.error('parsed URL - {0}'.format(url)) 788 | 789 | def _res_text(self, url): 790 | try: 791 | res = self.session.http.get(url, allow_redirects=True) 792 | except Exception as e: 793 | if 'Received response with content-encoding: gzip' in str(e): 794 | headers = { 795 | 'User-Agent': useragents.FIREFOX, 796 | 'Accept-Encoding': 'deflate' 797 | } 798 | res = self.session.http.get(url, headers=headers, allow_redirects=True) 799 | elif '403 Client Error' in str(e): 800 | log.error('Website Access Denied/Forbidden, you might be geo-' 801 | 'blocked or other params are missing.') 802 | raise NoStreamsError(self.url) 803 | elif '404 Client Error' in str(e): 804 | log.error('Website was not found, the link is broken or dead.') 805 | raise NoStreamsError(self.url) 806 | else: 807 | raise e 808 | 809 | if res.history: 810 | for resp in res.history: 811 | log.debug('Redirect: {0} - {1}'.format(resp.status_code, resp.url)) 812 | log.debug('URL: {0}'.format(res.url)) 813 | return res.text 814 | 815 | def settings_url(self): 816 | o = urlparse(self.url) 817 | 818 | # SSL Verification - http.verify 819 | http_verify = [ 820 | '.cdn.bg', 821 | 'sportal.bg', 822 | ] 823 | if (o.netloc.endswith(tuple(http_verify)) and self.session.http.verify): 824 | self.session.http.verify = False 825 | log.warning('SSL Verification disabled.') 826 | 827 | # http://www.latina.pe/tvenvivo 828 | if (o.netloc.endswith('latina.pe') and o.path.startswith('/tvenvivo')): 829 | self.session.http.get(self.url) 830 | 831 | def get_title(self): 832 | if self.title is None: 833 | if not self.html_text: 834 | self.html_text = self._res_text(self.url) 835 | _og_title_re = re.compile(r'[^<>]+)"\s*/?>') 836 | _title_re = re.compile(r']*>(?P[^<>]+)') 837 | m = _og_title_re.search(self.html_text) or _title_re.search(self.html_text) 838 | if m: 839 | self.title = re.sub(r'[\s]+', ' ', m.group('title')) 840 | self.title = re.sub(r'^\s*|\s*$', '', self.title) 841 | self.title = html_unescape(self.title) 842 | if self.title is None: 843 | # fallback if there is no 844 | self.title = self.url 845 | return self.title 846 | 847 | def ytdl_fallback(self): 848 | '''Basic support for m3u8 URLs with youtube-dl''' 849 | log.debug('Fallback youtube-dl') 850 | 851 | class YTDL_Logger(object): 852 | def debug(self, msg): 853 | log.debug(msg) 854 | 855 | def warning(self, msg): 856 | log.warning(msg) 857 | 858 | def error(self, msg): 859 | log.trace(msg) 860 | 861 | ydl_opts = { 862 | 'call_home': False, 863 | 'forcejson': True, 864 | 'logger': YTDL_Logger(), 865 | 'no_color': True, 866 | 'noplaylist': True, 867 | 'no_warnings': True, 868 | 'verbose': False, 869 | 'quiet': True, 870 | } 871 | 872 | with youtube_dl.YoutubeDL(ydl_opts) as ydl: 873 | try: 874 | info = ydl.extract_info(self.url, download=False) 875 | except Exception: 876 | return 877 | 878 | if not info or not info.get('formats'): 879 | return 880 | 881 | self.title = info['title'] 882 | 883 | streams = [] 884 | for stream in info['formats']: 885 | if stream['protocol'] in ['m3u8', 'm3u8_native'] and stream['ext'] == 'mp4': 886 | log.trace('{0!r}'.format(stream)) 887 | name = stream.get('height') or stream.get('width') 888 | if name: 889 | name = '{0}p'.format(name) 890 | streams.append((name, HLSStream(self.session, 891 | stream['url'], 892 | headers=stream['http_headers']))) 893 | 894 | if not streams: 895 | if ('youtube.com' in self.url 896 | and info.get('requested_formats') 897 | and len(info.get('requested_formats')) == 2 898 | and MuxedStream.is_usable(self.session)): 899 | audio_url = audio_format = video_url = video_format = video_name = None 900 | for stream in info.get('requested_formats'): 901 | if not stream.get('height'): 902 | audio_url = stream.get('url') 903 | audio_format = stream.get('format_id') 904 | if stream.get('height'): 905 | video_url = stream.get('url') 906 | video_format = stream.get('format_id') 907 | video_name = '{0}p'.format(stream.get('height')) 908 | 909 | log.debug('MuxedStream: v {video} a {audio} = {name}'.format( 910 | audio=audio_format, 911 | name=video_name, 912 | video=video_format, 913 | )) 914 | streams.append((video_name, 915 | MuxedStream(self.session, 916 | HTTPStream(self.session, video_url, headers=stream['http_headers']), 917 | HTTPStream(self.session, audio_url, headers=stream['http_headers'])) 918 | )) 919 | return streams 920 | 921 | def _get_streams(self): 922 | if HAS_YTDL and not self.get_option('ytdl-disable') and self.get_option('ytdl-only'): 923 | ___streams = self.ytdl_fallback() 924 | if ___streams and len(___streams) >= 1: 925 | return (s for s in ___streams) 926 | if self.get_option('ytdl-only'): 927 | return 928 | 929 | self.settings_url() 930 | 931 | if self._run <= 1: 932 | log.info('Version {0} - https://github.com/back-to/generic'.format(GENERIC_VERSION)) 933 | log.debug('User-Agent: {0}'.format(self.session.http.headers['User-Agent'])) 934 | 935 | new_url = False 936 | 937 | log.info(' {0}. URL={1}'.format(self._run, self.url)) 938 | 939 | # GET website content 940 | self.html_text = self._res_text(self.url) 941 | # unpack common javascript codes 942 | self.html_text = unpack(self.html_text) 943 | 944 | if self.get_option('debug'): 945 | _valid_filepath = re.sub(r'(?u)[^-\w.]', '', str(self.url).strip().replace(' ', '_')) 946 | _new_file = os.path.join(Path().absolute(), 947 | f'{self._run}_{_valid_filepath}.html') 948 | log.warning(f'NEW DEBUG FILE! {_new_file}') 949 | try: 950 | with open(_new_file, 'w+') as f: 951 | f.write(str(self.html_text)) 952 | except OSError: 953 | pass 954 | 955 | # Playlist URL 956 | playlist_all = self._playlist_re.findall(self.html_text) 957 | if playlist_all: 958 | log.debug('Found Playlists: {0}'.format(len(playlist_all))) 959 | playlist_list = self._make_url_list(playlist_all, 960 | self.url, 961 | url_type='playlist', 962 | ) 963 | if playlist_list: 964 | log.info('Found Playlists: {0} (valid)'.format( 965 | len(playlist_list))) 966 | return self._resolve_playlist(playlist_list) 967 | else: 968 | log.trace('No Playlists') 969 | 970 | # iFrame URL 971 | iframe_list = self._iframe_re.findall(self.html_text) 972 | if iframe_list: 973 | log.debug('Found Iframes: {0}'.format(len(iframe_list))) 974 | # repair and filter iframe url list 975 | new_iframe_list = self._make_url_list(iframe_list, 976 | self.url, 977 | url_type='iframe') 978 | if new_iframe_list: 979 | number_iframes = len(new_iframe_list) 980 | if number_iframes == 1: 981 | new_url = new_iframe_list[0] 982 | else: 983 | log.info('--- IFRAMES ---') 984 | for i, item in enumerate(new_iframe_list, start=1): 985 | log.info('{0} - {1}'.format(i, item)) 986 | log.info('--- IFRAMES ---') 987 | 988 | try: 989 | number = int(self.input_ask( 990 | 'Choose an iframe number from above').split(' ')[0]) 991 | new_url = new_iframe_list[number - 1] 992 | except FatalPluginError: 993 | new_url = new_iframe_list[0] 994 | except ValueError: 995 | log.error('invalid input answer') 996 | except (IndexError, TypeError): 997 | log.error('invalid input number') 998 | 999 | if not new_url: 1000 | new_url = new_iframe_list[0] 1001 | else: 1002 | log.trace('No iframes') 1003 | 1004 | if not new_url: 1005 | # search for window.location.href 1006 | new_url = self._window_location() 1007 | 1008 | if new_url: 1009 | # the Dailymotion Plugin does not work with this Referer 1010 | if 'dailymotion.com' in new_url: 1011 | del self.session.http.headers['Referer'] 1012 | 1013 | return self.session.streams(new_url) 1014 | 1015 | if HAS_YTDL and not self.get_option('ytdl-disable') and not self.get_option('ytdl-only'): 1016 | ___streams = self.ytdl_fallback() 1017 | if ___streams and len(___streams) >= 1: 1018 | return (s for s in ___streams) 1019 | 1020 | raise NoPluginError 1021 | 1022 | 1023 | __plugin__ = Generic 1024 | -------------------------------------------------------------------------------- /myfreecams.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import re 4 | import uuid 5 | 6 | from streamlink.compat import unquote 7 | from streamlink.exceptions import NoStreamsError, PluginError 8 | from streamlink.plugin import Plugin, PluginArgument, PluginArguments 9 | from streamlink.plugin.api import useragents, validate 10 | from streamlink.stream import DASHStream, HLSStream 11 | from streamlink.utils import parse_json 12 | 13 | from websocket import create_connection 14 | 15 | log = logging.getLogger(__name__) 16 | 17 | 18 | class MyFreeCams(Plugin): 19 | '''Streamlink Plugin for MyFreeCams 20 | 21 | UserName 22 | - https://m.myfreecams.com/models/UserName 23 | - https://myfreecams.com/#UserName 24 | - https://profiles.myfreecams.com/UserName 25 | - https://www.myfreecams.com/#UserName 26 | - https://www.myfreecams.com/UserName 27 | User ID with php fallback 28 | - https://myfreecams.com/?id=10101010 29 | ''' 30 | 31 | JS_SERVER_URL = 'https://www.myfreecams.com/_js/serverconfig.js' 32 | PHP_URL = 'https://www.myfreecams.com/php/FcwExtResp.php?respkey={respkey}&type={type}&opts={opts}&serv={serv}' 33 | 34 | _url_re = re.compile(r'''https?://(?:\w+\.)?myfreecams\.com/ 35 | (?: 36 | (?:models/)?\#?(?P<username>\w+) 37 | | 38 | \?id=(?P<user_id>\d+) 39 | )''', re.VERBOSE) 40 | _dict_re = re.compile(r'''(?P<data>{.*})''') 41 | _socket_re = re.compile(r'''(\w+) (\w+) (\w+) (\w+) (\w+)''') 42 | 43 | _data_schema = validate.Schema( 44 | { 45 | 'nm': validate.text, 46 | 'sid': int, 47 | 'uid': int, 48 | 'vs': int, 49 | validate.optional('u'): { 50 | 'camserv': int 51 | } 52 | } 53 | ) 54 | 55 | arguments = PluginArguments( 56 | PluginArgument( 57 | 'dash', 58 | action='store_true', 59 | default=False, 60 | help=''' 61 | Use DASH streams as an alternative source. 62 | 63 | %(prog)s --myfreecams-dash <url> [stream] 64 | 65 | ''' 66 | ) 67 | ) 68 | 69 | @classmethod 70 | def can_handle_url(cls, url): 71 | return cls._url_re.match(url) is not None 72 | 73 | def _php_fallback(self, username, user_id, php_message): 74 | '''Use the php website as a fallback when 75 | - UserId was used 76 | - Username failed for WebSocket 77 | - VS = 90 and no camserver 78 | 79 | Args: 80 | username: Model Username 81 | user_id: Model UserID 82 | php_message: data from self._websocket_data 83 | Returns: 84 | message: data to create a video url. 85 | ''' 86 | log.debug('Attempting to use php fallback') 87 | php_data = self._dict_re.search(php_message) 88 | if php_data is None: 89 | raise NoStreamsError(self.url) 90 | 91 | php_data = parse_json(php_data.group('data')) 92 | php_url = self.PHP_URL.format( 93 | opts=php_data['opts'], 94 | respkey=php_data['respkey'], 95 | serv=php_data['serv'], 96 | type=php_data['type'] 97 | ) 98 | php_params = { 99 | 'cid': 3149, 100 | 'gw': 1 101 | } 102 | res = self.session.http.get(php_url, params=php_params) 103 | 104 | if username: 105 | _username_php_re = str(username) 106 | _uid_php_re = r'''\d+''' 107 | elif user_id: 108 | _username_php_re = r'''[^"']+''' 109 | _uid_php_re = str(user_id) 110 | else: 111 | raise NoStreamsError(self.url) 112 | 113 | _data_php_re = re.compile( 114 | r'''\[["'](?P<username>{0})["'],(?P<sid>\d+),'''.format(_username_php_re) 115 | + r'''(?P<uid>{0}),(?P<vs>\d+),[^,]+,[^,]+,(?P<camserv>\d+)[^\]]+\]'''.format(_uid_php_re)) 116 | 117 | match = _data_php_re.search(res.text) 118 | if match is None: 119 | raise NoStreamsError(self.url) 120 | 121 | data = { 122 | 'nm': str(match.group('username')), 123 | 'sid': int(match.group('sid')), 124 | 'uid': int(match.group('uid')), 125 | 'vs': int(match.group('vs')), 126 | 'u': { 127 | 'camserv': int(match.group('camserv')) 128 | } 129 | } 130 | return data 131 | 132 | def _websocket_data(self, username, chat_servers): 133 | '''Get data from the websocket. 134 | 135 | Args: 136 | username: Model Username 137 | chat_servers: servername from self._get_servers 138 | 139 | Returns: 140 | message: data to create a video url. 141 | php_message: data for self._php_fallback 142 | ''' 143 | try_to_connect = 0 144 | while (try_to_connect < 5): 145 | try: 146 | xchat = str(random.choice(chat_servers)) 147 | host = 'wss://{0}.myfreecams.com/fcsl'.format(xchat) 148 | ws = create_connection(host) 149 | ws.send('hello fcserver\n\0') 150 | r_id = str(uuid.uuid4().hex[0:32]) 151 | ws.send('1 0 0 20071025 0 {0}@guest:guest\n'.format(r_id)) 152 | log.debug('Websocket server {0} connected'.format(xchat)) 153 | try_to_connect = 5 154 | except Exception: 155 | try_to_connect += 1 156 | log.debug('Failed to connect to WS server: {0} - try {1}'.format(xchat, try_to_connect)) 157 | if try_to_connect == 5: 158 | log.error('can\'t connect to the websocket') 159 | raise 160 | 161 | buff = '' 162 | php_message = '' 163 | ws_close = 0 164 | while ws_close == 0: 165 | socket_buffer = ws.recv() 166 | socket_buffer = buff + socket_buffer 167 | buff = '' 168 | while True: 169 | ws_answer = self._socket_re.search(socket_buffer) 170 | if bool(ws_answer) == 0: 171 | break 172 | 173 | FC = ws_answer.group(1) 174 | FCTYPE = int(FC[6:]) 175 | 176 | message_length = int(FC[0:6]) 177 | message = socket_buffer[6:6 + message_length] 178 | 179 | if len(message) < message_length: 180 | buff = ''.join(socket_buffer) 181 | break 182 | 183 | message = unquote(message) 184 | 185 | if FCTYPE == 1 and username: 186 | ws.send('10 0 0 20 0 {0}\n'.format(username)) 187 | elif FCTYPE == 81: 188 | php_message = message 189 | if username is None: 190 | ws_close = 1 191 | elif FCTYPE == 10: 192 | ws_close = 1 193 | 194 | socket_buffer = socket_buffer[6 + message_length:] 195 | 196 | if len(socket_buffer) == 0: 197 | break 198 | 199 | ws.send('99 0 0 0 0') 200 | ws.close() 201 | return message, php_message 202 | 203 | def _get_servers(self): 204 | res = self.session.http.get(self.JS_SERVER_URL) 205 | servers = parse_json(res.text) 206 | return servers 207 | 208 | def _get_camserver(self, servers, key): 209 | server_type = None 210 | value = None 211 | 212 | h5video_servers = servers['h5video_servers'] 213 | ngvideo_servers = servers['ngvideo_servers'] 214 | wzobs_servers = servers['wzobs_servers'] 215 | 216 | if h5video_servers.get(str(key)): 217 | value = h5video_servers[str(key)] 218 | server_type = 'h5video_servers' 219 | elif wzobs_servers.get(str(key)): 220 | value = wzobs_servers[str(key)] 221 | server_type = 'wzobs_servers' 222 | elif ngvideo_servers.get(str(key)): 223 | value = ngvideo_servers[str(key)] 224 | server_type = 'ngvideo_servers' 225 | 226 | return value, server_type 227 | 228 | def _get_streams(self): 229 | self.session.http.headers.update({'User-Agent': useragents.FIREFOX}) 230 | log.debug('Version 2018-07-12') 231 | log.info('This is a custom plugin. ') 232 | match = self._url_re.match(self.url) 233 | username = match.group('username') 234 | user_id = match.group('user_id') 235 | 236 | servers = self._get_servers() 237 | chat_servers = servers['chat_servers'] 238 | 239 | message, php_message = self._websocket_data(username, chat_servers) 240 | 241 | if user_id and not username: 242 | data = self._php_fallback(username, user_id, php_message) 243 | else: 244 | log.debug('Attempting to use WebSocket data') 245 | data = self._dict_re.search(message) 246 | if data is None: 247 | raise NoStreamsError(self.url) 248 | data = parse_json(data.group('data'), schema=self._data_schema) 249 | 250 | vs = data['vs'] 251 | ok_vs = [0, 90] 252 | if vs not in ok_vs: 253 | if vs == 2: 254 | log.info('Model is currently away') 255 | elif vs == 12: 256 | log.info('Model is currently in a private show') 257 | elif vs == 13: 258 | log.info('Model is currently in a group show') 259 | elif vs == 127: 260 | log.info('Model is currently offline') 261 | else: 262 | log.error('Stream status: {0}'.format(vs)) 263 | raise NoStreamsError(self.url) 264 | 265 | log.debug('VS: {0}'.format(vs)) 266 | 267 | nm = data['nm'] 268 | uid = data['uid'] 269 | uid_video = uid + 100000000 270 | camserver = data['u']['camserv'] 271 | 272 | server, server_type = self._get_camserver(servers, camserver) 273 | 274 | if server is None and not user_id: 275 | fallback_data = self._php_fallback(username, user_id, php_message) 276 | camserver = fallback_data['u']['camserv'] 277 | server, server_type = self._get_camserver(servers, camserver) 278 | 279 | log.info('Username: {0}'.format(nm)) 280 | log.info('User ID: {0}'.format(uid)) 281 | 282 | if not server: 283 | raise PluginError('Missing video server') 284 | 285 | log.debug('Video server: {0}'.format(server)) 286 | log.debug('Video server_type: {0}'.format(server_type)) 287 | 288 | if server_type == 'h5video_servers': 289 | DASH_VIDEO_URL = 'https://{0}.myfreecams.com/NxServer/ngrp:mfc_{1}.f4v_desktop/manifest.mpd'.format(server, uid_video) 290 | HLS_VIDEO_URL = 'https://{0}.myfreecams.com/NxServer/ngrp:mfc_{1}.f4v_mobile/playlist.m3u8'.format(server, uid_video) 291 | elif server_type == 'wzobs_servers': 292 | DASH_VIDEO_URL = '' 293 | HLS_VIDEO_URL = 'https://{0}.myfreecams.com/NxServer/ngrp:mfc_a_{1}.f4v_mobile/playlist.m3u8'.format(server, uid_video) 294 | elif server_type == 'ngvideo_servers': 295 | raise PluginError('ngvideo_servers are not supported.') 296 | else: 297 | raise PluginError('Unknow server type.') 298 | 299 | log.debug('HLS URL: {0}'.format(HLS_VIDEO_URL)) 300 | for s in HLSStream.parse_variant_playlist(self.session, 301 | HLS_VIDEO_URL).items(): 302 | yield s 303 | 304 | if DASH_VIDEO_URL and self.get_option('dash'): 305 | log.debug('DASH URL: {0}'.format(DASH_VIDEO_URL)) 306 | for s in DASHStream.parse_manifest(self.session, 307 | DASH_VIDEO_URL).items(): 308 | yield s 309 | 310 | 311 | __plugin__ = MyFreeCams 312 | -------------------------------------------------------------------------------- /showup.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import re 4 | import websocket 5 | 6 | from streamlink.plugin import Plugin 7 | from streamlink.plugin.api import validate, utils 8 | from streamlink.stream import RTMPStream 9 | 10 | SWF_URL = 'http://showup.tv/flash/suStreamer.swf' 11 | RANDOM_UID = '%032x' % random.getrandbits(128) 12 | JSON_UID = u'{"id":0,"value":["%s",""]}' 13 | JSON_CHANNEL = u'{"id":2,"value":["%s"]}' 14 | 15 | _url_re = re.compile(r'https?://(\w+.)?showup\.tv/(?P<channel>[A-Za-z0-9_-]+)') 16 | _websocket_url_re = re.compile(r'''socket\.connect\(["'](?P<ws>[^"']+)["']\)''') 17 | _schema = validate.Schema(validate.get('value')) 18 | 19 | log = logging.getLogger(__name__) 20 | 21 | 22 | class ShowUp(Plugin): 23 | @classmethod 24 | def can_handle_url(cls, url): 25 | return _url_re.match(url) 26 | 27 | def _get_stream_id(self, channel, ws_url): 28 | ws = websocket.WebSocket() 29 | ws.connect(ws_url) 30 | ws.send(JSON_UID % RANDOM_UID) 31 | ws.send(JSON_CHANNEL % channel) 32 | # STREAM_ID 33 | result = ws.recv() 34 | data = utils.parse_json(result, schema=_schema) 35 | log.debug('DATA 1 {0}'.format(data)) 36 | if 'failure' in data: 37 | ws.close() 38 | return False, False 39 | 40 | # RTMP CDN 41 | result_2 = ws.recv() 42 | data2 = utils.parse_json(result_2, schema=_schema) 43 | log.debug('DATA 2 {0}'.format(data2)) 44 | if 'failure' in data2: 45 | ws.close() 46 | return False, False 47 | 48 | # ERROR 49 | result_3 = ws.recv() 50 | data3 = utils.parse_json(result_3, schema=_schema) 51 | log.debug('DATA 3 {0}'.format(data3)) 52 | if 'failure' in data3: 53 | ws.close() 54 | return False, False 55 | 56 | return data[0], data2[1] 57 | 58 | def _get_websocket(self, html): 59 | ws_url = _websocket_url_re.search(html) 60 | if ws_url: 61 | ws_host = ws_url.group('ws') 62 | if ':' in ws_host: 63 | ws_host, ws_port = ws_host.split(':') 64 | return 'wss://%s' % ws_host 65 | 66 | def _get_streams(self): 67 | log.debug('Version 2018-08-19') 68 | log.info('This is a custom plugin.') 69 | url_match = _url_re.match(self.url) 70 | channel = url_match.group('channel') 71 | log.debug('Channel name: {0}'.format(channel)) 72 | self.session.http.parse_headers('Referer: %s' % self.url) 73 | self.session.http.parse_cookies('accept_rules=true') 74 | page = self.session.http.get(self.url) 75 | ws_url = self._get_websocket(page.text) 76 | log.debug('WebSocket: {0}'.format(ws_url)) 77 | stream_id, rtmp_cdn = self._get_stream_id(channel, ws_url) 78 | if not (stream_id or rtmp_cdn): 79 | log.error('Channel is not available.') 80 | return 81 | log.debug('Stream ID: {0}'.format(stream_id)) 82 | log.debug('RTMP CDN: {0}'.format(rtmp_cdn)) 83 | stream = RTMPStream(self.session, { 84 | 'rtmp': 'rtmp://{0}:1935/webrtc'.format(rtmp_cdn), 85 | 'pageUrl': self.url, 86 | 'playpath': '{0}_aac'.format(stream_id), 87 | 'swfVfy': SWF_URL, 88 | 'live': True 89 | }) 90 | return {'live': stream} 91 | 92 | 93 | __plugin__ = ShowUp 94 | -------------------------------------------------------------------------------- /stripchat.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from streamlink.plugin import Plugin 4 | from streamlink.plugin.api import validate 5 | from streamlink.stream import HLSStream 6 | 7 | _url_re = re.compile(r"https?://(\w+\.)?stripchat\.com/(?P<username>[a-zA-Z0-9_-]+)") 8 | 9 | _post_schema = validate.Schema( 10 | { 11 | "cam": validate.Schema({ 12 | 'streamName' : validate.text, 13 | 'viewServers': validate.Schema({'flashphoner-hls': validate.text}) 14 | }), 15 | "user": validate.Schema({ 16 | 'user' : validate.Schema({ 17 | 'status' : validate.text, 18 | 'isLive' : bool 19 | }) 20 | }) 21 | } 22 | ) 23 | 24 | 25 | class Stripchat(Plugin): 26 | @classmethod 27 | def can_handle_url(cls, url): 28 | return _url_re.match(url) 29 | 30 | def _get_streams(self): 31 | match = _url_re.match(self.url) 32 | username = match.group("username") 33 | api_call = "https://stripchat.com/api/front/v2/models/username/{0}/cam".format(username) 34 | headers = { 35 | "Content-Type": "application/x-www-form-urlencoded", 36 | "X-Requested-With": "XMLHttpRequest", 37 | "Referer": self.url, 38 | } 39 | 40 | res = self.session.http.get(api_call, headers=headers) 41 | data = self.session.http.json(res, schema=_post_schema) 42 | 43 | server = "https://b-{0}.doppiocdn.com/hls/{1}/master_{1}.m3u8".format(data["cam"]["viewServers"]["flashphoner-hls"],data["cam"]["streamName"]) 44 | 45 | server0 = "https://b-{0}.doppiocdn.com/hls/{1}/{1}.m3u8".format(data["cam"]["viewServers"]["flashphoner-hls"],data["cam"]["streamName"]) 46 | 47 | self.logger.info("Stream status: {0}".format(data["user"]["user"]["status"])) 48 | 49 | if (data["user"]["user"]["isLive"] is True and data["user"]["user"]["status"] == "public" and server): 50 | try: 51 | for s in HLSStream.parse_variant_playlist(self.session,server,headers={'Referer': self.url}).items(): 52 | yield s 53 | except IOError as err: 54 | stream = HLSStream(self.session, server0) 55 | yield "Auto", stream 56 | 57 | __plugin__ = Stripchat 58 | -------------------------------------------------------------------------------- /zbiornik.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | 4 | from streamlink.plugin import Plugin 5 | from streamlink.plugin.api import useragents, validate 6 | from streamlink.stream import RTMPStream 7 | from streamlink.utils import parse_json 8 | 9 | log = logging.getLogger(__name__) 10 | 11 | 12 | class Zbiornik(Plugin): 13 | 14 | SWF_URL = 'https://zbiornik.tv/wowza.swf' 15 | 16 | _url_re = re.compile( 17 | r'^https?://(?:www\.)?zbiornik\.tv/(?P<channel>[^/]+)/?$') 18 | 19 | _streams_re = re.compile(r'''var\sstreams\s*=\s*(?P<data>\[.+\]);''') 20 | _user_re = re.compile(r'''var\suser\s*=\s*(?P<data>\{[^;]+\});''') 21 | 22 | _user_schema = validate.Schema({ 23 | 'wowzaIam': { 24 | 'phash': validate.text, 25 | } 26 | }, validate.get('wowzaIam')) 27 | 28 | _streams_schema = validate.Schema([{ 29 | 'nick': validate.text, 30 | 'broadcasturl': validate.text, 31 | 'server': validate.text, 32 | 'id': validate.text, 33 | }]) 34 | 35 | @classmethod 36 | def can_handle_url(cls, url): 37 | return cls._url_re.match(url) is not None 38 | 39 | def _get_streams(self): 40 | log.debug('Version 2018-07-12') 41 | log.info('This is a custom plugin. ') 42 | channel = self._url_re.match(self.url).group('channel') 43 | log.info('Channel: {0}'.format(channel)) 44 | self.session.http.headers.update({'User-Agent': useragents.FIREFOX}) 45 | self.session.http.parse_cookies('adult=1') 46 | res = self.session.http.get(self.url) 47 | 48 | m = self._streams_re.search(res.text) 49 | if not m: 50 | log.debug('No streams data found.') 51 | return 52 | 53 | m2 = self._user_re.search(res.text) 54 | if not m: 55 | log.debug('No user data found.') 56 | return 57 | 58 | _streams = parse_json(m.group('data'), schema=self._streams_schema) 59 | _user = parse_json(m2.group('data'), schema=self._user_schema) 60 | 61 | _x = [] 62 | for _s in _streams: 63 | if _s.get('nick') == channel: 64 | _x = _s 65 | break 66 | 67 | if not _x: 68 | log.error('Channel is not available.') 69 | return 70 | 71 | app = 'videochat/?{0}'.format(_user['phash']) 72 | rtmp = 'rtmp://{0}/videochat/'.format(_x['server']) 73 | 74 | params = { 75 | 'rtmp': rtmp, 76 | 'pageUrl': self.url, 77 | 'app': app, 78 | 'playpath': _x['broadcasturl'], 79 | 'swfVfy': self.SWF_URL, 80 | 'live': True 81 | } 82 | return {'live': RTMPStream(self.session, params=params)} 83 | 84 | 85 | __plugin__ = Zbiornik 86 | --------------------------------------------------------------------------------