├── addon.xml ├── changelog.txt ├── credits.txt ├── fanart.jpg ├── icon.png └── lib ├── __init__.py └── resources ├── __init__.py └── lib ├── __init__.py ├── indexers ├── __init__.py ├── channels.py ├── docu.py ├── episodes.py ├── furk.py ├── movies.py ├── navigator.py └── tvshows.py ├── modules ├── __init__.py ├── anilist.py ├── cache.py ├── cfdecoder.py ├── cfscrape.py ├── changelog.py ├── checker.py ├── cleandate.py ├── cleangenre.py ├── cleantitle.py ├── client.py ├── control.py ├── debrid.py ├── directstream.py ├── dom_parser.py ├── dom_parser2.py ├── downloader.py ├── downloader_bennu.py ├── favourites.py ├── filmon.py ├── get_source_info.py ├── jsunfuck.py ├── jsunpack.py ├── keepalive.py ├── libtools.py ├── log_utils.py ├── metacache.py ├── playcount.py ├── player.py ├── proxy.py ├── pyaes │ ├── __init__.py │ ├── aes.py │ ├── blockfeeder.py │ └── util.py ├── regex.py ├── source_utils.py ├── sources.py ├── thexem.py ├── trailer.py ├── trakt.py ├── tvmaze.py ├── unjuice.py ├── utils.py ├── views.py ├── weblogin.py ├── workers.py ├── youtube.py └── youtube_menu.py └── sources └── __init__.py /addon.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | all 10 | Exodus Redux 11 | 12 | 13 | -------------------------------------------------------------------------------- /changelog.txt: -------------------------------------------------------------------------------- 1 | Exodus Redux Module Changelog: 2 | 2.0.3 3 | - Fix Trakt and IMDb lists 4 | 2.0.2 5 | - Fix Debrid Only playback 6 | 2.0.1 7 | - Replaced Lambdascraper with Openscrapers 8 | 2.0.0 9 | - Rebased 10 | 1.0.9 11 | - New Trakt.tv API key 12 | 1.0.8 13 | - Resume fix for Kodi 18 14 | - Removed Channels 15 | 1.0.7 16 | - Fix "In Theaters" 17 | 1.0.6 18 | - Cleanup module selection 19 | - Fix "In Theaters" 20 | - Fix Extended Info on TV Shows 21 | 1.0.5 - 22 | 1.0.4 - 23 | 1.0.3 - 24 | 1.0.2 - 25 | 1.0.1 26 | - Initial commit 27 | 
-------------------------------------------------------------------------------- /credits.txt: -------------------------------------------------------------------------------- 1 | ExodusRedux is a fork of Exodus. It has the same basic look and feel as the original Exodus with functionality and themes incorporated from the various iterations of Exodus. The project builds on the efforts of the developers listed below. 2 | 3 | - Lambda 4 | - TKNorris 5 | - Mr.Blamo 6 | - Nixgates 7 | - I-A-C 8 | - WilsonMagic 9 | - 13Clowns 10 | - Eggman 11 | - Jor-El 12 | - Maud'Dib 13 | - Jewbmx 14 | - Team Supremacy 15 | - and any others i may have missed! 16 | -------------------------------------------------------------------------------- /fanart.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/I-A-C/script.module.exodusredux/ec2ce6d8e28db15c23563a76dfa48d40251ca5c2/fanart.jpg -------------------------------------------------------------------------------- /icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/I-A-C/script.module.exodusredux/ec2ce6d8e28db15c23563a76dfa48d40251ca5c2/icon.png -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/I-A-C/script.module.exodusredux/ec2ce6d8e28db15c23563a76dfa48d40251ca5c2/lib/__init__.py -------------------------------------------------------------------------------- /lib/resources/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/I-A-C/script.module.exodusredux/ec2ce6d8e28db15c23563a76dfa48d40251ca5c2/lib/resources/__init__.py -------------------------------------------------------------------------------- /lib/resources/lib/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/I-A-C/script.module.exodusredux/ec2ce6d8e28db15c23563a76dfa48d40251ca5c2/lib/resources/lib/__init__.py -------------------------------------------------------------------------------- /lib/resources/lib/indexers/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /lib/resources/lib/indexers/channels.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ''' 4 | Covenant Add-on 5 | 6 | This program is free software: you can redistribute it and/or modify 7 | it under the terms of the GNU General Public License as published by 8 | the Free Software Foundation, either version 3 of the License, or 9 | (at your option) any later version. 10 | 11 | This program is distributed in the hope that it will be useful, 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | GNU General Public License for more details. 15 | 16 | You should have received a copy of the GNU General Public License 17 | along with this program. If not, see . 
'''


from resources.lib.modules import cleangenre
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import metacache
from resources.lib.modules import workers
from resources.lib.modules import trakt

import sys,re,json,urllib,urlparse,datetime

# Kodi passes the plugin query string in sys.argv[2]; parse it into a dict.
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict()

action = params.get('action')

class channels:
    """Builds a Kodi directory of the films currently airing on UK Sky movie channels."""

    def __init__(self):
        self.list = [] ; self.items = []

        # NOTE: this rebinds the attribute over the method of the same name,
        # so uk_datetime() is only callable once per instance (here, at init).
        self.uk_datetime = self.uk_datetime()
        self.systime = (self.uk_datetime).strftime('%Y%m%d%H%M%S%f')
        self.tm_img_link = 'https://image.tmdb.org/t/p/w%s%s'
        self.lang = control.apiLanguage()['trakt']

        self.sky_now_link = 'http://epgservices.sky.com/5.1.1/api/2.0/channel/json/%s/now/nn/0'
        self.sky_programme_link = 'http://tv.sky.com/programme/channel/%s/%s/%s.json'


    def get(self):
        """Resolve every channel's current film, enrich with Trakt metadata, and render the directory."""
        # (sort number, display name, Sky EPG channel id)
        channels = [
            ('01', 'Sky Premiere', '4021'),
            ('02', 'Sky Premiere +1', '1823'),
            ('03', 'Sky Showcase', '4033'),
            ('04', 'Sky Greats', '1815'),
            ('05', 'Sky Disney', '4013'),
            ('06', 'Sky Family', '4018'),
            ('07', 'Sky Action', '4014'),
            ('08', 'Sky Comedy', '4019'),
            ('09', 'Sky Crime', '4062'),
            ('10', 'Sky Drama', '4016'),
            ('11', 'Sky Sci Fi', '4017'),
            ('12', 'Sky Select', '4020'),
            ('13', 'Film4', '4044'),
            ('14', 'Film4 +1', '1629'),
            ('15', 'TCM', '3811'),
            ('16', 'TCM +1', '5275')
        ]

        # Fetch each channel's now-playing programme concurrently.
        threads = []
        for i in channels: threads.append(workers.Thread(self.sky_list, i[0], i[1], i[2]))
        [i.start() for i in threads]
        [i.join() for i in threads]

        # Look up Trakt metadata for every (title, year) found, again concurrently.
        threads = []
        for i in range(0, len(self.items)): threads.append(workers.Thread(self.items_list, self.items[i]))
        [i.start() for i in threads]
        [i.join() for i in threads]

        # Merge cached artwork (poster2 / fanart) from the local metadata cache.
        self.list = metacache.local(self.list, self.tm_img_link, 'poster2', 'fanart')

        try: self.list = sorted(self.list, key=lambda k: k['num'])
        except: pass

        self.channelDirectory(self.list)
        return self.list


    def sky_list(self, num, channel, id):
        """Fetch the film currently airing on one Sky channel and append (title, year, channel, num) to self.items."""
        try:
            url = self.sky_now_link % id
            result = client.request(url, timeout='10')
            result = json.loads(result)
            match = result['listings'][id][0]['url']

            # Sky's schedule JSON is split into four 6-hour slots per day (0..3).
            dt1 = (self.uk_datetime).strftime('%Y-%m-%d')
            dt2 = int((self.uk_datetime).strftime('%H'))
            if (dt2 < 6): dt2 = 0
            elif (dt2 >= 6 and dt2 < 12): dt2 = 1
            elif (dt2 >= 12 and dt2 < 18): dt2 = 2
            elif (dt2 >= 18): dt2 = 3

            url = self.sky_programme_link % (id, str(dt1), str(dt2))
            result = client.request(url, timeout='10')
            result = json.loads(result)
            result = result['listings'][id]
            # Match the schedule entry back to the now-playing listing by its url.
            result = [i for i in result if i['url'] == match][0]

            # Year is embedded in the description as "(YYYY)".
            year = result['d']
            year = re.findall('[(](\d{4})[)]', year)[0].strip()
            year = year.encode('utf-8')

            title = result['t']
            title = title.replace('(%s)' % year, '').strip()
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            self.items.append((title, year, channel, num))
        except:
            pass


    def items_list(self, i):
        """Resolve one (title, year, channel, num) tuple against Trakt and append a metadata dict to self.list."""
        try:
            item = trakt.SearchAll(i[0], i[1], True)[0]

            content = item.get('movie')
            if not content: content = item.get('show')
            item = content

            title = item.get('title')
            title = client.replaceHTMLCodes(title)

            originaltitle = title

            year = item.get('year', 0)
            year = re.sub('[^0-9]', '', str(year))

            imdb = item.get('ids', {}).get('imdb', '0')
            imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))

            tmdb = str(item.get('ids', {}).get('tmdb', 0))

            premiered = item.get('released', '0')
            try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
            except: premiered = '0'

            genre = item.get('genres', [])
            genre = [x.title() for x in genre]
            genre = ' / '.join(genre).strip()
            if not genre: genre = '0'

            duration = str(item.get('Runtime', 0))

            rating = item.get('rating', '0')
            if not rating or rating == '0.0': rating = '0'

            votes = item.get('votes', '0')
            try: votes = str(format(int(votes), ',d'))
            except: pass

            mpaa = item.get('certification', '0')
            if not mpaa: mpaa = '0'

            tagline = item.get('tagline', '0')

            plot = item.get('overview', '0')

            people = trakt.getPeople(imdb, 'movies')

            director = writer = ''
            if 'crew' in people and 'directing' in people['crew']:
                director = ', '.join([director['person']['name'] for director in people['crew']['directing'] if director['job'].lower() == 'director'])
            if 'crew' in people and 'writing' in people['crew']:
                writer = ', '.join([writer['person']['name'] for writer in people['crew']['writing'] if writer['job'].lower() in ['writer', 'screenplay', 'author']])

            cast = []
            for person in people.get('cast', []):
                cast.append({'name': person['person']['name'], 'role': person['character']})
            cast = [(person['name'], person['role']) for person in cast]

            # Apply a localized title/tagline/plot when the UI language isn't English
            # and Trakt has a translation available.
            try:
                if self.lang == 'en' or self.lang not in item.get('available_translations', [self.lang]): raise Exception()

                trans_item = trakt.getMovieTranslation(imdb, self.lang, full=True)

                title = trans_item.get('title') or title
                tagline = trans_item.get('tagline') or tagline
                plot = trans_item.get('overview') or plot
            except:
                pass

            self.list.append({'title': title, 'originaltitle': originaltitle, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'imdb': imdb, 'tmdb': tmdb, 'poster': '0', 'channel': i[2], 'num': i[3]})
        except:
            pass


    def uk_datetime(self):
        """Return current UK time: UTC plus one hour during British Summer Time.

        BST runs from the last Sunday of March (computed as the last Sunday
        strictly before April 1st) to the last Sunday of October.
        """
        dt = datetime.datetime.utcnow() + datetime.timedelta(hours = 0)
        d = datetime.datetime(dt.year, 4, 1)
        dston = d - datetime.timedelta(days=d.weekday() + 1)
        d = datetime.datetime(dt.year, 11, 1)
        dstoff = d - datetime.timedelta(days=d.weekday() + 1)
        if dston <= dt < dstoff:
            return dt + datetime.timedelta(hours = 1)
        else:
            return dt


    def channelDirectory(self, items):
        """Render the collected channel items as playable Kodi list items."""
        if items == None or len(items) == 0: control.idle() ; sys.exit()

        sysaddon = sys.argv[0]

        syshandle = int(sys.argv[1])

        addonPoster, addonBanner = control.addonPoster(), control.addonBanner()

        addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')

        # Kodi < 12.9 list items lack getArt(); that marks an "old" install.
        try: isOld = False ; control.item().getArt('type')
        except: isOld = True

        isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'

        playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')

        queueMenu = control.lang(32065).encode('utf-8')

        refreshMenu = control.lang(32072).encode('utf-8')


        for i in items:
            try:
                label = '[B]%s[/B] : %s (%s)' % (i['channel'].upper(), i['title'], i['year'])
                sysname = urllib.quote_plus('%s (%s)' % (i['title'], i['year']))
                systitle = urllib.quote_plus(i['title'])
                imdb, tmdb, year = i['imdb'], i['tmdb'], i['year']

                # '0' is this codebase's sentinel for "missing"; drop those keys.
                meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
                meta.update({'code': imdb, 'imdbnumber': imdb, 'imdb_id': imdb})
                meta.update({'tmdb_id': tmdb})
                meta.update({'mediatype': 'movie'})
                meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
                #meta.update({'trailer': 'plugin://script.extendedinfo/?info=playtrailer&&id=%s' % imdb})
                meta.update({'playcount': 0, 'overlay': 6})
                try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
                except: pass

                sysmeta = urllib.quote_plus(json.dumps(meta))


                url = '%s?action=play&title=%s&year=%s&imdb=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, sysmeta, self.systime)
                sysurl = urllib.quote_plus(url)


                cm = []

                cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))

                cm.append((refreshMenu, 'RunPlugin(%s?action=refresh)' % sysaddon))

                cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))

                if isOld == True:
                    cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))


                item = control.item(label=label)

                art = {}

                if 'poster2' in i and not i['poster2'] == '0':
                    art.update({'icon': i['poster2'], 'thumb': i['poster2'], 'poster': i['poster2']})
                elif 'poster' in i and not i['poster'] == '0':
                    art.update({'icon': i['poster'], 'thumb': i['poster'], 'poster': i['poster']})
                else:
                    art.update({'icon': addonPoster, 'thumb': addonPoster, 'poster': addonPoster})

                art.update({'banner': addonBanner})

                if settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
                    item.setProperty('Fanart_Image', i['fanart'])
                elif not addonFanart == None:
                    item.setProperty('Fanart_Image', addonFanart)

                item.setArt(art)
                item.addContextMenuItems(cm)
                item.setProperty('IsPlayable', isPlayable)
                item.setInfo(type='Video', infoLabels = meta)

                video_streaminfo = {'codec': 'h264'}
                item.addStreamInfo('video', video_streaminfo)

                control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False)
            except:
                pass

        control.content(syshandle, 'files')
        control.directory(syshandle, cacheToDisc=True)
-------------------------------------------------------------------------------- /lib/resources/lib/indexers/docu.py: --------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

'''

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
17 | ''' 18 | 19 | import requests,os,sys,re,datetime,urlparse,json,xbmcgui,xbmcplugin 20 | 21 | from resources.lib.modules import log_utils 22 | from resources.lib.modules import cache 23 | from resources.lib.modules import client 24 | from resources.lib.modules import control 25 | 26 | sysaddon = sys.argv[0] ; syshandle = int(sys.argv[1]) 27 | artPath = control.artPath() ; addonFanart = control.addonFanart() 28 | 29 | class documentary: 30 | def __init__(self): 31 | self.list = [] 32 | 33 | self.docu_link = 'https://topdocumentaryfilms.com/' 34 | self.docu_cat_list = 'https://topdocumentaryfilms.com/watch-online/' 35 | 36 | def root(self): 37 | try: 38 | html = client.request(self.docu_cat_list) 39 | 40 | cat_list = client.parseDOM(html, 'div', attrs={'class':'sitemap-wraper clear'}) 41 | for content in cat_list: 42 | cat_info = client.parseDOM(content, 'h2')[0] 43 | cat_url = client.parseDOM(cat_info, 'a', ret='href')[0] 44 | cat_title = client.parseDOM(cat_info, 'a')[0].encode('utf-8', 'ignore').decode('utf-8').replace("&","&").replace(''',"'").replace('"','"').replace(''',"'").replace('–',' - ').replace('’',"'").replace('‘',"'").replace('&','&').replace('â','') 45 | try: 46 | cat_icon = client.parseDOM(content, 'img', ret='data-src')[0] 47 | except: 48 | cat_icon = client.parseDOM(content, 'img', ret='src')[0] 49 | cat_action = 'docuHeaven&docuCat=%s' % cat_url 50 | self.list.append({'name': cat_title, 'url': cat_url, 'image': cat_icon, 'action': cat_action}) 51 | except Exception as e: 52 | log_utils.log('documentary root : Exception - ' + str(e)) 53 | pass 54 | 55 | self.list = self.list[::-1] 56 | self.addDirectory(self.list) 57 | return self.list 58 | 59 | def docu_list(self, url): 60 | try: 61 | html = client.request(url) 62 | 63 | cat_list = client.parseDOM(html, 'article', attrs={'class':'module'}) 64 | for content in cat_list: 65 | docu_info = client.parseDOM(content, 'h2')[0] 66 | docu_url = client.parseDOM(docu_info, 'a', ret='href')[0] 67 | 
docu_title = client.parseDOM(docu_info, 'a')[0].replace("&","&").replace(''',"'").replace('"','"').replace(''',"'").replace('–',' - ').replace('’',"'").replace('‘',"'").replace('&','&').replace('â','') 68 | try: 69 | docu_icon = client.parseDOM(content, 'img', ret='data-src')[0] 70 | except: 71 | docu_icon = client.parseDOM(content, 'img', ret='src')[0] 72 | docu_action = 'docuHeaven&docuPlay=%s' % docu_url 73 | self.list.append({'name': docu_title, 'url': docu_url, 'image': docu_icon, 'action': docu_action}) 74 | 75 | try: 76 | navi_content = client.parseDOM(html, 'div', attrs={'class':'pagination module'})[0] 77 | links = client.parseDOM(navi_content, 'a', ret='href') 78 | tmp_list = [] 79 | link = links[(len(links)-1)] 80 | docu_action = 'docuHeaven&docuCat=%s' % link 81 | self.list.append({'name': control.lang(32053).encode('utf-8'), 'url': link, 'image': control.addonNext(), 'action': docu_action}) 82 | except: 83 | pass 84 | except Exception as e: 85 | log_utils.log('documentary docu_list : Exception - ' + str(e)) 86 | pass 87 | 88 | self.addDirectory(self.list) 89 | return self.list 90 | 91 | def docu_play(self, url): 92 | try: 93 | docu_page = client.request(url) 94 | docu_item = client.parseDOM(docu_page, 'meta', attrs={'itemprop':'embedUrl'}, ret='content')[0] 95 | if 'http:' not in docu_item and 'https:' not in docu_item: 96 | docu_item = 'https:' + docu_item 97 | url = docu_item 98 | 99 | docu_title = client.parseDOM(docu_page, 'meta', attrs={'property':'og:title'}, ret='content')[0].encode('utf-8', 'ignore').decode('utf-8').replace("&","&").replace(''',"'").replace('"','"').replace(''',"'").replace('–',' - ').replace('’',"'").replace('‘',"'").replace('&','&').replace('â','') 100 | 101 | if 'youtube' in url: 102 | if 'videoseries' not in url: 103 | video_id = client.parseDOM(docu_page, 'div', attrs={'class':'youtube-player'}, ret='data-id')[0] 104 | url = 'plugin://plugin.video.youtube/play/?video_id=%s' % video_id 105 | else: 106 | pass 107 | elif 
'dailymotion' in url: 108 | video_id = client.parseDOM(docu_page, 'div', attrs={'class':'youtube-player'}, ret='data-id')[0] 109 | url = self.getDailyMotionStream(video_id) 110 | else: 111 | log_utils.log('Play Documentary: Unknown Host: ' + str(url)) 112 | control.infoDialog('Unknown Host - Report To Developer: ' + str(url), sound=True, icon='INFO') 113 | 114 | control.execute('PlayMedia(%s)' % url) 115 | 116 | # item = xbmcgui.ListItem(str(docu_title), iconImage='DefaultVideo.png', thumbnailImage='DefaultVideo.png') 117 | # item.setInfo(type='Video', infoLabels={'Title': str(docu_title), 'Plot': str(docu_title)}) 118 | # item.setProperty('IsPlayable','true') 119 | # item.setPath(url) 120 | # control.resolve(int(sys.argv[1]), True, item) 121 | except Exception as e: 122 | log_utils.log('docu_play: Exception - ' + str(e)) 123 | pass 124 | 125 | def sort_key(self, elem): 126 | if elem[0] == "auto": 127 | return 1 128 | else: 129 | return int(elem[0].split("@")[0]) 130 | 131 | # Code originally written by gujal, as part of the DailyMotion Addon in the official Kodi Repo. Modified to fit the needs here. 
132 | def getDailyMotionStream(self, id): 133 | headers = {'User-Agent':'Android'} 134 | cookie = {'Cookie':"lang=en_US; ff=off"} 135 | r = requests.get("http://www.dailymotion.com/player/metadata/video/"+id,headers=headers,cookies=cookie) 136 | content = r.json() 137 | if content.get('error') is not None: 138 | Error = (content['error']['title']) 139 | xbmc.executebuiltin('XBMC.Notification(Info:,'+ Error +' ,5000)') 140 | return 141 | else: 142 | cc= content['qualities'] 143 | 144 | cc = cc.items() 145 | 146 | cc = sorted(cc,key=self.sort_key,reverse=True) 147 | m_url = '' 148 | other_playable_url = [] 149 | 150 | for source,json_source in cc: 151 | source = source.split("@")[0] 152 | for item in json_source: 153 | 154 | m_url = item.get('url',None) 155 | 156 | if m_url: 157 | if source == "auto" : 158 | continue 159 | 160 | elif int(source) <= 2 : 161 | if 'video' in item.get('type',None): 162 | return m_url 163 | 164 | elif '.mnft' in m_url: 165 | continue 166 | other_playable_url.append(m_url) 167 | 168 | if len(other_playable_url) >0: # probably not needed, only for last resort 169 | for m_url in other_playable_url: 170 | 171 | if '.m3u8?auth' in m_url: 172 | rr = requests.get(m_url,cookies=r.cookies.get_dict() ,headers=headers) 173 | if rr.headers.get('set-cookie'): 174 | print 'adding cookie to url' 175 | strurl = re.findall('(http.+)',rr.text)[0].split('#cell')[0]+'|Cookie='+rr.headers['set-cookie'] 176 | else: 177 | strurl = re.findall('(http.+)',rr.text)[0].split('#cell')[0] 178 | return strurl 179 | 180 | def addDirectoryItem(self, name, query, thumb, icon, context=None, queue=False, isAction=True, isFolder=True): 181 | try: name = control.lang(name).encode('utf-8') 182 | except: pass 183 | url = '%s?action=%s' % (sysaddon, query) if isAction == True else query 184 | thumb = os.path.join(artPath, thumb) if not artPath == None else icon 185 | cm = [] 186 | if queue == True: cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon)) 187 | if not 
context == None: cm.append((control.lang(context[0]).encode('utf-8'), 'RunPlugin(%s?action=%s)' % (sysaddon, context[1]))) 188 | item = control.item(label=name) 189 | item.addContextMenuItems(cm) 190 | item.setArt({'icon': thumb, 'thumb': thumb}) 191 | if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart) 192 | control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder) 193 | 194 | def endDirectory(self): 195 | control.content(syshandle, 'addons') 196 | control.directory(syshandle, cacheToDisc=True) 197 | 198 | def addDirectory(self, items, queue=False, isFolder=True): 199 | if items == None or len(items) == 0: control.idle() ; sys.exit() 200 | 201 | sysaddon = sys.argv[0] 202 | 203 | syshandle = int(sys.argv[1]) 204 | 205 | addonFanart, addonThumb, artPath = control.addonFanart(), control.addonThumb(), control.artPath() 206 | 207 | queueMenu = control.lang(32065).encode('utf-8') 208 | 209 | playRandom = control.lang(32535).encode('utf-8') 210 | 211 | addToLibrary = control.lang(32551).encode('utf-8') 212 | 213 | for i in items: 214 | try: 215 | name = i['name'] 216 | 217 | if i['image'].startswith('http'): thumb = i['image'] 218 | elif not artPath == None: thumb = os.path.join(artPath, i['image']) 219 | else: thumb = addonThumb 220 | 221 | item = control.item(label=name) 222 | 223 | if isFolder: 224 | url = '%s?action=%s' % (sysaddon, i['action']) 225 | try: url += '&url=%s' % urllib.quote_plus(i['url']) 226 | except: pass 227 | item.setProperty('IsPlayable', 'false') 228 | else: 229 | url = '%s?action=%s' % (sysaddon, i['action']) 230 | try: url += '&url=%s' % i['url'] 231 | except: pass 232 | item.setProperty('IsPlayable', 'true') 233 | item.setInfo("mediatype", "video") 234 | item.setInfo("audio", '') 235 | 236 | item.setArt({'icon': thumb, 'thumb': thumb}) 237 | if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart) 238 | 239 | control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder) 
240 | except: 241 | pass 242 | 243 | control.content(syshandle, 'addons') 244 | control.directory(syshandle, cacheToDisc=True) 245 | -------------------------------------------------------------------------------- /lib/resources/lib/indexers/furk.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Exodus Redux Add-on 3 | 4 | This program is free software: you can redistribute it and/or modify 5 | it under the terms of the GNU General Public License as published by 6 | the Free Software Foundation, either version 3 of the License, or 7 | (at your option) any later version. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU General Public License for more details. 13 | 14 | You should have received a copy of the GNU General Public License 15 | along with this program. If not, see . 16 | ''' 17 | 18 | from resources.lib.modules import control 19 | import sys, requests, json, urllib, urlparse, os 20 | 21 | sysaddon = sys.argv[0] ; syshandle = int(sys.argv[1]) 22 | accepted_extensions = ['mkv','mp4','avi', 'm4v'] 23 | 24 | class furk: 25 | def __init__(self): 26 | self.base_link = "https://www.furk.net" 27 | self.meta_search_link = "/api/plugins/metasearch?api_key=%s&q=%s" 28 | self.get_user_files_link = "/api/file/get?api_key=%s" 29 | self.file_info_link = "/api/file/info?api_key%s" 30 | self.file_link_link = "/api/file/link?" 31 | self.protect_file_link = "/api/file/protect?" 32 | self.user_feeds_link = "/api/feed/get?" 33 | self.add_download_link = "/api/dl/add?" 
        self.api_key = control.setting('furk.api')
        self.list = []

    def user_files(self):
        """List the user's stored Furk files as playable entries; skips multi-video archives."""
        if self.api_key == '':
            return ''
        try:
            s = requests.Session()
            url = self.base_link + self.get_user_files_link % self.api_key
            p = s.get(url)
            p = json.loads(p.text)
            files = p['files']
            for i in files:
                name = i['name']
                id = i['id']
                url_dl = ''
                # Keep only direct-download URLs with an accepted video extension.
                for x in accepted_extensions:
                    if i['url_dl'].endswith(x):
                        url_dl = i['url_dl']
                    else:
                        continue
                if url_dl == '':
                    continue
                if not int(i['files_num_video_player']) > 1:
                    if int(i['ss_num']) > 0:
                        thumb = i['ss_urls'][0]
                    else:
                        thumb = ''

                    self.addDirectoryItem(name , url_dl, thumb, '', False)

                else:
                    pass
            self.endDirectory()
            return ''
        except:
            pass
    def search(self):
        """Show stored search terms (sqlite 'furk' table) plus a 'New Search' entry."""
        from resources.lib.indexers import navigator

        navigator.navigator().addDirectoryItem('New Search', 'furkSearchNew', 'search.png', 'search.png')
        try: from sqlite3 import dbapi2 as database
        except: from pysqlite2 import dbapi2 as database

        dbcon = database.connect(control.searchFile)
        dbcur = dbcon.cursor()

        try:
            dbcur.executescript("CREATE TABLE IF NOT EXISTS furk (ID Integer PRIMARY KEY AUTOINCREMENT, term);")
        except:
            pass

        dbcur.execute("SELECT * FROM furk ORDER BY ID DESC")
        lst = []

        delete_option = False
        for (id,term) in dbcur.fetchall():
            # Dedupe terms already shown (string-membership check on the list repr).
            if term not in str(lst):
                delete_option = True
                navigator.navigator().addDirectoryItem(term, 'furkMetaSearch&url=%s' % term, 'search.png', 'search.png')
                lst += [(term)]
        dbcur.close()

        if delete_option:
            navigator.navigator().addDirectoryItem(32605, 'clearCacheSearch', 'tools.png', 'DefaultAddonProgram.png')

        navigator.navigator().endDirectory()

    def search_new(self):
        """Prompt for a search term, persist it, and open the metasearch view."""
        control.idle()

        t = control.lang(32010).encode('utf-8')
        k = control.keyboard('', t) ; k.doModal()
        q = k.getText() if k.isConfirmed() else None

        if (q == None or q == ''): return

        try: from sqlite3 import dbapi2 as database
        except: from pysqlite2 import dbapi2 as database

        dbcon = database.connect(control.searchFile)
        dbcur = dbcon.cursor()
        dbcur.execute("INSERT INTO furk VALUES (?,?)", (None,q))
        dbcon.commit()
        dbcur.close()
        url = urllib.quote_plus(q)
        url = '%s?action=furkMetaSearch&url=%s' % (sys.argv[0], urllib.quote_plus(url))
        control.execute('Container.Update(%s)' % url)

    def furk_meta_search(self, url):
        """Run a Furk metasearch for `url` (the query term) and list playable results."""
        if self.api_key == '':
            return ''
        try:
            s = requests.Session()
            url = (self.base_link + self.meta_search_link % (self.api_key, url)).replace(' ', '+')
            p = s.get(url)
            p = json.loads(p.text)
            files = p['files']
            for i in files:
                name = i['name']
                id = i['id']
                url_dl = ''
                for x in accepted_extensions:
                    if 'url_dl' in i:
                        if i['url_dl'].endswith(x):
                            url_dl = i['url_dl']
                        else:
                            continue
                    else:
                        continue
                if url_dl == '':
                    continue
                if not int(i['files_num_video_player']) > 1:
                    if int(i['ss_num']) > 0:
                        thumb = i['ss_urls'][0]
                    else:
                        thumb = ''

                    self.addDirectoryItem(name, url_dl, thumb, '', False)

                else:
                    # print(i['name'])
                    # self.addDirectoryItem(i['name'].encode('utf-8'), i['url_dl'], '', '')
                    continue
            self.endDirectory()
            return ''
        except:
            pass

    def addDirectoryItem(self, name, query, thumb, icon, isAction=True):
        """Add one Kodi list item; `query` is a raw URL when isAction is False."""
        try:
            name = name.encode('utf-8')
            url = '%s?action=%s' % (sysaddon, query) if isAction == True else query
            item = control.item(label=name)
            item.setArt({'icon': thumb, 'thumb': thumb})
            control.addItem(handle=syshandle, url=url, listitem=item)
        except:
            pass

    def endDirectory(self):
        """Finalize the Kodi directory listing."""
        control.content(syshandle, 'addons')
        control.directory(syshandle, cacheToDisc=True)
-------------------------------------------------------------------------------- /lib/resources/lib/modules/__init__.py: --------------------------------------------------------------------------------

-------------------------------------------------------------------------------- /lib/resources/lib/modules/anilist.py: --------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

'''
    Covenant Add-on

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
def getAlternativTitle(title):
    """Look up an alternative title (romaji or synonym) for an anime on AniList.

    Returns the first alternative differing from *title*, or None on any failure.
    """
    try:
        t = cleantitle.get(title)

        r = _getAniList('/anime/search/%s' % title)
        # Keep (romaji_title, synonyms) pairs whose English title matches the query.
        r = [(i.get('title_romaji'), i.get('synonyms', [])) for i in utils.json_loads_as_str(r) if cleantitle.get(i.get('title_english', '')) == t]
        # Prefer the first synonym when the romaji title equals the query itself.
        r = [i[1][0] if i[0] == title and len(i[1]) > 0 else i[0] for i in r]
        # First non-empty candidate that differs from the original title;
        # IndexError (no candidate) is swallowed by the except below.
        r = [i for i in r if i if i != title][0]
        return r
    except:
        # best-effort: any API/JSON/lookup failure yields None
        pass
def get(function, duration, *args):
    # type: (function, int, object) -> object or None
    """
    Gets cached value for provided function with optional arguments, or executes and stores the result
    :param function: Function to be executed
    :param duration: Duration of validity of cache in hours
    :param args: Optional arguments for the provided function
    :return: the (parsed) cached or freshly computed value, or None on failure
    """

    try:
        key = _hash_function(function, args)
        cache_result = cache_get(key)
        if cache_result:
            if _is_cache_valid(cache_result['date'], duration):
                return ast.literal_eval(cache_result['value'].encode('utf-8'))

        fresh_result = repr(function(*args))
        if not fresh_result:
            # If the cache is old, but we didn't get a fresh result, fall back to
            # the stale cached VALUE. Bug fix: previously this returned the raw
            # sqlite row dict ({'key':..,'value':..,'date':..}) instead of the
            # parsed value, giving callers a different type than every other path.
            if cache_result:
                return ast.literal_eval(cache_result['value'].encode('utf-8'))
            return None

        cache_insert(key, fresh_result)
        return ast.literal_eval(fresh_result.encode('utf-8'))
    except Exception:
        return None
def cache_insert(key, value):
    # type: (str, str) -> None
    """Insert or update a cache row for *key*, creating the table on first use."""
    cursor = _get_connection_cursor()
    now = int(time.time())
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS %s (key TEXT, value TEXT, date INTEGER, UNIQUE(key))"
        % cache_table
    )
    update_result = cursor.execute(
        "UPDATE %s SET value=?,date=? WHERE key=?"
        % cache_table, (value, now, key))

    # Bug fix: was `rowcount is 0` - identity comparison against an int literal
    # is implementation-defined and a SyntaxWarning on modern Pythons; use `==`.
    if update_result.rowcount == 0:
        # UPDATE touched nothing, so the key does not exist yet: insert it.
        cursor.execute(
            "INSERT INTO %s Values (?, ?, ?)"
            % cache_table, (key, value, now)
        )

    cursor.connection.commit()
cursor.execute("VACUUM") 158 | cursor.commit() 159 | except: 160 | pass 161 | except: 162 | pass 163 | 164 | 165 | def cache_clear_all(): 166 | cache_clear() 167 | cache_clear_meta() 168 | cache_clear_providers() 169 | 170 | 171 | def _get_connection_cursor(): 172 | conn = _get_connection() 173 | return conn.cursor() 174 | 175 | 176 | def _get_connection(): 177 | control.makeFile(control.dataPath) 178 | conn = db.connect(control.cacheFile) 179 | conn.row_factory = _dict_factory 180 | return conn 181 | 182 | 183 | def _get_connection_cursor_meta(): 184 | conn = _get_connection_meta() 185 | return conn.cursor() 186 | 187 | 188 | def _get_connection_meta(): 189 | control.makeFile(control.dataPath) 190 | conn = db.connect(control.metacacheFile) 191 | conn.row_factory = _dict_factory 192 | return conn 193 | 194 | 195 | def _get_connection_cursor_providers(): 196 | conn = _get_connection_providers() 197 | return conn.cursor() 198 | 199 | 200 | def _get_connection_providers(): 201 | control.makeFile(control.dataPath) 202 | conn = db.connect(control.providercacheFile) 203 | conn.row_factory = _dict_factory 204 | return conn 205 | 206 | 207 | def _get_connection_cursor_search(): 208 | conn = _get_connection_search() 209 | return conn.cursor() 210 | 211 | 212 | def _get_connection_search(): 213 | control.makeFile(control.dataPath) 214 | conn = db.connect(control.searchFile) 215 | conn.row_factory = _dict_factory 216 | return conn 217 | 218 | 219 | def _dict_factory(cursor, row): 220 | d = {} 221 | for idx, col in enumerate(cursor.description): 222 | d[col[0]] = row[idx] 223 | return d 224 | 225 | 226 | def _hash_function(function_instance, *args): 227 | return _get_function_name(function_instance) + _generate_md5(args) 228 | 229 | 230 | def _get_function_name(function_instance): 231 | return re.sub('.+\smethod\s|.+function\s|\sat\s.+|\sof\s.+', '', repr(function_instance)) 232 | 233 | 234 | def _generate_md5(*args): 235 | md5_hash = hashlib.md5() 236 | 
[md5_hash.update(str(arg)) for arg in args] 237 | return str(md5_hash.hexdigest()) 238 | 239 | 240 | def _is_cache_valid(cached_time, cache_timeout): 241 | now = int(time.time()) 242 | diff = now - cached_time 243 | return (cache_timeout * 3600) > diff 244 | 245 | 246 | def cache_version_check(): 247 | if _find_cache_version(): 248 | cache_clear();cache_clear_meta();cache_clear_providers() 249 | control.infoDialog(control.lang(32057).encode('utf-8'), sound=True, icon='INFO') 250 | 251 | 252 | def _find_cache_version(): 253 | import os 254 | 255 | versionFile = os.path.join(control.dataPath, 'cache.v') 256 | try: 257 | with open(versionFile, 'rb') as fh: oldVersion = fh.read() 258 | except: oldVersion = '0' 259 | try: 260 | curVersion = control.addon('script.module.exodusredux').getAddonInfo('version') 261 | if oldVersion != curVersion: 262 | with open(versionFile, 'wb') as fh: fh.write(curVersion) 263 | return True 264 | else: return False 265 | except: return False 266 | 267 | def _find_cache_versionAlt(): #Added to keep track of plugin.video.exodusredux version 268 | 269 | import os 270 | versionFile = os.path.join(control.dataPath, 'cache.v2') 271 | try: 272 | with open(versionFile, 'rb') as fh: oldVersion = fh.read() 273 | except: oldVersion = '0' 274 | try: 275 | curVersion = control.addon('plugin.video.exodusredux').getAddonInfo('version') 276 | if oldVersion != curVersion: 277 | with open(versionFile, 'wb') as fh: fh.write(curVersion) 278 | return True 279 | else: return False 280 | except: return False 281 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/cfdecoder.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # -------------------------------------------------------------------------------- 3 | # Cloudflare decoder 4 | # -------------------------------------------------------------------------------- 5 | 6 | import re 7 | 
import time 8 | import urllib 9 | import urlparse 10 | 11 | from decimal import Decimal, ROUND_UP 12 | 13 | 14 | class Cloudflare: 15 | def __init__(self, response): 16 | self.timeout = 5 17 | self.domain = urlparse.urlparse(response["url"])[1] 18 | self.protocol = urlparse.urlparse(response["url"])[0] 19 | self.js_data = {} 20 | self.header_data = {} 21 | 22 | if not "var s,t,o,p,b,r,e,a,k,i,n,g,f" in response["data"] or "chk_jschl" in response["url"]: 23 | return 24 | 25 | try: 26 | self.js_data["auth_url"] = \ 27 | re.compile('
').findall(response["data"])[0] 28 | self.js_data["params"] = {} 29 | self.js_data["params"]["jschl_vc"] = \ 30 | re.compile('').findall(response["data"])[0] 31 | self.js_data["params"]["pass"] = \ 32 | re.compile('').findall(response["data"])[0] 33 | var, self.js_data["value"] = \ 34 | re.compile('var s,t,o,p,b,r,e,a,k,i,n,g,f[^:]+"([^"]+)":([^\n]+)};', re.DOTALL).findall( 35 | response["data"])[0] 36 | self.js_data["op"] = re.compile(var + "([\+|\-|\*|\/])=([^;]+)", re.MULTILINE).findall(response["data"]) 37 | self.js_data["wait"] = int(re.compile("\}, ([\d]+)\);", re.MULTILINE).findall(response["data"])[0]) / 1000 38 | except Exception as e: 39 | print(e) 40 | self.js_data = {} 41 | 42 | if "refresh" in response["headers"]: 43 | try: 44 | self.header_data["wait"] = int(response["headers"]["refresh"].split(";")[0]) 45 | self.header_data["auth_url"] = response["headers"]["refresh"].split("=")[1].split("?")[0] 46 | self.header_data["params"] = {} 47 | self.header_data["params"]["pass"] = response["headers"]["refresh"].split("=")[2] 48 | except Exception as e: 49 | print(e) 50 | self.header_data = {} 51 | 52 | @property 53 | def wait_time(self): 54 | if self.js_data.get("wait", 0): 55 | return self.js_data["wait"] 56 | else: 57 | return self.header_data.get("wait", 0) 58 | 59 | @property 60 | def is_cloudflare(self): 61 | return self.header_data.get("wait", 0) > 0 or self.js_data.get("wait", 0) > 0 62 | 63 | def get_url(self): 64 | # Metodo #1 (javascript) 65 | if self.js_data.get("wait", 0): 66 | jschl_answer = self.decode2(self.js_data["value"]) 67 | 68 | for op, v in self.js_data["op"]: 69 | # jschl_answer = eval(str(jschl_answer) + op + str(self.decode2(v))) 70 | if op == '+': 71 | jschl_answer = jschl_answer + self.decode2(v) 72 | elif op == '-': 73 | jschl_answer = jschl_answer - self.decode2(v) 74 | elif op == '*': 75 | jschl_answer = jschl_answer * self.decode2(v) 76 | elif op == '/': 77 | jschl_answer = jschl_answer / self.decode2(v) 78 | 79 | 
    def decode2(self, data):
        """Evaluate one Cloudflare JS arithmetic expression of the form
        "<numerator>/<denominator>" and return it as a high-precision Decimal.

        JS obfuscation tricks are normalised first: !+[] and !![] become 1,
        bare [] becomes 0.
        """
        data = re.sub("\!\+\[\]", "1", data)
        data = re.sub("\!\!\[\]", "1", data)
        data = re.sub("\[\]", "0", data)

        # split at the first '/': everything before is the numerator expression
        pos = data.find("/")
        numerador = data[:pos]
        denominador = data[pos + 1:]

        # Each parenthesised group like (1+2+0) is summed; the per-group results
        # are CONCATENATED as digit strings (matching the JS semantics).
        aux = re.compile('\(([0-9\+]+)\)').findall(numerador)
        num1 = ""
        for n in aux:
            # NOTE(review): eval on regex-filtered input (digits and '+' only),
            # but still eval - consider replacing with an explicit digit sum.
            num1 += str(eval(n))

        aux = re.compile('\(([0-9\+]+)\)').findall(denominador)
        num2 = ""
        for n in aux:
            num2 += str(eval(n))

        # return float(num1) / float(num2)
        # return Decimal(Decimal(num1) / Decimal(num2)).quantize(Decimal('.0000000000000001'), rounding=ROUND_UP)
        # Decimal keeps the 16 fractional digits Cloudflare's check expects.
        return Decimal(Decimal(num1) / Decimal(num2)).quantize(Decimal('.0000000000000001'))
val = 0 146 | if type(val) == str: 147 | val = val + self.get_number(part, i + 1) 148 | else: 149 | val = val + int(self.get_number(part, i + 1)) 150 | else: 151 | val = str(val) 152 | val = val + self.get_number(part, i + 1) or "0" 153 | 154 | if type(val) == str: val = "'%s'" % val 155 | data = data[0:x] + str(val) + data[y:] 156 | 157 | timeout = time.time() - t > self.timeout 158 | 159 | if not "+" in data and not "(" in data and not ")" in data: 160 | return int(self.get_number(data)) 161 | 162 | def get_number(self, str, start=0): 163 | ret = "" 164 | for chr in str[start:]: 165 | try: 166 | int(chr) 167 | except: 168 | if ret: break 169 | else: 170 | ret += chr 171 | return ret 172 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/cfscrape.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import re 4 | import subprocess 5 | from copy import deepcopy 6 | from time import sleep 7 | import requests 8 | from resources.lib.modules import cfdecoder 9 | 10 | from requests.sessions import Session 11 | 12 | try: 13 | from urlparse import urlparse 14 | except ImportError: 15 | from urllib.parse import urlparse 16 | 17 | __version__ = "1.9.4" 18 | 19 | DEFAULT_USER_AGENTS = [ 20 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36", 21 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36", 22 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36", 23 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0", 24 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0" 25 | ] 26 | 27 | DEFAULT_USER_AGENT = random.choice(DEFAULT_USER_AGENTS) 28 | 29 | BUG_REPORT = """\ 30 | Cloudflare may have changed their technique, 
or there may be a bug in the script. 31 | 32 | Please read https://github.com/Anorov/cloudflare-scrape#updates, then file a \ 33 | bug report at https://github.com/Anorov/cloudflare-scrape/issues."\ 34 | """ 35 | 36 | ANSWER_ACCEPT_ERROR = """\ 37 | The challenge answer was not properly accepted by Cloudflare. This can occur if \ 38 | the target website is under heavy load, or if Cloudflare is experiencing issues. You can 39 | potentially resolve this by increasing the challenge answer delay (default: 5 seconds). \ 40 | For example: cfscrape.create_scraper(delay=10) 41 | 42 | If increasing the delay does not help, please open a GitHub issue at \ 43 | https://github.com/Anorov/cloudflare-scrape/issues\ 44 | """ 45 | 46 | class CloudflareScraper(Session): 47 | def __init__(self, *args, **kwargs): 48 | self.delay = kwargs.pop("delay", 5) 49 | super(CloudflareScraper, self).__init__(*args, **kwargs) 50 | 51 | if "requests" in self.headers["User-Agent"]: 52 | # Set a random User-Agent if no custom User-Agent has been set 53 | self.headers["User-Agent"] = DEFAULT_USER_AGENT 54 | 55 | def is_cloudflare_challenge(self, resp): 56 | return ( 57 | resp.status_code == 503 58 | and resp.headers.get("Server", "").startswith("cloudflare") 59 | and b"jschl_vc" in resp.content 60 | and b"jschl_answer" in resp.content 61 | ) 62 | 63 | def request(self, method, url, *args, **kwargs): 64 | resp = super(CloudflareScraper, self).request(method, url, *args, **kwargs) 65 | 66 | # Check if Cloudflare anti-bot is on 67 | if self.is_cloudflare_challenge(resp): 68 | resp = self.solve_cf_challenge(resp, **kwargs) 69 | if self.is_cloudflare_challenge(resp): 70 | raise ValueError(ANSWER_ACCEPT_ERROR) 71 | 72 | return resp 73 | 74 | def solve_cf_challenge(self, resp, **original_kwargs): 75 | sleep(5) # Cloudflare requires a delay before solving the challenge 76 | body = resp.text 77 | parsed_url = urlparse(resp.url) 78 | domain = urlparse(resp.url).netloc 79 | submit_url = 
"%s://%s/cdn-cgi/l/chk_jschl" % (parsed_url.scheme, domain) 80 | 81 | cloudflare_kwargs = deepcopy(original_kwargs) 82 | params = cloudflare_kwargs.setdefault("params", {}) 83 | headers = cloudflare_kwargs.setdefault("headers", {}) 84 | headers["Referer"] = resp.url 85 | request = {} 86 | request['data'] = body 87 | request['url'] = resp.url 88 | request['headers'] = resp.headers 89 | submit_url = cfdecoder.Cloudflare(request).get_url() 90 | method = resp.request.method 91 | cloudflare_kwargs["allow_redirects"] = False 92 | redirect = self.request(method, submit_url, **cloudflare_kwargs) 93 | return self.request(method, redirect.headers["Location"], **original_kwargs) 94 | 95 | @classmethod 96 | def create_scraper(cls, sess=None, **kwargs): 97 | """ 98 | Convenience function for creating a ready-to-go CloudflareScraper object. 99 | """ 100 | scraper = cls(**kwargs) 101 | 102 | if sess: 103 | attrs = ["auth", "cert", "cookies", "headers", "hooks", "params", "proxies", "data"] 104 | for attr in attrs: 105 | val = getattr(sess, attr, None) 106 | if val: 107 | setattr(scraper, attr, val) 108 | 109 | return scraper 110 | 111 | 112 | ## Functions for integrating cloudflare-scrape with other applications and scripts 113 | 114 | @classmethod 115 | def get_tokens(cls, url, user_agent=None, **kwargs): 116 | scraper = cls.create_scraper() 117 | if user_agent: 118 | scraper.headers["User-Agent"] = user_agent 119 | 120 | try: 121 | resp = scraper.get(url, **kwargs) 122 | resp.raise_for_status() 123 | except Exception as e: 124 | logging.error("'%s' returned an error. Could not collect tokens." % url) 125 | raise 126 | 127 | domain = urlparse(resp.url).netloc 128 | cookie_domain = None 129 | 130 | for d in scraper.cookies.list_domains(): 131 | if d.startswith(".") and d in ("." + domain): 132 | cookie_domain = d 133 | break 134 | else: 135 | raise ValueError("Unable to find Cloudflare cookies. 
Does the site actually have Cloudflare IUAM (\"I'm Under Attack Mode\") enabled?") 136 | 137 | return ({ 138 | "__cfduid": scraper.cookies.get("__cfduid", "", domain=cookie_domain), 139 | "cf_clearance": scraper.cookies.get("cf_clearance", "", domain=cookie_domain) 140 | }, 141 | scraper.headers["User-Agent"] 142 | ) 143 | 144 | @classmethod 145 | def get_cookie_string(cls, url, user_agent=None, **kwargs): 146 | """ 147 | Convenience function for building a Cookie HTTP header value. 148 | """ 149 | tokens, user_agent = cls.get_tokens(url, user_agent=user_agent, **kwargs) 150 | return "; ".join("=".join(pair) for pair in tokens.items()), user_agent 151 | 152 | create_scraper = CloudflareScraper.create_scraper 153 | get_tokens = CloudflareScraper.get_tokens 154 | get_cookie_string = CloudflareScraper.get_cookie_string 155 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/changelog.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright (C) 2013 Sean Poyser (seanpoyser@gmail.com) 3 | 4 | This program is free software: you can redistribute it and/or modify 5 | it under the terms of the GNU General Public License as published by 6 | the Free Software Foundation, either version 3 of the License, or 7 | (at your option) any later version. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU General Public License for more details. 13 | 14 | You should have received a copy of the GNU General Public License 15 | along with this program. If not, see . 
def check_quality(quality):
    """Map a free-form quality/url string to one of '1080p', '720p' or 'SD'.

    Bug fixed: the original tested `'http' in quality` AFTER stripping every
    'p' from the string, which also destroyed the 'http' marker itself, so
    the URL branch could never be taken and URLs containing e.g. 'hd' were
    misclassified as 720p. The URL check now happens before stripping.

    :param quality: quality label ('1080p', 'HDTV', ...) or a stream URL
    :return: '1080p', '720p' or 'SD' ('SD' also on any error)
    """
    try:
        quality = quality.lower().replace('-', ' ')
        is_url = 'http' in quality  # decide BEFORE 'p' stripping removes 'http'
        quality = quality.replace('p', '')
        if is_url:
            # URLs only reliably expose the resolution number, if anything.
            if '1080' in quality:
                quality = '1080p'
            elif '720' in quality:
                quality = '720p'
            else:
                quality = 'SD'
        else:
            if '1080' in quality:
                quality = '1080p'
            elif '720' in quality:
                quality = '720p'
            elif 'hd' in quality:
                quality = '720p'
            elif 'blu' in quality:
                quality = '720p'
            elif 'bd' in quality:
                quality = '720p'
            elif 'br' in quality:
                quality = '720p'
            elif 'dvd' in quality:
                quality = '720p'
            else:
                quality = 'SD'

        return quality
    except:
        # non-string / unexpected input: safest default
        return 'SD'
'rapidvideo', 52 | 'vidoza', 53 | 'clicknupload', 54 | 'estream', 55 | 'vidnode', 56 | 'vidzi', 57 | 'putload', 58 | 'blazefile', 59 | 'gorillavid', 60 | 'yourupload', 61 | 'entervideo', 62 | 'youtube', 63 | 'youtu', 64 | 'vimeo', 65 | 'vk', 66 | 'streamcherry', 67 | 'mp4upload', 68 | 'trollvid', 69 | 'vidstreaming', 70 | 'dailymotion', 71 | 'uptostream', 72 | 'uptobox', 73 | 'vidcloud', 74 | 'vcstream', 75 | 'vidto', 76 | 'flashx', 77 | 'thevideo', 78 | 'vshare', 79 | 'vidup' 80 | ] 81 | 82 | Debrid = [ 83 | '1fichier', 84 | 'rapidgator', 85 | 'userscloud', 86 | 'vidlox', 87 | 'filefactory', 88 | 'turbobit', 89 | 'nitroflare' 90 | ] 91 | 92 | if host in Resolve: 93 | return host+'Resolve' 94 | elif host in Debrid: 95 | return host+'Debrid' 96 | 97 | return host 98 | except: 99 | return 100 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/cleandate.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ''' 4 | Covenant Add-on 5 | 6 | This program is free software: you can redistribute it and/or modify 7 | it under the terms of the GNU General Public License as published by 8 | the Free Software Foundation, either version 3 of the License, or 9 | (at your option) any later version. 10 | 11 | This program is distributed in the hope that it will be useful, 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | GNU General Public License for more details. 15 | 16 | You should have received a copy of the GNU General Public License 17 | along with this program. If not, see . 
def iso_2_utc(iso_ts):
    """Convert an ISO-8601 timestamp string into seconds since the Unix epoch (UTC).

    Handles optional fractional seconds, a trailing 'Z' designator and
    numeric '+HH:MM'/'-HH:MM' offsets. Returns 0 for empty/None input.

    Bug fixes:
    - a bare '...Z' timestamp (no fractional seconds) used to reach strptime
      with the 'Z' still attached and raise ValueError;
    - a naive timestamp (no tz at all) used to have a date '-' mistaken for
      the timezone separator, also breaking the parse.
    """
    if not iso_ts or iso_ts is None: return 0

    delim = -1
    if not iso_ts.endswith('Z'):
        delim = iso_ts.rfind('+')
        if delim == -1: delim = iso_ts.rfind('-')
        # A '-' inside the date portion (before the 'T') is not a tz separator.
        if delim > -1 and delim < iso_ts.find('T'): delim = -1

    if delim > -1:
        ts = iso_ts[:delim]
        sign = iso_ts[delim]
        tz = iso_ts[delim + 1:]
    else:
        ts = iso_ts
        tz = None

    # Strip a bare UTC designator so strptime (no %z support here) can parse.
    if ts.endswith('Z'): ts = ts[:-1]

    # Drop fractional seconds (this also used to remove '.000Z' suffixes).
    if ts.find('.') > -1:
        ts = ts[:ts.find('.')]

    try: d = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S')
    except TypeError: d = datetime.datetime(*(time.strptime(ts, '%Y-%m-%dT%H:%M:%S')[0:6]))

    dif = datetime.timedelta()
    if tz:
        hours, minutes = tz.split(':')
        hours = int(hours)
        minutes = int(minutes)
        if sign == '-':
            hours = -hours
            minutes = -minutes
        dif = datetime.timedelta(minutes=minutes, hours=hours)
    utc_dt = d - dif
    epoch = datetime.datetime.utcfromtimestamp(0)
    delta = utc_dt - epoch
    try: seconds = delta.total_seconds()  # works only on 2.7+
    except: seconds = delta.seconds + delta.days * 24 * 3600  # close enough
    return seconds
def query(title):
    """Simplify a title for use as a search query: drop apostrophes, keep only
    the text before the last ':' / ' -' separator, and turn dashes into spaces."""
    if title is None:
        return
    cleaned = title.replace('\'', '')
    cleaned = cleaned.rsplit(':', 1)[0]
    cleaned = cleaned.rsplit(' -', 1)[0]
    return cleaned.replace('-', ' ')
normalize(title): 79 | 80 | try: 81 | try: return title.decode('ascii').encode("utf-8") 82 | except: pass 83 | 84 | return str(''.join(c for c in unicodedata.normalize('NFKD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn')) 85 | except: 86 | return title 87 | 88 | 89 | def clean_search_query(url): 90 | url = url.replace('-','+') 91 | url = url.replace(' ', '+') 92 | return url 93 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/control.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | //Covenant Add-on// 5 | Updated for Exodus Redux Add-on 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU General Public License as published by 9 | the Free Software Foundation, either version 3 of the License, or 10 | (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU General Public License for more details. 16 | 17 | You should have received a copy of the GNU General Public License 18 | along with this program. If not, see . 
# Modified `sleep` command that honors a user exit request
def sleep(time):
    """Sleep for *time* milliseconds in 100 ms slices, returning early if Kodi
    signals shutdown (xbmc.abortRequested)."""
    remaining = time
    while remaining > 0 and not xbmc.abortRequested:
        xbmc.sleep(min(100, remaining))
        remaining -= 100
def addonIcon():
    """Return the themed icon when an artwork theme is active, else the add-on icon.

    FIX: the original tested `not (art == None and theme in ['-', ''])`, which
    allowed `art` to be None with a custom theme name (artwork add-on removed
    after a theme was chosen) and crashed in os.path.join(None, ...). We now
    require a real art path; the crash case falls back to the default.
    """
    theme = appearance(); art = artPath()
    if art is not None and theme not in ['-', '']: return os.path.join(art, 'icon.png')
    return addonInfo('icon')


def addonThumb():
    """Return the themed poster, 'DefaultFolder.png' for the '-' theme, else the add-on icon."""
    theme = appearance(); art = artPath()
    if art is not None and theme not in ['-', '']: return os.path.join(art, 'poster.png')
    elif theme == '-': return 'DefaultFolder.png'
    return addonInfo('icon')


def addonPoster():
    """Return the themed poster image, or Kodi's default video art."""
    theme = appearance(); art = artPath()
    if art is not None and theme not in ['-', '']: return os.path.join(art, 'poster.png')
    return 'DefaultVideo.png'


def addonBanner():
    """Return the themed banner image, or Kodi's default video art."""
    theme = appearance(); art = artPath()
    if art is not None and theme not in ['-', '']: return os.path.join(art, 'banner.png')
    return 'DefaultVideo.png'


def addonFanart():
    """Return the themed fanart, or the add-on's bundled fanart."""
    theme = appearance(); art = artPath()
    if art is not None and theme not in ['-', '']: return os.path.join(art, 'fanart.jpg')
    return addonInfo('fanart')


def addonNext():
    """Return the themed 'next page' image, or Kodi's default video art."""
    theme = appearance(); art = artPath()
    if art is not None and theme not in ['-', '']: return os.path.join(art, 'next.png')
    return 'DefaultVideo.png'
def addonId():
    """Return this add-on's identifier."""
    return addonInfo('id')


def addonName():
    """Return this add-on's display name."""
    return addonInfo('name')


def get_plugin_url(queries):
    """Build a plugin:// URL for this add-on from the `queries` mapping.

    Values that fail ASCII urlencoding are re-encoded as UTF-8 first.
    """
    try:
        query = urllib.urlencode(queries)
    except UnicodeEncodeError:
        for key in queries:
            if isinstance(queries[key], unicode):
                queries[key] = queries[key].encode('utf-8')
        query = urllib.urlencode(queries)
    base = sys.argv[0]
    if not base:
        base = addonId()
    return base + '?' + query


def artPath():
    """Return the media folder of the active artwork theme, or None when no
    theme is selected or the artwork add-on is not installed."""
    theme = appearance()
    if theme in ['-', '']:
        return None
    if condVisibility('System.HasAddon(script.exodusredux.artwork)'):
        return os.path.join(
            xbmcaddon.Addon('script.exodusredux.artwork').getAddonInfo('path'),
            'resources', 'media', theme)


def appearance():
    """Return the configured theme name, lower-cased; the setting key depends on
    whether the artwork add-on is installed."""
    if condVisibility('System.HasAddon(script.exodusredux.artwork)'):
        return setting('appearance.1').lower()
    return setting('appearance.alt').lower()


def artwork():
    """Launch the artwork add-on."""
    execute('RunPlugin(plugin://script.exodusredux.artwork)')


def infoDialog(message, heading=addonInfo('name'), icon='', time=3000, sound=False):
    """Show a Kodi toast; `icon` may be '', 'INFO', 'WARNING', 'ERROR' or an image path."""
    named_icons = {'INFO': xbmcgui.NOTIFICATION_INFO,
                   'WARNING': xbmcgui.NOTIFICATION_WARNING,
                   'ERROR': xbmcgui.NOTIFICATION_ERROR}
    if icon == '':
        icon = addonIcon()
    elif icon in named_icons:
        icon = named_icons[icon]
    dialog.notification(heading, message, icon, time, sound=sound)


def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
    """Show a yes/no dialog; returns the user's choice."""
    return dialog.yesno(heading, line1, line2, line3, nolabel, yeslabel)


def selectDialog(list, heading=addonInfo('name')):
    """Show a selection dialog over `list`; returns the selected index."""
    return dialog.select(heading, list)


def metaFile():
    """Return the bundled metadata DB path when the metadata add-on is installed."""
    if condVisibility('System.HasAddon(script.exodusredux.metadata)'):
        return os.path.join(
            xbmcaddon.Addon('script.exodusredux.metadata').getAddonInfo('path'),
            'resources', 'data', 'meta.db')
def apiLanguage(ret_name=None):
    """Return the API language codes to use for trakt/tvdb/youtube.

    Uses the 'api.language' setting, or Kodi's UI language when set to AUTO
    (any value ending in an uppercase letter). Falls back to 'en' for any
    service that does not support the resolved code. With `ret_name` truthy,
    full language names are returned instead of codes.
    """
    langDict = {'Bulgarian': 'bg', 'Chinese': 'zh', 'Croatian': 'hr', 'Czech': 'cs', 'Danish': 'da',
                'Dutch': 'nl', 'English': 'en', 'Finnish': 'fi', 'French': 'fr', 'German': 'de',
                'Greek': 'el', 'Hebrew': 'he', 'Hungarian': 'hu', 'Italian': 'it', 'Japanese': 'ja',
                'Korean': 'ko', 'Norwegian': 'no', 'Polish': 'pl', 'Portuguese': 'pt', 'Romanian': 'ro',
                'Russian': 'ru', 'Serbian': 'sr', 'Slovak': 'sk', 'Slovenian': 'sl', 'Spanish': 'es',
                'Swedish': 'sv', 'Thai': 'th', 'Turkish': 'tr', 'Ukrainian': 'uk'}

    # Languages each backend actually supports.
    trakt = ['bg','cs','da','de','el','en','es','fi','fr','he','hr','hu','it','ja','ko','nl','no','pl',
             'pt','ro','ru','sk','sl','sr','sv','th','tr','uk','zh']
    tvdb = ['en','sv','no','da','fi','nl','de','it','es','fr','pl','hu','el','tr','ru','he','ja','pt',
            'zh','cs','sl','hr','ko']
    youtube = ['gv', 'gu', 'gd', 'ga', 'gn', 'gl', 'ty', 'tw', 'tt', 'tr', 'ts', 'tn', 'to', 'tl', 'tk',
               'th', 'ti', 'tg', 'te', 'ta', 'de', 'da', 'dz', 'dv', 'qu', 'zh', 'za', 'zu', 'wa', 'wo',
               'jv', 'ja', 'ch', 'co', 'ca', 'ce', 'cy', 'cs', 'cr', 'cv', 'cu', 'ps', 'pt', 'pa', 'pi',
               'pl', 'mg', 'ml', 'mn', 'mi', 'mh', 'mk', 'mt', 'ms', 'mr', 'my', 've', 'vi', 'is', 'iu',
               'it', 'vo', 'ii', 'ik', 'io', 'ia', 'ie', 'id', 'ig', 'fr', 'fy', 'fa', 'ff', 'fi', 'fj',
               'fo', 'ss', 'sr', 'sq', 'sw', 'sv', 'su', 'st', 'sk', 'si', 'so', 'sn', 'sm', 'sl', 'sc',
               'sa', 'sg', 'se', 'sd', 'lg', 'lb', 'la', 'ln', 'lo', 'li', 'lv', 'lt', 'lu', 'yi', 'yo',
               'el', 'eo', 'en', 'ee', 'eu', 'et', 'es', 'ru', 'rw', 'rm', 'rn', 'ro', 'be', 'bg', 'ba',
               'bm', 'bn', 'bo', 'bh', 'bi', 'br', 'bs', 'om', 'oj', 'oc', 'os', 'or', 'xh', 'hz', 'hy',
               'hr', 'ht', 'hu', 'hi', 'ho', 'ha', 'he', 'uz', 'ur', 'uk', 'ug', 'aa', 'ab', 'ae', 'af',
               'ak', 'am', 'an', 'as', 'ar', 'av', 'ay', 'az', 'nl', 'nn', 'no', 'na', 'nb', 'nd', 'ne',
               'ng', 'ny', 'nr', 'nv', 'ka', 'kg', 'kk', 'kj', 'ki', 'ko', 'kn', 'km', 'kl', 'ks', 'kr',
               'kw', 'kv', 'ku', 'ky']

    name = setting('api.language')
    if not name: name = 'AUTO'

    if name[-1].isupper():
        # AUTO (or any all-caps marker): resolve from Kodi's UI language.
        try: name = xbmc.getLanguage(xbmc.ENGLISH_NAME).split(' ')[0]
        except: pass
    try: name = langDict[name]
    except: name = 'en'
    lang = {'trakt': name} if name in trakt else {'trakt': 'en'}
    lang['tvdb'] = name if name in tvdb else 'en'
    lang['youtube'] = name if name in youtube else 'en'

    if ret_name:
        lang['trakt'] = [i[0] for i in langDict.iteritems() if i[1] == lang['trakt']][0]
        lang['tvdb'] = [i[0] for i in langDict.iteritems() if i[1] == lang['tvdb']][0]
        lang['youtube'] = [i[0] for i in langDict.iteritems() if i[1] == lang['youtube']][0]

    return lang


def version():
    """Return the numeric prefix of the Kodi core version as an int (e.g. '18.9' -> 18).

    Falls back to 999 when the version cannot be read, and 0 when the version
    string has no leading digits (FIX: previously int('') raised ValueError).
    """
    try: ver = addon('xbmc.addon').getAddonInfo('version')
    except Exception: ver = '999'
    num = ''
    for ch in ver:
        if ch.isdigit(): num += ch
        else: break
    return int(num) if num else 0


def cdnImport(uri, name):
    """Fetch a python module from `uri` and import it as `name`.

    The source is written into a throw-away folder under the profile dir,
    loaded with imp.load_source, and the folder is removed again.
    """
    import imp
    from resources.lib.modules import client

    path = os.path.join(dataPath, 'py' + name)
    path = path.decode('utf-8')

    deleteDir(os.path.join(path, ''), force=True)
    makeFile(dataPath) ; makeFile(path)

    r = client.request(uri)
    p = os.path.join(path, name + '.py')
    f = openFile(p, 'w') ; f.write(r) ; f.close()
    m = imp.load_source(name, p)

    deleteDir(os.path.join(path, ''), force=True)
    return m


def openSettings(query=None, id=None):
    """Open the add-on settings dialog; `query` as 'category.control' focuses a control.

    FIX: the default add-on id is now resolved at call time instead of at
    import time (the old `id=addonInfo('id')` default was evaluated when the
    module loaded). Passing an explicit id behaves exactly as before.
    """
    try:
        if id is None: id = addonInfo('id')
        idle()
        execute('Addon.OpenSettings(%s)' % id)
        if query is None: raise Exception()
        c, f = query.split('.')
        execute('SetFocus(%i)' % (int(c) + 100))
        execute('SetFocus(%i)' % (int(f) + 200))
    except:
        return
getCurrentViewId(): 301 | win = xbmcgui.Window(xbmcgui.getCurrentWindowId()) 302 | return str(win.getFocusId()) 303 | 304 | 305 | def refresh(): 306 | return execute('Container.Refresh') 307 | 308 | def busy(): 309 | return execute('ActivateWindow(busydialog)') 310 | 311 | def idle(): 312 | return execute('Dialog.Close(busydialog)') 313 | 314 | 315 | def queueItem(): 316 | return execute('Action(Queue)') 317 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/debrid.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | Covenant Add-on 5 | 6 | This program is free software: you can redistribute it and/or modify 7 | it under the terms of the GNU General Public License as published by 8 | the Free Software Foundation, either version 3 of the License, or 9 | (at your option) any later version. 10 | 11 | This program is distributed in the hope that it will be useful, 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | GNU General Public License for more details. 15 | 16 | You should have received a copy of the GNU General Public License 17 | along with this program. If not, see . 18 | """ 19 | 20 | from resources.lib.modules import control, log_utils 21 | 22 | try: 23 | import resolveurl 24 | 25 | debrid_resolvers = [resolver() for resolver in resolveurl.relevant_resolvers(order_matters=True) if resolver.isUniversal()] 26 | 27 | if len(debrid_resolvers) == 0: 28 | # Support Rapidgator accounts! Unfortunately, `sources.py` assumes that rapidgator.net is only ever 29 | # accessed via a debrid service, so we add rapidgator as a debrid resolver and everything just works. 
def status(torrent=False):
    """Return True when at least one debrid resolver is available.

    For torrent sources, additionally require the 'torrent.enabled' setting to
    be unset or 'true'.
    """
    if not debrid_resolvers:
        return False
    if torrent:
        enabled = control.setting('torrent.enabled')
        return enabled == '' or enabled.lower() == 'true'
    return True


def resolver(url, debrid):
    """Resolve `url` through the debrid service named `debrid`.

    Returns the playable stream URL, or None on any failure (logged).
    """
    try:
        selected = [r for r in debrid_resolvers if r.name == debrid][0]
        selected.login()
        host, media_id = selected.get_host_and_id(url)
        return selected.get_media_url(host, media_id)
    except Exception as e:
        log_utils.log('%s Resolve Failure: %s' % (debrid, e), log_utils.LOGWARNING)
        return None
import re, os, urllib, urlparse, json, binascii
from resources.lib.modules import client


def google(url):
    """Resolve a Google-hosted video page (Drive/Docs, Photos, Picasa, Plus) into
    a list of {'quality': ..., 'url': ...} dicts, best quality first.

    Returns None when nothing playable is found or on any error. Locals that
    shadowed the builtin `id` have been renamed (photo_id / post_id).
    """
    try:
        if any(x in url for x in ['youtube.', 'docid=']):
            url = 'https://drive.google.com/file/d/%s/view' % re.compile('docid=([\w-]+)').findall(url)[0]

        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc == 'docs' or netloc == 'drive':
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

        headers = {'User-Agent': client.agent()}

        result = client.request(url, output='extended', headers=headers)

        # Keep the session cookie for the final stream request, when present.
        try:
            headers['Cookie'] = result[2]['Set-Cookie']
        except:
            pass

        result = result[0]

        if netloc == 'docs' or netloc == 'drive':
            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i, append_height=True) for i in result], [])

        elif netloc == 'photos':
            result = result.replace('\r', '').replace('\n', '').replace('\t', '')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = sum([googletag(i, append_height=True) for i in result], [])

        elif netloc == 'picasaweb':
            # The photo id is carried in the URL fragment.
            photo_id = re.compile('#(\d*)').findall(url)[0]

            result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            if len(result) > 1:
                result = [i for i in result if str(photo_id) in i['link'][0]['href']][0]
            elif len(result) == 1:
                result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i, append_height=True) for i in result], [])

        elif netloc == 'plus':
            post_id = (urlparse.urlparse(url).path).split('/')[-1]

            result = result.replace('\r', '').replace('\n', '').replace('\t', '')
            result = result.split('"%s"' % post_id)[-1].split(']]')[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = sum([googletag(i, append_height=True) for i in result], [])

        # Highest resolution first; then keep one entry per quality label.
        result = sorted(result, key=lambda i: i.get('height', 0), reverse=True)

        url = []
        for q in ['4K', '1440p', '1080p', 'HD', 'SD']:
            try:
                url += [[i for i in result if i.get('quality') == q][0]]
            except:
                pass

        for i in url:
            i.pop('height', None)
            i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})

        if not url: return
        return url
    except:
        return


def googletag(url, append_height=False):
    """Map a googlevideo itag (itag=NNN or trailing =mNNN) to a quality label.

    Returns a one-element list [{'quality': ..., ('height': ...,) 'url': url}],
    or [] when the itag is absent or unknown.
    """
    quality = re.compile('itag=(\d*)').findall(url)
    quality += re.compile('=m(\d*)$').findall(url)
    try:
        quality = quality[0]
    except:
        return []

    itag_map = {'151': {'quality': 'SD', 'height': 72}, '212': {'quality': 'SD', 'height': 480}, '313': {'quality': '4K', 'height': 2160},
                '242': {'quality': 'SD', 'height': 240}, '315': {'quality': '4K', 'height': 2160}, '219': {'quality': 'SD', 'height': 480},
                '133': {'quality': 'SD', 'height': 240}, '271': {'quality': '1440p', 'height': 1440}, '272': {'quality': '4K', 'height': 2160},
                '137': {'quality': '1080p', 'height': 1080}, '136': {'quality': 'HD', 'height': 720}, '135': {'quality': 'SD', 'height': 480},
                '134': {'quality': 'SD', 'height': 360}, '82': {'quality': 'SD', 'height': 360}, '83': {'quality': 'SD', 'height': 480},
                '218': {'quality': 'SD', 'height': 480}, '93': {'quality': 'SD', 'height': 360}, '84': {'quality': 'HD', 'height': 720},
                '170': {'quality': '1080p', 'height': 1080}, '167': {'quality': 'SD', 'height': 360}, '22': {'quality': 'HD', 'height': 720},
                '46': {'quality': '1080p', 'height': 1080}, '160': {'quality': 'SD', 'height': 144}, '44': {'quality': 'SD', 'height': 480},
                '45': {'quality': 'HD', 'height': 720}, '43': {'quality': 'SD', 'height': 360}, '94': {'quality': 'SD', 'height': 480},
                '5': {'quality': 'SD', 'height': 240}, '6': {'quality': 'SD', 'height': 270}, '92': {'quality': 'SD', 'height': 240},
                '85': {'quality': '1080p', 'height': 1080}, '308': {'quality': '1440p', 'height': 1440}, '278': {'quality': 'SD', 'height': 144},
                '78': {'quality': 'SD', 'height': 480}, '302': {'quality': 'HD', 'height': 720}, '303': {'quality': '1080p', 'height': 1080},
                '245': {'quality': 'SD', 'height': 480}, '244': {'quality': 'SD', 'height': 480}, '247': {'quality': 'HD', 'height': 720},
                '246': {'quality': 'SD', 'height': 480}, '168': {'quality': 'SD', 'height': 480}, '266': {'quality': '4K', 'height': 2160},
                '243': {'quality': 'SD', 'height': 360}, '264': {'quality': '1440p', 'height': 1440}, '102': {'quality': 'HD', 'height': 720},
                '100': {'quality': 'SD', 'height': 360}, '101': {'quality': 'SD', 'height': 480}, '95': {'quality': 'HD', 'height': 720},
                '248': {'quality': '1080p', 'height': 1080}, '96': {'quality': '1080p', 'height': 1080}, '91': {'quality': 'SD', 'height': 144},
                '38': {'quality': '4K', 'height': 3072}, '59': {'quality': 'SD', 'height': 480}, '17': {'quality': 'SD', 'height': 144},
                '132': {'quality': 'SD', 'height': 240}, '18': {'quality': 'SD', 'height': 360}, '37': {'quality': '1080p', 'height': 1080},
                '35': {'quality': 'SD', 'height': 480}, '34': {'quality': 'SD', 'height': 360}, '298': {'quality': 'HD', 'height': 720},
                '299': {'quality': '1080p', 'height': 1080}, '169': {'quality': 'HD', 'height': 720}}

    if quality in itag_map:
        entry = itag_map[quality]
        if append_height:
            return [{'quality': entry['quality'], 'height': entry['height'], 'url': url}]
        else:
            return [{'quality': entry['quality'], 'url': url}]
    else:
        return []


def googlepass(url):
    """Follow a Google redirect link (optional piped-in headers after '|'),
    normalizing the scheme to what the response demands."""
    try:
        try:
            headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except:
            headers = None
        url = url.split('|')[0].replace('\\', '')
        url = client.request(url, headers=headers, output='geturl')
        if 'requiressl=yes' in url:
            url = url.replace('http://', 'https://')
        else:
            url = url.replace('https://', 'http://')
        if headers: url += '|%s' % urllib.urlencode(headers)
        return url
    except:
        return


def vk(url):
    """Resolve a vk.com video page into quality/url dicts (highest first)."""
    try:
        query = urlparse.parse_qs(urlparse.urlparse(url).query)

        try:
            oid, video_id = query['oid'][0], query['id'][0]
        except:
            oid, video_id = re.findall('\/video(.*)_(.*)', url)[0]

        sources_url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, video_id)
        html = client.request(sources_url)
        html = re.sub(r'[^\x00-\x7F]+', ' ', html)

        sources = re.findall('(\d+)x\d+.+?(http.+?\.m3u8.+?)n', html)

        if not sources:
            sources = re.findall('"url(\d+)"\s*:\s*"(.+?)"', html)

        sources = [(i[0], i[1].replace('\\', '')) for i in sources]
        sources = dict(sources)

        url = []
        try: url += [{'quality': 'HD', 'url': sources['720']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': sources['540']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': sources['480']}]
        except: pass
        if not url == []: return url
        try: url += [{'quality': 'SD', 'url': sources['360']}]
        except: pass
        if not url == []: return url
        try: url += [{'quality': 'SD', 'url': sources['240']}]
        except: pass
        if not url == []: return url
    except:
        return


def odnoklassniki(url):
    """Resolve an ok.ru video into quality/url dicts: all HD variants plus at
    most one SD fallback."""
    try:
        media_id = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]

        result = client.request('http://ok.ru/dk', post={'cmd': 'videoPlayerMetadata', 'mid': media_id})
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        result = json.loads(result).get('videos', [])

        hd = []
        for name, quali in {'ultra': '4K', 'quad': '1440p', 'full': '1080p', 'hd': 'HD'}.items():
            hd += [{'quality': quali, 'url': i.get('url')} for i in result if i.get('name').lower() == name]

        sd = []
        for name, quali in {'sd': 'SD', 'low': 'SD', 'lowest': 'SD', 'mobile': 'SD'}.items():
            sd += [{'quality': quali, 'url': i.get('url')} for i in result if i.get('name').lower() == name]

        url = hd + sd[:1]
        if not url == []: return url
    except:
        return


def cldmailru(url):
    """Resolve a cloud.mail.ru public link into a direct download URL."""
    try:
        v = url.split('public')[-1]

        r = client.request(url)
        r = re.sub(r'[^\x00-\x7F]+', ' ', r)

        tok = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', r)[0]

        url = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', r)[0]

        url = '%s%s?key=%s' % (url, v, tok)

        return url
    except:
        return


def yandex(url):
    """Resolve a yadi.sk share page into a direct file URL via the models API."""
    try:
        cookie = client.request(url, output='cookie')

        r = client.request(url, cookie=cookie)
        r = re.sub(r'[^\x00-\x7F]+', ' ', r)

        sk = re.findall('"sk"\s*:\s*"([^"]+)', r)[0]

        idstring = re.findall('"id"\s*:\s*"([^"]+)', r)[0]

        # A random client id is sufficient for the unauthenticated API.
        idclient = binascii.b2a_hex(os.urandom(16))

        post = {'idClient': idclient, 'version': '3.9.2', 'sk': sk, '_model.0': 'do-get-resource-url', 'id.0': idstring}
        post = urllib.urlencode(post)

        r = client.request('https://yadi.sk/models/?_m=do-get-resource-url', post=post, cookie=cookie)
        r = json.loads(r)

        url = r['models'][0]['data']['file']

        return url
    except:
        return
import re
from collections import namedtuple

DomMatch = namedtuple('DOMMatch', ['attrs', 'content'])
re_type = type(re.compile(''))


def __get_dom_content(html, name, match):
    """Return the inner content of the element opened by `match` within `html`.

    NOTE(review): the end-tag literal and the start/end scanning block were
    destroyed in extraction; reconstructed from the parallel Parsedom-based
    implementation -- confirm against the original file.
    """
    if match.endswith('/>'): return ''

    # override tag name with tag from match if possible
    tag = re.match('<([^\s/>]+)', match)
    if tag: name = tag.group(1)

    start_str = '<%s' % name
    end_str = "</%s" % name

    start = html.find(match)
    end = html.find(end_str, start)
    pos = html.find(start_str, start + 1)

    # Skip over nested same-name tags so we stop at the matching close tag.
    while start < pos < end:  # Ignore too early </endstr> return
        tend = html.find(end_str, end + len(end_str))
        if tend != -1:
            end = tend
        pos = html.find(start_str, pos + 1)

    if start == -1 and end == -1:
        result = ''
    elif start > -1 and end > -1:
        result = html[start + len(match):end]
    elif end > -1:
        result = html[:end]
    elif start > -1:
        result = html[start + len(match):]
    else:
        result = ''

    return result


def __get_dom_elements(item, name, attrs):
    """Return the opening tags of all `name` elements in `item` whose attributes
    match every key/value in `attrs` (values may be strings, lists or regexes).

    NOTE(review): the named group (?P<delim>...) was lost in extraction and is
    reconstructed here from the surviving (?P=delim) backreference.
    """
    if not attrs:
        pattern = '(<%s(?:\s[^>]*>|/?>))' % name
        this_list = re.findall(pattern, item, re.M | re.S | re.I)
    else:
        last_list = None
        for key, value in attrs.iteritems():
            value_is_regex = isinstance(value, re_type)
            value_is_str = isinstance(value, basestring)
            pattern = '''(<{tag}[^>]*\s{key}=(?P<delim>['"])(.*?)(?P=delim)[^>]*>)'''.format(tag=name, key=key)
            re_list = re.findall(pattern, item, re.M | re.S | re.I)
            if value_is_regex:
                this_list = [r[0] for r in re_list if re.match(value, r[2])]
            else:
                temp_value = [value] if value_is_str else value
                this_list = [r[0] for r in re_list if set(temp_value) <= set(r[2].split(' '))]

            if not this_list:
                # Retry without quote delimiters for unquoted attribute values.
                has_space = (value_is_regex and ' ' in value.pattern) or (value_is_str and ' ' in value)
                if not has_space:
                    pattern = '''(<{tag}[^>]*\s{key}=((?:[^\s>]|/>)*)[^>]*>)'''.format(tag=name, key=key)
                    re_list = re.findall(pattern, item, re.M | re.S | re.I)
                    if value_is_regex:
                        this_list = [r[0] for r in re_list if re.match(value, r[1])]
                    else:
                        this_list = [r[0] for r in re_list if value == r[1]]

            # Intersect with matches for previously processed attributes.
            if last_list is None:
                last_list = this_list
            else:
                last_list = [item for item in this_list if item in last_list]
            this_list = last_list

    return this_list


def __get_attribs(element):
    """Parse an opening tag's attributes into a dict (quoted or bare values).

    NOTE(review): the named groups key/delim/value1/value2 were lost in
    extraction; reconstructed from the groupdict() usage below.
    """
    attribs = {}
    for match in re.finditer('''\s+(?P<key>[^=]+)=\s*(?:(?P<delim>["'])(?P<value1>.*?)(?P=delim)|(?P<value2>[^"'][^>\s]*))''', element):
        match = match.groupdict()
        value1 = match.get('value1')
        value2 = match.get('value2')
        value = value1 if value1 is not None else value2
        if value is None: continue
        attribs[match['key'].lower().strip()] = value
    return attribs


def parse_dom(html, name='', attrs=None, req=False, exclude_comments=False):
    """Find all `name` elements in `html` (str/unicode/DomMatch/list of these)
    whose attributes match `attrs`; `req` lists attribute names that must be
    present. Returns a list of DomMatch(attrs, content)."""
    if attrs is None: attrs = {}
    name = name.strip()
    if isinstance(html, unicode) or isinstance(html, DomMatch):
        html = [html]
    elif isinstance(html, str):
        try:
            html = [html.decode("utf-8")]  # Replace with chardet thingy
        except:
            try:
                html = [html.decode("utf-8", "replace")]
            except:
                html = [html]
    elif not isinstance(html, list):
        return ''

    if not name:
        return ''

    if not isinstance(attrs, dict):
        return ''

    if req:
        if not isinstance(req, list):
            req = [req]
        req = set([key.lower() for key in req])

    all_results = []
    for item in html:
        if isinstance(item, DomMatch):
            item = item.content

        if exclude_comments:
            # NOTE(review): comment-stripping literal was lost in extraction;
            # reconstructed as the standard HTML comment pattern.
            item = re.sub(re.compile('<!--.*?-->', re.DOTALL), '', item)

        results = []
        for element in __get_dom_elements(item, name, attrs):
            attribs = __get_attribs(element)
            if req and not req <= set(attribs.keys()): continue
            temp = __get_dom_content(item, name, element).strip()
            results.append(DomMatch(attribs, temp))
            # Advance past this match so repeated content is not re-found.
            item = item[item.find(temp, item.find(element)):]
        all_results += results

    return all_results
import re
from collections import namedtuple

DomMatch = namedtuple('DOMMatch', ['attrs', 'content'])
re_type = type(re.compile(''))

def __get_dom_content(html, name, match):
    """Return the inner content of the element opened by `match` within `html`.

    NOTE(review): the end-tag literal and the start/end scanning block were
    destroyed in extraction; reconstructed from the parallel Parsedom-based
    implementation -- confirm against the original file.
    """
    if match.endswith('/>'): return ''

    # override tag name with tag from match if possible
    tag = re.match('<([^\s/>]+)', match)
    if tag: name = tag.group(1)

    start_str = '<%s' % (name)
    end_str = "</%s" % (name)

    start = html.find(match)
    end = html.find(end_str, start)
    pos = html.find(start_str, start + 1)

    # Skip over nested same-name tags so we stop at the matching close tag.
    while start < pos < end:  # Ignore too early </endstr> return
        tend = html.find(end_str, end + len(end_str))
        if tend != -1:
            end = tend
        pos = html.find(start_str, pos + 1)

    if start == -1 and end == -1:
        result = ''
    elif start > -1 and end > -1:
        result = html[start + len(match):end]
    elif end > -1:
        result = html[:end]
    elif start > -1:
        result = html[start + len(match):]
    else:
        result = ''

    return result

def __get_dom_elements(item, name, attrs):
    """Return opening tags of `name` elements matching every attr in `attrs`.

    NOTE(review): (?P<delim>...) reconstructed from the surviving (?P=delim)
    backreference lost in extraction.
    """
    if not attrs:
        pattern = '(<%s(?:\s[^>]*>|/?>))' % (name)
        this_list = re.findall(pattern, item, re.M | re.S | re.I)
    else:
        last_list = None
        for key, value in attrs.iteritems():
            value_is_regex = isinstance(value, re_type)
            value_is_str = isinstance(value, basestring)
            pattern = '''(<{tag}[^>]*\s{key}=(?P<delim>['"])(.*?)(?P=delim)[^>]*>)'''.format(tag=name, key=key)
            re_list = re.findall(pattern, item, re.M | re.S | re.I)
            if value_is_regex:
                this_list = [r[0] for r in re_list if re.match(value, r[2])]
            else:
                temp_value = [value] if value_is_str else value
                this_list = [r[0] for r in re_list if set(temp_value) <= set(r[2].split(' '))]

            if not this_list:
                # Retry without quote delimiters for unquoted attribute values.
                has_space = (value_is_regex and ' ' in value.pattern) or (value_is_str and ' ' in value)
                if not has_space:
                    pattern = '''(<{tag}[^>]*\s{key}=([^\s/>]*)[^>]*>)'''.format(tag=name, key=key)
                    re_list = re.findall(pattern, item, re.M | re.S | re.I)
                    if value_is_regex:
                        this_list = [r[0] for r in re_list if re.match(value, r[1])]
                    else:
                        this_list = [r[0] for r in re_list if value == r[1]]

            # Intersect with matches for previously processed attributes.
            if last_list is None:
                last_list = this_list
            else:
                last_list = [item for item in this_list if item in last_list]
            this_list = last_list

    return this_list

def __get_attribs(element):
    """Parse an opening tag's attributes into a dict (quoted or bare values).

    NOTE(review): named groups key/delim/value1/value2 reconstructed from the
    groupdict() usage below (lost in extraction).
    """
    attribs = {}
    for match in re.finditer('''\s+(?P<key>[^=]+)=\s*(?:(?P<delim>["'])(?P<value1>.*?)(?P=delim)|(?P<value2>[^"'][^>\s]*))''', element):
        match = match.groupdict()
        value1 = match.get('value1')
        value2 = match.get('value2')
        value = value1 if value1 is not None else value2
        if value is None: continue
        attribs[match['key'].lower().strip()] = value
    return attribs

def parse_dom(html, name='', attrs=None, req=False):
    """Find all `name` elements in `html` (str/unicode/DomMatch/list of these)
    whose attributes match `attrs`; `req` lists attribute names that must be
    present. Returns a list of DomMatch(attrs, content)."""
    if attrs is None: attrs = {}
    name = name.strip()
    if isinstance(html, unicode) or isinstance(html, DomMatch):
        html = [html]
    elif isinstance(html, str):
        try:
            html = [html.decode("utf-8")]  # Replace with chardet thingy
        except:
            try:
                html = [html.decode("utf-8", "replace")]
            except:
                html = [html]
    elif not isinstance(html, list):
        return ''

    if not name:
        return ''

    if not isinstance(attrs, dict):
        return ''

    if req:
        if not isinstance(req, list):
            req = [req]
        req = set([key.lower() for key in req])

    all_results = []
    for item in html:
        if isinstance(item, DomMatch):
            item = item.content

        results = []
        for element in __get_dom_elements(item, name, attrs):
            attribs = __get_attribs(element)
            if req and not req <= set(attribs.keys()): continue
            temp = __get_dom_content(item, name, element).strip()
            results.append(DomMatch(attribs, temp))
            # Advance past this match so repeated content is not re-found.
            item = item[item.find(temp, item.find(element)):]
        all_results += results

    return all_results
all_results 147 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/downloader.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ''' 4 | Simple XBMC Download Script 5 | Copyright (C) 2013 Sean Poyser (seanpoyser@gmail.com) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU General Public License as published by 9 | the Free Software Foundation, either version 3 of the License, or 10 | (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU General Public License for more details. 16 | 17 | You should have received a copy of the GNU General Public License 18 | along with this program. If not, see . 19 | ''' 20 | 21 | 22 | import re 23 | import json 24 | import sys 25 | import urllib 26 | import urllib2 27 | import urlparse 28 | import xbmc 29 | import xbmcgui 30 | import xbmcplugin 31 | import xbmcvfs 32 | import os 33 | import inspect 34 | 35 | 36 | def download(name, image, url): 37 | 38 | if url == None: return 39 | 40 | from resources.lib.modules import control 41 | 42 | try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1])) 43 | except: headers = dict('') 44 | 45 | url = url.split('|')[0] 46 | 47 | content = re.compile('(.+?)\sS(\d*)E\d*$').findall(name) 48 | transname = name.translate(None, '\/:*?"<>|').strip('.') 49 | levels =['../../../..', '../../..', '../..', '..'] 50 | 51 | if len(content) == 0: 52 | dest = control.setting('movie.download.path') 53 | dest = control.transPath(dest) 54 | for level in levels: 55 | try: control.makeFile(os.path.abspath(os.path.join(dest, level))) 56 | except: pass 57 | control.makeFile(dest) 58 | dest = os.path.join(dest, transname) 59 | 
def getResponse(url, headers, size):
    """Open *url* and return the urllib2 response, or None on any failure.

    When *size* > 0 a Range header is added so the transfer resumes from
    that byte offset (mutates the caller's *headers* dict).
    """
    try:
        if size > 0:
            headers['Range'] = 'bytes=%d-' % int(size)
        return urllib2.urlopen(urllib2.Request(url, headers=headers), timeout=30)
    except:
        return None
def doDownload(url, dest, title, image, headers):
    """Download *url* to *dest* with progress notifications and resume/retry.

    All arguments arrive quote_plus-encoded (they were packed by download()
    into a RunScript command line); *headers* is additionally JSON-encoded.
    Blocks until the transfer finishes, is abandoned, or the user declines
    the confirmation dialog.
    """
    headers = json.loads(urllib.unquote_plus(headers))

    url = urllib.unquote_plus(url)

    title = urllib.unquote_plus(title)

    image = urllib.unquote_plus(image)

    dest = urllib.unquote_plus(dest)

    # Display name: final path component of the destination.
    file = dest.rsplit(os.sep, 1)[-1]

    resp = getResponse(url, headers, 0)

    if not resp:
        xbmcgui.Dialog().ok(title, dest, 'Download failed', 'No response from server')
        return

    try: content = int(resp.headers['Content-Length'])
    except: content = 0

    # Server advertises byte-range support -> we may resume after errors.
    try: resumable = 'bytes' in resp.headers['Accept-Ranges'].lower()
    except: resumable = False

    #print "Download Header"
    #print resp.headers
    if resumable:
        print "Download is resumable"

    if content < 1:
        xbmcgui.Dialog().ok(title, file, 'Unknown filesize', 'Unable to download')
        return

    # Read in 1 MB chunks (or the whole file if smaller).
    size = 1024 * 1024
    mb = content / (1024 * 1024)

    if content < size:
        size = content

    total = 0    # bytes already flushed to disk
    notify = 0   # next progress percentage at which to notify
    errors = 0   # consecutive read errors since the last good chunk
    count = 0    # total errors seen (for logging only)
    resume = 0   # how many times we have resumed/retried
    sleep = 0    # back-off before retrying, in seconds

    # NOTE(review): button labels are nolabel='Confirm', yeslabel='Cancel',
    # so yesno() == 1 means the user pressed 'Cancel' -- confirm intent.
    if xbmcgui.Dialog().yesno(title + ' - Confirm Download', file, 'Complete file is %dMB' % mb, 'Continue with download?', 'Confirm', 'Cancel') == 1:
        return

    print 'Download File Size : %dMB %s ' % (mb, dest)

    #f = open(dest, mode='wb')
    f = xbmcvfs.File(dest, 'w')

    chunk = None
    chunks = []  # up to 5 chunks buffered in memory before being flushed

    while True:
        # Progress = flushed bytes plus whatever is still buffered.
        downloaded = total
        for c in chunks:
            downloaded += len(c)
        percent = min(100 * downloaded / content, 100)
        if percent >= notify:
            xbmc.executebuiltin("XBMC.Notification(%s,%s,%i,%s)" % (title + ' - Download Progress - ' + str(percent) + '%', dest, 10000, image))

            print 'Download percent : %s %s %dMB downloaded : %sMB File Size : %sMB' % (str(percent)+'%', dest, mb, downloaded / 1000000, content / 1000000)

            notify += 10  # notify every further 10%

        chunk = None
        error = False

        try:
            chunk = resp.read(size)
            if not chunk:
                # EOF: success only if we are essentially done, else treat
                # the short read as an error and fall through to retry.
                if percent < 99:
                    error = True
                else:
                    while len(chunks) > 0:
                        c = chunks.pop(0)
                        f.write(c)
                        del c

                    f.close()
                    print '%s download complete' % (dest)
                    return done(title, dest, True)

        except Exception, e:
            print str(e)
            error = True
            sleep = 10
            errno = 0

            if hasattr(e, 'errno'):
                errno = e.errno

            if errno == 10035: # 'A non-blocking socket operation could not be completed immediately'
                pass

            if errno == 10054: #'An existing connection was forcibly closed by the remote host'
                errors = 10 #force resume
                sleep = 30

            if errno == 11001: # 'getaddrinfo failed'
                errors = 10 #force resume
                sleep = 30

        if chunk:
            errors = 0
            chunks.append(chunk)
            # Keep at most 5 chunks in memory; flush the oldest to disk.
            if len(chunks) > 5:
                c = chunks.pop(0)
                f.write(c)
                total += len(c)
                del c

        if error:
            errors += 1
            count += 1
            print '%d Error(s) whilst downloading %s' % (count, dest)
            xbmc.sleep(sleep*1000)

        # Resumable servers retry on the first error; others only after 10
        # consecutive errors. Give up after 500 resumes (50 if not resumable).
        if (resumable and errors > 0) or errors >= 10:
            if (not resumable and resume >= 50) or resume >= 500:
                #Give up!
                print '%s download canceled - too many error whilst downloading' % (dest)
                return done(title, dest, False)

            resume += 1
            errors = 0
            if resumable:
                # Discard buffered (unflushed) chunks and re-request from
                # the last byte written to disk.
                chunks = []
                #create new response
                print 'Download resumed (%d) %s' % (resume, dest)
                resp = getResponse(url, headers, total)
            else:
                #use existing response
                pass
def getFavourites(content):
    """Return the rows of table *content* from the favourites database.

    Each row comes back as (id, item_dict); the stored value is a repr()
    of the dict, recovered here with eval(). Returns [] on any failure
    (e.g. missing database or table).
    """
    try:
        dbcon = database.connect(favouritesFile)
        try:
            dbcur = dbcon.cursor()
            dbcur.execute("SELECT * FROM %s" % content)
            items = dbcur.fetchall()
            # NOTE: rows were written by this addon as repr(dict); eval is
            # only ever applied to that locally-written data.
            items = [(i[0].encode('utf-8'), eval(i[1].encode('utf-8'))) for i in items]
        finally:
            # Fix: the connection was previously leaked on every call.
            dbcon.close()
    except:
        items = []

    return items


def getProgress(content):
    """Return the rows of table *content* from the progress database.

    Same row shape and error behavior as getFavourites().
    """
    try:
        dbcon = database.connect(progressFile)
        try:
            dbcur = dbcon.cursor()
            dbcur.execute("SELECT * FROM %s" % content)
            items = dbcur.fetchall()
            items = [(i[0].encode('utf-8'), eval(i[1].encode('utf-8'))) for i in items]
        finally:
            dbcon.close()
    except:
        items = []

    return items
def addEpisodes(meta, content):
    """Add an episode entry (JSON-encoded *meta*) to the watchlist database.

    The *content* argument is ignored and forced to the 'episode' table.
    Keyed by imdb id, falling back to tvdb id. Silently returns on any error.
    """
    try:
        item = dict()
        meta = json.loads(meta)
        content = "episode"  # episodes always live in their own table
        try: item_id = meta['imdb']
        except: item_id = meta['tvdb']

        if 'title' in meta: title = item['title'] = meta['title']
        if 'tvshowtitle' in meta: title = item['tvshowtitle'] = meta['tvshowtitle']
        # Copy the optional metadata fields that the watchlist displays.
        for key in ('year', 'poster', 'fanart', 'imdb', 'tmdb', 'tvdb',
                    'tvrage', 'episode', 'season', 'premiered', 'original_year'):
            if key in meta: item[key] = meta[key]

        control.makeFile(dataPath)
        dbcon = database.connect(favouritesFile)
        try:
            dbcur = dbcon.cursor()
            dbcur.execute("CREATE TABLE IF NOT EXISTS %s (""id TEXT, ""items TEXT, ""UNIQUE(id)"");" % content)
            # Fix: bind the id as a parameter instead of interpolating it into
            # the SQL string (ids containing quotes broke the statement).
            dbcur.execute("DELETE FROM %s WHERE id = ?" % content, (item_id,))
            dbcur.execute("INSERT INTO %s Values (?, ?)" % content, (item_id, repr(item)))
            dbcon.commit()
        finally:
            dbcon.close()

        control.refresh()
        control.infoDialog('Added to Watchlist', heading=title)
    except:
        return
def deleteProgress(meta, content):
    """Remove the entry described by JSON *meta* from the progress database.

    Tries each known id namespace (imdb, tvdb, tmdb) against table *content*;
    missing ids and SQL failures are ignored. Silently returns on any error.
    """
    try:
        meta = json.loads(meta)
        try:
            dbcon = database.connect(progressFile)
            try:
                dbcur = dbcon.cursor()
                for key in ('imdb', 'tvdb', 'tmdb'):
                    # Fix: bind the id as a parameter instead of interpolating
                    # it into the SQL string (quote-safe, injection-safe).
                    try: dbcur.execute("DELETE FROM %s WHERE id = ?" % content, (meta[key],))
                    except: pass
                dbcon.commit()
            finally:
                dbcon.close()
        except:
            pass

        control.refresh()

    except:
        return
import requests.sessions
# NOTE(review): this imports BeautifulSoup 3, but below it is called with
# 'html.parser', which is BeautifulSoup 4 calling convention -- confirm which
# version the addon actually bundles.
from BeautifulSoup import BeautifulSoup

def get_source_info(url):
    """Scrape a hoster page and return {'source': ..., 'qual': ...}.

    Only thevideo.me is implemented: the page's inline scripts are searched
    for a "title:" line and the quality is guessed from it. For vidzi the
    function is unfinished and returns the bare string "SD"; any other host
    falls through and returns None implicitly.
    NOTE(review): the three return shapes (dict / str / None) are
    inconsistent -- callers must handle all of them.
    """
    source_info = {}
    if 'thevideo' in url:
        source_info['source'] = 'thevideo.me'
        with requests.session() as s:
            p = s.get(url)
            soup = BeautifulSoup(p.text, 'html.parser')
            # Inline scripts only: no src= attribute and no type= attribute.
            title = soup.findAll('script', src=False, type=False)
            for i in title:
                if "title" in i.prettify():
                    for line in i.prettify().split('\n'):
                        if " title" in line:
                            # Strip the JS object syntax around the title value.
                            line = line.replace("title: '", '').replace("',", '')
                            # NOTE(review): 'qual' is overwritten for every
                            # matching line; the last match wins.
                            if "720" in line:
                                source_info['qual'] = "720p"
                            elif "1080" in line:
                                source_info['qual'] = "1080p"
                            else:
                                source_info['qual'] = "SD"
        return source_info
    elif 'vidzi' in url:
        #Not completed
        return "SD"
Add-on 4 | Copyright (C) 2016 tknorris 5 | 6 | This program is free software: you can redistribute it and/or modify 7 | it under the terms of the GNU General Public License as published by 8 | the Free Software Foundation, either version 3 of the License, or 9 | (at your option) any later version. 10 | 11 | This program is distributed in the hope that it will be useful, 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | GNU General Public License for more details. 15 | 16 | You should have received a copy of the GNU General Public License 17 | along with this program. If not, see . 18 | """ 19 | 20 | 21 | 22 | import re 23 | import sys 24 | import urllib 25 | import string 26 | import json 27 | 28 | class JSUnfuck(object): 29 | numbers = None 30 | words = { 31 | "(![]+[])": "false", 32 | "([]+{})": "[object Object]", 33 | "(!![]+[])": "true", 34 | "([][[]]+[])": "undefined", 35 | "(+{}+[])": "NaN", 36 | "([![]]+[][[]])": "falseundefined", 37 | "([][f+i+l+t+e+r]+[])": "function filter() { [native code] }", 38 | "(!![]+[][f+i+l+t+e+r])": "truefunction filter() { [native code] }", 39 | "(+![]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])": "0function String() { [native code] }", 40 | "(+![]+[![]]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])": "0falsefunction String() { [native code] }", 41 | "([]+[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +l+o+c+a+t+i+o+n)())": "https://123movies.to", 42 | "([]+[])[f+o+n+t+c+o+l+o+r]()": '', 43 | "(+(+!![]+e+1+0+0+0)+[])": "Infinity", 44 | "(+[![]]+[][f+i+l+t+e+r])": 'NaNfunction filter() { [native code] }', 45 | '(+[![]]+[+(+!+[]+(!+[]+[])[3]+[1]+[0]+[0]+[0])])': 'NaNInfinity', 46 | '([]+[])[i+t+a+l+i+c+s]()': '', 47 | '[[]][c+o+n+c+a+t]([[]])+[]': ',', 48 | '([][f+i+l+l]+[])': 'function fill() { [native code]}', 49 | '(!![]+[][f+i+l+l])': 'truefunction fill() { [native code]}', 50 | '((+[])[c+o+n+s+t+r+u+c+t+o+r]+[])': 'function Number() {[native code]} 
_display:45:1', 51 | '(+(+!+[]+[1]+e+[2]+[0])+[])': '1.1e+21', 52 | '([]+[])[c+o+n+s+t+r+u+c+t+o+r][n+a+m+e]': 'S+t+r+i+n+g', 53 | '([][e+n+t+r+i+e+s]()+[])': '[object Array Iterator]', 54 | '([]+[])[l+i+n+k](")': '', 55 | '(![]+[0])[i+t+a+l+i+c+s]()': 'false0', 56 | # dummy to force array dereference 57 | 'DUMMY1': '6p', 58 | 'DUMMY2': '2x', 59 | 'DUMMY3': '%3C', 60 | 'DUMMY4': '%5B', 61 | 'DUMMY5': '6q', 62 | 'DUMMY6': '4h', 63 | } 64 | 65 | uniqs = { 66 | '[t+o+S+t+r+i+n+g]': 1, 67 | '[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2, 68 | '[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3, 69 | '[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2, 70 | '[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3, 71 | } 72 | 73 | def __init__(self, js): 74 | self.js = js 75 | 76 | def decode(self, replace_plus=True): 77 | while True: 78 | start_js = self.js 79 | self.repl_words(self.words) 80 | self.repl_numbers() 81 | self.repl_arrays(self.words) 82 | self.repl_uniqs(self.uniqs) 83 | if start_js == self.js: 84 | break 85 | 86 | if replace_plus: 87 | self.js = self.js.replace('+', '') 88 | self.js = re.sub('\[[A-Za-z]*\]', '', self.js) 89 | self.js = re.sub('\[(\d+)\]', '\\1', self.js) 90 | return self.js 91 | 92 | def repl_words(self, words): 93 | while True: 94 | start_js = self.js 95 | for key, value in sorted(words.items(), key=lambda x: len(x[0]), reverse=True): 96 | self.js = self.js.replace(key, value) 97 | 98 | if self.js == start_js: 99 | break 100 | 101 | def repl_arrays(self, words): 102 | for word in sorted(words.values(), key=lambda x: len(x), reverse=True): 103 | for index in xrange(0, 100): 104 | try: 105 | repl = word[index] 106 | self.js = self.js.replace('%s[%d]' % (word, index), repl) 107 | except: 108 | pass 109 | 110 | def repl_numbers(self): 111 | if self.numbers is None: 112 | self.numbers = self.__gen_numbers() 113 | 114 | while True: 115 | start_js = self.js 116 
| for key, value in sorted(self.numbers.items(), key=lambda x: len(x[0]), reverse=True): 117 | self.js = self.js.replace(key, value) 118 | 119 | if self.js == start_js: 120 | break 121 | 122 | def repl_uniqs(self, uniqs): 123 | for key, value in uniqs.iteritems(): 124 | if key in self.js: 125 | if value == 1: 126 | self.__handle_tostring() 127 | elif value == 2: 128 | self.__handle_escape(key) 129 | elif value == 3: 130 | self.__handle_unescape(key) 131 | 132 | def __handle_tostring(self): 133 | for match in re.finditer('(\d+)\[t\+o\+S\+t\+r\+i\+n\+g\](\d+)', self.js): 134 | repl = to_base(match.group(1), match.group(2)) 135 | self.js = self.js.replace(match.group(0), repl) 136 | 137 | def __handle_escape(self, key): 138 | while True: 139 | start_js = self.js 140 | offset = self.js.find(key) + len(key) 141 | if self.js[offset] == '(' and self.js[offset + 2] == ')': 142 | c = self.js[offset + 1] 143 | self.js = self.js.replace('%s(%s)' % (key, c), urllib.quote(c)) 144 | 145 | if start_js == self.js: 146 | break 147 | 148 | def __handle_unescape(self, key): 149 | start = 0 150 | while True: 151 | start_js = self.js 152 | offset = self.js.find(key, start) 153 | if offset == -1: break 154 | 155 | offset += len(key) 156 | expr = '' 157 | extra = '' 158 | last_c = self.js[offset - 1] 159 | abort = False 160 | for i, c in enumerate(self.js[offset:]): 161 | extra += c 162 | if c == ')': 163 | break 164 | elif (i > 0 and c == '(') or (c == '[' and last_c != '+'): 165 | abort = True 166 | break 167 | elif c == '%' or c in string.hexdigits: 168 | expr += c 169 | last_c = c 170 | 171 | if not abort: 172 | self.js = self.js.replace(key + extra, urllib.unquote(expr)) 173 | 174 | if start_js == self.js: 175 | break 176 | else: 177 | start = offset 178 | 179 | def __gen_numbers(self): 180 | n = {'!+[]+!![]+!![]+!![]+!![]+!![]+!![]+!![]+!![]': '9', 181 | '!+[]+!![]+!![]+!![]+!![]': '5', '!+[]+!![]+!![]+!![]': '4', 182 | '!+[]+!![]+!![]+!![]+!![]+!![]': '6', '!+[]+!![]': '2', 183 | 
def to_base(n, base, digits="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Render integer *n* (or a numeric string) in the given *base*.

    Digits are taken from *digits*, so bases up to 36 are supported.
    """
    n, base = int(n), int(base)
    if n < base:
        return digits[n]
    out = []
    while n:
        n, remainder = divmod(n, base)
        out.append(digits[remainder])
    return ''.join(reversed(out))
def main():
    """Decode the JSFuck-obfuscated file named on the command line and print it."""
    path = sys.argv[1]
    with open(path) as handle:
        obfuscated = handle.read()

    print(JSUnfuck(obfuscated).decode())

if __name__ == '__main__':
    sys.exit(main())
def log(msg, level=LOGNOTICE):
    """Write *msg* to the addon debug log.

    Honors two addon settings: 'addon_debug' (master switch -- nothing is
    logged unless it is 'true') and 'debug.location' ('0' writes via print,
    anything else appends to a private log file under Kodi's log path).
    Logging failures are themselves swallowed after one xbmc.log attempt.
    """
    debug_enabled = control.setting('addon_debug')
    debug_log = control.setting('debug.location')

    print(DEBUGPREFIX + ' Debug Enabled?: ' + str(debug_enabled))
    print(DEBUGPREFIX + ' Debug Log?: ' + str(debug_log))

    # Fix: reuse the settings already fetched above instead of querying the
    # settings store a second time for each of them.
    if not debug_enabled == 'true':
        return

    try:
        if isinstance(msg, unicode):
            msg = '%s (ENCODED)' % (msg.encode('utf-8'))

        if not debug_log == '0':
            # NOTE(review): the file is named 'atreides.log' although this
            # addon is Exodus Redux -- looks inherited from the Atreides
            # addon; confirm before renaming (users may grep for it).
            log_file = os.path.join(LOGPATH, 'atreides.log')
            if not os.path.exists(log_file):
                f = open(log_file, 'w')
                f.close()
            with open(log_file, 'a') as f:
                line = '[%s %s] %s: %s' % (datetime.now().date(), str(datetime.now().time())[:8], DEBUGPREFIX, msg)
                f.write(line.rstrip('\r\n')+'\n')
        else:
            print('%s: %s' % (DEBUGPREFIX, msg))
    except Exception as e:
        try:
            xbmc.log('Logging Failure: %s' % (e), level)
        except Exception:
            pass
def trace(method):
    """Decorator: when Kodi debug logging is on, log each call's wall time and args."""
    def method_trace_on(*args, **kwargs):
        started = time.time()
        outcome = method(*args, **kwargs)
        finished = time.time()
        log('{name!r} time: {time:2.4f}s args: |{args!r}| kwargs: |{kwargs!r}|'.format(
            name=method.__name__, time=finished - started, args=args, kwargs=kwargs), LOGDEBUG)
        return outcome

    def method_trace_off(*args, **kwargs):
        return method(*args, **kwargs)

    # Pick the wrapper once, at decoration time.
    return method_trace_on if _is_debugging() else method_trace_off


def _is_debugging():
    """Return Kodi's 'debug.showloginfo' setting value, or False if absent."""
    command = {'jsonrpc': '2.0', 'id': 1, 'method': 'Settings.getSettings',
               'params': {'filter': {'section': 'system', 'category': 'logging'}}}
    js_data = execute_jsonrpc(command)
    return next((item['value'] for item in js_data.get('result', {}).get('settings', {})
                 if item['id'] == 'debug.showloginfo'), False)


def execute_jsonrpc(command):
    """Send *command* (dict or pre-serialized string) over Kodi JSON-RPC; return the parsed reply."""
    payload = command if isinstance(command, basestring) else json.dumps(command)
    return json.loads(control.jsonrpc(payload))
def fetch(items, lang='en', user=''):
    """Merge cached metadata into each dict of *items* (keyed by imdb/tvdb id).

    Entries older than 720 hours are ignored. Items that get cache data are
    flagged with 'metacache': True. Always returns *items* (possibly updated
    in place); database failures leave it untouched.
    """
    try:
        t2 = int(time.time())
        dbcon = database.connect(control.metacacheFile)
        dbcur = dbcon.cursor()
    except Exception:
        return items

    for i in range(0, len(items)):
        try:
            # Fix: bind values as parameters instead of interpolating them
            # into the SQL string (quote-safe, injection-safe).
            dbcur.execute(
                "SELECT * FROM meta WHERE (imdb = ? and lang = ? and user = ? and not imdb = '0') or (tvdb = ? and lang = ? and user = ? and not tvdb = '0')",
                (items[i]['imdb'], lang, user, items[i]['tvdb'], lang, user))
            match = dbcur.fetchone()

            t1 = int(match[5])
            # Entries expire after 720 hours.
            update = (abs(t2 - t1) / 3600) >= 720
            if update is True:
                raise Exception()

            # NOTE: rows are written by insert() as repr(dict); eval is only
            # ever applied to that locally-written data.
            item = eval(match[4].encode('utf-8'))
            item = dict((k, v) for k, v in item.iteritems() if not v == '0')

            items[i].update(item)
            items[i].update({'metacache': True})
        except Exception:
            pass

    # Fix: close the connection (previously leaked on every call).
    try: dbcon.close()
    except Exception: pass

    return items


def insert(meta):
    """Write a list of metadata dicts to the cache table, replacing stale rows.

    Each element of *meta* needs 'imdb', 'tvdb' and 'item' keys; 'lang'
    defaults to 'en' and 'user' to ''. Silently returns on failure.
    """
    try:
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.metacacheFile)
        dbcur = dbcon.cursor()
        dbcur.execute(
            "CREATE TABLE IF NOT EXISTS meta ("
            "imdb TEXT, "
            "tvdb TEXT, "
            "lang TEXT, "
            "user TEXT, "
            "item TEXT, "
            "time TEXT, "
            "UNIQUE(imdb, tvdb, lang, user)"
            ");")
        t = int(time.time())
        for m in meta:
            try:
                if "user" not in m:
                    m["user"] = ''
                if "lang" not in m:
                    m["lang"] = 'en'
                i = repr(m['item'])
                try:
                    # Fix: was "DELETE * FROM meta ..." -- invalid SQL that
                    # always raised (and was swallowed), so existing rows were
                    # never refreshed and the INSERT hit the UNIQUE constraint.
                    dbcur.execute(
                        "DELETE FROM meta WHERE (imdb = ? and lang = ? and user = ? and not imdb = '0') or (tvdb = ? and lang = ? and user = ? and not tvdb = '0')",
                        (m['imdb'], m['lang'], m['user'], m['tvdb'], m['lang'], m['user']))
                except Exception:
                    pass
                dbcur.execute("INSERT INTO meta Values (?, ?, ?, ?, ?, ?)",
                              (m['imdb'], m['tvdb'], m['lang'], m['user'], i, t))
            except Exception:
                pass

        dbcon.commit()
        dbcon.close()
    except Exception:
        return


def local(items, link, poster, fanart):
    """Fill missing poster/fanart fields of *items* from the local 'mv' table.

    link is a format template taking (size, path); *poster* and *fanart*
    name the keys to populate. Existing non-'0' values are kept. Always
    returns *items*; database failures leave it untouched.
    """
    try:
        dbcon = database.connect(control.metaFile())
        dbcur = dbcon.cursor()
        args = [i['imdb'] for i in items]
        # Fix: bind the ids as parameters instead of quoting them by hand.
        dbcur.execute('SELECT * FROM mv WHERE imdb IN (%s)' % ', '.join('?' * len(args)), args)
        data = dbcur.fetchall()
        dbcon.close()
    except Exception:
        return items

    for i in range(0, len(items)):
        try:
            item = items[i]

            match = [x for x in data if x[1] == item['imdb']][0]

            try:
                # Keep any existing, non-placeholder poster.
                if poster in item and not item[poster] == '0':
                    raise Exception()
                if match[2] == '0':
                    raise Exception()
                items[i].update({poster: link % ('300', '/%s.jpg' % match[2])})
            except Exception:
                pass
            try:
                if fanart in item and not item[fanart] == '0':
                    raise Exception()
                if match[3] == '0':
                    raise Exception()
                items[i].update({fanart: link % ('1280', '/%s.jpg' % match[3])})
            except Exception:
                pass
        except Exception:
            pass

    return items
18 | ''' 19 | 20 | import json 21 | 22 | from resources.lib.modules import control 23 | from resources.lib.modules import trakt 24 | 25 | 26 | def getMovieIndicators(refresh=False): 27 | try: 28 | if trakt.getTraktIndicatorsInfo() == True: raise Exception() 29 | from metahandler import metahandlers 30 | indicators = metahandlers.MetaData(preparezip=False) 31 | return indicators 32 | except: 33 | pass 34 | try: 35 | if trakt.getTraktIndicatorsInfo() == False: raise Exception() 36 | if refresh == False: timeout = 720 37 | elif trakt.getWatchedActivity() < trakt.timeoutsyncMovies(): timeout = 720 38 | else: timeout = 0 39 | indicators = trakt.cachesyncMovies(timeout=timeout) 40 | return indicators 41 | except: 42 | pass 43 | 44 | 45 | def getTVShowIndicators(refresh=False): 46 | try: 47 | if trakt.getTraktIndicatorsInfo() == True: raise Exception() 48 | from metahandler import metahandlers 49 | indicators = metahandlers.MetaData(preparezip=False) 50 | return indicators 51 | except: 52 | pass 53 | try: 54 | if trakt.getTraktIndicatorsInfo() == False: raise Exception() 55 | if refresh == False: timeout = 720 56 | elif trakt.getWatchedActivity() < trakt.timeoutsyncTVShows(): timeout = 720 57 | else: timeout = 0 58 | indicators = trakt.cachesyncTVShows(timeout=timeout) 59 | return indicators 60 | except: 61 | pass 62 | 63 | 64 | def getSeasonIndicators(imdb): 65 | try: 66 | if trakt.getTraktIndicatorsInfo() == False: raise Exception() 67 | indicators = trakt.syncSeason(imdb) 68 | return indicators 69 | except: 70 | pass 71 | 72 | 73 | def getMovieOverlay(indicators, imdb): 74 | try: 75 | try: 76 | playcount = indicators._get_watched('movie', imdb, '', '') 77 | return str(playcount) 78 | except: 79 | playcount = [i for i in indicators if i == imdb] 80 | playcount = 7 if len(playcount) > 0 else 6 81 | return str(playcount) 82 | except: 83 | return '6' 84 | 85 | 86 | def getTVShowOverlay(indicators, tvdb): 87 | try: 88 | playcount = [i[0] for i in indicators if i[0] == tvdb 
def getEpisodeOverlay(indicators, imdb, tvdb, season, episode):
    """Return the overlay code for one episode: '7' = watched, '6' = unwatched."""
    try:
        try:
            # metahandler backend.
            playcount = indicators._get_watched_episode({'imdb_id': imdb, 'season': season, 'episode': episode, 'premiered': ''})
            return str(playcount)
        except Exception:
            # Trakt backend: rows look like (tvdb, ..., [(season, episode), ...]).
            shows = [i[2] for i in indicators if i[0] == tvdb]
            pairs = shows[0] if shows else []
            hits = [i for i in pairs
                    if int(season) == int(i[0]) and int(episode) == int(i[1])]
            return '7' if hits else '6'
    except Exception:
        return '6'


def markMovieDuringPlayback(imdb, watched):
    """Propagate a watched toggle (7/6) to Trakt and the metahandler DB."""
    try:
        if not trakt.getTraktIndicatorsInfo(): raise Exception()

        if int(watched) == 7: trakt.markMovieAsWatched(imdb)
        else: trakt.markMovieAsNotWatched(imdb)
        trakt.cachesyncMovies()

        # NOTE(review): always clears the flag when the Trakt addon manages
        # movies, regardless of *watched* -- presumably hands scrobbling over
        # to the Trakt addon; confirm before changing.
        if trakt.getTraktAddonMovieInfo():
            trakt.markMovieAsNotWatched(imdb)
    except Exception:
        pass

    try:
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        metaget.get_meta('movie', name='', imdb_id=imdb)
        metaget.change_watched('movie', name='', imdb_id=imdb, watched=int(watched))
    except Exception:
        pass


def markEpisodeDuringPlayback(imdb, tvdb, season, episode, watched):
    """Episode counterpart of markMovieDuringPlayback."""
    try:
        if not trakt.getTraktIndicatorsInfo(): raise Exception()

        if int(watched) == 7: trakt.markEpisodeAsWatched(tvdb, season, episode)
        else: trakt.markEpisodeAsNotWatched(tvdb, season, episode)
        trakt.cachesyncTVShows()

        # NOTE(review): same unconditional clear as the movie path above.
        if trakt.getTraktAddonEpisodeInfo():
            trakt.markEpisodeAsNotWatched(tvdb, season, episode)
    except Exception:
        pass

    try:
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        metaget.get_meta('tvshow', name='', imdb_id=imdb)
        metaget.get_episode_meta('', imdb_id=imdb, season=season, episode=episode)
        metaget.change_watched('episode', '', imdb_id=imdb, season=season, episode=episode, watched=int(watched))
    except Exception:
        pass


def movies(imdb, watched):
    """Context-menu handler: toggle a movie's watched state, then refresh."""
    control.busy()
    try:
        if not trakt.getTraktIndicatorsInfo(): raise Exception()
        if int(watched) == 7: trakt.markMovieAsWatched(imdb)
        else: trakt.markMovieAsNotWatched(imdb)
        trakt.cachesyncMovies()
        control.refresh()
    except Exception:
        pass

    try:
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        metaget.get_meta('movie', name='', imdb_id=imdb)
        metaget.change_watched('movie', name='', imdb_id=imdb, watched=int(watched))
        # Only refresh here when Trakt didn't already do it above.
        if not trakt.getTraktIndicatorsInfo(): control.refresh()
    except Exception:
        pass


def episodes(imdb, tvdb, season, episode, watched):
    """Context-menu handler: toggle one episode's watched state, then refresh."""
    control.busy()
    try:
        if not trakt.getTraktIndicatorsInfo(): raise Exception()
        if int(watched) == 7: trakt.markEpisodeAsWatched(tvdb, season, episode)
        else: trakt.markEpisodeAsNotWatched(tvdb, season, episode)
        trakt.cachesyncTVShows()
        control.refresh()
    except Exception:
        pass

    try:
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        metaget.get_meta('tvshow', name='', imdb_id=imdb)
        metaget.get_episode_meta('', imdb_id=imdb, season=season, episode=episode)
        metaget.change_watched('episode', '', imdb_id=imdb, season=season, episode=episode, watched=int(watched))
        if not trakt.getTraktIndicatorsInfo(): control.refresh()
    except Exception:
        pass


def tvshows(tvshowtitle, imdb, tvdb, season, watched):
    """Toggle a whole show (or one season when *season* is truthy).

    Walks every episode through metahandler (with a background progress
    dialog) when Trakt indicators are off; otherwise marks via Trakt.
    """
    control.busy()
    try:
        import xbmc

        # metahandler path only when Trakt indicators are disabled.
        if trakt.getTraktIndicatorsInfo(): raise Exception()

        from metahandler import metahandlers
        from resources.lib.indexers import episodes

        metaget = metahandlers.MetaData(preparezip=False)

        name = control.addonInfo('name')

        dialog = control.progressDialogBG
        dialog.create(str(name), str(tvshowtitle))
        dialog.update(0, str(name), str(tvshowtitle))

        metaget.get_meta('tvshow', name='', imdb_id=imdb)

        items = episodes.episodes().get(tvshowtitle, '0', imdb, tvdb, '0', idx=False)
        try: items = [i for i in items if int('%01d' % int(season)) == int('%01d' % int(i['season']))]
        except Exception: pass
        items = [{'label': '%s S%02dE%02d' % (tvshowtitle, int(i['season']), int(i['episode'])), 'season': int('%01d' % int(i['season'])), 'episode': int('%01d' % int(i['episode']))} for i in items]

        for idx in range(len(items)):
            # Stop promptly when Kodi is shutting down (original used
            # sys.exit() and relied on a bare except to swallow SystemExit).
            if xbmc.abortRequested: break

            dialog.update(int((100 / float(len(items))) * idx), str(name), str(items[idx]['label']))

            season, episode = items[idx]['season'], items[idx]['episode']
            metaget.get_episode_meta('', imdb_id=imdb, season=season, episode=episode)
            metaget.change_watched('episode', '', imdb_id=imdb, season=season, episode=episode, watched=int(watched))

        try: dialog.close()
        except Exception: pass
    except Exception:
        # dialog may be unbound when the failure happened before create().
        try: dialog.close()
        except Exception: pass

    try:
        if not trakt.getTraktIndicatorsInfo(): raise Exception()

        if season:
            # Season-level toggle: mark each episode of that season.
            from resources.lib.indexers import episodes
            items = episodes.episodes().get(tvshowtitle, '0', imdb, tvdb, season, idx=False)
            items = [(int(i['season']), int(i['episode'])) for i in items]
            items = [i[1] for i in items if int('%01d' % int(season)) == int('%01d' % i[0])]
            for ep in items:
                if int(watched) == 7: trakt.markEpisodeAsWatched(tvdb, season, ep)
                else: trakt.markEpisodeAsNotWatched(tvdb, season, ep)
        else:
            if int(watched) == 7: trakt.markTVShowAsWatched(tvdb)
            else: trakt.markTVShowAsNotWatched(tvdb)
        trakt.cachesyncTVShows()
    except Exception:
        pass

    control.refresh()
18 | """ 19 | 20 | 21 | import random 22 | import re 23 | import urllib 24 | import urlparse 25 | 26 | from resources.lib.modules import client 27 | from resources.lib.modules import utils 28 | 29 | 30 | def request(url, check, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, XHR=False, limit=None, referer=None, cookie=None, compression=True, output='', timeout='30'): 31 | try: 32 | r = client.request(url, close=close, redirect=redirect, proxy=proxy, post=post, headers=headers, mobile=mobile, XHR=XHR, limit=limit, referer=referer, cookie=cookie, compression=compression, output=output, timeout=timeout) 33 | if r is not None and error is not False: return r 34 | if check in str(r) or str(r) == '': return r 35 | 36 | proxies = sorted(get(), key=lambda x: random.random()) 37 | proxies = sorted(proxies, key=lambda x: random.random()) 38 | proxies = proxies[:3] 39 | 40 | for p in proxies: 41 | p += urllib.quote_plus(url) 42 | if post is not None: 43 | if isinstance(post, dict): 44 | post = utils.byteify(post) 45 | post = urllib.urlencode(post) 46 | p += urllib.quote_plus('?%s' % post) 47 | r = client.request(p, close=close, redirect=redirect, proxy=proxy, headers=headers, mobile=mobile, XHR=XHR, limit=limit, referer=referer, cookie=cookie, compression=compression, output=output, timeout='20') 48 | if check in str(r) or str(r) == '': return r 49 | except: 50 | pass 51 | 52 | 53 | def geturl(url): 54 | try: 55 | r = client.request(url, output='geturl') 56 | if r is None: return r 57 | 58 | host1 = re.findall('([\w]+)[.][\w]+$', urlparse.urlparse(url.strip().lower()).netloc)[0] 59 | host2 = re.findall('([\w]+)[.][\w]+$', urlparse.urlparse(r.strip().lower()).netloc)[0] 60 | if host1 == host2: return r 61 | 62 | proxies = sorted(get(), key=lambda x: random.random()) 63 | proxies = sorted(proxies, key=lambda x: random.random()) 64 | proxies = proxies[:3] 65 | 66 | for p in proxies: 67 | p += urllib.quote_plus(url) 68 | r = 
def parse(url):
    """Normalise a proxied URL: decode HTML entities, then unwrap the real
    target from the proxy's 'u' or 'q' query parameter when present."""
    try:
        url = client.replaceHTMLCodes(url)
    except:
        pass
    try:
        url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
    except:
        pass
    try:
        url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
    except:
        pass
    return url


def get():
    """Return the pool of web-proxy URL prefixes.

    Callers append the percent-encoded target URL to an entry picked at
    random (see request()/geturl() in this module).
    """
    return [
        'https://www.3proxy.us/index.php?hl=2e5&q=',
        'https://www.4proxy.us/index.php?hl=2e5&q=',
        'http://www.xxlproxy.com/index.php?hl=3e4&q=',
        'http://free-proxyserver.com/browse.php?b=20&u=',
        'http://proxite.net/browse.php?b=20&u=',
        'http://proxydash.com/browse.php?b=20&u=',
        'http://webproxy.stealthy.co/browse.php?b=20&u=',
        'http://sslpro.eu/browse.php?b=20&u=',
        'http://webtunnel.org/browse.php?b=20&u=',
        'http://proxycloud.net/browse.php?b=20&u=',
        'http://sno9.com/browse.php?b=20&u=',
        'http://www.onlineipchanger.com/browse.php?b=20&u=',
        'http://www.pingproxy.com/browse.php?b=20&u=',
        'https://www.ip123a.com/browse.php?b=20&u=',
        'http://buka.link/browse.php?b=20&u=',
        'https://zend2.com/open18.php?b=20&u=',
        'http://proxy.deals/browse.php?b=20&u=',
        'http://freehollandproxy.com/browse.php?b=20&u=',
        'http://proxy.rocks/browse.php?b=20&u=',
        'http://proxy.discount/browse.php?b=20&u=',
        'http://proxy.lgbt/browse.php?b=20&u=',
        'http://proxy.vet/browse.php?b=20&u=',
        'http://www.unblockmyweb.com/browse.php?b=20&u=',
        'http://onewebproxy.com/browse.php?b=20&u=',
        'http://pr0xii.com/browse.php?b=20&u=',
        'http://mlproxy.science/surf.php?b=20&u=',
        'https://www.prontoproxy.com/browse.php?b=20&u=',
        'http://fproxy.net/browse.php?b=20&u=',

        # Disabled mirrors kept for reference:
        #'http://www.ruby-group.xyz/browse.php?b=20&u=',
        #'http://securefor.com/browse.php?b=20&u=',
        #'http://www.singleclick.info/browse.php?b=20&u=',
        #'http://www.socialcommunication.xyz/browse.php?b=20&u=',
        #'http://www.theprotected.xyz/browse.php?b=20&u=',
        #'http://www.highlytrustedgroup.xyz/browse.php?b=20&u=',
        #'http://www.medicalawaregroup.xyz/browse.php?b=20&u=',
        #'http://www.proxywebsite.us/browse.php?b=20&u=',
        'http://www.mybriefonline.xyz/browse.php?b=20&u=',
        'http://www.navigate-online.xyz/browse.php?b=20&u='
    ]
25 | 26 | # See: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard 27 | # See: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation 28 | 29 | 30 | # Supported key sizes: 31 | # 128-bit 32 | # 192-bit 33 | # 256-bit 34 | 35 | 36 | # Supported modes of operation: 37 | # ECB - Electronic Codebook 38 | # CBC - Cipher-Block Chaining 39 | # CFB - Cipher Feedback 40 | # OFB - Output Feedback 41 | # CTR - Counter 42 | 43 | # See the README.md for API details and general information. 44 | 45 | # Also useful, PyCrypto, a crypto library implemented in C with Python bindings: 46 | # https://www.dlitz.net/software/pycrypto/ 47 | 48 | 49 | from .blockfeeder import decrypt_stream, Decrypter, encrypt_stream, Encrypter 50 | from .aes import AES, AESModeOfOperationCTR, AESModeOfOperationCBC, AESModeOfOperationCFB, AESModeOfOperationECB, AESModeOfOperationOFB, AESModesOfOperation, Counter 51 | 52 | VERSION = [1, 3, 0] 53 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/pyaes/blockfeeder.py: -------------------------------------------------------------------------------- 1 | # The MIT License (MIT) 2 | # 3 | # Copyright (c) 2014 Richard Moore 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.


from .aes import AESBlockModeOfOperation, AESSegmentModeOfOperation, AESStreamModeOfOperation
from .util import append_PKCS7_padding, strip_PKCS7_padding, to_bufferable


# Three helpers are injected into each mode-of-operation class:
#
#   _can_consume(size)   -- how many bytes a single encrypt/decrypt call
#                           could consume out of `size` available bytes
#   _final_encrypt(data) -- encrypt the last chunk, applying padding
#   _final_decrypt(data) -- decrypt the last chunk, stripping padding


# ECB and CBC are block-only ciphers: whole 16-byte blocks at a time.

def _block_can_consume(self, size):
    if size >= 16: return 16
    return 0

# After PKCS#7 padding the final chunk may span two blocks.
def _block_final_encrypt(self, data):
    data = append_PKCS7_padding(data)
    if len(data) == 32:
        return self.encrypt(data[:16]) + self.encrypt(data[16:])
    return self.encrypt(data)

def _block_final_decrypt(self, data):
    return strip_PKCS7_padding(self.decrypt(data))

AESBlockModeOfOperation._can_consume = _block_can_consume
AESBlockModeOfOperation._final_encrypt = _block_final_encrypt
AESBlockModeOfOperation._final_decrypt = _block_final_decrypt


# CFB is a segment cipher: works in multiples of segment_bytes.

def _segment_can_consume(self, size):
    return self.segment_bytes * int(size // self.segment_bytes)

# CFB can finish a short block by padding with NULs and truncating the result.
def _segment_final_encrypt(self, data):
    faux_padding = (chr(0) * (self.segment_bytes - (len(data) % self.segment_bytes)))
    padded = data + to_bufferable(faux_padding)
    return self.encrypt(padded)[:len(data)]

def _segment_final_decrypt(self, data):
    faux_padding = (chr(0) * (self.segment_bytes - (len(data) % self.segment_bytes)))
    padded = data + to_bufferable(faux_padding)
    return self.decrypt(padded)[:len(data)]

AESSegmentModeOfOperation._can_consume = _segment_can_consume
AESSegmentModeOfOperation._final_encrypt = _segment_final_encrypt
AESSegmentModeOfOperation._final_decrypt = _segment_final_decrypt


# OFB and CTR are stream ciphers: any number of bytes at a time.

def _stream_can_consume(self, size):
    return size

def _stream_final_encrypt(self, data):
    return self.encrypt(data)

def _stream_final_decrypt(self, data):
    return self.decrypt(data)

AESStreamModeOfOperation._can_consume = _stream_can_consume
AESStreamModeOfOperation._final_encrypt = _stream_final_encrypt
AESStreamModeOfOperation._final_decrypt = _stream_final_decrypt


class BlockFeeder(object):
    '''The super-class for objects to handle chunking a stream of bytes
       into the appropriate block size for the underlying mode of operation
       and applying (or stripping) padding, as necessary.'''

    def __init__(self, mode, feed, final):
        self._mode = mode
        self._feed = feed
        self._final = final
        self._buffer = to_bufferable("")

    def feed(self, data = None):
        '''Provide bytes to encrypt (or decrypt), returning any bytes
           possible from this or any previous calls to feed.

           Call with None or an empty string to flush the mode of
           operation and return any final bytes; no further calls to
           feed may be made.'''

        if self._buffer is None:
            raise ValueError('already finished feeder')

        # Finalize; process the spare bytes we were keeping
        if not data:
            result = self._final(self._buffer)
            self._buffer = None
            return result

        self._buffer += to_bufferable(data)

        # We keep 16 bytes around so we can determine padding
        result = to_bufferable('')
        while len(self._buffer) > 16:
            can_consume = self._mode._can_consume(len(self._buffer) - 16)
            if can_consume == 0: break
            result += self._feed(self._buffer[:can_consume])
            self._buffer = self._buffer[can_consume:]

        return result


class Encrypter(BlockFeeder):
    'Accepts bytes of plaintext and returns encrypted ciphertext.'

    def __init__(self, mode):
        BlockFeeder.__init__(self, mode, mode.encrypt, mode._final_encrypt)


class Decrypter(BlockFeeder):
    'Accepts bytes of ciphertext and returns decrypted plaintext.'

    def __init__(self, mode):
        BlockFeeder.__init__(self, mode, mode.decrypt, mode._final_decrypt)


# 8kb blocks
BLOCK_SIZE = (1 << 13)

def _feed_stream(feeder, in_stream, out_stream, block_size = BLOCK_SIZE):
    'Uses feeder to read and convert from in_stream and write to out_stream.'

    while True:
        # BUGFIX: honour the block_size argument; the original always read
        # the module-level BLOCK_SIZE, silently ignoring the parameter that
        # encrypt_stream/decrypt_stream pass through.
        chunk = in_stream.read(block_size)
        if not chunk:
            break
        converted = feeder.feed(chunk)
        out_stream.write(converted)
    converted = feeder.feed()
    out_stream.write(converted)


def encrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE):
    'Encrypts a stream of bytes from in_stream to out_stream using mode.'

    encrypter = Encrypter(mode)
    _feed_stream(encrypter, in_stream, out_stream, block_size)


def decrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE):
    'Decrypts a stream of bytes from in_stream to out_stream using mode.'

    decrypter = Decrypter(mode)
    _feed_stream(decrypter, in_stream, out_stream, block_size)
# IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# Why to_bufferable?
# Python 2 strings double as byte buffers, while Python 3 represents binary
# data with the distinct `bytes` type.  These helpers paper over that gap so
# the rest of the package can treat buffers uniformly.


def to_bufferable(binary):
    # Python 2: a str already behaves as a byte buffer.
    return binary


def _get_byte(c):
    # Python 2: indexing a str yields a 1-char str, so convert via ord().
    return ord(c)


try:
    xrange  # only defined on Python 2
except Exception:

    # Python 3: rebind the helpers for the bytes type.
    def to_bufferable(binary):
        if isinstance(binary, bytes):
            return binary
        return bytes(ord(ch) for ch in binary)

    def _get_byte(c):
        # Python 3: indexing bytes already yields an int.
        return c


def append_PKCS7_padding(data):
    # Pad *data* up to a whole multiple of 16 bytes (PKCS#7): every added
    # byte carries the pad length.
    missing = 16 - (len(data) % 16)
    return data + to_bufferable(chr(missing) * missing)


def strip_PKCS7_padding(data):
    # Undo append_PKCS7_padding.  Raises ValueError for a non-block-sized
    # buffer; an implausible pad byte returns the buffer untouched (this
    # variant deliberately does not raise there).
    if len(data) % 16:
        raise ValueError("invalid length")

    pad = _get_byte(data[-1])

    if not pad or pad > 16:
        return data

    return data[:-pad]
# -*- coding: utf-8 -*-

"""
    Covenant Add-on

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import base64
import urlparse
import urllib
import hashlib
import re

from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import trakt
from resources.lib.modules import pyaes


def is_anime(content, type, type_id):
    """True when the Trakt genre list for the item mentions anime/animation."""
    try:
        r = trakt.getGenre(content, type, type_id)
        return 'anime' in r or 'animation' in r
    except:
        return False


def get_release_quality(release_name, release_link=None):
    """Guess (quality, info) for a release.

    Quality is one of '4K', '1080p', '720p', 'SCR', 'CAM' or 'SD'; info is a
    list of extra tags ('3D', 'HEVC').  The name is inspected first, then the
    link.  Returns None when release_name is None, ('SD', []) on any error.
    """
    if release_name is None: return

    try: release_name = release_name.encode('utf-8')
    except: pass

    try:
        quality = None

        release_name = release_name.upper()

        # Drop everything up to the year / SxxEyy token, then split into tags.
        fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', release_name)
        fmt = re.split('\.|\(|\)|\[|\]|\s|-', fmt)
        fmt = [i.lower() for i in fmt]
        if '2160p' in fmt: quality = '4K'
        elif '1080p' in fmt: quality = '1080p'
        elif '720p' in fmt: quality = '720p'
        elif 'brrip' in fmt: quality = '720p'
        elif any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
        elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

        if not quality:
            if release_link:
                release_link = release_link.lower()
                try: release_link = release_link.encode('utf-8')
                except: pass
                if '2160' in release_link: quality = '4K'
                elif '1080' in release_link: quality = '1080p'
                elif '720' in release_link: quality = '720p'
                elif '.hd' in release_link: quality = 'SD'
                else:
                    # BUGFIX: the original iterated release_link character by
                    # character, so multi-character tags could never match.
                    tokens = re.split('\.|\(|\)|\[|\]|\s|-', release_link)
                    if any(i in tokens for i in ['dvdscr', 'r5', 'r6']): quality = 'SCR'
                    elif any(i in tokens for i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']): quality = 'CAM'
                    else: quality = 'SD'
            else: quality = 'SD'
        info = []
        if '3d' in fmt or '.3D.' in release_name: info.append('3D')
        if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

        return quality, info
    except:
        return 'SD', []
'720p' 70 | elif '.hd' in release_link: quality = 'SD' 71 | else: 72 | if any(i in ['dvdscr', 'r5', 'r6'] for i in release_link): quality = 'SCR' 73 | elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in release_link): quality = 'CAM' 74 | else: quality = 'SD' 75 | else: quality = 'SD' 76 | info = [] 77 | if '3d' in fmt or '.3D.' in release_name: info.append('3D') 78 | if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC') 79 | 80 | return quality, info 81 | except: 82 | return 'SD', [] 83 | 84 | 85 | def getFileType(url): 86 | 87 | try: url = url.lower() 88 | except: url = str(url) 89 | type = '' 90 | 91 | if 'bluray' in url: type += ' BLURAY /' 92 | if '.web-dl' in url: type += ' WEB-DL /' 93 | if '.web.' in url: type += ' WEB-DL /' 94 | if 'hdrip' in url: type += ' HDRip /' 95 | if 'bd-r' in url: type += ' BD-R /' 96 | if 'bd-rip' in url: type += ' BD-RIP /' 97 | if 'bd.r' in url: type += ' BD-R /' 98 | if 'bd.rip' in url: type += ' BD-RIP /' 99 | if 'bdr' in url: type += ' BD-R /' 100 | if 'bdrip' in url: type += ' BD-RIP /' 101 | if 'atmos' in url: type += ' ATMOS /' 102 | if 'truehd' in url: type += ' TRUEHD /' 103 | if '.dd' in url: type += ' DolbyDigital /' 104 | if '5.1' in url: type += ' 5.1 /' 105 | if '.xvid' in url: type += ' XVID /' 106 | if '.mp4' in url: type += ' MP4 /' 107 | if '.avi' in url: type += ' AVI /' 108 | if 'ac3' in url: type += ' AC3 /' 109 | if 'h.264' in url: type += ' H.264 /' 110 | if '.x264' in url: type += ' x264 /' 111 | if '.x265' in url: type += ' x265 /' 112 | if 'subs' in url: 113 | if type != '': type += ' - WITH SUBS' 114 | else: type = 'SUBS' 115 | type = type.rstrip('/') 116 | return type 117 | 118 | 119 | def check_sd_url(release_link): 120 | 121 | try: 122 | release_link = release_link.lower() 123 | if '2160' in release_link: quality = '4K' 124 | elif '1080' in release_link: quality = '1080p' 125 | elif '720' in release_link: quality = '720p' 126 | 
elif '.hd.' in release_link: quality = '720p' 127 | elif 'hdtv' in release_link: quality = '720p' 128 | elif any(i in ['dvdscr', 'r5', 'r6'] for i in release_link): quality = 'SCR' 129 | elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in release_link): quality = 'CAM' 130 | else: quality = 'SD' 131 | return quality 132 | except: 133 | return 'SD' 134 | 135 | 136 | def label_to_quality(label): 137 | try: 138 | try: label = int(re.search('(\d+)', label).group(1)) 139 | except: label = 0 140 | 141 | if label >= 2160: 142 | return '4K' 143 | elif label >= 1440: 144 | return '1440p' 145 | elif label >= 1080: 146 | return '1080p' 147 | elif 720 <= label < 1080: 148 | return '720p' 149 | elif label < 720: 150 | return 'SD' 151 | except: 152 | return 'SD' 153 | 154 | def strip_domain(url): 155 | try: 156 | if url.lower().startswith('http') or url.startswith('/'): 157 | url = re.findall('(?://.+?|)(/.+)', url)[0] 158 | url = client.replaceHTMLCodes(url) 159 | url = url.encode('utf-8') 160 | return url 161 | except: 162 | return 163 | 164 | 165 | def is_host_valid(url, domains): 166 | try: 167 | host = __top_domain(url) 168 | hosts = [domain.lower() for domain in domains if host and host in domain.lower()] 169 | 170 | if hosts and '.' 
def convert_size(size_bytes):
    """Return a human-readable size string for *size_bytes*.

    Returns "0B" for zero, and None for anything under the MB range
    (B/KB results are deliberately suppressed by the callers).
    """
    import math
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    if units[exponent] in ('B', 'KB'):
        return None
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
    """OpenSSL EVP_BytesToKey-compatible key/IV derivation.

    Sizes are in 32-bit words (key_size=8 -> 32-byte key, iv_size=4 -> 16-byte
    IV).  Returns {'key': ..., 'iv': ...} as byte strings for use with the
    bundled pyaes (see evp_decode above).

    Fix: accumulate into a bytes buffer (and coerce text inputs to bytes) and
    use integer division for the word counter, so the routine works on
    Python 3 as well as Python 2 — hashlib digests are bytes and could not be
    concatenated onto the old str accumulator.
    """
    # Coerce text inputs to bytes; on Python 2 str *is* bytes, so no-op.
    if not isinstance(passwd, bytes):
        passwd = passwd.encode('utf-8')
    if not isinstance(salt, bytes):
        salt = salt.encode('utf-8')

    target_key_size = key_size + iv_size  # total words wanted
    derived_bytes = b""
    number_of_derived_words = 0
    block = None
    hasher = hashlib.new(hash_algorithm)
    while number_of_derived_words < target_key_size:
        # Each round hashes (previous block +) passphrase + salt.
        if block is not None:
            hasher.update(block)
        hasher.update(passwd)
        hasher.update(salt)
        block = hasher.digest()
        hasher = hashlib.new(hash_algorithm)

        # Optional key stretching (OpenSSL default is a single iteration).
        for _i in range(1, iterations):
            hasher.update(block)
            block = hasher.digest()
            hasher = hashlib.new(hash_algorithm)

        # Take only as many bytes as are still needed.
        derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]
        number_of_derived_words += len(block) // 4

    return {
        "key": derived_bytes[0: key_size * 4],
        "iv": derived_bytes[key_size * 4:]
    }
def get_scene_episode_number(tvdbid, season, episode):
    """Translate a TVDB (season, episode) pair to scene numbering via thexem.de.

    Falls back to the original (season, episode) on any failure or when the
    service does not report success.
    """
    try:
        response = json.loads(client.request(URL_PATTERN % (tvdbid, season, episode)))
        if response['result'] == 'success':
            scene = response['data']['scene']
            return scene['season'], scene['episode']
    except:
        pass
    return season, episode
18 | """ 19 | 20 | import sys 21 | import base64 22 | import json 23 | import random 24 | import re 25 | import urllib 26 | 27 | from resources.lib.modules import client 28 | from resources.lib.modules import control 29 | 30 | 31 | class trailer: 32 | def __init__(self): 33 | self.base_link = 'https://www.youtube.com' 34 | self.key_link = random.choice(['QUl6YVN5RDd2aFpDLTYta2habTVuYlVyLTZ0Q0JRQnZWcnFkeHNz', 'QUl6YVN5Q2RiNEFNenZpVG0yaHJhSFY3MXo2Nl9HNXBhM2ZvVXd3']) 35 | self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link) 36 | self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=id&type=video&maxResults=5&q=%s' + self.key_link 37 | self.youtube_watch = 'https://www.youtube.com/watch?v=%s' 38 | 39 | def play(self, name='', url='', windowedtrailer=0): 40 | try: 41 | url = self.worker(name, url) 42 | if not url:return 43 | 44 | title = control.infoLabel('ListItem.Title') 45 | if not title: title = control.infoLabel('ListItem.Label') 46 | icon = control.infoLabel('ListItem.Icon') 47 | 48 | item = control.item(label=name, iconImage=icon, thumbnailImage=icon, path=url) 49 | item.setInfo(type="Video",infoLabels={ "Title":name}) 50 | 51 | item.setProperty('IsPlayable','true') 52 | control.resolve(handle=int(sys.argv[1]), succeeded=True, listitem=item) 53 | if windowedtrailer == 1: 54 | # The call to the play() method is non-blocking. So we delay further script execution to keep the script alive at this spot. 55 | # Otherwise this script will continue and probably already be garbage collected by the time the trailer has ended. 56 | control.sleep(1000) # Wait until playback starts. Less than 900ms is too short (on my box). Make it one second. 57 | while control.player.isPlayingVideo(): 58 | control.sleep(1000) 59 | # Close the dialog. 60 | # Same behaviour as the fullscreenvideo window when : 61 | # the media plays to the end, 62 | # or the user pressed one of X, ESC, or Backspace keys on the keyboard/remote to stop playback. 
63 | control.execute("Dialog.Close(%s, true)" % control.getCurrentDialogId) 64 | except: 65 | pass 66 | 67 | def worker(self, name, url): 68 | try: 69 | if url.startswith(self.base_link): 70 | url = self.resolve(url) 71 | if not url: raise Exception() 72 | return url 73 | elif not url.startswith('http:'): 74 | url = self.youtube_watch % url 75 | url = self.resolve(url) 76 | if not url: raise Exception() 77 | return url 78 | else: 79 | raise Exception() 80 | except: 81 | query = name + ' trailer' 82 | query = self.search_link % urllib.quote_plus(query) 83 | return self.search(query) 84 | 85 | def search(self, url): 86 | try: 87 | apiLang = control.apiLanguage().get('youtube', 'en') 88 | 89 | if apiLang != 'en': 90 | url += "&relevanceLanguage=%s" % apiLang 91 | 92 | result = client.request(url) 93 | 94 | items = json.loads(result).get('items', []) 95 | items = [i.get('id', {}).get('videoId') for i in items] 96 | 97 | for vid_id in items: 98 | url = self.resolve(vid_id) 99 | if url: 100 | return url 101 | except: 102 | return 103 | 104 | def resolve(self, url): 105 | try: 106 | id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0] 107 | result = client.request(self.youtube_watch % id) 108 | 109 | message = client.parseDOM(result, 'div', attrs={'id': 'unavailable-submessage'}) 110 | message = ''.join(message) 111 | 112 | alert = client.parseDOM(result, 'div', attrs={'id': 'watch7-notification-area'}) 113 | 114 | if len(alert) > 0: raise Exception() 115 | if re.search('[a-zA-Z]', message): raise Exception() 116 | 117 | url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id 118 | return url 119 | except: 120 | return 121 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/tvmaze.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ''' 4 | Covenant Add-on 5 | 6 | This program is free software: you can redistribute it and/or 
modify 7 | it under the terms of the GNU General Public License as published by 8 | the Free Software Foundation, either version 3 of the License, or 9 | (at your option) any later version. 10 | 11 | This program is distributed in the hope that it will be useful, 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | GNU General Public License for more details. 15 | 16 | You should have received a copy of the GNU General Public License 17 | along with this program. If not, see . 18 | ''' 19 | 20 | import urllib,json 21 | 22 | from resources.lib.modules import cache 23 | from resources.lib.modules import client 24 | 25 | 26 | class tvMaze: 27 | def __init__(self, show_id = None): 28 | self.api_url = 'http://api.tvmaze.com/%s%s' 29 | self.show_id = show_id 30 | 31 | 32 | def showID(self, show_id = None): 33 | if (show_id != None): 34 | self.show_id = show_id 35 | return show_id 36 | 37 | return self.show_id 38 | 39 | 40 | def request(self, endpoint, query = None): 41 | try: 42 | # Encode the queries, if there is any... 43 | if (query != None): 44 | query = '?' 
+ urllib.urlencode(query) 45 | else: 46 | query = '' 47 | 48 | # Make the request 49 | request = self.api_url % (endpoint, query) 50 | 51 | # Send the request and get the response 52 | # Get the results from cache if available 53 | response = cache.get(client.request, 24, request) 54 | 55 | # Retrun the result as a dictionary 56 | return json.loads(response) 57 | except: 58 | pass 59 | 60 | return {} 61 | 62 | 63 | def showLookup(self, type, id): 64 | try: 65 | result = self.request('lookup/shows', {type: id}) 66 | 67 | # Storing the show id locally 68 | if ('id' in result): 69 | self.show_id = result['id'] 70 | 71 | return result 72 | except: 73 | pass 74 | 75 | return {} 76 | 77 | 78 | def shows(self, show_id = None, embed = None): 79 | try: 80 | if (not self.showID(show_id)): 81 | raise Exception() 82 | 83 | result = self.request('shows/%d' % self.show_id) 84 | 85 | # Storing the show id locally 86 | if ('id' in result): 87 | self.show_id = result['id'] 88 | 89 | return result 90 | except: 91 | pass 92 | 93 | return {} 94 | 95 | 96 | def showSeasons(self, show_id = None): 97 | try: 98 | if (not self.showID(show_id)): 99 | raise Exception() 100 | 101 | result = self.request('shows/%d/seasons' % int( self.show_id )) 102 | 103 | if (len(result) > 0 and 'id' in result[0]): 104 | return result 105 | except: 106 | pass 107 | 108 | return [] 109 | 110 | 111 | def showSeasonList(self, show_id): 112 | return {} 113 | 114 | 115 | def showEpisodeList(self, show_id = None, specials = False): 116 | try: 117 | if (not self.showID(show_id)): 118 | raise Exception() 119 | 120 | result = self.request('shows/%d/episodes' % int( self.show_id ), 'specials=1' if specials else '') 121 | 122 | if (len(result) > 0 and 'id' in result[0]): 123 | return result 124 | except: 125 | pass 126 | 127 | return [] 128 | 129 | 130 | def episodeAbsoluteNumber(self, thetvdb, season, episode): 131 | try: 132 | url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % 
    def getTVShowTranslation(self, thetvdb, lang):
        """Fetch the localized series name for *thetvdb* in language *lang*.

        Queries TheTVDB legacy XML API and returns a UTF-8 encoded title;
        returns None (implicitly) on any failure.
        """
        try:
            # The API key is stored base64-obfuscated; str.decode('base64')
            # is Python 2 only — TODO confirm before any Python 3 migration.
            url = 'http://thetvdb.com/api/%s/series/%s/%s.xml' % ('MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb, lang)
            r = client.request(url)
            # First <SeriesName> element in the response XML.
            title = client.parseDOM(r, 'SeriesName')[0]
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            return title
        except:
            pass
def run(e):
    """Decode a JuicyCodes.Run("...") payload and unpack the embedded JS.

    Returns the unpacked source text, or None when extraction, decoding or
    unpacking fails.

    Fixes: the decode loop previously hit `continue` without advancing `f`
    when a character was outside the base64 alphabet (backslashes survive the
    sanitizing regex) or when the input length was not a multiple of 4 —
    either case spun forever.  Also, the Python-2-only `unicode()` call is
    now guarded so Python 3 no longer silently discards a good result.
    """
    try:
        e = re.findall(r'JuicyCodes.Run\(([^\)]+)', e, re.IGNORECASE)[0]
        e = re.sub(r'\"\s*\+\s*\"', '', e)       # join "a"+"b" string pieces
        e = re.sub(r'[^A-Za-z0-9+\\/=]', '', e)  # keep base64-ish chars only
    except:
        return None

    t = ""
    n = r = i = s = o = u = a = 0
    f = 0

    # Manual base64 decode: '=' maps to index 64 and suppresses output bytes.
    while f < len(e):
        try:
            s = Juice.index(e[f]); f += 1
            o = Juice.index(e[f]); f += 1
            u = Juice.index(e[f]); f += 1
            a = Juice.index(e[f]); f += 1
        except ValueError:
            # Character not in the alphabet: skip it and resume.
            f += 1
            continue
        except IndexError:
            # Truncated trailing group: nothing more to decode.
            break
        n = s << 2 | o >> 4
        r = (15 & o) << 4 | u >> 2
        i = (3 & u) << 6 | a
        t += chr(n)
        if 64 != u: t += chr(r)
        if 64 != a: t += chr(i)

    try:
        t = jsunpack.unpack(t)
        try:
            t = unicode(t, 'utf-8')  # Python 2: bytes -> text
        except NameError:
            pass                     # Python 3: already str
    except:
        t = None

    return t
'JuicyCodes.Run("ZXZhbChmdW5jdGlvbihwLGEsYyxrLGUsZCl7ZT1mdW5jdGlvbihj"+"KXtyZXR1cm4oYzxhPycnOmUocGFyc2VJbnQoYy9hKSkpKygoYz1j"+"JWEpPjM1P1N0cmluZy5mcm9tQ2hhckNvZGUoYysyOSk6Yy50b1N0"+"cmluZygzNikpfTtpZighJycucmVwbGFjZSgvXi8sU3RyaW5nKSl7"+"d2hpbGUoYy0tKXtkW2UoYyldPWtbY118fGUoYyl9az1bZnVuY3Rp"+"b24oZSl7cmV0dXJuIGRbZV19XTtlPWZ1bmN0aW9uKCl7cmV0dXJu"+"J1xcdysnfTtjPTF9O3doaWxlKGMtLSl7aWYoa1tjXSl7cD1wLnJl"+"cGxhY2UobmV3IFJlZ0V4cCgnXFxiJytlKGMpKydcXGInLCdnJyks"+"a1tjXSl9fXJldHVybiBwfSgnMyBqPXsiSCI6IlgiLCJKIjoiUC1G"+"IiwiSyI6bH07eS5NPVwnVj09XCc7MyAxPXkoXCd2LTFcJyk7MyBk"+"OzMgNzszIEksbT1sOzMgajskKHgpLncoMigpe2ouRT14LlI7JC5R"+"KHtOOlwnTzovL1Mudi5ZLzdcJyxXOlwnVVwnLDY6aixaOlwnTFwn"+"LEM6MihlKXtkPWUuZDs3PWUuNzt0KCl9LH0pOyQoXCcjQi04XCcp"+"LnMoMigpeyQoXCcjZi04XCcpLmMoXCd1XCcpOzEuQShhLmkoNi5i"+"KSl9KTskKFwnI0QtOFwnKS5zKDIoKXskKFwnI2YtOFwnKS5jKFwn"+"dVwnKTsxLnEoKX0pfSk7MiB0KCl7MyBwPXs3OjcsZDpkLEc6IlQl"+"IiwxaTpcJzE2OjlcJywxbzpsLDFuOnt9LDFtOnsxazpcJyMxbFwn"+"LDFxOjF3LDExOjAsMXY6XCcxdFwnLDFyOlwnMXVcJ30sfTsxLjFz"+"KHApOzEuNChcJ3FcJywyKCl7fSk7MS40KFwnd1wnLDIoKXt9KTsx"+"LjQoXCcxcFwnLDIoKXt9KTsxLjQoXCcxalwnLDIoKXsxOChtJiZh"+"LmkoNi5iKSYmYS5pKDYuYik+MTkpezEuMTcoKTttPTE1OyQoXCcj"+"NS04XCcpLjEyKHooYS5pKDYuYikpKTskKFwnI2YtOFwnKS5jKFwn"+"b1wnKX19KTsxLjQoXCc1XCcsMigpe2EuMTMoNi5iLDEuMTQoKSl9"+"KTsxLjQoXCduXCcsMigpeyQoXCcjZi1uXCcpLmMoXCdvXCcpfSk7"+"MS40KFwnMWFcJywyKCl7JChcJyNmLW5cJykuYyhcJ29cJyl9KX0y"+"IHoocil7MyA1PTFiIDFnKDAsMCwwKTs1LjFoKHIpOzMgZz01LjFm"+"KCk7MyBoPTUuMWUoKTszIGs9NS4xYygpOzFkKGc8MTA/KFwnMFwn"+"K2cpOmcpK1wnOlwnKyhoPDEwPyhcJzBcJytoKTpoKStcJzpcJyso"+"azwxMD8oXCcwXCcrayk6ayl9Jyw2Miw5NSwnfHBsYXllcnxmdW5j"+"dGlvbnx2YXJ8b258dGltZXxkYXRhfHNvdXJjZXN8cmVzdW1lfHxs"+"b2NhbFN0b3JhZ2V8aWR8bW9kYWx8dHJhY2tzfHxwb3B8dGltZV9o"+"fHRpbWVfbXxnZXRJdGVtfGRhdGFQT1NUfHRpbWVfc3x0cnVlfGZp"+"cnN0X2xvYWR8ZXJyb3J8c2hvd3xqd2NvbmZpZ3xwbGF5fF90aW1l"+"fGNsaWNrfGxvYWRQbGF5ZXJ8aGlkZXxzdHJlYW1kb3J8cmVhZHl8"+"ZG9jdW1lbnR8andwbGF5ZXJ8Y29udmVydF90aW1lfHNlZWt8eWVz"+"fHN1Y2Nlc3N8bm98cmVmZXJlcnxjOXZJek5CanVPRmRqcEtYcV9f"+"WlF
8d2lkdGh8ZXBpc29kZUlEfHBsYXlsaXN0fGZpbGV8c3VidGl0"+"bGV8anNvbnxrZXl8dXJsfGh0dHBzfFY0SUdfVGRxOFlPU2ZzWmlG"+"ZDFFc2xjeU9lSkIyUENZQ2hrXzRxcmkwX2lsTkE2TVpPX1BGcldX"+"REc1aHZkSGh8YWpheHxyZWZlcnJlcnxhcGl8MTAwfFBPU1R8SE92"+"OVlLNmVncFpnazVjY0JpWnBZZklBUXgzUTVib0dWN3RpR3d8bWV0"+"aG9kfDM2MDg0NXxjb3xkYXRhVHlwZXx8YmFja2dyb3VuZE9wYWNp"+"dHl8dGV4dHxzZXRJdGVtfGdldFBvc2l0aW9ufGZhbHNlfHxwYXVz"+"ZXxpZnwzMHxzZXR1cEVycm9yfG5ld3xnZXRTZWNvbmRzfHJldHVy"+"bnxnZXRNaW51dGVzfGdldEhvdXJzfERhdGV8c2V0U2Vjb25kc3xh"+"c3BlY3RyYXRpb3xmaXJzdEZyYW1lfGNvbG9yfGYzZjM3OHxjYXB0"+"aW9uc3xjYXN0fGF1dG9zdGFydHxjb21wbGV0ZXxmb250U2l6ZXxl"+"ZGdlU3R5bGV8c2V0dXB8SGVsdmV0aWNhfHJhaXNlZHxmb250ZmFt"+"aWx5fDIwJy5zcGxpdCgnfCcpLDAse30pKQo=")' 70 | res = run(codes) 71 | pass 72 | 73 | if __name__ == "__main__": 74 | sys.exit(int(main() or 0)) 75 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | Covenant Add-on 5 | 6 | This program is free software: you can redistribute it and/or modify 7 | it under the terms of the GNU General Public License as published by 8 | the Free Software Foundation, either version 3 of the License, or 9 | (at your option) any later version. 10 | 11 | This program is distributed in the hope that it will be useful, 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | GNU General Public License for more details. 15 | 16 | You should have received a copy of the GNU General Public License 17 | along with this program. If not, see . 
def title_key(title):
    """Return *title* with a leading English/German article stripped,
    suitable as a sort key; non-string input is returned unchanged.
    """
    try:
        if title is None:
            title = ''
        articles = ('the', 'a', 'an', 'der', 'die', 'das')
        leading = re.match(r'^((\w+)\s+)', title.lower())
        if leading is not None and leading.group(2) in articles:
            return title[len(leading.group(1)):]
        return title
    except:
        return title
def addView(content):
    """Persist the current container view id for (skin, content) in the
    views database, then show a confirmation dialog.

    Fix: the DELETE used %-string formatting, which broke (and was
    injectable) whenever skin/content contained a quote — use parameterized
    SQL instead; also close the connection when done.
    """
    try:
        skin = control.skin
        record = (skin, content, str(control.getCurrentViewId()))
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.viewsFile)
        try:
            dbcur = dbcon.cursor()
            dbcur.execute("CREATE TABLE IF NOT EXISTS views (""skin TEXT, ""view_type TEXT, ""view_id TEXT, ""UNIQUE(skin, view_type)"");")
            # Replace any previous mapping for this skin/content pair.
            dbcur.execute("DELETE FROM views WHERE skin = ? AND view_type = ?", (record[0], record[1]))
            dbcur.execute("INSERT INTO views Values (?, ?, ?)", record)
            dbcon.commit()
        finally:
            dbcon.close()

        viewName = control.infoLabel('Container.Viewmode')
        skinName = control.addon(skin).getAddonInfo('name')
        skinIcon = control.addon(skin).getAddonInfo('icon')

        control.infoDialog(viewName, heading=skinName, sound=True, icon=skinIcon)
    except:
        return
def check_login(source, username):
    """Return True when the logged-in marker appears in *source*
    (case-insensitive).

    NOTE(review): ``login_verified`` is referenced but never defined in this
    file — it is presumably injected elsewhere (the module imports
    __builtin__); verify.  *username* is accepted for API compatibility but
    unused here.
    """
    return bool(re.search(login_verified, source, re.IGNORECASE))
""" 46 | cookiepath = os.path.join(cookiepath, 'cookies.lwp') 47 | 48 | """ delete any old version of the cookie file """ 49 | try: 50 | os.remove(cookiepath) 51 | except: 52 | pass 53 | 54 | if username and password: 55 | """ first check to see if a current session is active """ 56 | addon_id = xbmcaddon.Addon().getAddonInfo('id') 57 | ownAddon = xbmcaddon.Addon(id=addon_id) 58 | expiration = ownAddon.getSetting('WEBLOGIN_EXPIRES_AT') 59 | if time.time() < expiration and len(expiration) > 1: 60 | return True 61 | """ the header used to pretend you are a browser """ 62 | user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3' 63 | 64 | """ build the form data necessary for the login """ 65 | login_data = urllib.urlencode({user_var:username, pwd_var:password}) 66 | 67 | """ build the request we will make """ 68 | req = urllib2.Request(login_url, login_data) 69 | req.add_header('User-Agent',user_agent) 70 | 71 | """ initiate the cookielib class """ 72 | cj = cookielib.LWPCookieJar() 73 | 74 | """ install cookielib into the url opener, so that cookies are handled """ 75 | opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) 76 | 77 | """ do the login and get the response """ 78 | response = opener.open(req) 79 | source = response.read() 80 | response.close() 81 | 82 | """ check the received html for a string that will tell us if the user is logged in """ 83 | """ pass the username, which can be used to do this. 
""" 84 | login = check_login(source,username) 85 | 86 | """ if login suceeded, save the cookiejar """ 87 | if login == True: 88 | cj.save(cookiepath) 89 | 90 | """ return whether we are logged in or not """ 91 | return login 92 | else: 93 | return False 94 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/workers.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ''' 4 | Covenant Add-on 5 | 6 | This program is free software: you can redistribute it and/or modify 7 | it under the terms of the GNU General Public License as published by 8 | the Free Software Foundation, either version 3 of the License, or 9 | (at your option) any later version. 10 | 11 | This program is distributed in the hope that it will be useful, 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | GNU General Public License for more details. 15 | 16 | You should have received a copy of the GNU General Public License 17 | along with this program. If not, see . 18 | ''' 19 | 20 | 21 | import threading 22 | 23 | 24 | class Thread(threading.Thread): 25 | def __init__(self, target, *args): 26 | self._target = target 27 | self._args = args 28 | threading.Thread.__init__(self) 29 | def run(self): 30 | self._target(*self._args) 31 | 32 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/youtube.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ''' 4 | Exodus Add-on 5 | Copyright (C) 2016 Exodus 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU General Public License as published by 9 | the Free Software Foundation, either version 3 of the License, or 10 | (at your option) any later version. 
11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU General Public License for more details. 16 | 17 | You should have received a copy of the GNU General Public License 18 | along with this program. If not, see . 19 | ''' 20 | 21 | 22 | import re,json 23 | 24 | from resources.lib.modules import client 25 | from resources.lib.modules import workers 26 | 27 | 28 | class youtube(object): 29 | def __init__(self, key=''): 30 | self.list = [] ; self.data = [] 31 | self.base_link = 'http://www.youtube.com' 32 | self.key_link = '&key=%s' % key 33 | self.playlists_link = 'https://www.googleapis.com/youtube/v3/playlists?part=snippet&maxResults=50&channelId=%s' 34 | self.playlist_link = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=%s' 35 | self.videos_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&order=date&maxResults=50&channelId=%s' 36 | self.content_link = 'https://www.googleapis.com/youtube/v3/videos?part=contentDetails&id=%s' 37 | self.play_link = 'plugin://plugin.video.youtube/play/?video_id=%s' 38 | 39 | 40 | def playlists(self, url): 41 | url = self.playlists_link % url + self.key_link 42 | return self.play_list(url) 43 | 44 | 45 | def playlist(self, url, pagination=False): 46 | cid = url.split('&')[0] 47 | url = self.playlist_link % url + self.key_link 48 | return self.video_list(cid, url, pagination) 49 | 50 | 51 | def videos(self, url, pagination=False): 52 | cid = url.split('&')[0] 53 | url = self.videos_link % url + self.key_link 54 | return self.video_list(cid, url, pagination) 55 | 56 | 57 | def play_list(self, url): 58 | try: 59 | result = client.request(url) 60 | result = json.loads(result) 61 | items = result['items'] 62 | except: 63 | pass 64 | 65 | for i in range(1, 5): 66 | try: 67 | if not 'nextPageToken' in result: raise 
Exception() 68 | next = url + '&pageToken=' + result['nextPageToken'] 69 | result = client.request(next) 70 | result = json.loads(result) 71 | items += result['items'] 72 | except: 73 | pass 74 | 75 | for item in items: 76 | try: 77 | title = item['snippet']['title'] 78 | title = title.encode('utf-8') 79 | 80 | url = item['id'] 81 | url = url.encode('utf-8') 82 | 83 | image = item['snippet']['thumbnails']['high']['url'] 84 | if '/default.jpg' in image: raise Exception() 85 | image = image.encode('utf-8') 86 | 87 | self.list.append({'title': title, 'url': url, 'image': image}) 88 | except: 89 | pass 90 | 91 | return self.list 92 | 93 | 94 | def video_list(self, cid, url, pagination): 95 | try: 96 | result = client.request(url) 97 | result = json.loads(result) 98 | items = result['items'] 99 | except: 100 | pass 101 | 102 | for i in range(1, 5): 103 | try: 104 | if pagination == True: raise Exception() 105 | if not 'nextPageToken' in result: raise Exception() 106 | page = url + '&pageToken=' + result['nextPageToken'] 107 | result = client.request(page) 108 | result = json.loads(result) 109 | items += result['items'] 110 | except: 111 | pass 112 | 113 | try: 114 | if pagination == False: raise Exception() 115 | next = cid + '&pageToken=' + result['nextPageToken'] 116 | except: 117 | next = '' 118 | 119 | for item in items: 120 | try: 121 | title = item['snippet']['title'] 122 | title = title.encode('utf-8') 123 | 124 | try: url = item['snippet']['resourceId']['videoId'] 125 | except: url = item['id']['videoId'] 126 | url = url.encode('utf-8') 127 | 128 | image = item['snippet']['thumbnails']['high']['url'] 129 | if '/default.jpg' in image: raise Exception() 130 | image = image.encode('utf-8') 131 | 132 | append = {'title': title, 'url': url, 'image': image} 133 | if not next == '': append['next'] = next 134 | self.list.append(append) 135 | except: 136 | pass 137 | 138 | try: 139 | u = [range(0, len(self.list))[i:i+50] for i in range(len(range(0, 
len(self.list))))[::50]] 140 | u = [','.join([self.list[x]['url'] for x in i]) for i in u] 141 | u = [self.content_link % i + self.key_link for i in u] 142 | 143 | threads = [] 144 | for i in range(0, len(u)): 145 | threads.append(workers.Thread(self.thread, u[i], i)) 146 | self.data.append('') 147 | [i.start() for i in threads] 148 | [i.join() for i in threads] 149 | 150 | items = [] 151 | for i in self.data: items += json.loads(i)['items'] 152 | except: 153 | pass 154 | 155 | for item in range(0, len(self.list)): 156 | try: 157 | vid = self.list[item]['url'] 158 | 159 | self.list[item]['url'] = self.play_link % vid 160 | 161 | d = [(i['id'], i['contentDetails']) for i in items] 162 | d = [i for i in d if i[0] == vid] 163 | d = d[0][1]['duration'] 164 | 165 | duration = 0 166 | try: duration += 60 * 60 * int(re.findall('(\d*)H', d)[0]) 167 | except: pass 168 | try: duration += 60 * int(re.findall('(\d*)M', d)[0]) 169 | except: pass 170 | try: duration += int(re.findall('(\d*)S', d)[0]) 171 | except: pass 172 | duration = str(duration) 173 | 174 | self.list[item]['duration'] = duration 175 | except: 176 | pass 177 | 178 | return self.list 179 | 180 | 181 | def thread(self, url, i): 182 | try: 183 | result = client.request(url) 184 | self.data[i] = result 185 | except: 186 | return 187 | 188 | 189 | -------------------------------------------------------------------------------- /lib/resources/lib/modules/youtube_menu.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ''' 4 | 5 | This program is free software: you can redistribute it and/or modify 6 | it under the terms of the GNU General Public License as published by 7 | the Free Software Foundation, either version 3 of the License, or 8 | (at your option) any later version. 
9 | 10 | This program is distributed in the hope that it will be useful, 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | GNU General Public License for more details. 14 | 15 | You should have received a copy of the GNU General Public License 16 | along with this program. If not, see . 17 | ''' 18 | 19 | 20 | import xbmcplugin,xbmcgui,sys,os,re,urllib,urllib2 21 | 22 | from resources.lib.modules import client 23 | from resources.lib.modules import control 24 | from resources.lib.modules import workers 25 | 26 | syshandle = int(sys.argv[1]) 27 | 28 | class youtube_menu(object): 29 | def __init__(self): 30 | self.agent = 'VGFudHJ1bUFkZG9uQWdlbnQ='.decode('base64') 31 | self.key_id = 'QUl6YVN5QTU2ckhCQXlLMENsMFA0dURNXzEyc05Pd1VtQWFhczhF'.decode('base64') 32 | 33 | def openMenuFile(self, menuFile): 34 | req = urllib2.Request(menuFile) 35 | req.add_header('User-Agent', self.agent) 36 | response = urllib2.urlopen(req) 37 | link=response.read() 38 | response.close() 39 | return link 40 | 41 | def processMenuFile(self, menuFile): 42 | link = self.openMenuFile(menuFile).replace('\n','').replace('\r','') 43 | match = re.compile('name="(.+?)".+?ection="(.+?)".+?earch="(.+?)".+?ubid="(.+?)".+?laylistid="(.+?)".+?hannelid="(.+?)".+?ideoid="(.+?)".+?con="(.+?)".+?anart="(.+?)".+?escription="(.+?)"').findall(link) 44 | return match 45 | 46 | def addMenuItem(self, name, action, subid, iconimage, fanart, description='', isFolder=True): 47 | u=sys.argv[0] + "?action=" + action + "&subid=" + subid 48 | liz=control.item(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage) 49 | liz.setInfo(type="Video", infoLabels={"Title": name, "Plot": description}) 50 | liz.setProperty('fanart_image', fanart) 51 | control.addItem(handle=syshandle,url=u,listitem=liz,isFolder=isFolder) 52 | 53 | def addSectionItem(self, name, iconimage, fanart): 54 | u=sys.argv[0]+"?action=sectionItem" 55 | 
liz=control.item(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage) 56 | liz.setProperty('fanart_image', fanart) 57 | control.addItem(handle=syshandle,url=u,listitem=liz,isFolder=False) 58 | 59 | def addSearchItem(self, name, search_id, icon, fanart): 60 | work_url = "plugin://plugin.video.youtube/kodion/search/query/?q="+search_id+"/" 61 | liz=control.item(name) 62 | liz.setInfo( type="Video", infoLabels={ "Title": name }) 63 | liz.setArt({ 'thumb': icon, 'banner' : 'DefaultVideo.png', 'fanart': fanart }) 64 | # liz.setPath(work_url) 65 | control.addItem(handle=syshandle,url=work_url,listitem=liz,isFolder=True) 66 | 67 | def addChannelItem(self, name, channel_id, icon, fanart): 68 | work_url = "plugin://plugin.video.youtube/channel/"+channel_id+"/" 69 | liz=control.item(name) 70 | liz.setInfo( type="Video", infoLabels={ "Title": name }) 71 | liz.setArt({ 'thumb': icon, 'banner' : 'DefaultVideo.png', 'fanart': fanart }) 72 | # liz.setPath(work_url) 73 | control.addItem(handle=syshandle,url=work_url,listitem=liz,isFolder=True) 74 | 75 | def addPlaylistItem(self, name, playlist_id, icon, fanart): 76 | work_url = "plugin://plugin.video.youtube/playlist/"+playlist_id+"/" 77 | liz=control.item(name) 78 | liz.setInfo( type="Video", infoLabels={ "Title": name }) 79 | liz.setArt({ 'thumb': icon, 'banner' : 'DefaultVideo.png', 'fanart': fanart }) 80 | # liz.setPath(work_url) 81 | control.addItem(handle=syshandle,url=work_url,listitem=liz,isFolder=True) 82 | 83 | def addVideoItem(self, name, video_id, icon, fanart): 84 | work_url = "plugin://plugin.video.youtube/play/?video_id="+video_id 85 | liz=control.item(name) 86 | liz.setInfo( type="Video", infoLabels={ "Title": name }) 87 | liz.setArt({ 'thumb': icon, 'banner' : 'DefaultVideo.png', 'fanart': fanart }) 88 | # liz.setPath(work_url) 89 | liz.setProperty('IsPlayable', 'true') 90 | control.addItem(handle=syshandle,url=work_url,listitem=liz,isFolder=True) 91 | 
-------------------------------------------------------------------------------- /lib/resources/lib/sources/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | ''' 4 | Covenant Add-on 5 | Copyright (C) 2017 Covenant 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU General Public License as published by 9 | the Free Software Foundation, either version 3 of the License, or 10 | (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU General Public License for more details. 16 | 17 | You should have received a copy of the GNU General Public License 18 | along with this program. If not, see . 19 | ''' 20 | 21 | import pkgutil 22 | import os.path 23 | 24 | from resources.lib.modules import log_utils 25 | 26 | __all__ = [x[1] for x in os.walk(os.path.dirname(__file__))][0] 27 | 28 | 29 | def sources(): 30 | try: 31 | sourceDict = [] 32 | for i in __all__: 33 | for loader, module_name, is_pkg in pkgutil.walk_packages([os.path.join(os.path.dirname(__file__), i)]): 34 | if is_pkg: 35 | continue 36 | 37 | try: 38 | module = loader.find_module(module_name).load_module(module_name) 39 | sourceDict.append((module_name, module.source())) 40 | except Exception as e: 41 | log_utils.log('Could not load "%s": %s' % (module_name, e), log_utils.LOGDEBUG) 42 | return sourceDict 43 | except: 44 | return [] 45 | 46 | 47 | --------------------------------------------------------------------------------