├── .gitignore
├── README.md
├── addon.xml
├── changelog.txt
├── icon.png
├── lib
├── __init__.py
├── default.py
└── lambdascrapers
│ ├── __init__.py
│ ├── modules
│ ├── __init__.py
│ ├── cache.py
│ ├── cfdecoder.py
│ ├── cfscrape.py
│ ├── cleandate.py
│ ├── cleantitle.py
│ ├── client.py
│ ├── control.py
│ ├── debrid.py
│ ├── directstream.py
│ ├── dom_parser.py
│ ├── dom_parser2.py
│ ├── js2py
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── constructors
│ │ │ ├── __init__.py
│ │ │ ├── jsarray.py
│ │ │ ├── jsboolean.py
│ │ │ ├── jsdate.py
│ │ │ ├── jsfunction.py
│ │ │ ├── jsmath.py
│ │ │ ├── jsnumber.py
│ │ │ ├── jsobject.py
│ │ │ ├── jsregexp.py
│ │ │ ├── jsstring.py
│ │ │ ├── six.py
│ │ │ └── time_helpers.py
│ │ ├── evaljs.py
│ │ ├── host
│ │ │ ├── __init__.py
│ │ │ ├── console.py
│ │ │ ├── dom
│ │ │ │ ├── __init__.py
│ │ │ │ ├── constants.py
│ │ │ │ └── interface.py
│ │ │ ├── jseval.py
│ │ │ └── jsfunctions.py
│ │ ├── legecy_translators
│ │ │ ├── __init__.py
│ │ │ ├── constants.py
│ │ │ ├── exps.py
│ │ │ ├── flow.py
│ │ │ ├── functions.py
│ │ │ ├── jsparser.py
│ │ │ ├── nodevisitor.py
│ │ │ ├── nparser.py
│ │ │ ├── objects.py
│ │ │ ├── tokenize.py
│ │ │ ├── translator.py
│ │ │ └── utils.py
│ │ ├── prototypes
│ │ │ ├── __init__.py
│ │ │ ├── jsarray.py
│ │ │ ├── jsboolean.py
│ │ │ ├── jserror.py
│ │ │ ├── jsfunction.py
│ │ │ ├── jsjson.py
│ │ │ ├── jsnumber.py
│ │ │ ├── jsobject.py
│ │ │ ├── jsregexp.py
│ │ │ ├── jsstring.py
│ │ │ └── six.py
│ │ ├── pyjs.py
│ │ ├── six.py
│ │ ├── todo
│ │ ├── translators
│ │ │ ├── __init__.py
│ │ │ ├── friendly_nodes.py
│ │ │ ├── jsregexps.py
│ │ │ ├── markdown.js
│ │ │ ├── pyjsparser.py
│ │ │ ├── pyjsparserdata.py
│ │ │ ├── six.py
│ │ │ ├── std_nodes.py
│ │ │ ├── translating_nodes.py
│ │ │ └── translator.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── definitions.py
│ │ │ ├── injector.py
│ │ │ └── six.py
│ ├── jsunfuck.py
│ ├── jsunpack.py
│ ├── log_utils.py
│ ├── proxy.py
│ ├── pyaes
│ │ ├── __init__.py
│ │ ├── aes.py
│ │ ├── blockfeeder.py
│ │ └── util.py
│ ├── regex.py
│ ├── source_utils.py
│ ├── trakt.py
│ ├── tvmaze.py
│ ├── utils.py
│ └── workers.py
│ └── sources_ lambdascrapers
│ ├── __init__.py
│ ├── de
│ ├── allucde.py
│ ├── animebase.py
│ ├── animeloads.py
│ ├── bs.py
│ ├── cine.py
│ ├── cinenator.py
│ ├── ddl.py
│ ├── filmpalast.py
│ ├── foxx.py
│ ├── hdfilme.py
│ ├── hdstreams.py
│ ├── horrorkino.py
│ ├── iload.py
│ ├── kinodogs.py
│ ├── kinoking.py
│ ├── kinow.py
│ ├── kinox.py
│ ├── lichtspielhaus.py
│ ├── movie2k-ac.py
│ ├── movie2k-ag.py
│ ├── movie2z.py
│ ├── movie4k.py
│ ├── moviesever.py
│ ├── movietown.py
│ ├── netzkino.py
│ ├── proxer.py
│ ├── pureanime.py
│ ├── serienstream.py
│ ├── seriesever.py
│ ├── stream-to.py
│ ├── streamdream.py
│ ├── streamflix.py
│ ├── streamit.py
│ ├── tata.py
│ ├── video4k.py
│ └── view4u.py
│ ├── en
│ ├── 0123putlocker.py
│ ├── 123fox.py
│ ├── 123hbo.py
│ ├── 123hulu.py
│ ├── 123movieshubz.py
│ ├── 300mbdownload.py
│ ├── 4kmovieto.py
│ ├── Hdmto.py
│ ├── animetoon.py
│ ├── azmovie.py
│ ├── bnwmovies.py
│ ├── cartoonhd.py
│ ├── cmovieshd.py
│ ├── cmovieshdbz.py
│ ├── coolmoviezone.py
│ ├── downflix.py
│ ├── extramovies.py
│ ├── filmxy.py
│ ├── fmovies.py
│ ├── freefmovies.py
│ ├── freeputlockers.py
│ ├── furk.py
│ ├── gostream.py
│ ├── gowatchseries.py
│ ├── hdpopcorns.py
│ ├── iwaatch.py
│ ├── kattv.py
│ ├── l23movies.py
│ ├── library.py
│ ├── moviesonline.py
│ ├── movietoken.py
│ ├── myprojectfreetv.py
│ ├── odb.py
│ ├── openloadmovie.py
│ ├── ororo.py
│ ├── plocker.py
│ ├── primewire.py
│ ├── putlocker.py
│ ├── reddit.py
│ ├── seehd.py
│ ├── series9.py
│ ├── seriesfree.py
│ ├── seriesonline.py
│ ├── sezonlukdizi.py
│ ├── solarmoviez.py
│ ├── tvbox.py
│ ├── vdonip.py
│ ├── videoscraper.py
│ ├── vidics.py
│ ├── watchseries.py
│ ├── xwatchseries.py
│ └── ymovies.py
│ ├── en_DebridOnly
│ ├── 300mbfilms.py
│ ├── bestmoviez.py
│ ├── ddlspot.py
│ ├── ddlvalley.py
│ ├── directdl.py
│ ├── invictus.py
│ ├── iwantmyshow.py
│ ├── moviesleak.py
│ ├── myvideolink.py
│ ├── playmovies.py
│ ├── rlsbb.py
│ ├── scenerls.py
│ ├── scnsrc.py
│ ├── ultrahdindir.py
│ └── wrzcraft.py
│ ├── en_Torrent
│ ├── bitlord.py
│ ├── eztv.py
│ ├── glodls.py
│ ├── kickass2.py
│ ├── limetorrents.py
│ ├── piratebay.py
│ ├── torrentapi.py
│ ├── torrentdownloads.py
│ ├── yify.py
│ └── zoogle.py
│ ├── es
│ ├── megapelistv.py
│ ├── peliculasdk.py
│ ├── pelisplustv.py
│ ├── pepecine.py
│ └── seriespapaya.py
│ ├── gr
│ ├── gamatotv.py
│ ├── liomenoi.py
│ ├── tainiesonline.py
│ ├── tainiomania.py
│ └── xrysoi.py
│ └── pl
│ ├── alltube.py
│ ├── boxfilm.py
│ ├── cdahd.py
│ ├── cdax.py
│ ├── ekinomaniak.py
│ ├── ekinotv.py
│ ├── filiser.py
│ ├── filmwebbooster.py
│ ├── iitv.py
│ ├── movieneo.py
│ ├── openkatalog.py
│ ├── paczamy.py
│ ├── segos.py
│ ├── szukajkatv.py
│ └── trt.py
└── resources
└── settings.xml
/.gitignore:
--------------------------------------------------------------------------------
1 | # file: ~/.gitignore
2 | *.pyo
3 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LambdaScrapers
 2 | ## **Scraper module for Exodus-based add-ons.**
3 |
--------------------------------------------------------------------------------
/addon.xml:
--------------------------------------------------------------------------------
1 |
2 |
Shady Grove | 23 |Aeolian | 24 |
Over the River, Charlie | 27 |Dorian | 28 |
').findall(r)
51 | for host, url in match:
52 | if host == 'internet': pass
53 | else: sources.append({'source': host,'quality': 'SD','language': 'en','url': url,'direct': False,'debridonly': False})
54 | except:
55 | return
56 | except Exception:
57 | return
58 | return sources
59 |
60 |
61 | def resolve(self, url):
62 | r = self.scraper.get(url).content
63 | match = re.compile('decode\("(.+?)"').findall(r)
64 | for info in match:
65 | info = base64.b64decode(info)
66 | match = re.compile('src="(.+?)"').findall(info)
67 | for url in match:
68 | return url
69 |
70 |
--------------------------------------------------------------------------------
/lib/lambdascrapers/sources_ lambdascrapers/en/Hdmto.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | '''
3 | hdmto scraper for Exodus forks.
4 | Nov 9 2018 - Checked
5 |
6 | Updated and refactored by someone.
7 | Originally created by others.
8 | '''
9 | import re
10 | import urllib
11 | import urlparse
12 | from resources.lib.modules import cleantitle
13 | from resources.lib.modules import client
14 | from resources.lib.modules import proxy
15 | from resources.lib.modules import cfscrape
16 |
17 | class source:
    def __init__(self):
        # Framework metadata: scraper priority and supported language.
        self.priority = 1
        self.language = ['en']
        # Domain(s) this scraper handles and the canonical site root.
        self.domains = ['hdm.to']
        self.base_link = 'https://hdm.to'
        # Cloudflare-aware HTTP session used for all page fetches.
        self.scraper = cfscrape.create_scraper()
24 |
25 | def movie(self, imdb, title, localtitle, aliases, year):
26 | try:
27 | url = cleantitle.geturl(title)
28 | return url
29 | except:
30 | return
31 |
32 | def sources(self, url, hostDict, hostprDict):
33 | try:
34 | sources = []
35 | url = '%s/%s/' % (self.base_link,url)
36 | r = self.scraper.get(url).content
37 | try:
38 | match = re.compile('
.
18 | """
19 |
20 | import re
21 | import traceback
22 |
23 | from resources.lib.modules import cfscrape, cleantitle, directstream, log_utils, source_utils
24 |
25 |
26 | class source:
    def __init__(self):
        # Framework metadata: scraper priority and supported language.
        self.priority = 1
        self.language = ['en']
        # Domain this scraper handles and the canonical site root.
        self.domains = ['downflix.win']
        self.base_link = 'https://en.downflix.win'
        # Movie page path template: % (title-slug, year)
        self.search_link = '/%s-%s/'
        # Cloudflare-aware HTTP session used for all page fetches.
        self.scraper = cfscrape.create_scraper()
34 |
35 | def movie(self, imdb, title, localtitle, aliases, year):
36 | try:
37 | title = cleantitle.geturl(title)
38 | url = self.base_link + self.search_link % (title, year)
39 | return url
40 | except Exception:
41 |
42 | return
43 |
44 | def sources(self, url, hostDict, hostprDict):
45 | sources = []
46 | try:
47 | if url is None:
48 | return
49 | headers = {
50 | 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
51 | holder = self.scraper.get(url, headers=headers).content
52 | Alternates = re.compile('.
18 | '''
19 |
20 | import urllib, urlparse, re
21 |
22 | from resources.lib.modules import cleantitle
23 | from resources.lib.modules import client
24 | from resources.lib.modules import source_utils
25 |
26 |
class source:
    """Scraper for filmxy: collects hoster links from a movie page's
    download and stream tabs.

    movie() packs the identifiers into a querystring; sources() resolves the
    page and emits one source dict per hoster link; resolve() is a no-op.
    """

    def __init__(self):
        # Framework metadata: scraper priority and supported language.
        self.priority = 1
        self.language = ['en']
        self.domains = ['filmxy.me']
        self.base_link = 'https://www.filmxy.one/'
        self.search_link = 'search/%s/feed/rss2/'
        # JSON index of all posts; not referenced by any method in this class.
        self.post = 'https://cdn.filmxy.one/asset/json/posts.json'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Encode the movie identifiers as a querystring for sources()."""
        try:
            return urllib.urlencode({'imdb': imdb, 'title': title, 'year': year})
        except Exception:
            return

    def sources(self, url, hostDict, hostprDict):
        """Return hoster links found on the movie page.

        hostDict hosts yield free sources, hostprDict hosts debrid-only
        sources; both are reported as non-direct 1080p entries.
        """
        sources = []
        try:
            if url is None:
                return
            params = urlparse.parse_qs(url)
            params = dict((k, params[k][0]) for k in params)

            slug = cleantitle.geturl(params['title'] + ' ' + params['year'])
            query = urlparse.urljoin(self.base_link, slug)

            page = client.request(query, referer=self.base_link, redirect=True)
            # Bail out unless the page really is for the requested IMDB id.
            if params['imdb'] not in page:
                return sources

            links = []
            # Download tab: follow the first mirror link, then harvest the
            # first anchor of every "single-link" block on the mirror page.
            try:
                tab = client.parseDOM(page, 'div', attrs={'id': 'tab-download'})[0]
                mirror = client.request(client.parseDOM(tab, 'a', ret='href')[0])
                blocks = client.parseDOM(mirror, 'div', attrs={'class': 'single-link'})
                links.extend([client.parseDOM(b, 'a', ret='href')[0] for b in blocks if b])
            except Exception:
                pass
            # Stream tab: pull iframe sources straight out of the markup.
            try:
                tab = client.parseDOM(page, 'div', attrs={'id': 'tab-stream'})[0]
                links.extend(re.findall('iframe src=(.+?) frameborder',
                                        tab.replace('"', ''), re.I | re.DOTALL))
            except Exception:
                pass

            for link in links:
                try:
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    debrid_only = False
                    if not valid:
                        valid, host = source_utils.is_host_valid(link, hostprDict)
                        if not valid:
                            continue
                        debrid_only = True
                    host = client.replaceHTMLCodes(host).encode('utf-8')
                    # Single append replaces the previous duplicated branches
                    # that differed only in the debridonly flag.
                    sources.append({'source': host, 'quality': '1080p',
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': debrid_only})
                except Exception:
                    pass
            return sources
        except Exception:
            return sources

    def resolve(self, url):
        """Hoster URLs need no post-processing; hand them back unchanged."""
        return url
--------------------------------------------------------------------------------
/lib/lambdascrapers/sources_ lambdascrapers/en/freefmovies.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | '''
3 | freefmovies scraper for Exodus forks.
4 | Nov 9 2018 - Checked
5 |
6 | Updated and refactored by someone.
7 | Originally created by others.
8 | '''
9 | import re,urllib,urlparse,json,base64,time
10 |
11 | from resources.lib.modules import cleantitle
12 | from resources.lib.modules import dom_parser2
13 | from resources.lib.modules import client
14 |
class source:
    """Scraper for freefmovies.net.

    movie()/episode() resolve content to a watch-page URL, sources() lists
    the per-server links on that page, and resolve() replays the site's
    ipplugins player handshake to obtain the final stream URL.
    """

    def __init__(self):
        # Framework metadata: scraper priority and supported language.
        self.priority = 1
        self.language = ['en']
        self.domains = ['freefmovies.net']
        self.base_link = 'http://freefmovies.net'
        # Watch-page path template: % (title-slug, year)
        self.search_link = '/watch/%s-%s-online-fmovies.html'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the watch-page URL for a movie, or None on failure."""
        try:
            clean_title = cleantitle.geturl(title)
            return urlparse.urljoin(self.base_link,
                                    self.search_link % (clean_title, year))
        # Narrowed from bare except: (don't swallow SystemExit/KeyboardInterrupt).
        except Exception:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Pack the show's identifiers into a querystring; episode() unpacks it."""
        try:
            aliases.append({'country': 'uk', 'title': tvshowtitle})
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle,
                   'year': year, 'aliases': aliases}
            return urllib.urlencode(url)
        except Exception:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find one episode's page URL by scanning the show's season page."""
        try:
            if url is None:
                return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            clean_title = cleantitle.geturl(data['tvshowtitle']) + '-s%02d' % int(season)
            season_url = urlparse.urljoin(self.base_link,
                                          self.search_link % (clean_title, data['year']))
            r = client.request(season_url)
            r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            # The episode list is a flat run of anchors labelled "Episode N".
            for i in r[0]:
                if i.content == 'Episode %s' % episode:
                    return i.attrs['href']
        except Exception:
            return

    def sources(self, url, hostDict, hostprDict):
        """Return one source dict per server button on the watch page."""
        sources = []
        try:
            if url is None:
                return sources
            r = client.request(url)
            # The page advertises "HD" or "SD" in a <p> tag; map to a label.
            quality = re.findall(">(\w+)<\/p", r)
            quality = "720p" if quality[0] == "HD" else "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            for i in r[0]:
                # resolve() needs the player's POST parameters, so smuggle
                # them through the url field as a querystring.
                link = urllib.urlencode({'url': i.attrs['href'],
                                        'data-film': i.attrs['data-film'],
                                        'data-server': i.attrs['data-server'],
                                        'data-name': i.attrs['data-name']})
                sources.append({'source': i.content, 'quality': quality,
                                'language': 'en', 'url': link,
                                'direct': False, 'debridonly': False})
            return sources
        except Exception:
            return sources

    def resolve(self, url):
        """Turn a sources() entry into a direct https stream URL.

        Replays the site's three-step ipplugins handshake; retries once with
        the fallback flag when the first mirror reports a failed status.
        Returns None on any error.
        """
        try:
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            post = {'ipplugins': 1, 'ip_film': urldata['data-film'],
                    'ip_server': urldata['data-server'],
                    'ip_name': urldata['data-name'], 'fix': "0"}
            # Use base_link rather than repeating the host name in every URL.
            p1 = client.request(self.base_link + '/ip.file/swf/plugins/ipplugins.php',
                                post=post, referer=urldata['url'], XHR=True)
            p1 = json.loads(p1)
            p2 = client.request(self.base_link +
                                '/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0'
                                % (p1['s'], urldata['data-server']))
            p2 = json.loads(p2)
            p3 = client.request(self.base_link +
                                '/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash']))
            p3 = json.loads(p3)
            if p3['status'] == False:  # site returns an explicit boolean
                p2 = client.request(self.base_link +
                                    '/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1'
                                    % (p1['s'], urldata['data-server']))
                p2 = json.loads(p2)
            return "https:%s" % p2["data"].replace("\/", "/")
        except Exception:
            return
98 |
--------------------------------------------------------------------------------
/lib/lambdascrapers/sources_ lambdascrapers/en/freeputlockers.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | '''
3 | freeputlockers scraper for Exodus forks.
4 | Nov 9 2018 - Checked
5 |
6 | Updated and refactored by someone.
7 | Originally created by others.
8 | '''
9 | import re,urllib,urlparse,json,base64,time
10 |
11 | from resources.lib.modules import cleantitle
12 | from resources.lib.modules import dom_parser2
13 | from resources.lib.modules import client
14 | from resources.lib.modules import debrid
15 |
class source:
    """Scraper for freeputlockers.org.

    Structurally identical to the freefmovies scraper: movie()/episode()
    resolve content to a watch-page URL, sources() lists the per-server
    links, and resolve() replays the site's ipplugins player handshake.
    """

    def __init__(self):
        # Framework metadata: scraper priority and supported language.
        self.priority = 1
        self.language = ['en']
        self.domains = ['freeputlockers.org']
        self.base_link = 'http://freeputlockers.org'
        # Watch-page path template: % (title-slug, year)
        self.search_link = '/watch/%s-%s-online-putlockers.html'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the watch-page URL for a movie, or None on failure."""
        try:
            clean_title = cleantitle.geturl(title)
            return urlparse.urljoin(self.base_link,
                                    self.search_link % (clean_title, year))
        # Narrowed from bare except: (don't swallow SystemExit/KeyboardInterrupt).
        except Exception:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Pack the show's identifiers into a querystring; episode() unpacks it."""
        try:
            aliases.append({'country': 'us', 'title': tvshowtitle})
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle,
                   'year': year, 'aliases': aliases}
            return urllib.urlencode(url)
        except Exception:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find one episode's page URL by scanning the show's season page."""
        try:
            if url is None:
                return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            clean_title = cleantitle.geturl(data['tvshowtitle']) + '-s%02d' % int(season)
            season_url = urlparse.urljoin(self.base_link,
                                          self.search_link % (clean_title, data['year']))
            r = client.request(season_url)
            r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            # The episode list is a flat run of anchors labelled "Episode N".
            for i in r[0]:
                if i.content == 'Episode %s' % episode:
                    return i.attrs['href']
        except Exception:
            return

    def sources(self, url, hostDict, hostprDict):
        """Return one source dict per server button on the watch page."""
        sources = []
        try:
            if url is None:
                return sources
            r = client.request(url)
            # The page advertises "HD" or "SD" in a <p> tag; map to a label.
            quality = re.findall(">(\w+)<\/p", r)
            quality = "720p" if quality[0] == "HD" else "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            for i in r[0]:
                # resolve() needs the player's POST parameters, so smuggle
                # them through the url field as a querystring.
                link = urllib.urlencode({'url': i.attrs['href'],
                                        'data-film': i.attrs['data-film'],
                                        'data-server': i.attrs['data-server'],
                                        'data-name': i.attrs['data-name']})
                sources.append({'source': i.content, 'quality': quality,
                                'language': 'en', 'url': link,
                                'direct': False, 'debridonly': False})
            return sources
        except Exception:
            return sources

    def resolve(self, url):
        """Turn a sources() entry into a direct https stream URL.

        Replays the site's three-step ipplugins handshake; retries once with
        the fallback flag when the first mirror reports a failed status.
        Returns None on any error.
        """
        try:
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            post = {'ipplugins': 1, 'ip_film': urldata['data-film'],
                    'ip_server': urldata['data-server'],
                    'ip_name': urldata['data-name'], 'fix': "0"}
            # Use base_link rather than repeating the host name in every URL.
            p1 = client.request(self.base_link + '/ip.file/swf/plugins/ipplugins.php',
                                post=post, referer=urldata['url'], XHR=True)
            p1 = json.loads(p1)
            p2 = client.request(self.base_link +
                                '/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0'
                                % (p1['s'], urldata['data-server']))
            p2 = json.loads(p2)
            p3 = client.request(self.base_link +
                                '/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash']))
            p3 = json.loads(p3)
            if p3['status'] == False:  # site returns an explicit boolean
                p2 = client.request(self.base_link +
                                    '/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1'
                                    % (p1['s'], urldata['data-server']))
                p2 = json.loads(p2)
            return "https:%s" % p2["data"].replace("\/", "/")
        except Exception:
            return
99 |
--------------------------------------------------------------------------------
/lib/lambdascrapers/sources_ lambdascrapers/en/hdpopcorns.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | '''
3 | hdpopcorns scraper for Exodus forks.
4 | Nov 9 2018 - Checked
5 | Oct 10 2018 - Cleaned and Checked
6 |
7 | Updated and refactored by someone.
8 | Originally created by others.
9 | '''
10 |
11 | import re,requests,traceback,base64,urllib,urlparse
12 |
13 | from resources.lib.modules import cleantitle
14 | from resources.lib.modules import client
15 | from resources.lib.modules import log_utils
16 | from resources.lib.modules import debrid
17 | from resources.lib.modules import cfscrape
18 |
19 | class source:
    def __init__(self):
        # Framework metadata: scraper priority and supported language.
        self.priority = 1
        self.language = ['en']
        # Known mirror domains; base_link is the mirror currently scraped.
        self.domains = ['hdpopcorns.co','hdpopcorns.eu']
        self.base_link = 'http://hdpopcorns.co'
        # Site search path template: % (query)
        self.search_link = '/?s=%s'
        # Cloudflare-aware HTTP session used for all page fetches.
        self.scraper = cfscrape.create_scraper()
27 |
28 | def movie(self, imdb, title, localtitle, aliases, year):
29 | try:
30 | search_id = title.replace(':', ' ').replace(' ', '+').lower()
31 | start_url = urlparse.urljoin(self.base_link, self.search_link % (search_id))
32 |
33 | search_results = self.scraper.get(start_url).content
34 | match = re.compile('