            # The page <title> is used to verify that the guessed URL really is the movie.
            chktitle = re.compile('<title>(.+?)</title>', re.DOTALL).findall(chkhtml)[0]
            if cleantitle.get(title) in cleantitle.get(chktitle):
                if year in chktitle:
                    return url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('BNWMovies - Exception: \n' + str(failure))
            return

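    # Collect playable links for a resolved movie page. hostDict lists the file
    # hosters the resolver can handle; hostprDict lists premium/debrid-only hosters.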
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            html = client.request(url)

            # NOTE: the pattern below is an assumption (generic embedded-player iframes);
            # the site's actual markup may differ.
            Links = re.compile('<iframe.+?src="(.+?)"', re.DOTALL).findall(html)
            for link in Links:
                host = link.split('//')[-1].split('/')[0].replace('www.', '')
                if host in hostDict:
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                    'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            log_utils.log('BNWMovies - Exception: \n' + str(traceback.format_exc()))
            return sources


# ---- cmovieshd.net scraper ----

import re

from openscrapers.modules import cleantitle
from openscrapers.modules import client


class source:
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['cmovieshd.net']
        self.base_link = 'https://cmovieshd.net'
        self.search_link = '/search/?q=%s'

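    # Search cmovieshd.net for the title and return the first result whose
    # cleaned title and year match the request.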
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            title = cleantitle.geturl(title).replace('-', '+')
            u = self.base_link + self.search_link % title
            u = client.request(u)
            i = client.parseDOM(u, "div", attrs={"class": "movies-list"})
            for r in i:
                # Assumed result markup: each entry is an anchor whose href is the movie
                # page and whose title attribute carries the name and year.
                r = re.compile('<a href="(.+?)" title="(.+?)"').findall(r)
                for link, name in r:
                    if cleantitle.get(title) in cleantitle.get(name) and year in name:
                        return link
            return
        except:
            return

    # Read the advertised quality from the watch page, then walk the episode/server
    # list and emit one source entry per link.
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            r = client.request(url)
            # Quality badge (same markup as the cmovieshd.bz scraper further down).
            qual = re.compile('class="quality">(.+?)<').findall(r)
            for i in qual:
                if 'HD' in i:
                    quality = '720p'
                else:
                    quality = 'SD'
            r = client.parseDOM(r, "div", attrs={"id": "list-eps"})
            for i in r:
                # Assumed episode-list markup: plain anchors around each stream link.
                t = re.compile('<a href="(.+?)"').findall(i)
                for link in t:
                    host = link.split('//')[-1].split('/')[0].replace('www.', '')
                    if host not in hostDict:
                        continue
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources


# ---- cmovieshd.bz scraper ----

import re
import requests

from openscrapers.modules import cleantitle
from openscrapers.modules import client
from openscrapers.modules import source_utils


class source:
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['cmovieshd.bz']
        self.base_link = 'http://w1.cmovieshd.bz'
        self.search_link = '/film/%s/watching.html?ep=0'

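    # This site is addressed by slug, so movie() only packages the cleaned title
    # and year; the watch-page URL is built later in sources().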
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            title = cleantitle.geturl(title).replace('--', '-')
            url = {'title': title, 'year': year}
            return url
        except:
            return

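    # Request the watch page for the plain title slug first, then retry with a
    # "title-year" slug if the first request fails.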
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            queries = [url['title'], '%s-%s' % (url['title'], url['year'])]

            r = requests.get('%s%s' % (self.base_link, self.search_link % queries[0]))
            if not r.ok:
                r = requests.get('%s%s' % (self.base_link, self.search_link % queries[1]))
                if not r.ok:
                    return

            r = r.content
            qual = re.compile('class="quality">(.+?)<').findall(r)

            # Note: only the last quality badge found on the page ends up in `quality`.
            for i in qual:
                if '1080' in i:
                    quality = '1080p'
                elif '720' in i:
                    quality = '720p'
                else:
                    quality = 'SD'
            u = client.parseDOM(r, "div", attrs={"class": "pa-main anime_muti_link"})

            for t in u:
                # Assumed server-list markup: each entry carries its embed URL in a
                # data-video attribute.
                urls = re.findall('data-video="(.+?)"', t)
                for link in urls:
                    if link.startswith('//'):
                        link = 'https:' + link
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    if not valid:
                        continue
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources


# ---- coolmoviezone.online scraper ----

import re

from openscrapers.modules import cleantitle, client, source_utils


class source:
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['coolmoviezone.online']
        self.base_link = 'https://coolmoviezone.online'

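    # coolmoviezone pages live directly at "/<title-slug>-<year>", so the movie
    # URL can be built without a search request.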
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            title = cleantitle.geturl(title)
            url = self.base_link + '/%s-%s' % (title, year)
            return url
        except:
            return

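    # Fetch the movie page and turn every external hoster link found on it into
    # a source entry.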
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            r = client.request(url)
            # Assumed markup: hoster links sit in centered cells of the download table.
            match = re.compile('<td align="center"><strong><a href="(.+?)"').findall(r)
            for link in match:
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].lower()
                quality = source_utils.check_sd_url(link)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources
25 | """
26 |
import re
import traceback

from openscrapers.modules import cfscrape, cleantitle, directstream, log_utils, source_utils


class source:
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['downflix.win']
        self.base_link = 'https://en.downflix.win'
        self.search_link = '/%s-%s/'
        # cfscrape provides a requests session that can pass Cloudflare's checks.
        self.scraper = cfscrape.create_scraper()

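    # Pages live at "/<title-slug>-<year>/", so the URL is built straight from
    # the cleaned title and year.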
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            title = cleantitle.geturl(title)
            url = self.base_link + self.search_link % (title, year)
            return url
        except Exception:
            return

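    # Fetch the page through the Cloudflare-aware session with a desktop
    # User-Agent, then harvest the alternate/mirror links it lists.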
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            holder = self.scraper.get(url, headers=headers).content
            # Assumed mirror-list markup: each alternate entry exposes its target
            # link in an href attribute.
            Alternates = re.compile('class="alternate".+?href="(.+?)"', re.DOTALL).findall(holder)
            for link in Alternates:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                'url': link, 'direct': False, 'debridonly': False})
            return sources
        except Exception:
            log_utils.log('Downflix - Exception: \n' + str(traceback.format_exc()))
            return sources
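

# Minimal usage sketch (not part of the add-on; OpenScrapers normally drives these
# classes through its resolver). The IMDb id, title, year, and hoster list below
# are illustrative values only.
if __name__ == '__main__':
    s = source()
    link = s.movie('tt0000000', 'Example Movie', 'Example Movie', [], '2019')
    for item in s.sources(link, ['vidcloud.co', 'streamtape.com'], []):
        print('%s %s %s' % (item['source'], item['quality'], item['url']))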