16 |
17 | # These work like equivalents from logging. Use logging direct if you
18 | # have 2.3.
19 | from _Debug import getLogger, StreamHandler, NOTSET, INFO, DEBUG
20 |
21 | from _ClientCookie import VERSION, __doc__, \
22 | Cookie, \
23 | CookiePolicy, DefaultCookiePolicy, \
24 | CookieJar, FileCookieJar, LoadError, request_host
25 | from _LWPCookieJar import LWPCookieJar, lwp_cookie_str
26 | from _MozillaCookieJar import MozillaCookieJar
27 | try:
28 | from _MSIECookieJar import MSIECookieJar
29 | except:
30 | pass
31 | try:
32 | import bsddb
33 | except ImportError:
34 | pass
35 | else:
36 | from _BSDDBCookieJar import BSDDBCookieJar, CreateBSDDBCookieJar
37 | #from _MSIEDBCookieJar import MSIEDBCookieJar
38 | from _ConnCache import ConnectionCache
39 | try:
40 | from urllib2 import AbstractHTTPHandler
41 | except ImportError:
42 | pass
43 | else:
44 | from ClientCookie._urllib2_support import \
45 | Request, \
46 | OpenerDirector, build_opener, install_opener, urlopen, \
47 | OpenerFactory, urlretrieve, BaseHandler
48 | from ClientCookie._urllib2_support import \
49 | HTTPHandler, HTTPRedirectHandler, \
50 | HTTPRequestUpgradeProcessor, \
51 | HTTPEquivProcessor, SeekableProcessor, HTTPCookieProcessor, \
52 | HTTPRefererProcessor, \
53 | HTTPRefreshProcessor, HTTPErrorProcessor, \
54 | HTTPResponseDebugProcessor, HTTPRedirectDebugProcessor
55 |
56 | try:
57 | import robotparser
58 | except ImportError:
59 | pass
60 | else:
61 | from ClientCookie._urllib2_support import \
62 | HTTPRobotRulesProcessor, RobotExclusionError
63 | del robotparser
64 |
65 | import httplib
66 | if hasattr(httplib, 'HTTPS'):
67 | from ClientCookie._urllib2_support import HTTPSHandler
68 | del AbstractHTTPHandler, httplib
69 | from _Util import http2time
70 | str2time = http2time
71 | del http2time
72 |
--------------------------------------------------------------------------------
/lib/gdata/tlslite/utils/Python_AES.py:
--------------------------------------------------------------------------------
1 | """Pure-Python AES implementation."""
2 |
3 | from cryptomath import *
4 |
5 | from AES import *
6 | from rijndael import rijndael
7 |
def new(key, mode, IV):
    """Factory matching the AES module interface: build a pure-Python cipher."""
    cipher = Python_AES(key, mode, IV)
    return cipher
10 |
class Python_AES(AES):
    """AES in CBC mode on top of the pure-Python rijndael implementation.

    The running IV is stored back into self.IV after every call, so
    successive encrypt()/decrypt() calls continue the same CBC chain.
    Python 2 code: stringToBytes/bytesToString come from cryptomath and
    len(...)/16 relies on integer division.
    """

    def __init__(self, key, mode, IV):
        # Argument validation is delegated to the AES base class.
        AES.__init__(self, key, mode, IV, "python")
        # 16 = AES block size in bytes.
        self.rijndael = rijndael(key, 16)
        self.IV = IV

    def encrypt(self, plaintext):
        """CBC-encrypt *plaintext* and return the ciphertext string.

        Assumes the input is a whole number of 16-byte blocks —
        presumably enforced by AES.encrypt; TODO confirm.
        """
        AES.encrypt(self, plaintext)

        plaintextBytes = stringToBytes(plaintext)
        chainBytes = stringToBytes(self.IV)

        #CBC Mode: For each block...
        for x in range(len(plaintextBytes)/16):

            #XOR with the chaining block
            blockBytes = plaintextBytes[x*16 : (x*16)+16]
            for y in range(16):
                blockBytes[y] ^= chainBytes[y]
            blockString = bytesToString(blockBytes)

            #Encrypt it
            encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString))

            #Overwrite the input with the output
            for y in range(16):
                plaintextBytes[(x*16)+y] = encryptedBytes[y]

            #Set the next chaining block
            chainBytes = encryptedBytes

        # Persist the final ciphertext block as IV for the next call.
        self.IV = bytesToString(chainBytes)
        return bytesToString(plaintextBytes)

    def decrypt(self, ciphertext):
        """CBC-decrypt *ciphertext* and return the plaintext string.

        self.IV is updated to the last ciphertext block so a following
        call continues the chain.
        """
        AES.decrypt(self, ciphertext)

        ciphertextBytes = stringToBytes(ciphertext)
        chainBytes = stringToBytes(self.IV)

        #CBC Mode: For each block...
        for x in range(len(ciphertextBytes)/16):

            #Decrypt it
            blockBytes = ciphertextBytes[x*16 : (x*16)+16]
            blockString = bytesToString(blockBytes)
            decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString))

            #XOR with the chaining block and overwrite the input with output
            for y in range(16):
                decryptedBytes[y] ^= chainBytes[y]
                ciphertextBytes[(x*16)+y] = decryptedBytes[y]

            #Set the next chaining block (the pre-decryption ciphertext block)
            chainBytes = blockBytes

        self.IV = bytesToString(chainBytes)
        return bytesToString(ciphertextBytes)
69 |
--------------------------------------------------------------------------------
/core/ziptools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: iso-8859-1 -*-
2 | #------------------------------------------------------------
3 | # pelisalacarta - XBMC Plugin
4 | # Zip Tools
5 | # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
6 | #------------------------------------------------------------
7 | import base64, re, urllib, string, sys, zipfile, os, os.path
8 | import xbmc
9 | import config
10 | import logger
11 |
class ziptools:
    """Helpers to extract ZIP archives, recreating their directory layout."""

    def extract(self, file, dir):
        """Extract every entry of ZIP archive *file* into directory *dir*."""
        logger.info("file=%s" % file)
        logger.info("dir=%s" % dir)

        # A trailing ':' is a bare Windows drive root, which always exists.
        if not dir.endswith(':') and not os.path.exists(dir):
            os.mkdir(dir)

        zf = zipfile.ZipFile(file)
        self._createstructure(file, dir)

        for name in zf.namelist():
            logger.info("name=%s" % name)
            # Entries ending in '/' are directories, handled above.
            if not name.endswith('/'):
                logger.info("no es un directorio")
                try:
                    (path, filename) = os.path.split(os.path.join(dir, name))
                    logger.info("path=%s" % path)
                    logger.info("name=%s" % name)
                    os.makedirs(path)
                except:
                    # Best-effort: the directory usually exists already.
                    pass
                outfilename = os.path.join(dir, name)
                logger.info("outfilename=%s" % outfilename)
                try:
                    outfile = open(outfilename, 'wb')
                    try:
                        outfile.write(zf.read(name))
                    finally:
                        # BUGFIX: the handle was previously never closed
                        # (file descriptor leak on every extracted entry).
                        outfile.close()
                except:
                    logger.info("Error en fichero "+name)

    def _createstructure(self, file, dir):
        """Create the directory skeleton declared inside the archive."""
        self._makedirs(self._listdirs(file), dir)

    def create_necessary_paths(filename):
        # NOTE(review): missing 'self' — calling this on an instance passes
        # the instance as 'filename' and the bare except hides the failure.
        # Kept as-is for interface compatibility with existing callers.
        try:
            (path, name) = os.path.split(filename)
            os.makedirs(path)
        except:
            pass

    def _makedirs(self, directories, basedir):
        """Create each (relative) directory entry below *basedir*."""
        for dir in directories:
            curdir = os.path.join(basedir, dir)
            if not os.path.exists(curdir):
                os.mkdir(curdir)

    def _listdirs(self, file):
        """Return the sorted list of directory entries inside the archive."""
        zf = zipfile.ZipFile(file)
        dirs = []
        for name in zf.namelist():
            if name.endswith('/'):
                dirs.append(name)

        dirs.sort()
        return dirs
69 |
--------------------------------------------------------------------------------
/lib/gdata/tlslite/integration/TLSSocketServerMixIn.py:
--------------------------------------------------------------------------------
1 | """TLS Lite + SocketServer."""
2 |
3 | from gdata.tlslite.TLSConnection import TLSConnection
4 |
class TLSSocketServerMixIn:
    """
    Mix-in that adds TLS support to any L{SocketServer.TCPServer}.

    Derive a new class from this mix-in *and* some
    L{SocketServer.TCPServer} (mix-in listed first), then implement
    handshake() to perform the server-side handshake on the connection
    argument.  When handshake() returns True the RequestHandler is
    invoked.  Complete threaded HTTPS server example::

        from SocketServer import *
        from BaseHTTPServer import *
        from SimpleHTTPServer import *
        from tlslite.api import *

        s = open("./serverX509Cert.pem").read()
        x509 = X509()
        x509.parse(s)
        certChain = X509CertChain([x509])

        s = open("./serverX509Key.pem").read()
        privateKey = parsePEMKey(s, private=True)

        sessionCache = SessionCache()

        class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn,
                           HTTPServer):
            def handshake(self, tlsConnection):
                try:
                    tlsConnection.handshakeServer(certChain=certChain,
                                                  privateKey=privateKey,
                                                  sessionCache=sessionCache)
                    tlsConnection.ignoreAbruptClose = True
                    return True
                except TLSError, error:
                    print "Handshake failure:", str(error)
                    return False

        httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler)
        httpd.serve_forever()
    """

    def finish_request(self, sock, client_address):
        """Wrap *sock* in a TLSConnection, handshake, then dispatch."""
        connection = TLSConnection(sock)
        # Only hand the connection over when the subclass-supplied
        # handshake explicitly succeeded.
        if self.handshake(connection) == True:
            self.RequestHandlerClass(connection, client_address, self)
        connection.close()

    def handshake(self, tlsConnection):
        """Override in a subclass; return True when the TLS handshake
        succeeded and the request is authorized."""
        raise NotImplementedError()
60 |
--------------------------------------------------------------------------------
/lib/simplejson/scanner.py:
--------------------------------------------------------------------------------
1 | """JSON token scanner
2 | """
3 | import re
4 | try:
5 | from simplejson._speedups import make_scanner as c_make_scanner
6 | except ImportError:
7 | c_make_scanner = None
8 |
9 | __all__ = ['make_scanner']
10 |
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))

def py_make_scanner(context):
    """Build a pure-Python scanner for *context*.

    Returns a closure scan(string, idx) -> (value, end_index) that reads
    exactly one JSON value starting at idx, delegating containers and
    strings to the context's parser callbacks.  Raises StopIteration on
    end of input or an unrecognized token.
    """
    # Bind every context attribute to a local once; the closure then
    # avoids repeated attribute lookups on each token.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook

    def _scan_once(string, idx):
        try:
            ch = string[idx]
        except IndexError:
            # Ran off the end of the input.
            raise StopIteration

        if ch == '"':
            return parse_string(string, idx + 1, encoding, strict)
        if ch == '{':
            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
        if ch == '[':
            return parse_array((string, idx + 1), _scan_once)
        if ch == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        if ch == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        if ch == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5

        # Numbers are tried before the non-standard constants below.
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        if ch == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        if ch == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        if ch == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        raise StopIteration

    return _scan_once
64 |
65 | make_scanner = c_make_scanner or py_make_scanner
66 |
--------------------------------------------------------------------------------
/lib/gdata/tlslite/utils/dateFuncs.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 |
4 | #Functions for manipulating datetime objects
5 | #CCYY-MM-DDThh:mm:ssZ
def parseDateClass(s):
    """Parse a 'CCYY-MM-DDThh:mm:ssZ' string into a platform date object."""
    year, month, rest = s.split("-")
    day = rest[:2]
    clock = rest[3:]            # skip the day digits and the 'T' separator
    hour, minute, second = clock.split(":")
    second = second[:2]         # drop the trailing 'Z'
    return createDateClass(int(year), int(month), int(day),
                           int(hour), int(minute), int(second))
14 |
15 |
# Two implementations of the same date-helper API: the CPython branch uses
# datetime, the Jython branch emulates it with java.util.Calendar.
if os.name != "java":
    from datetime import datetime, timedelta

    #Helper functions for working with a date/time class
    def createDateClass(year, month, day, hour, minute, second):
        # Naive datetime; all values produced here are treated as UTC.
        return datetime(year, month, day, hour, minute, second)

    def printDateClass(d):
        #Split off fractional seconds, append 'Z'
        return d.isoformat().split(".")[0]+"Z"

    def getNow():
        return datetime.utcnow()

    def getHoursFromNow(hours):
        return datetime.utcnow() + timedelta(hours=hours)

    def getMinutesFromNow(minutes):
        # NOTE(review): no Jython equivalent is defined in the else-branch
        # below — presumably unused on Jython; confirm before relying on it.
        return datetime.utcnow() + timedelta(minutes=minutes)

    def isDateClassExpired(d):
        return d < datetime.utcnow()

    def isDateClassBefore(d1, d2):
        return d1 < d2

else:
    #Jython 2.1 is missing lots of python 2.3 stuff,
    #which we have to emulate here:
    import java
    import jarray

    def createDateClass(year, month, day, hour, minute, second):
        c = java.util.Calendar.getInstance()
        c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
        # Calendar months are 0-based, hence month-1.
        c.set(year, month-1, day, hour, minute, second)
        return c

    def printDateClass(d):
        return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \
            (d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \
            d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND))

    def getNow():
        c = java.util.Calendar.getInstance()
        c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
        c.get(c.HOUR) #force refresh?
        return c

    def getHoursFromNow(hours):
        d = getNow()
        d.add(d.HOUR, hours)
        return d

    def isDateClassExpired(d):
        n = getNow()
        return d.before(n)

    def isDateClassBefore(d1, d2):
        return d1.before(d2)
76 |
--------------------------------------------------------------------------------
/servers/fileflyer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #------------------------------------------------------------
3 | # pelisalacarta - XBMC Plugin
4 | # Conector para fileflyer
5 | # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
6 | #------------------------------------------------------------
7 |
8 | import urlparse,urllib2,urllib,re
9 | import os
10 |
11 | from core import scrapertools
12 | from core import logger
13 | from core import config
14 |
15 | def test_video_exists( page_url ):
16 | logger.info("[fileflyer.py] test_video_exists(page_url='%s')" % page_url)
17 |
18 | # Vídeo borrado: http://www.fileflyer.com/view/fioZRBu
19 | # Video erróneo:
20 | data = scrapertools.cache_page( page_url )
21 | if '' in data:
22 | return False,"El archivo ya no está disponible
en fileflyer (ha sido borrado)"
23 | else:
24 | return True,""
25 |
26 | def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
27 | logger.info("[fileflyer.py] get_video_url(page_url='%s')" % page_url)
28 | video_urls = []
29 |
30 | data = scrapertools.cache_page(page_url)
31 | location = scrapertools.get_match(data,'0
--------------------------------------------------------------------------------
/servers/gigasize.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #------------------------------------------------------------
3 | # pelisalacarta - XBMC Plugin
4 | # Conector para gigasize
5 | # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
6 | #------------------------------------------------------------
7 |
8 | import urlparse,urllib2,urllib,re
9 | import os
10 |
11 | from core import scrapertools
12 | from core import logger
13 | from core import config
14 |
def test_video_exists( page_url ):
    """Check whether the gigasize page still hosts the file.

    Returns (True, "") when available, (False, message) otherwise.
    """
    logger.info("[gigasize.py] test_video_exists(page_url='%s')" % page_url)

    # Vídeo borrado: http://www.gigasize.com/get/097fadecgh7pf
    # Video erróneo:
    data = scrapertools.cache_page( page_url )
    if 'Download error' not in data:
        return True,""
    return False,"El enlace no es válido o ha sido borrado de gigasize"
25 |
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """gigasize downloads are not resolvable by this connector; always []."""
    logger.info("[gigasize.py] get_video_url(page_url='%s')" % page_url)
    return []
31 |
32 | # Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Scan *data* for gigasize download links.

    Returns a list of [title, url, server_id] entries, one per distinct
    URL found; duplicates are logged and skipped.
    """
    encontrados = set()
    devuelve = []

    # http://www.gigasize.com/get/097f9cgh7pf
    # BUGFIX: literal dots are now escaped so the pattern no longer
    # matches arbitrary characters in place of '.'.
    patronvideos = '(gigasize\.com/get/[a-z0-9]+)'
    logger.info("[gigasize.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        titulo = "[gigasize]"
        url = "http://www."+match
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'gigasize' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    # http://www.gigasize.com/get.php?d=097f9cgh7pf
    patronvideos = 'gigasize\.com/get\.php\?d\=([a-z0-9]+)'
    logger.info("[gigasize.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        titulo = "[gigasize]"
        url = "http://www.gigasize.com/get/"+match
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'gigasize' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    return devuelve
68 |
--------------------------------------------------------------------------------
/servers/rapidvideo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: iso-8859-1 -*-
2 | #------------------------------------------------------------
3 | # pelisalacarta - XBMC Plugin
4 | # Conector para rapidvideo
5 | # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
6 | #------------------------------------------------------------
7 |
8 | import urlparse,urllib2,urllib,re
9 | import os
10 |
11 | from core import scrapertools
12 | from core import logger
13 | from core import config
14 |
15 |
16 | USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:18.0) Gecko/20100101 Firefox/18.0"
17 |
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the direct .mp4 URL for a rapidvideo page.

    Drives the page's 'imhuman' form with mechanize, then rebuilds the
    media URL from the 'mp4|...' packed fragment of the response.
    premium/user/password are accepted for connector-interface
    compatibility but unused.  Python 2 only (print statement).
    """
    logger.info("[rapidvideo.py] url="+page_url)
    video_urls=[]
    from lib import mechanize
    br = mechanize.Browser()
    br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    br.set_handle_robots(False)
    res = br.open(page_url)
    # NOTE(review): debug leftover — dumps the whole page to stdout.
    print res.read()
    # Ends up with the last form on the page selected, then submits
    # its 'imhuman' control.
    for form in br.forms():
        br.form = form
    res = br.submit(name='imhuman')
    page = res.read()
    # Response embeds packed player data like "...mp4|<id>|<ip2>|<ip3>|..."
    page = page.split('mp4|')
    idLink = page[1].split('|')
    ip2 = idLink[2]
    ip3 = idLink[3]

    # Hard-coded 50.7.x.x:8777 host — presumably the site's media server;
    # TODO confirm it is still valid.
    video_urls.append(["[rapidvideo]","http://50.7."+ip3+"."+ip2+":8777/"+idLink[0]+"/v.mp4"])

    return video_urls
39 |
40 | # Encuentra vídeos de este servidor en el texto pasado
def find_videos(text):
    """Scan *text* for rapidvideo links; return [title, url, server] triples."""
    encontrados = set()
    devuelve = []

    #http://www.rapidvideo.com/view/YK7A0L7FU3A
    patronvideos = 'rapidvideo.org/([A-Za-z0-9]+)/'
    logger.info("[rapidvideo.py] find_videos #"+patronvideos+"#")
    for match in re.compile(patronvideos,re.DOTALL).findall(text):
        url = "http://www.rapidvideo.org/"+match
        # The page is fetched even for duplicates (mirrors original flow)
        # to pull the file name out of the 'fname' form field.
        page = scrapertools.cache_page(url)
        fname = scrapertools.find_single_match(page,'"fname" value="([^<]+)"')
        title = "[rapidvideo]"+" "+fname
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ title , url , 'rapidvideo' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    return devuelve
66 |
def test():
    """Smoke test: the connector must yield at least one URL for a known page."""
    urls = get_video_url("http://www.rapidvideo.com/embed/sy6wen17")
    return len(urls) > 0
72 |
--------------------------------------------------------------------------------
/servers/hotfile.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #------------------------------------------------------------
3 | # pelisalacarta - XBMC Plugin
4 | # Conector para hotfile
5 | # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
6 | #------------------------------------------------------------
7 |
8 | import urlparse,urllib2,urllib,re
9 | import os
10 |
11 | from core import scrapertools
12 | from core import logger
13 | from core import config
14 |
def test_video_exists( page_url ):
    """Check whether a hotfile download page still exists.

    Returns (True, "") when the file looks available, or
    (False, message) with the site's error text otherwise.

    NOTE(review): the scrape patterns below appear mangled — the HTML
    tags they originally contained seem to have been stripped, leaving
    fragments like '[^<]+' and ''.  Also note the else-branch uses plain
    assignments to 'patron' (not '+='), so only the last value, ' ', is
    actually compiled.  Verify against upstream before trusting this.
    """
    logger.info("[hotfile.py] test_video_exists(page_url='%s')" % page_url)

    # Existe: http://hotfile.com/dl/57556961/6606499/01_Cagayake_GIRLS.mp4.html
    # No existe: http://hotfile.com/dl/57978410/73e1090/08_Coolly_Hotty_Tension.mp4.html
    data = scrapertools.cache_page(page_url)
    patron = '[^<]+'
    patron += '[^<]+'
    patron += ''
    matches = re.compile(patron,re.DOTALL).findall(data)

    # Any match on the "available" pattern means the file is there.
    if len(matches)>0:
        return True,""
    else:
        patron = ' [^<]+'
        patron = '[^<]+'
        patron = '| ([^<]+) | [^<]+'
        patron = ' '
        matches = re.compile(patron,re.DOTALL).findall(data)
        if len(matches)>0:
            return False,matches[0][0]

    return True,""
38 |
39 |
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """hotfile URLs are not resolved by this connector; always returns []."""
    logger.info("[hotfile.py] get_video_url(page_url='%s')" % page_url)
    return []
44 |
45 | # Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Collect distinct hotfile download links found in *data*."""
    encontrados = set()
    devuelve = []

    # http://hotfile.com/dl/146349096/8d52053/Matalobos._3_Temada._Captulo_67___Eu_son_Mateo_Veloso_____carta___CRTVG.mp4.flv.html
    patronvideos = '(http://hotfile.com/dl/.*?\.html)'
    logger.info("[hotfile.py] find_videos #"+patronvideos+"#")
    for url in re.compile(patronvideos,re.DOTALL).findall(data):
        if url in encontrados:
            logger.info(" url duplicada="+url)
        else:
            logger.info(" url="+url)
            devuelve.append( [ "[hotfile]" , url , 'hotfile' ] )
            encontrados.add(url)

    return devuelve
66 |
--------------------------------------------------------------------------------
/servers/hulkshare.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #------------------------------------------------------------
3 | # pelisalacarta - XBMC Plugin
4 | # Conector para hulkshare
5 | # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
6 | #------------------------------------------------------------
7 |
8 | import urlparse,urllib2,urllib,re
9 | import os
10 |
11 | from core import scrapertools
12 | from core import logger
13 | from core import config
14 |
15 | def test_video_exists( page_url ):
16 | return True,""
17 |
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the final media URL for a hulkshare page.

    Follows the HTTP redirect of *page_url* and returns a list with a
    single [label, url] pair.  premium/user/password are accepted for
    connector-interface compatibility but unused.
    """
    logger.info("[hulkshare.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    # hulkshare answers with a redirect; the Location header is the file URL.
    location = scrapertools.get_header_from_response(page_url, header_to_get="location")

    # Removed the unused 'extension' computation (dead local).
    video_urls.append( [ "[hulkshare]",location ] )

    return video_urls
28 |
29 | # Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Scan *data* for hulkshare links (direct or via tusnovelas).

    Returns a list of [title, url, server_id] entries, one per distinct
    URL found; duplicates are logged and skipped.
    """
    encontrados = set()
    devuelve = []

    #http://www.hulkshare.com/dl/bp62cf2510h8
    #http://www.hulkshare.com/dl/e633tphub8jk
    # BUGFIX: literal dots are now escaped so the pattern no longer
    # matches arbitrary characters in place of '.'.
    patronvideos = '(hulkshare\.com/dl/[a-z0-9]+)'
    logger.info("[hulkshare.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        titulo = "[hulkshare]"
        url = "http://www."+match
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'hulkshare' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    #http://www.tusnovelas.com/hl.php?v=5ju6iuif5e68
    patronvideos = 'tusnovelas\.com/hl\.php\?v\=([a-z0-9]+)'
    logger.info("[hulkshare.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for match in matches:
        titulo = "[hulkshare]"
        url = "http://www.hulkshare.com/dl/"+match
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'hulkshare' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    return devuelve
67 |
def test():
    """Smoke test against a known hulkshare download link."""
    urls = get_video_url("http://www.hulkshare.com/dl/5ju6iuif5e68")
    return len(urls) > 0
--------------------------------------------------------------------------------
/servers/junkyvideo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: iso-8859-1 -*-
2 | #------------------------------------------------------------
3 | # pelisalacarta - XBMC Plugin
4 | # Conector para junkyvideo
5 | # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
6 | #------------------------------------------------------------
7 |
8 | import urlparse,urllib2,urllib,re
9 | import os
10 |
11 | from core import scrapertools
12 | from core import logger
13 | from core import config
14 |
15 |
16 |
17 | USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:18.0) Gecko/20100101 Firefox/18.0"
18 |
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the direct media URL for a junkyvideo page.

    Fetches the page, extracts the hidden 'hash' and 'id' form fields,
    waits, re-POSTs the download form and pulls the URL out of the
    player's 'file: "..."' fragment.  premium/user/password are accepted
    for connector-interface compatibility but unused.
    """
    logger.info("[junkyvideo.py] url="+page_url)
    video_urls=[]
    import time
    opener = urllib2.build_opener()
    page = opener.open(page_url)
    page = page.read()
    hash1 = scrapertools.find_single_match(page,'name="hash" value="([^<]+)"')
    idd = scrapertools.find_single_match(page,'name="id" value="([^<]+)"')
    # Fixed 6-second pause — presumably the site's forced countdown
    # before the download form may be submitted; TODO confirm.
    time.sleep(6)
    params = {'op': 'download1', 'usr_login': '', 'id': idd, 'fname': '', 'referer': '', 'hash': hash1}
    data = urllib.urlencode(params)
    # POST the filled-in download form back to the same URL.
    res = urllib2.Request(page_url, data)
    response =opener.open(res)
    page = response.read()
    # The player config embeds the link as: file: "<url>"
    page = page.split('file: "')
    link = page[1].split('"')

    video_urls.append(["[junkyvideo]" ,link[0]])

    return video_urls
40 |
41 | # Encuentra vídeos de este servidor en el texto pasado
def find_videos(text):
    """Scan *text* for junkyvideo links.

    Returns a list of [title, url, server_id] entries, one per distinct
    URL found; each candidate page is fetched to recover the video title.
    """
    encontrados = set()
    devuelve = []

    #http://www.junkyvideo.com/r5z9g1kwg9jt
    # BUGFIX: literal dots are now escaped so the pattern no longer
    # matches arbitrary characters in place of '.'.
    patronvideos = 'junkyvideo\.com/([A-Za-z0-9]+)\.htm'
    logger.info("[junkyvideo.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(text)

    for match in matches:
        titulo = "[junkyvideo]"
        url = "http://www.junkyvideo.com/"+match+".htm"
        # The page is fetched before de-duplication (original behaviour).
        d = scrapertools.cache_page(url)
        ma = scrapertools.find_single_match(d,'Watch ([^<]+)')
        ma=titulo+" "+ma
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ ma , url , 'junkyvideo' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    return devuelve
68 |
def test():
    """Smoke test: the connector must yield at least one URL for a known page."""
    urls = get_video_url("http://www.junkyvideo.com/embed/sy6wen17")
    return len(urls) > 0
74 |
--------------------------------------------------------------------------------
/core/database.py:
--------------------------------------------------------------------------------
1 | import sqlite3
2 |
3 | import logging.config
4 | import logging
5 | logging.config.fileConfig("logging.conf")
6 | logger=logging.getLogger("database")
7 |
def get_connection(database_name):
    """Open the sqlite database *database_name*, creating/upgrading the
    'show' table as needed, and return the open connection."""
    logger.info("get_connection(database_name=%s)" , database_name)
    conn = sqlite3.connect(database_name)

    cursor = conn.cursor()
    cursor.execute('create table if not exists "show" ( "channel_id" TEXT NOT NULL , "show_id" TEXT NOT NULL , "title" TEXT, "thumbnail" TEXT, "plot" TEXT, "disponible" TEXT)')

    # BUGFIX: the cursor used to be closed *before* these schema upgrades,
    # so every 'alter table' failed on the closed cursor and was silently
    # swallowed by safe_database_change — the columns were never added.
    safe_database_change(cursor,conn,'alter table "show" add column "url" TEXT')
    safe_database_change(cursor,conn,'alter table "show" add column "created" TEXT')
    safe_database_change(cursor,conn,'alter table "show" add column "deleted" TEXT')
    cursor.close()

    return conn
21 |
def safe_database_change(cursor,conn,text):
    """Best-effort DDL: execute *text* on *cursor* and commit, ignoring
    database errors (e.g. 'duplicate column' when a migration already ran)."""
    try:
        cursor.execute(text)
        conn.commit()
    except sqlite3.Error:
        # Narrowed from a bare except: only database errors are expected
        # and safe to swallow here; anything else should propagate.
        pass
28 |
def query(conn,query_text):
    """Run a SELECT on *conn* and return all rows as a list of tuples."""
    logger.debug("query(query_text='%s')" , query_text)
    cursor = conn.cursor()
    cursor.execute(query_text)
    rows = cursor.fetchall()
    cursor.close()

    logger.debug("...devuelve %d filas" , len(rows))
    return rows
40 |
def get_numeric_value(conn,query_text,when_null=0):
    """Run a scalar query and return the first cell as an int.

    Returns *when_null* when the cell is SQL NULL.  Assumes the query
    yields at least one row (as the callers' aggregate queries do).
    """
    logger.debug('get_numeric_value(query_text="%s")' , query_text)
    cursor = conn.cursor()

    cursor.execute(query_text)
    devuelve = cursor.fetchall()
    cursor.close()

    # BUGFIX: identity comparison for None ('== None' -> 'is None').
    if devuelve[0][0] is None:
        numero = when_null
    else:
        numero = int(devuelve[0][0])

    logger.debug("...devuelve [%d]" , numero)
    return numero
56 |
def get_single_value(conn,query_text):
    """Run a scalar query and return the first column of the first row."""
    logger.debug("get_single_value(query_text='%s')" , query_text)
    cursor = conn.cursor()
    cursor.execute(query_text)
    rows = cursor.fetchall()
    cursor.close()

    logger.debug("...devuelve [%s]" , str(rows[0][0]))
    return rows[0][0]
68 |
def execute(conn,query):
    """Execute a single write statement on *conn* and commit it."""
    logger.debug("execute(query='%s')" , query)
    cur = conn.cursor()
    cur.execute(query)
    conn.commit()
    cur.close()
76 |
def execute_parameters(conn,query,parameters):
    """Execute a parameterized write statement on *conn* and commit it."""
    logger.debug("execute_parameters(query='%s',parameters=%s)" , query , str(parameters))
    cur = conn.cursor()
    cur.execute(query,parameters)
    conn.commit()
    cur.close()
83 |
--------------------------------------------------------------------------------
/core/ClientCookie/_Request.py:
--------------------------------------------------------------------------------
1 | """Integration with Python standard library module urllib2: Request class.
2 |
3 | Copyright 2004 John J Lee
4 |
5 | This code is free software; you can redistribute it and/or modify it under
6 | the terms of the BSD License (see the file COPYING included with the
7 | distribution).
8 |
9 | """
10 |
11 | try: True
12 | except NameError:
13 | True = 1
14 | False = 0
15 |
16 | import urllib2, string
17 |
18 | from _ClientCookie import request_host
19 |
20 |
class Request(urllib2.Request):
    """urllib2.Request extended with the RFC 2965 state cookie handling
    needs: the origin request-host, the 'unverifiable' flag, and a
    separate store of headers that must not follow redirects."""

    def __init__(self, url, data=None, headers={},
                 origin_req_host=None, unverifiable=False):
        # NOTE(review): mutable default for 'headers' — appears harmless
        # only if urllib2.Request copies the mapping; confirm before reuse.
        urllib2.Request.__init__(self, url, data, headers)
        # Headers excluded from redirected requests (e.g. Cookie, Auth).
        self.unredirected_hdrs = {}

        # All the terminology below comes from RFC 2965.
        self.unverifiable = unverifiable
        # Set request-host of origin transaction.
        # The origin request-host is needed in order to decide whether
        # unverifiable sub-requests (automatic redirects, images embedded
        # in HTML, etc.) are to third-party hosts. If they are, the
        # resulting transactions might need to be conducted with cookies
        # turned off.
        if origin_req_host is None:
            origin_req_host = request_host(self)
        self.origin_req_host = origin_req_host

    def get_origin_req_host(self):
        # Request-host of the origin transaction (RFC 2965 terminology).
        return self.origin_req_host

    def is_unverifiable(self):
        # True for sub-requests the user did not explicitly initiate.
        return self.unverifiable

    def add_unredirected_header(self, key, val):
        """Add a header that will not be added to a redirected request."""
        # string.capitalize normalizes the header name's case for lookup.
        self.unredirected_hdrs[string.capitalize(key)] = val

    def has_header(self, header_name):
        """True iff request has named header (regular or unredirected)."""
        if (self.headers.has_key(header_name) or
            self.unredirected_hdrs.has_key(header_name)):
            return True
        return False

    def get_header(self, header_name, default=None):
        # Regular headers take precedence over unredirected ones.
        return self.headers.get(
            header_name,
            self.unredirected_hdrs.get(header_name, default))

    def header_items(self):
        # Merge both stores; regular headers win on key collision.
        hdrs = self.unredirected_hdrs.copy()
        hdrs.update(self.headers)
        return hdrs.items()

    def __str__(self):
        # NOTE(review): the format string contains no %s placeholder, so
        # '"" % url' raises TypeError at runtime; it was probably
        # "<Request for %s>" before the text was mangled.
        return "" % self.get_full_url()

    def get_method(self):
        # POST iff a request body is present (urllib2 convention).
        if self.has_data():
            return "POST"
        else:
            return "GET"
--------------------------------------------------------------------------------
/core/jsontools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #------------------------------------------------------------
3 | # tvalacarta - XBMC Plugin
4 | #------------------------------------------------------------
5 | # json_tools
6 | # Parsea un string en JSON probando varios módulos
7 | #------------------------------------------------------------
8 |
9 | import traceback
10 | import config
11 | import logger
12 |
def load_json(data):
    """Parse the JSON string *data*, trying several backends in turn.

    Order: bundled lib.simplejson, the interpreter's simplejson, the
    stdlib json module, and finally Plex's JSON object.  String values
    are re-encoded to UTF-8 byte strings via the object_hook.  Returns
    the parsed object, or "" when every backend fails.
    """
    # NOTE(review): duplicates the log line inside the first try-block.
    logger.info("core.jsontools.load_json Probando simplejson en directorio lib")

    # callback to transform json string values to utf8
    def to_utf8(dct):
        rdct = {}
        for k, v in dct.items() :
            # 'unicode' makes this Python-2-only code.
            if isinstance(v, (str, unicode)) :
                rdct[k] = v.encode('utf8', 'ignore')
            else :
                rdct[k] = v
        return rdct

    try:
        logger.info("core.jsontools.load_json Probando simplejson en directorio lib")
        from lib import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("core.jsontools.load_json -> "+repr(json_data))
        return json_data
    except:
        # Fall through to the next backend; the traceback is only logged.
        logger.info(traceback.format_exc())

    try:
        logger.info("core.jsontools.load_json Probando simplejson incluido en el interprete")
        import simplejson
        json_data = simplejson.loads(data, object_hook=to_utf8)
        logger.info("core.jsontools.load_json -> "+repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("core.jsontools.load_json Probando json incluido en el interprete")
        import json
        json_data = json.loads(data, object_hook=to_utf8)
        logger.info("core.jsontools.load_json -> "+repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

    try:
        logger.info("core.jsontools.load_json Probando JSON de Plex")
        # 'JSON' is only defined inside the Plex runtime; elsewhere this
        # raises NameError, which the bare except absorbs.
        json_data = JSON.ObjectFromString(data, encoding="utf-8")
        logger.info("core.jsontools.load_json -> "+repr(json_data))
        return json_data
    except:
        logger.info(traceback.format_exc())

    logger.info("core.jsontools.load_json No se ha encontrado un parser de JSON valido")
    logger.info("core.jsontools.load_json -> (nada)")
    return ""
64 |
65 |
--------------------------------------------------------------------------------
/servers/jumbofiles.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #------------------------------------------------------------
3 | # pelisalacarta - XBMC Plugin
4 | # Conector para jumbofiles
5 | # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
6 | #------------------------------------------------------------
7 |
8 | import urlparse,urllib2,urllib,re
9 | import os
10 |
11 | from core import scrapertools
12 | from core import logger
13 | from core import config
14 |
15 | def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
16 | logger.info("[jumbofiles.py] get_video_url(page_url='%s')" % page_url)
17 | video_urls = []
18 |
19 | data = scrapertools.cache_page(page_url)
20 |
21 | # op=download2&id=oiyetnk5vwzf&rand=m2080mem&referer=&method_free=&method_premium=&down_direct=1&x=64&y=5
22 | op = scrapertools.get_match(data,'')
23 | id = scrapertools.get_match(data,'')
24 | random_number = scrapertools.get_match(data,'')
25 | down_direct = scrapertools.get_match(data,'')
26 |
27 | post = "op=%s&id=%s&rand=%s&referer=&method_free=&method_premium=&down_direct=%s&x=64&y=5" % (op,id,random_number,down_direct)
28 | data = scrapertools.cache_page(page_url,post=post)
29 | #logger.info("data="+data)
30 |
31 | # | |