├── .gitignore
├── SportScanner.bundle
└── Contents
│ ├── Libraries
│ └── Shared
│ │ ├── chardet
│ │ ├── cli
│ │ │ ├── __init__.py
│ │ │ └── chardetect.py
│ │ ├── version.py
│ │ ├── compat.py
│ │ ├── __init__.py
│ │ ├── euctwprober.py
│ │ ├── euckrprober.py
│ │ ├── gb2312prober.py
│ │ ├── big5prober.py
│ │ ├── enums.py
│ │ ├── cp949prober.py
│ │ ├── mbcsgroupprober.py
│ │ ├── utf8prober.py
│ │ ├── mbcharsetprober.py
│ │ ├── sbcsgroupprober.py
│ │ ├── codingstatemachine.py
│ │ ├── eucjpprober.py
│ │ ├── sjisprober.py
│ │ ├── charsetgroupprober.py
│ │ ├── escprober.py
│ │ ├── charsetprober.py
│ │ ├── latin1prober.py
│ │ ├── sbcharsetprober.py
│ │ └── chardistribution.py
│ │ ├── idna
│ │ ├── package_data.py
│ │ ├── __init__.py
│ │ ├── compat.py
│ │ ├── intranges.py
│ │ └── codec.py
│ │ ├── certifi
│ │ ├── __main__.py
│ │ ├── __init__.py
│ │ └── core.py
│ │ ├── urllib3
│ │ ├── packages
│ │ │ ├── __init__.py
│ │ │ ├── ssl_match_hostname
│ │ │ │ ├── __init__.py
│ │ │ │ └── _implementation.py
│ │ │ ├── backports
│ │ │ │ └── makefile.py
│ │ │ └── ordered_dict.py
│ │ ├── util
│ │ │ ├── __init__.py
│ │ │ ├── wait.py
│ │ │ ├── response.py
│ │ │ ├── request.py
│ │ │ ├── connection.py
│ │ │ ├── url.py
│ │ │ └── timeout.py
│ │ ├── filepost.py
│ │ ├── __init__.py
│ │ ├── contrib
│ │ │ ├── ntlmpool.py
│ │ │ └── socks.py
│ │ ├── request.py
│ │ ├── fields.py
│ │ └── exceptions.py
│ │ └── requests
│ │ ├── __version__.py
│ │ ├── certs.py
│ │ ├── hooks.py
│ │ ├── packages.py
│ │ ├── _internal_utils.py
│ │ ├── compat.py
│ │ ├── structures.py
│ │ ├── exceptions.py
│ │ ├── help.py
│ │ ├── status_codes.py
│ │ ├── __init__.py
│ │ └── api.py
│ ├── Code
│ └── thesportsdb.py
│ └── Info.plist
├── test.sh
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | #For Mac OSX
2 | *DS_Store
3 | .idea/
4 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/cli/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/idna/package_data.py:
--------------------------------------------------------------------------------
1 | __version__ = '2.6'
2 |
3 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/certifi/__main__.py:
--------------------------------------------------------------------------------
1 | from certifi import where
2 | print(where())
3 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Code/thesportsdb.py:
--------------------------------------------------------------------------------
# Base64-looking TheSportsDB key material; presumably decoded and used by the
# agent code elsewhere in the bundle — TODO confirm against the consumer.
# NOTE(review): committing API keys, even encoded, exposes them to anyone with
# repository access — confirm these are intended to be public/free-tier keys.
sportsdbkey1 = 'AAsAAQIFBQ=='
sportsdbkey2 = 'MTIzNDU2Nw=='
3 |
4 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/idna/__init__.py:
--------------------------------------------------------------------------------
1 | from .package_data import __version__
2 | from .core import *
3 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/certifi/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import where, old_where
2 |
3 | __version__ = "2018.01.18"
4 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/packages/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from . import ssl_match_hostname
4 |
5 | __all__ = ('ssl_match_hostname', )
6 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/version.py:
--------------------------------------------------------------------------------
1 | """
2 | This module exists only to simplify retrieving the version number of chardet
3 | from within setup.py and from chardet subpackages.
4 |
5 | :author: Dan Blanchard (dan.blanchard@gmail.com)
6 | """
7 |
8 | __version__ = "3.0.4"
9 | VERSION = __version__.split('.')
10 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/idna/compat.py:
--------------------------------------------------------------------------------
1 | from .core import *
2 | from .codec import *
3 |
def ToASCII(label):
    # IDNA2003-style API shim: delegates to this package's encode().
    return encode(label)
6 |
def ToUnicode(label):
    # IDNA2003-style API shim: delegates to this package's decode().
    return decode(label)
9 |
def nameprep(s):
    # nameprep belongs to IDNA2003; this library implements IDNA2008 only.
    raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")
12 |
13 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/__version__.py:
--------------------------------------------------------------------------------
1 | # .-. .-. .-. . . .-. .-. .-. .-.
2 | # |( |- |.| | | |- `-. | `-.
3 | # ' ' `-' `-`.`-' `-' `-' ' `-'
4 |
5 | __title__ = 'requests'
6 | __description__ = 'Python HTTP for Humans.'
7 | __url__ = 'https://requests.readthedocs.io'
8 | __version__ = '2.26.0'
9 | __build__ = 0x022600
10 | __author__ = 'Kenneth Reitz'
11 | __author_email__ = 'me@kennethreitz.org'
12 | __license__ = 'Apache 2.0'
13 | __copyright__ = 'Copyright 2020 Kenneth Reitz'
14 | __cake__ = u'\u2728 \U0001f370 \u2728'
15 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/certs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | requests.certs
6 | ~~~~~~~~~~~~~~
7 |
8 | This module returns the preferred default CA certificate bundle. There is
9 | only one — the one from the certifi package.
10 |
11 | If you are packaging Requests, e.g., for a Linux distribution or a managed
12 | environment, you can change the definition of where() to return a separately
13 | packaged CA bundle.
14 | """
15 | from certifi import where
16 |
17 | if __name__ == '__main__':
18 | print(where())
19 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/packages/ssl_match_hostname/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | try:
4 | # Our match_hostname function is the same as 3.5's, so we only want to
5 | # import the match_hostname function if it's at least that good.
6 | if sys.version_info < (3, 5):
7 | raise ImportError("Fallback to vendored code")
8 |
9 | from ssl import CertificateError, match_hostname
10 | except ImportError:
11 | try:
12 | # Backport of the function from a pypi module
13 | from backports.ssl_match_hostname import CertificateError, match_hostname
14 | except ImportError:
15 | # Our vendored copy
16 | from ._implementation import CertificateError, match_hostname
17 |
18 | # Not needed, but documenting what we provide.
19 | __all__ = ('CertificateError', 'match_hostname')
20 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | CFBundleIdentifier
6 | com.plexapp.agents.sportscanner
7 | PlexAgentAttributionText
8 |
9 | <a href="http://www.thesportsdb.com">TheSportsDB.com</a> is a crowd-sourced site for sports metadata. The site was set up in 2014 to provide a free (or low cost) data source for all kinds of sports results. Since then it has grown into one of the biggest archives of sports data on the web.
10 |
11 | PlexFrameworkVersion
12 | 2
13 | PlexPluginClass
14 | Agent
15 | PlexPluginCodePolicy
16 | Elevated
17 |
18 |
19 |
--------------------------------------------------------------------------------
/test.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Deploys the latest Scanner and Metadata Agent into a Plex config volume,
# triggers a library rescan, and tails the plugin log to watch the run.
# TODO: Add the ability to pass parameters to test script

api_token=
library_id=11
log_file="/config/Library/Application Support/Plex Media Server/Logs/PMS Plugin Logs/com.plexapp.agents.sportscanner.log"

# The path contains spaces: an unquoted expansion would word-split into
# multiple arguments, so always quote it.
rm -f "$log_file"

# Copy latest versions of files
echo 'Updating Scanner'
cp -rf /code/SportScanner/Scanners/Series "/config/Library/Application Support/Plex Media Server/Scanners/"
echo 'Updating Metadata Agent'
cp -rf /code/SportScanner/SportScanner.bundle "/config/Library/Application Support/Plex Media Server/Plug-ins/"

sleep 1

# Run content scan and metadata refresh
scan_url="http://localhost:32400/library/sections/${library_id}/refresh?force=1&X-Plex-Token=${api_token}"
echo "Calling ${scan_url}"
# Quote the URL so '&' and '?' survive intact when passed to curl.
curl "$scan_url"

sleep 3

# Tail log file to view progress of test run
tail -f -n 50 "${log_file}"
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/hooks.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests.hooks
5 | ~~~~~~~~~~~~~~
6 |
7 | This module provides the capabilities for the Requests hooks system.
8 |
9 | Available hooks:
10 |
11 | ``response``:
12 | The response generated from a Request.
13 | """
# The only hook event Requests currently dispatches.
HOOKS = ['response']


def default_hooks():
    """Return a fresh hook registry mapping each event to an empty list."""
    registry = {}
    for event in HOOKS:
        registry[event] = []
    return registry
19 |
20 | # TODO: response is the only one
21 |
22 |
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Dispatches a hook dictionary on a given piece of data."""
    registered = (hooks or {}).get(key)
    if not registered:
        return hook_data
    # A single callable may be registered instead of a list of callables.
    if hasattr(registered, '__call__'):
        registered = [registered]
    for hook in registered:
        result = hook(hook_data, **kwargs)
        # A hook returning None leaves the current data untouched.
        if result is not None:
            hook_data = result
    return hook_data
35 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/certifi/core.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | certifi.py
6 | ~~~~~~~~~~
7 |
8 | This module returns the installation location of cacert.pem.
9 | """
10 | import os
11 | import warnings
12 |
13 |
14 | class DeprecatedBundleWarning(DeprecationWarning):
15 | """
16 | The weak security bundle is being deprecated. Please bother your service
17 | provider to get them to stop using cross-signed roots.
18 | """
19 |
20 |
def where():
    """Return the absolute path of the ``cacert.pem`` bundled next to this module."""
    return os.path.join(os.path.dirname(__file__), 'cacert.pem')
25 |
26 |
def old_where():
    """Deprecated alias of :func:`where`; emits a deprecation warning first."""
    message = (
        "The weak security bundle has been removed. certifi.old_where() is now an alias "
        "of certifi.where(). Please update your code to use certifi.where() instead. "
        "certifi.old_where() will be removed in 2018."
    )
    warnings.warn(message, DeprecatedBundleWarning)
    return where()
35 |
36 | if __name__ == '__main__':
37 | print(where())
38 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/packages.py:
--------------------------------------------------------------------------------
# Compatibility shim: old code imported vendored dependencies as
# ``requests.packages.<name>``; this module aliases the real top-level modules
# into sys.modules under those legacy names.
import sys

try:
    import chardet
except ImportError:
    # Fall back to charset_normalizer under the ``chardet`` name.
    import charset_normalizer as chardet
    import warnings

    warnings.filterwarnings('ignore', 'Trying to detect', module='charset_normalizer')

# This code exists for backwards compatibility reasons.
# I don't like it either. Just look the other way. :)

for package in ('urllib3', 'idna'):
    locals()[package] = __import__(package)
    # This traversal is apparently necessary such that the identities are
    # preserved (requests.packages.urllib3.* is urllib3.*)
    for mod in list(sys.modules):
        if mod == package or mod.startswith(package + '.'):
            sys.modules['requests.packages.' + mod] = sys.modules[mod]

target = chardet.__name__
for mod in list(sys.modules):
    if mod == target or mod.startswith(target + '.'):
        # NOTE(review): ``target.replace(target, 'chardet')`` always yields
        # just 'chardet', so every matching module collapses onto the single
        # key 'requests.packages.chardet' (last one wins). This mirrors
        # upstream requests 2.26 — confirm before "fixing".
        sys.modules['requests.packages.' + target.replace(target, 'chardet')] = sys.modules[mod]
# Kinda cool, though, right?
27 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/compat.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # Contributor(s):
3 | # Dan Blanchard
4 | # Ian Cordasco
5 | #
6 | # This library is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License as published by the Free Software Foundation; either
9 | # version 2.1 of the License, or (at your option) any later version.
10 | #
11 | # This library is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 | # Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public
17 | # License along with this library; if not, write to the Free Software
18 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 | # 02110-1301 USA
20 | ######################### END LICENSE BLOCK #########################
21 |
22 | import sys
23 |
24 |
# Interpreter-version flags and string-type aliases used across chardet.
PY2 = sys.version_info < (3, 0)
PY3 = not PY2

if PY2:
    # ``unicode`` only exists on Python 2; this branch never runs on Python 3.
    base_str = (str, unicode)
    text_type = unicode
else:
    base_str = (bytes, str)
    text_type = str
35 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/_internal_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests._internal_utils
5 | ~~~~~~~~~~~~~~
6 |
7 | Provides utility functions that are consumed internally by Requests
8 | which depend on extremely few external helpers (such as compat)
9 | """
10 |
11 | from .compat import is_py2, builtin_str, str
12 |
13 |
def to_native_string(string, encoding='ascii'):
    """Given a string object, regardless of type, returns a representation of
    that string in the native string type, encoding and decoding where
    necessary. This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    # Python 2 natives are bytes (encode); Python 3 natives are text (decode).
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
28 |
29 |
def unicode_is_ascii(u_string):
    """Determine if unicode string only contains ASCII characters.

    :param str u_string: unicode string to check. Must be unicode
        and not Python 2 `str`.
    :rtype: bool
    """
    assert isinstance(u_string, str)
    try:
        u_string.encode('ascii')
    except UnicodeEncodeError:
        return False
    return True
43 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/util/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | # For backwards compatibility, provide imports that used to be here.
3 | from .connection import is_connection_dropped
4 | from .request import make_headers
5 | from .response import is_fp_closed
6 | from .ssl_ import (
7 | SSLContext,
8 | HAS_SNI,
9 | IS_PYOPENSSL,
10 | IS_SECURETRANSPORT,
11 | assert_fingerprint,
12 | resolve_cert_reqs,
13 | resolve_ssl_version,
14 | ssl_wrap_socket,
15 | )
16 | from .timeout import (
17 | current_time,
18 | Timeout,
19 | )
20 |
21 | from .retry import Retry
22 | from .url import (
23 | get_host,
24 | parse_url,
25 | split_first,
26 | Url,
27 | )
28 | from .wait import (
29 | wait_for_read,
30 | wait_for_write
31 | )
32 |
33 | __all__ = (
34 | 'HAS_SNI',
35 | 'IS_PYOPENSSL',
36 | 'IS_SECURETRANSPORT',
37 | 'SSLContext',
38 | 'Retry',
39 | 'Timeout',
40 | 'Url',
41 | 'assert_fingerprint',
42 | 'current_time',
43 | 'is_connection_dropped',
44 | 'is_fp_closed',
45 | 'get_host',
46 | 'parse_url',
47 | 'make_headers',
48 | 'resolve_cert_reqs',
49 | 'resolve_ssl_version',
50 | 'split_first',
51 | 'ssl_wrap_socket',
52 | 'wait_for_read',
53 | 'wait_for_write'
54 | )
55 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/util/wait.py:
--------------------------------------------------------------------------------
1 | from .selectors import (
2 | HAS_SELECT,
3 | DefaultSelector,
4 | EVENT_READ,
5 | EVENT_WRITE
6 | )
7 |
8 |
def _wait_for_io_events(socks, events, timeout=None):
    """ Waits for IO events to be available from a list of sockets
    or optionally a single socket if passed in. Returns a list of
    sockets that can be interacted with immediately. """
    if not HAS_SELECT:
        raise ValueError('Platform does not have a selector')
    if not isinstance(socks, list):
        # A lone socket (anything with fileno()) becomes a one-element list;
        # any other non-list iterable is materialized.
        socks = [socks] if hasattr(socks, "fileno") else list(socks)
    with DefaultSelector() as selector:
        for sock in socks:
            selector.register(sock, events)
        ready = selector.select(timeout)
    # select() returns (key, event_mask) pairs; keep only the requested events.
    return [key.fileobj for key, mask in ready if mask & events]
27 |
28 |
def wait_for_read(socks, timeout=None):
    """ Waits for reading to be available from a list of sockets
    or optionally a single socket if passed in. Returns a list of
    sockets that can be read from immediately. """
    # Thin convenience wrapper binding the READ event.
    return _wait_for_io_events(socks, EVENT_READ, timeout=timeout)
34 |
35 |
def wait_for_write(socks, timeout=None):
    """ Waits for writing to be available from a list of sockets
    or optionally a single socket if passed in. Returns a list of
    sockets that can be written to immediately. """
    # Thin convenience wrapper binding the WRITE event.
    return _wait_for_io_events(socks, EVENT_WRITE, timeout=timeout)
41 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/packages/backports/makefile.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | backports.makefile
4 | ~~~~~~~~~~~~~~~~~~
5 |
6 | Backports the Python 3 ``socket.makefile`` method for use with anything that
7 | wants to create a "fake" socket object.
8 | """
9 | import io
10 |
11 | from socket import SocketIO
12 |
13 |
def backport_makefile(self, mode="r", buffering=None, encoding=None,
                      errors=None, newline=None):
    """
    Backport of ``socket.makefile`` from Python 3.5.

    Wraps ``self`` (a socket-like object) in the appropriate io layering:
    raw SocketIO -> optional buffered wrapper -> optional text wrapper,
    chosen by ``mode`` and ``buffering``.
    """
    # Only combinations of "r", "w" and "b" are meaningful mode characters.
    if not set(mode) <= set(["r", "w", "b"]):
        raise ValueError(
            "invalid mode %r (only r, w, b allowed)" % (mode,)
        )
    writing = "w" in mode
    # Default to reading when no direction is given at all.
    reading = "r" in mode or not writing
    assert reading or writing
    binary = "b" in mode
    rawmode = ""
    if reading:
        rawmode += "r"
    if writing:
        rawmode += "w"
    raw = SocketIO(self, rawmode)
    # Presumably reference-counts outstanding makefile() handles so the
    # socket outlives them — TODO confirm against the socket implementation.
    self._makefile_refs += 1
    if buffering is None:
        buffering = -1
    if buffering < 0:
        buffering = io.DEFAULT_BUFFER_SIZE
    if buffering == 0:
        # Unbuffered access is only valid in binary mode.
        if not binary:
            raise ValueError("unbuffered streams must be binary")
        return raw
    if reading and writing:
        buffer = io.BufferedRWPair(raw, raw, buffering)
    elif reading:
        buffer = io.BufferedReader(raw, buffering)
    else:
        assert writing
        buffer = io.BufferedWriter(raw, buffering)
    if binary:
        return buffer
    text = io.TextIOWrapper(buffer, encoding, errors, newline)
    # Report the caller's requested mode string, as socket.makefile() does.
    text.mode = mode
    return text
54 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/__init__.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # This library is free software; you can redistribute it and/or
3 | # modify it under the terms of the GNU Lesser General Public
4 | # License as published by the Free Software Foundation; either
5 | # version 2.1 of the License, or (at your option) any later version.
6 | #
7 | # This library is distributed in the hope that it will be useful,
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 | # Lesser General Public License for more details.
11 | #
12 | # You should have received a copy of the GNU Lesser General Public
13 | # License along with this library; if not, write to the Free Software
14 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
15 | # 02110-1301 USA
16 | ######################### END LICENSE BLOCK #########################
17 |
18 |
19 | from .compat import PY2, PY3
20 | from .universaldetector import UniversalDetector
21 | from .version import __version__, VERSION
22 |
23 |
def detect(byte_str):
    """
    Detect the encoding of the given byte string.

    :param byte_str: The byte sequence to examine.
    :type byte_str: ``bytes`` or ``bytearray``
    """
    # Normalize to a bytearray; reject anything that is neither bytes-like.
    if isinstance(byte_str, bytes):
        byte_str = bytearray(byte_str)
    elif not isinstance(byte_str, bytearray):
        raise TypeError('Expected object of type bytes or bytearray, got: '
                        '{0}'.format(type(byte_str)))
    detector = UniversalDetector()
    detector.feed(byte_str)
    return detector.close()
40 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/euctwprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import EUCTWDistributionAnalysis
31 | from .mbcssm import EUCTW_SM_MODEL
32 |
class EUCTWProber(MultiByteCharSetProber):
    """Multi-byte prober for the EUC-TW encoding."""

    def __init__(self):
        super(EUCTWProber, self).__init__()
        # Byte-sequence validator and character-frequency analyzer for EUC-TW.
        self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
        self.distribution_analyzer = EUCTWDistributionAnalysis()
        self.reset()

    @property
    def charset_name(self):
        # Canonical encoding name reported in detection results.
        return "EUC-TW"

    @property
    def language(self):
        return "Taiwan"
47 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/euckrprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import EUCKRDistributionAnalysis
31 | from .mbcssm import EUCKR_SM_MODEL
32 |
33 |
class EUCKRProber(MultiByteCharSetProber):
    """Multi-byte prober for the EUC-KR encoding (Korean)."""

    def __init__(self):
        super(EUCKRProber, self).__init__()
        # Byte-sequence validator and character-frequency analyzer for EUC-KR.
        self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
        self.distribution_analyzer = EUCKRDistributionAnalysis()
        self.reset()

    @property
    def charset_name(self):
        # Canonical encoding name reported in detection results.
        return "EUC-KR"

    @property
    def language(self):
        return "Korean"
48 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/gb2312prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import GB2312DistributionAnalysis
31 | from .mbcssm import GB2312_SM_MODEL
32 |
class GB2312Prober(MultiByteCharSetProber):
    """Multi-byte prober for the GB2312 encoding (Simplified Chinese)."""

    def __init__(self):
        super(GB2312Prober, self).__init__()
        # Byte-sequence validator and character-frequency analyzer for GB2312.
        self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
        self.distribution_analyzer = GB2312DistributionAnalysis()
        self.reset()

    @property
    def charset_name(self):
        # Canonical encoding name reported in detection results.
        return "GB2312"

    @property
    def language(self):
        return "Chinese"
47 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/big5prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Communicator client code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import Big5DistributionAnalysis
31 | from .mbcssm import BIG5_SM_MODEL
32 |
33 |
class Big5Prober(MultiByteCharSetProber):
    """Multi-byte prober for the Big5 encoding (Traditional Chinese)."""

    def __init__(self):
        super(Big5Prober, self).__init__()
        # Byte-sequence validator and character-frequency analyzer for Big5.
        self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
        self.distribution_analyzer = Big5DistributionAnalysis()
        self.reset()

    @property
    def charset_name(self):
        # Canonical encoding name reported in detection results.
        return "Big5"

    @property
    def language(self):
        return "Chinese"
48 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/idna/intranges.py:
--------------------------------------------------------------------------------
1 | """
2 | Given a list of integers, made up of (hopefully) a small number of long runs
3 | of consecutive integers, compute a representation of the form
4 | ((start1, end1), (start2, end2) ...). Then answer the question "was x present
5 | in the original list?" in time O(log(# runs)).
6 | """
7 |
8 | import bisect
9 |
def intranges_from_list(list_):
    """Represent a list of integers as a sequence of ranges:
    ((start_0, end_0), (start_1, end_1), ...), such that the original
    integers are exactly those x such that start_i <= x < end_i for some i.

    Ranges are encoded as single integers (start << 32 | end), not as tuples.
    """
    ordered = sorted(list_)
    ranges = []
    run_start = 0
    for idx, value in enumerate(ordered):
        nxt = idx + 1
        # Extend the current run while the next element is consecutive.
        if nxt < len(ordered) and value == ordered[nxt] - 1:
            continue
        # Close the run [ordered[run_start], value] as a half-open range.
        ranges.append(_encode_range(ordered[run_start], value + 1))
        run_start = nxt

    return tuple(ranges)
30 |
31 | def _encode_range(start, end):
32 | return (start << 32) | end
33 |
34 | def _decode_range(r):
35 | return (r >> 32), (r & ((1 << 32) - 1))
36 |
37 |
def intranges_contain(int_, ranges):
    """Determine if `int_` falls into one of the ranges in `ranges`."""
    # Encode int_ as a range start with end 0; this sorts immediately
    # before any real encoded range that starts at int_.
    probe = int_ << 32
    pos = bisect.bisect_left(ranges, probe)

    # int_ may lie inside the range just before the insertion point:
    # a (start, end) with start < int_ <= end.
    if pos > 0:
        start = ranges[pos - 1] >> 32
        end = ranges[pos - 1] & ((1 << 32) - 1)
        if start <= int_ < end:
            return True
    # Or the range at the insertion point may start exactly at int_.
    if pos < len(ranges) and (ranges[pos] >> 32) == int_:
        return True
    return False
54 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/enums.py:
--------------------------------------------------------------------------------
1 | """
2 | All of the Enums that are used throughout the chardet package.
3 |
4 | :author: Dan Blanchard (dan.blanchard@gmail.com)
5 | """
6 |
7 |
class InputState(object):
    """
    This enum represents the different states a universal detector can be in.
    """
    # Only plain 7-bit ASCII bytes have been seen so far.
    PURE_ASCII = 0
    # ASCII with escape sequences (presumably ISO-2022-style input) —
    # TODO confirm against universaldetector usage.
    ESC_ASCII = 1
    # At least one byte with the high bit set has been seen.
    HIGH_BYTE = 2
15 |
16 |
class LanguageFilter(object):
    """
    This enum represents the different language filters we can apply to a
    ``UniversalDetector``.
    """
    # Individual bit flags; combine with bitwise OR to allow several
    # languages at once.
    CHINESE_SIMPLIFIED = 0x01
    CHINESE_TRADITIONAL = 0x02
    JAPANESE = 0x04
    KOREAN = 0x08
    NON_CJK = 0x10
    # Union of every individual flag above.
    ALL = 0x1F
    # Convenience combinations.
    CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
    CJK = CHINESE | JAPANESE | KOREAN
30 |
31 |
class ProbingState(object):
    """
    This enum represents the different states a prober can be in.
    """
    # Still gathering evidence; no verdict yet.
    DETECTING = 0
    # The prober is confident the input matches its charset.
    FOUND_IT = 1
    # The prober has ruled its charset out.
    NOT_ME = 2
39 |
40 |
class MachineState(object):
    """
    This enum represents the different states a state machine can be in.
    """
    # Initial state; the probers also treat it as "a complete character
    # was just consumed".
    START = 0
    # An illegal byte sequence for this coding was encountered.
    ERROR = 1
    # The byte sequence is conclusive for this coding.
    ITS_ME = 2
48 |
49 |
class SequenceLikelihood(object):
    """
    This enum represents the likelihood of a character following the previous one.
    """
    NEGATIVE = 0
    UNLIKELY = 1
    LIKELY = 2
    POSITIVE = 3

    @classmethod
    def get_num_categories(cls):
        """:returns: The number of likelihood categories in the enum."""
        # Derive the count from the highest category instead of
        # hard-coding 4, so it stays correct if categories are added.
        return cls.POSITIVE + 1
63 |
64 |
class CharacterCategory(object):
    """
    This enum represents the different categories language models for
    ``SingleByteCharsetProber`` put characters into.

    Anything less than CONTROL is considered a letter.
    """
    # Non-letter categories occupy the top of the byte range; any value
    # below CONTROL (251) is treated as a letter.
    UNDEFINED = 255
    LINE_BREAK = 254
    SYMBOL = 253
    DIGIT = 252
    CONTROL = 251
77 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/cp949prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .chardistribution import EUCKRDistributionAnalysis
29 | from .codingstatemachine import CodingStateMachine
30 | from .mbcharsetprober import MultiByteCharSetProber
31 | from .mbcssm import CP949_SM_MODEL
32 |
33 |
class CP949Prober(MultiByteCharSetProber):
    """Prober for the CP949 (Korean) encoding."""

    def __init__(self):
        super(CP949Prober, self).__init__()
        # CP949 is a superset of EUC-KR, so the EUC-KR frequency
        # distribution can be reused unchanged.
        self.distribution_analyzer = EUCKRDistributionAnalysis()
        self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
        self.reset()

    @property
    def charset_name(self):
        return "CP949"

    @property
    def language(self):
        return "Korean"
50 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/mbcsgroupprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | # Proofpoint, Inc.
13 | #
14 | # This library is free software; you can redistribute it and/or
15 | # modify it under the terms of the GNU Lesser General Public
16 | # License as published by the Free Software Foundation; either
17 | # version 2.1 of the License, or (at your option) any later version.
18 | #
19 | # This library is distributed in the hope that it will be useful,
20 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
21 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 | # Lesser General Public License for more details.
23 | #
24 | # You should have received a copy of the GNU Lesser General Public
25 | # License along with this library; if not, write to the Free Software
26 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
27 | # 02110-1301 USA
28 | ######################### END LICENSE BLOCK #########################
29 |
30 | from .charsetgroupprober import CharSetGroupProber
31 | from .utf8prober import UTF8Prober
32 | from .sjisprober import SJISProber
33 | from .eucjpprober import EUCJPProber
34 | from .gb2312prober import GB2312Prober
35 | from .euckrprober import EUCKRProber
36 | from .cp949prober import CP949Prober
37 | from .big5prober import Big5Prober
38 | from .euctwprober import EUCTWProber
39 |
40 |
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte encoding."""

    def __init__(self, lang_filter=None):
        super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
        # Instantiate one prober per multi-byte encoding, in priority order.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self.probers = [klass() for klass in prober_classes]
        self.reset()
55 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/compat.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

"""
requests.compat
~~~~~~~~~~~~~~~

This module handles import compatibility issues between Python 2 and
Python 3.
"""

# Prefer the real chardet package; charset_normalizer exposes a
# compatible API and is used as a drop-in fallback.
try:
    import chardet
except ImportError:
    import charset_normalizer as chardet

import sys

# -------
# Pythons
# -------

# Syntax sugar.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

# Use simplejson when available; it also provides a real JSONDecodeError.
has_simplejson = False
try:
    import simplejson as json
    has_simplejson = True
except ImportError:
    import json

# ---------
# Specifics
# ---------

if is_py2:
    from urllib import (
        quote, unquote, quote_plus, unquote_plus, urlencode, getproxies,
        proxy_bypass, proxy_bypass_environment, getproxies_environment)
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    # Keep OrderedDict for backwards compatibility.
    from collections import Callable, Mapping, MutableMapping, OrderedDict

    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)
    integer_types = (int, long)
    # Python 2's json raises plain ValueError on malformed input.
    JSONDecodeError = ValueError

elif is_py3:
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
    from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    # Keep OrderedDict for backwards compatibility.
    from collections import OrderedDict
    from collections.abc import Callable, Mapping, MutableMapping
    if has_simplejson:
        from simplejson import JSONDecodeError
    else:
        from json import JSONDecodeError

    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
    integer_types = (int,)
82 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/util/response.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from ..packages.six.moves import http_client as httplib
3 |
4 | from ..exceptions import HeaderParsingError
5 |
6 |
def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    # Probe the candidate attributes in order of preference:
    # 1. `isclosed()` — in case Python 3 doesn't set `closed` (GH #928);
    # 2. `closed` — the official file-like-object attribute;
    # 3. `fp` — containers for another file-like object that release it
    #    on exhaustion (e.g. HTTPResponse).
    probes = (
        lambda o: o.isclosed(),
        lambda o: o.closed,
        lambda o: o.fp is None,
    )
    for probe in probes:
        try:
            return probe(obj)
        except AttributeError:
            continue

    raise ValueError("Unable to determine whether fp is closed.")
36 |
37 |
def assert_header_parsing(headers):
    """
    Asserts whether all headers have been successfully parsed.
    Extracts encountered errors from the result of parsing headers.

    Only works on Python 3.

    :param headers: Headers to verify.
    :type headers: `httplib.HTTPMessage`.

    :raises urllib3.exceptions.HeaderParsingError:
        If parsing errors are found.
    """
    # The checks below would fail silently on the wrong parameter type,
    # so reject anything that is not an HTTPMessage up front.
    if not isinstance(headers, httplib.HTTPMessage):
        raise TypeError('expected httplib.Message, got {0}.'.format(
            type(headers)))

    defects = getattr(headers, 'defects', None)
    get_payload = getattr(headers, 'get_payload', None)

    # Any payload on a header block is data the parser failed to consume.
    unparsed_data = get_payload() if get_payload else None  # Platform-specific: Python 3.

    if defects or unparsed_data:
        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
67 |
68 |
def is_response_to_head(response):
    """
    Checks whether the request of a response has been a HEAD-request.
    Handles the quirks of AppEngine.

    :param conn:
    :type conn: :class:`httplib.HTTPResponse`
    """
    # FIXME: Can we do this somehow without accessing private httplib _method?
    method = response._method
    # On AppEngine the method is reported as an integer, where HEAD is 3;
    # everywhere else it is the method name as a string.
    return method == 3 if isinstance(method, int) else method.upper() == 'HEAD'
82 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/filepost.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import codecs
3 |
4 | from uuid import uuid4
5 | from io import BytesIO
6 |
7 | from .packages import six
8 | from .packages.six import b
9 | from .fields import RequestField
10 |
11 | writer = codecs.lookup('utf-8')[3]
12 |
13 |
def choose_boundary():
    """
    Our embarrassingly-simple replacement for mimetools.choose_boundary.
    """
    # A random UUID rendered as 32 lowercase hex digits (identical to
    # uuid4().hex) is unique enough for a multipart boundary.
    return "%032x" % uuid4().int
19 |
20 |
def iter_field_objects(fields):
    """
    Iterate over fields.

    Supports list of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.

    """
    # Normalise the input to an iterator of items.
    source = six.iteritems(fields) if isinstance(fields, dict) else iter(fields)

    for item in source:
        # Pass RequestField instances through; promote (k, v) tuples.
        if isinstance(item, RequestField):
            yield item
        else:
            yield RequestField.from_tuples(*item)
39 |
40 |
def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields.

    The addition of :class:`~urllib3.fields.RequestField` makes this function
    obsolete. Instead, use :func:`iter_field_objects`, which returns
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else fields
    return ((name, value) for name, value in source)
57 |
58 |
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    """
    buf = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for field in iter_field_objects(fields):
        # Each part starts with the boundary marker and its headers.
        buf.write(b('--%s\r\n' % (boundary)))
        writer(buf).write(field.render_headers())

        data = field.data
        if isinstance(data, int):
            # Backwards compatibility: allow bare ints as field data.
            data = str(data)

        # Text goes through the utf-8 writer; raw bytes are written as-is.
        if isinstance(data, six.text_type):
            writer(buf).write(data)
        else:
            buf.write(data)

        buf.write(b'\r\n')

    # Closing boundary marker.
    buf.write(b('--%s--\r\n' % (boundary)))

    content_type = str('multipart/form-data; boundary=%s' % boundary)

    return buf.getvalue(), content_type
95 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/cli/chardetect.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Script which takes one or more file paths and reports on their detected
4 | encodings
5 |
6 | Example::
7 |
8 | % chardetect somefile someotherfile
9 | somefile: windows-1252 with confidence 0.5
10 | someotherfile: ascii with confidence 1.0
11 |
12 | If no paths are provided, it takes its input from stdin.
13 |
14 | """
15 |
16 | from __future__ import absolute_import, print_function, unicode_literals
17 |
18 | import argparse
19 | import sys
20 |
21 | from chardet import __version__
22 | from chardet.compat import PY2
23 | from chardet.universaldetector import UniversalDetector
24 |
25 |
def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    detector = UniversalDetector()
    for raw_line in lines:
        detector.feed(bytearray(raw_line))
        # Stop reading as soon as the detector is certain - particularly
        # useful when a BOM settles the question on the first line.
        if detector.done:
            break
    detector.close()
    result = detector.result
    if PY2:
        name = name.decode(sys.getfilesystemencoding(), 'ignore')
    if not result['encoding']:
        return '{0}: no result'.format(name)
    return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
                                                 result['confidence'])
52 |
53 |
def main(argv=None):
    """
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    """
    # Get command line arguments
    parser = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected \
                     encodings")
    parser.add_argument('input',
                        help='File whose encoding we would like to determine. \
                              (default: stdin)',
                        type=argparse.FileType('rb'), nargs='*',
                        default=[sys.stdin if PY2 else sys.stdin.buffer])
    parser.add_argument('--version', action='version',
                        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    for f in args.input:
        # Warn interactive users, since reading from a TTY blocks until EOF.
        if f.isatty():
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)
        print(description_of(f, f.name))


if __name__ == '__main__':
    main()
86 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/utf8prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .charsetprober import CharSetProber
29 | from .enums import ProbingState, MachineState
30 | from .codingstatemachine import CodingStateMachine
31 | from .mbcssm import UTF8_SM_MODEL
32 |
33 |
34 |
class UTF8Prober(CharSetProber):
    """Prober that recognises UTF-8 byte sequences."""

    # Per-multi-byte-character weight used by get_confidence().
    ONE_CHAR_PROB = 0.5

    def __init__(self):
        super(UTF8Prober, self).__init__()
        self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
        self._num_mb_chars = None
        self.reset()

    def reset(self):
        super(UTF8Prober, self).reset()
        self.coding_sm.reset()
        self._num_mb_chars = 0

    @property
    def charset_name(self):
        return "utf-8"

    @property
    def language(self):
        return ""

    def feed(self, byte_str):
        for byte in byte_str:
            coding_state = self.coding_sm.next_state(byte)
            if coding_state == MachineState.ERROR:
                # Illegal UTF-8 sequence: rule this encoding out.
                self._state = ProbingState.NOT_ME
                break
            if coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            if (coding_state == MachineState.START and
                    self.coding_sm.get_current_charlen() >= 2):
                # A complete multi-byte character was just consumed.
                self._num_mb_chars += 1

        if (self.state == ProbingState.DETECTING and
                self.get_confidence() > self.SHORTCUT_THRESHOLD):
            self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        unlike = 0.99
        if self._num_mb_chars >= 6:
            # Plenty of multi-byte characters: very likely UTF-8.
            return unlike
        # Confidence grows with each multi-byte character seen.
        return 1.0 - unlike * (self.ONE_CHAR_PROB ** self._num_mb_chars)
83 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | urllib3 - Thread-safe connection pooling and re-using.
3 | """
4 |
5 | from __future__ import absolute_import
6 | import warnings
7 |
8 | from .connectionpool import (
9 | HTTPConnectionPool,
10 | HTTPSConnectionPool,
11 | connection_from_url
12 | )
13 |
14 | from . import exceptions
15 | from .filepost import encode_multipart_formdata
16 | from .poolmanager import PoolManager, ProxyManager, proxy_from_url
17 | from .response import HTTPResponse
18 | from .util.request import make_headers
19 | from .util.url import get_host
20 | from .util.timeout import Timeout
21 | from .util.retry import Retry
22 |
23 |
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Python < 2.7 has no NullHandler; provide a minimal stand-in.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.22'

# Names re-exported as the package's public API.
__all__ = (
    'HTTPConnectionPool',
    'HTTPSConnectionPool',
    'PoolManager',
    'ProxyManager',
    'HTTPResponse',
    'Retry',
    'Timeout',
    'add_stderr_logger',
    'connection_from_url',
    'disable_warnings',
    'encode_multipart_formdata',
    'get_host',
    'make_headers',
    'proxy_from_url',
)

logging.getLogger(__name__).addHandler(NullHandler())
55 |
56 |
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    target = logging.getLogger(__name__)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    target.addHandler(stream_handler)
    target.setLevel(level)
    target.debug('Added a stderr logging handler to logger: %s', __name__)
    return stream_handler
73 |
74 |
# ... Clean up.
# The handler class was only needed at import time above; drop the name.
del NullHandler


# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
                      append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
91 |
92 |
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.

    :param category: Warning category to ignore; defaults to
        ``exceptions.HTTPWarning``.
    """
    warnings.simplefilter('ignore', category)
98 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/structures.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests.structures
5 | ~~~~~~~~~~~~~~~~~~~
6 |
7 | Data structures that power Requests.
8 | """
9 |
10 | from collections import OrderedDict
11 |
12 | from .compat import Mapping, MutableMapping
13 |
14 |
class CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        self._store = OrderedDict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Index by the lowercased key, but remember the original
        # spelling alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        cased_key, value = self._store[key.lower()]
        return value

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the keys with their original casing.
        for cased_key, _ in self._store.values():
            yield cased_key

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return ((lowerkey, pair[1]) for lowerkey, pair in self._store.items())

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Compare insensitively, via the lowercased key views.
        other = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
87 |
88 |
class LookupDict(dict):
    """Dictionary lookup object.

    A ``dict`` subclass whose item access reads instance attributes
    (``self.__dict__``) rather than dict entries, and returns ``None``
    for missing keys instead of raising ``KeyError``.
    """

    def __init__(self, name=None):
        # Label used only in the repr output.
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        # Bug fix: the format string had lost its placeholder
        # ('' % (self.name) raised TypeError); restore the canonical
        # "<lookup 'name'>" representation.
        return '<lookup \'%s\'>' % (self.name)

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
106 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/mbcharsetprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | # Proofpoint, Inc.
13 | #
14 | # This library is free software; you can redistribute it and/or
15 | # modify it under the terms of the GNU Lesser General Public
16 | # License as published by the Free Software Foundation; either
17 | # version 2.1 of the License, or (at your option) any later version.
18 | #
19 | # This library is distributed in the hope that it will be useful,
20 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
21 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 | # Lesser General Public License for more details.
23 | #
24 | # You should have received a copy of the GNU Lesser General Public
25 | # License along with this library; if not, write to the Free Software
26 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
27 | # 02110-1301 USA
28 | ######################### END LICENSE BLOCK #########################
29 |
30 | from .charsetprober import CharSetProber
31 | from .enums import ProbingState, MachineState
32 |
33 |
class MultiByteCharSetProber(CharSetProber):
    """
    Base class for probers of multi-byte encodings.

    Subclasses assign a ``coding_sm`` state machine (byte-sequence
    legality) and a ``distribution_analyzer`` (character frequency);
    this class drives both over the fed bytes and combines their
    verdicts into a probing state and a confidence value.
    """

    def __init__(self, lang_filter=None):
        super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
        # Both are set by subclasses before the prober is used.
        self.distribution_analyzer = None
        self.coding_sm = None
        # Two-byte window carried across feed() calls: slot 0 holds the
        # last byte of the previous call, slot 1 the first byte of the
        # current call, so a character spanning two calls is still
        # analyzed.
        self._last_char = [0, 0]

    def reset(self):
        """Restore the prober (and its helpers, if set) to the initial state."""
        super(MultiByteCharSetProber, self).reset()
        if self.coding_sm:
            self.coding_sm.reset()
        if self.distribution_analyzer:
            self.distribution_analyzer.reset()
        self._last_char = [0, 0]

    @property
    def charset_name(self):
        # Subclasses must report the encoding they probe for.
        raise NotImplementedError

    @property
    def language(self):
        # Subclasses must report the language of that encoding.
        raise NotImplementedError

    def feed(self, byte_str):
        """Feed a chunk of bytes; returns the resulting probing state."""
        for i in range(len(byte_str)):
            coding_state = self.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:
                # Illegal byte sequence for this encoding: rule it out.
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                # A complete character just ended; hand the closing byte
                # pair to the distribution analyzer.
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    # Pair the first byte with the last byte of the
                    # previous feed() call.
                    self._last_char[1] = byte_str[0]
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)

        # Remember the final byte for the next feed() call.
        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            # Shortcut: declare success once enough data shows a high
            # enough confidence.
            if (self.distribution_analyzer.got_enough_data() and
                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        # Confidence comes entirely from the frequency analysis.
        return self.distribution_analyzer.get_confidence()
92 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/sbcsgroupprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | #
13 | # This library is free software; you can redistribute it and/or
14 | # modify it under the terms of the GNU Lesser General Public
15 | # License as published by the Free Software Foundation; either
16 | # version 2.1 of the License, or (at your option) any later version.
17 | #
18 | # This library is distributed in the hope that it will be useful,
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 | # Lesser General Public License for more details.
22 | #
23 | # You should have received a copy of the GNU Lesser General Public
24 | # License along with this library; if not, write to the Free Software
25 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 | # 02110-1301 USA
27 | ######################### END LICENSE BLOCK #########################
28 |
29 | from .charsetgroupprober import CharSetGroupProber
30 | from .sbcharsetprober import SingleByteCharSetProber
31 | from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
32 | Latin5CyrillicModel, MacCyrillicModel,
33 | Ibm866Model, Ibm855Model)
34 | from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
35 | from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
36 | # from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
37 | from .langthaimodel import TIS620ThaiModel
38 | from .langhebrewmodel import Win1255HebrewModel
39 | from .hebrewprober import HebrewProber
40 | from .langturkishmodel import Latin5TurkishModel
41 |
42 |
class SBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every known single-byte charset model."""

    def __init__(self):
        super(SBCSGroupProber, self).__init__()
        # One plain prober per model; list order is preserved.
        single_byte_models = (
            Win1251CyrillicModel,
            Koi8rModel,
            Latin5CyrillicModel,
            MacCyrillicModel,
            Ibm866Model,
            Ibm855Model,
            Latin7GreekModel,
            Win1253GreekModel,
            Latin5BulgarianModel,
            Win1251BulgarianModel,
            # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
            #       after we retrain model.
            # Latin2HungarianModel,
            # Win1250HungarianModel,
            TIS620ThaiModel,
            Latin5TurkishModel,
        )
        self.probers = [SingleByteCharSetProber(model)
                        for model in single_byte_models]
        # Hebrew needs an extra arbiter prober that decides between the
        # logical and visual variants of windows-1255.
        hebrew_prober = HebrewProber()
        logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                                        False, hebrew_prober)
        visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                                       True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew_prober,
                                        visual_hebrew_prober)
        self.probers.extend([hebrew_prober, logical_hebrew_prober,
                             visual_hebrew_prober])

        self.reset()
74 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/idna/codec.py:
--------------------------------------------------------------------------------
1 | from .core import encode, decode, alabel, ulabel, IDNAError
2 | import codecs
3 | import re
4 |
5 | _unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
6 |
class Codec(codecs.Codec):
    """Stateless whole-string IDNA codec; only 'strict' error handling."""

    def encode(self, data, errors='strict'):
        """Encode *data* via idna.encode; returns (output, chars consumed)."""
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
        if not data:
            return "", 0
        return encode(data), len(data)

    def decode(self, data, errors='strict'):
        """Decode *data* via idna.decode; returns (output, chars consumed)."""
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
        if not data:
            return u"", 0
        return decode(data), len(data)
28 |
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
    # Encodes a dotted name label-by-label so input may arrive in chunks;
    # an unterminated trailing label is buffered until the next call unless
    # ``final`` is true.
    def _buffer_encode(self, data, errors, final):
        # Only strict error handling is supported.
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return ("", 0)

        # Split on any of the four recognized dot separators.
        labels = _unicode_dots_re.split(data)
        trailing_dot = u''
        if labels:
            if not labels[-1]:
                # Input ended with a dot: emit it and drop the empty label.
                trailing_dot = '.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = '.'

        result = []
        size = 0
        for label in labels:
            result.append(alabel(label))
            if size:
                size += 1  # account for the separator consumed between labels
            size += len(label)

        # Join with U+002E
        # NOTE(review): alabel returns bytes while the join string is str —
        # this mixing only works on Python 2; confirm the bundle never runs
        # this codec on Python 3.
        result = ".".join(result) + trailing_dot
        size += len(trailing_dot)
        # ``size`` reports how many input characters were consumed.
        return (result, size)
61 |
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # Decodes A-label input label-by-label; chunked input is supported by
    # buffering an unterminated trailing label until the next call.
    # NOTE(review): uses the Python 2 ``unicode`` builtin below, so this
    # vendored copy is Python-2-only.
    def _buffer_decode(self, data, errors, final):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return (u"", 0)

        # IDNA allows decoding to operate on Unicode strings, too.
        if isinstance(data, unicode):
            labels = _unicode_dots_re.split(data)
        else:
            # Must be ASCII string
            data = str(data)
            # Validation only — the converted value is discarded.
            unicode(data, "ascii")
            labels = data.split(".")

        trailing_dot = u''
        if labels:
            if not labels[-1]:
                # Input ended with a separator: emit it, drop the empty label.
                trailing_dot = u'.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = u'.'

        result = []
        size = 0
        for label in labels:
            result.append(ulabel(label))
            if size:
                size += 1  # separator consumed between labels
            size += len(label)

        result = u".".join(result) + trailing_dot
        size += len(trailing_dot)
        # ``size`` reports how many input characters were consumed.
        return (result, size)
101 |
102 |
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream interface; inherits whole-string encode from Codec.
    pass

class StreamReader(Codec, codecs.StreamReader):
    # Stream interface; inherits whole-string decode from Codec.
    pass
108 |
def getregentry():
    """Return the CodecInfo record used to register the 'idna' codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='idna',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
119 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/codingstatemachine.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | import logging
29 |
30 | from .enums import MachineState
31 |
32 |
class CodingStateMachine(object):
    """Table-driven verifier for byte sequences of one particular encoding.

    Every byte fed to :meth:`next_state` moves the machine between states,
    three of which matter to an auto-detector:

    - START: initial state, or a complete legal code point was just seen;
    - ITS_ME: a sequence unique to this encoding was seen — an immediate
      positive answer for the detector;
    - ERROR: an illegal sequence for this encoding was seen — an immediate
      negative answer; the detector drops this encoding from consideration.
    """

    def __init__(self, sm):
        # ``sm`` is the model dict; the keys used here are class_table,
        # char_len_table, class_factor, state_table, name and language.
        self._model = sm
        self._curr_byte_pos = 0
        self._curr_char_len = 0
        self._curr_state = None
        self.logger = logging.getLogger(__name__)
        self.reset()

    def reset(self):
        """Return to the START state; the model itself is untouched."""
        self._curr_state = MachineState.START

    def next_state(self, c):
        """Advance the machine by one byte ``c`` and return the new state."""
        # Classify the byte; at the start of a character also record the
        # character's expected byte length.
        byte_class = self._model['class_table'][c]
        if self._curr_state == MachineState.START:
            self._curr_byte_pos = 0
            self._curr_char_len = self._model['char_len_table'][byte_class]
        # Row-major lookup into the flattened transition table.
        transition_index = (self._curr_state * self._model['class_factor']
                            + byte_class)
        self._curr_state = self._model['state_table'][transition_index]
        self._curr_byte_pos += 1
        return self._curr_state

    def get_current_charlen(self):
        """Byte length of the character currently being assembled."""
        return self._curr_char_len

    def get_coding_state_machine(self):
        """Name of the encoding this machine verifies."""
        return self._model['name']

    @property
    def language(self):
        """Language associated with this machine's model."""
        return self._model['language']
89 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/eucjpprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .enums import ProbingState, MachineState
29 | from .mbcharsetprober import MultiByteCharSetProber
30 | from .codingstatemachine import CodingStateMachine
31 | from .chardistribution import EUCJPDistributionAnalysis
32 | from .jpcntx import EUCJPContextAnalysis
33 | from .mbcssm import EUCJP_SM_MODEL
34 |
35 |
class EUCJPProber(MultiByteCharSetProber):
    """Prober for the EUC-JP encoding, combining a byte state machine with
    character-distribution and two-character-context analyses."""

    def __init__(self):
        super(EUCJPProber, self).__init__()
        self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
        self.distribution_analyzer = EUCJPDistributionAnalysis()
        self.context_analyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        super(EUCJPProber, self).reset()
        self.context_analyzer.reset()

    @property
    def charset_name(self):
        return "EUC-JP"

    @property
    def language(self):
        return "Japanese"

    def feed(self, byte_str):
        """Feed bytes through the state machine and both analyzers; return
        the prober's (possibly updated) probing state."""
        for pos, byte in enumerate(byte_str):
            # Iteration matches indexing: ints on Python 3, chars on 2.
            coding_state = self.coding_sm.next_state(byte)
            if coding_state == MachineState.ERROR:
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, pos)
                self._state = ProbingState.NOT_ME
                break
            if coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            if coding_state == MachineState.START:
                char_len = self.coding_sm.get_current_charlen()
                if pos:
                    window = byte_str[pos - 1:pos + 1]
                    self.context_analyzer.feed(window, char_len)
                    self.distribution_analyzer.feed(window, char_len)
                else:
                    # First byte of the chunk pairs with the byte carried
                    # over from the previous chunk.
                    self._last_char[1] = byte_str[0]
                    self.context_analyzer.feed(self._last_char, char_len)
                    self.distribution_analyzer.feed(self._last_char, char_len)

        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            if (self.context_analyzer.got_enough_data() and
                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        """Best of the context and distribution confidences."""
        return max(self.context_analyzer.get_confidence(),
                   self.distribution_analyzer.get_confidence())
93 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/exceptions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests.exceptions
5 | ~~~~~~~~~~~~~~~~~~~
6 |
7 | This module contains the set of Requests' exceptions.
8 | """
9 | from urllib3.exceptions import HTTPError as BaseHTTPError
10 |
11 | from .compat import JSONDecodeError as CompatJSONDecodeError
12 |
13 |
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request.
    """

    def __init__(self, *args, **kwargs):
        """Initialize RequestException with `request` and `response` objects."""
        response = kwargs.pop('response', None)
        request = kwargs.pop('request', None)
        # When no request was supplied, fall back to the one attached to
        # the response, if any.
        if response is not None and not request and hasattr(response, 'request'):
            request = response.request
        self.response = response
        self.request = request
        super(RequestException, self).__init__(*args, **kwargs)
28 |
29 |
# --- JSON errors ---

class InvalidJSONError(RequestException):
    """A JSON error occurred."""


class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError):
    """Couldn't decode the text into json"""


# --- Transport-level errors ---

class HTTPError(RequestException):
    """An HTTP error occurred."""


class ConnectionError(RequestException):
    """A Connection error occurred."""


class ProxyError(ConnectionError):
    """A proxy error occurred."""


class SSLError(ConnectionError):
    """An SSL error occurred."""


class Timeout(RequestException):
    """The request timed out.

    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """


class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.

    Requests that produced this error are safe to retry.
    """


class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""


# --- URL / header validation errors (also subclass ValueError so callers
# --- can catch either type) ---

class URLRequired(RequestException):
    """A valid URL is required to make a request."""


class TooManyRedirects(RequestException):
    """Too many redirects."""


class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""


class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""


class InvalidURL(RequestException, ValueError):
    """The URL provided was somehow invalid."""


class InvalidHeader(RequestException, ValueError):
    """The header value provided was somehow invalid."""


class InvalidProxyURL(InvalidURL):
    """The proxy URL provided is invalid."""


# --- Response body / content errors ---

class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""


class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content."""


class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed."""


class RetryError(RequestException):
    """Custom retries logic failed"""


class UnrewindableBodyError(RequestException):
    """Requests encountered an error when trying to rewind a body."""

# Warnings


class RequestsWarning(Warning):
    """Base warning for Requests."""


class FileModeWarning(RequestsWarning, DeprecationWarning):
    """A file was opened in text mode, but Requests determined its binary length."""


class RequestsDependencyWarning(RequestsWarning):
    """An imported dependency doesn't match the expected version range."""
134 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/sjisprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import SJISDistributionAnalysis
31 | from .jpcntx import SJISContextAnalysis
32 | from .mbcssm import SJIS_SM_MODEL
33 | from .enums import ProbingState, MachineState
34 |
35 |
class SJISProber(MultiByteCharSetProber):
    """Prober for Shift_JIS-family encodings (Japanese).

    Combines a byte-level state machine with character-distribution and
    two-character-context analyses; the reported charset name is taken
    from the context analyzer.
    """
    def __init__(self):
        super(SJISProber, self).__init__()
        self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
        self.distribution_analyzer = SJISDistributionAnalysis()
        self.context_analyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        super(SJISProber, self).reset()
        self.context_analyzer.reset()

    @property
    def charset_name(self):
        # Delegated: the context analyzer decides the exact name.
        return self.context_analyzer.charset_name

    @property
    def language(self):
        return "Japanese"

    def feed(self, byte_str):
        """Consume *byte_str*, updating analyzers and probing state; returns
        the prober's (possibly updated) state."""
        for i in range(len(byte_str)):
            coding_state = self.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:
                # Illegal byte sequence for this encoding: rule it out.
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                # A complete character just finished.
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    self._last_char[1] = byte_str[0]
                    self.context_analyzer.feed(self._last_char[2 - char_len:],
                                               char_len)
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    # NOTE(review): the context analyzer window is shifted by
                    # char_len relative to the distribution window — assumed
                    # intentional for SJIS lead/trail byte handling; confirm
                    # against upstream chardet before changing.
                    self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
                                               - char_len], char_len)
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)

        # Remember the final byte for the next chunk.
        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            # Shortcut: enough context data plus high confidence ends probing.
            if (self.context_analyzer.got_enough_data() and
               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        """Best of the context and distribution confidences."""
        context_conf = self.context_analyzer.get_confidence()
        distrib_conf = self.distribution_analyzer.get_confidence()
        return max(context_conf, distrib_conf)
93 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/util/request.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from base64 import b64encode
3 |
4 | from ..packages.six import b, integer_types
5 | from ..exceptions import UnrewindableBodyError
6 |
7 | ACCEPT_ENCODING = 'gzip,deflate'
8 | _FAILEDTELL = object()
9 |
10 |
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    :param proxy_basic_auth:
        Colon-separated username:password string for 'proxy-authorization: basic ...'
        auth header.

    :param disable_cache:
        If ``True``, adds 'cache-control: no-cache' header.

    Example::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}

    if accept_encoding:
        # Normalize to a single comma-separated string; any other truthy
        # non-string value selects the default encodings.
        if isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        elif not isinstance(accept_encoding, str):
            accept_encoding = ACCEPT_ENCODING
        headers['accept-encoding'] = accept_encoding

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        credentials = b64encode(b(basic_auth)).decode('utf-8')
        headers['authorization'] = 'Basic ' + credentials

    if proxy_basic_auth:
        credentials = b64encode(b(proxy_basic_auth)).decode('utf-8')
        headers['proxy-authorization'] = 'Basic ' + credentials

    if disable_cache:
        headers['cache-control'] = 'no-cache'

    return headers
75 |
76 |
def set_file_position(body, pos):
    """
    If a position is provided, move file to that point.
    Otherwise, we'll attempt to record a position for future use.
    """
    if pos is not None:
        rewind_body(body, pos)
        return pos

    tell = getattr(body, 'tell', None)
    if tell is None:
        return pos
    try:
        return tell()
    except (IOError, OSError):
        # This differentiates from None, allowing us to catch
        # a failed `tell()` later when trying to rewind the body.
        return _FAILEDTELL
93 |
94 |
def rewind_body(body, body_pos):
    """
    Attempt to rewind body to a certain position.
    Primarily used for request redirects and retries.

    :param body:
        File-like object that supports seek.

    :param int pos:
        Position to seek to in file.
    """
    body_seek = getattr(body, 'seek', None)
    if body_seek is not None and isinstance(body_pos, integer_types):
        try:
            body_seek(body_pos)
        except (IOError, OSError):
            raise UnrewindableBodyError("An error occurred when rewinding request "
                                        "body for redirect/retry.")
        return
    # No usable seek/position pair: distinguish "tell() failed earlier"
    # from a caller-supplied non-integer position.
    if body_pos is _FAILEDTELL:
        raise UnrewindableBodyError("Unable to record file position for rewinding "
                                    "request body during a redirect/retry.")
    raise ValueError("body_pos must be of type integer, "
                     "instead it was %s." % type(body_pos))
119 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/charsetgroupprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Communicator client code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .enums import ProbingState
29 | from .charsetprober import CharSetProber
30 |
31 |
class CharSetGroupProber(CharSetProber):
    """Runs a collection of child probers and reports the best candidate."""

    def __init__(self, lang_filter=None):
        super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
        self._active_num = 0
        self.probers = []
        self._best_guess_prober = None

    def reset(self):
        """Reset every child prober and mark them all active again."""
        super(CharSetGroupProber, self).reset()
        self._active_num = 0
        for prober in self.probers:
            if not prober:
                continue
            prober.reset()
            prober.active = True
            self._active_num += 1
        self._best_guess_prober = None

    @property
    def charset_name(self):
        # Lazily pick a best guess if none has been chosen yet.
        if not self._best_guess_prober:
            self.get_confidence()
        best = self._best_guess_prober
        return best.charset_name if best else None

    @property
    def language(self):
        # Lazily pick a best guess if none has been chosen yet.
        if not self._best_guess_prober:
            self.get_confidence()
        best = self._best_guess_prober
        return best.language if best else None

    def feed(self, byte_str):
        """Forward *byte_str* to every active child; deactivate children
        that rule themselves out and stop early on a definite hit."""
        for prober in self.probers:
            if not prober or not prober.active:
                continue
            child_state = prober.feed(byte_str)
            if not child_state:
                continue
            if child_state == ProbingState.FOUND_IT:
                self._best_guess_prober = prober
                return self.state
            if child_state == ProbingState.NOT_ME:
                prober.active = False
                self._active_num -= 1
                if self._active_num <= 0:
                    self._state = ProbingState.NOT_ME
                    return self.state
        return self.state

    def get_confidence(self):
        """Return the best child confidence, remembering which child won."""
        state = self.state
        if state == ProbingState.FOUND_IT:
            return 0.99
        if state == ProbingState.NOT_ME:
            return 0.01
        best_conf = 0.0
        self._best_guess_prober = None
        for prober in self.probers:
            if not prober:
                continue
            if not prober.active:
                self.logger.debug('%s not active', prober.charset_name)
                continue
            conf = prober.get_confidence()
            self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
            if conf > best_conf:
                best_conf = conf
                self._best_guess_prober = prober
        if not self._best_guess_prober:
            return 0.0
        return best_conf
107 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/escprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .charsetprober import CharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .enums import LanguageFilter, ProbingState, MachineState
31 | from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
32 | ISO2022KR_SM_MODEL)
33 |
34 |
class EscCharSetProber(CharSetProber):
    """
    Detects encodings that announce themselves through easily recognizable
    escape or shift sequences (HZ, ISO-2022-CN/JP/KR) by running one small
    state machine per candidate encoding.
    """

    def __init__(self, lang_filter=None):
        super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
        self.coding_sm = []
        # Only instantiate machines for languages the filter allows.
        if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
            self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
            self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL))
        if self.lang_filter & LanguageFilter.JAPANESE:
            self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
        if self.lang_filter & LanguageFilter.KOREAN:
            self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
        self.active_sm_count = None
        self._detected_charset = None
        self._detected_language = None
        self._state = None
        self.reset()

    def reset(self):
        """Reactivate and reset every state machine; clear any detection."""
        super(EscCharSetProber, self).reset()
        for coding_sm in self.coding_sm:
            if not coding_sm:
                continue
            coding_sm.active = True
            coding_sm.reset()
        self.active_sm_count = len(self.coding_sm)
        self._detected_charset = None
        self._detected_language = None

    @property
    def charset_name(self):
        return self._detected_charset

    @property
    def language(self):
        return self._detected_language

    def get_confidence(self):
        # A code-scheme hit is essentially certain; otherwise no evidence.
        return 0.99 if self._detected_charset else 0.00

    def feed(self, byte_str):
        """Feed each byte to every active machine; a single ITS_ME hit
        decides the charset, and all machines erroring rules this prober out."""
        for byte in byte_str:
            for coding_sm in self.coding_sm:
                if not coding_sm or not coding_sm.active:
                    continue
                coding_state = coding_sm.next_state(byte)
                if coding_state == MachineState.ITS_ME:
                    self._state = ProbingState.FOUND_IT
                    self._detected_charset = coding_sm.get_coding_state_machine()
                    self._detected_language = coding_sm.language
                    return self.state
                if coding_state == MachineState.ERROR:
                    coding_sm.active = False
                    self.active_sm_count -= 1
                    if self.active_sm_count <= 0:
                        self._state = ProbingState.NOT_ME
                        return self.state

        return self.state
102 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/help.py:
--------------------------------------------------------------------------------
1 | """Module containing bug report helper(s)."""
2 | from __future__ import print_function
3 |
4 | import json
5 | import platform
6 | import sys
7 | import ssl
8 |
9 | import idna
10 | import urllib3
11 |
12 | from . import __version__ as requests_version
13 |
14 | try:
15 | import charset_normalizer
16 | except ImportError:
17 | charset_normalizer = None
18 |
19 | try:
20 | import chardet
21 | except ImportError:
22 | chardet = None
23 |
24 | try:
25 | from urllib3.contrib import pyopenssl
26 | except ImportError:
27 | pyopenssl = None
28 | OpenSSL = None
29 | cryptography = None
30 | else:
31 | import OpenSSL
32 | import cryptography
33 |
34 |
def _implementation():
    """Return a dict with the Python implementation and version.

    Provide both the name and the version of the Python implementation
    currently running, e.g. ``{'name': 'CPython', 'version': '2.7.5'}``.

    Works best on CPython and PyPy; for Jython and IronPython the
    reported version is a best guess.
    """
    name = platform.python_implementation()

    if name == 'CPython':
        version = platform.python_version()
    elif name == 'PyPy':
        pypy = sys.pypy_version_info
        version = '%s.%s.%s' % (pypy.major, pypy.minor, pypy.micro)
        # Non-final PyPy builds append the release level (e.g. 'beta1').
        if pypy.releaselevel != 'final':
            version += pypy.releaselevel
    elif name in ('Jython', 'IronPython'):
        version = platform.python_version()  # Complete Guess
    else:
        version = 'Unknown'

    return {'name': name, 'version': version}
66 |
67 |
def info():
    """Generate information for a bug report.

    Collects platform, interpreter, and dependency-version details into a
    nested dict suitable for JSON serialization.
    """
    try:
        platform_info = {
            'system': platform.system(),
            'release': platform.release(),
        }
    except IOError:
        # Some locked-down platforms refuse to report system details.
        platform_info = {'system': 'Unknown', 'release': 'Unknown'}

    charset_normalizer_info = {
        'version': charset_normalizer.__version__ if charset_normalizer else None,
    }
    chardet_info = {
        'version': chardet.__version__ if chardet else None,
    }

    if OpenSSL:
        pyopenssl_info = {
            'version': OpenSSL.__version__,
            'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
        }
    else:
        pyopenssl_info = {'version': None, 'openssl_version': ''}

    system_ssl = ssl.OPENSSL_VERSION_NUMBER
    system_ssl_info = {
        'version': '%x' % system_ssl if system_ssl is not None else ''
    }

    return {
        'platform': platform_info,
        'implementation': _implementation(),
        'system_ssl': system_ssl_info,
        'using_pyopenssl': pyopenssl is not None,
        # chardet missing implies the charset_normalizer fallback is in use.
        'using_charset_normalizer': chardet is None,
        'pyOpenSSL': pyopenssl_info,
        'urllib3': {'version': urllib3.__version__},
        'chardet': chardet_info,
        'charset_normalizer': charset_normalizer_info,
        'cryptography': {'version': getattr(cryptography, '__version__', '')},
        'idna': {'version': getattr(idna, '__version__', '')},
        'requests': {
            'version': requests_version,
        },
    }
127 |
128 |
def main():
    """Pretty-print the bug information as JSON."""
    report = info()
    print(json.dumps(report, sort_keys=True, indent=2))


if __name__ == '__main__':
    main()
136 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/status_codes.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | r"""
4 | The ``codes`` object defines a mapping from common names for HTTP statuses
5 | to their numerical codes, accessible either as attributes or as dictionary
6 | items.
7 |
8 | Example::
9 |
10 | >>> import requests
11 | >>> requests.codes['temporary_redirect']
12 | 307
13 | >>> requests.codes.teapot
14 | 418
15 | >>> requests.codes['\o/']
16 | 200
17 |
18 | Some codes have multiple names, and both upper- and lower-case versions of
19 | the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
20 | ``codes.okay`` all correspond to the HTTP status code 200.
21 | """
22 |
23 | from .structures import LookupDict
24 |
# Status-code number -> tuple of snake_case aliases.  The first alias in
# each tuple is the canonical name; _init() below also registers the
# upper-case form of every alias that does not start with '\' or '/'.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    421: ('misdirected_request',),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    # NOTE(review): 'precondition' is also an alias of 412 above; whichever
    # entry _init() processes last wins the attribute -- confirm intended.
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
    511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}

# Attribute-style lookup object; populated from _codes by _init() below.
codes = LookupDict(name='status_codes')
106 |
def _init():
    """Register every alias from ``_codes`` on the ``codes`` LookupDict and
    append a generated reference list to the module docstring."""
    for status, aliases in _codes.items():
        for alias in aliases:
            setattr(codes, alias, status)
            # Symbolic aliases like '\\o/' have no meaningful upper-case form.
            if not alias.startswith(('\\', '/')):
                setattr(codes, alias.upper(), status)

    def _describe(status):
        joined = ', '.join('``%s``' % n for n in _codes[status])
        return '* %d: %s' % (status, joined)

    global __doc__
    if __doc__ is not None:
        listing = '\n'.join(_describe(status) for status in sorted(_codes))
        __doc__ = __doc__ + '\n' + listing
    else:
        # Running under -OO strips docstrings; leave it absent.
        __doc__ = None

_init()
124 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/util/connection.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import socket
3 | from .wait import wait_for_read
4 | from .selectors import HAS_SELECT, SelectorError
5 |
6 |
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    underlying = getattr(conn, 'sock', False)
    # AppEngine-style connections expose no socket attribute at all.
    if underlying is False:
        return False
    # httplib has already torn the socket down.
    if underlying is None:
        return True

    if not HAS_SELECT:
        return False

    try:
        readable = wait_for_read(underlying, timeout=0.0)
    except SelectorError:
        return True
    return bool(readable)
30 |
31 |
32 | # This function is copied from socket.py in the Python 2.7 standard
33 | # library test suite. Added to its signature is only `socket_options`.
34 | # One additional modification is that we avoid binding to IPv6 servers
35 | # discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """

    host, port = address
    # Strip the brackets from an IPv6 literal such as "[::1]".
    if host.startswith('['):
        host = host.strip('[]')
    err = None

    # Using the value from allowed_gai_family() in the context of getaddrinfo lets
    # us select whether to work with IPv4 DNS records, IPv6 records, or both.
    # The original create_connection function always returns all records.
    family = allowed_gai_family()

    # Try each resolved address in turn; the first successful connect wins.
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)

            # If provided, set socket level options before connecting.
            _set_socket_options(sock, socket_options)

            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except socket.error as e:
            # Remember the most recent failure and fall through to the
            # next candidate address.
            err = e
            if sock is not None:
                sock.close()
                sock = None

    if err is not None:
        # Every candidate address failed; surface the last error seen.
        raise err

    raise socket.error("getaddrinfo returns an empty list")
86 |
87 |
def _set_socket_options(sock, options):
    """Apply each ``(level, optname, value)`` tuple in *options* to *sock*;
    a ``None`` options value means there is nothing to set."""
    if options is None:
        return

    for option in options:
        sock.setsockopt(*option)
94 |
95 |
def allowed_gai_family():
    """Return the address family to pass to ``getaddrinfo``.

    ``AF_UNSPEC`` (both IPv4 and IPv6 records) when the system supports
    IPv6, otherwise ``AF_INET`` so we never bind to unusable IPv6 results.
    """
    if HAS_IPV6:
        return socket.AF_UNSPEC
    return socket.AF_INET
105 |
106 |
def _has_ipv6(host):
    """Return True if the system can actually bind an IPv6 address."""
    probe = None
    result = False

    if socket.has_ipv6:
        # has_ipv6 returns true if cPython was compiled with IPv6 support.
        # It does not tell us if the system has IPv6 support enabled. To
        # determine that we must bind to an IPv6 address.
        # https://github.com/shazow/urllib3/pull/611
        # https://bugs.python.org/issue658327
        try:
            probe = socket.socket(socket.AF_INET6)
            probe.bind((host, 0))
            result = True
        except Exception:
            # Any failure (no stack, no address, permissions) means no IPv6.
            pass

        if probe:
            probe.close()
    return result


HAS_IPV6 = _has_ipv6('::1')
131 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/contrib/ntlmpool.py:
--------------------------------------------------------------------------------
1 | """
2 | NTLM authenticating pool, contributed by erikcederstran
3 |
4 | Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
5 | """
6 | from __future__ import absolute_import
7 |
8 | from logging import getLogger
9 | from ntlm import ntlm
10 |
11 | from .. import HTTPSConnectionPool
12 | from ..packages.six.moves.http_client import HTTPSConnection
13 |
14 |
15 | log = getLogger(__name__)
16 |
17 |
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool
    """

    # NTLM authenticates the TLS-secured socket itself.
    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # Split 'DOMAIN\\user' into its halves; NTLM expects the domain
        # in upper case.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
                  self.num_connections, self.host, self.authurl)

        headers = {}
        # The multi-step handshake must ride on one socket, so the
        # connection is kept alive throughout.
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s', headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s', res.status, res.reason)
        log.debug('Response headers: %s', reshdr)
        log.debug('Response data: %s [...]', res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s', headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s', res.status, res.reason)
        log.debug('Response headers: %s', dict(res.getheaders()))
        log.debug('Response data: %s [...]', res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        # Detach the socket from this response as well before handing the
        # authenticated connection back to the pool.
        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        # Every request must keep the connection alive: NTLM authenticates
        # the socket, not the individual request.
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
113 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # __
4 | # /__) _ _ _ _ _/ _
5 | # / ( (- (/ (/ (- _) / _)
6 | # /
7 |
8 | """
9 | Requests HTTP Library
10 | ~~~~~~~~~~~~~~~~~~~~~
11 |
12 | Requests is an HTTP library, written in Python, for human beings.
13 | Basic GET usage:
14 |
15 | >>> import requests
16 | >>> r = requests.get('https://www.python.org')
17 | >>> r.status_code
18 | 200
19 | >>> b'Python is a programming language' in r.content
20 | True
21 |
22 | ... or POST:
23 |
24 | >>> payload = dict(key1='value1', key2='value2')
25 | >>> r = requests.post('https://httpbin.org/post', data=payload)
26 | >>> print(r.text)
27 | {
28 | ...
29 | "form": {
30 | "key1": "value1",
31 | "key2": "value2"
32 | },
33 | ...
34 | }
35 |
36 | The other HTTP methods are supported - see `requests.api`. Full documentation
37 | is at .
38 |
39 | :copyright: (c) 2017 by Kenneth Reitz.
40 | :license: Apache 2.0, see LICENSE for more details.
41 | """
42 |
43 | import urllib3
44 | import warnings
45 | from .exceptions import RequestsDependencyWarning
46 |
47 | try:
48 | from charset_normalizer import __version__ as charset_normalizer_version
49 | except ImportError:
50 | charset_normalizer_version = None
51 |
52 | try:
53 | from chardet import __version__ as chardet_version
54 | except ImportError:
55 | chardet_version = None
56 |
def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
    """Verify the installed urllib3 and chardet/charset_normalizer versions
    fall in requests' supported ranges.

    Raises AssertionError (or ValueError for unparseable versions) on a
    mismatch; the caller turns either into a RequestsDependencyWarning.
    """
    parts = urllib3_version.split('.')
    assert parts != ['dev']  # Verify urllib3 isn't installed from git.

    # Sometimes, urllib3 only reports its version as 16.1.
    if len(parts) == 2:
        parts.append('0')

    # urllib3 >= 1.21.1, <= 1.26
    major, minor, patch = (int(p) for p in parts)  # noqa: F811
    assert major == 1
    assert 21 <= minor <= 26

    if chardet_version:
        # chardet_version >= 3.0.2, < 5.0.0
        major, minor, patch = (int(p) for p in chardet_version.split('.')[:3])
        assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0)
    elif charset_normalizer_version:
        # charset_normalizer >= 2.0.0 < 3.0.0
        major, minor, patch = (int(p) for p in charset_normalizer_version.split('.')[:3])
        assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0)
    else:
        raise Exception("You need either charset_normalizer or chardet installed")
86 |
def _check_cryptography(cryptography_version):
    """Warn when the installed cryptography package predates 1.3.4."""
    try:
        parsed = [int(piece) for piece in cryptography_version.split('.')]
    except ValueError:
        # Non-numeric version string (e.g. a dev build): nothing to compare.
        return

    if parsed < [1, 3, 4]:
        warning = 'Old version of cryptography ({}) may cause slowdown.'.format(parsed)
        warnings.warn(warning, RequestsDependencyWarning)
97 |
# Check imported dependencies for compatibility.
# A mismatch only warns (never raises) so requests stays importable even
# alongside unsupported dependency versions.
try:
    check_compatibility(urllib3.__version__, chardet_version, charset_normalizer_version)
except (AssertionError, ValueError):
    warnings.warn("urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
                  "version!".format(urllib3.__version__, chardet_version, charset_normalizer_version),
                  RequestsDependencyWarning)

# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
# 'ssl' library isn't available.
try:
    try:
        import ssl
    except ImportError:
        ssl = None

    if not getattr(ssl, "HAS_SNI", False):
        from urllib3.contrib import pyopenssl
        pyopenssl.inject_into_urllib3()

    # Check cryptography version
    from cryptography import __version__ as cryptography_version
    _check_cryptography(cryptography_version)
except ImportError:
    # pyOpenSSL/cryptography are optional; without them we rely on the
    # standard library's ssl module.
    pass

# urllib3's DependencyWarnings should be silenced.
from urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
128 |
129 | from .__version__ import __title__, __description__, __url__, __version__
130 | from .__version__ import __build__, __author__, __author_email__, __license__
131 | from .__version__ import __copyright__, __cake__
132 |
133 | from . import utils
134 | from . import packages
135 | from .models import Request, Response, PreparedRequest
136 | from .api import request, get, head, post, patch, put, delete, options
137 | from .sessions import session, Session
138 | from .status_codes import codes
139 | from .exceptions import (
140 | RequestException, Timeout, URLRequired,
141 | TooManyRedirects, HTTPError, ConnectionError,
142 | FileModeWarning, ConnectTimeout, ReadTimeout, JSONDecodeError
143 | )
144 |
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler

# Attach a no-op handler so the library stays silent unless the
# application configures logging itself.
logging.getLogger(__name__).addHandler(NullHandler())

# FileModeWarnings go off per the default.
# append=True keeps any filters the application installed ahead of ours.
warnings.simplefilter('default', FileModeWarning, append=True)
153 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/charsetprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | #
13 | # This library is free software; you can redistribute it and/or
14 | # modify it under the terms of the GNU Lesser General Public
15 | # License as published by the Free Software Foundation; either
16 | # version 2.1 of the License, or (at your option) any later version.
17 | #
18 | # This library is distributed in the hope that it will be useful,
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 | # Lesser General Public License for more details.
22 | #
23 | # You should have received a copy of the GNU Lesser General Public
24 | # License along with this library; if not, write to the Free Software
25 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 | # 02110-1301 USA
27 | ######################### END LICENSE BLOCK #########################
28 |
29 | import logging
30 | import re
31 |
32 | from .enums import ProbingState
33 |
34 |
class CharSetProber(object):
    """Base class for all charset probers.

    Holds the shared probing state machine (DETECTING / FOUND_IT / NOT_ME)
    and provides static byte-filtering helpers used by subclasses.
    """

    SHORTCUT_THRESHOLD = 0.95

    def __init__(self, lang_filter=None):
        self._state = None
        self.lang_filter = lang_filter
        self.logger = logging.getLogger(__name__)

    def reset(self):
        """Return the prober to its initial, undecided state."""
        self._state = ProbingState.DETECTING

    @property
    def charset_name(self):
        # The base prober has no opinion; subclasses report a real name.
        return None

    def feed(self, buf):
        # No-op here; subclasses consume bytes and update their state.
        pass

    @property
    def state(self):
        return self._state

    def get_confidence(self):
        # The base prober is never confident about anything.
        return 0.0

    @staticmethod
    def filter_high_byte_only(buf):
        """Collapse every run of ASCII bytes in *buf* into a single space,
        leaving only the high-byte (>= 0x80) characters intact."""
        return re.sub(b'([\x00-\x7F])+', b' ', buf)

    @staticmethod
    def filter_international_words(buf):
        """
        Keep only the "international" words of *buf*.

        A word is a marker-delimited run of bytes; it is kept when it
        contains at least one byte >= 0x80.  A single trailing marker byte
        on a kept word is normalized to an ASCII space, since markers are
        used similarly across languages and would only add noise.
        """
        result = bytearray()

        # Words with at least one international byte, optionally followed
        # by one trailing marker byte.
        matches = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
                             buf)

        for match in matches:
            result.extend(match[:-1])

            tail = match[-1:]
            # Replace a trailing marker with a space so markers do not
            # skew the frequency analysis.
            if not tail.isalpha() and tail < b'\x80':
                tail = b' '
            result.extend(tail)

        return result

    @staticmethod
    def filter_with_english_letters(buf):
        """
        Return a copy of *buf* keeping only stretches of alphabetic and
        high-byte characters that lie outside ``<...>`` tags; each kept
        stretch is terminated by a single space.  Currently only used by
        ``Latin1Prober``.
        """
        result = bytearray()
        in_tag = False
        start = 0

        for idx in range(len(buf)):
            # Slice instead of indexing so we get bytes on both Python 2 and 3.
            ch = buf[idx:idx + 1]

            # Track whether we are inside an HTML-style tag.
            if ch == b'>':
                in_tag = False
            elif ch == b'<':
                in_tag = True

            # Alphabetic or extended-ASCII bytes extend the current stretch.
            if ch >= b'\x80' or ch.isalpha():
                continue

            # A plain non-alphabetic ASCII byte ends the stretch: flush it
            # (followed by a delimiting space) unless it sat inside a tag.
            if idx > start and not in_tag:
                result.extend(buf[start:idx])
                result.extend(b' ')
            start = idx + 1

        # Flush the trailing stretch when it is not inside an open tag.
        if not in_tag:
            result.extend(buf[start:])

        return result
146 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/latin1prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | #
13 | # This library is free software; you can redistribute it and/or
14 | # modify it under the terms of the GNU Lesser General Public
15 | # License as published by the Free Software Foundation; either
16 | # version 2.1 of the License, or (at your option) any later version.
17 | #
18 | # This library is distributed in the hope that it will be useful,
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 | # Lesser General Public License for more details.
22 | #
23 | # You should have received a copy of the GNU Lesser General Public
24 | # License along with this library; if not, write to the Free Software
25 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 | # 02110-1301 USA
27 | ######################### END LICENSE BLOCK #########################
28 |
29 | from .charsetprober import CharSetProber
30 | from .enums import ProbingState
31 |
# Number of likelihood categories used in Latin1ClassModel (values 0-3).
FREQ_CAT_NUM = 4

UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# Maps each of the 256 byte values to one of the character classes
# above; indexed directly by byte value.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,  # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,  # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,  # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,  # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,  # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,  # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,  # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,  # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,  # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,  # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,  # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,  # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,  # F8 - FF
)

# Likelihood that a character of the column's class follows one of the
# row's class in genuine Latin-1 text:
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
    0,  0,  0,  0,  0,  0,  0,  0,  # UDF
    0,  3,  3,  3,  3,  3,  3,  3,  # OTH
    0,  3,  3,  3,  3,  3,  3,  3,  # ASC
    0,  3,  3,  3,  1,  1,  3,  3,  # ASS
    0,  3,  3,  3,  1,  2,  1,  2,  # ACV
    0,  3,  3,  3,  3,  3,  3,  3,  # ACO
    0,  3,  1,  3,  1,  1,  1,  3,  # ASV
    0,  3,  1,  3,  1,  1,  3,  3,  # ASO
)
94 |
95 |
class Latin1Prober(CharSetProber):
    """Prober that scores input as Latin-1 (ISO-8859-1) text.

    Feeds byte pairs through Latin1ClassModel and accumulates how likely
    each consecutive two-character sequence is; an illegal sequence rules
    the charset out immediately.
    """

    def __init__(self):
        super(Latin1Prober, self).__init__()
        self._last_char_class = None
        self._freq_counter = None
        self.reset()

    def reset(self):
        """Clear per-stream state so the prober can be reused."""
        self._last_char_class = OTH
        self._freq_counter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    @property
    def charset_name(self):
        """Name of the charset this prober detects."""
        return "ISO-8859-1"

    @property
    def language(self):
        """Latin-1 covers many languages, so no single one is reported."""
        return ""

    def feed(self, byte_str):
        """Accumulate sequence statistics for *byte_str*; return the state."""
        byte_str = self.filter_with_english_letters(byte_str)
        for code in byte_str:
            category = Latin1_CharToClass[code]
            likelihood = Latin1ClassModel[
                (self._last_char_class * CLASS_NUM) + category]
            if likelihood == 0:
                # An illegal two-character sequence: cannot be Latin-1.
                self._state = ProbingState.NOT_ME
                break
            self._freq_counter[likelihood] += 1
            self._last_char_class = category

        return self.state

    def get_confidence(self):
        """Return a confidence estimate in [0.0, 0.99] for Latin-1."""
        if self.state == ProbingState.NOT_ME:
            return 0.01

        total = sum(self._freq_counter)
        if total < 0.01:
            return 0.0

        confidence = (self._freq_counter[3]
                      - self._freq_counter[1] * 20.0) / total
        confidence = max(confidence, 0.0)
        # Lower the confidence of latin1 so that other, more accurate
        # detectors can take priority.
        return confidence * 0.73
146 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/sbcharsetprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | #
13 | # This library is free software; you can redistribute it and/or
14 | # modify it under the terms of the GNU Lesser General Public
15 | # License as published by the Free Software Foundation; either
16 | # version 2.1 of the License, or (at your option) any later version.
17 | #
18 | # This library is distributed in the hope that it will be useful,
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 | # Lesser General Public License for more details.
22 | #
23 | # You should have received a copy of the GNU Lesser General Public
24 | # License along with this library; if not, write to the Free Software
25 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 | # 02110-1301 USA
27 | ######################### END LICENSE BLOCK #########################
28 |
29 | from .charsetprober import CharSetProber
30 | from .enums import CharacterCategory, ProbingState, SequenceLikelihood
31 |
32 |
class SingleByteCharSetProber(CharSetProber):
    """Prober for a single-byte charset driven by a language model.

    ``model`` supplies a byte-to-frequency-order map and a 64x64 precedence
    matrix of two-character sequence likelihoods; confidence comes from the
    ratio of "positive" sequences observed in the input.
    """

    # Orders below SAMPLE_SIZE are the model's most frequent characters.
    SAMPLE_SIZE = 64
    SB_ENOUGH_REL_THRESHOLD = 1024  # 0.25 * SAMPLE_SIZE^2
    # Confidence bounds that allow an early FOUND_IT / NOT_ME decision.
    POSITIVE_SHORTCUT_THRESHOLD = 0.95
    NEGATIVE_SHORTCUT_THRESHOLD = 0.05

    def __init__(self, model, reversed=False, name_prober=None):
        super(SingleByteCharSetProber, self).__init__()
        self._model = model
        # TRUE if we need to reverse every pair in the model lookup
        self._reversed = reversed
        # Optional auxiliary prober for name decision
        self._name_prober = name_prober
        self._last_order = None
        self._seq_counters = None
        self._total_seqs = None
        self._total_char = None
        self._freq_char = None
        self.reset()

    def reset(self):
        """Reset all sequence statistics to their initial state."""
        super(SingleByteCharSetProber, self).reset()
        # char order of last character (255 = outside the sampled range)
        self._last_order = 255
        self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
        self._total_seqs = 0
        self._total_char = 0
        # characters that fall in our sampling range
        self._freq_char = 0

    @property
    def charset_name(self):
        """Charset name, deferring to the name prober when one is set."""
        if self._name_prober:
            return self._name_prober.charset_name
        else:
            return self._model['charset_name']

    @property
    def language(self):
        """Language name, deferring to the name prober when one is set."""
        if self._name_prober:
            return self._name_prober.language
        else:
            return self._model.get('language')

    def feed(self, byte_str):
        """Accumulate two-character sequence statistics for *byte_str*.

        Returns the (possibly shortcut-updated) probing state.
        """
        if not self._model['keep_english_letter']:
            byte_str = self.filter_international_words(byte_str)
        if not byte_str:
            return self.state
        char_to_order_map = self._model['char_to_order_map']
        for i, c in enumerate(byte_str):
            # XXX: Order is in range 1-64, so one would think we want 0-63 here,
            # but that leads to 27 more test failures than before.
            order = char_to_order_map[c]
            # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
            # CharacterCategory.SYMBOL is actually 253, so we use CONTROL
            # to make it closer to the original intent. The only difference
            # is whether or not we count digits and control characters for
            # _total_char purposes.
            if order < CharacterCategory.CONTROL:
                self._total_char += 1
            if order < self.SAMPLE_SIZE:
                self._freq_char += 1
                if self._last_order < self.SAMPLE_SIZE:
                    self._total_seqs += 1
                    if not self._reversed:
                        i = (self._last_order * self.SAMPLE_SIZE) + order
                        model = self._model['precedence_matrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * self.SAMPLE_SIZE) + self._last_order
                        model = self._model['precedence_matrix'][i]
                    self._seq_counters[model] += 1
            self._last_order = order

        charset_name = self._model['charset_name']
        if self.state == ProbingState.DETECTING:
            # Only take shortcuts once enough sequences have been seen.
            if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
                confidence = self.get_confidence()
                if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
                    self.logger.debug('%s confidence = %s, we have a winner',
                                      charset_name, confidence)
                    self._state = ProbingState.FOUND_IT
                elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
                    self.logger.debug('%s confidence = %s, below negative '
                                      'shortcut threshhold %s', charset_name,
                                      confidence,
                                      self.NEGATIVE_SHORTCUT_THRESHOLD)
                    self._state = ProbingState.NOT_ME

        return self.state

    def get_confidence(self):
        """Return a confidence estimate in [0.01, 0.99] for this charset."""
        r = 0.01
        if self._total_seqs > 0:
            # Positive-sequence rate, normalised by the model's typical rate,
            # then scaled by the fraction of characters in the sampled range.
            r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) /
                 self._total_seqs / self._model['typical_positive_ratio'])
            r = r * self._freq_char / self._total_char
            if r >= 1.0:
                r = 0.99
        return r
133 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/packages/ssl_match_hostname/_implementation.py:
--------------------------------------------------------------------------------
1 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
2 |
3 | # Note: This file is under the PSF license as the code comes from the python
4 | # stdlib. http://docs.python.org/3/license.html
5 |
6 | import re
7 | import sys
8 |
9 | # ipaddress has been backported to 2.6+ in pypi. If it is installed on the
10 | # system, use it to handle IPAddress ServerAltnames (this was added in
11 | # python-3.5) otherwise only do DNS matching. This allows
12 | # backports.ssl_match_hostname to continue to be used all the way back to
13 | # python-2.4.
14 | try:
15 | import ipaddress
16 | except ImportError:
17 | ipaddress = None
18 |
19 | __version__ = '3.5.0.1'
20 |
21 |
class CertificateError(ValueError):
    """Raised when a certificate's names fail to match the expected hostname."""
    pass
24 |
25 |
26 | def _dnsname_match(dn, hostname, max_wildcards=1):
27 | """Matching according to RFC 6125, section 6.4.3
28 |
29 | http://tools.ietf.org/html/rfc6125#section-6.4.3
30 | """
31 | pats = []
32 | if not dn:
33 | return False
34 |
35 | # Ported from python3-syntax:
36 | # leftmost, *remainder = dn.split(r'.')
37 | parts = dn.split(r'.')
38 | leftmost = parts[0]
39 | remainder = parts[1:]
40 |
41 | wildcards = leftmost.count('*')
42 | if wildcards > max_wildcards:
43 | # Issue #17980: avoid denials of service by refusing more
44 | # than one wildcard per fragment. A survey of established
45 | # policy among SSL implementations showed it to be a
46 | # reasonable choice.
47 | raise CertificateError(
48 | "too many wildcards in certificate DNS name: " + repr(dn))
49 |
50 | # speed up common case w/o wildcards
51 | if not wildcards:
52 | return dn.lower() == hostname.lower()
53 |
54 | # RFC 6125, section 6.4.3, subitem 1.
55 | # The client SHOULD NOT attempt to match a presented identifier in which
56 | # the wildcard character comprises a label other than the left-most label.
57 | if leftmost == '*':
58 | # When '*' is a fragment by itself, it matches a non-empty dotless
59 | # fragment.
60 | pats.append('[^.]+')
61 | elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
62 | # RFC 6125, section 6.4.3, subitem 3.
63 | # The client SHOULD NOT attempt to match a presented identifier
64 | # where the wildcard character is embedded within an A-label or
65 | # U-label of an internationalized domain name.
66 | pats.append(re.escape(leftmost))
67 | else:
68 | # Otherwise, '*' matches any dotless string, e.g. www*
69 | pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
70 |
71 | # add the remaining fragments, ignore any wildcards
72 | for frag in remainder:
73 | pats.append(re.escape(frag))
74 |
75 | pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
76 | return pat.match(hostname)
77 |
78 |
79 | def _to_unicode(obj):
80 | if isinstance(obj, str) and sys.version_info < (3,):
81 | obj = unicode(obj, encoding='ascii', errors='strict')
82 | return obj
83 |
def _ipaddress_match(ipname, host_ip):
    """Exact matching of IP addresses.

    RFC 6125 explicitly doesn't define an algorithm for this
    (section 1.7.2 - "Out of Scope").
    """
    # OpenSSL may add a trailing newline to a subjectAltName's IP address,
    # and (divergence from upstream) ipaddress can't handle byte str, so
    # normalise before parsing.
    normalised = _to_unicode(ipname).rstrip()
    return ipaddress.ip_address(normalised) == host_ip
94 |
95 |
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate, match_hostname needs a "
                         "SSL socket or SSL context with either "
                         "CERT_OPTIONAL or CERT_REQUIRED")
    try:
        # Divergence from upstream: ipaddress can't handle byte str
        host_ip = ipaddress.ip_address(_to_unicode(hostname))
    except ValueError:
        # Not an IP address (common case)
        host_ip = None
    except UnicodeError:
        # Divergence from upstream: Have to deal with ipaddress not taking
        # byte strings. addresses should be all ascii, so we consider it not
        # an ipaddress in this case
        host_ip = None
    except AttributeError:
        # Divergence from upstream: Make ipaddress library optional
        if ipaddress is None:
            host_ip = None
        else:
            raise
    # Collect every candidate name seen so a failure can report them all.
    dnsnames = []
    san = cert.get('subjectAltName', ())
    # First preference: subjectAltName entries (DNS names, or IP addresses
    # when the requested hostname itself parsed as an IP).
    for key, value in san:
        if key == 'DNS':
            if host_ip is None and _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
        elif key == 'IP Address':
            if host_ip is not None and _ipaddress_match(value, host_ip):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # No candidate matched: raise the most informative error available.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
158 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/request.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .filepost import encode_multipart_formdata
4 | from .packages.six.moves.urllib.parse import urlencode
5 |
6 |
7 | __all__ = ['RequestMethods']
8 |
9 |
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # HTTP methods whose fields belong in the query string, not the body.
    _encode_url_methods = {'DELETE', 'GET', 'HEAD', 'OPTIONS'}

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # BUG FIX: this previously did ``raise NotImplemented(...)``, which
        # fails with ``TypeError: 'NotImplementedType' object is not
        # callable`` rather than raising the intended exception.
        raise NotImplementedError(
            "Classes extending RequestMethods must implement "
            "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, headers=None,
                           **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': headers}
        extra_kw.update(urlopen_kw)

        if fields:
            url += '?' + urlencode(fields)

        return self.urlopen(method, url, **extra_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': {}}

        if fields:
            if 'body' in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one.")

            if encode_multipart:
                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
            else:
                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'

            extra_kw['body'] = body
            extra_kw['headers'] = {'Content-Type': content_type}

        # Caller-supplied headers win over the generated Content-Type.
        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)

        return self.urlopen(method, url, **extra_kw)
149 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/fields.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import email.utils
3 | import mimetypes
4 |
5 | from .packages import six
6 |
7 |
def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.

    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    if not filename:
        return default
    guessed, _ = mimetypes.guess_type(filename)
    return guessed or default
20 |
21 |
def format_header_param(name, value):
    """
    Helper function to format and quote a single header parameter.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows RFC 2231, as
    suggested by RFC 2388 Section 4.4.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    needs_encoding = any(ch in value for ch in '"\\\r\n')
    if not needs_encoding:
        # Try the simple quoted form first; fall through on non-ASCII.
        candidate = '%s="%s"' % (name, value)
        try:
            candidate.encode('ascii')
            return candidate
        except (UnicodeEncodeError, UnicodeDecodeError):
            pass
    # RFC 2231 extended-parameter encoding for special or non-ASCII values.
    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
        value = value.encode('utf-8')
    encoded = email.utils.encode_rfc2231(value, 'utf-8')
    return '%s*=%s' % (name, encoded)
48 |
49 |
class RequestField(object):
    """
    A data container for request body parameters.

    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """
    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        self.headers = {}
        if headers:
            # Copy so later mutation of the caller's dict cannot affect us.
            self.headers = dict(headers)

    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.

        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::

            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',

        Field names and filenames must be unicode.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                # 2-tuple: infer the MIME type from the filename's extension.
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            # Bare value: a simple (non-file) form field.
            filename = None
            content_type = None
            data = value

        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)

        return request_param

    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.

        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)

    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.

        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.

        :param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
            as `k1="v1"; k2="v2"; ...`.
        """
        parts = []
        iterable = header_parts
        if isinstance(header_parts, dict):
            iterable = header_parts.items()

        for name, value in iterable:
            if value is not None:
                # None values are skipped rather than rendered as "None".
                parts.append(self._render_part(name, value))

        return '; '.join(parts)

    def render_headers(self):
        """
        Renders the headers for this request field.

        :returns: the MIME part's header block terminated by a blank line.
        """
        lines = []

        # Emit the well-known headers first, in a conventional fixed order.
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))

        # Then any remaining custom headers, skipping falsy values.
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append('%s: %s' % (header_name, header_value))

        lines.append('\r\n')
        return '\r\n'.join(lines)

    def make_multipart(self, content_disposition=None, content_type=None,
                       content_location=None):
        """
        Makes this request field into a multipart request field.

        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.

        :param content_disposition:
            The 'Content-Disposition' of the request body, defaulting to
            'form-data'.
        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.

        """
        self.headers['Content-Disposition'] = content_disposition or 'form-data'
        # Append '; name="..."; filename="..."' to the disposition value.
        self.headers['Content-Disposition'] += '; '.join([
            '', self._render_parts(
                (('name', self._name), ('filename', self._filename))
            )
        ])
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
179 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/contrib/socks.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | This module contains provisional support for SOCKS proxies from within
4 | urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
5 | SOCKS5. To enable its functionality, either install PySocks or install this
6 | module with the ``socks`` extra.
7 |
8 | The SOCKS implementation supports the full range of urllib3 features. It also
9 | supports the following SOCKS features:
10 |
11 | - SOCKS4
12 | - SOCKS4a
13 | - SOCKS5
14 | - Usernames and passwords for the SOCKS proxy
15 |
16 | Known Limitations:
17 |
18 | - Currently PySocks does not support contacting remote websites via literal
19 | IPv6 addresses. Any such connection attempt will fail. You must use a domain
20 | name.
21 | - Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
22 | such connection attempt will fail.
23 | """
24 | from __future__ import absolute_import
25 |
26 | try:
27 | import socks
28 | except ImportError:
29 | import warnings
30 | from ..exceptions import DependencyWarning
31 |
32 | warnings.warn((
33 | 'SOCKS support in urllib3 requires the installation of optional '
34 | 'dependencies: specifically, PySocks. For more information, see '
35 | 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
36 | ),
37 | DependencyWarning
38 | )
39 | raise
40 |
41 | from socket import error as SocketError, timeout as SocketTimeout
42 |
43 | from ..connection import (
44 | HTTPConnection, HTTPSConnection
45 | )
46 | from ..connectionpool import (
47 | HTTPConnectionPool, HTTPSConnectionPool
48 | )
49 | from ..exceptions import ConnectTimeoutError, NewConnectionError
50 | from ..poolmanager import PoolManager
51 | from ..util.url import parse_url
52 |
53 | try:
54 | import ssl
55 | except ImportError:
56 | ssl = None
57 |
58 |
class SOCKSConnection(HTTPConnection):
    """
    A plain-text HTTP connection that connects via a SOCKS proxy.
    """
    def __init__(self, *args, **kwargs):
        # Pulled out of kwargs so HTTPConnection.__init__ never sees it.
        self._socks_options = kwargs.pop('_socks_options')
        super(SOCKSConnection, self).__init__(*args, **kwargs)

    def _new_conn(self):
        """
        Establish a new connection via the SOCKS proxy.

        :returns: the connected socket returned by PySocks.
        :raises ConnectTimeoutError: when the connect attempt times out.
        :raises NewConnectionError: for any other connection failure.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address

        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options

        try:
            conn = socks.create_connection(
                (self.host, self.port),
                proxy_type=self._socks_options['socks_version'],
                proxy_addr=self._socks_options['proxy_host'],
                proxy_port=self._socks_options['proxy_port'],
                proxy_username=self._socks_options['username'],
                proxy_password=self._socks_options['password'],
                proxy_rdns=self._socks_options['rdns'],
                timeout=self.timeout,
                **extra_kw
            )

        except SocketTimeout as e:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))

        except socks.ProxyError as e:
            # This is fragile as hell, but it seems to be the only way to raise
            # useful errors here.
            if e.socket_err:
                # Unwrap the underlying socket error carried by PySocks.
                error = e.socket_err
                if isinstance(error, SocketTimeout):
                    raise ConnectTimeoutError(
                        self,
                        "Connection to %s timed out. (connect timeout=%s)" %
                        (self.host, self.timeout)
                    )
                else:
                    raise NewConnectionError(
                        self,
                        "Failed to establish a new connection: %s" % error
                    )
            else:
                raise NewConnectionError(
                    self,
                    "Failed to establish a new connection: %s" % e
                )

        except SocketError as e:  # Defensive: PySocks should catch all these.
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e)

        return conn
123 |
124 |
125 | # We don't need to duplicate the Verified/Unverified distinction from
126 | # urllib3/connection.py here because the HTTPSConnection will already have been
127 | # correctly set to either the Verified or Unverified form by that module. This
128 | # means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
    """An HTTPS connection tunnelled through a SOCKS proxy; TLS behavior
    comes from whichever HTTPSConnection form ..connection exported."""
    pass
131 |
132 |
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
    """Connection pool whose connections are plain-text SOCKSConnections."""
    ConnectionCls = SOCKSConnection
135 |
136 |
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
    """Connection pool whose connections are SOCKSHTTPSConnections."""
    ConnectionCls = SOCKSHTTPSConnection
139 |
140 |
class SOCKSProxyManager(PoolManager):
    """
    A version of the urllib3 ProxyManager that routes connections via the
    defined SOCKS proxy.
    """
    pool_classes_by_scheme = {
        'http': SOCKSHTTPConnectionPool,
        'https': SOCKSHTTPSConnectionPool,
    }

    def __init__(self, proxy_url, username=None, password=None,
                 num_pools=10, headers=None, **connection_pool_kw):
        parsed = parse_url(proxy_url)

        # Map the proxy URL scheme to (PySocks protocol constant, rdns flag).
        # The 'h'/'a' variants ask the proxy to resolve hostnames remotely.
        scheme_settings = {
            'socks5': (socks.PROXY_TYPE_SOCKS5, False),
            'socks5h': (socks.PROXY_TYPE_SOCKS5, True),
            'socks4': (socks.PROXY_TYPE_SOCKS4, False),
            'socks4a': (socks.PROXY_TYPE_SOCKS4, True),
        }.get(parsed.scheme)
        if scheme_settings is None:
            raise ValueError(
                "Unable to determine SOCKS version from %s" % proxy_url
            )
        socks_version, rdns = scheme_settings

        self.proxy_url = proxy_url

        # Forwarded to every SOCKSConnection through the pool kwargs.
        connection_pool_kw['_socks_options'] = {
            'socks_version': socks_version,
            'proxy_host': parsed.host,
            'proxy_port': parsed.port,
            'username': username,
            'password': password,
            'rdns': rdns,
        }

        super(SOCKSProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw
        )

        self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
189 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/requests/api.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests.api
5 | ~~~~~~~~~~~~
6 |
7 | This module implements the Requests API.
8 |
9 | :copyright: (c) 2012 by Kenneth Reitz.
10 | :license: Apache2, see LICENSE for more details.
11 | """
12 |
13 | from . import sessions
14 |
15 |
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
        to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      >>> req
      <Response [200]>
    """

    # By using the 'with' statement we are sure the session is closed, thus we
    # avoid leaving sockets open which can trigger a ResourceWarning in some
    # cases, and look like a memory leak in others.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
62 |
63 |
def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('get', url, params=params, **kwargs)
76 |
77 |
def options(url, **kwargs):
    r"""Sends an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('options', url, **kwargs)
88 |
89 |
def head(url, **kwargs):
    r"""Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes. If
        `allow_redirects` is not provided, it will be set to `False` (as
        opposed to the default :meth:`request` behavior).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', False)
    return request('head', url, **kwargs)
103 |
104 |
def post(url, data=None, json=None, **kwargs):
    r"""Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('post', url, data=data, json=json, **kwargs)
118 |
119 |
def put(url, data=None, **kwargs):
    r"""Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request` (forwarded through ``**kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('put', url, data=data, **kwargs)
133 |
134 |
def patch(url, data=None, **kwargs):
    r"""Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request` (forwarded through ``**kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('patch', url, data=data, **kwargs)
148 |
149 |
def delete(url, **kwargs):
    r"""Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    return request('delete', url, **kwargs)
160 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SportScanner
2 |
3 | [](https://gitter.im/mmmmmtasty/SportScanner?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
4 |
5 | # Status Update
6 |
7 | I am intending to make some improvements to this scanner and metadata agent. Please feel free to raise issues with requests. Support may still be patchy ;)
8 |
Wish list:
10 | - Shell script for fast unraid testing
11 | - Consider writing tests to enable more refactoring/expansion without breaking existing functionality
12 | - Consider new thesportsdb.com APIs to see if they add value
13 | - Consider support for multi-part events, double headers and cup competitions
14 | - Investigate alternatives or improvements to season setup
15 |
16 | -------------
17 |
18 | Scanner and Metadata Agent for Plex that uses www.thesportsdb.com
19 |
20 | # Installation
21 |
22 | Plex main folder location:
23 |
24 | * '%LOCALAPPDATA%\Plex Media Server\' # Windows Vista/7/8
25 | * '%USERPROFILE%\Local Settings\Application Data\Plex Media Server\' # Windows XP, 2003, Home Server
26 | * '$HOME/Library/Application Support/Plex Media Server/' # Mac OS
27 | * '$PLEX_HOME/Library/Application Support/Plex Media Server/', # Linux
28 | * '/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/', # Debian,Fedora,CentOS,Ubuntu
29 | * '/usr/local/plexdata/Plex Media Server/', # FreeBSD
30 | * '/usr/pbi/plexmediaserver-amd64/plexdata/Plex Media Server/', # FreeNAS
31 | * '${JAIL_ROOT}/var/db/plexdata/Plex Media Server/', # FreeNAS
32 | * '/c/.plex/Library/Application Support/Plex Media Server/', # ReadyNAS
33 | * '/share/MD0_DATA/.qpkg/PlexMediaServer/Library/Plex Media Server/', # QNAP
34 | * '/volume1/Plex/Library/Application Support/Plex Media Server/', # Synology, Asustor
35 | * '/raid0/data/module/Plex/sys/Plex Media Server/', # Thecus
36 | * '/raid0/data/PLEX_CONFIG/Plex Media Server/' # Thecus Plex community
37 |
38 | - Download the latest release from https://github.com/mmmmmtasty/SportScanner/releases
39 | - Extract files
40 | - Copy the extracted directory "Scanners" into your Plex main folder location - check the list above for more clues
41 | - Copy the extracted directory "SportScanner.bundle" into the Plug-ins directory in your main folder location - check the list above for more clues
42 | - You may need to restart Plex
43 | - Create a new library and under Advanced options you should be able to select "SportScanner" as both your scanner and metadata agent.
44 |
45 | # Media Format
46 |
47 | The SportScanner scanner requires one of two folder structures to work correctly, the first of which matches Plex's standard folder structure.
48 |
49 | ## RECOMMENDED METHOD
50 |
51 | Follow the Plex standards for folder structure - TV Show\Season\. For SportScanner, TV Shows = League Name. For example for 2015/2016 NHL you would do something like the following:
52 |
53 | - ~LibraryRoot/NHL/Season 1516/NHL.2015.09.25.New-York-Islanders.vs.Philadelphia-Flyers.720p.HDTV.60fps.x264-Reborn4HD_h.mp4
54 |
55 | In this scenario you still need all the information in the file name, I aim to remove that requirement down the line. The only information that comes only from the folder structure is the season.
56 |
57 | ## Alternative naming standard
58 |
59 | You can also choose to ignore the season directory and have the scanner work it out with a folder structure like so:
60 |
61 | - ~LibraryRoot/Ice Hockey/NHL/NHL.2015.09.25.New-York-Islanders.vs.Philadelphia-Flyers.720p.HDTV.60fps.x264-Reborn4HD_h.mp4
62 |
THERE IS A DOWN SIDE TO THIS! For this to work you must include a file in each league directory called "SportScanner.txt" that contains information about how the seasons work for this sport. The first line in the file will always be "XXXX" or "XXYY". "XXXX" means that the season happens within one calendar year and will therefore be named "2015" or "1999" for example. "XXYY" means that a season spans two calendar years and will take the format "1516" or "9899" for example. When you define the season as "XXYY" you MUST then on the next line write the integer values of a month and a day in the form "month,day". This should be a month and a day somewhere in the off-season for that sport. This tells the scanner when one season has finished and the next one is beginning to ensure that it puts files in the correct season based off the date the event happened. As an example, if you are trying to add NHL you would create a file at the following path:
64 |
65 | - ~LibraryRoot/Ice Hockey/NHL/SportScanner.txt
66 |
67 | In this instance the contents of this file would be as follows, saying that seasons should be in "XXYY" format and a date in the middle of the off-season is 1st July:
68 |
69 | XXYY
70 | 7,1
71 |
72 | ## NOT RECOMMENDED (but works for now)
73 |
74 | SportScanner does not actually pay attention to the name of the League directory when it comes to matching events - all info has to be in the filename. This means that you can still group all sports together and as long as they share a season format you can create a SportScanner.txt file as outlined above and everything will work.
75 |
76 | This is rubbish, it kind of accidentally works, I don't recommend it as I will cut it out as part of improvement works in future.
77 |
78 | # Known Issues
79 | - No posters for seasons
80 | - Can only handle individual files, not multipart or those in folders
81 | - All information must be in the filename regardless of the directory structure.
82 |
83 | # Additional Metadata
84 |
85 | The presence of a .SportScanner metadata file can be used to append additional text to the title of the event as well as override a portion of the episode number.
86 | Normally the episode number is of the form `YYMMDDHHHH` where YY is the year, MM is the month, DD is the day, and HHHH is based on a hash. If the first line of the `.SportScanner` file is a number it will be used in place of the hash.
87 | The second line of the `.SportScanner` file will be appended to the title of the event.
88 |
89 | - ~LibraryRoot/Formula 1/Season 2019/Formula 1 2019-06-30 Austrian Grand Prix - 03 Post-Race Analysis.mp4
90 | - ~LibraryRoot/Formula 1/Season 2019/Formula 1 2019-06-30 Austrian Grand Prix - 03 Post-Race Analysis.SportScanner
91 |
92 | In the above example, the `Formula 1 2019-06-30 Austrian Grand Prix - 03 Post-Race Analysis.SportScanner` file contains the following text:
93 |
94 | ```
95 | 3
96 | (Post-Race Analysis)
97 | ```
98 |
99 | The resulting episode number is `1906300003` and the resulting title is `Austrian Grand Prix (Post-Race Analysis)`
100 |
101 | # API Key
102 |
If you have your own API key for thesportsdb.com and want to use it, create a file in the SportScanner data directory. On Linux, this directory is
104 | ```
105 | /var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Data/com.plexapp.agents.sportscanner
106 | ```
107 | Create a plain text file named 'SportScanner.ini' in that directory (case sensitive if your OS is) and enter
108 | ```
109 | [thesportsdb.com]
110 | apikey=
111 | ```
112 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/util/url.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from collections import namedtuple
3 |
4 | from ..exceptions import LocationParseError
5 |
6 |
# Field order of the Url namedtuple; also the positional-argument order
# accepted by Url() and returned by parse_url().
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']

# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ('http', 'https', None)
12 |
13 |
class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`. Both the scheme and host are normalized as they are
    both case-insensitive according to RFC 3986.
    """
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize on construction: paths are rooted with '/', the scheme
        # is lowercased, and the host is lowercased only for schemes known
        # to be case-insensitive (see NORMALIZABLE_SCHEMES).
        if path and not path.startswith('/'):
            path = '/' + path
        if scheme:
            scheme = scheme.lower()
        if host and scheme in NORMALIZABLE_SCHEMES:
            host = host.lower()
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        # NOTE: a port of 0 is falsy and is therefore omitted here.
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ...     '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ''

        # We use "is not None" because we want things to happen with empty
        # strings (or a 0 port), which are falsy but meaningful.
        if scheme is not None:
            url += scheme + '://'
        if auth is not None:
            url += auth + '@'
        if host is not None:
            url += host
        if port is not None:
            url += ':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += '?' + query
        if fragment is not None:
            url += '#' + fragment

        return url

    def __str__(self):
        return self.url
97 |
98 |
def split_first(s, delims):
    """
    Split ``s`` at the earliest occurrence of any delimiter in ``delims``.

    Returns ``(head, tail, delimiter)``. When no delimiter occurs in ``s``,
    the head is the whole string, the tail is empty and the delimiter is
    ``None``.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    # Record (position, delims-order, delimiter) for each delimiter found;
    # the order index makes min() break position ties in delims order,
    # matching the behavior of a first-wins linear scan.
    candidates = [(s.find(d), i, d) for i, d in enumerate(delims)]
    candidates = [c for c in candidates if c[0] >= 0]

    if not candidates:
        return s, '', None

    pos, _, delim = min(candidates)
    return s[:pos], s[pos + 1:], delim
130 |
131 |
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    :raises LocationParseError: if a port is present but is not a valid
        base-10 integer.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path, keeping the terminating delimiter
        # ('/', '?' or '#') as its first character.
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        # Bracketed IPv6 literal; the brackets remain part of the host.
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if port:
            # If given, ports must be integers. No whitespace, no plus or
            # minus prefixes, no non-integer digits such as ^2 (superscript).
            # NOTE: str.isdigit() accepts some Unicode digit characters that
            # int() rejects, hence the additional try/except below.
            if not port.isdigit():
                raise LocationParseError(url)
            try:
                port = int(port)
            except ValueError:
                raise LocationParseError(url)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        # Nothing followed the authority component; we're done.
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
223 |
224 |
def get_host(url):
    """
    Deprecated. Use :func:`parse_url` instead.

    Returns a ``(scheme, host, port)`` tuple; the scheme defaults to
    ``'http'`` when the URL does not specify one.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
231 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/exceptions.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from .packages.six.moves.http_client import (
3 | IncompleteRead as httplib_IncompleteRead
4 | )
5 | # Base Exceptions
6 |
7 |
class HTTPError(Exception):
    """Base exception used by this module."""
11 |
12 |
class HTTPWarning(Warning):
    """Base warning used by this module."""
16 |
17 |
class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool, message):
        self.pool = pool
        super(PoolError, self).__init__("%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes: the pool itself is not picklable, so the
        # exception is reconstructed with placeholder arguments.
        return self.__class__, (None, None)
27 |
28 |
class RequestError(PoolError):
    """Base exception for PoolErrors that have associated URLs."""

    def __init__(self, pool, url, message):
        self.url = url
        super(RequestError, self).__init__(pool, message)

    def __reduce__(self):
        # For pickling purposes: keep the URL, drop the unpicklable parts.
        return self.__class__, (None, self.url, None)
38 |
39 |
class SSLError(HTTPError):
    """Raised when the SSL certificate fails in an HTTPS connection."""
43 |
44 |
class ProxyError(HTTPError):
    """Raised when the connection to a proxy fails."""
48 |
49 |
class DecodeError(HTTPError):
    """Raised when automatic decoding based on Content-Type fails."""
53 |
54 |
class ProtocolError(HTTPError):
    """Raised when something unexpected happens mid-request/response."""
58 |
59 |
#: Renamed to ProtocolError but aliased for backwards compatibility.
#: NOTE(review): shares its name with the Python 3 built-in
#: ``ConnectionError``; refer to it as ``urllib3.exceptions.ConnectionError``
#: to avoid confusion.
ConnectionError = ProtocolError
62 |
63 |
64 | # Leaf Exceptions
65 |
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error

    """

    def __init__(self, pool, url, reason=None):
        self.reason = reason
        message = ("Max retries exceeded with url: %s "
                   "(Caused by %r)" % (url, reason))
        super(MaxRetryError, self).__init__(pool, url, message)
83 |
84 |
class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(self, pool, url, retries=3):
        self.retries = retries
        super(HostChangedError, self).__init__(
            pool, url, "Tried to open a foreign host with url: %s" % url)
92 |
93 |
class TimeoutStateError(HTTPError):
    """Raised when passing an invalid state to a timeout."""
97 |
98 |
class TimeoutError(HTTPError):
    """Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
106 |
107 |
class ReadTimeoutError(TimeoutError, RequestError):
    """Raised when a socket timeout occurs while receiving data from a server."""
111 |
112 |
113 | # This timeout error does not have a URL attached and needs to inherit from the
114 | # base HTTPError
class ConnectTimeoutError(TimeoutError):
    """Raised when a socket timeout occurs while connecting to a server."""
118 |
119 |
class NewConnectionError(ConnectTimeoutError, PoolError):
    """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""
123 |
124 |
class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""
128 |
129 |
class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""
133 |
134 |
class LocationValueError(ValueError, HTTPError):
    """Raised when there is something wrong with a given URL input."""
138 |
139 |
class LocationParseError(LocationValueError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location):
        # Deliberately initializes via HTTPError.__init__ (as the original
        # did) rather than the ValueError side of the MRO.
        HTTPError.__init__(self, "Failed to parse: %s" % location)
        self.location = location
148 |
149 |
class ResponseError(HTTPError):
    """Used as a container for an error reason supplied in a MaxRetryError."""
    # Message templates; SPECIFIC_ERROR is filled in via str.format().
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'
154 |
155 |
class SecurityWarning(HTTPWarning):
    """Warned when performing security-reducing actions."""
159 |
160 |
class SubjectAltNameWarning(SecurityWarning):
    """Warned when connecting to a host with a certificate missing a SAN."""
164 |
165 |
class InsecureRequestWarning(SecurityWarning):
    """Warned when making an unverified HTTPS request."""
169 |
170 |
class SystemTimeWarning(SecurityWarning):
    """Warned when the system time is suspected to be wrong."""
174 |
175 |
class InsecurePlatformWarning(SecurityWarning):
    """Warned when certain SSL configuration is not available on a platform."""
179 |
180 |
class SNIMissingWarning(HTTPWarning):
    """Warned when making a HTTPS request without SNI available."""
184 |
185 |
class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """
192 |
193 |
class ResponseNotChunked(ProtocolError, ValueError):
    """Response needs to be chunked in order to read it as chunks."""
197 |
198 |
class BodyNotHttplibCompatible(HTTPError):
    """
    Body should be httplib.HTTPResponse like (have an fp attribute which
    returns raw chunks) for read_chunked().
    """
205 |
206 |
class IncompleteRead(HTTPError, httplib_IncompleteRead):
    """
    Response length doesn't match expected Content-Length

    Subclass of http_client.IncompleteRead to allow int value
    for `partial` to avoid creating large objects on streamed
    reads.
    """

    def __init__(self, partial, expected):
        super(IncompleteRead, self).__init__(partial, expected)

    def __repr__(self):
        # `partial` and `expected` attributes come from httplib_IncompleteRead.
        return ('IncompleteRead(%i bytes read, %i more expected)'
                % (self.partial, self.expected))
221 |
222 |
class InvalidHeader(HTTPError):
    """The header provided was somehow invalid."""
226 |
227 |
class ProxySchemeUnknown(AssertionError, ValueError):
    """ProxyManager does not support the supplied scheme."""
    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.

    def __init__(self, scheme):
        super(ProxySchemeUnknown, self).__init__(
            "Not supported proxy scheme %s" % scheme)
235 |
236 |
class HeaderParsingError(HTTPError):
    """Raised by assert_header_parsing, but we convert it to a log.warning statement."""

    def __init__(self, defects, unparsed_data):
        super(HeaderParsingError, self).__init__(
            '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data))
242 |
243 |
class UnrewindableBodyError(HTTPError):
    """urllib3 encountered an error when trying to rewind a body."""
247 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/packages/ordered_dict.py:
--------------------------------------------------------------------------------
1 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
2 | # Passes Python2.7's test suite and incorporates all the latest updates.
3 | # Copyright 2009 Raymond Hettinger, released under the MIT License.
4 | # http://code.activestate.com/recipes/576693/
5 | try:
6 | from thread import get_ident as _get_ident
7 | except ImportError:
8 | from dummy_thread import get_ident as _get_ident
9 |
10 | try:
11 | from _abcoll import KeysView, ValuesView, ItemsView
12 | except ImportError:
13 | pass
14 |
15 |
16 | class OrderedDict(dict):
17 | 'Dictionary that remembers insertion order'
18 | # An inherited dict maps keys to values.
19 | # The inherited dict provides __getitem__, __len__, __contains__, and get.
20 | # The remaining methods are order-aware.
21 | # Big-O running times for all methods are the same as for regular dictionaries.
22 |
23 | # The internal self.__map dictionary maps keys to links in a doubly linked list.
24 | # The circular doubly linked list starts and ends with a sentinel element.
25 | # The sentinel element never gets deleted (this simplifies the algorithm).
26 | # Each link is stored as a list of length three: [PREV, NEXT, KEY].
27 |
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # If __init__ is called again on an existing instance, the linked
            # list must not be rebuilt or existing links would be orphaned.
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]                # [PREV, NEXT, KEY]
            self.__map = {}
        self.__update(*args, **kwds)
43 |
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        # Overwriting an existing key updates only the dict, preserving order.
        if key not in self:
            root = self.__root
            last = root[0]          # root[0] is PREV, i.e. the current tail
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
53 |
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        # Splice the node out of the doubly linked list.
        link_prev[1] = link_next
        link_next[0] = link_prev
62 |
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward, yielding keys in insertion order.
        root = self.__root
        curr = root[1]              # first real node (root[1] is NEXT)
        while curr is not root:
            yield curr[2]           # KEY
            curr = curr[1]
70 |
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward, yielding keys newest-first.
        root = self.__root
        curr = root[0]              # last real node (root[0] is PREV)
        while curr is not root:
            yield curr[2]           # KEY
            curr = curr[0]
78 |
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        try:
            # Break the links' reference cycles so garbage collection is cheap.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            # __root/__map not created yet (clear() before __init__ completed).
            pass
        dict.clear(self)
90 |
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent entry).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest entry).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        # Remove the same key from the index and the underlying dict;
        # dict.pop supplies the value for the returned pair.
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
113 |
114 | # -- the following methods do not depend on the internal structure --
115 |
116 | def keys(self):
117 | 'od.keys() -> list of keys in od'
118 | return list(self)
119 |
120 | def values(self):
121 | 'od.values() -> list of values in od'
122 | return [self[key] for key in self]
123 |
124 | def items(self):
125 | 'od.items() -> list of (key, value) pairs in od'
126 | return [(key, self[key]) for key in self]
127 |
128 | def iterkeys(self):
129 | 'od.iterkeys() -> an iterator over the keys in od'
130 | return iter(self)
131 |
132 | def itervalues(self):
133 | 'od.itervalues -> an iterator over the values in od'
134 | for k in self:
135 | yield self[k]
136 |
137 | def iteritems(self):
138 | 'od.iteritems -> an iterator over the (key, value) items in od'
139 | for k in self:
140 | yield (k, self[k])
141 |
142 | def update(*args, **kwds):
143 | '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
144 |
145 | If E is a dict instance, does: for k in E: od[k] = E[k]
146 | If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
147 | Or if E is an iterable of items, does: for k, v in E: od[k] = v
148 | In either case, this is followed by: for k, v in F.items(): od[k] = v
149 |
150 | '''
151 | if len(args) > 2:
152 | raise TypeError('update() takes at most 2 positional '
153 | 'arguments (%d given)' % (len(args),))
154 | elif not args:
155 | raise TypeError('update() takes at least 1 argument (0 given)')
156 | self = args[0]
157 | # Make progressively weaker assumptions about "other"
158 | other = ()
159 | if len(args) == 2:
160 | other = args[1]
161 | if isinstance(other, dict):
162 | for key in other:
163 | self[key] = other[key]
164 | elif hasattr(other, 'keys'):
165 | for key in other.keys():
166 | self[key] = other[key]
167 | else:
168 | for key, value in other:
169 | self[key] = value
170 | for key, value in kwds.items():
171 | self[key] = value
172 |
173 | __update = update # let subclasses override update without breaking __init__
174 |
175 | __marker = object()
176 |
177 | def pop(self, key, default=__marker):
178 | '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
179 | If key is not found, d is returned if given, otherwise KeyError is raised.
180 |
181 | '''
182 | if key in self:
183 | result = self[key]
184 | del self[key]
185 | return result
186 | if default is self.__marker:
187 | raise KeyError(key)
188 | return default
189 |
190 | def setdefault(self, key, default=None):
191 | 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
192 | if key in self:
193 | return self[key]
194 | self[key] = default
195 | return default
196 |
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running is an intentionally shared mutable default: a
        # process-wide registry of reprs currently in progress, keyed by
        # (object id, thread id), so a self-referencing od prints '...'
        # instead of recursing forever.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            # Always deregister, even if rendering items() raised.
            del _repr_running[call_key]
209 |
    def __reduce__(self):
        'Return state information for pickling'
        # Pickle as (class, ([ [k, v], ... ],) [, instance dict]) so order
        # is rebuilt by reinserting pairs on unpickling.
        items = [[k, self[k]] for k in self]
        # Copy the instance dict, then drop the attributes every fresh
        # OrderedDict has (the internal bookkeeping), leaving only
        # subclass-added state worth pickling.
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
219 |
220 | def copy(self):
221 | 'od.copy() -> a shallow copy of od'
222 | return self.__class__(self)
223 |
224 | @classmethod
225 | def fromkeys(cls, iterable, value=None):
226 | '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
227 | and values equal to v (which defaults to None).
228 |
229 | '''
230 | d = cls()
231 | for key in iterable:
232 | d[key] = value
233 | return d
234 |
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            # items() yields insertion-ordered lists, so this compares both
            # contents and ordering; the length check short-circuits cheaply.
            return len(self)==len(other) and self.items() == other.items()
        # Plain-mapping comparison: defer to dict's order-insensitive equality.
        return dict.__eq__(self, other)

    def __ne__(self, other):
        # Needed explicitly: Python 2 does not derive != from __eq__.
        return not self == other
246 |
247 | # -- the following methods are only used in Python 2.7 --
248 |
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        # KeysView/ValuesView/ItemsView are imported elsewhere in this module
        # (not visible here) -- presumably the collections ABC view classes.
        # Views stay in sync with the dictionary as it changes.
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
260 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/chardet/chardistribution.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Communicator client code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
29 | EUCTW_TYPICAL_DISTRIBUTION_RATIO)
30 | from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
31 | EUCKR_TYPICAL_DISTRIBUTION_RATIO)
32 | from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
33 | GB2312_TYPICAL_DISTRIBUTION_RATIO)
34 | from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
35 | BIG5_TYPICAL_DISTRIBUTION_RATIO)
36 | from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
37 | JIS_TYPICAL_DISTRIBUTION_RATIO)
38 |
39 |
class CharDistributionAnalysis(object):
    """Accumulate character-frequency statistics for multi-byte encodings.

    Subclasses plug in a language-specific frequency table and a
    ``get_order`` mapping; confidence is derived from how many observed
    characters fall among the most frequent ones.
    """

    ENOUGH_DATA_THRESHOLD = 1024
    SURE_YES = 0.99
    SURE_NO = 0.01
    MINIMUM_DATA_THRESHOLD = 3

    def __init__(self):
        # Table mapping a character's order (from get_order()) to its
        # frequency rank; installed by subclasses.
        self._char_to_freq_order = None
        # Number of entries in the table above.
        self._table_size = None
        # Language-specific constant used in the confidence formula; see
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        self.typical_distribution_ratio = None
        self._done = None
        self._total_chars = None
        self._freq_chars = None
        self.reset()

    def reset(self):
        """Clear all accumulated state so the analyser can be reused."""
        self._done = False       # True once a conclusion has been reached
        self._total_chars = 0    # total characters counted so far
        self._freq_chars = 0     # characters whose frequency order is < 512

    def feed(self, char, char_len):
        """Feed one character of known byte length into the analyser."""
        # Only 2-byte characters participate in the distribution analysis.
        order = self.get_order(char) if char_len == 2 else -1
        if order < 0:
            return
        self._total_chars += 1
        if order < self._table_size and self._char_to_freq_order[order] < 512:
            self._freq_chars += 1

    def get_confidence(self):
        """Return a confidence value based on the data seen so far."""
        # Too little (or no) relevant data: answer "almost certainly not".
        if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
            return self.SURE_NO

        if self._total_chars != self._freq_chars:
            ratio = (self._freq_chars /
                     ((self._total_chars - self._freq_chars)
                      * self.typical_distribution_ratio))
            if ratio < self.SURE_YES:
                return ratio

        # Cap the answer: never claim 100% certainty.
        return self.SURE_YES

    def got_enough_data(self):
        # Detection may conclude early; a fixed amount of data suffices.
        return self._total_chars > self.ENOUGH_DATA_THRESHOLD

    def get_order(self, byte_str):
        # Subclasses convert an encoded character to a numeric "order" so
        # multiple encodings of one language can share a frequency table.
        # -1 means the character is outside the range of interest.
        return -1
111 |
112 |
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for EUC-TW, backed by the Taiwan frequency table."""

    def __init__(self):
        super(EUCTWDistributionAnalysis, self).__init__()
        self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
        self._table_size = EUCTW_TABLE_SIZE
        self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # EUC-TW characters of interest: lead byte 0xc4-0xfe, trail byte
        # 0xa1-0xfe.  Byte validity was already checked by the state machine.
        lead = byte_str[0]
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + byte_str[1] - 0xA1
130 |
131 |
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for EUC-KR, backed by the Korean frequency table."""

    def __init__(self):
        super(EUCKRDistributionAnalysis, self).__init__()
        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
        self._table_size = EUCKR_TABLE_SIZE
        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # EUC-KR characters of interest: lead byte 0xb0-0xfe, trail byte
        # 0xa1-0xfe.  Byte validity was already checked by the state machine.
        lead = byte_str[0]
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + byte_str[1] - 0xA1
149 |
150 |
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for GB2312, backed by the simplified-Chinese table."""

    def __init__(self):
        super(GB2312DistributionAnalysis, self).__init__()
        self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
        self._table_size = GB2312_TABLE_SIZE
        self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # GB2312 characters of interest: lead byte 0xb0-0xfe, trail byte
        # 0xa1-0xfe.  Byte validity was already checked by the state machine.
        lead, trail = byte_str[0], byte_str[1]
        if lead < 0xB0 or trail < 0xA1:
            return -1
        return 94 * (lead - 0xB0) + trail - 0xA1
168 |
169 |
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for Big5, backed by the traditional-Chinese table."""

    def __init__(self):
        super(Big5DistributionAnalysis, self).__init__()
        self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
        self._table_size = BIG5_TABLE_SIZE
        self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # Big5 characters of interest: lead byte 0xa4-0xfe, trail byte
        # 0x40-0x7e or 0xa1-0xfe (already validated by the state machine).
        # Each lead-byte row holds 157 cells: 63 low-trail cells (0x40-0x7e)
        # followed by 94 high-trail cells (0xa1-0xfe).
        lead, trail = byte_str[0], byte_str[1]
        if lead < 0xA4:
            return -1
        row = 157 * (lead - 0xA4)
        if trail >= 0xA1:
            return row + 63 + (trail - 0xA1)
        return row + (trail - 0x40)
190 |
191 |
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for Shift_JIS, backed by the JIS frequency table."""

    def __init__(self):
        super(SJISDistributionAnalysis, self).__init__()
        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
        self._table_size = JIS_TABLE_SIZE
        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # Shift_JIS characters of interest: lead byte 0x81-0x9f or
        # 0xe0-0xef, trail byte 0x40-0x7e (trail bytes above 0x7f are
        # rejected here, matching the original implementation).  Byte
        # validity was already checked by the state machine.
        lead, trail = byte_str[0], byte_str[1]
        if 0x81 <= lead <= 0x9F:
            row = 188 * (lead - 0x81)
        elif 0xE0 <= lead <= 0xEF:
            row = 188 * (lead - 0xE0 + 31)
        else:
            return -1
        if trail > 0x7F:
            return -1
        return row + trail - 0x40
215 |
216 |
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analysis for EUC-JP, backed by the JIS frequency table."""

    def __init__(self):
        super(EUCJPDistributionAnalysis, self).__init__()
        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
        self._table_size = JIS_TABLE_SIZE
        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, byte_str):
        # EUC-JP characters of interest: lead byte 0xa0-0xfe, trail byte
        # 0xa1-0xfe (already validated by the state machine).  NOTE: a lead
        # byte of exactly 0xa0 produces a negative order, which feed()
        # discards -- behaviour preserved from the original code.
        lead = byte_str[0]
        if lead < 0xA0:
            return -1
        return 94 * (lead - 0xA1) + byte_str[1] - 0xA1
234 |
--------------------------------------------------------------------------------
/SportScanner.bundle/Contents/Libraries/Shared/urllib3/util/timeout.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | # The default socket timeout, used by httplib to indicate that no timeout was
3 | # specified by the user
4 | from socket import _GLOBAL_DEFAULT_TIMEOUT
5 | import time
6 |
7 | from ..exceptions import TimeoutStateError
8 |
# Sentinel meaning "no timeout was specified by the user": distinct from
# None, which explicitly disables a timeout in urllib3.
_Default = object()


# Prefer the monotonic clock where the interpreter provides it (so elapsed
# time is immune to wall-clock adjustments); otherwise fall back to time.time.
try:
    current_time = time.monotonic
except AttributeError:
    current_time = time.time
16 |
17 |
class Timeout(object):
    """ Timeout configuration.

    Timeouts can be defined as a default for a pool::

        timeout = Timeout(connect=2.0, read=7.0)
        http = PoolManager(timeout=timeout)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))

    Timeouts can be disabled by setting all the parameters to ``None``::

        no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/', timeout=no_timeout)

    :param total:
        A combined budget for connect and read: the read timeout becomes
        whatever is left over after the connect attempt.  When both a total
        and a connect (or read) timeout are given, the shorter one applies.

        Defaults to None.

    :type total: integer, float, or None

    :param connect:
        The maximum amount of time to wait for a connection attempt to a
        server to succeed.  Omitting the parameter uses the system default
        (usually the global default timeout in ``socket.py``).  ``None``
        sets an infinite timeout for connection attempts.

    :type connect: integer, float, or None

    :param read:
        The maximum amount of time to wait between consecutive read
        operations for a response from the server.  Omitting the parameter
        uses the system default (usually the global default timeout in
        ``socket.py``).  ``None`` sets an infinite timeout.

    :type read: integer, float, or None

    .. note::

        Many factors outside these settings affect the total time urllib3
        takes to return a response.  Python's DNS resolver, for example,
        does not obey the socket timeout; high CPU load, heavy swapping, or
        a low process priority can also add delay.

        The read and total timeouts bound only the gap *between* read
        operations on the socket, not the time to download a complete
        response.  Most timeouts fire because the server did not send the
        first byte in time -- but a server streaming one byte every fifteen
        seconds never trips a 20-second timeout even though the request
        takes minutes.  To cut a request off after a fixed amount of wall
        clock time, use a second "watcher" thread.
    """

    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT

    def __init__(self, total=None, connect=_Default, read=_Default):
        # Validate (and normalize omitted arguments to the sentinel default)
        # before storing anything.
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        # Timestamp recorded by start_connect(); None until the timer starts.
        self._start_connect = None

    def __str__(self):
        details = (type(self).__name__, self._connect, self._read, self.total)
        return '%s(connect=%r, read=%r, total=%r)' % details

    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid.

        :param value: The timeout value to validate
        :param name: The name of the timeout attribute being validated;
            used only to build error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If it is a numeric value less than or equal to
            zero, or the type is not an integer, float, or None.
        """
        # Omitted argument -> the shared sentinel default.
        if value is _Default:
            return cls.DEFAULT_TIMEOUT

        # None (disabled) and the sentinel default pass through untouched.
        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value

        # bool is an int subclass, so reject it explicitly before the
        # numeric checks below would silently accept it.
        if isinstance(value, bool):
            raise ValueError("Timeout cannot be a boolean value. It must "
                             "be an int, float or None.")

        # Must be convertible to a number.
        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int, float or None." % (name, value))

        # Must be strictly positive; on Python 3 an unorderable type raises
        # TypeError here, which is reported the same way as a bad type.
        try:
            if value <= 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than or equal to 0." % (name, value))
        except TypeError:
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int, float or None." % (name, value))

        return value

    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.

        httplib-style timeouts apply a single value to both the connect()
        and recv() socket operations; this builds the equivalent
        :class:`Timeout` with that one value used for both settings.

        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(connect=timeout, read=timeout)

    def clone(self):
        """ Create a copy of the timeout object

        Timeout properties are stored per-pool, but each request needs a
        fresh object so start/stop state is never shared between requests.

        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # copy.deepcopy is deliberately avoided: it would duplicate
        # _GLOBAL_DEFAULT_TIMEOUT, which socket.py compares by identity.
        return Timeout(total=self.total, connect=self._connect,
                       read=self._read)

    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt

        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        now = current_time()
        self._start_connect = now
        return now

    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.

        :return: Elapsed time.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect

    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.

        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.

        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        connect = self._connect
        # Without a total budget, the connect setting stands on its own.
        if self.total is None:
            return connect
        # An unset/disabled connect timeout defers to the total budget.
        if connect is None or connect is self.DEFAULT_TIMEOUT:
            return self.total
        # Both set: the stricter (smaller) one applies.
        return min(connect, self.total)

    @property
    def read_timeout(self):
        """ Get the value for the read timeout.

        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.

        If self.total is set, the read timeout is dependent on the amount
        of time taken by the connect timeout.

        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        total_set = (self.total is not None and
                     self.total is not self.DEFAULT_TIMEOUT)
        read_set = (self._read is not None and
                    self._read is not self.DEFAULT_TIMEOUT)
        if total_set and read_set:
            # If the connect timer never started, there is nothing to
            # subtract from the total budget yet.
            if self._start_connect is None:
                return self._read
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        if total_set:
            return max(0, self.total - self.get_connect_duration())
        return self._read
243 |
--------------------------------------------------------------------------------