80 | % else:
81 |
82 | % endif
83 | % endif
84 |
85 | ${list_info(run)}
86 |
87 | % if show_gauge:
88 |
89 | % endif
90 | % if 'sprt' in run['args'] and 'Pending' not in results_info['info'][0]:
91 |
92 | % endif
93 |
--------------------------------------------------------------------------------
/server/fishtest/templates/login.mak:
--------------------------------------------------------------------------------
1 | <%inherit file="base.mak"/>
2 |
3 |
6 |
7 |
8 |
9 | Login
10 |
11 | Don't have an account?
12 |
Sign up
13 |
14 |
15 |
16 |
61 |
62 |
63 |
64 |
--------------------------------------------------------------------------------
/server/fishtest/templates/machines.mak:
--------------------------------------------------------------------------------
<%!
from fishtest.util import format_time_ago, worker_name

# Truncate overlong text (e.g. branch names) so table columns stay narrow.
def clip_long(text, max_length=20):
    return text if len(text) <= max_length else text[:max_length] + "..."
%>
10 |
11 |
12 |
13 |
14 | Machine
15 | Cores
16 | UUID
17 | MNps
18 | RAM
19 | System
20 | Compiler
21 | Python
22 | Worker
23 | Running on
24 | Last active
25 |
26 |
27 |
28 | % for machine in machines_list:
29 | <%
30 | gcc_version = ".".join([str(m) for m in machine['gcc_version']])
31 | compiler = machine.get('compiler', 'g++')
32 | python_version = ".".join([str(m) for m in machine['python_version']])
33 | version = str(machine['version']) + "*" * machine['modified']
34 | worker_name_ = worker_name(machine, short=True)
35 | formatted_time_ago = format_time_ago(machine["last_updated"])
36 | sort_value_time_ago = -machine['last_updated'].timestamp()
37 | branch = machine['run']['args']['new_tag']
38 | task_id = str(machine['task_id'])
39 | run_id = str(machine['run']['_id'])
40 | %>
41 |
42 | ${machine['username']}
43 |
44 | % if 'country_code' in machine:
45 |
47 | % endif
48 | ${machine['concurrency']}
49 |
50 | ${machine['unique_key'].split('-')[0]}
51 | ${f"{machine['nps'] / 1000000:.2f}"}
52 | ${machine['max_memory']}
53 | ${machine['uname']}
54 | ${compiler} ${gcc_version}
55 | ${python_version}
56 | ${version}
57 |
58 | ${clip_long(branch) + "/" + task_id}
59 |
60 | ${formatted_time_ago}
61 |
62 | % endfor
63 | % if "version" not in locals():
64 |
65 | No machines running
66 |
67 | % endif
68 |
69 |
70 |
--------------------------------------------------------------------------------
/server/fishtest/templates/nn_upload.mak:
--------------------------------------------------------------------------------
1 | <%inherit file="base.mak"/>
2 |
3 |
6 |
7 |
8 |
9 | Neural Network Upload
10 |
11 |
12 | Please read the
13 | Testing Guidelines
18 | before uploading your network.
19 |
20 |
21 | By uploading, you license your network under a
22 | CC0
27 | license.
28 |
29 |
30 | Your uploaded network will be available for public download and listed
31 | on the NN stats page .
32 |
33 |
34 |
35 |
36 |
77 |
78 |
79 |
88 |
--------------------------------------------------------------------------------
/server/fishtest/templates/nns.mak:
--------------------------------------------------------------------------------
1 | <%inherit file="base.mak"/>
2 |
3 |
6 |
7 |
Neural Network Repository
8 |
9 |
10 | These networks are freely available for download and sharing under a
11 | CC0 license.
12 | Nets colored green in the table have passed fishtest testing
13 | and achieved the status of default net during the development of Stockfish.
14 | The recommended net for a given Stockfish executable can be found as the default value of the EvalFile UCI option.
15 |
16 |
17 |
58 |
59 | <%include file="pagination.mak" args="pages=pages"/>
60 |
61 |
62 |
63 |
64 |
65 | Time
66 | Network
67 | Username
68 | First test
69 | Last test
70 | Downloads
71 |
72 |
73 |
74 | % for idx, nn in enumerate(nns):
75 | % if not master_only or 'is_master' in nn:
76 |
77 | ${nn['time'].strftime("%y-%m-%d %H:%M:%S")}
78 | % if 'is_master' in nn:
79 |
80 | % else:
81 |
82 | % endif
83 | ${nn['name']}
84 | ${nn['user']}
85 |
86 | % if 'first_test' in nn:
87 | ${str(nn['first_test']['date']).split('.')[0]}
88 | % endif
89 |
90 |
91 | % if 'last_test' in nn:
92 | ${str(nn['last_test']['date']).split('.')[0]}
93 | % endif
94 |
95 | ${nn.get('downloads', 0)}
96 |
97 | % endif
98 | % endfor
99 | % if "idx" not in locals():
100 |
101 | No nets available
102 |
103 | % endif
104 |
105 |
106 |
107 |
108 | <%include file="pagination.mak" args="pages=pages"/>
109 |
110 |
119 |
--------------------------------------------------------------------------------
/server/fishtest/templates/notfound.mak:
--------------------------------------------------------------------------------
1 | <%inherit file="base.mak"/>
2 |
3 |
6 |
7 |
43 |
44 |
45 |
46 |
404
47 |
Oops! Page not found.
48 |
The page you are looking for might have been removed, had its name changed, or is temporarily unavailable.
49 |
Go to Home
50 |
51 |
52 |
--------------------------------------------------------------------------------
/server/fishtest/templates/pagination.mak:
--------------------------------------------------------------------------------
1 | <%page args="pages=None"/>
2 |
3 | % if pages and len(pages) > 3:
4 |
5 |
16 |
17 | % endif
18 |
--------------------------------------------------------------------------------
/server/fishtest/templates/run_tables.mak:
--------------------------------------------------------------------------------
1 | <%!
2 | import binascii
3 | %>
4 | <%
5 | # to differentiate toggle states on different pages
6 | prefix = 'user'+binascii.hexlify(username.encode()).decode()+"_" if username is not Undefined else ""
7 | %>
8 |
9 | % if page_idx == 0:
10 | <%
11 | pending_approval_runs = [run for run in runs['pending'] if not run['approved']]
12 | paused_runs = [run for run in runs['pending'] if run['approved']]
13 | %>
14 |
15 | <%include file="run_table.mak" args="runs=pending_approval_runs,
16 | show_delete=True,
17 | header='Pending approval',
18 | count=len(pending_approval_runs),
19 | toggle=prefix+'pending',
20 | alt='No tests pending approval'"
21 | />
22 |
23 | <%include file="run_table.mak" args="runs=paused_runs,
24 | show_delete=True,
25 | header='Paused',
26 | count=len(paused_runs),
27 | toggle=prefix+'paused',
28 | alt='No paused tests'"
29 | />
30 |
31 | <%include file="run_table.mak" args="runs=failed_runs,
32 | show_delete=True,
33 | toggle=prefix+'failed',
34 | count=len(failed_runs),
35 | header='Failed',
36 | alt='No failed tests on this page'"
37 | />
38 |
39 | <%include file="run_table.mak" args="runs=runs['active'],
40 | header='Active',
41 | toggle=prefix+'active',
42 | count=len(runs['active']),
43 | alt='No active tests'"
44 | />
45 | % endif
46 |
47 | <%include file="run_table.mak" args="runs=finished_runs,
48 | header='Finished',
49 | count=num_finished_runs,
50 | toggle=prefix+'finished' if page_idx==0 else None,
51 | pages=finished_runs_pages"
52 | />
53 |
--------------------------------------------------------------------------------
/server/fishtest/templates/signup.mak:
--------------------------------------------------------------------------------
1 | <%inherit file="base.mak"/>
2 |
3 |
6 |
7 | <%block name="head">
8 |
9 | %block>
10 |
11 |
12 |
13 | Register
14 |
15 |
Manual Approvals
16 |
To avoid spam, a person will manually approve your account.
17 |
This is usually quick but sometimes takes a few hours .
18 |
19 |
Already have an account? Log in
20 |
21 |
22 |
23 |
120 |
121 |
122 |
123 |
--------------------------------------------------------------------------------
/server/fishtest/templates/tasks.mak:
--------------------------------------------------------------------------------
1 | <%!
2 | from fishtest.util import worker_name, display_residual
3 | %>
4 |
5 | % for idx, task in enumerate(run['tasks'] + run.get('bad_tasks', [])):
6 | <%
7 | if 'bad' in task and idx < len(run['tasks']):
8 | continue
9 | task_id = task.get('task_id', idx)
10 | stats = task.get('stats', {})
11 | if 'stats' in task:
12 | total = stats['wins'] + stats['losses'] + stats['draws']
13 | else:
14 | continue
15 |
16 | if task_id == show_task:
17 | active_style = 'highlight'
18 | elif task['active']:
19 | active_style = 'info'
20 | else:
21 | active_style = ''
22 | %>
23 |
24 |
25 | ${task_id}
26 |
27 | % if 'bad' in task:
28 |
29 | % else:
30 |
31 | % endif
32 | % if approver and task['worker_info']['username'] != "Unknown_worker":
33 |
34 | ${worker_name(task['worker_info'])}
35 |
36 | % elif 'worker_info' in task:
37 | ${worker_name(task["worker_info"])}
38 | % else:
39 | -
40 | % endif
41 |
42 |
43 | <%
44 | gcc_version = ".".join([str(m) for m in task['worker_info']['gcc_version']])
45 | compiler = task['worker_info'].get('compiler', 'g++')
46 | python_version = ".".join([str(m) for m in task['worker_info']['python_version']])
47 | version = task['worker_info']['version']
48 | ARCH = task['worker_info']['ARCH']
49 | %>
50 | os: ${task['worker_info']['uname']};
51 | ram: ${task['worker_info']['max_memory']}MiB;
52 | compiler: ${compiler} ${gcc_version};
53 | python: ${python_version};
54 | worker: ${version};
55 | arch: ${ARCH}
56 |
57 | ${str(task.get('last_updated', '-')).split('.')[0]}
58 | ${f"{total:03d} / {task['num_games']:03d}"}
59 | % if 'pentanomial' not in run['results']:
60 | ${stats.get('wins', '-')}
61 | ${stats.get('losses', '-')}
62 | ${stats.get('draws', '-')}
63 | % else:
64 | <%
65 | p = stats.get('pentanomial', [0] * 5)
66 | %>
67 | [${p[0]}, ${p[1]}, ${p[2]}, ${p[3]}, ${p[4]}]
68 | % endif
69 | ${stats.get('crashes', '-')}
70 | ${stats.get('time_losses', '-')}
71 |
72 | % if 'spsa' not in run['args']:
73 | <%
74 | d=display_residual(task, chi2)
75 | %>
76 | % if d['residual']!=float("inf"):
77 |
78 | ${f"{d['residual']:.3f}"}
79 |
80 | % else:
81 | -
82 | % endif
83 | % endif
84 |
85 | % endfor
86 |
87 | % if len(run['tasks'] + run.get('bad_tasks', [])) == 0:
88 |
89 | No tasks running
90 |
91 | % endif
92 |
--------------------------------------------------------------------------------
/server/fishtest/templates/tests_finished.mak:
--------------------------------------------------------------------------------
1 | <%inherit file="base.mak"/>
2 | <%
3 | title = ""
4 | if "ltc_only" in request.url:
5 | title += " - LTC"
6 | if "success_only" in request.url:
7 | title += " - Greens"
8 | if "yellow_only" in request.url:
9 | title += " - Yellows"
10 | %>
11 |
12 |
15 |
16 |
17 | Finished Tests
18 | % if 'success_only' in request.url:
19 | - Greens
20 | % elif 'yellow_only' in request.url:
21 | - Yellows
22 | % elif 'ltc_only' in request.url:
23 | - LTC
24 | % endif
25 |
26 |
27 | <%include file="run_table.mak" args="runs=finished_runs,
28 | header='Finished',
29 | count=num_finished_runs,
30 | pages=finished_runs_pages,
31 | title=title"
32 | />
33 |
--------------------------------------------------------------------------------
/server/fishtest/templates/tests_live_elo.mak:
--------------------------------------------------------------------------------
1 | <%inherit file="base.mak"/>
2 |
3 |
4 |
5 |
6 |
7 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
Details
24 |
25 |
29 |
30 |
31 |
32 | Commit
33 |
34 |
35 |
36 |
37 |
38 | Info
39 |
40 |
41 |
42 | Submitter
43 |
44 |
45 |
46 |
47 |
48 | TC
49 |
50 |
51 |
52 | SPRT
53 |
54 |
55 |
56 | LLR
57 |
58 |
59 |
60 | Elo
61 |
62 |
63 |
64 | LOS
65 |
66 |
67 |
68 | Games
69 |
70 |
71 |
72 | Pentanomial
73 |
74 |
75 |
76 |
77 |
78 |
79 |
--------------------------------------------------------------------------------
/server/fishtest/templates/tests_user.mak:
--------------------------------------------------------------------------------
1 | <%inherit file="base.mak"/>
2 |
3 | % if is_approver:
4 | ${username} - Tests
5 | % else:
6 | ${username} - Tests
7 | % endif
8 |
9 |
10 |
13 |
14 | <%include file="run_tables.mak"/>
15 |
--------------------------------------------------------------------------------
/server/fishtest/workerdb.py:
--------------------------------------------------------------------------------
1 | from datetime import UTC, datetime
2 |
3 | from fishtest.schemas import worker_schema
4 | from vtjson import validate
5 |
6 |
7 | class WorkerDb:
8 | def __init__(self, db):
9 | self.db = db
10 | self.workers = self.db["workers"]
11 |
12 | def get_worker(
13 | self,
14 | worker_name,
15 | ):
16 | q = {"worker_name": worker_name}
17 | r = self.workers.find_one(
18 | q,
19 | )
20 | if r is None:
21 | return {
22 | "worker_name": worker_name,
23 | "blocked": False,
24 | "message": "",
25 | "last_updated": None,
26 | }
27 | else:
28 | return r
29 |
30 | def update_worker(self, worker_name, blocked=None, message=None):
31 | r = {
32 | "worker_name": worker_name,
33 | "blocked": blocked,
34 | "message": message,
35 | "last_updated": datetime.now(UTC),
36 | }
37 | validate(worker_schema, r, "worker") # may throw exception
38 | self.workers.replace_one({"worker_name": worker_name}, r, upsert=True)
39 |
40 | def get_blocked_workers(self):
41 | q = {"blocked": True}
42 | return list(self.workers.find(q))
43 |
--------------------------------------------------------------------------------
/server/production.ini:
--------------------------------------------------------------------------------
1 | ###
2 | # app configuration
3 | # http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/environment.html
4 | ###
5 |
6 | [app:main]
7 | use = egg:fishtest_server
8 |
9 | pyramid.reload_templates = false
10 | pyramid.debug_authorization = false
11 | pyramid.debug_notfound = false
12 | pyramid.debug_routematch = false
13 | pyramid.default_locale_name = en
14 |
15 | mako.directories = fishtest:templates
16 |
17 | fishtest.port = %(http_port)s
18 | fishtest.primary_port = 6543
19 |
20 | ###
21 | # wsgi server configuration
22 | ###
23 |
24 | [server:main]
25 | use = egg:waitress#main
26 | host = 127.0.0.1
27 | port = %(http_port)s
28 |
29 | trusted_proxy = 127.0.0.1
30 | trusted_proxy_count = 1
31 | trusted_proxy_headers = x-forwarded-for x-forwarded-host x-forwarded-proto x-forwarded-port
32 | clear_untrusted_proxy_headers = yes
33 |
34 | connection_limit = 100
35 | threads = 10
36 |
37 | ###
38 | # logging configuration
39 | # http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/logging.html
40 | ###
41 |
42 | [loggers]
43 | keys = root, fishtest
44 |
45 | [handlers]
46 | keys = console
47 |
48 | [formatters]
49 | keys = generic
50 |
51 | [logger_root]
52 | level = ERROR
53 | handlers = console
54 |
55 | [logger_fishtest]
56 | level = WARN
57 | handlers =
58 | qualname = fishtest
59 |
60 | [handler_console]
61 | class = StreamHandler
62 | args = (sys.stderr,)
63 | level = NOTSET
64 | formatter = generic
65 |
66 | [formatter_generic]
67 | format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s
68 |
--------------------------------------------------------------------------------
/server/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "fishtest-server"
3 | version = "0.1.0"
4 | description = "fishtest-server"
5 | readme = "README.md"
6 | requires-python = ">=3.13"
7 | classifiers = [
8 | "Development Status :: Beta",
9 | "Intended Audience :: Developers",
10 | "Intended Audience :: Information Technology",
11 | "Intended Audience :: System Administrators",
12 | "Programming Language :: Python :: 3.13",
13 | "Operating System :: Linux",
14 | "Environment :: Web Environment",
15 | "Framework :: MongoDB",
16 | "Framework :: Mako",
17 | "Framework :: Numpy",
18 | "Framework :: Pyramid",
19 | "Framework :: Scipy",
20 | "Framework :: Waitress",
21 | "Topic :: Computer Chess Development",
22 | "Topic :: Computer Chess Development :: Statistical Testing Framework",
23 | "Topic :: Internet",
24 | "Topic :: Internet :: WWW/HTTP",
25 | "Topic :: Internet :: WWW/HTTP :: HTTP Servers",
26 | "Topic :: Software Development",
27 | "Topic :: Software Development :: Application Frameworks",
28 | ]
29 | dependencies = [
30 | "awscli>=1.38.37",
31 | "email-validator>=2.2.0",
32 | "numpy>=2.2.5",
33 | "pymongo>=4.12.0",
34 | "pyramid>=2.0.2",
35 | "pyramid-debugtoolbar>=4.12.1",
36 | "pyramid-mako>=1.1.0",
37 | "requests>=2.32.3",
38 | "scipy>=1.15.2",
39 | "vtjson>=2.2.4",
40 | "waitress>=3.0.2",
41 | "zxcvbn>=4.5.0",
42 | ]
43 |
44 | [project.urls]
45 | Homepage = "https://tests.stockfishchess.org"
46 | Documentation = "https://github.com/official-stockfish/fishtest/wiki"
47 | Repository = "https://github.com/official-stockfish/fishtest"
48 | Issues = "https://github.com/official-stockfish/fishtest"
49 |
50 | [project.entry-points."paste.app_factory"]
51 | main = "fishtest:main"
52 |
53 | [build-system]
54 | requires = ["setuptools>=61"]
55 | build-backend = "setuptools.build_meta"
56 |
57 | [tool.setuptools.data-files]
58 | "." = [
59 | "*.cfg",
60 | "*.ini",
61 | "*.rst",
62 | "*.txt",
63 | ]
64 |
65 | [tool.setuptools.package-data]
66 | fishtest = [
67 | "*.css",
68 | "*.gif",
69 | "*.html",
70 | "*.ico",
71 | "*.js",
72 | "*.jpg",
73 | "*.mak",
74 | "*.mako",
75 | "*.mem",
76 | "*.png",
77 | "*.webp",
78 | "*.pt",
79 | "*.txt",
80 | "*.xml",
81 | ]
82 |
--------------------------------------------------------------------------------
/server/tests/test_nn.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from datetime import UTC, datetime
3 |
4 | from util import get_rundb
5 | from vtjson import ValidationError
6 |
7 |
def show(mc):
    """Print the class name and message of the exception captured by an
    ``assertRaises`` context manager *mc*."""
    exception = mc.exception
    # Use the local consistently instead of re-reading mc.exception.
    print(f"{exception.__class__.__name__}: {exception}")
11 |
12 |
class TestNN(unittest.TestCase):
    """Integration tests for RunDb's neural-net bookkeeping.

    Requires a reachable MongoDB instance (uses the test database opened
    by util.get_rundb()).
    """

    def setUp(self):
        # Shared fixtures: a net name, its owner, and reference dates.
        self.rundb = get_rundb()
        self.name = "nn-0000000000a0.nnue"
        self.user = "user00"
        self.first_test = datetime(2024, 1, 1, tzinfo=UTC)
        self.last_test = datetime(2024, 3, 24, tzinfo=UTC)
        self.last_test_old = datetime(2023, 3, 24, tzinfo=UTC)
        self.run_id = "64e74776a170cb1f26fa3930"

    def tearDown(self):
        # Wipe the nets collection so tests stay independent.
        self.rundb.nndb.delete_many({})

    def test_nn(self):
        # A freshly uploaded net starts with zero downloads.
        self.rundb.upload_nn(self.user, self.name)
        net = self.rundb.get_nn(self.name)
        del net["_id"]
        self.assertEqual(net, {"user": self.user, "name": self.name, "downloads": 0})
        self.rundb.increment_nn_downloads(self.name)
        net = self.rundb.get_nn(self.name)
        del net["_id"]
        self.assertEqual(net, {"user": self.user, "name": self.name, "downloads": 1})
        # Rejected by validation — presumably a master net must also carry
        # a 'last_test' entry (confirm against the nn schema).
        with self.assertRaises(ValidationError) as mc:
            new_net = {
                "user": self.user,
                "name": self.name,
                "downloads": 0,
                "first_test": {"date": self.first_test, "id": self.run_id},
                "is_master": True,
            }
            self.rundb.update_nn(new_net)
        show(mc)
        # Rejected: is_master without any first_test/last_test data.
        with self.assertRaises(ValidationError) as mc:
            new_net = {
                "user": self.user,
                "name": self.name,
                "downloads": 0,
                "is_master": True,
            }
            self.rundb.update_nn(new_net)
        show(mc)
        # Rejected: last_test date earlier than first_test is inconsistent.
        with self.assertRaises(ValidationError) as mc:
            new_net = {
                "user": self.user,
                "name": self.name,
                "downloads": 0,
                "first_test": {"date": self.first_test, "id": self.run_id},
                "is_master": True,
                "last_test": {"date": self.last_test_old, "id": self.run_id},
            }
            self.rundb.update_nn(new_net)
        show(mc)
        # A consistent document passes validation...
        new_net = {
            "user": self.user,
            "name": self.name,
            "downloads": 0,
            "first_test": {"date": self.first_test, "id": self.run_id},
            "is_master": True,
            "last_test": {"date": self.last_test, "id": self.run_id},
        }
        self.rundb.update_nn(new_net)
        net = self.rundb.get_nn(self.name)
        del net["_id"]
        # ...and update_nn does not overwrite the stored download counter.
        new_net["downloads"] = 1
        self.assertEqual(net, new_net)
78 |
--------------------------------------------------------------------------------
/server/tests/test_run.py:
--------------------------------------------------------------------------------
1 | import re
2 | import unittest
3 |
4 | from fishtest.views import get_master_info
5 |
6 |
class CreateRunTest(unittest.TestCase):
    """Smoke test for get_master_info() — hits the live GitHub API."""

    def test_10_get_bench(self):
        master_commits_url = (
            "https://api.github.com/repos/official-stockfish/Stockfish/commits"
        )
        # The bench is a 6-8 digit node count; "None" is accepted when no
        # bench is found in recent commit messages.
        # NOTE(review): re.match() only anchors at the start, so trailing
        # garbage would also pass — consider re.fullmatch().
        self.assertTrue(
            re.match(
                r"[1-9]\d{5,7}|None", str(get_master_info(master_commits_url)["bench"])
            )
        )
17 |
18 |
# Allow running this test module directly with `python test_run.py`.
if __name__ == "__main__":
    unittest.main()
21 |
--------------------------------------------------------------------------------
/server/tests/test_users.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from datetime import UTC, datetime
3 |
4 | import util
5 | from fishtest.views import login, signup
6 | from pyramid import testing
7 |
8 |
class Create10UsersTest(unittest.TestCase):
    """Exercises the signup view against a live test database."""

    def setUp(self):
        self.rundb = util.get_rundb()
        self.config = testing.setUp()
        self.config.add_route("login", "/login")
        self.config.add_route("signup", "/signup")

    def tearDown(self):
        # Remove the user created by the test so runs are repeatable.
        self.rundb.userdb.users.delete_many({"username": "JoeUser"})
        self.rundb.userdb.user_cache.delete_many({"username": "JoeUser"})
        testing.tearDown()

    def test_create_user(self):
        request = testing.DummyRequest(
            userdb=self.rundb.userdb,
            method="POST",
            remote_addr="127.0.0.1",
            params={
                "username": "JoeUser",
                "password": "secret",
                "password2": "secret",
                "email": "joe@user.net",
                "tests_repo": "https://github.com/official-stockfish/Stockfish",
            },
        )
        response = signup(request)
        # BUG FIX: assertTrue was called with two positional arguments, so
        # the non-empty string was the (always-true) condition and the
        # response was just the failure message. Check the redirect body
        # the same way Create50LoginTest does.
        self.assertTrue("The resource was found at" in str(response))
36 |
37 |
class Create50LoginTest(unittest.TestCase):
    """Login flow: bad password, pending account, then successful login.

    Requires a reachable MongoDB instance.
    """

    def setUp(self):
        self.rundb = util.get_rundb()
        # Create the fixture user the login attempts operate on.
        self.rundb.userdb.create_user(
            "JoeUser",
            "secret",
            "email@email.email",
            "https://github.com/official-stockfish/Stockfish",
        )
        self.config = testing.setUp()
        self.config.add_route("login", "/login")

    def tearDown(self):
        # Remove the fixture user (including its cache entry) between tests.
        self.rundb.userdb.users.delete_many({"username": "JoeUser"})
        self.rundb.userdb.user_cache.delete_many({"username": "JoeUser"})
        testing.tearDown()

    def test_login(self):
        # A wrong password is flashed as an error.
        request = testing.DummyRequest(
            userdb=self.rundb.userdb,
            method="POST",
            params={"username": "JoeUser", "password": "badsecret"},
        )
        response = login(request)
        self.assertTrue(
            "Invalid password for user: JoeUser" in request.session.pop_flash("error")
        )

        # Correct password, but still pending from logging in
        request.params["password"] = "secret"
        login(request)
        self.assertTrue(
            "Account pending for user: JoeUser" in request.session.pop_flash("error")[0]
        )

        # Unblock, then user can log in successfully
        user = self.rundb.userdb.get_user("JoeUser")
        user["pending"] = False
        self.rundb.userdb.save_user(user)
        response = login(request)
        # 302 redirect indicates a successful login.
        self.assertEqual(response.code, 302)
        self.assertTrue("The resource was found at" in str(response))
80 |
81 |
class Create90APITest(unittest.TestCase):
    """Fixture scaffolding for API tests.

    NOTE(review): this class defines no test_* methods in this chunk, so
    only setUp/tearDown execute — confirm whether the API tests were
    intentionally removed.
    """

    def setUp(self):
        # Create a run owned by 'travis' plus a cached user entry.
        self.rundb = util.get_rundb()
        self.run_id = self.rundb.new_run(
            "master",
            "master",
            100000,
            "100+0.01",
            "100+0.01",
            "book",
            10,
            1,
            "",
            "",
            username="travis",
            tests_repo="travis",
            start_time=datetime.now(UTC),
        )
        self.rundb.userdb.user_cache.insert_one(
            {"username": "JoeUser", "cpu_hours": 12345}
        )
        self.config = testing.setUp()
        self.config.add_route("api_stop_run", "/api/stop_run")

    def tearDown(self):
        # Clean up the cached user between tests.
        self.rundb.userdb.users.delete_many({"username": "JoeUser"})
        self.rundb.userdb.user_cache.delete_many({"username": "JoeUser"})
        testing.tearDown()
110 |
111 |
# Allow running this test module directly with `python test_users.py`.
if __name__ == "__main__":
    unittest.main()
114 |
--------------------------------------------------------------------------------
/server/tests/util.py:
--------------------------------------------------------------------------------
1 | import atexit
2 |
3 | from fishtest.rundb import RunDb
4 |
5 |
def get_rundb():
    """Open a RunDb on the 'fishtest_tests' database and return it."""
    test_rundb = RunDb(db_name="fishtest_tests")
    # Make sure the MongoDB connection is closed when the process exits.
    atexit.register(test_rundb.conn.close)
    return test_rundb
10 |
11 |
def find_run(arg="username", value="travis"):
    """Return the first unfinished run whose args[arg] equals *value*.

    Returns None if no such run exists.
    """
    # Reuse get_rundb() instead of duplicating the RunDb construction and
    # atexit registration inline.
    rundb = get_rundb()
    for run in rundb.get_unfinished_runs():
        if run["args"][arg] == value:
            return run
    return None
19 |
--------------------------------------------------------------------------------
/server/utils/backup.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Backup MongoDB to AWS S3
# https://docs.aws.amazon.com/cli/latest/topic/s3-config.html
# List available backups with:
# ${VENV}/bin/aws s3 ls s3://fishtest/backup/archive/
# Download a backup with:
# ${VENV}/bin/aws s3 cp s3://fishtest/backup/archive/<date_utc>/dump.tar dump.tar

# Load the variables with the AWS keys, cron uses a limited environment
. "${HOME}/.profile"

# Abort instead of dumping into the wrong directory if ~/backup is missing.
cd "${HOME}/backup" || exit 1
for db_name in "fishtest_new" "admin" "config" "local"; do
  mongodump --db="${db_name}" --numParallelCollections=1 --excludeCollection="pgns" --gzip
done
tar -cvf dump.tar dump
rm -rf dump

date_utc=$(date +%Y%m%d --utc)
"${VENV}/bin/aws" configure set default.s3.max_concurrent_requests 1
"${VENV}/bin/aws" s3 cp dump.tar "s3://fishtest/backup/archive/${date_utc}/dump.tar"
22 |
--------------------------------------------------------------------------------
/server/utils/clone_fish.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import bz2
4 |
5 | import requests
6 | from pymongo import ASCENDING, MongoClient
7 |
# fish_host = 'http://localhost:6543'
fish_host = "http://94.198.98.239"  # 'http://tests.stockfishchess.org'

conn = MongoClient("localhost")

# conn.drop_database('fish_clone')

db = conn["fish_clone"]

pgndb = db["pgns"]
runs = db["runs"]

# ensure_index() was removed from pymongo; create_index() is the
# idempotent replacement (no-op if the index already exists).
pgndb.create_index([("run_id", ASCENDING)])
21 |
22 |
def main():
    """Clone a fishtest database with PGNs and runs via the REST API."""

    skip = 0
    count = 0
    in_sync = False
    loaded = {}
    while True:
        pgn_list = requests.get(fish_host + "/api/pgn_100/" + str(skip)).json()
        for pgn_file in pgn_list:
            print(pgn_file)
            if pgndb.find_one({"run_id": pgn_file}):
                print("Already copied: {}".format(pgn_file))
                # Seeing a file we did not load ourselves means the local
                # copy has caught up with the server.
                if pgn_file not in loaded:
                    in_sync = True
                    break
            else:
                run_id = pgn_file.split("-")[0]
                if not runs.find_one({"_id": run_id}):
                    print("New run: " + run_id)
                    run = requests.get(fish_host + "/api/get_run/" + run_id).json()
                    # insert() was removed from pymongo; use insert_one().
                    runs.insert_one(run)
                pgn = requests.get(fish_host + "/api/pgn/" + pgn_file)
                pgndb.insert_one(dict(pgn_bz2=bz2.compress(pgn.content), run_id=pgn_file))
                loaded[pgn_file] = True
                count += 1
        skip += len(pgn_list)
        if in_sync or len(pgn_list) < 100:
            break

    print("Copied: {:6d} PGN files (~ {:8d} games)".format(count, 250 * count))
    # Collection.count() was removed from pymongo; use count_documents().
    count = pgndb.count_documents({})
    print("Database:{:6d} PGN files (~ {:8d} games)".format(count, 250 * count))
    count = runs.count_documents({})
    # BUG FIX: was ".formt(count)", which raised AttributeError at runtime.
    print("Database:{:6d} runs".format(count))
58 |
59 |
# Run the clone when invoked as a script.
if __name__ == "__main__":
    main()
62 |
--------------------------------------------------------------------------------
/server/utils/compact_actions.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from fishtest.actiondb import ActionDb
4 | from pymongo import MongoClient
5 |
# Connect to the local MongoDB instance used by fishtest and wrap the
# 'actions' collection in the project's ActionDb access layer.
conn = MongoClient()
db = conn["fishtest_new"]
actiondb = ActionDb(db)
9 |
10 |
def _compact_action_data(data):
    """Drop the bulky 'tasks' lists from one action's data dict, in place.

    Returns True if anything was removed.
    """
    updated = False
    if "tasks" in data:
        del data["tasks"]
        print(data["_id"])
        updated = True
    # ROBUSTNESS FIX: check for "tasks" before deleting so a rerun over
    # already-compacted documents does not raise KeyError.
    if "before" in data and "tasks" in data["before"]:
        del data["before"]["tasks"]
        print("before")
        updated = True
    if "after" in data and "tasks" in data["after"]:
        del data["after"]["tasks"]
        print("after")
        updated = True
    return updated


def compact_actions():
    """Strip task lists from every stored action and persist the changes."""
    for a in actiondb.actions.find():
        if _compact_action_data(a["data"]):
            actiondb.actions.replace_one({"_id": a["_id"]}, a)
29 |
30 |
def main():
    # Entry point: strip task data from every stored action document.
    compact_actions()


if __name__ == "__main__":
    main()
37 |
--------------------------------------------------------------------------------
/server/utils/convert_actions.py:
--------------------------------------------------------------------------------
1 | from datetime import UTC, datetime
2 |
3 | import pymongo
4 | from bson.objectid import ObjectId
5 |
if __name__ == "__main__":
    # One-off migration: normalize legacy 'actions' documents in place
    # (datetime 'time' -> POSIX timestamp, ObjectId 'run_id' -> str).
    client = pymongo.MongoClient()
    actions_collection = client["fishtest_new"]["actions"]
    actions = actions_collection.find({}).sort("_id", 1)
    count = 0
    print("Starting conversion...")
    t0 = datetime.now(UTC)
    for action in actions:
        count += 1
        action_id = action["_id"]
        if "time" in action and isinstance(action["time"], datetime):
            # Stored datetimes are assumed to be UTC — TODO confirm.
            action["time"] = action["time"].replace(tzinfo=UTC).timestamp()
        if "run_id" in action and isinstance(action["run_id"], ObjectId):
            action["run_id"] = str(action["run_id"])
        actions_collection.replace_one({"_id": action_id}, action)
        print("Actions converted: {}.".format(count), end="\r")
    t1 = datetime.now(UTC)
    duration = (t1 - t0).total_seconds()
    print("")
    # BUG FIX: guard against ZeroDivisionError when the collection is empty.
    if count:
        time_per_run = duration / count
        print(
            "Conversion finished in {:.2f} seconds. Time per run: {:.2f}ms.".format(
                duration, 1000 * time_per_run
            )
        )
    else:
        print("Conversion finished in {:.2f} seconds. No actions found.".format(duration))
    actions.close()
    client.close()
34 |
--------------------------------------------------------------------------------
/server/utils/create_pgndb.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from pymongo import MongoClient
4 |
# Recreate the capped "pgns" collection on the local fishtest database.
conn = MongoClient("localhost")

db = conn["fishtest_new"]

# Destructive: drops any existing pgn documents.
db.drop_collection("pgns")

# Capped collection: MongoDB evicts the oldest documents automatically
# once the size limit (in bytes) is reached.
db.create_collection("pgns", capped=True, size=50000)
12 |
--------------------------------------------------------------------------------
/server/utils/current.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # current.py - list slow database queries currently running
4 | #
5 | # Run this script manually to list slow db queries. It also lists current indexes
6 | # on the runs collection and runs the 'uptime' command to indicate how busy the
7 | # system currently is.
8 | #
9 |
10 | import pprint
11 | import subprocess
12 | import sys
13 |
14 | from pymongo import MongoClient
15 |
db_name = "fishtest_new"

# MongoDB server is assumed to be on the same machine, if not user should use
# ssh with port forwarding to access the remote host.
conn = MongoClient("localhost")
db = conn[db_name]
runs = db["runs"]

# display current list of indexes
print("Current Indexes:")
pprint.pprint(runs.index_information(), stream=None, indent=1, width=80, depth=None)

# display current uptime command
print("\nRun 'uptime':\n")
# BUG FIX: check_call() returns the exit status, so the old
# print(subprocess.check_call(...)) printed a stray "0" after uptime's
# output; run() lets uptime write directly to stdout.
subprocess.run(["uptime"], check=True)

# display current operations; threshold in seconds, overridable via argv[1]
print("\nCurrent operations:")
t = 0.3
if len(sys.argv) > 1:
    t = float(sys.argv[1])
# NOTE(review): Database.current_op was removed in recent pymongo releases;
# may need the $currentOp aggregation on the admin database instead.
pprint.pprint(
    db.current_op({"secs_running": {"$gte": t}, "query": {"$ne": {}}}),
    stream=None,
    indent=1,
    width=80,
    depth=None,
)
44 |
--------------------------------------------------------------------------------
/server/utils/purge_pgns.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import re
3 | from datetime import UTC, datetime, timedelta
4 |
5 | from fishtest.rundb import RunDb
6 | from pymongo import DESCENDING
7 |
8 |
def purge_pgns(rundb, finished, deleted, days, days_ltc=60):
    """Delete pgn documents belonging to old runs.

    Scans runs matching (finished, deleted) that were updated within the
    last 60 days. A run's pgns are kept when the run is newer than `days`,
    or — for finished, non-deleted long-time-control runs — newer than
    `days_ltc`; otherwise they are deleted.

    Returns (kept_runs, kept_tasks, kept_pgns,
             purged_runs, purged_tasks, purged_pgns).
    """
    kept_runs = kept_tasks = kept_pgns = 0
    purged_runs = purged_tasks = purged_pgns = 0
    now = datetime.now(UTC)
    cutoff_date_ltc = now - timedelta(days=days_ltc)
    cutoff_date = now - timedelta(days=days)
    # Matches a tc whose base time starts with 20-99 or 100-999, i.e. a
    # long time control. With re.match() both alternatives are anchored
    # at the start of the string, so the leading "^" is effectively
    # applied to both.
    tc_regex = re.compile("^([2-9][0-9])|([1-9][0-9][0-9])")
    runs_query = {
        "finished": finished,
        "deleted": deleted,
        "last_updated": {"$gte": now - timedelta(days=60)},
    }
    for run in rundb.db.runs.find(runs_query, sort=[("last_updated", DESCENDING)]):
        # Finished, non-deleted LTC runs get the longer retention window.
        keep = (
            not deleted
            and finished
            and tc_regex.match(run["args"]["tc"])
            and run["last_updated"] > cutoff_date_ltc
        ) or run["last_updated"] > cutoff_date

        if keep:
            kept_runs += 1
        else:
            purged_runs += 1

        tasks_count = len(run["tasks"])
        # pgn documents carry a run_id of the form "<run _id>-<number>"
        # (presumably the task index — TODO confirm against the writer).
        pgns_query = {"run_id": {"$regex": f"^{run['_id']}-\\d+"}}
        pgns_count = rundb.pgndb.count_documents(pgns_query)
        if keep:
            kept_tasks += tasks_count
            kept_pgns += pgns_count
        else:
            rundb.pgndb.delete_many(pgns_query)
            purged_tasks += tasks_count
            purged_pgns += pgns_count

    return (
        kept_runs,
        kept_tasks,
        kept_pgns,
        purged_runs,
        purged_tasks,
        purged_pgns,
    )
53 |
54 |
def report(
    runs_type,
    kept_runs,
    kept_tasks,
    kept_pgns,
    purged_runs,
    purged_tasks,
    purged_pgns,
):
    """Print a kept/purged summary for one category of runs."""

    def fmt(runs, tasks, pgns):
        # Fixed-width columns so successive reports line up.
        return "{:5d} runs, {:7d} tasks, {:7d} pgns".format(runs, tasks, pgns)

    print(runs_type)
    print("kept :", fmt(kept_runs, kept_tasks, kept_pgns))
    print("purged:", fmt(purged_runs, purged_tasks, purged_pgns))
68 |
69 |
def main():
    """Apply the pgn retention policy and print a summary per category.

    Runs from the last 60 days are processed in descending last_updated
    order; pgns are purged for:
      - finished, not deleted runs older than 1 day (STC) / 10 days (LTC)
      - finished, deleted runs older than 10 days
      - unfinished, not deleted runs older than 50 days
    """
    rundb = RunDb()
    passes = (
        ("Finished runs:", dict(finished=True, deleted=False, days=1, days_ltc=10)),
        ("Deleted runs:", dict(finished=True, deleted=True, days=10)),
        ("Unfinished runs:", dict(finished=False, deleted=False, days=50)),
    )
    for label, kwargs in passes:
        report(label, *purge_pgns(rundb=rundb, **kwargs))
    # Ask MongoDB to reclaim the space freed by the deletions.
    print(rundb.db.command({"compact": "pgns"}))
87 |
88 |
89 | if __name__ == "__main__":
90 | main()
91 |
--------------------------------------------------------------------------------
/server/utils/test_queries.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # test_queries.py - run some sample queries to check db speed
4 | #
5 |
6 | import pprint
7 | import time
8 |
9 | from fishtest.rundb import RunDb
10 | from pymongo import DESCENDING, MongoClient
11 |
12 | db_name = "fishtest_new"
13 | rundb = RunDb()
14 |
15 | # MongoDB server is assumed to be on the same machine, if not user should use
16 | # ssh with port forwarding to access the remote host.
17 | conn = MongoClient("localhost")
18 | db = conn[db_name]
19 | runs = db["runs"]
20 | pgns = db["pgns"]
21 |
22 |
def qlen(c):
    """Number of items in query result *c*; falsy results (None, []) count as 0."""
    return len(list(c)) if c else 0
28 |
29 |
30 | # Extra conditions that might be applied to finished_runs:
31 | # q['args.username'] = username
32 | # q['args.tc'] = {'$regex':'^([4-9][0-9])|([1-9][0-9][0-9])'}
33 | # q['results_info.style'] = '#44EB44'
34 |
35 |
36 | # Time some important queries using call to rundb function
37 |
38 | print("\nFetching unfinished runs ...")
39 | start = time.time()
40 | c = rundb.get_unfinished_runs()
41 | end = time.time()
42 |
43 | print("{} rows {:1.4f}".format(qlen(c), end - start) + "s\nFetching machines ...")
44 | start = time.time()
45 | c = rundb.get_machines()
46 | end = time.time()
47 |
48 | print("{} rows {:1.4f}".format(qlen(c), end - start) + "s\nFetching finished runs ...")
49 | start = time.time()
50 | c, n = rundb.get_finished_runs(
51 | skip=0, limit=50, username="", success_only=False, ltc_only=False
52 | )
53 | end = time.time()
54 |
55 | print(
56 | "{} rows {:1.4f}".format(qlen(c), end - start)
57 | + "s\nFetching finished runs (vdv) ..."
58 | )
59 | start = time.time()
60 | c, n = rundb.get_finished_runs(
61 | skip=0, limit=50, username="vdv", success_only=False, ltc_only=False
62 | )
63 | end = time.time()
64 |
65 | print("{} rows {:1.4f}".format(qlen(c), end - start) + "s\nRequesting pgn ...")
66 | if n == 0:
67 | c.append({"_id": "abc"})
68 | start = time.time()
69 | c = rundb.get_pgn(str(c[0]["_id"]) + ".pgn")
70 | end = time.time()
71 |
72 |
73 | # Tests: Explain some queries - should show which indexes are being used
74 |
75 | print("{} rows {:1.4f}".format(qlen(c), end - start) + "s")
76 | print("\n\nExplain queries")
77 | print("\nFetching unfinished runs xp ...")
78 | start = time.time()
79 | c = runs.find(
80 | {"finished": False}, sort=[("last_updated", DESCENDING), ("start_time", DESCENDING)]
81 | ).explain()
82 | print(pprint.pformat(c, indent=3, width=110))
83 | end = time.time()
84 |
85 | print("{} rows {:1.4f}".format(qlen(c), end - start) + "s")
86 | print("\nFetching machines xp ...")
87 | start = time.time()
88 | c = runs.find({"finished": False, "tasks": {"$elemMatch": {"active": True}}}).explain()
89 | print(pprint.pformat(c, indent=3, width=110))
90 | end = time.time()
91 |
92 | print("{} rows {:1.4f}".format(qlen(c), end - start) + "s")
93 | print("\nFetching finished runs xp ...")
94 | start = time.time()
95 | q = {"finished": True}
96 | c = runs.find(q, skip=0, limit=50, sort=[("last_updated", DESCENDING)]).explain()
97 | print(pprint.pformat(c, indent=3, width=110))
98 | end = time.time()
99 |
100 | print("{} rows {:1.4f}".format(qlen(c), end - start) + "s")
101 |
--------------------------------------------------------------------------------
/server/utils/upgrade.py:
--------------------------------------------------------------------------------
1 | import pprint
2 | import uuid
3 | from datetime import UTC, datetime
4 |
5 | import pymongo
6 | from fishtest.util import worker_name
7 |
8 |
def show(p):
    """Pretty-print *p* (small debugging helper)."""
    print(pprint.pformat(p))
11 |
12 |
# for documentation
# Reference shape of a run document; "?" marks placeholder string values
# that convert_run() below rewrites to "".
run_default = {
    "_id": "?",
    "args": {
        "base_tag": "?",
        "new_tag": "?",
        "base_net": "?",
        "new_net": "?",
        "num_games": 400000,
        "tc": "?",
        "new_tc": "?",
        "book": "?",
        "book_depth": "8",
        "threads": 1,
        "resolved_base": "?",
        "resolved_new": "?",
        "msg_base": "?",
        "msg_new": "?",
        "base_options": "?",
        "new_options": "?",
        "base_signature": "?",
        "new_signature": "?",
        "username": "Unknown user",
        "tests_repo": "?",
        "auto_purge": False,
        "throughput": 100,
        "itp": 100.0,
        "priority": 0,
        "adjudication": True,
    },
    "start_time": datetime.min,
    "last_updated": datetime.min,
    "tc_base": -1.0,
    "base_same_as_master": True,
    "results_stale": False,
    "finished": True,
    "approved": True,
    "approver": "?",
    "workers": 0,
    "cores": 0,
    "results": {
        "wins": 0,
        "losses": 0,
        "draws": 0,
        "crashes": 0,
        "time_losses": 0,
    },
}
61 |
# Reference shape of a task's worker_info sub-document. "xxxxxxxx" is the
# legacy placeholder unique_key that convert_task_list() replaces.
worker_info_default = {
    "uname": "?",
    "architecture": ["?", "?"],
    "concurrency": 0,
    "max_memory": 0,
    "min_threads": 1,
    "username": "Unknown_worker",
    "version": 0,
    "python_version": [],
    "gcc_version": [],
    "unique_key": "xxxxxxxx",
    "rate": {"limit": 5000, "remaining": 5000},
    "ARCH": "?",
    "nps": 0.0,
    "remote_addr": "?.?.?.?",
    "country_code": "?",
}

# worker name -> generated UUID; shared state used by convert_task_list().
worker_dict = {}
81 |
82 |
def convert_task_list(tasks):
    """Replace placeholder unique_keys on tasks with stable UUIDs.

    Legacy workers all carry the sentinel key "xxxxxxxx"; each such worker
    (identified by its name) is assigned one freshly generated UUID, shared
    across all of its tasks via the module-level worker_dict.
    """
    for task in tasks:
        info = task["worker_info"]
        if info["unique_key"] != "xxxxxxxx":
            continue
        name = worker_name(info)
        if name not in worker_dict:
            worker_dict[name] = str(uuid.uuid4())
        info["unique_key"] = worker_dict[name]
90 |
91 |
def convert_run(run):
    """Normalize one run document in place.

    Placeholder "?" argument values become empty strings, and the boolean
    status flags are guaranteed to exist (defaulting to False).
    """
    args = run["args"]
    for key, value in args.items():
        if value == "?":
            args[key] = ""

    for flag in ("finished", "failed", "deleted", "is_green", "is_yellow"):
        run.setdefault(flag, False)
100 |
101 |
102 | if __name__ == "__main__":
103 | client = pymongo.MongoClient()
104 | runs_collection = client["fishtest_new"]["runs"]
105 | runs = runs_collection.find({}).sort("_id", 1)
106 | count = 0
107 | print("Starting conversion...")
108 | t0 = datetime.now(UTC)
109 | for r in runs:
110 | count += 1
111 | r_id = r["_id"]
112 | convert_run(r)
113 | convert_task_list(r["tasks"])
114 | runs_collection.replace_one({"_id": r_id}, r)
115 | print("Runs converted: {}.".format(count), end="\r")
116 | t1 = datetime.now(UTC)
117 | duration = (t1 - t0).total_seconds()
118 | time_per_run = duration / count
119 | print("")
120 | print(
121 | "Conversion finished in {:.2f} seconds. Time per run: {:.2f}ms.".format(
122 | duration, 1000 * time_per_run
123 | )
124 | )
125 | runs.close()
126 | client.close()
127 |
--------------------------------------------------------------------------------
/worker/packages/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import sys
3 | import types
4 | from pathlib import Path
5 |
6 |
class LazyModule(types.ModuleType):
    """Module proxy that defers the real import until first attribute access."""

    def __init__(self, name):
        super().__init__(name)
        # Relative module path (".name"), resolved against this package
        # when the import finally happens.
        self._module_path = f".{name}"
        self._module = None  # cached module object once loaded

    def _load(self):
        # Import the wrapped module exactly once and cache it.
        if self._module is None:
            # Put this package's directory on sys.path before importing —
            # presumably so the vendored modules can find each other by
            # absolute name (TODO confirm).
            pkg_path = str(Path(__file__).resolve().parent)
            if pkg_path not in sys.path:
                sys.path.append(pkg_path)
            self._module = importlib.import_module(self._module_path, __name__)
        return self._module

    def __getattr__(self, item):
        # Only called for attributes not found on the proxy itself, so
        # this triggers the lazy import on first real use.
        module = self._load()
        return getattr(module, item)
24 |
25 |
26 | requests = LazyModule("requests")
27 |
28 | __all__ = ["requests"]
29 |
--------------------------------------------------------------------------------
/worker/packages/certifi/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import contents, where
2 |
3 | __version__ = "2021.05.30"
4 |
--------------------------------------------------------------------------------
/worker/packages/certifi/__main__.py:
--------------------------------------------------------------------------------
import argparse

from certifi import contents, where

# CLI entry point: print the bundled CA certificates (-c/--contents) or,
# by default, the filesystem path of cacert.pem.
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()

if args.contents:
    print(contents())
else:
    print(where())
13 |
--------------------------------------------------------------------------------
/worker/packages/certifi/core.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | certifi.py
5 | ~~~~~~~~~~
6 |
7 | This module returns the installation location of cacert.pem or its contents.
8 | """
9 | import os
10 |
# NOTE(review): vendored copy of certifi — keep changes in sync with upstream.
try:
    from importlib.resources import path as get_path, read_text

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where():
        # This is slightly terrible, but we want to delay extracting the file
        # in cases where we're inside of a zipimport situation until someone
        # actually calls where(), but we don't want to re-extract the file
        # on every call of where(), so we'll do it once then store it in a
        # global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you to
            # manage the cleanup of this file, so it doesn't actually return a
            # path, it returns a context manager that will give you the path
            # when you enter it and will do any cleanup when you leave it. In
            # the common case of not needing a temporary file, it will just
            # return the file system location and the __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = get_path("certifi", "cacert.pem")
            _CACERT_PATH = str(_CACERT_CTX.__enter__())

        return _CACERT_PATH


except ImportError:
    # This fallback will work for Python versions prior to 3.7 that lack the
    # importlib.resources module but relies on the existing `where` function
    # so won't address issues with environments like PyOxidizer that don't set
    # __file__ on modules.
    def read_text(_module, _path, encoding="ascii"):
        with open(where(), "r", encoding=encoding) as data:
            return data.read()

    # If we don't have importlib.resources, then we will just do the old logic
    # of assuming we're on the filesystem and munge the path directly.
    def where():
        f = os.path.dirname(__file__)

        return os.path.join(f, "cacert.pem")


def contents():
    # Return the full text of the bundled cacert.pem file.
    return read_text("certifi", "cacert.pem", encoding="ascii")
61 |
--------------------------------------------------------------------------------
/worker/packages/chardet/__init__.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # This library is free software; you can redistribute it and/or
3 | # modify it under the terms of the GNU Lesser General Public
4 | # License as published by the Free Software Foundation; either
5 | # version 2.1 of the License, or (at your option) any later version.
6 | #
7 | # This library is distributed in the hope that it will be useful,
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 | # Lesser General Public License for more details.
11 | #
12 | # You should have received a copy of the GNU Lesser General Public
13 | # License along with this library; if not, write to the Free Software
14 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
15 | # 02110-1301 USA
16 | ######################### END LICENSE BLOCK #########################
17 |
18 |
19 | from .universaldetector import UniversalDetector
20 | from .enums import InputState
21 | from .version import __version__, VERSION
22 |
23 |
24 | __all__ = ['UniversalDetector', 'detect', 'detect_all', '__version__', 'VERSION']
25 |
26 |
def detect(byte_str):
    """
    Detect the encoding of the given byte string.

    :param byte_str: The byte sequence to examine.
    :type byte_str: ``bytes`` or ``bytearray``
    """
    # Coerce the input to a bytearray so the detector always sees one type;
    # anything that is neither bytes nor bytearray is rejected up front.
    if not isinstance(byte_str, bytearray):
        if not isinstance(byte_str, bytes):
            raise TypeError('Expected object of type bytes or bytearray, got: '
                            '{}'.format(type(byte_str)))
        else:
            byte_str = bytearray(byte_str)
    detector = UniversalDetector()
    detector.feed(byte_str)
    return detector.close()
43 |
44 |
def detect_all(byte_str):
    """
    Detect all the possible encodings of the given byte string.

    :param byte_str: The byte sequence to examine.
    :type byte_str: ``bytes`` or ``bytearray``
    """
    # Same input coercion as detect() above.
    if not isinstance(byte_str, bytearray):
        if not isinstance(byte_str, bytes):
            raise TypeError('Expected object of type bytes or bytearray, got: '
                            '{}'.format(type(byte_str)))
        else:
            byte_str = bytearray(byte_str)

    detector = UniversalDetector()
    detector.feed(byte_str)
    detector.close()

    # Only multi-candidate results exist when high (non-ASCII) bytes were
    # seen; otherwise fall through to the detector's single result.
    if detector._input_state == InputState.HIGH_BYTE:
        results = []
        for prober in detector._charset_probers:
            if prober.get_confidence() > detector.MINIMUM_THRESHOLD:
                charset_name = prober.charset_name
                lower_charset_name = prober.charset_name.lower()
                # Use Windows encoding name instead of ISO-8859 if we saw any
                # extra Windows-specific bytes
                if lower_charset_name.startswith('iso-8859'):
                    if detector._has_win_bytes:
                        charset_name = detector.ISO_WIN_MAP.get(lower_charset_name,
                                                                charset_name)
                results.append({
                    'encoding': charset_name,
                    'confidence': prober.get_confidence(),
                    'language': prober.language,
                })
        if len(results) > 0:
            # Highest-confidence candidates first.
            return sorted(results, key=lambda result: -result['confidence'])

    return [detector.result]
84 |
--------------------------------------------------------------------------------
/worker/packages/chardet/big5prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Communicator client code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import Big5DistributionAnalysis
31 | from .mbcssm import BIG5_SM_MODEL
32 |
33 |
class Big5Prober(MultiByteCharSetProber):
    """Prober for the Big5 Chinese encoding."""

    def __init__(self):
        super(Big5Prober, self).__init__()
        self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
        self.distribution_analyzer = Big5DistributionAnalysis()
        self.reset()

    @property
    def charset_name(self):
        return "Big5"

    @property
    def language(self):
        return "Chinese"
48 |
--------------------------------------------------------------------------------
/worker/packages/chardet/charsetgroupprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Communicator client code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .enums import ProbingState
29 | from .charsetprober import CharSetProber
30 |
31 |
class CharSetGroupProber(CharSetProber):
    """Feeds input to a group of probers in parallel and tracks the best guess."""

    def __init__(self, lang_filter=None):
        super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
        self._active_num = 0          # number of probers still in the running
        self.probers = []             # populated by subclasses
        self._best_guess_prober = None

    def reset(self):
        super(CharSetGroupProber, self).reset()
        self._active_num = 0
        for prober in self.probers:
            if prober:
                prober.reset()
                prober.active = True
                self._active_num += 1
        self._best_guess_prober = None

    @property
    def charset_name(self):
        # Lazily compute the best guess if feed() has not settled on one.
        if not self._best_guess_prober:
            self.get_confidence()
            if not self._best_guess_prober:
                return None
        return self._best_guess_prober.charset_name

    @property
    def language(self):
        if not self._best_guess_prober:
            self.get_confidence()
            if not self._best_guess_prober:
                return None
        return self._best_guess_prober.language

    def feed(self, byte_str):
        for prober in self.probers:
            if not prober:
                continue
            if not prober.active:
                continue
            state = prober.feed(byte_str)
            if not state:
                continue
            if state == ProbingState.FOUND_IT:
                # A definitive match short-circuits the whole group.
                self._best_guess_prober = prober
                self._state = ProbingState.FOUND_IT
                return self.state
            elif state == ProbingState.NOT_ME:
                # Deactivate probers that ruled themselves out; give up
                # once none remain active.
                prober.active = False
                self._active_num -= 1
                if self._active_num <= 0:
                    self._state = ProbingState.NOT_ME
                    return self.state
        return self.state

    def get_confidence(self):
        state = self.state
        if state == ProbingState.FOUND_IT:
            return 0.99
        elif state == ProbingState.NOT_ME:
            return 0.01
        # Otherwise: pick the active prober with the highest confidence.
        best_conf = 0.0
        self._best_guess_prober = None
        for prober in self.probers:
            if not prober:
                continue
            if not prober.active:
                self.logger.debug('%s not active', prober.charset_name)
                continue
            conf = prober.get_confidence()
            self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
            if best_conf < conf:
                best_conf = conf
                self._best_guess_prober = prober
        if not self._best_guess_prober:
            return 0.0
        return best_conf
108 |
--------------------------------------------------------------------------------
/worker/packages/chardet/cli/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/worker/packages/chardet/cli/chardetect.py:
--------------------------------------------------------------------------------
1 | """
2 | Script which takes one or more file paths and reports on their detected
3 | encodings
4 |
5 | Example::
6 |
7 | % chardetect somefile someotherfile
8 | somefile: windows-1252 with confidence 0.5
9 | someotherfile: ascii with confidence 1.0
10 |
11 | If no paths are provided, it takes its input from stdin.
12 |
13 | """
14 |
15 | from __future__ import absolute_import, print_function, unicode_literals
16 |
17 | import argparse
18 | import sys
19 |
20 | from chardet import __version__
21 | from chardet.compat import PY2
22 | from chardet.universaldetector import UniversalDetector
23 |
24 |
def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    :returns: ``"<name>: <encoding> with confidence <confidence>"`` or
              ``"<name>: no result"``
    """
    u = UniversalDetector()
    for line in lines:
        line = bytearray(line)
        u.feed(line)
        # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
        if u.done:
            break
    u.close()
    result = u.result
    if PY2:
        # On Python 2 file names are bytes; decode for the formatted message.
        name = name.decode(sys.getfilesystemencoding(), 'ignore')
    if result['encoding']:
        return '{}: {} with confidence {}'.format(name, result['encoding'],
                                                  result['confidence'])
    else:
        return '{}: no result'.format(name)
51 |
52 |
def main(argv=None):
    """
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    """
    # Get command line arguments
    parser = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected \
                     encodings")
    parser.add_argument('input',
                        help='File whose encoding we would like to determine. \
                              (default: stdin)',
                        type=argparse.FileType('rb'), nargs='*',
                        default=[sys.stdin if PY2 else sys.stdin.buffer])
    parser.add_argument('--version', action='version',
                        version='%(prog)s {}'.format(__version__))
    args = parser.parse_args(argv)

    for f in args.input:
        if f.isatty():
            # Warn interactive users how to terminate stdin input.
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)
        print(description_of(f, f.name))
81 |
82 |
# Script entry point (direct execution or python -m).
if __name__ == '__main__':
    main()
85 |
--------------------------------------------------------------------------------
/worker/packages/chardet/codingstatemachine.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | import logging
29 |
30 | from .enums import MachineState
31 |
32 |
class CodingStateMachine(object):
    """
    A state machine to verify a byte sequence for a particular encoding. For
    each byte the detector receives, it will feed that byte to every active
    state machine available, one byte at a time. The state machine changes its
    state based on its previous state and the byte it receives. There are 3
    states in a state machine that are of interest to an auto-detector:

    START state: This is the state to start with, or a legal byte sequence
                 (i.e. a valid code point) for character has been identified.

    ME state:  This indicates that the state machine identified a byte sequence
               that is specific to the charset it is designed for and that
               there is no other possible encoding which can contain this byte
               sequence. This will to lead to an immediate positive answer for
               the detector.

    ERROR state: This indicates the state machine identified an illegal byte
                 sequence for that encoding. This will lead to an immediate
                 negative answer for this encoding. Detector will exclude this
                 encoding from consideration from here on.
    """
    def __init__(self, sm):
        # sm is a model dict providing 'class_table', 'char_len_table',
        # 'class_factor', 'state_table', 'name' and 'language'.
        self._model = sm
        self._curr_byte_pos = 0
        self._curr_char_len = 0
        self._curr_state = None
        self.logger = logging.getLogger(__name__)
        self.reset()

    def reset(self):
        # Return to the START state for a fresh byte sequence.
        self._curr_state = MachineState.START

    def next_state(self, c):
        # for each byte we get its class
        # if it is first byte, we also get byte length
        byte_class = self._model['class_table'][c]
        if self._curr_state == MachineState.START:
            self._curr_byte_pos = 0
            self._curr_char_len = self._model['char_len_table'][byte_class]
        # from byte's class and state_table, we get its next state
        curr_state = (self._curr_state * self._model['class_factor']
                      + byte_class)
        self._curr_state = self._model['state_table'][curr_state]
        self._curr_byte_pos += 1
        return self._curr_state

    def get_current_charlen(self):
        return self._curr_char_len

    def get_coding_state_machine(self):
        return self._model['name']

    @property
    def language(self):
        return self._model['language']
89 |
--------------------------------------------------------------------------------
/worker/packages/chardet/compat.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # Contributor(s):
3 | # Dan Blanchard
4 | # Ian Cordasco
5 | #
6 | # This library is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License as published by the Free Software Foundation; either
9 | # version 2.1 of the License, or (at your option) any later version.
10 | #
11 | # This library is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 | # Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public
17 | # License along with this library; if not, write to the Free Software
18 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 | # 02110-1301 USA
20 | ######################### END LICENSE BLOCK #########################
21 |
22 | import sys
23 |
24 |
# Py2/Py3 compatibility shims: version flags plus aliases for the string
# types and dict-iteration helpers whose names differ between the two.
# (The Py2 branch references `unicode`, which only exists on Python 2;
# it is never executed on Python 3.)
if sys.version_info < (3, 0):
    PY2 = True
    PY3 = False
    string_types = (str, unicode)
    text_type = unicode
    iteritems = dict.iteritems
else:
    PY2 = False
    PY3 = True
    string_types = (bytes, str)
    text_type = str
    iteritems = dict.items
37 |
--------------------------------------------------------------------------------
/worker/packages/chardet/cp949prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .chardistribution import EUCKRDistributionAnalysis
29 | from .codingstatemachine import CodingStateMachine
30 | from .mbcharsetprober import MultiByteCharSetProber
31 | from .mbcssm import CP949_SM_MODEL
32 |
33 |
class CP949Prober(MultiByteCharSetProber):
    """Multi-byte prober specialized for the CP949 (Korean) encoding."""

    def __init__(self):
        super(CP949Prober, self).__init__()
        # CP949 extends EUC-KR, so the EUC-KR character-frequency
        # analysis applies to it unchanged.
        self.distribution_analyzer = EUCKRDistributionAnalysis()
        self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
        self.reset()

    @property
    def charset_name(self):
        return "CP949"

    @property
    def language(self):
        return "Korean"
50 |
--------------------------------------------------------------------------------
/worker/packages/chardet/enums.py:
--------------------------------------------------------------------------------
1 | """
2 | All of the Enums that are used throughout the chardet package.
3 |
4 | :author: Dan Blanchard (dan.blanchard@gmail.com)
5 | """
6 |
7 |
class InputState(object):
    """Enumerates the input states a universal detector moves through."""
    PURE_ASCII = 0
    ESC_ASCII = 1
    HIGH_BYTE = 2
15 |
16 |
class LanguageFilter(object):
    """Bit flags selecting which language groups a ``UniversalDetector``
    should probe; composite masks are built from the primitive flags.
    """
    CHINESE_SIMPLIFIED = 0x01
    CHINESE_TRADITIONAL = 0x02
    JAPANESE = 0x04
    KOREAN = 0x08
    NON_CJK = 0x10
    ALL = 0x1F
    CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
    CJK = CHINESE | JAPANESE | KOREAN
30 |
31 |
class ProbingState(object):
    """Enumerates the detection states a prober can report."""
    DETECTING = 0
    FOUND_IT = 1
    NOT_ME = 2
39 |
40 |
class MachineState(object):
    """Enumerates the states of a coding state machine."""
    START = 0
    ERROR = 1
    ITS_ME = 2
48 |
49 |
class SequenceLikelihood(object):
    """How likely a character is to follow the one before it."""
    NEGATIVE = 0
    UNLIKELY = 1
    LIKELY = 2
    POSITIVE = 3

    @classmethod
    def get_num_categories(cls):
        """:returns: The number of likelihood categories in the enum."""
        # Categories are consecutive integers starting at zero, so the
        # count is one past the highest value.
        return cls.POSITIVE + 1
63 |
64 |
class CharacterCategory(object):
    """Categories that ``SingleByteCharsetProber`` language models sort
    characters into.

    Any value below CONTROL is treated as a letter.
    """
    UNDEFINED = 255
    LINE_BREAK = 254
    SYMBOL = 253
    DIGIT = 252
    CONTROL = 251
77 |
--------------------------------------------------------------------------------
/worker/packages/chardet/escprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .charsetprober import CharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .enums import LanguageFilter, ProbingState, MachineState
31 | from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
32 | ISO2022KR_SM_MODEL)
33 |
34 |
class EscCharSetProber(CharSetProber):
    """
    This CharSetProber uses a "code scheme" approach for detecting encodings,
    whereby easily recognizable escape or shift sequences are relied on to
    identify these encodings.
    """

    def __init__(self, lang_filter=None):
        super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
        # One state machine per escape-based encoding, selected by the
        # caller's language filter.
        self.coding_sm = []
        if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
            self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
            self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL))
        if self.lang_filter & LanguageFilter.JAPANESE:
            self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
        if self.lang_filter & LanguageFilter.KOREAN:
            self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
        # NOTE(review): the `&` tests above assume self.lang_filter is an
        # int flag set by the base class; a None filter would fail here —
        # confirm against CharSetProber.__init__.
        self.active_sm_count = None
        self._detected_charset = None
        self._detected_language = None
        self._state = None
        self.reset()

    def reset(self):
        """Reactivate and reset every state machine and clear results."""
        super(EscCharSetProber, self).reset()
        for coding_sm in self.coding_sm:
            if not coding_sm:
                continue
            coding_sm.active = True
            coding_sm.reset()
        self.active_sm_count = len(self.coding_sm)
        self._detected_charset = None
        self._detected_language = None

    @property
    def charset_name(self):
        # Name of the charset detected so far, or None.
        return self._detected_charset

    @property
    def language(self):
        return self._detected_language

    def get_confidence(self):
        # Escape sequences are unambiguous: either we are certain of a
        # match or we have no evidence at all.
        if self._detected_charset:
            return 0.99
        else:
            return 0.00

    def feed(self, byte_str):
        """Feed bytes to each still-active state machine.

        A machine that hits MachineState.ERROR is deactivated; the prober
        finishes with FOUND_IT as soon as any machine recognizes its
        encoding, or with NOT_ME once every machine has been deactivated.
        """
        for c in byte_str:
            for coding_sm in self.coding_sm:
                if not coding_sm or not coding_sm.active:
                    continue
                coding_state = coding_sm.next_state(c)
                if coding_state == MachineState.ERROR:
                    # This machine can no longer match; retire it.
                    coding_sm.active = False
                    self.active_sm_count -= 1
                    if self.active_sm_count <= 0:
                        self._state = ProbingState.NOT_ME
                        return self.state
                elif coding_state == MachineState.ITS_ME:
                    self._state = ProbingState.FOUND_IT
                    self._detected_charset = coding_sm.get_coding_state_machine()
                    self._detected_language = coding_sm.language
                    return self.state

        return self.state
102 |
--------------------------------------------------------------------------------
/worker/packages/chardet/eucjpprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .enums import ProbingState, MachineState
29 | from .mbcharsetprober import MultiByteCharSetProber
30 | from .codingstatemachine import CodingStateMachine
31 | from .chardistribution import EUCJPDistributionAnalysis
32 | from .jpcntx import EUCJPContextAnalysis
33 | from .mbcssm import EUCJP_SM_MODEL
34 |
35 |
class EUCJPProber(MultiByteCharSetProber):
    """Multi-byte prober for the EUC-JP (Japanese) encoding.

    Combines the generic byte-distribution analysis with a Japanese
    context analysis; the final confidence is the larger of the two.
    """

    def __init__(self):
        super(EUCJPProber, self).__init__()
        self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
        self.distribution_analyzer = EUCJPDistributionAnalysis()
        self.context_analyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        super(EUCJPProber, self).reset()
        self.context_analyzer.reset()

    @property
    def charset_name(self):
        return "EUC-JP"

    @property
    def language(self):
        return "Japanese"

    def feed(self, byte_str):
        """Run byte_str through the state machine and both analyzers."""
        for i in range(len(byte_str)):
            # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
            coding_state = self.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                # A character just completed; feed the two-byte window
                # ending here to both analyzers.
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    # First byte of this call: pair it with the byte
                    # carried over from the previous feed() call.
                    self._last_char[1] = byte_str[0]
                    self.context_analyzer.feed(self._last_char, char_len)
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.context_analyzer.feed(byte_str[i - 1:i + 1],
                                               char_len)
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)

        # Remember the final byte so the next feed() call can pair it.
        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            if (self.context_analyzer.got_enough_data() and
               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        """Return the higher of the context and distribution confidences."""
        context_conf = self.context_analyzer.get_confidence()
        distrib_conf = self.distribution_analyzer.get_confidence()
        return max(context_conf, distrib_conf)
93 |
--------------------------------------------------------------------------------
/worker/packages/chardet/euckrprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import EUCKRDistributionAnalysis
31 | from .mbcssm import EUCKR_SM_MODEL
32 |
33 |
class EUCKRProber(MultiByteCharSetProber):
    """Multi-byte prober specialized for the EUC-KR (Korean) encoding."""

    def __init__(self):
        super(EUCKRProber, self).__init__()
        # Byte-frequency analysis tuned for Korean text.
        self.distribution_analyzer = EUCKRDistributionAnalysis()
        self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
        self.reset()

    @property
    def charset_name(self):
        return "EUC-KR"

    @property
    def language(self):
        return "Korean"
48 |
--------------------------------------------------------------------------------
/worker/packages/chardet/euctwprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import EUCTWDistributionAnalysis
31 | from .mbcssm import EUCTW_SM_MODEL
32 |
class EUCTWProber(MultiByteCharSetProber):
    """Multi-byte prober specialized for the EUC-TW encoding."""

    def __init__(self):
        super(EUCTWProber, self).__init__()
        # Byte-frequency analysis tuned for EUC-TW text.
        self.distribution_analyzer = EUCTWDistributionAnalysis()
        self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
        self.reset()

    @property
    def charset_name(self):
        return "EUC-TW"

    @property
    def language(self):
        return "Taiwan"
47 |
--------------------------------------------------------------------------------
/worker/packages/chardet/gb2312prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import GB2312DistributionAnalysis
31 | from .mbcssm import GB2312_SM_MODEL
32 |
class GB2312Prober(MultiByteCharSetProber):
    """Multi-byte prober specialized for the GB2312 (Chinese) encoding."""

    def __init__(self):
        super(GB2312Prober, self).__init__()
        # Byte-frequency analysis tuned for GB2312 text.
        self.distribution_analyzer = GB2312DistributionAnalysis()
        self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
        self.reset()

    @property
    def charset_name(self):
        return "GB2312"

    @property
    def language(self):
        return "Chinese"
47 |
--------------------------------------------------------------------------------
/worker/packages/chardet/mbcharsetprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | # Proofpoint, Inc.
13 | #
14 | # This library is free software; you can redistribute it and/or
15 | # modify it under the terms of the GNU Lesser General Public
16 | # License as published by the Free Software Foundation; either
17 | # version 2.1 of the License, or (at your option) any later version.
18 | #
19 | # This library is distributed in the hope that it will be useful,
20 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
21 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 | # Lesser General Public License for more details.
23 | #
24 | # You should have received a copy of the GNU Lesser General Public
25 | # License along with this library; if not, write to the Free Software
26 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
27 | # 02110-1301 USA
28 | ######################### END LICENSE BLOCK #########################
29 |
30 | from .charsetprober import CharSetProber
31 | from .enums import ProbingState, MachineState
32 |
33 |
class MultiByteCharSetProber(CharSetProber):
    """
    Base class for the multi-byte charset probers.

    Subclasses supply a coding state machine (``coding_sm``) that
    validates byte sequences and a ``distribution_analyzer`` that scores
    character frequencies; this class drives both from :meth:`feed`.
    """

    def __init__(self, lang_filter=None):
        super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
        self.distribution_analyzer = None
        self.coding_sm = None
        # Rolling two-byte window; slot [0] carries the last byte of the
        # previous feed() call so characters split across calls are still
        # analyzed.
        self._last_char = [0, 0]

    def reset(self):
        super(MultiByteCharSetProber, self).reset()
        if self.coding_sm:
            self.coding_sm.reset()
        if self.distribution_analyzer:
            self.distribution_analyzer.reset()
        self._last_char = [0, 0]

    @property
    def charset_name(self):
        # Subclasses must return their canonical charset name.
        raise NotImplementedError

    @property
    def language(self):
        # Subclasses must return their human-language name.
        raise NotImplementedError

    def feed(self, byte_str):
        """Validate byte_str with the state machine and score completed
        characters with the distribution analyzer.
        """
        for i in range(len(byte_str)):
            coding_state = self.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                # A character just completed; hand the two-byte window
                # ending here to the distribution analyzer.
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    # First byte of this call: pair it with the byte
                    # carried over from the previous feed() call.
                    self._last_char[1] = byte_str[0]
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)

        # Remember the final byte for the next feed() call.
        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            if (self.distribution_analyzer.got_enough_data() and
                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        # Confidence comes entirely from the character-frequency analysis.
        return self.distribution_analyzer.get_confidence()
92 |
--------------------------------------------------------------------------------
/worker/packages/chardet/mbcsgroupprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | # Proofpoint, Inc.
13 | #
14 | # This library is free software; you can redistribute it and/or
15 | # modify it under the terms of the GNU Lesser General Public
16 | # License as published by the Free Software Foundation; either
17 | # version 2.1 of the License, or (at your option) any later version.
18 | #
19 | # This library is distributed in the hope that it will be useful,
20 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
21 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 | # Lesser General Public License for more details.
23 | #
24 | # You should have received a copy of the GNU Lesser General Public
25 | # License along with this library; if not, write to the Free Software
26 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
27 | # 02110-1301 USA
28 | ######################### END LICENSE BLOCK #########################
29 |
30 | from .charsetgroupprober import CharSetGroupProber
31 | from .utf8prober import UTF8Prober
32 | from .sjisprober import SJISProber
33 | from .eucjpprober import EUCJPProber
34 | from .gb2312prober import GB2312Prober
35 | from .euckrprober import EUCKRProber
36 | from .cp949prober import CP949Prober
37 | from .big5prober import Big5Prober
38 | from .euctwprober import EUCTWProber
39 |
40 |
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte encoding."""

    def __init__(self, lang_filter=None):
        super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
        # One instance of each supported multi-byte prober, in upstream
        # order.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self.probers = [prober_cls() for prober_cls in prober_classes]
        self.reset()
55 |
--------------------------------------------------------------------------------
/worker/packages/chardet/metadata/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/official-stockfish/fishtest/fdefcdd1be3617ffa0451b006e3b6f030cf8678d/worker/packages/chardet/metadata/__init__.py
--------------------------------------------------------------------------------
/worker/packages/chardet/sbcsgroupprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is Mozilla Universal charset detector code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 2001
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | # Shy Shalom - original C code
12 | #
13 | # This library is free software; you can redistribute it and/or
14 | # modify it under the terms of the GNU Lesser General Public
15 | # License as published by the Free Software Foundation; either
16 | # version 2.1 of the License, or (at your option) any later version.
17 | #
18 | # This library is distributed in the hope that it will be useful,
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 | # Lesser General Public License for more details.
22 | #
23 | # You should have received a copy of the GNU Lesser General Public
24 | # License along with this library; if not, write to the Free Software
25 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
26 | # 02110-1301 USA
27 | ######################### END LICENSE BLOCK #########################
28 |
29 | from .charsetgroupprober import CharSetGroupProber
30 | from .hebrewprober import HebrewProber
31 | from .langbulgarianmodel import (ISO_8859_5_BULGARIAN_MODEL,
32 | WINDOWS_1251_BULGARIAN_MODEL)
33 | from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL
34 | from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL
35 | # from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL,
36 | # WINDOWS_1250_HUNGARIAN_MODEL)
37 | from .langrussianmodel import (IBM855_RUSSIAN_MODEL, IBM866_RUSSIAN_MODEL,
38 | ISO_8859_5_RUSSIAN_MODEL, KOI8_R_RUSSIAN_MODEL,
39 | MACCYRILLIC_RUSSIAN_MODEL,
40 | WINDOWS_1251_RUSSIAN_MODEL)
41 | from .langthaimodel import TIS_620_THAI_MODEL
42 | from .langturkishmodel import ISO_8859_9_TURKISH_MODEL
43 | from .sbcharsetprober import SingleByteCharSetProber
44 |
45 |
class SBCSGroupProber(CharSetGroupProber):
    """Group prober bundling every single-byte charset prober.

    The three Hebrew probers are wired together: ``hebrew_prober``
    arbitrates between the logical and visual variants, which share one
    character model.
    """

    def __init__(self):
        super(SBCSGroupProber, self).__init__()
        hebrew_prober = HebrewProber()
        logical_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
                                                        False, hebrew_prober)
        # TODO: See if using ISO-8859-8 Hebrew model works better here, since
        #       it's actually the visual one
        visual_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
                                                       True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew_prober,
                                        visual_hebrew_prober)
        # TODO: ORDER MATTERS HERE. I changed the order vs what was in master
        #       and several tests failed that did not before. Some thought
        #       should be put into the ordering, and we should consider making
        #       order not matter here, because that is very counter-intuitive.
        # NOTE(review): per the TODO above, do not reorder this list.
        self.probers = [
            SingleByteCharSetProber(WINDOWS_1251_RUSSIAN_MODEL),
            SingleByteCharSetProber(KOI8_R_RUSSIAN_MODEL),
            SingleByteCharSetProber(ISO_8859_5_RUSSIAN_MODEL),
            SingleByteCharSetProber(MACCYRILLIC_RUSSIAN_MODEL),
            SingleByteCharSetProber(IBM866_RUSSIAN_MODEL),
            SingleByteCharSetProber(IBM855_RUSSIAN_MODEL),
            SingleByteCharSetProber(ISO_8859_7_GREEK_MODEL),
            SingleByteCharSetProber(WINDOWS_1253_GREEK_MODEL),
            SingleByteCharSetProber(ISO_8859_5_BULGARIAN_MODEL),
            SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL),
            # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
            #       after we retrain model.
            # SingleByteCharSetProber(ISO_8859_2_HUNGARIAN_MODEL),
            # SingleByteCharSetProber(WINDOWS_1250_HUNGARIAN_MODEL),
            SingleByteCharSetProber(TIS_620_THAI_MODEL),
            SingleByteCharSetProber(ISO_8859_9_TURKISH_MODEL),
            hebrew_prober,
            logical_hebrew_prober,
            visual_hebrew_prober,
        ]
        self.reset()
84 |
--------------------------------------------------------------------------------
/worker/packages/chardet/sjisprober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .mbcharsetprober import MultiByteCharSetProber
29 | from .codingstatemachine import CodingStateMachine
30 | from .chardistribution import SJISDistributionAnalysis
31 | from .jpcntx import SJISContextAnalysis
32 | from .mbcssm import SJIS_SM_MODEL
33 | from .enums import ProbingState, MachineState
34 |
35 |
class SJISProber(MultiByteCharSetProber):
    """Multi-byte prober for the Shift-JIS (Japanese) encoding family.

    Combines a Japanese context analysis with a byte-distribution
    analysis; the reported charset name is delegated to the context
    analyzer.
    """

    def __init__(self):
        super(SJISProber, self).__init__()
        self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
        self.distribution_analyzer = SJISDistributionAnalysis()
        self.context_analyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        super(SJISProber, self).reset()
        self.context_analyzer.reset()

    @property
    def charset_name(self):
        # The context analyzer decides the exact variant name.
        return self.context_analyzer.charset_name

    @property
    def language(self):
        return "Japanese"

    def feed(self, byte_str):
        """Run byte_str through the state machine and both analyzers."""
        for i in range(len(byte_str)):
            coding_state = self.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    # First byte of this call: pair it with the byte
                    # carried over from the previous feed() call; the
                    # slice keeps only the char_len bytes of the
                    # just-completed character for the context analyzer.
                    self._last_char[1] = byte_str[0]
                    self.context_analyzer.feed(self._last_char[2 - char_len:],
                                               char_len)
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    # NOTE(review): for char_len == 1 the context slice
                    # differs from the distribution slice; preserved
                    # exactly as upstream chardet has it.
                    self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
                                               - char_len], char_len)
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)

        # Remember the final byte for the next feed() call.
        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            if (self.context_analyzer.got_enough_data() and
               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        """Return the higher of the context and distribution confidences."""
        context_conf = self.context_analyzer.get_confidence()
        distrib_conf = self.distribution_analyzer.get_confidence()
        return max(context_conf, distrib_conf)
93 |
--------------------------------------------------------------------------------
/worker/packages/chardet/utf8prober.py:
--------------------------------------------------------------------------------
1 | ######################## BEGIN LICENSE BLOCK ########################
2 | # The Original Code is mozilla.org code.
3 | #
4 | # The Initial Developer of the Original Code is
5 | # Netscape Communications Corporation.
6 | # Portions created by the Initial Developer are Copyright (C) 1998
7 | # the Initial Developer. All Rights Reserved.
8 | #
9 | # Contributor(s):
10 | # Mark Pilgrim - port to Python
11 | #
12 | # This library is free software; you can redistribute it and/or
13 | # modify it under the terms of the GNU Lesser General Public
14 | # License as published by the Free Software Foundation; either
15 | # version 2.1 of the License, or (at your option) any later version.
16 | #
17 | # This library is distributed in the hope that it will be useful,
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 | # Lesser General Public License for more details.
21 | #
22 | # You should have received a copy of the GNU Lesser General Public
23 | # License along with this library; if not, write to the Free Software
24 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 | # 02110-1301 USA
26 | ######################### END LICENSE BLOCK #########################
27 |
28 | from .charsetprober import CharSetProber
29 | from .enums import ProbingState, MachineState
30 | from .codingstatemachine import CodingStateMachine
31 | from .mbcssm import UTF8_SM_MODEL
32 |
33 |
34 |
class UTF8Prober(CharSetProber):
    """Probe for UTF-8 by running every byte through the UTF-8 state machine."""

    ONE_CHAR_PROB = 0.5

    def __init__(self):
        super(UTF8Prober, self).__init__()
        self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
        self._num_mb_chars = None
        self.reset()

    def reset(self):
        """Return the prober to its initial state."""
        super(UTF8Prober, self).reset()
        self.coding_sm.reset()
        self._num_mb_chars = 0

    @property
    def charset_name(self):
        return "utf-8"

    @property
    def language(self):
        return ""

    def feed(self, byte_str):
        """Advance the state machine over *byte_str*; return the probing state."""
        for byte in byte_str:
            state = self.coding_sm.next_state(byte)
            if state == MachineState.ERROR:
                self._state = ProbingState.NOT_ME
                break
            if state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            if (state == MachineState.START and
                    self.coding_sm.get_current_charlen() >= 2):
                # Completed multi-byte sequences raise our confidence.
                self._num_mb_chars += 1

        if (self.state == ProbingState.DETECTING and
                self.get_confidence() > self.SHORTCUT_THRESHOLD):
            self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        """Confidence grows with the number of multi-byte characters seen."""
        unlike = 0.99
        if self._num_mb_chars < 6:
            return 1.0 - unlike * (self.ONE_CHAR_PROB ** self._num_mb_chars)
        return unlike
83 |
--------------------------------------------------------------------------------
/worker/packages/chardet/version.py:
--------------------------------------------------------------------------------
1 | """
2 | This module exists only to simplify retrieving the version number of chardet
3 | from within setup.py and from chardet subpackages.
4 |
5 | :author: Dan Blanchard (dan.blanchard@gmail.com)
6 | """
7 |
8 | __version__ = "4.0.0"
9 | VERSION = __version__.split('.')
10 |
--------------------------------------------------------------------------------
/worker/packages/expression/__init__.py:
--------------------------------------------------------------------------------
"""
Package for the expression parser.

Copyright 2017-2018 Leon Helwerda

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from .parser import Expression_Parser

# Public API of the package.
__all__ = ['Expression_Parser']
__version__ = '0.0.5'
23 |
--------------------------------------------------------------------------------
/worker/packages/expression/interpreter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Command line interpreter using the expression line parser.
4 |
5 | Copyright 2017-2018 Leon Helwerda
6 |
7 | Licensed under the Apache License, Version 2.0 (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at
10 |
11 | http://www.apache.org/licenses/LICENSE-2.0
12 |
13 | Unless required by applicable law or agreed to in writing, software
14 | distributed under the License is distributed on an "AS IS" BASIS,
15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | See the License for the specific language governing permissions and
17 | limitations under the License.
18 | """
19 |
20 | from __future__ import print_function
21 |
22 | import cmd
23 | import sys
24 | import traceback
25 | import expression
26 |
class Expression_Interpreter(cmd.Cmd):
    """
    Interactive command line interpreter that applies the expression line parser
    to the provided input.
    """

    def __init__(self):
        cmd.Cmd.__init__(self)
        self.prompt = '>> '
        self.parser = expression.Expression_Parser(assignment=True)

    def default(self, line):
        """Parse *line*, print its value, and persist any assignments."""
        try:
            result = self.parser.parse(line)
            if result is not None:
                self.stdout.write(str(result) + '\n')

            # Fold newly assigned variables back into the parser state so
            # that later input lines can reference them.
            state = self.parser.variables
            state.update(self.parser.modified_variables)
            self.parser.variables = state
        except SyntaxError:
            traceback.print_exc(0)

    def do_quit(self, line):
        """
        Exit the interpreter.
        """

        if line not in ('', '()'):
            self.stdout.write(line + '\n')
        self._quit()

    @staticmethod
    def _quit():
        sys.exit(1)
62 |
63 | def main():
64 | """
65 | Main entry point.
66 | """
67 |
68 | Expression_Interpreter().cmdloop()
69 |
70 | if __name__ == '__main__':
71 | main()
72 |
--------------------------------------------------------------------------------
/worker/packages/idna/__init__.py:
--------------------------------------------------------------------------------
1 | from .package_data import __version__
2 | from .core import *
3 |
--------------------------------------------------------------------------------
/worker/packages/idna/codec.py:
--------------------------------------------------------------------------------
1 | from .core import encode, decode, alabel, ulabel, IDNAError
2 | import codecs
3 | import re
4 |
5 | _unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
6 |
class Codec(codecs.Codec):
    """Stateless whole-string IDNA codec."""

    def encode(self, data, errors='strict'):
        """ACE-encode *data*; only 'strict' error handling is supported."""
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
        if not data:
            return "", 0
        return encode(data), len(data)

    def decode(self, data, errors='strict'):
        """Decode ACE input to Unicode; only 'strict' error handling is supported."""
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
        if not data:
            return u"", 0
        return decode(data), len(data)
28 |
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
    def _buffer_encode(self, data, errors, final):
        # Encode as many complete labels as possible; the buffered base class
        # re-feeds whatever we report as unconsumed (len(data) - size).
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return ("", 0)

        labels = _unicode_dots_re.split(data)
        trailing_dot = u''
        if labels:
            if not labels[-1]:
                # Input ended on a dot: every label is complete.
                trailing_dot = '.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = '.'

        result = []
        size = 0
        for label in labels:
            result.append(alabel(label))
            if size:
                # Account for the separator dot consumed from the input.
                size += 1
            size += len(label)

        # Join with U+002E
        result = ".".join(result) + trailing_dot
        size += len(trailing_dot)
        return (result, size)
61 |
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """Incremental IDNA decoder that buffers incomplete labels."""

    def _buffer_decode(self, data, errors, final):
        """Decode complete labels from *data* and report consumed length.

        :param data: ACE input, either text or an ASCII byte string
        :param errors: must be 'strict'
        :param final: True when no further input will follow
        :returns: ``(decoded_text, consumed_size)``
        :raises IDNAError: for unsupported error handling
        """
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return (u"", 0)

        # Bug fix: the original referenced the Python 2-only builtin
        # ``unicode``, which raises NameError on Python 3 for any input.
        # Accept byte strings by decoding them as ASCII first; non-ASCII
        # bytes raise UnicodeDecodeError, matching the old
        # ``unicode(data, "ascii")`` behavior.  The regex split is safe for
        # decoded ASCII too (the full-width dots cannot occur in ASCII).
        if isinstance(data, bytes):
            data = data.decode("ascii")
        labels = _unicode_dots_re.split(data)

        trailing_dot = u''
        if labels:
            if not labels[-1]:
                trailing_dot = u'.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = u'.'

        result = []
        size = 0
        for label in labels:
            result.append(ulabel(label))
            if size:
                # Account for the separator dot consumed from the input.
                size += 1
            size += len(label)

        result = u".".join(result) + trailing_dot
        size += len(trailing_dot)
        return (result, size)
101 |
102 |
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream writer assembled from the stateless Codec; no extra state needed.
    pass

class StreamReader(Codec, codecs.StreamReader):
    # Stream reader assembled from the stateless Codec; no extra state needed.
    pass
108 |
def getregentry():
    """Build the CodecInfo record used to register the 'idna' codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='idna',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
--------------------------------------------------------------------------------
/worker/packages/idna/compat.py:
--------------------------------------------------------------------------------
1 | from .core import *
2 | from .codec import *
3 |
def ToASCII(label):
    """IDNA2003-style alias for :func:`encode`."""
    return encode(label)

def ToUnicode(label):
    """IDNA2003-style alias for :func:`decode`."""
    return decode(label)

def nameprep(s):
    """Always raises: IDNA 2008 has no nameprep step."""
    raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")
12 |
13 |
--------------------------------------------------------------------------------
/worker/packages/idna/intranges.py:
--------------------------------------------------------------------------------
1 | """
2 | Given a list of integers, made up of (hopefully) a small number of long runs
3 | of consecutive integers, compute a representation of the form
4 | ((start1, end1), (start2, end2) ...). Then answer the question "was x present
5 | in the original list?" in time O(log(# runs)).
6 | """
7 |
8 | import bisect
9 |
def intranges_from_list(list_):
    """Represent a list of integers as a sequence of ranges:
    ((start_0, end_0), (start_1, end_1), ...), such that the original
    integers are exactly those x such that start_i <= x < end_i for some i.

    Ranges are encoded as single integers (start << 32 | end), not as tuples.
    """

    values = sorted(list_)
    ranges = []
    run_start = 0
    for pos, value in enumerate(values):
        # Extend the current run while the next value is consecutive.
        if pos + 1 < len(values) and values[pos + 1] == value + 1:
            continue
        ranges.append(_encode_range(values[run_start], value + 1))
        run_start = pos + 1

    return tuple(ranges)

def _encode_range(start, end):
    # Pack (start, end) into one integer: start in the high 32 bits.
    return (start << 32) | end

def _decode_range(r):
    # Inverse of _encode_range.
    return (r >> 32), (r & ((1 << 32) - 1))


def intranges_contain(int_, ranges):
    """Determine if `int_` falls into one of the ranges in `ranges`."""
    probe = _encode_range(int_, 0)
    pos = bisect.bisect_left(ranges, probe)
    # The range just before the insertion point may cover int_ ...
    if pos > 0:
        start, end = _decode_range(ranges[pos - 1])
        if start <= int_ < end:
            return True
    # ... or int_ may sit exactly on the start of the next range.
    if pos < len(ranges):
        start, _unused = _decode_range(ranges[pos])
        if start == int_:
            return True
    return False
54 |
--------------------------------------------------------------------------------
/worker/packages/idna/package_data.py:
--------------------------------------------------------------------------------
# Version of the vendored idna distribution.
__version__ = '2.10'
2 |
3 |
--------------------------------------------------------------------------------
/worker/packages/openlock/AUTHORS:
--------------------------------------------------------------------------------
1 | Pasquale Pigazzini (ppigazzini)
2 | Michel Van den Bergh (vdbergh)
3 | Joost VandeVondele (vondele)
4 |
--------------------------------------------------------------------------------
/worker/packages/openlock/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 The openlock authors
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/worker/packages/openlock/README.md:
--------------------------------------------------------------------------------
1 | # openlock
2 |
3 | A locking library not depending on inter-process locking primitives in the OS.
4 |
5 | ## API
6 |
7 | - `FileLock(lock_file="openlock.lock", timeout=None)`. Constructor. The optional `timeout` argument is the default for the corresponding argument of `acquire()` (see below). A `FileLock` object supports the context manager protocol.
8 | - `FileLock.acquire(timeout=None)`. Attempts to acquire the lock. The optional `timeout` argument specifies the maximum waiting time in seconds before a `Timeout` exception is raised.
9 | - `FileLock.release()`. Releases the lock. May raise an `InvalidRelease` exception.
10 | - `FileLock.locked()`. Indicates if the lock is held by a process.
11 | - `FileLock.getpid()`. The PID of the process that holds the lock, if any. Otherwise returns `None`.
12 | - `FileLock.lock_file`. The name of the lock file.
13 | - `FileLock.timeout`. The value of the timeout parameter.
14 | - `openlock.set_defaults(**kw)`. Sets default values for the internal parameters. Currently `tries`, `retry_period`, `race_delay` with values of `2`, `0.3s` and `0.2s` respectively.
15 | - `openlock.get_defaults()`. Returns a dictionary with the default values for the internal parameters.
16 |
17 | ## How does it work
18 |
19 | A valid lock file has two lines of text containing respectively:
20 |
21 | - `pid`: the PID of the process holding the lock;
22 | - `name`: the content of `argv[0]` of the process holding the lock.
23 |
24 | A lock file is considered stale if the pair `(pid, name)` does not belong to a Python process in the process table.
25 |
26 | A process that seeks to acquire a lock first atomically tries to create a new lock file. If this succeeds then it has acquired the lock. If it fails then this means that a lock file exists. If it is valid, i.e. not stale and syntactically valid, then this implies that the lock has already been acquired and the process will periodically retry to acquire it - subject to the `timeout` parameter. If the lock file is invalid, then the process atomically overwrites it with its own data. It sleeps `race_delay` seconds and then checks if the lock file has again been overwritten (necessarily by a different process). If not then it has acquired the lock.
27 |
28 | Once the lock is acquired the process installs an exit handler to remove the lock file on exit.
29 |
30 | To release the lock, the process deletes the lock file and uninstalls the exit handler.
31 |
32 | It follows from this description that the algorithm is latency free in the common use case where there are no invalid lock files.
33 |
34 | ## Issues
35 |
36 | There are no known issues in the common use case where there are no invalid lock files. In general the following is true:
37 |
38 | - The algorithm for dealing with invalid lock files fails if a process needs more time than indicated by the `race_delay` parameter to create a new lock file after detecting the absence of a valid one. The library will issue a warning if it thinks the system is too slow for the algorithm to work correctly and it will recommend to increase the value of the `race_delay` parameter.
39 |
40 | - Since PIDs are only unique over the lifetime of a process, it may be, although it is very unlikely, that the data `(pid, name)` matches a Python process different from the one that created the lock file. In that case the algorithm fails to recognize the lock file as stale.
41 |
42 | ## History
43 |
44 | This is a refactored version of the locking algorithm used by the worker for the Fishtest web application.
45 |
--------------------------------------------------------------------------------
/worker/packages/openlock/__init__.py:
--------------------------------------------------------------------------------
# Re-export the public openlock API at package level.
from .openlock import (  # noqa: F401
    FileLock,
    InvalidLockFile,
    InvalidOption,
    InvalidRelease,
    OpenLockException,
    Timeout,
    __version__,
    get_defaults,
    logger,
    set_defaults,
)
13 |
--------------------------------------------------------------------------------
/worker/packages/openlock/_helper.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 |
4 | from openlock import FileLock, Timeout
5 |
6 |
def other_process1(lock_file):
    """Try a non-blocking acquire; return 1 if the lock was busy, else 0."""
    lock = FileLock(lock_file)
    try:
        lock.acquire(timeout=0)
    except Timeout:
        return 1
    return 0
14 |
15 |
def other_process2(lock_file):
    """Acquire the lock, hold it for two seconds, then return 2."""
    lock = FileLock(lock_file)
    lock.acquire(timeout=0)
    time.sleep(2)
    return 2
21 |
22 |
if __name__ == "__main__":
    # CLI: _helper.py <lock_file> <1|2> — run the selected helper scenario.
    lock_file = sys.argv[1]
    cmd = sys.argv[2]
    print(other_process1(lock_file) if cmd == "1" else other_process2(lock_file))
30 |
--------------------------------------------------------------------------------
/worker/packages/requests/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # __
4 | # /__) _ _ _ _ _/ _
5 | # / ( (- (/ (/ (- _) / _)
6 | # /
7 |
8 | """
9 | Requests HTTP Library
10 | ~~~~~~~~~~~~~~~~~~~~~
11 |
12 | Requests is an HTTP library, written in Python, for human beings.
13 | Basic GET usage:
14 |
15 | >>> import requests
16 | >>> r = requests.get('https://www.python.org')
17 | >>> r.status_code
18 | 200
19 | >>> b'Python is a programming language' in r.content
20 | True
21 |
22 | ... or POST:
23 |
24 | >>> payload = dict(key1='value1', key2='value2')
25 | >>> r = requests.post('https://httpbin.org/post', data=payload)
26 | >>> print(r.text)
27 | {
28 | ...
29 | "form": {
30 | "key1": "value1",
31 | "key2": "value2"
32 | },
33 | ...
34 | }
35 |
36 | The other HTTP methods are supported - see `requests.api`. Full documentation
37 | is at .
38 |
39 | :copyright: (c) 2017 by Kenneth Reitz.
40 | :license: Apache 2.0, see LICENSE for more details.
41 | """
42 |
43 | import urllib3
44 | import chardet
45 | import warnings
46 | from .exceptions import RequestsDependencyWarning
47 |
48 |
def check_compatibility(urllib3_version, chardet_version):
    """Assert that the available urllib3 and chardet versions are supported.

    :param urllib3_version: dotted version string of urllib3
    :param chardet_version: dotted version string of chardet
    :raises AssertionError: when either version is outside the supported
        range (urllib3 >= 1.21.1, <= 1.26; chardet >= 3.0.2, < 5.0.0)
    """
    parts = urllib3_version.split('.')
    assert parts != ['dev']  # Verify urllib3 isn't installed from git.

    # Sometimes, urllib3 only reports its version as 16.1.
    if len(parts) == 2:
        parts.append('0')

    major, minor, patch = parts  # noqa: F811
    major, minor, patch = int(major), int(minor), int(patch)
    # urllib3 >= 1.21.1, <= 1.26
    assert major == 1
    assert 21 <= minor <= 26

    # Check chardet for compatibility.
    major, minor, patch = chardet_version.split('.')[:3]
    major, minor, patch = int(major), int(minor), int(patch)
    # chardet >= 3.0.2, < 5.0.0
    assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0)
70 |
71 |
72 | def _check_cryptography(cryptography_version):
73 | # cryptography < 1.3.4
74 | try:
75 | cryptography_version = list(map(int, cryptography_version.split('.')))
76 | except ValueError:
77 | return
78 |
79 | if cryptography_version < [1, 3, 4]:
80 | warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version)
81 | warnings.warn(warning, RequestsDependencyWarning)
82 |
# Check imported dependencies for compatibility.
try:
    check_compatibility(urllib3.__version__, chardet.__version__)
except (AssertionError, ValueError):
    # Incompatible or unparsable versions are a warning, not a hard error.
    warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported "
                  "version!".format(urllib3.__version__, chardet.__version__),
                  RequestsDependencyWarning)

# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
# 'ssl' library isn't available.
try:
    try:
        import ssl
    except ImportError:
        ssl = None

    if not getattr(ssl, "HAS_SNI", False):
        from urllib3.contrib import pyopenssl
        pyopenssl.inject_into_urllib3()

    # Check cryptography version
    from cryptography import __version__ as cryptography_version
    _check_cryptography(cryptography_version)
except ImportError:
    # pyOpenSSL/cryptography are optional; skip the fallback entirely.
    pass

# urllib3's DependencyWarnings should be silenced.
from urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)

# Re-export the package metadata and the public requests API.
from .__version__ import __title__, __description__, __url__, __version__
from .__version__ import __build__, __author__, __author_email__, __license__
from .__version__ import __copyright__, __cake__

from . import utils
from . import packages
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
    RequestException, Timeout, URLRequired,
    TooManyRedirects, HTTPError, ConnectionError,
    FileModeWarning, ConnectTimeout, ReadTimeout
)

# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler

logging.getLogger(__name__).addHandler(NullHandler())

# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
138 |
--------------------------------------------------------------------------------
/worker/packages/requests/__version__.py:
--------------------------------------------------------------------------------
1 | # .-. .-. .-. . . .-. .-. .-. .-.
2 | # |( |- |.| | | |- `-. | `-.
3 | # ' ' `-' `-`.`-' `-' `-' ' `-'
4 |
5 | __title__ = 'requests'
6 | __description__ = 'Python HTTP for Humans.'
7 | __url__ = 'https://requests.readthedocs.io'
8 | __version__ = '2.25.1'
9 | __build__ = 0x022501
10 | __author__ = 'Kenneth Reitz'
11 | __author_email__ = 'me@kennethreitz.org'
12 | __license__ = 'Apache 2.0'
13 | __copyright__ = 'Copyright 2020 Kenneth Reitz'
14 | __cake__ = u'\u2728 \U0001f370 \u2728'
15 |
--------------------------------------------------------------------------------
/worker/packages/requests/_internal_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests._internal_utils
5 | ~~~~~~~~~~~~~~
6 |
7 | Provides utility functions that are consumed internally by Requests
8 | which depend on extremely few external helpers (such as compat)
9 | """
10 |
11 | from .compat import is_py2, builtin_str, str
12 |
13 |
def to_native_string(string, encoding='ascii'):
    """Given a string object, regardless of type, returns a representation of
    that string in the native string type, encoding and decoding where
    necessary. This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    # Py2's native type is bytes (encode); Py3's is str (decode).
    return string.encode(encoding) if is_py2 else string.decode(encoding)
28 |
29 |
def unicode_is_ascii(u_string):
    """Determine if unicode string only contains ASCII characters.

    :param str u_string: unicode string to check. Must be unicode
        and not Python 2 `str`.
    :rtype: bool
    """
    assert isinstance(u_string, str)
    try:
        u_string.encode('ascii')
    except UnicodeEncodeError:
        return False
    return True
43 |
--------------------------------------------------------------------------------
/worker/packages/requests/certs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | requests.certs
6 | ~~~~~~~~~~~~~~
7 |
8 | This module returns the preferred default CA certificate bundle. There is
9 | only one — the one from the certifi package.
10 |
11 | If you are packaging Requests, e.g., for a Linux distribution or a managed
12 | environment, you can change the definition of where() to return a separately
13 | packaged CA bundle.
14 | """
# Delegate entirely to certifi; repackage by overriding where() if needed.
from certifi import where

if __name__ == '__main__':
    # Running this module directly prints the CA bundle path.
    print(where())
19 |
--------------------------------------------------------------------------------
/worker/packages/requests/compat.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests.compat
5 | ~~~~~~~~~~~~~~~
6 |
7 | This module handles import compatibility issues between Python 2 and
8 | Python 3.
9 | """
10 |
11 | import chardet
12 |
13 | import sys
14 |
15 | # -------
16 | # Pythons
17 | # -------
18 |
19 | # Syntax sugar.
20 | _ver = sys.version_info
21 |
22 | #: Python 2.x?
23 | is_py2 = (_ver[0] == 2)
24 |
25 | #: Python 3.x?
26 | is_py3 = (_ver[0] == 3)
27 |
28 | try:
29 | import simplejson as json
30 | except ImportError:
31 | import json
32 |
33 | # ---------
34 | # Specifics
35 | # ---------
36 |
37 | if is_py2:
38 | from urllib import (
39 | quote, unquote, quote_plus, unquote_plus, urlencode, getproxies,
40 | proxy_bypass, proxy_bypass_environment, getproxies_environment)
41 | from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
42 | from urllib2 import parse_http_list
43 | import cookielib
44 | from Cookie import Morsel
45 | from StringIO import StringIO
46 | # Keep OrderedDict for backwards compatibility.
47 | from collections import Callable, Mapping, MutableMapping, OrderedDict
48 |
49 |
50 | builtin_str = str
51 | bytes = str
52 | str = unicode
53 | basestring = basestring
54 | numeric_types = (int, long, float)
55 | integer_types = (int, long)
56 |
57 | elif is_py3:
58 | from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
59 | from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment
60 | from http import cookiejar as cookielib
61 | from http.cookies import Morsel
62 | from io import StringIO
63 | # Keep OrderedDict for backwards compatibility.
64 | from collections import OrderedDict
65 | from collections.abc import Callable, Mapping, MutableMapping
66 |
67 | builtin_str = str
68 | str = str
69 | bytes = bytes
70 | basestring = (str, bytes)
71 | numeric_types = (int, float)
72 | integer_types = (int,)
73 |
--------------------------------------------------------------------------------
/worker/packages/requests/exceptions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests.exceptions
5 | ~~~~~~~~~~~~~~~~~~~
6 |
7 | This module contains the set of Requests' exceptions.
8 | """
9 | from urllib3.exceptions import HTTPError as BaseHTTPError
10 |
11 |
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request.
    """

    def __init__(self, *args, **kwargs):
        """Initialize RequestException with `request` and `response` objects."""
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        # Fall back to the response's originating request when available.
        if (self.response is not None and not self.request
                and hasattr(self.response, 'request')):
            self.request = self.response.request
        super(RequestException, self).__init__(*args, **kwargs)
26 |
27 |
28 | class HTTPError(RequestException):
29 | """An HTTP error occurred."""
30 |
31 |
32 | class ConnectionError(RequestException):
33 | """A Connection error occurred."""
34 |
35 |
36 | class ProxyError(ConnectionError):
37 | """A proxy error occurred."""
38 |
39 |
40 | class SSLError(ConnectionError):
41 | """An SSL error occurred."""
42 |
43 |
44 | class Timeout(RequestException):
45 | """The request timed out.
46 |
47 | Catching this error will catch both
48 | :exc:`~requests.exceptions.ConnectTimeout` and
49 | :exc:`~requests.exceptions.ReadTimeout` errors.
50 | """
51 |
52 |
53 | class ConnectTimeout(ConnectionError, Timeout):
54 | """The request timed out while trying to connect to the remote server.
55 |
56 | Requests that produced this error are safe to retry.
57 | """
58 |
59 |
60 | class ReadTimeout(Timeout):
61 | """The server did not send any data in the allotted amount of time."""
62 |
63 |
64 | class URLRequired(RequestException):
65 | """A valid URL is required to make a request."""
66 |
67 |
68 | class TooManyRedirects(RequestException):
69 | """Too many redirects."""
70 |
71 |
72 | class MissingSchema(RequestException, ValueError):
73 | """The URL schema (e.g. http or https) is missing."""
74 |
75 |
76 | class InvalidSchema(RequestException, ValueError):
77 | """See defaults.py for valid schemas."""
78 |
79 |
80 | class InvalidURL(RequestException, ValueError):
81 | """The URL provided was somehow invalid."""
82 |
83 |
84 | class InvalidHeader(RequestException, ValueError):
85 | """The header value provided was somehow invalid."""
86 |
87 |
88 | class InvalidProxyURL(InvalidURL):
89 | """The proxy URL provided is invalid."""
90 |
91 |
92 | class ChunkedEncodingError(RequestException):
93 | """The server declared chunked encoding but sent an invalid chunk."""
94 |
95 |
96 | class ContentDecodingError(RequestException, BaseHTTPError):
97 | """Failed to decode response content."""
98 |
99 |
100 | class StreamConsumedError(RequestException, TypeError):
101 | """The content for this response was already consumed."""
102 |
103 |
104 | class RetryError(RequestException):
105 | """Custom retries logic failed"""
106 |
107 |
108 | class UnrewindableBodyError(RequestException):
109 | """Requests encountered an error when trying to rewind a body."""
110 |
111 | # Warnings
112 |
113 |
# Root of the Requests warning hierarchy (see subclasses below).
class RequestsWarning(Warning):
    """Base warning for Requests."""
116 |
117 |
# Also a DeprecationWarning, so it follows DeprecationWarning filtering rules.
class FileModeWarning(RequestsWarning, DeprecationWarning):
    """A file was opened in text mode, but Requests determined its binary length."""
120 |
121 |
# Emitted at import time when urllib3/chardet versions look incompatible.
class RequestsDependencyWarning(RequestsWarning):
    """An imported dependency doesn't match the expected version range."""
124 |
--------------------------------------------------------------------------------
/worker/packages/requests/help.py:
--------------------------------------------------------------------------------
1 | """Module containing bug report helper(s)."""
2 | from __future__ import print_function
3 |
4 | import json
5 | import platform
6 | import sys
7 | import ssl
8 |
9 | import idna
10 | import urllib3
11 | import chardet
12 |
13 | from . import __version__ as requests_version
14 |
15 | try:
16 | from urllib3.contrib import pyopenssl
17 | except ImportError:
18 | pyopenssl = None
19 | OpenSSL = None
20 | cryptography = None
21 | else:
22 | import OpenSSL
23 | import cryptography
24 |
25 |
26 | def _implementation():
27 | """Return a dict with the Python implementation and version.
28 |
29 | Provide both the name and the version of the Python implementation
30 | currently running. For example, on CPython 2.7.5 it will return
31 | {'name': 'CPython', 'version': '2.7.5'}.
32 |
33 | This function works best on CPython and PyPy: in particular, it probably
34 | doesn't work for Jython or IronPython. Future investigation should be done
35 | to work out the correct shape of the code for those platforms.
36 | """
37 | implementation = platform.python_implementation()
38 |
39 | if implementation == 'CPython':
40 | implementation_version = platform.python_version()
41 | elif implementation == 'PyPy':
42 | implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
43 | sys.pypy_version_info.minor,
44 | sys.pypy_version_info.micro)
45 | if sys.pypy_version_info.releaselevel != 'final':
46 | implementation_version = ''.join([
47 | implementation_version, sys.pypy_version_info.releaselevel
48 | ])
49 | elif implementation == 'Jython':
50 | implementation_version = platform.python_version() # Complete Guess
51 | elif implementation == 'IronPython':
52 | implementation_version = platform.python_version() # Complete Guess
53 | else:
54 | implementation_version = 'Unknown'
55 |
56 | return {'name': implementation, 'version': implementation_version}
57 |
58 |
def info():
    """Generate information for a bug report.

    Collects platform, interpreter, and dependency version details into a
    plain dict suitable for JSON serialization.
    """
    try:
        platform_info = {
            'system': platform.system(),
            'release': platform.release(),
        }
    except IOError:
        # Some restricted environments refuse to report platform details.
        platform_info = {
            'system': 'Unknown',
            'release': 'Unknown',
        }

    # pyOpenSSL details are only available when the optional import at module
    # top succeeded; otherwise report empty placeholders.
    if OpenSSL:
        pyopenssl_info = {
            'version': OpenSSL.__version__,
            'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
        }
    else:
        pyopenssl_info = {
            'version': None,
            'openssl_version': '',
        }

    system_ssl = ssl.OPENSSL_VERSION_NUMBER
    system_ssl_info = {
        'version': '%x' % system_ssl if system_ssl is not None else ''
    }

    return {
        'platform': platform_info,
        'implementation': _implementation(),
        'system_ssl': system_ssl_info,
        'using_pyopenssl': pyopenssl is not None,
        'pyOpenSSL': pyopenssl_info,
        'urllib3': {'version': urllib3.__version__},
        'chardet': {'version': chardet.__version__},
        'cryptography': {'version': getattr(cryptography, '__version__', '')},
        'idna': {'version': getattr(idna, '__version__', '')},
        'requests': {
            'version': requests_version,
        },
    }
111 |
112 |
def main():
    """Pretty-print the bug information as JSON."""
    bug_report = info()
    print(json.dumps(bug_report, sort_keys=True, indent=2))


if __name__ == '__main__':
    main()
120 |
--------------------------------------------------------------------------------
/worker/packages/requests/hooks.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests.hooks
5 | ~~~~~~~~~~~~~~
6 |
7 | This module provides the capabilities for the Requests hooks system.
8 |
9 | Available hooks:
10 |
11 | ``response``:
12 | The response generated from a Request.
13 | """
HOOKS = ['response']


def default_hooks():
    """Return a fresh hook registry: each known event mapped to an empty list."""
    return dict((event, []) for event in HOOKS)
19 |
20 | # TODO: response is the only one
21 |
22 |
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Dispatches a hook dictionary on a given piece of data.

    Each registered hook is called with ``hook_data``; a non-None return
    value replaces the data passed to the next hook.
    """
    registered = (hooks or {}).get(key)
    if not registered:
        return hook_data
    # A single callable is treated as a one-element list of hooks.
    if hasattr(registered, '__call__'):
        registered = [registered]
    for hook in registered:
        updated = hook(hook_data, **kwargs)
        if updated is not None:
            hook_data = updated
    return hook_data
35 |
--------------------------------------------------------------------------------
/worker/packages/requests/packages.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
# This code exists for backwards compatibility reasons.
# I don't like it either. Just look the other way. :)

for package in ('urllib3', 'idna', 'chardet'):
    # Bind the top-level package as a module attribute so
    # `requests.packages.<name>` resolves.  (At module level, locals() is the
    # module namespace, so this assignment is effective.)
    locals()[package] = __import__(package)
    # This traversal is apparently necessary such that the identities are
    # preserved (requests.packages.urllib3.* is urllib3.*)
    for mod in list(sys.modules):
        if mod == package or mod.startswith(package + '.'):
            sys.modules['requests.packages.' + mod] = sys.modules[mod]

# Kinda cool, though, right?
15 |
--------------------------------------------------------------------------------
/worker/packages/requests/status_codes.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | r"""
4 | The ``codes`` object defines a mapping from common names for HTTP statuses
5 | to their numerical codes, accessible either as attributes or as dictionary
6 | items.
7 |
8 | Example::
9 |
10 | >>> import requests
11 | >>> requests.codes['temporary_redirect']
12 | 307
13 | >>> requests.codes.teapot
14 | 418
15 | >>> requests.codes['\o/']
16 | 200
17 |
18 | Some codes have multiple names, and both upper- and lower-case versions of
19 | the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
20 | ``codes.okay`` all correspond to the HTTP status code 200.
21 | """
22 |
23 | from .structures import LookupDict
24 |
# Mapping of HTTP status code -> tuple of attribute-friendly names.  The
# first entry in each tuple is the canonical name; the rest are aliases.
# _init() below exposes every name as an attribute on `codes`.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    421: ('misdirected_request',),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
    511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}

# Public lookup object; populated from _codes by _init() below.
codes = LookupDict(name='status_codes')
106 |
def _init():
    """Populate ``codes`` from ``_codes`` and extend the module docstring."""
    for code, titles in _codes.items():
        for title in titles:
            # e.g. codes.ok == 200; also an upper-case alias (codes.OK),
            # except for the punctuation names starting with '\' or '/'.
            setattr(codes, title, code)
            if not title.startswith(('\\', '/')):
                setattr(codes, title.upper(), code)

    def doc(code):
        # One reST bullet line per status code for the module docstring.
        names = ', '.join('``%s``' % n for n in _codes[code])
        return '* %d: %s' % (code, names)

    global __doc__
    # Append the generated code list to the module docstring (skipped when
    # __doc__ is None, e.g. under `python -OO`).
    __doc__ = (__doc__ + '\n' +
               '\n'.join(doc(code) for code in sorted(_codes))
               if __doc__ is not None else None)

_init()
124 |
--------------------------------------------------------------------------------
/worker/packages/requests/structures.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | requests.structures
5 | ~~~~~~~~~~~~~~~~~~~
6 |
7 | Data structures that power Requests.
8 | """
9 |
10 | from collections import OrderedDict
11 |
12 | from .compat import Mapping, MutableMapping
13 |
14 |
class CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Internal store maps lowercased key -> (original key, value).
        self._store = OrderedDict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Keyed by the lowercased name; the original casing travels with the
        # value so that iteration can reproduce it.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the keys with their original casing preserved.
        for cased_key, _ in self._store.values():
            yield cased_key

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return ((lower, pair[1]) for lower, pair in self._store.items())

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Compare insensitively by normalizing both sides to lowercase keys.
        other = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
87 |
88 |
class LookupDict(dict):
    """Dictionary lookup object.

    Values are stored as instance attributes (status_codes._init uses
    ``setattr``), not in the underlying ``dict``; ``__getitem__`` and ``get``
    therefore read from ``self.__dict__``, and missing keys yield ``None``
    instead of raising ``KeyError``.
    """

    def __init__(self, name=None):
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        # Bug fix: the format string was empty ('' % self.name), so repr()
        # always returned '' and never displayed the name.
        return "<lookup '%s'>" % (self.name)

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
106 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more
3 | """
4 | from __future__ import absolute_import
5 |
6 | # Set default logging handler to avoid "No handler found" warnings.
7 | import logging
8 | import warnings
9 | from logging import NullHandler
10 |
11 | from . import exceptions
12 | from ._version import __version__
13 | from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
14 | from .filepost import encode_multipart_formdata
15 | from .poolmanager import PoolManager, ProxyManager, proxy_from_url
16 | from .response import HTTPResponse
17 | from .util.request import make_headers
18 | from .util.retry import Retry
19 | from .util.timeout import Timeout
20 | from .util.url import get_host
21 |
22 | __author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
23 | __license__ = "MIT"
24 | __version__ = __version__
25 |
26 | __all__ = (
27 | "HTTPConnectionPool",
28 | "HTTPSConnectionPool",
29 | "PoolManager",
30 | "ProxyManager",
31 | "HTTPResponse",
32 | "Retry",
33 | "Timeout",
34 | "add_stderr_logger",
35 | "connection_from_url",
36 | "disable_warnings",
37 | "encode_multipart_formdata",
38 | "get_host",
39 | "make_headers",
40 | "proxy_from_url",
41 | )
42 |
43 | logging.getLogger(__name__).addHandler(NullHandler())
44 |
45 |
def add_stderr_logger(level=logging.DEBUG):
    """Attach a stderr ``StreamHandler`` to urllib3's logger and return it.

    Handy for quickly turning on debug output while troubleshooting.
    """
    # Defined in this __init__.py so __name__ resolves to the package's own
    # logger even when urllib3 is vendored inside another distribution.
    pkg_logger = logging.getLogger(__name__)
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(message)s")
    )
    pkg_logger.addHandler(stderr_handler)
    pkg_logger.setLevel(level)
    pkg_logger.debug("Added a stderr logging handler to logger: %s", __name__)
    return stderr_handler
62 |
63 |
64 | # ... Clean up.
65 | del NullHandler
66 |
67 |
68 | # All warning filters *must* be appended unless you're really certain that they
69 | # shouldn't be: otherwise, it's very hard for users to use most Python
70 | # mechanisms to silence them.
71 | # SecurityWarning's always go off by default.
72 | warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
73 | # SubjectAltNameWarning's should go off once per host
74 | warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True)
75 | # InsecurePlatformWarning's don't vary between requests, so we keep it default.
76 | warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
77 | # SNIMissingWarnings should go off only once.
78 | warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
79 |
80 |
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.

    :param category: warning category to silence.  Defaults to
        ``exceptions.HTTPWarning`` — presumably the root of urllib3's warning
        hierarchy, so the default silences them all; confirm in exceptions.py.
    """
    warnings.simplefilter("ignore", category)
86 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/_version.py:
--------------------------------------------------------------------------------
1 | # This file is protected via CODEOWNERS
2 | __version__ = "1.26.7"
3 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/contrib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/official-stockfish/fishtest/fdefcdd1be3617ffa0451b006e3b6f030cf8678d/worker/packages/urllib3/contrib/__init__.py
--------------------------------------------------------------------------------
/worker/packages/urllib3/contrib/_appengine_environ.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides means to detect the App Engine environment.
3 | """
4 |
5 | import os
6 |
7 |
def is_appengine():
    # True under either the local dev server or production App Engine
    # (see is_local_appengine / is_prod_appengine below).
    return is_local_appengine() or is_prod_appengine()
10 |
11 |
def is_appengine_sandbox():
    """Reports if the app is running in the first generation sandbox.

    The second generation runtimes are technically still in a sandbox, but it
    is much less restrictive, so generally you shouldn't need to check for it.
    see https://cloud.google.com/appengine/docs/standard/runtimes
    """
    # Direct key access is safe here: is_appengine() being True implies
    # APPENGINE_RUNTIME is present in the environment.
    return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
20 |
21 |
def is_local_appengine():
    """Return True when running under the local App Engine dev server."""
    if "APPENGINE_RUNTIME" not in os.environ:
        return False
    # The dev server identifies itself with a "Development/" prefix.
    return os.environ.get("SERVER_SOFTWARE", "").startswith("Development/")
26 |
27 |
def is_prod_appengine():
    """Return True when running on production App Engine."""
    if "APPENGINE_RUNTIME" not in os.environ:
        return False
    # Production identifies itself with a "Google App Engine/" prefix.
    return os.environ.get("SERVER_SOFTWARE", "").startswith("Google App Engine/")
32 |
33 |
def is_prod_appengine_mvms():
    """Deprecated. Always returns ``False``."""
    return False
37 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/contrib/_securetransport/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/official-stockfish/fishtest/fdefcdd1be3617ffa0451b006e3b6f030cf8678d/worker/packages/urllib3/contrib/_securetransport/__init__.py
--------------------------------------------------------------------------------
/worker/packages/urllib3/filepost.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import binascii
4 | import codecs
5 | import os
6 | from io import BytesIO
7 |
8 | from .fields import RequestField
9 | from .packages import six
10 | from .packages.six import b
11 |
12 | writer = codecs.lookup("utf-8")[3]
13 |
14 |
def choose_boundary():
    """
    Our embarrassingly-simple replacement for mimetools.choose_boundary.

    Returns a 32-character hex string derived from 16 random bytes
    (``str`` on Python 3, ``bytes`` on Python 2).
    """
    boundary = binascii.hexlify(os.urandom(16))
    if not six.PY2:
        boundary = boundary.decode("ascii")
    return boundary
23 |
24 |
def iter_field_objects(fields):
    """
    Iterate over fields.

    Supports list of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.

    """
    # Normalize both input shapes to a single iterator.
    if isinstance(fields, dict):
        i = six.iteritems(fields)
    else:
        i = iter(fields)

    for field in i:
        if isinstance(field, RequestField):
            yield field
        else:
            # Plain (name, value[, ...]) tuples are promoted to RequestField.
            yield RequestField.from_tuples(*field)
43 |
44 |
def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields.

    The addition of :class:`~urllib3.fields.RequestField` makes this function
    obsolete. Instead, use :func:`iter_field_objects`, which returns
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    # Both branches return a generator of (key, value) pairs.
    if isinstance(fields, dict):
        return ((k, v) for k, v in six.iteritems(fields))

    return ((k, v) for k, v in fields)
61 |
62 |
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`urllib3.filepost.choose_boundary`.

    :returns:
        A ``(body_bytes, content_type)`` tuple.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for field in iter_field_objects(fields):
        body.write(b("--%s\r\n" % (boundary)))

        # Headers are text and go through the module's UTF-8 StreamWriter.
        writer(body).write(field.render_headers())
        data = field.data

        if isinstance(data, int):
            data = str(data)  # Backwards compatibility

        if isinstance(data, six.text_type):
            writer(body).write(data)
        else:
            body.write(data)

        body.write(b"\r\n")

    # Closing boundary marker.
    body.write(b("--%s--\r\n" % (boundary)))

    content_type = str("multipart/form-data; boundary=%s" % boundary)

    return body.getvalue(), content_type
99 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/packages/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from . import ssl_match_hostname
4 |
5 | __all__ = ("ssl_match_hostname",)
6 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/packages/backports/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/official-stockfish/fishtest/fdefcdd1be3617ffa0451b006e3b6f030cf8678d/worker/packages/urllib3/packages/backports/__init__.py
--------------------------------------------------------------------------------
/worker/packages/urllib3/packages/backports/makefile.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | backports.makefile
4 | ~~~~~~~~~~~~~~~~~~
5 |
6 | Backports the Python 3 ``socket.makefile`` method for use with anything that
7 | wants to create a "fake" socket object.
8 | """
9 | import io
10 | from socket import SocketIO
11 |
12 |
def backport_makefile(
    self, mode="r", buffering=None, encoding=None, errors=None, newline=None
):
    """
    Backport of ``socket.makefile`` from Python 3.5.

    Wraps the socket ``self`` in a raw ``SocketIO``, then (unless unbuffered)
    a buffered reader/writer, and for text modes a ``TextIOWrapper``.
    """
    if not set(mode) <= {"r", "w", "b"}:
        raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
    writing = "w" in mode
    # A mode without 'w' defaults to reading, matching the stdlib.
    reading = "r" in mode or not writing
    assert reading or writing
    binary = "b" in mode
    rawmode = ""
    if reading:
        rawmode += "r"
    if writing:
        rawmode += "w"
    raw = SocketIO(self, rawmode)
    # NOTE(review): _makefile_refs presumably counts open file handles on the
    # owning object — confirm against the caller that installs this method.
    self._makefile_refs += 1
    if buffering is None:
        buffering = -1
    if buffering < 0:
        buffering = io.DEFAULT_BUFFER_SIZE
    if buffering == 0:
        if not binary:
            raise ValueError("unbuffered streams must be binary")
        return raw
    if reading and writing:
        buffer = io.BufferedRWPair(raw, raw, buffering)
    elif reading:
        buffer = io.BufferedReader(raw, buffering)
    else:
        assert writing
        buffer = io.BufferedWriter(raw, buffering)
    if binary:
        return buffer
    text = io.TextIOWrapper(buffer, encoding, errors, newline)
    text.mode = mode
    return text
52 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/packages/ssl_match_hostname/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | try:
4 | # Our match_hostname function is the same as 3.10's, so we only want to
5 | # import the match_hostname function if it's at least that good.
6 | # We also fallback on Python 3.10+ because our code doesn't emit
7 | # deprecation warnings and is the same as Python 3.10 otherwise.
8 | if sys.version_info < (3, 5) or sys.version_info >= (3, 10):
9 | raise ImportError("Fallback to vendored code")
10 |
11 | from ssl import CertificateError, match_hostname
12 | except ImportError:
13 | try:
14 | # Backport of the function from a pypi module
15 | from backports.ssl_match_hostname import ( # type: ignore
16 | CertificateError,
17 | match_hostname,
18 | )
19 | except ImportError:
20 | # Our vendored copy
21 | from ._implementation import CertificateError, match_hostname # type: ignore
22 |
23 | # Not needed, but documenting what we provide.
24 | __all__ = ("CertificateError", "match_hostname")
25 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/util/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | # For backwards compatibility, provide imports that used to be here.
4 | from .connection import is_connection_dropped
5 | from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
6 | from .response import is_fp_closed
7 | from .retry import Retry
8 | from .ssl_ import (
9 | ALPN_PROTOCOLS,
10 | HAS_SNI,
11 | IS_PYOPENSSL,
12 | IS_SECURETRANSPORT,
13 | PROTOCOL_TLS,
14 | SSLContext,
15 | assert_fingerprint,
16 | resolve_cert_reqs,
17 | resolve_ssl_version,
18 | ssl_wrap_socket,
19 | )
20 | from .timeout import Timeout, current_time
21 | from .url import Url, get_host, parse_url, split_first
22 | from .wait import wait_for_read, wait_for_write
23 |
24 | __all__ = (
25 | "HAS_SNI",
26 | "IS_PYOPENSSL",
27 | "IS_SECURETRANSPORT",
28 | "SSLContext",
29 | "PROTOCOL_TLS",
30 | "ALPN_PROTOCOLS",
31 | "Retry",
32 | "Timeout",
33 | "Url",
34 | "assert_fingerprint",
35 | "current_time",
36 | "is_connection_dropped",
37 | "is_fp_closed",
38 | "get_host",
39 | "parse_url",
40 | "make_headers",
41 | "resolve_cert_reqs",
42 | "resolve_ssl_version",
43 | "split_first",
44 | "ssl_wrap_socket",
45 | "wait_for_read",
46 | "wait_for_write",
47 | "SKIP_HEADER",
48 | "SKIPPABLE_HEADERS",
49 | )
50 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/util/proxy.py:
--------------------------------------------------------------------------------
1 | from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
2 |
3 |
def connection_requires_http_tunnel(
    proxy_url=None, proxy_config=None, destination_scheme=None
):
    """
    Returns True if the connection requires an HTTP CONNECT through the proxy.

    :param URL proxy_url:
        URL of the proxy.
    :param ProxyConfig proxy_config:
        Proxy configuration from poolmanager.py
    :param str destination_scheme:
        The scheme of the destination. (i.e https, http, etc)
    """
    # No proxy at all: nothing to tunnel through.
    if proxy_url is None:
        return False

    # Plain-HTTP destinations are always forwarded, never tunneled.
    if destination_scheme == "http":
        return False

    # HTTPS-over-HTTPS forwarding is the only remaining non-tunnel case.
    forwarding_https = (
        proxy_url.scheme == "https"
        and proxy_config
        and proxy_config.use_forwarding_for_https
    )
    return not forwarding_https
35 |
36 |
def create_proxy_ssl_context(
    ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
):
    """
    Generates a default proxy ssl context if one hasn't been provided by the
    user.

    :param ssl_version: TLS version, normalized via ``resolve_ssl_version``.
    :param cert_reqs: certificate requirements, via ``resolve_cert_reqs``.
    :param ca_certs: optional CA bundle file path.
    :param ca_cert_dir: optional directory of CA certificates.
    :param ca_cert_data: optional in-memory CA certificate data.
    """
    ssl_context = create_urllib3_context(
        ssl_version=resolve_ssl_version(ssl_version),
        cert_reqs=resolve_cert_reqs(cert_reqs),
    )

    if (
        not ca_certs
        and not ca_cert_dir
        and not ca_cert_data
        and hasattr(ssl_context, "load_default_certs")
    ):
        # No explicit CA material supplied: fall back to the system trust
        # store (guarded since not every SSLContext implementation has it).
        ssl_context.load_default_certs()

    return ssl_context
58 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/util/queue.py:
--------------------------------------------------------------------------------
1 | import collections
2 |
3 | from ..packages import six
4 | from ..packages.six.moves import queue
5 |
6 | if six.PY2:
7 | # Queue is imported for side effects on MS Windows. See issue #229.
8 | import Queue as _unused_module_Queue # noqa: F401
9 |
10 |
class LifoQueue(queue.Queue):
    """Last-in-first-out queue: overrides Queue's storage hooks so _get pops
    from the same end _put appends to, while keeping Queue's locking."""

    def _init(self, _):
        # The ignored argument is maxsize, which queue.Queue handles itself.
        self.queue = collections.deque()

    def _qsize(self, len=len):
        return len(self.queue)

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.pop()
23 |
--------------------------------------------------------------------------------
/worker/packages/urllib3/util/response.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
4 |
5 | from ..exceptions import HeaderParsingError
6 | from ..packages.six.moves import http_client as httplib
7 |
8 |
def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    # Probe the known conventions in order of preference:
    #   1. isclosed()  -- httplib responses (GH Issue #928)
    #   2. closed      -- the official file-like-object attribute
    #   3. fp is None  -- containers that drop their inner fp on exhaustion
    probes = (
        lambda fp: fp.isclosed(),
        lambda fp: fp.closed,
        lambda fp: fp.fp is None,
    )
    for probe in probes:
        try:
            return probe(obj)
        except AttributeError:
            continue

    raise ValueError("Unable to determine whether fp is closed.")
38 |
39 |
def assert_header_parsing(headers):
    """
    Asserts whether all headers have been successfully parsed.
    Extracts encountered errors from the result of parsing headers.

    Only works on Python 3.

    :param http.client.HTTPMessage headers: Headers to verify.

    :raises urllib3.exceptions.HeaderParsingError:
        If parsing errors are found.
    """

    # This will fail silently if we pass in the wrong kind of parameter.
    # To make debugging easier add an explicit check.
    if not isinstance(headers, httplib.HTTPMessage):
        raise TypeError("expected httplib.Message, got {0}.".format(type(headers)))

    defects = getattr(headers, "defects", None)
    get_payload = getattr(headers, "get_payload", None)

    # Any payload attached to the header object means header data leaked
    # into the body parse, i.e. the headers were not fully parsed.
    unparsed_data = None
    if get_payload:
        # get_payload is actually email.message.Message.get_payload;
        # we're only interested in the result if it's not a multipart message
        if not headers.is_multipart():
            payload = get_payload()

            if isinstance(payload, (bytes, str)):
                unparsed_data = payload
    if defects:
        # httplib is assuming a response body is available
        # when parsing headers even when httplib only sends
        # header data to parse_headers() This results in
        # defects on multipart responses in particular.
        # See: https://github.com/urllib3/urllib3/issues/800

        # So we ignore the following defects:
        # - StartBoundaryNotFoundDefect:
        #     The claimed start boundary was never found.
        # - MultipartInvariantViolationDefect:
        #     A message claimed to be a multipart but no subparts were found.
        defects = [
            defect
            for defect in defects
            if not isinstance(
                defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
            )
        ]

    if defects or unparsed_data:
        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
92 |
93 |
def is_response_to_head(response):
    """
    Checks whether the request of a response has been a HEAD-request.
    Handles the quirks of AppEngine.

    :param http.client.HTTPResponse response:
        Response to check if the originating request
        used 'HEAD' as a method.
    """
    # FIXME: Can we do this somehow without accessing private httplib _method?
    method = response._method
    # Platform-specific: AppEngine records the method as an int; 3 == HEAD.
    return method == 3 if isinstance(method, int) else method.upper() == "HEAD"
108 |
--------------------------------------------------------------------------------
/worker/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "fishtest-worker"
3 | version = "0.1.0"
4 | description = "fishtest-worker"
5 | readme = "README.md"
6 | requires-python = ">=3.6"
7 | dependencies = [
8 | "expression-parser>=0.0.5",
9 | "openlock>=1.1.5",
10 | "requests>=2.25.1",
11 | ]
12 |
--------------------------------------------------------------------------------
/worker/sri.txt:
--------------------------------------------------------------------------------
1 | {"__version": 276, "updater.py": "+i4UI6vDlgNrZ/A/mCOGN820HJX2L896A75KKbGT10DeOXJQwwsleqEzNJpdysou", "worker.py": "nr3Dz7DJ0b69FVQ3QQ3iL/tNbbOklUH6uS50WGW75HwBzgxPzJLxXBKJrEob+4i/", "games.py": "IQMDSd55VldF72RZRCTMTZF8yFFM6exjifkw2y2Cy85bwfAk3Z90/CvAnUFUmOJH"}
2 |
--------------------------------------------------------------------------------
/worker/tests/test_worker.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import subprocess
4 | import sys
5 | import tempfile
6 | import unittest
7 | from configparser import ConfigParser
8 | from pathlib import Path
9 |
10 | import games
11 | import updater
12 |
13 | import worker
14 |
15 |
class WorkerTest(unittest.TestCase):
    """Unit tests for the fishtest worker and its helper modules."""

    def setUp(self):
        # The repository "worker" directory (the parent of this tests/ dir),
        # where worker.py and fishtest.cfg live.
        self.worker_dir = Path(__file__).resolve().parents[1]
        self.tempdir_obj = tempfile.TemporaryDirectory()
        self.tempdir = Path(self.tempdir_obj.name)
        (self.tempdir / "testing").mkdir()

    def tearDown(self):
        try:
            self.tempdir_obj.cleanup()
        except PermissionError:
            if os.name == "nt":
                # On Windows lingering open handles can block deletion;
                # fall back to a best-effort removal.
                shutil.rmtree(self.tempdir, ignore_errors=True)
            else:
                # Bare raise preserves the original traceback.
                raise

    def test_item_download(self):
        """A file can be fetched from the GitHub repository."""
        blob = None
        try:
            blob = games.download_from_github("README.md")
        except Exception:
            pass
        self.assertIsNotNone(blob)

    def test_config_setup(self):
        """setup_parameters() writes a config with the expected sections."""
        sys.argv = [sys.argv[0], "user", "pass", "--no_validation"]
        worker.CONFIGFILE = str(self.tempdir / "foo.txt")
        worker.setup_parameters(self.tempdir)
        config = ConfigParser(inline_comment_prefixes=";", interpolation=None)
        config.read(worker.CONFIGFILE)
        self.assertTrue(config.has_section("login"))
        self.assertTrue(config.has_section("parameters"))
        self.assertTrue(config.has_option("login", "username"))
        self.assertTrue(config.has_option("login", "password"))
        self.assertTrue(config.has_option("parameters", "host"))
        self.assertTrue(config.has_option("parameters", "port"))
        self.assertTrue(config.has_option("parameters", "concurrency"))

    def test_worker_script_with_bad_args(self):
        """Running worker.py with an unknown flag must exit with code 1."""
        self.assertFalse((self.worker_dir / "fishtest.cfg").exists())
        # Use the running interpreter (not a bare "python", which may be
        # missing or Python 2) and an explicit cwd so worker.py resolves
        # regardless of where the test runner was started.
        p = subprocess.run(
            [sys.executable, "worker.py", "--no-validation"],
            cwd=self.worker_dir,
        )
        self.assertEqual(p.returncode, 1)

    def test_setup_exception(self):
        """setup_engine() with bogus arguments raises."""
        cwd = self.tempdir
        with self.assertRaises(Exception):
            games.setup_engine("foo", cwd, cwd, "https://foo", "foo", "https://foo", 1)

    def test_updater(self):
        """The updater reports worker.py among the files it would update."""
        file_list = updater.update(restart=False, test=True)
        self.assertIn("worker.py", file_list)

    def test_sri(self):
        """The SRI hashes of the worker files verify."""
        self.assertTrue(worker.verify_sri(self.worker_dir))

    def test_toolchain_verification(self):
        """A usable compiler toolchain is detected."""
        self.assertTrue(worker.verify_toolchain())

    def test_setup_fastchess(self):
        """fastchess can be set up with a detected compiler."""
        self.assertTrue(
            worker.setup_fastchess(
                self.tempdir, list(worker.detect_compilers())[0], 4, ""
            )
        )
80 |
81 |
# Allow running this test module directly: `python test_worker.py`.
if __name__ == "__main__":
    unittest.main()
84 |
--------------------------------------------------------------------------------