├── COPYRIGHT.txt ├── tests ├── fixtureapps │ ├── __init__.py │ ├── groundhog1.jpg │ ├── error_traceback.py │ ├── runner.py │ ├── toolarge.py │ ├── sleepy.py │ ├── badcl.py │ ├── writecb.py │ ├── getline.py │ ├── nocl.py │ ├── error.py │ ├── echo.py │ └── filewrapper.py ├── __init__.py ├── test_init.py ├── test_trigger.py ├── test_regression.py ├── test_utilities.py ├── test_runner.py └── test_receiver.py ├── setup.py ├── docs ├── rebuild ├── api.rst ├── glossary.rst ├── socket-activation.rst ├── differences.rst ├── filewrapper.rst ├── Makefile ├── design.rst ├── usage.rst ├── index.rst ├── reverse-proxy.rst ├── logging.rst ├── runner.rst ├── conf.py └── arguments.rst ├── rtd.txt ├── src └── waitress │ ├── __main__.py │ ├── compat.py │ ├── __init__.py │ ├── rfc7230.py │ ├── receiver.py │ ├── utilities.py │ ├── trigger.py │ ├── runner.py │ ├── buffers.py │ ├── proxy_headers.py │ └── server.py ├── .gitignore ├── CHANGES.txt ├── .coveragerc ├── MANIFEST.in ├── pyproject.toml ├── README.rst ├── .flake8 ├── tox.ini ├── setup.cfg ├── LICENSE.txt ├── .github └── workflows │ └── ci-tests.yml ├── RELEASING.txt ├── contributing.md ├── TODO.txt └── CONTRIBUTORS.txt /COPYRIGHT.txt: -------------------------------------------------------------------------------- 1 | Zope Foundation and Contributors -------------------------------------------------------------------------------- /tests/fixtureapps/__init__.py: -------------------------------------------------------------------------------- 1 | # package (for -m) 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /docs/rebuild: -------------------------------------------------------------------------------- 1 | make clean html SPHINXBUILD=../env26/bin/sphinx-build 2 | 3 | -------------------------------------------------------------------------------- /rtd.txt: -------------------------------------------------------------------------------- 1 | Sphinx >= 1.3.1 2 | repoze.sphinx.autointerface 3 | pylons-sphinx-themes 4 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # This file is necessary to make this directory a package. 
3 | -------------------------------------------------------------------------------- /src/waitress/__main__.py: -------------------------------------------------------------------------------- 1 | from waitress.runner import run # pragma nocover 2 | 3 | run() # pragma nocover 4 | -------------------------------------------------------------------------------- /tests/fixtureapps/groundhog1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NicolasLM/waitress/master/tests/fixtureapps/groundhog1.jpg -------------------------------------------------------------------------------- /tests/fixtureapps/error_traceback.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | raise ValueError("Invalid application: " + chr(8364)) 3 | -------------------------------------------------------------------------------- /tests/fixtureapps/runner.py: -------------------------------------------------------------------------------- 1 | def app(): # pragma: no cover 2 | return None 3 | 4 | 5 | def returns_app(): # pragma: no cover 6 | return app 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info/ 2 | *.pyc 3 | env*/ 4 | .coverage 5 | .coverage.* 6 | .tox/ 7 | dist/ 8 | build/ 9 | coverage.xml 10 | docs/_themes 11 | docs/_build 12 | -------------------------------------------------------------------------------- /CHANGES.txt: -------------------------------------------------------------------------------- 1 | 3.0.0 (Unreleased) 2 | ------------------ 3 | 4 | Updated Defaults 5 | ~~~~~~~~~~~~~~~~ 6 | 7 | - clear_untrusted_proxy_headers is set to True by default. 
See 8 | https://github.com/Pylons/waitress/pull/370 9 | -------------------------------------------------------------------------------- /tests/fixtureapps/toolarge.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | body = b"abcdef" 3 | cl = len(body) 4 | start_response( 5 | "200 OK", [("Content-Length", str(cl)), ("Content-Type", "text/plain")] 6 | ) 7 | return [body] 8 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | parallel = true 3 | concurrency = 4 | thread 5 | multiprocessing 6 | source = 7 | waitress 8 | omit = 9 | waitress/tests/fixtureapps/getline.py 10 | 11 | [paths] 12 | source = 13 | src/waitress 14 | */src/waitress 15 | */site-packages/waitress 16 | 17 | [report] 18 | show_missing = true 19 | precision = 2 20 | -------------------------------------------------------------------------------- /tests/fixtureapps/sleepy.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | def app(environ, start_response): # pragma: no cover 5 | if environ["PATH_INFO"] == "/sleepy": 6 | time.sleep(2) 7 | body = b"sleepy returned" 8 | else: 9 | body = b"notsleepy returned" 10 | cl = str(len(body)) 11 | start_response("200 OK", [("Content-Length", cl), ("Content-Type", "text/plain")]) 12 | return [body] 13 | -------------------------------------------------------------------------------- /tests/fixtureapps/badcl.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | body = b"abcdefghi" 3 | cl = len(body) 4 | if environ["PATH_INFO"] == "/short_body": 5 | cl = len(body) + 1 6 | if environ["PATH_INFO"] == "/long_body": 7 | cl = len(body) - 1 8 | start_response( 9 | "200 OK", [("Content-Length", str(cl)), ("Content-Type", "text/plain")] 10 | ) 11 | return [body] 12 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | graft src/waitress 2 | graft tests 3 | graft docs 4 | graft .github 5 | 6 | include README.rst 7 | include CHANGES.txt 8 | include HISTORY.txt 9 | include RELEASING.txt 10 | include LICENSE.txt 11 | include contributing.md 12 | include CONTRIBUTORS.txt 13 | include COPYRIGHT.txt 14 | 15 | include pyproject.toml setup.cfg 16 | include .coveragerc .flake8 17 | include tox.ini rtd.txt 18 | 19 | exclude TODO.txt 20 | prune docs/_build 21 | 22 | recursive-exclude * __pycache__ *.py[cod] 23 | -------------------------------------------------------------------------------- /tests/fixtureapps/writecb.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | path_info = environ["PATH_INFO"] 3 | if path_info == "/no_content_length": 4 | headers = [] 5 | else: 6 | headers = [("Content-Length", "9")] 7 | write = start_response("200 OK", headers) 8 | if path_info == "/long_body": 9 | write(b"abcdefghij") 10 | elif path_info == "/short_body": 11 | write(b"abcdefgh") 12 | else: 13 | write(b"abcdefghi") 14 | return [] 15 | -------------------------------------------------------------------------------- /tests/fixtureapps/getline.py: -------------------------------------------------------------------------------- 
1 | import sys 2 | 3 | if __name__ == "__main__": 4 | try: 5 | from urllib.request import URLError, urlopen 6 | except ImportError: 7 | from urllib2 import URLError, urlopen 8 | 9 | url = sys.argv[1] 10 | headers = {"Content-Type": "text/plain; charset=utf-8"} 11 | try: 12 | resp = urlopen(url) 13 | line = resp.readline().decode("ascii") # py3 14 | except URLError: 15 | line = "failed to read %s" % url 16 | sys.stdout.write(line) 17 | sys.stdout.flush() 18 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. _waitress_api: 2 | 3 | :mod:`waitress` API 4 | --------------------------- 5 | 6 | .. module:: waitress 7 | 8 | .. function:: serve(app, listen='0.0.0.0:8080', unix_socket=None, unix_socket_perms='600', threads=4, url_scheme='http', url_prefix='', ident='waitress', backlog=1024, recv_bytes=8192, send_bytes=1, outbuf_overflow=104856, outbuf_high_watermark=16777216, inbuf_overflow=52488, connection_limit=1000, cleanup_interval=30, channel_timeout=120, log_socket_errors=True, max_request_header_size=262144, max_request_body_size=1073741824, expose_tracebacks=False) 9 | 10 | See :ref:`arguments` for more information. 11 | -------------------------------------------------------------------------------- /tests/fixtureapps/nocl.py: -------------------------------------------------------------------------------- 1 | def chunks(l, n): # pragma: no cover 2 | """Yield successive n-sized chunks from l.""" 3 | for i in range(0, len(l), n): 4 | yield l[i : i + n] 5 | 6 | 7 | def gen(body): # pragma: no cover 8 | yield from chunks(body, 10) 9 | 10 | 11 | def app(environ, start_response): # pragma: no cover 12 | cl = environ.get("CONTENT_LENGTH", None) 13 | if cl is not None: 14 | cl = int(cl) 15 | body = environ["wsgi.input"].read(cl) 16 | start_response("200 OK", [("Content-Type", "text/plain")]) 17 | if environ["PATH_INFO"] == "/list": 18 | return [body] 19 | if environ["PATH_INFO"] == "/list_lentwo": 20 | return [body[0:1], body[1:]] 21 | return gen(body) 22 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools >= 41"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.black] 6 | target-version = ['py35', 'py36', 'py37', 'py38'] 7 | exclude = ''' 8 | /( 9 | \.git 10 | | .tox 11 | )/ 12 | ''' 13 | 14 | # This next section only exists for people that have their editors 15 | # automatically call isort, black already sorts entries on its own when run. 
16 | [tool.isort] 17 | profile = "black" 18 | multi_line_output = 3 19 | src_paths = ["src", "tests"] 20 | skip_glob = ["docs/*"] 21 | include_trailing_comma = true 22 | force_grid_wrap = false 23 | combine_as_imports = true 24 | line_length = 88 25 | force_sort_within_sections = true 26 | default_section = "THIRDPARTY" 27 | known_first_party = "waitress" 28 | -------------------------------------------------------------------------------- /tests/fixtureapps/error.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | cl = environ.get("CONTENT_LENGTH", None) 3 | if cl is not None: 4 | cl = int(cl) 5 | body = environ["wsgi.input"].read(cl) 6 | cl = str(len(body)) 7 | if environ["PATH_INFO"] == "/before_start_response": 8 | raise ValueError("wrong") 9 | write = start_response( 10 | "200 OK", [("Content-Length", cl), ("Content-Type", "text/plain")] 11 | ) 12 | if environ["PATH_INFO"] == "/after_write_cb": 13 | write("abc") 14 | if environ["PATH_INFO"] == "/in_generator": 15 | 16 | def foo(): 17 | yield "abc" 18 | raise ValueError 19 | 20 | return foo() 21 | raise ValueError("wrong") 22 | -------------------------------------------------------------------------------- /src/waitress/compat.py: -------------------------------------------------------------------------------- 1 | import platform 2 | 3 | # Fix for issue reported in https://github.com/Pylons/waitress/issues/138, 4 | # Python on Windows may not define IPPROTO_IPV6 in socket. 5 | import socket 6 | import sys 7 | import warnings 8 | 9 | # True if we are running on Windows 10 | WIN = platform.system() == "Windows" 11 | 12 | MAXINT = sys.maxsize 13 | HAS_IPV6 = socket.has_ipv6 14 | 15 | if hasattr(socket, "IPPROTO_IPV6") and hasattr(socket, "IPV6_V6ONLY"): 16 | IPPROTO_IPV6 = socket.IPPROTO_IPV6 17 | IPV6_V6ONLY = socket.IPV6_V6ONLY 18 | else: # pragma: no cover 19 | if WIN: 20 | IPPROTO_IPV6 = 41 21 | IPV6_V6ONLY = 27 22 | else: 23 | warnings.warn( 24 | "OS does not support required IPv6 socket flags. This is requirement " 25 | "for Waitress. Please open an issue at https://github.com/Pylons/waitress. " 26 | "IPv6 support has been disabled.", 27 | RuntimeWarning, 28 | ) 29 | HAS_IPV6 = False 30 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Waitress 2 | ======== 3 | 4 | .. image:: https://img.shields.io/pypi/v/waitress.svg 5 | :target: https://pypi.org/project/waitress/ 6 | :alt: latest version of waitress on PyPI 7 | 8 | .. image:: https://github.com/Pylons/waitress/workflows/Build%20and%20test/badge.svg 9 | :target: https://github.com/Pylons/waitress/actions?query=workflow%3A%22Build+and+test%22 10 | 11 | .. image:: https://readthedocs.org/projects/waitress/badge/?version=master 12 | :target: https://docs.pylonsproject.org/projects/waitress/en/master 13 | :alt: master Documentation Status 14 | 15 | .. image:: https://img.shields.io/badge/irc-freenode-blue.svg 16 | :target: https://webchat.freenode.net/?channels=pyramid 17 | :alt: IRC Freenode 18 | 19 | Waitress is a production-quality pure-Python WSGI server with very acceptable 20 | performance. It has no dependencies except ones which live in the Python 21 | standard library. It runs on CPython on Unix and Windows under Python 3.7+. It 22 | is also known to run on PyPy 3 (version 3.7 compatible python) on UNIX. It 23 | supports HTTP/1.0 and HTTP/1.1. 
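A minimal usage sketch (``app`` below is a placeholder for your own WSGI callable; see the usage documentation for the full set of options):

.. code-block:: python

    from waitress import serve

    # "app" stands in for your WSGI application callable
    serve(app, listen="*:8080")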
24 | 25 | For more information, see the "docs" directory of the Waitress package or visit 26 | https://docs.pylonsproject.org/projects/waitress/en/latest/ 27 | -------------------------------------------------------------------------------- /tests/test_init.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | 4 | class Test_serve(unittest.TestCase): 5 | def _callFUT(self, app, **kw): 6 | from waitress import serve 7 | 8 | return serve(app, **kw) 9 | 10 | def test_it(self): 11 | server = DummyServerFactory() 12 | app = object() 13 | result = self._callFUT(app, _server=server, _quiet=True) 14 | self.assertEqual(server.app, app) 15 | self.assertEqual(result, None) 16 | self.assertEqual(server.ran, True) 17 | 18 | 19 | class Test_serve_paste(unittest.TestCase): 20 | def _callFUT(self, app, **kw): 21 | from waitress import serve_paste 22 | 23 | return serve_paste(app, None, **kw) 24 | 25 | def test_it(self): 26 | server = DummyServerFactory() 27 | app = object() 28 | result = self._callFUT(app, _server=server, _quiet=True) 29 | self.assertEqual(server.app, app) 30 | self.assertEqual(result, 0) 31 | self.assertEqual(server.ran, True) 32 | 33 | 34 | class DummyServerFactory: 35 | ran = False 36 | 37 | def __call__(self, app, **kw): 38 | self.adj = DummyAdj(kw) 39 | self.app = app 40 | self.kw = kw 41 | return self 42 | 43 | def run(self): 44 | self.ran = True 45 | 46 | 47 | class DummyAdj: 48 | verbose = False 49 | 50 | def __init__(self, kw): 51 | self.__dict__.update(kw) 52 | -------------------------------------------------------------------------------- /src/waitress/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from waitress.server import create_server 4 | 5 | 6 | def serve(app, **kw): 7 | _server = kw.pop("_server", create_server) # test shim 8 | _quiet = kw.pop("_quiet", False) # test shim 9 | _profile = kw.pop("_profile", False) # test shim 10 | if not _quiet: # pragma: no cover 11 | # idempotent if logging has already been set up 12 | logging.basicConfig() 13 | server = _server(app, **kw) 14 | if not _quiet: # pragma: no cover 15 | server.print_listen("Serving on http://{}:{}") 16 | if _profile: # pragma: no cover 17 | profile("server.run()", globals(), locals(), (), False) 18 | else: 19 | server.run() 20 | 21 | 22 | def serve_paste(app, global_conf, **kw): 23 | serve(app, **kw) 24 | return 0 25 | 26 | 27 | def profile(cmd, globals, locals, sort_order, callers): # pragma: no cover 28 | # runs a command under the profiler and print profiling output at shutdown 29 | import os 30 | import profile 31 | import pstats 32 | import tempfile 33 | 34 | fd, fn = tempfile.mkstemp() 35 | try: 36 | profile.runctx(cmd, globals, locals, fn) 37 | stats = pstats.Stats(fn) 38 | stats.strip_dirs() 39 | # calls,time,cumulative and cumulative,calls,time are useful 40 | stats.sort_stats(*(sort_order or ("cumulative", "calls", "time"))) 41 | if callers: 42 | stats.print_callers(0.3) 43 | else: 44 | stats.print_stats(0.3) 45 | finally: 46 | os.remove(fn) 47 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | # Recommended flake8 settings while editing, we use Black for the final linting/say in how code is formatted 2 | # 3 | # pip install flake8 flake8-bugbear 4 | # 5 | # This will warn/error on things that black does not fix, on purpose. 
6 | # 7 | # Run: 8 | # 9 | # tox -e run-flake8 10 | # 11 | # To have it automatically create and install the appropriate tools, and run 12 | # flake8 across the source code/tests 13 | 14 | [flake8] 15 | # max line length is set to 88 in black, here it is set to 80 and we enable bugbear's B950 warning, which is: 16 | # 17 | # B950: Line too long. This is a pragmatic equivalent of pycodestyle’s E501: it 18 | # considers “max-line-length” but only triggers when the value has been 19 | # exceeded by more than 10%. You will no longer be forced to reformat code due 20 | # to the closing parenthesis being one character too far to satisfy the linter. 21 | # At the same time, if you do significantly violate the line length, you will 22 | # receive a message that states what the actual limit is. This is inspired by 23 | # Raymond Hettinger’s “Beyond PEP 8” talk and highway patrol not stopping you 24 | # if you drive < 5mph too fast. Disable E501 to avoid duplicate warnings. 25 | max-line-length = 80 26 | max-complexity = 12 27 | select = E,F,W,C,B,B9 28 | ignore = 29 | # E123 closing bracket does not match indentation of opening bracket’s line 30 | E123 31 | # E203 whitespace before ‘:’ (Not PEP8 compliant, Python Black) 32 | E203 33 | # E501 line too long (82 > 79 characters) (replaced by B950 from flake8-bugbear, https://github.com/PyCQA/flake8-bugbear) 34 | E501 35 | # W503 line break before binary operator (Not PEP8 compliant, Python Black) 36 | W503 37 | -------------------------------------------------------------------------------- /docs/glossary.rst: -------------------------------------------------------------------------------- 1 | .. _glossary: 2 | 3 | Glossary 4 | ======== 5 | 6 | .. glossary:: 7 | :sorted: 8 | 9 | PasteDeploy 10 | A system for configuration of WSGI web components in declarative ``.ini`` format. 11 | See https://docs.pylonsproject.org/projects/pastedeploy/en/latest/. 12 | 13 | asyncore 14 | A Python standard library module for asynchronous communications. See :mod:`asyncore`. 15 | 16 | .. versionchanged:: 1.2.0 17 | Waitress has now "vendored" ``asyncore`` into itself as ``waitress.wasyncore``. 18 | This is to cope with the eventuality that ``asyncore`` will be removed from the Python standard library in Python 3.8 or so. 19 | 20 | middleware 21 | *Middleware* is a :term:`WSGI` concept. 22 | It is a WSGI component that acts both as a server and an application. 23 | Interesting uses for middleware exist, such as caching, content-transport encoding, and other functions. 24 | See `WSGI.org `_ or `PyPI `_ to find middleware for your application. 25 | 26 | WSGI 27 | `Web Server Gateway Interface `_. 28 | This is a Python standard for connecting web applications to web servers, similar to the concept of Java Servlets. 29 | Waitress requires that your application be served as a WSGI application. 30 | 31 | wasyncore 32 | .. versionchanged:: 1.2.0 33 | Waitress has now "vendored" :term:`asyncore` into itself as ``waitress.wasyncore``. 34 | This is to cope with the eventuality that ``asyncore`` will be removed from the Python standard library in Python 3.8 or so. 
35 | -------------------------------------------------------------------------------- /docs/socket-activation.rst: -------------------------------------------------------------------------------- 1 | Socket Activation 2 | ----------------- 3 | 4 | While waitress does not support the various implementations of socket activation, 5 | for example using systemd or launchd, it is prepared to receive pre-bound sockets 6 | from init systems, process and socket managers, or other launchers that can provide 7 | pre-bound sockets. 8 | 9 | The following shows a code example starting waitress with two pre-bound Internet sockets. 10 | 11 | .. code-block:: python 12 | 13 | import socket 14 | import waitress 15 | 16 | 17 | def app(environ, start_response): 18 | content_length = environ.get('CONTENT_LENGTH', None) 19 | if content_length is not None: 20 | content_length = int(content_length) 21 | body = environ['wsgi.input'].read(content_length) 22 | content_length = str(len(body)) 23 | start_response( 24 | '200 OK', 25 | [('Content-Length', content_length), ('Content-Type', 'text/plain')] 26 | ) 27 | return [body] 28 | 29 | 30 | if __name__ == '__main__': 31 | sockets = [ 32 | socket.socket(socket.AF_INET, socket.SOCK_STREAM), 33 | socket.socket(socket.AF_INET, socket.SOCK_STREAM)] 34 | sockets[0].bind(('127.0.0.1', 8080)) 35 | sockets[1].bind(('127.0.0.1', 9090)) 36 | waitress.serve(app, sockets=sockets) 37 | for socket in sockets: 38 | socket.close() 39 | 40 | Generally, to implement socket activation for a given init system, a wrapper 41 | script uses the init system specific libraries to retrieve the sockets from 42 | the init system. Afterwards it starts waitress, passing the sockets with the parameter 43 | ``sockets``. Note that the sockets have to be bound, which all init systems 44 | supporting socket activation do. 45 | 46 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | lint, 4 | py37,py38,py39,py310,pypy38, 5 | coverage, 6 | docs 7 | isolated_build = True 8 | 9 | [testenv] 10 | commands = 11 | python --version 12 | python -mpytest \ 13 | pypy38: --no-cov \ 14 | {posargs:} 15 | extras = 16 | testing 17 | setenv = 18 | COVERAGE_FILE=.coverage.{envname} 19 | 20 | [testenv:coverage] 21 | skip_install = True 22 | commands = 23 | coverage combine 24 | coverage xml 25 | coverage report --fail-under=100 26 | deps = 27 | coverage 28 | setenv = 29 | COVERAGE_FILE=.coverage 30 | 31 | [testenv:docs] 32 | whitelist_externals = 33 | make 34 | commands = 35 | make -C docs html BUILDDIR={envdir} "SPHINXOPTS=-W -E -D suppress_warnings=ref.term" 36 | extras = 37 | docs 38 | 39 | [testenv:lint] 40 | skip_install = True 41 | commands = 42 | isort --check-only --df src/waitress tests 43 | black --check --diff . 44 | check-manifest 45 | # flake8 src/waitress/ tests 46 | # build sdist/wheel 47 | python -m build . 48 | twine check dist/* 49 | deps = 50 | black 51 | build 52 | check-manifest 53 | flake8 54 | flake8-bugbear 55 | isort 56 | readme_renderer 57 | twine 58 | 59 | [testenv:format] 60 | skip_install = true 61 | commands = 62 | isort src/waitress tests 63 | black . 
64 | deps = 65 | black 66 | isort 67 | 68 | [testenv:build] 69 | skip_install = true 70 | commands = 71 | # clean up build/ and dist/ folders 72 | python -c 'import shutil; shutil.rmtree("build", ignore_errors=True)' 73 | # Make sure we aren't forgetting anything 74 | check-manifest 75 | # build sdist/wheel 76 | python -m build . 77 | # Verify all is well 78 | twine check dist/* 79 | 80 | deps = 81 | build 82 | check-manifest 83 | readme_renderer 84 | twine 85 | -------------------------------------------------------------------------------- /tests/fixtureapps/echo.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | import json 3 | 4 | 5 | def app_body_only(environ, start_response): # pragma: no cover 6 | cl = environ.get("CONTENT_LENGTH", None) 7 | if cl is not None: 8 | cl = int(cl) 9 | body = environ["wsgi.input"].read(cl) 10 | cl = str(len(body)) 11 | start_response( 12 | "200 OK", 13 | [ 14 | ("Content-Length", cl), 15 | ("Content-Type", "text/plain"), 16 | ], 17 | ) 18 | return [body] 19 | 20 | 21 | def app(environ, start_response): # pragma: no cover 22 | cl = environ.get("CONTENT_LENGTH", None) 23 | if cl is not None: 24 | cl = int(cl) 25 | request_body = environ["wsgi.input"].read(cl) 26 | cl = str(len(request_body)) 27 | meta = { 28 | "method": environ["REQUEST_METHOD"], 29 | "path_info": environ["PATH_INFO"], 30 | "script_name": environ["SCRIPT_NAME"], 31 | "query_string": environ["QUERY_STRING"], 32 | "content_length": cl, 33 | "scheme": environ["wsgi.url_scheme"], 34 | "remote_addr": environ["REMOTE_ADDR"], 35 | "remote_host": environ["REMOTE_HOST"], 36 | "server_port": environ["SERVER_PORT"], 37 | "server_name": environ["SERVER_NAME"], 38 | "headers": { 39 | k[len("HTTP_") :]: v for k, v in environ.items() if k.startswith("HTTP_") 40 | }, 41 | } 42 | response = json.dumps(meta).encode("utf8") + b"\r\n\r\n" + request_body 43 | start_response( 44 | "200 OK", 45 | [ 46 | ("Content-Length", str(len(response))), 47 | ("Content-Type", "text/plain"), 48 | ], 49 | ) 50 | return [response] 51 | 52 | 53 | Echo = namedtuple( 54 | "Echo", 55 | ( 56 | "method path_info script_name query_string content_length scheme " 57 | "remote_addr remote_host server_port server_name headers body" 58 | ), 59 | ) 60 | 61 | 62 | def parse_response(response): 63 | meta, body = response.split(b"\r\n\r\n", 1) 64 | meta = json.loads(meta.decode("utf8")) 65 | return Echo(body=body, **meta) 66 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = waitress 3 | version = 3.0.0b0 4 | description = Waitress WSGI server 5 | long_description = file: README.rst, CHANGES.txt 6 | long_description_content_type = text/x-rst 7 | keywords = waitress wsgi server http 8 | license = ZPL 2.1 9 | classifiers = 10 | Development Status :: 6 - Mature 11 | Environment :: Web Environment 12 | Intended Audience :: Developers 13 | License :: OSI Approved :: Zope Public License 14 | Programming Language :: Python 15 | Programming Language :: Python :: 3 16 | Programming Language :: Python :: 3.7 17 | Programming Language :: Python :: 3.8 18 | Programming Language :: Python :: 3.9 19 | Programming Language :: Python :: 3.10 20 | Programming Language :: Python :: Implementation :: CPython 21 | Programming Language :: Python :: Implementation :: PyPy 22 | Operating System :: OS Independent 23 | Topic :: Internet :: 
WWW/HTTP 24 | Topic :: Internet :: WWW/HTTP :: WSGI 25 | url = https://github.com/Pylons/waitress 26 | project_urls = 27 | Documentation = https://docs.pylonsproject.org/projects/waitress/en/latest/index.html 28 | Changelog = https://docs.pylonsproject.org/projects/waitress/en/latest/index.html#change-history 29 | Issue Tracker = https://github.com/Pylons/waitress/issues 30 | 31 | author = Zope Foundation and Contributors 32 | author_email = zope-dev@zope.org 33 | maintainer = Pylons Project 34 | maintainer_email = pylons-discuss@googlegroups.com 35 | 36 | [options] 37 | package_dir= 38 | =src 39 | packages=find: 40 | python_requires = >=3.7.0 41 | 42 | [options.entry_points] 43 | paste.server_runner = 44 | main = waitress:serve_paste 45 | console_scripts = 46 | waitress-serve = waitress.runner:run 47 | 48 | [options.packages.find] 49 | where=src 50 | 51 | [options.extras_require] 52 | testing = 53 | pytest 54 | pytest-cover 55 | coverage>=5.0 56 | 57 | docs = 58 | Sphinx>=1.8.1 59 | docutils 60 | pylons-sphinx-themes>=1.0.9 61 | 62 | [tool:pytest] 63 | python_files = test_*.py 64 | # For the benefit of test_wasyncore.py 65 | python_classes = Test* 66 | testpaths = 67 | tests 68 | addopts = --cov -W always 69 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Zope Public License (ZPL) Version 2.1 2 | 3 | A copyright notice accompanies this license document that identifies the 4 | copyright holders. 5 | 6 | This license has been certified as open source. It has also been designated as 7 | GPL compatible by the Free Software Foundation (FSF). 8 | 9 | Redistribution and use in source and binary forms, with or without 10 | modification, are permitted provided that the following conditions are met: 11 | 12 | 1. Redistributions in source code must retain the accompanying copyright 13 | notice, this list of conditions, and the following disclaimer. 14 | 15 | 2. Redistributions in binary form must reproduce the accompanying copyright 16 | notice, this list of conditions, and the following disclaimer in the 17 | documentation and/or other materials provided with the distribution. 18 | 19 | 3. Names of the copyright holders must not be used to endorse or promote 20 | products derived from this software without prior written permission from the 21 | copyright holders. 22 | 23 | 4. The right to distribute this software or to use it for any purpose does not 24 | give you the right to use Servicemarks (sm) or Trademarks (tm) of the 25 | copyright 26 | holders. Use of them is covered by separate agreement with the copyright 27 | holders. 28 | 29 | 5. If any files are modified, you must cause the modified files to carry 30 | prominent notices stating that you changed the files and the date of any 31 | change. 32 | 33 | Disclaimer 34 | 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESSED 36 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 37 | OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO 38 | EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, 39 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 41 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 42 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 43 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 44 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 | -------------------------------------------------------------------------------- /docs/differences.rst: -------------------------------------------------------------------------------- 1 | Differences from ``zope.server`` 2 | -------------------------------- 3 | 4 | - Has no non-stdlib dependencies. 5 | 6 | - No support for non-WSGI servers (no FTP, plain-HTTP, etc); refactorings and 7 | slight interface changes as a result. Non-WSGI-supporting code removed. 8 | 9 | - Slight cleanup in the way application response headers are handled (no more 10 | "accumulated headers"). 11 | 12 | - Supports the HTTP 1.1 "expect/continue" mechanism (required by WSGI spec). 13 | 14 | - Calls "close()" on the app_iter object returned by the WSGI application. 15 | 16 | - Allows trusted proxies to override ``wsgi.url_scheme`` for particular 17 | requests by supplying the ``X_FORWARDED_PROTO`` header. 18 | 19 | - Supports an explicit ``wsgi.url_scheme`` parameter for ease of deployment 20 | behind SSL proxies. 21 | 22 | - Different adjustment defaults (less conservative). 23 | 24 | - Python 3 compatible. 25 | 26 | - More test coverage (unit tests added, functional tests refactored and more 27 | added). 28 | 29 | - Supports convenience ``waitress.serve`` function (e.g. ``from waitress 30 | import serve; serve(app)``) and convenience ``server.run()`` function. 31 | 32 | - Returns a "real" write method from start_response. 33 | 34 | - Provides a getsockname method of the server for the benefit of figuring out which port the 35 | server is listening on when it's bound to port 0. 36 | 37 | - Warns when the app_iter bytestream contains fewer or more bytes than the specified 38 | Content-Length. 39 | 40 | - Sets the Content-Length header if len(app_iter) == 1 and none is provided. 41 | 42 | - Raises an exception if start_response isn't called before any body write. 43 | 44 | - channel.write does not accept non-byte-sequences. 45 | 46 | - Puts the maintenance check on the server rather than the channel to avoid a class of 47 | DoS. 48 | 49 | - wsgi.multiprocess is set (correctly) to False. 50 | 51 | - Ensures the header total cannot exceed a maximum size. 52 | 53 | - Ensures the body total cannot exceed a maximum size. 54 | 55 | - Broken chunked encoding request bodies don't crash the server. 56 | 57 | - Handles keepalive/pipelining properly (no out of order responses, no 58 | premature channel closes). 59 | 60 | - Sends a 500 error to the client when a task raises an uncaught exception 61 | (with optional traceback rendering via the "expose_tracebacks" adjustment). 62 | 63 | - Supports HTTP/1.1 chunked responses when the application doesn't set a 64 | Content-Length header. 65 | 66 | - Doesn't hang a thread up trying to send data to slow clients. 67 | 68 | - Supports the ``wsgi.file_wrapper`` protocol.
69 | -------------------------------------------------------------------------------- /docs/filewrapper.rst: -------------------------------------------------------------------------------- 1 | Support for ``wsgi.file_wrapper`` 2 | --------------------------------- 3 | 4 | Waitress supports the Python Web Server Gateway Interface v1.0 as specified in :pep:`3333`. Here's a usage example: 5 | 6 | .. code-block:: python 7 | 8 | import os 9 | 10 | here = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | def myapp(environ, start_response): 13 | f = open(os.path.join(here, 'myphoto.jpg'), 'rb') 14 | headers = [('Content-Type', 'image/jpeg')] 15 | start_response( 16 | '200 OK', 17 | headers 18 | ) 19 | return environ['wsgi.file_wrapper'](f, 32768) 20 | 21 | The file wrapper constructor is accessed via 22 | ``environ['wsgi.file_wrapper']``. The signature of the file wrapper 23 | constructor is ``(filelike_object, block_size)``. Both arguments must be 24 | passed as positional (not keyword) arguments. The result of creating a file 25 | wrapper should be **returned** as the ``app_iter`` from a WSGI application. 26 | 27 | The object passed as ``filelike_object`` to the wrapper must be a file-like 28 | object which supports *at least* the ``read()`` method, and the ``read()`` 29 | method must support an optional size hint argument and the ``read()`` method 30 | *must* return **bytes** objects (never unicode). It *should* support the 31 | ``seek()`` and ``tell()`` methods. If it does not, normal iteration over the 32 | ``filelike_object`` using the provided ``block_size`` is used (and copying is 33 | done, negating any benefit of the file wrapper). It *should* support a 34 | ``close()`` method. 35 | 36 | The specified ``block_size`` argument to the file wrapper constructor will be 37 | used only when the ``filelike_object`` doesn't support ``seek`` and/or 38 | ``tell`` methods. Waitress needs to use normal iteration to serve the file 39 | in this degenerate case (as per the WSGI pec), and this block size will be 40 | used as the iteration chunk size. The ``block_size`` argument is optional; 41 | if it is not passed, a default value ``32768`` is used. 42 | 43 | Waitress will set a ``Content-Length`` header on behalf of an application 44 | when a file wrapper with a sufficiently file-like object is used if the 45 | application hasn't already set one. 46 | 47 | The machinery which handles a file wrapper currently doesn't do anything 48 | particularly special using fancy system calls (it doesn't use ``sendfile`` 49 | for example); using it currently just prevents the system from needing to 50 | copy data to a temporary buffer in order to send it to the client. No 51 | copying of data is done when a WSGI app returns a file wrapper that wraps a 52 | sufficiently file-like object. It may do something fancier in the future. 53 | -------------------------------------------------------------------------------- /src/waitress/rfc7230.py: -------------------------------------------------------------------------------- 1 | """ 2 | This contains a bunch of RFC7230 definitions and regular expressions that are 3 | needed to properly parse HTTP messages. 4 | """ 5 | 6 | import re 7 | 8 | HEXDIG = "[0-9a-fA-F]" 9 | DIGIT = "[0-9]" 10 | 11 | WS = "[ \t]" 12 | OWS = WS + "{0,}?" 13 | RWS = WS + "{1,}?" 14 | BWS = OWS 15 | 16 | # RFC 7230 Section 3.2.6 "Field Value Components": 17 | # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" 18 | # / "+" / "-" / "." 
/ "^" / "_" / "`" / "|" / "~" 19 | # / DIGIT / ALPHA 20 | # obs-text = %x80-FF 21 | TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]" 22 | OBS_TEXT = r"\x80-\xff" 23 | 24 | TOKEN = TCHAR + "{1,}" 25 | 26 | # RFC 5234 Appendix B.1 "Core Rules": 27 | # VCHAR = %x21-7E 28 | # ; visible (printing) characters 29 | VCHAR = r"\x21-\x7e" 30 | 31 | # The '\\' between \x5b and \x5d is needed to escape \x5d (']') 32 | QDTEXT = "[\t \x21\x23-\x5b\\\x5d-\x7e" + OBS_TEXT + "]" 33 | 34 | QUOTED_PAIR = r"\\" + "([\t " + VCHAR + OBS_TEXT + "])" 35 | QUOTED_STRING = '"(?:(?:' + QDTEXT + ")|(?:" + QUOTED_PAIR + '))*"' 36 | 37 | # header-field = field-name ":" OWS field-value OWS 38 | # field-name = token 39 | # field-value = *( field-content / obs-fold ) 40 | # field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] 41 | # field-vchar = VCHAR / obs-text 42 | 43 | # Errata from: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 44 | # changes field-content to: 45 | # 46 | # field-content = field-vchar [ 1*( SP / HTAB / field-vchar ) 47 | # field-vchar ] 48 | 49 | FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]" 50 | # Field content is more greedy than the ABNF, in that it will match the whole value 51 | FIELD_CONTENT = FIELD_VCHAR + "+(?:[ \t]+" + FIELD_VCHAR + "+)*" 52 | # Which allows the field value here to just see if there is even a value in the first place 53 | FIELD_VALUE = "(?:" + FIELD_CONTENT + ")?" 54 | 55 | # chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] ) 56 | # chunk-ext-name = token 57 | # chunk-ext-val = token / quoted-string 58 | 59 | CHUNK_EXT_NAME = TOKEN 60 | CHUNK_EXT_VAL = "(?:" + TOKEN + ")|(?:" + QUOTED_STRING + ")" 61 | CHUNK_EXT = ( 62 | "(?:;(?P" + CHUNK_EXT_NAME + ")(?:=(?P" + CHUNK_EXT_VAL + "))?)*" 63 | ) 64 | 65 | # Pre-compiled regular expressions for use elsewhere 66 | ONLY_HEXDIG_RE = re.compile(("^" + HEXDIG + "+$").encode("latin-1")) 67 | ONLY_DIGIT_RE = re.compile(("^" + DIGIT + "+$").encode("latin-1")) 68 | HEADER_FIELD_RE = re.compile( 69 | ( 70 | "^(?P" + TOKEN + "):" + OWS + "(?P" + FIELD_VALUE + ")" + OWS + "$" 71 | ).encode("latin-1") 72 | ) 73 | QUOTED_PAIR_RE = re.compile(QUOTED_PAIR) 74 | QUOTED_STRING_RE = re.compile(QUOTED_STRING) 75 | CHUNK_EXT_RE = re.compile(("^" + CHUNK_EXT + "$").encode("latin-1")) 76 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | 9 | # Internal variables. 10 | PAPEROPT_a4 = -D latex_paper_size=a4 11 | PAPEROPT_letter = -D latex_paper_size=letter 12 | ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 13 | 14 | .PHONY: help clean html web pickle htmlhelp latex changes linkcheck 15 | 16 | help: 17 | @echo "Please use \`make ' where is one of" 18 | @echo " html to make standalone HTML files" 19 | @echo " pickle to make pickle files (usable by e.g. 
sphinx-web)" 20 | @echo " htmlhelp to make HTML files and a HTML help project" 21 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 22 | @echo " changes to make an overview over all changed/added/deprecated items" 23 | @echo " linkcheck to check all external links for integrity" 24 | 25 | clean: 26 | -rm -rf _build/* 27 | 28 | html: 29 | mkdir -p _build/html _build/doctrees 30 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html 31 | @echo 32 | @echo "Build finished. The HTML pages are in _build/html." 33 | 34 | text: 35 | mkdir -p _build/text _build/doctrees 36 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) _build/text 37 | @echo 38 | @echo "Build finished. The HTML pages are in _build/text." 39 | 40 | pickle: 41 | mkdir -p _build/pickle _build/doctrees 42 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle 43 | @echo 44 | @echo "Build finished; now you can process the pickle files or run" 45 | @echo " sphinx-web _build/pickle" 46 | @echo "to start the sphinx-web server." 47 | 48 | web: pickle 49 | 50 | htmlhelp: 51 | mkdir -p _build/htmlhelp _build/doctrees 52 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp 53 | @echo 54 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 55 | ".hhp project file in _build/htmlhelp." 56 | 57 | latex: 58 | mkdir -p _build/latex _build/doctrees 59 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex 60 | cp _static/*.png _build/latex 61 | ./convert_images.sh 62 | cp _static/latex-warning.png _build/latex 63 | cp _static/latex-note.png _build/latex 64 | @echo 65 | @echo "Build finished; the LaTeX files are in _build/latex." 66 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 67 | "run these through (pdf)latex." 68 | 69 | changes: 70 | mkdir -p _build/changes _build/doctrees 71 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes 72 | @echo 73 | @echo "The overview file is in _build/changes." 74 | 75 | linkcheck: 76 | mkdir -p _build/linkcheck _build/doctrees 77 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck 78 | @echo 79 | @echo "Link check complete; look for any errors in the above output " \ 80 | "or in _build/linkcheck/output.txt." 81 | 82 | epub: 83 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) _build/epub 84 | @echo 85 | @echo "Build finished. The epub file is in _build/epub." 
86 | 87 | -------------------------------------------------------------------------------- /tests/fixtureapps/filewrapper.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | 4 | here = os.path.dirname(os.path.abspath(__file__)) 5 | fn = os.path.join(here, "groundhog1.jpg") 6 | 7 | 8 | class KindaFilelike: # pragma: no cover 9 | def __init__(self, bytes): 10 | self.bytes = bytes 11 | 12 | def read(self, n): 13 | bytes = self.bytes[:n] 14 | self.bytes = self.bytes[n:] 15 | return bytes 16 | 17 | 18 | class UnseekableIOBase(io.RawIOBase): # pragma: no cover 19 | def __init__(self, bytes): 20 | self.buf = io.BytesIO(bytes) 21 | 22 | def writable(self): 23 | return False 24 | 25 | def readable(self): 26 | return True 27 | 28 | def seekable(self): 29 | return False 30 | 31 | def read(self, n): 32 | return self.buf.read(n) 33 | 34 | 35 | def app(environ, start_response): # pragma: no cover 36 | path_info = environ["PATH_INFO"] 37 | if path_info.startswith("/filelike"): 38 | f = open(fn, "rb") 39 | f.seek(0, 2) 40 | cl = f.tell() 41 | f.seek(0) 42 | if path_info == "/filelike": 43 | headers = [ 44 | ("Content-Length", str(cl)), 45 | ("Content-Type", "image/jpeg"), 46 | ] 47 | elif path_info == "/filelike_nocl": 48 | headers = [("Content-Type", "image/jpeg")] 49 | elif path_info == "/filelike_shortcl": 50 | # short content length 51 | headers = [ 52 | ("Content-Length", "1"), 53 | ("Content-Type", "image/jpeg"), 54 | ] 55 | else: 56 | # long content length (/filelike_longcl) 57 | headers = [ 58 | ("Content-Length", str(cl + 10)), 59 | ("Content-Type", "image/jpeg"), 60 | ] 61 | else: 62 | with open(fn, "rb") as fp: 63 | data = fp.read() 64 | cl = len(data) 65 | f = KindaFilelike(data) 66 | if path_info == "/notfilelike": 67 | headers = [ 68 | ("Content-Length", str(len(data))), 69 | ("Content-Type", "image/jpeg"), 70 | ] 71 | elif path_info == "/notfilelike_iobase": 72 | headers = [ 73 | ("Content-Length", str(len(data))), 74 | ("Content-Type", "image/jpeg"), 75 | ] 76 | f = UnseekableIOBase(data) 77 | elif path_info == "/notfilelike_nocl": 78 | headers = [("Content-Type", "image/jpeg")] 79 | elif path_info == "/notfilelike_shortcl": 80 | # short content length 81 | headers = [ 82 | ("Content-Length", "1"), 83 | ("Content-Type", "image/jpeg"), 84 | ] 85 | else: 86 | # long content length (/notfilelike_longcl) 87 | headers = [ 88 | ("Content-Length", str(cl + 10)), 89 | ("Content-Type", "image/jpeg"), 90 | ] 91 | 92 | start_response("200 OK", headers) 93 | return environ["wsgi.file_wrapper"](f, 8192) 94 | -------------------------------------------------------------------------------- /.github/workflows/ci-tests.yml: -------------------------------------------------------------------------------- 1 | name: Build and test 2 | 3 | on: 4 | # Only on pushes to master or one of the release branches we build on push 5 | push: 6 | branches: 7 | - master 8 | - "[0-9].[0-9]+-branch" 9 | tags: 10 | # Build pull requests 11 | pull_request: 12 | 13 | jobs: 14 | test: 15 | strategy: 16 | matrix: 17 | py: 18 | - "3.7" 19 | - "3.8" 20 | - "3.9" 21 | - "3.10" 22 | - "pypy-3.8" 23 | # Pre-release 24 | - "3.11.0-alpha - 3.11.0" 25 | os: 26 | - "ubuntu-latest" 27 | - "windows-latest" 28 | - "macos-latest" 29 | architecture: 30 | - x64 31 | - x86 32 | include: 33 | - py: "pypy-3.8" 34 | toxenv: "pypy38" 35 | exclude: 36 | # Linux and macOS don't have x86 python 37 | - os: "ubuntu-latest" 38 | architecture: x86 39 | - os: "macos-latest" 40 | architecture: 
x86 41 | 42 | name: "Python: ${{ matrix.py }}-${{ matrix.architecture }} on ${{ matrix.os }}" 43 | runs-on: ${{ matrix.os }} 44 | steps: 45 | - uses: actions/checkout@v2 46 | - name: Setup python 47 | uses: actions/setup-python@v2 48 | with: 49 | python-version: ${{ matrix.py }} 50 | architecture: ${{ matrix.architecture }} 51 | - run: pip install tox 52 | - name: Running tox with specific toxenv 53 | if: ${{ matrix.toxenv != '' }} 54 | env: 55 | TOXENV: ${{ matrix.toxenv }} 56 | run: tox 57 | - name: Running tox for current python version 58 | if: ${{ matrix.toxenv == '' }} 59 | run: tox -e py 60 | 61 | coverage: 62 | runs-on: ubuntu-latest 63 | name: Validate coverage 64 | steps: 65 | - uses: actions/checkout@v2 66 | - name: Setup python 3.10 67 | uses: actions/setup-python@v2 68 | with: 69 | python-version: "3.10" 70 | architecture: x64 71 | 72 | - run: pip install tox 73 | - run: tox -e py310,coverage 74 | docs: 75 | runs-on: ubuntu-latest 76 | name: Build the documentation 77 | steps: 78 | - uses: actions/checkout@v2 79 | - name: Setup python 80 | uses: actions/setup-python@v2 81 | with: 82 | python-version: "3.10" 83 | architecture: x64 84 | - run: pip install tox 85 | - run: tox -e docs 86 | lint: 87 | runs-on: ubuntu-latest 88 | name: Lint the package 89 | steps: 90 | - uses: actions/checkout@v2 91 | - name: Setup python 92 | uses: actions/setup-python@v2 93 | with: 94 | python-version: "3.10" 95 | architecture: x64 96 | - run: pip install tox 97 | - run: tox -e lint 98 | -------------------------------------------------------------------------------- /docs/design.rst: -------------------------------------------------------------------------------- 1 | Design 2 | ------ 3 | 4 | Waitress uses a combination of asynchronous and synchronous code to do its job. 5 | It handles I/O to and from clients using the :term:`wasyncore`, which is :term:`asyncore` vendored into Waitress. 6 | It services requests via threads. 7 | 8 | .. note:: 9 | :term:`asyncore` has been deprecated since Python 3.6. 10 | Work continues on its inevitable removal from the Python standard library. 11 | Its recommended replacement is :mod:`asyncio`. 12 | 13 | Although :term:`asyncore` has been vendored into Waitress as :term:`wasyncore`, you may see references to "asyncore" in this documentation's code examples and API. 14 | The terms are effectively the same and may be used interchangeably. 15 | 16 | The :term:`wasyncore` module: 17 | 18 | - Uses the ``select.select`` function to wait for connections from clients 19 | and determine if a connected client is ready to receive output. 20 | 21 | - Creates a channel whenever a new connection is made to the server. 22 | 23 | - Executes methods of a channel whenever it believes data can be read from or 24 | written to the channel. 25 | 26 | A "channel" is created for each connection from a client to the server. The 27 | channel handles all requests over the same connection from that client. A 28 | channel will handle some number of requests during its lifetime: zero to how 29 | ever many HTTP requests are sent to the server by the client over a single 30 | connection. For example, an HTTP/1.1 client may issue a theoretically 31 | infinite number of requests over the same connection; each of these will be 32 | handled by the same channel. 
An HTTP/1.0 client without a "Connection: 33 | keep-alive" header will request usually only one over a single TCP 34 | connection, however, and when the request has completed, the client 35 | disconnects and reconnects (which will create another channel). When the 36 | connection related to a channel is closed, the channel is destroyed and 37 | garbage collected. 38 | 39 | When a channel determines the client has sent at least one full valid HTTP 40 | request, it schedules a "task" with a "thread dispatcher". The thread 41 | dispatcher maintains a fixed pool of worker threads available to do client 42 | work (by default, 4 threads). If a worker thread is available when a task is 43 | scheduled, the worker thread runs the task. The task has access to the 44 | channel, and can write back to the channel's output buffer. When all worker 45 | threads are in use, scheduled tasks will wait in a queue for a worker thread 46 | to become available. 47 | 48 | I/O is always done asynchronously (by :term:`wasyncore`) in the main thread. 49 | Worker threads never do any I/O. 50 | This means that 51 | 52 | #. a large number of clients can be connected to the server at once, and 53 | #. worker threads will never be hung up trying to send data to a slow client. 54 | 55 | No attempt is made to kill a "hung thread". It's assumed that when a task 56 | (application logic) starts that it will eventually complete. If for some 57 | reason WSGI application logic never completes and spins forever, the worker 58 | thread related to that WSGI application will be consumed "forever", and if 59 | enough worker threads are consumed like this, the server will stop responding 60 | entirely. 61 | 62 | Periodic maintenance is done by the main thread (the thread handling I/O). 63 | If a channel hasn't sent or received any data in a while, the channel's 64 | connection is closed, and the channel is destroyed. 65 | -------------------------------------------------------------------------------- /RELEASING.txt: -------------------------------------------------------------------------------- 1 | Releasing 2 | ========= 3 | 4 | - For clarity, we define releases as follows. 5 | 6 | - Alpha, beta, dev and similar statuses do not qualify whether a release is 7 | major or minor. The term "pre-release" means alpha, beta, or dev. 8 | 9 | - A release is final when it is no longer pre-release. 10 | 11 | - A *major* release is where the first number either before or after the 12 | first dot increases. Examples: 1.0 to 1.1a1, or 0.9 to 1.0. 13 | 14 | - A *minor* or *bug fix* release is where the number after the second dot 15 | increases. Example: 1.0 to 1.0.1. 16 | 17 | Prepare new release 18 | ------------------- 19 | 20 | - Do platform test via tox: 21 | 22 | $ tox -r 23 | 24 | Make sure statement coverage is at 100% (the test run will fail if not). 25 | 26 | - Run tests on Windows if feasible. 27 | 28 | - Ensure all features of the release are documented (audit CHANGES.txt or 29 | communicate with contributors). 30 | 31 | - Change CHANGES.txt heading to reflect the new version number. 32 | 33 | - Minor releases should include a link under "Bug Fix Releases" to the minor 34 | feature changes in CHANGES.txt. 35 | 36 | - Change setup.py version to the release version number. 37 | 38 | - Make sure PyPI long description renders (requires ``readme_renderer`` 39 | installed into your Python):: 40 | 41 | $ python setup.py check -r -s -m 42 | 43 | - Create a release tag. 
44 | 45 | - Make sure your Python has ``setuptools-git``, ``twine``, and ``wheel`` 46 | installed and release to PyPI:: 47 | 48 | $ python setup.py sdist bdist_wheel 49 | $ twine upload dist/waitress-X.X-* 50 | 51 | 52 | Prepare master for further development (major releases only) 53 | ------------------------------------------------------------ 54 | 55 | - In CHANGES.txt, preserve headings but clear out content. Add heading 56 | "unreleased" for the version number. 57 | 58 | - Forward port the changes in CHANGES.txt to HISTORY.txt. 59 | 60 | - Change setup.py version to the next version number. 61 | 62 | 63 | Marketing and communications 64 | ---------------------------- 65 | 66 | - Check `https://wiki.python.org/moin/WebServers 67 | `_. 68 | 69 | - Announce to Twitter. 70 | 71 | ``` 72 | waitress 1.x released. 73 | 74 | PyPI 75 | https://pypi.org/project/waitress/1.x/ 76 | 77 | === One time only for new version, first pre-release === 78 | What's New 79 | https://docs.pylonsproject.org/projects/waitress/en/latest/#id2 80 | === For all subsequent pre-releases === 81 | Changes 82 | https://docs.pylonsproject.org/projects/waitress/en/latest/#change-history 83 | 84 | Documentation: 85 | https://docs.pylonsproject.org/projects/waitress/en/latest/ 86 | 87 | Issues 88 | https://github.com/Pylons/waitress/issues 89 | ``` 90 | 91 | - Announce to maillist. 92 | 93 | ``` 94 | waitress 1.X.X has been released. 95 | 96 | The full changelog is here: 97 | https://docs.pylonsproject.org/projects/waitress/en/latest/#change-history 98 | 99 | What's New In waitress 1.X: 100 | https://docs.pylonsproject.org/projects/waitress/en/latest/#id2 101 | 102 | Documentation: 103 | https://docs.pylonsproject.org/projects/waitress/en/latest/ 104 | 105 | You can install it via PyPI: 106 | 107 | pip install waitress==1.X 108 | 109 | Enjoy, and please report any issues you find to the issue tracker at 110 | https://github.com/Pylons/waitress/issues 111 | 112 | Thanks! 113 | 114 | - waitress core developers 115 | ``` 116 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | .. _usage: 2 | 3 | ===== 4 | Usage 5 | ===== 6 | 7 | The following code will run waitress on port 8080 on all available IP addresses, both IPv4 and IPv6. 8 | 9 | .. code-block:: python 10 | 11 | from waitress import serve 12 | serve(wsgiapp, listen='*:8080') 13 | 14 | Press :kbd:`Ctrl-C` (or :kbd:`Ctrl-Break` on Windows) to exit the server. 15 | 16 | The following will run waitress on port 8080 on all available IPv4 addresses, but not IPv6. 17 | 18 | .. code-block:: python 19 | 20 | from waitress import serve 21 | serve(wsgiapp, host='0.0.0.0', port=8080) 22 | 23 | By default Waitress binds to any IPv4 address on port 8080. 24 | You can omit the ``host`` and ``port`` arguments and just call ``serve`` with the WSGI app as a single argument: 25 | 26 | .. code-block:: python 27 | 28 | from waitress import serve 29 | serve(wsgiapp) 30 | 31 | If you want to serve your application through a UNIX domain socket (to serve a downstream HTTP server/proxy such as nginx, lighttpd, and so on), call ``serve`` with the ``unix_socket`` argument: 32 | 33 | .. code-block:: python 34 | 35 | from waitress import serve 36 | serve(wsgiapp, unix_socket='/path/to/unix.sock') 37 | 38 | Needless to say, this configuration won't work on Windows. 39 | 40 | Exceptions generated by your application will be shown on the console by 41 | default. 
See :ref:`access-logging` to change this. 42 | 43 | There's an entry point for :term:`PasteDeploy` (``egg:waitress#main``) that 44 | lets you use Waitress's WSGI gateway from a configuration file, e.g.: 45 | 46 | .. code-block:: ini 47 | 48 | [server:main] 49 | use = egg:waitress#main 50 | listen = 127.0.0.1:8080 51 | 52 | Using ``host`` and ``port`` is also supported: 53 | 54 | .. code-block:: ini 55 | 56 | [server:main] 57 | host = 127.0.0.1 58 | port = 8080 59 | 60 | The :term:`PasteDeploy` syntax for UNIX domain sockets is analogous: 61 | 62 | .. code-block:: ini 63 | 64 | [server:main] 65 | use = egg:waitress#main 66 | unix_socket = /path/to/unix.sock 67 | 68 | You can find more settings to tweak (arguments to ``waitress.serve`` or 69 | equivalent settings in PasteDeploy) in :ref:`arguments`. 70 | 71 | Additionally, there is a command line runner called ``waitress-serve``, which 72 | can be used in development and in situations where the likes of 73 | :term:`PasteDeploy` is not necessary: 74 | 75 | .. code-block:: bash 76 | 77 | # Listen on both IPv4 and IPv6 on port 8041 78 | waitress-serve --listen=*:8041 myapp:wsgifunc 79 | 80 | # Listen on only IPv4 on port 8041 81 | waitress-serve --port=8041 myapp:wsgifunc 82 | 83 | Heroku 84 | ------ 85 | 86 | Waitress can be used to serve WSGI apps on Heroku. Include Waitress in your ``requirements.txt`` file and update the Procfile as follows: 87 | 88 | .. code-block:: bash 89 | 90 | web: waitress-serve \ 91 | --listen "*:$PORT" \ 92 | --trusted-proxy '*' \ 93 | --trusted-proxy-headers 'x-forwarded-for x-forwarded-proto x-forwarded-port' \ 94 | --log-untrusted-proxy-headers \ 95 | --clear-untrusted-proxy-headers \ 96 | --threads ${WEB_CONCURRENCY:-4} \ 97 | myapp:wsgifunc 98 | 99 | The proxy config instructs Waitress to trust the `forwarding headers `_ set by the Heroku load balancer. 100 | It also allows for setting the standard ``WEB_CONCURRENCY`` environment variable to tweak the number of requests handled by Waitress at a time. 101 | 102 | Note that Waitress uses a thread-based model and care should be taken to ensure that requests do not take longer than 30 seconds or Heroku will inform the client that the request failed even though the request is still being processed by Waitress and occupying a thread until it completes. 103 | 104 | For more information on this, see :ref:`runner`.
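
If you prefer to start the same Heroku-style configuration from a small Python entry point instead of the ``waitress-serve`` command, ``serve`` accepts the equivalent keyword arguments (underscores instead of hyphens). The following is a rough, untested sketch; ``myapp`` and ``wsgifunc`` are placeholders for your own module and WSGI callable:

.. code-block:: python

    import os

    from waitress import serve

    from myapp import wsgifunc  # placeholder WSGI application

    serve(
        wsgifunc,
        # Heroku assigns the port at run time via the PORT environment variable.
        listen='*:' + os.environ['PORT'],
        trusted_proxy='*',
        trusted_proxy_headers='x-forwarded-for x-forwarded-proto x-forwarded-port',
        log_untrusted_proxy_headers=True,
        clear_untrusted_proxy_headers=True,
        # WEB_CONCURRENCY maps to the number of worker threads.
        threads=int(os.environ.get('WEB_CONCURRENCY', 4)),
    )
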
105 | -------------------------------------------------------------------------------- /tests/test_trigger.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import unittest 4 | 5 | if not sys.platform.startswith("win"): 6 | 7 | class Test_trigger(unittest.TestCase): 8 | def _makeOne(self, map): 9 | from waitress.trigger import trigger 10 | 11 | self.inst = trigger(map) 12 | return self.inst 13 | 14 | def tearDown(self): 15 | self.inst.close() # prevent __del__ warning from file_dispatcher 16 | 17 | def test__close(self): 18 | map = {} 19 | inst = self._makeOne(map) 20 | fd1, fd2 = inst._fds 21 | inst.close() 22 | self.assertRaises(OSError, os.read, fd1, 1) 23 | self.assertRaises(OSError, os.read, fd2, 1) 24 | 25 | def test__physical_pull(self): 26 | map = {} 27 | inst = self._makeOne(map) 28 | inst._physical_pull() 29 | r = os.read(inst._fds[0], 1) 30 | self.assertEqual(r, b"x") 31 | 32 | def test_readable(self): 33 | map = {} 34 | inst = self._makeOne(map) 35 | self.assertEqual(inst.readable(), True) 36 | 37 | def test_writable(self): 38 | map = {} 39 | inst = self._makeOne(map) 40 | self.assertEqual(inst.writable(), False) 41 | 42 | def test_handle_connect(self): 43 | map = {} 44 | inst = self._makeOne(map) 45 | self.assertEqual(inst.handle_connect(), None) 46 | 47 | def test_close(self): 48 | map = {} 49 | inst = self._makeOne(map) 50 | self.assertEqual(inst.close(), None) 51 | self.assertEqual(inst._closed, True) 52 | 53 | def test_handle_close(self): 54 | map = {} 55 | inst = self._makeOne(map) 56 | self.assertEqual(inst.handle_close(), None) 57 | self.assertEqual(inst._closed, True) 58 | 59 | def test_pull_trigger_nothunk(self): 60 | map = {} 61 | inst = self._makeOne(map) 62 | self.assertEqual(inst.pull_trigger(), None) 63 | r = os.read(inst._fds[0], 1) 64 | self.assertEqual(r, b"x") 65 | 66 | def test_pull_trigger_thunk(self): 67 | map = {} 68 | inst = self._makeOne(map) 69 | self.assertEqual(inst.pull_trigger(True), None) 70 | self.assertEqual(len(inst.thunks), 1) 71 | r = os.read(inst._fds[0], 1) 72 | self.assertEqual(r, b"x") 73 | 74 | def test_handle_read_socket_error(self): 75 | map = {} 76 | inst = self._makeOne(map) 77 | result = inst.handle_read() 78 | self.assertEqual(result, None) 79 | 80 | def test_handle_read_no_socket_error(self): 81 | map = {} 82 | inst = self._makeOne(map) 83 | inst.pull_trigger() 84 | result = inst.handle_read() 85 | self.assertEqual(result, None) 86 | 87 | def test_handle_read_thunk(self): 88 | map = {} 89 | inst = self._makeOne(map) 90 | inst.pull_trigger() 91 | L = [] 92 | inst.thunks = [lambda: L.append(True)] 93 | result = inst.handle_read() 94 | self.assertEqual(result, None) 95 | self.assertEqual(L, [True]) 96 | self.assertEqual(inst.thunks, []) 97 | 98 | def test_handle_read_thunk_error(self): 99 | map = {} 100 | inst = self._makeOne(map) 101 | 102 | def errorthunk(): 103 | raise ValueError 104 | 105 | inst.pull_trigger(errorthunk) 106 | L = [] 107 | inst.log_info = lambda *arg: L.append(arg) 108 | result = inst.handle_read() 109 | self.assertEqual(result, None) 110 | self.assertEqual(len(L), 1) 111 | self.assertEqual(inst.thunks, []) 112 | -------------------------------------------------------------------------------- /contributing.md: -------------------------------------------------------------------------------- 1 | Contributing 2 | ============ 3 | 4 | All projects under the Pylons Projects, including this one, follow the guidelines established at [How to 
Contribute](https://pylonsproject.org/community-how-to-contribute.html) and [Coding Style and Standards](https://pylonsproject.org/community-coding-style-standards.html). 5 | 6 | 7 | Get support 8 | ----------- 9 | 10 | See [Get Support](https://pylonsproject.org/community-support.html). You are reading this document most likely because you want to *contribute* to the project and not *get support*. 11 | 12 | 13 | Working on issues 14 | ----------------- 15 | 16 | To respect both your time and ours, we emphasize the following points. 17 | 18 | * We use the [Issue Tracker on GitHub](https://github.com/Pylons/waitress/issues) to discuss bugs, improvements, and feature requests. Search through existing issues before reporting a new one. Issues may be complex or wide-ranging. A discussion up front sets us all on the best path forward. 19 | * Minor issues—such as spelling, grammar, and syntax—don't require discussion and a pull request is sufficient. 20 | * After discussing the issue with maintainers and agreeing on a resolution, submit a pull request of your work. [GitHub Flow](https://guides.github.com/introduction/flow/index.html) describes the workflow process and why it's a good practice. 21 | 22 | 23 | Git branches 24 | ------------ 25 | 26 | There is a single branch [master](https://github.com/Pylons/waitress/) on which development takes place and from which releases to PyPI are tagged. This is the default branch on GitHub. 27 | 28 | 29 | Running tests and building documentation 30 | ---------------------------------------- 31 | 32 | We use [tox](https://tox.readthedocs.io/en/latest/) to automate test running, coverage, and building documentation across all supported Python versions. 33 | 34 | To run everything configured in the `tox.ini` file: 35 | 36 | $ tox 37 | 38 | To run tests on Python 2 and 3, and ensure full coverage, but exclude building of docs: 39 | 40 | $ tox -e py2-cover,py3-cover,coverage 41 | 42 | To build the docs only: 43 | 44 | $ tox -e docs 45 | 46 | See the `tox.ini` file for details. 47 | 48 | 49 | Contributing documentation 50 | -------------------------- 51 | 52 | *Note:* These instructions might not work for Windows users. Suggestions to improve the process for Windows users are welcome by submitting an issue or a pull request. 53 | 54 | 1. Fork the repo on GitHub by clicking the [Fork] button. 55 | 2. Clone your fork into a workspace on your local machine. 56 | 57 | cd ~/projects 58 | git clone git@github.com:/waitress.git 59 | 60 | 3. Add a git remote "upstream" for the cloned fork. 61 | 62 | git remote add upstream git@github.com:Pylons/waitress.git 63 | 64 | 4. Set an environment variable to your virtual environment. 65 | 66 | # Mac and Linux 67 | $ export VENV=~/projects/waitress/env 68 | 69 | # Windows 70 | set VENV=c:\projects\waitress\env 71 | 72 | 5. Try to build the docs in your workspace. 73 | 74 | # Mac and Linux 75 | $ make clean html SPHINXBUILD=$VENV/bin/sphinx-build 76 | 77 | # Windows 78 | c:\> make clean html SPHINXBUILD=%VENV%\bin\sphinx-build 79 | 80 | If successful, then you can make changes to the documentation. You can load the built documentation in the `/_build/html/` directory in a web browser. 81 | 82 | 6. From this point forward, follow the typical [git workflow](https://help.github.com/articles/what-is-a-good-git-workflow/). Start by pulling from the upstream to get the most current changes. 83 | 84 | git pull upstream master 85 | 86 | 7. Make a branch, make changes to the docs, and rebuild them as indicated in step 5. 
To speed up the build process, you can omit `clean` from the above command to rebuild only those pages that depend on the files you have changed. 87 | 88 | 8. Once you are satisfied with your changes and the documentation builds successfully without errors or warnings, then git commit and push them to your "origin" repository on GitHub. 89 | 90 | git commit -m "commit message" 91 | git push -u origin --all # first time only, subsequent can be just 'git push'. 92 | 93 | 9. Create a [pull request](https://help.github.com/articles/using-pull-requests/). 94 | 95 | 10. Repeat the process starting from Step 6. 96 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. _index: 2 | 3 | ======== 4 | Waitress 5 | ======== 6 | 7 | Waitress is meant to be a production-quality pure-Python WSGI server with very 8 | acceptable performance. It has no dependencies except ones which live in the 9 | Python standard library. It runs on CPython on Unix and Windows under Python 10 | 3.7+. It is also known to run on PyPy 3 (python version 3.7+) on UNIX. It 11 | supports HTTP/1.0 and HTTP/1.1. 12 | 13 | 14 | Extended Documentation 15 | ---------------------- 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | 20 | usage 21 | logging 22 | reverse-proxy 23 | design 24 | differences 25 | api 26 | arguments 27 | filewrapper 28 | runner 29 | socket-activation 30 | glossary 31 | 32 | Change History 33 | -------------- 34 | 35 | .. include:: ../CHANGES.txt 36 | .. include:: ../HISTORY.txt 37 | 38 | Known Issues 39 | ------------ 40 | 41 | - Does not support TLS natively. See :ref:`using-behind-a-reverse-proxy` for more information. 42 | 43 | Support and Development 44 | ----------------------- 45 | 46 | The `Pylons Project web site `_ is the main online 47 | source of Waitress support and development information. 48 | 49 | To report bugs, use the `issue tracker 50 | `_. 51 | 52 | If you've got questions that aren't answered by this documentation, 53 | contact the `Pylons-discuss maillist 54 | `_ or join the `#pyramid 55 | IRC channel `_. 56 | 57 | Browse and check out tagged and trunk versions of Waitress via 58 | the `Waitress GitHub repository `_. 59 | To check out the trunk via ``git``, use this command: 60 | 61 | .. code-block:: text 62 | 63 | git clone git@github.com:Pylons/waitress.git 64 | 65 | To find out how to become a contributor to Waitress, please see the guidelines in `contributing.md `_ and `How to Contribute Source Code and Documentation `_. 66 | 67 | Why? 68 | ---- 69 | 70 | At the time of the release of Waitress, there are already many pure-Python 71 | WSGI servers. Why would we need another? 72 | 73 | Waitress is meant to be useful to web framework authors who require broad 74 | platform support. It's neither the fastest nor the fanciest WSGI server 75 | available but using it helps eliminate the N-by-M documentation burden 76 | (e.g. production vs. deployment, Windows vs. Unix, Python 3 vs. Python 2, 77 | PyPy vs. CPython) and resulting user confusion imposed by spotty platform 78 | support of the current (2012-ish) crop of WSGI servers. For example, 79 | ``gunicorn`` is great, but doesn't run on Windows. ``paste.httpserver`` is 80 | perfectly serviceable, but doesn't run under Python 3 and has no dedicated 81 | tests suite that would allow someone who did a Python 3 port to know it 82 | worked after a port was completed. 
``wsgiref`` works fine under most any 83 | Python, but it's a little slow and it's not recommended for production use as 84 | it's single-threaded and has not been audited for security issues. 85 | 86 | At the time of this writing, some existing WSGI servers already claim wide 87 | platform support and have serviceable test suites. The CherryPy WSGI server, 88 | for example, targets Python 2 and Python 3 and it can run on UNIX or Windows. 89 | However, it is not distributed separately from its eponymous web framework, 90 | and requiring a non-CherryPy web framework to depend on the CherryPy web 91 | framework distribution simply for its server component is awkward. The test 92 | suite of the CherryPy server also depends on the CherryPy web framework, so 93 | even if we forked its server component into a separate distribution, we would 94 | have still needed to backfill for all of its tests. The CherryPy team has 95 | started work on `Cheroot `_, which 96 | should solve this problem, however. 97 | 98 | Waitress is a fork of the WSGI-related components which existed in 99 | ``zope.server``. ``zope.server`` had passable framework-independent test 100 | coverage out of the box, and a good bit more coverage was added during the 101 | fork. ``zope.server`` has existed in one form or another since about 2001, 102 | and has seen production usage since then, so Waitress is not exactly 103 | "another" server, it's more a repackaging of an old one that was already 104 | known to work fairly well. 105 | -------------------------------------------------------------------------------- /tests/test_regression.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2005 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Tests for waitress.channel maintenance logic 15 | """ 16 | import doctest 17 | 18 | 19 | class FakeSocket: # pragma: no cover 20 | data = "" 21 | setblocking = lambda *_: None 22 | close = lambda *_: None 23 | 24 | def __init__(self, no): 25 | self.no = no 26 | 27 | def fileno(self): 28 | return self.no 29 | 30 | def getpeername(self): 31 | return ("localhost", self.no) 32 | 33 | def send(self, data): 34 | self.data += data 35 | return len(data) 36 | 37 | def recv(self, data): 38 | return "data" 39 | 40 | 41 | def zombies_test(): 42 | """Regression test for HTTPChannel.maintenance method 43 | 44 | Bug: This method checks for channels that have been "inactive" for a 45 | configured time. The bug was that last_activity is set at creation time 46 | but never updated during async channel activity (reads and writes), so 47 | any channel older than the configured timeout will be closed when a new 48 | channel is created, regardless of activity. 
49 | 50 | >>> import time 51 | >>> import waitress.adjustments 52 | >>> config = waitress.adjustments.Adjustments() 53 | 54 | >>> from waitress.server import HTTPServer 55 | >>> class TestServer(HTTPServer): 56 | ... def bind(self, (ip, port)): 57 | ... print "Listening on %s:%d" % (ip or '*', port) 58 | >>> sb = TestServer('127.0.0.1', 80, start=False, verbose=True) 59 | Listening on 127.0.0.1:80 60 | 61 | First we confirm the correct behavior, where a channel with no activity 62 | for the timeout duration gets closed. 63 | 64 | >>> from waitress.channel import HTTPChannel 65 | >>> socket = FakeSocket(42) 66 | >>> channel = HTTPChannel(sb, socket, ('localhost', 42)) 67 | 68 | >>> channel.connected 69 | True 70 | 71 | >>> channel.last_activity -= int(config.channel_timeout) + 1 72 | 73 | >>> channel.next_channel_cleanup[0] = channel.creation_time - int( 74 | ... config.cleanup_interval) - 1 75 | 76 | >>> socket2 = FakeSocket(7) 77 | >>> channel2 = HTTPChannel(sb, socket2, ('localhost', 7)) 78 | 79 | >>> channel.connected 80 | False 81 | 82 | Write Activity 83 | -------------- 84 | 85 | Now we make sure that if there is activity the channel doesn't get closed 86 | incorrectly. 87 | 88 | >>> channel2.connected 89 | True 90 | 91 | >>> channel2.last_activity -= int(config.channel_timeout) + 1 92 | 93 | >>> channel2.handle_write() 94 | 95 | >>> channel2.next_channel_cleanup[0] = channel2.creation_time - int( 96 | ... config.cleanup_interval) - 1 97 | 98 | >>> socket3 = FakeSocket(3) 99 | >>> channel3 = HTTPChannel(sb, socket3, ('localhost', 3)) 100 | 101 | >>> channel2.connected 102 | True 103 | 104 | Read Activity 105 | -------------- 106 | 107 | We should test to see that read activity will update a channel as well. 108 | 109 | >>> channel3.connected 110 | True 111 | 112 | >>> channel3.last_activity -= int(config.channel_timeout) + 1 113 | 114 | >>> import waitress.parser 115 | >>> channel3.parser_class = ( 116 | ... waitress.parser.HTTPRequestParser) 117 | >>> channel3.handle_read() 118 | 119 | >>> channel3.next_channel_cleanup[0] = channel3.creation_time - int( 120 | ... config.cleanup_interval) - 1 121 | 122 | >>> socket4 = FakeSocket(4) 123 | >>> channel4 = HTTPChannel(sb, socket4, ('localhost', 4)) 124 | 125 | >>> channel3.connected 126 | True 127 | 128 | Main loop window 129 | ---------------- 130 | 131 | There is also a corner case we'll do a shallow test for where a 132 | channel can be closed waiting for the main loop. 133 | 134 | >>> channel4.last_activity -= 1 135 | 136 | >>> last_active = channel4.last_activity 137 | 138 | >>> channel4.set_async() 139 | 140 | >>> channel4.last_activity != last_active 141 | True 142 | """ 143 | 144 | 145 | def test_suite(): 146 | return doctest.DocTestSuite() 147 | -------------------------------------------------------------------------------- /tests/test_utilities.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2002 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 
12 | # 13 | ############################################################################## 14 | 15 | import unittest 16 | 17 | 18 | class Test_parse_http_date(unittest.TestCase): 19 | def _callFUT(self, v): 20 | from waitress.utilities import parse_http_date 21 | 22 | return parse_http_date(v) 23 | 24 | def test_rfc850(self): 25 | val = "Tuesday, 08-Feb-94 14:15:29 GMT" 26 | result = self._callFUT(val) 27 | self.assertEqual(result, 760716929) 28 | 29 | def test_rfc822(self): 30 | val = "Sun, 08 Feb 1994 14:15:29 GMT" 31 | result = self._callFUT(val) 32 | self.assertEqual(result, 760716929) 33 | 34 | def test_neither(self): 35 | val = "" 36 | result = self._callFUT(val) 37 | self.assertEqual(result, 0) 38 | 39 | 40 | class Test_build_http_date(unittest.TestCase): 41 | def test_rountdrip(self): 42 | from time import time 43 | 44 | from waitress.utilities import build_http_date, parse_http_date 45 | 46 | t = int(time()) 47 | self.assertEqual(t, parse_http_date(build_http_date(t))) 48 | 49 | 50 | class Test_unpack_rfc850(unittest.TestCase): 51 | def _callFUT(self, val): 52 | from waitress.utilities import rfc850_reg, unpack_rfc850 53 | 54 | return unpack_rfc850(rfc850_reg.match(val.lower())) 55 | 56 | def test_it(self): 57 | val = "Tuesday, 08-Feb-94 14:15:29 GMT" 58 | result = self._callFUT(val) 59 | self.assertEqual(result, (1994, 2, 8, 14, 15, 29, 0, 0, 0)) 60 | 61 | 62 | class Test_unpack_rfc_822(unittest.TestCase): 63 | def _callFUT(self, val): 64 | from waitress.utilities import rfc822_reg, unpack_rfc822 65 | 66 | return unpack_rfc822(rfc822_reg.match(val.lower())) 67 | 68 | def test_it(self): 69 | val = "Sun, 08 Feb 1994 14:15:29 GMT" 70 | result = self._callFUT(val) 71 | self.assertEqual(result, (1994, 2, 8, 14, 15, 29, 0, 0, 0)) 72 | 73 | 74 | class Test_find_double_newline(unittest.TestCase): 75 | def _callFUT(self, val): 76 | from waitress.utilities import find_double_newline 77 | 78 | return find_double_newline(val) 79 | 80 | def test_empty(self): 81 | self.assertEqual(self._callFUT(b""), -1) 82 | 83 | def test_one_linefeed(self): 84 | self.assertEqual(self._callFUT(b"\n"), -1) 85 | 86 | def test_double_linefeed(self): 87 | self.assertEqual(self._callFUT(b"\n\n"), -1) 88 | 89 | def test_one_crlf(self): 90 | self.assertEqual(self._callFUT(b"\r\n"), -1) 91 | 92 | def test_double_crfl(self): 93 | self.assertEqual(self._callFUT(b"\r\n\r\n"), 4) 94 | 95 | def test_mixed(self): 96 | self.assertEqual(self._callFUT(b"\n\n00\r\n\r\n"), 8) 97 | 98 | 99 | class TestBadRequest(unittest.TestCase): 100 | def _makeOne(self): 101 | from waitress.utilities import BadRequest 102 | 103 | return BadRequest(1) 104 | 105 | def test_it(self): 106 | inst = self._makeOne() 107 | self.assertEqual(inst.body, 1) 108 | 109 | 110 | class Test_undquote(unittest.TestCase): 111 | def _callFUT(self, value): 112 | from waitress.utilities import undquote 113 | 114 | return undquote(value) 115 | 116 | def test_empty(self): 117 | self.assertEqual(self._callFUT(""), "") 118 | 119 | def test_quoted(self): 120 | self.assertEqual(self._callFUT('"test"'), "test") 121 | 122 | def test_unquoted(self): 123 | self.assertEqual(self._callFUT("test"), "test") 124 | 125 | def test_quoted_backslash_quote(self): 126 | self.assertEqual(self._callFUT('"\\""'), '"') 127 | 128 | def test_quoted_htab(self): 129 | self.assertEqual(self._callFUT('"\t"'), "\t") 130 | 131 | def test_quoted_backslash_htab(self): 132 | self.assertEqual(self._callFUT('"\\\t"'), "\t") 133 | 134 | def test_quoted_backslash_invalid(self): 135 | 
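        # In '"\\"' the backslash escapes the closing quote, so the quoted-string is never terminated; undquote should reject it with ValueError.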
self.assertRaises(ValueError, self._callFUT, '"\\"') 136 | 137 | def test_invalid_quoting(self): 138 | self.assertRaises(ValueError, self._callFUT, '"test') 139 | 140 | def test_invalid_quoting_single_quote(self): 141 | self.assertRaises(ValueError, self._callFUT, '"') 142 | -------------------------------------------------------------------------------- /TODO.txt: -------------------------------------------------------------------------------- 1 | - 0.0.0.0 / IPv6. 2 | 3 | - Speed tweaking. 4 | 5 | - Anticipate multivalue and single-value-only headers in request headers in 6 | parser.py. 7 | 8 | - Timeout functests. 9 | 10 | - Complex pipelining functests (with intermediate connection: close). 11 | 12 | - Killthreads support. 13 | 14 | - "TCP segment of a reassembled PDU" in wireshark. 15 | 16 | - Jim F. would like the server to log request start, request queue (to thread 17 | pool), app start, app finish, and request finish (all data has been 18 | flushed to client) events. 19 | 20 | Some challenges exist trying to divine per-request end time. We currently 21 | have the potential for request pipelining; the channel might service more 22 | than one request before it closes. We currently don't preserve any 23 | information about which request a response's data belongs to while flushing 24 | response data from a connection's output buffer. 25 | 26 | While accepting request data from a client, Waitress will obtain N request 27 | bodies and schedule all the requests it receives with the task manager. 28 | For example, if it obtains two request bodies in a single recv() call it 29 | will create two request objects and schedule *both* of these requests to be 30 | serviced by the task manager immediately. 31 | 32 | The task thread manager will service these synchronously: the first request 33 | will be run first, then the second. When the first request runs, it will 34 | push data to the out buffer, then it will end. Then the second request 35 | will run, and push data to the same out buffer, and it will end. While 36 | these requests are executing, the channel from whence they came stops 37 | accepting requests until the previously scheduled requests have actually 38 | been serviced. The request-burdened channel will be *sending* data to the 39 | client while the requests are being serviced, it just won't accept any more 40 | data until existing requests have been serviced. In the meantime, other 41 | channels may still be generating requests and adding tasks to the task 42 | manager. 43 | 44 | To capture request-end time we could create an output buffer per request or 45 | we could keep a dictionary of the final bytestream position of the 46 | outbuffer for each response to to request id; either would be a 47 | straightforward way to capture the fact that a particular request's 48 | response data has been flushed. We currently don't do that though. 
49 | 50 | Here's what we can currently log without changing anything: 51 | 52 | An example of the events logged for a connection that receives two requests 53 | and each request succeeds, and the connection is closed after sending all 54 | data:: 55 | 56 | channel created: channel 1 at time 10 57 | request created: channel 1, request id 1 at time 11 58 | request created: channel 1, request id 2 at time 12 59 | channel requests queued: channel 1, request ids 1,2 at time 13 60 | request started: request id 1 at time 14 61 | request serviced: request id 1 at time 15 62 | request started: request id 2 at time 16 63 | request serviced: request id 2 at time 17 64 | channel closed: channel 1 at time 18 65 | 66 | An example of the events logged for a connection that receives two requests 67 | and the first request fails in such a way that the next request cannot 68 | proceed (content-length header of the first response does not match number 69 | of bytes sent in response to the first request, for example):: 70 | 71 | channel created: channel 1 at time 10 72 | request created: channel 1, request id 1 at time 11 73 | request created: channel 1, request id 2 at time 12 74 | channel requests queued: channel 1, request ids 1,2 at time 13 75 | request started: request id 1 at time 14 76 | request serviced: request id 1 at time 15 77 | request cancelled: request id 2 at time 17 78 | channel closed: channel 1 at time 18 79 | 80 | An example of the events logged for a connection that receives four 81 | requests (which all succeed in generating successful responses) but where 82 | the client waits for the first two responses to send the second two 83 | requests: 84 | 85 | channel created: channel 1 at time 10 86 | request created: channel 1, request id 1 at time 11 87 | request created: channel 1, request id 2 at time 12 88 | channel requests queued: channel 1, request ids 1,2 at time 13 89 | request started: request id 1 at time 14 90 | request serviced: request id 1 at time 15 91 | request started: request id 2 at time 15 92 | request serviced: request id 2 at time 16 93 | request created: channel 1, request id 3 at time 17 94 | request created: channel 1, request id 4 at time 18 95 | channel requests queued: channel 1, request ids 3,4 at time 18 96 | request started: request id 3 at time 19 97 | request serviced: request id 3 at time 20 98 | request started: request id 4 at time 21 99 | request serviced: request id 4 at time 22 100 | channel closed: channel 1 at time 23 101 | -------------------------------------------------------------------------------- /docs/reverse-proxy.rst: -------------------------------------------------------------------------------- 1 | .. index:: reverse, proxy, TLS, SSL, https 2 | 3 | .. _using-behind-a-reverse-proxy: 4 | 5 | ============================ 6 | Using Behind a Reverse Proxy 7 | ============================ 8 | 9 | Often people will set up "pure Python" web servers behind reverse proxies, 10 | especially if they need TLS support (Waitress does not natively support TLS). 11 | Even if you don't need TLS support, it's not uncommon to see Waitress and 12 | other pure-Python web servers set up to only handle requests behind a reverse proxy; 13 | these proxies often have lots of useful deployment knobs. 14 | 15 | If you're using Waitress behind a reverse proxy, you'll almost always want 16 | your reverse proxy to pass along the ``Host`` header sent by the client to 17 | Waitress, in either case, as it will be used by most applications to generate 18 | correct URLs. 
You may also use the proxy headers if passing ``Host`` directly 19 | is not possible, or there are multiple proxies involved. 20 | 21 | For example, when using nginx as a reverse proxy, you might add the following 22 | lines in a ``location`` section. 23 | 24 | .. code-block:: nginx 25 | 26 | proxy_set_header Host $host; 27 | 28 | The Apache directive named ``ProxyPreserveHost`` does something similar when 29 | used as a reverse proxy. 30 | 31 | Unfortunately, even if you pass the ``Host`` header, the Host header does not 32 | contain enough information to regenerate the original URL sent by the client. 33 | For example, if your reverse proxy accepts HTTPS requests (and therefore URLs 34 | which start with ``https://``), the URLs generated by your application when 35 | used behind a reverse proxy served by Waitress might inappropriately be 36 | ``http://foo`` rather than ``https://foo``. To fix this, you'll want to 37 | change the ``wsgi.url_scheme`` in the WSGI environment before it reaches your 38 | application. You can do this in one of two ways: 39 | 40 | 1. You can pass a ``url_scheme`` configuration variable to the 41 | ``waitress.serve`` function. 42 | 43 | 2. You can pass certain well known proxy headers from your proxy server and 44 | use waitress's ``trusted_proxy`` support to automatically configure the 45 | WSGI environment. 46 | 47 | Using ``url_scheme`` to set ``wsgi.url_scheme`` 48 | ----------------------------------------------- 49 | 50 | You can have the Waitress server use the ``https`` url scheme by default: 51 | 52 | .. code-block:: python 53 | 54 | from waitress import serve 55 | serve(wsgiapp, listen='0.0.0.0:8080', url_scheme='https') 56 | 57 | This works if all URLs generated by your application should use the ``https`` 58 | scheme. 59 | 60 | Passing the proxy headers to setup the WSGI environment 61 | ------------------------------------------------------- 62 | 63 | If your proxy accepts both HTTP and HTTPS URLs, and you want your application 64 | to generate the appropriate url based on the incoming scheme, you'll want to 65 | pass waitress ``X-Forwarded-Proto``, however Waitress is also able to update 66 | the environment using ``X-Forwarded-Proto``, ``X-Forwarded-For``, 67 | ``X-Forwarded-Host``, and ``X-Forwarded-Port``:: 68 | 69 | proxy_set_header X-Forwarded-Proto $scheme; 70 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 71 | proxy_set_header X-Forwarded-Host $host:$server_port; 72 | proxy_set_header X-Forwarded-Port $server_port; 73 | 74 | When using Apache, ``mod_proxy`` automatically forwards the following headers:: 75 | 76 | X-Forwarded-For 77 | X-Forwarded-Host 78 | X-Forwarded-Server 79 | 80 | You will also want to add to Apache:: 81 | 82 | RequestHeader set X-Forwarded-Proto https 83 | 84 | Configure waitress's ``trusted_proxy_headers`` as appropriate:: 85 | 86 | trusted_proxy_headers = "x-forwarded-for x-forwarded-host x-forwarded-proto x-forwarded-port" 87 | 88 | At this point waitress will set up the WSGI environment using the information 89 | specified in the trusted proxy headers. This will set up the following 90 | variables:: 91 | 92 | HTTP_HOST 93 | SERVER_NAME 94 | SERVER_PORT 95 | REMOTE_ADDR 96 | REMOTE_PORT (if available) 97 | wsgi.url_scheme 98 | 99 | Waitress also has support for the `Forwarded (RFC7239) HTTP header 100 | `_ which is better defined than the ad-hoc 101 | ``X-Forwarded-*``, however support is not nearly as widespread yet.
102 | ``Forwarded`` supports similar functionality as the different individual 103 | headers, and is mutually exclusive to using the ``X-Forwarded-*`` headers. 104 | 105 | To configure waitress to use the ``Forwarded`` header, set:: 106 | 107 | trusted_proxy_headers = "forwarded" 108 | 109 | .. note:: 110 | 111 | You must also configure the Waitress server's ``trusted_proxy`` to 112 | contain the IP address of the proxy. 113 | 114 | 115 | Using ``url_prefix`` to influence ``SCRIPT_NAME`` and ``PATH_INFO`` 116 | ------------------------------------------------------------------- 117 | 118 | You can have the Waitress server use a particular url prefix by default for all 119 | URLs generated by downstream applications that take ``SCRIPT_NAME`` into 120 | account.: 121 | 122 | .. code-block:: python 123 | 124 | from waitress import serve 125 | serve(wsgiapp, listen='0.0.0.0:8080', url_prefix='/foo') 126 | 127 | Setting this to any value except the empty string will cause the WSGI 128 | ``SCRIPT_NAME`` value to be that value, minus any trailing slashes you add, and 129 | it will cause the ``PATH_INFO`` of any request which is prefixed with this 130 | value to be stripped of the prefix. This is useful in proxying scenarios where 131 | you wish to forward all traffic to a Waitress server but need URLs generated by 132 | downstream applications to be prefixed with a particular path segment. 133 | -------------------------------------------------------------------------------- /CONTRIBUTORS.txt: -------------------------------------------------------------------------------- 1 | Pylons Project Contributor Agreement 2 | ==================================== 3 | 4 | The submitter agrees by adding his or her name within the section below named 5 | "Contributors" and submitting the resulting modified document to the 6 | canonical shared repository location for this software project (whether 7 | directly, as a user with "direct commit access", or via a "pull request"), he 8 | or she is signing a contract electronically. The submitter becomes a 9 | Contributor after a) he or she signs this document by adding their name 10 | beneath the "Contributors" section below, and b) the resulting document is 11 | accepted into the canonical version control repository. 12 | 13 | Treatment of Account 14 | --------------------- 15 | 16 | Contributor will not allow anyone other than the Contributor to use his or 17 | her username or source repository login to submit code to a Pylons Project 18 | source repository. Should Contributor become aware of any such use, 19 | Contributor will immediately notify Agendaless Consulting. 20 | Notification must be performed by sending an email to 21 | webmaster@agendaless.com. Until such notice is received, Contributor will be 22 | presumed to have taken all actions made through Contributor's account. If the 23 | Contributor has direct commit access, Agendaless Consulting will have 24 | complete control and discretion over capabilities assigned to Contributor's 25 | account, and may disable Contributor's account for any reason at any time. 26 | 27 | Legal Effect of Contribution 28 | ---------------------------- 29 | 30 | Upon submitting a change or new work to a Pylons Project source Repository (a 31 | "Contribution"), you agree to assign, and hereby do assign, a one-half 32 | interest of all right, title and interest in and to copyright and other 33 | intellectual property rights with respect to your new and original portions 34 | of the Contribution to Agendaless Consulting. 
You and Agendaless Consulting 35 | each agree that the other shall be free to exercise any and all exclusive 36 | rights in and to the Contribution, without accounting to one another, 37 | including without limitation, the right to license the Contribution to others 38 | under the Repoze Public License. This agreement shall run with title to the 39 | Contribution. Agendaless Consulting does not convey to you any right, title 40 | or interest in or to the Program or such portions of the Contribution that 41 | were taken from the Program. Your transmission of a submission to the Pylons 42 | Project source Repository and marks of identification concerning the 43 | Contribution itself constitute your intent to contribute and your assignment 44 | of the work in accordance with the provisions of this Agreement. 45 | 46 | License Terms 47 | ------------- 48 | 49 | Code committed to the Pylons Project source repository (Committed Code) must 50 | be governed by the Repoze Public License (http://repoze.org/LICENSE.txt, aka 51 | "the RPL") or another license acceptable to Agendaless Consulting. Until 52 | Agendaless Consulting declares in writing an acceptable license other than 53 | the RPL, only the RPL shall be used. A list of exceptions is detailed within 54 | the "Licensing Exceptions" section of this document, if one exists. 55 | 56 | Representations, Warranty, and Indemnification 57 | ---------------------------------------------- 58 | 59 | Contributor represents and warrants that the Committed Code does not violate 60 | the rights of any person or entity, and that the Contributor has legal 61 | authority to enter into this Agreement and legal authority over Contributed 62 | Code. Further, Contributor indemnifies Agendaless Consulting against 63 | violations. 64 | 65 | Cryptography 66 | ------------ 67 | 68 | Contributor understands that cryptographic code may be subject to government 69 | regulations with which Agendaless Consulting and/or entities using Committed 70 | Code must comply. Any code which contains any of the items listed below must 71 | not be checked-in until Agendaless Consulting staff has been notified and has 72 | approved such contribution in writing. 73 | 74 | - Cryptographic capabilities or features 75 | 76 | - Calls to cryptographic features 77 | 78 | - User interface elements which provide context relating to cryptography 79 | 80 | - Code which may, under casual inspection, appear to be cryptographic. 81 | 82 | Notices 83 | ------- 84 | 85 | Contributor confirms that any notices required will be included in any 86 | Committed Code. 87 | 88 | Licensing Exceptions 89 | ==================== 90 | 91 | Code committed within the ``docs/`` subdirectory of the Waitress source 92 | control repository and "docstrings" which appear in the documentation 93 | generated by running "make" within this directory is licensed under the 94 | Creative Commons Attribution-Noncommercial-Share Alike 3.0 United States 95 | License (http://creativecommons.org/licenses/by-nc-sa/3.0/us/). 96 | 97 | List of Contributors 98 | ==================== 99 | 100 | The below-signed are contributors to a code repository that is part of the 101 | project named "Waitress". Each below-signed contributor has read, understand 102 | and agrees to the terms above in the section within this document entitled 103 | "Pylons Project Contributor Agreement" as of the date beside his or her name. 
104 | 105 | Contributors 106 | ------------ 107 | 108 | - Chris McDonough, 2011/12/17 109 | 110 | - Michael Merickel, 2012/01/16 111 | 112 | - Damien Baty, 2012/10/25 113 | 114 | - Georges Dubus, 2012/11/24 115 | 116 | - Tres Seaver, 2013/04/09 117 | 118 | - Tshepang Lekhonkhobe, 2013/04/09 119 | 120 | - Keith Gaughan, 2013/05/11 121 | 122 | - Jamie Matthews, 2013/06/19 123 | 124 | - Adam Groszer, 2013/08/15 125 | 126 | - Matt Russell, 2015/01/14 127 | 128 | - David Glick, 2015/04/13 129 | 130 | - Shane Hathaway, 2015-04-20 131 | 132 | - Steve Piercy, 2015-04-21 133 | 134 | - Ben Warren, 2015-05-17 135 | 136 | - Bert JW Regeer, 2015-09-23 137 | 138 | - Yu Zhou, 2015-09-24 139 | 140 | - Jason Madden, 2016-03-19 141 | 142 | - Atsushi Odagiri, 2017-02-12 143 | 144 | - David D Lowe, 2017-06-02 145 | 146 | - Jack Wearden, 2018-05-18 147 | 148 | - Frank Krick, 2018-10-29 149 | -------------------------------------------------------------------------------- /tests/test_runner.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import os 3 | import sys 4 | 5 | if sys.version_info[:2] == (2, 6): # pragma: no cover 6 | import unittest2 as unittest 7 | else: # pragma: no cover 8 | import unittest 9 | 10 | from waitress import runner 11 | 12 | 13 | class Test_match(unittest.TestCase): 14 | def test_empty(self): 15 | self.assertRaisesRegex( 16 | ValueError, "^Malformed application ''$", runner.match, "" 17 | ) 18 | 19 | def test_module_only(self): 20 | self.assertRaisesRegex( 21 | ValueError, r"^Malformed application 'foo\.bar'$", runner.match, "foo.bar" 22 | ) 23 | 24 | def test_bad_module(self): 25 | self.assertRaisesRegex( 26 | ValueError, 27 | r"^Malformed application 'foo#bar:barney'$", 28 | runner.match, 29 | "foo#bar:barney", 30 | ) 31 | 32 | def test_module_obj(self): 33 | self.assertTupleEqual( 34 | runner.match("foo.bar:fred.barney"), ("foo.bar", "fred.barney") 35 | ) 36 | 37 | 38 | class Test_resolve(unittest.TestCase): 39 | def test_bad_module(self): 40 | self.assertRaises( 41 | ImportError, runner.resolve, "nonexistent", "nonexistent_function" 42 | ) 43 | 44 | def test_nonexistent_function(self): 45 | self.assertRaisesRegex( 46 | AttributeError, 47 | r"has no attribute 'nonexistent_function'", 48 | runner.resolve, 49 | "os.path", 50 | "nonexistent_function", 51 | ) 52 | 53 | def test_simple_happy_path(self): 54 | from os.path import exists 55 | 56 | self.assertIs(runner.resolve("os.path", "exists"), exists) 57 | 58 | def test_complex_happy_path(self): 59 | # Ensure we can recursively resolve object attributes if necessary. 
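        # For example, "exists.__name__" is resolved attribute by attribute: os.path -> exists -> __name__, yielding the string "exists".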
60 | self.assertEqual(runner.resolve("os.path", "exists.__name__"), "exists") 61 | 62 | 63 | class Test_run(unittest.TestCase): 64 | def match_output(self, argv, code, regex): 65 | argv = ["waitress-serve"] + argv 66 | with capture() as captured: 67 | self.assertEqual(runner.run(argv=argv), code) 68 | self.assertRegex(captured.getvalue(), regex) 69 | captured.close() 70 | 71 | def test_bad(self): 72 | self.match_output(["--bad-opt"], 1, "^Error: option --bad-opt not recognized") 73 | 74 | def test_help(self): 75 | self.match_output(["--help"], 0, "^Usage:\n\n waitress-serve") 76 | 77 | def test_no_app(self): 78 | self.match_output([], 1, "^Error: Specify one application only") 79 | 80 | def test_multiple_apps_app(self): 81 | self.match_output(["a:a", "b:b"], 1, "^Error: Specify one application only") 82 | 83 | def test_bad_apps_app(self): 84 | self.match_output(["a"], 1, "^Error: Malformed application 'a'") 85 | 86 | def test_bad_app_module(self): 87 | self.match_output(["nonexistent:a"], 1, "^Error: Bad module 'nonexistent'") 88 | 89 | self.match_output( 90 | ["nonexistent:a"], 91 | 1, 92 | ( 93 | r"There was an exception \((ImportError|ModuleNotFoundError)\) " 94 | "importing your module.\n\nIt had these arguments: \n" 95 | "1. No module named '?nonexistent'?" 96 | ), 97 | ) 98 | 99 | def test_cwd_added_to_path(self): 100 | def null_serve(app, **kw): 101 | pass 102 | 103 | sys_path = sys.path 104 | current_dir = os.getcwd() 105 | try: 106 | os.chdir(os.path.dirname(__file__)) 107 | argv = [ 108 | "waitress-serve", 109 | "fixtureapps.runner:app", 110 | ] 111 | self.assertEqual(runner.run(argv=argv, _serve=null_serve), 0) 112 | finally: 113 | sys.path = sys_path 114 | os.chdir(current_dir) 115 | 116 | def test_bad_app_object(self): 117 | self.match_output( 118 | ["tests.fixtureapps.runner:a"], 1, "^Error: Bad object name 'a'" 119 | ) 120 | 121 | def test_simple_call(self): 122 | from tests.fixtureapps import runner as _apps 123 | 124 | def check_server(app, **kw): 125 | self.assertIs(app, _apps.app) 126 | self.assertDictEqual(kw, {"port": "80"}) 127 | 128 | argv = [ 129 | "waitress-serve", 130 | "--port=80", 131 | "tests.fixtureapps.runner:app", 132 | ] 133 | self.assertEqual(runner.run(argv=argv, _serve=check_server), 0) 134 | 135 | def test_returned_app(self): 136 | from tests.fixtureapps import runner as _apps 137 | 138 | def check_server(app, **kw): 139 | self.assertIs(app, _apps.app) 140 | self.assertDictEqual(kw, {"port": "80"}) 141 | 142 | argv = [ 143 | "waitress-serve", 144 | "--port=80", 145 | "--call", 146 | "tests.fixtureapps.runner:returns_app", 147 | ] 148 | self.assertEqual(runner.run(argv=argv, _serve=check_server), 0) 149 | 150 | 151 | class Test_helper(unittest.TestCase): 152 | def test_exception_logging(self): 153 | from waitress.runner import show_exception 154 | 155 | regex = ( 156 | r"There was an exception \(ImportError\) importing your module." 157 | r"\n\nIt had these arguments: \n1. My reason" 158 | ) 159 | 160 | with capture() as captured: 161 | try: 162 | raise ImportError("My reason") 163 | except ImportError: 164 | self.assertEqual(show_exception(sys.stderr), None) 165 | self.assertRegex(captured.getvalue(), regex) 166 | captured.close() 167 | 168 | regex = ( 169 | r"There was an exception \(ImportError\) importing your module." 170 | r"\n\nIt had no arguments." 
171 | ) 172 | 173 | with capture() as captured: 174 | try: 175 | raise ImportError 176 | except ImportError: 177 | self.assertEqual(show_exception(sys.stderr), None) 178 | self.assertRegex(captured.getvalue(), regex) 179 | captured.close() 180 | 181 | 182 | @contextlib.contextmanager 183 | def capture(): 184 | from io import StringIO 185 | 186 | fd = StringIO() 187 | sys.stdout = fd 188 | sys.stderr = fd 189 | yield fd 190 | sys.stdout = sys.__stdout__ 191 | sys.stderr = sys.__stderr__ 192 | -------------------------------------------------------------------------------- /docs/logging.rst: -------------------------------------------------------------------------------- 1 | .. _access-logging: 2 | 3 | ============== 4 | Access Logging 5 | ============== 6 | 7 | The WSGI design is modular. Waitress logs error conditions, debugging 8 | output, etc., but not web traffic. For web traffic logging, Paste 9 | provides `TransLogger 10 | `_ 11 | :term:`middleware`. TransLogger produces logs in the `Apache Combined 12 | Log Format `_. 13 | 14 | 15 | .. _logging-to-the-console-using-python: 16 | 17 | Logging to the Console Using Python 18 | ----------------------------------- 19 | 20 | ``waitress.serve`` calls ``logging.basicConfig()`` to set up logging to the 21 | console when the server starts up. Assuming no other logging configuration 22 | has already been done, this sets the logging default level to 23 | ``logging.WARNING``. The Waitress logger will inherit the root logger's 24 | level information (it logs at level ``WARNING`` or above). 25 | 26 | Waitress sends its logging output (including application exception 27 | renderings) to the Python logger object named ``waitress``. You can 28 | influence the logger level and output stream using the normal Python 29 | ``logging`` module API. For example: 30 | 31 | .. code-block:: python 32 | 33 | import logging 34 | logger = logging.getLogger('waitress') 35 | logger.setLevel(logging.INFO) 36 | 37 | Within a PasteDeploy configuration file, you can use the normal Python 38 | ``logging`` module ``.ini`` file format to change similar Waitress logging 39 | options. For example: 40 | 41 | .. code-block:: ini 42 | 43 | [logger_waitress] 44 | level = INFO 45 | 46 | 47 | .. _logging-to-the-console-using-pastedeploy: 48 | 49 | Logging to the Console Using PasteDeploy 50 | ---------------------------------------- 51 | 52 | TransLogger will automatically setup a logging handler to the console when called with no arguments. 53 | It "just works" in environments that don't configure logging. 54 | This is by virtue of its default configuration setting of ``setup_console_handler = True``. 55 | 56 | 57 | .. TODO: 58 | .. .. _logging-to-a-file-using-python: 59 | 60 | .. Logging to a File Using Python 61 | .. ------------------------------ 62 | 63 | .. Show how to configure the WSGI logger via python. 64 | 65 | 66 | .. _logging-to-a-file-using-pastedeploy: 67 | 68 | Logging to a File Using PasteDeploy 69 | ------------------------------------ 70 | 71 | TransLogger does not write to files, and the Python logging system 72 | must be configured to do this. The Python class :class:`FileHandler` 73 | logging handler can be used alongside TransLogger to create an 74 | ``access.log`` file similar to Apache's. 75 | 76 | Like any standard :term:`middleware` with a Paste entry point, 77 | TransLogger can be configured to wrap your application using ``.ini`` 78 | file syntax. 
First add a 79 | ``[filter:translogger]`` section, then use a ``[pipeline:main]`` 80 | section file to form a WSGI pipeline with both the translogger and 81 | your application in it. For instance, if you have this: 82 | 83 | .. code-block:: ini 84 | 85 | [app:wsgiapp] 86 | use = egg:mypackage#wsgiapp 87 | 88 | [server:main] 89 | use = egg:waitress#main 90 | host = 127.0.0.1 91 | port = 8080 92 | 93 | Add this: 94 | 95 | .. code-block:: ini 96 | 97 | [filter:translogger] 98 | use = egg:Paste#translogger 99 | setup_console_handler = False 100 | 101 | [pipeline:main] 102 | pipeline = translogger 103 | wsgiapp 104 | 105 | Using PasteDeploy this way to form and serve a pipeline is equivalent to 106 | wrapping your app in a TransLogger instance via the bottom of the ``main`` 107 | function of your project's ``__init__`` file: 108 | 109 | .. code-block:: python 110 | 111 | from mypackage import wsgiapp 112 | from waitress import serve 113 | from paste.translogger import TransLogger 114 | serve(TransLogger(wsgiapp, setup_console_handler=False)) 115 | 116 | .. note:: 117 | TransLogger will automatically set up a logging handler to the console when 118 | called with no arguments, so it "just works" in environments that don't 119 | configure logging. Since our logging handlers are configured, we disable 120 | the automation via ``setup_console_handler = False``. 121 | 122 | With the filter in place, TransLogger's logger (named the ``wsgi`` logger) will 123 | propagate its log messages to the parent logger (the root logger), sending 124 | its output to the console when we request a page: 125 | 126 | .. code-block:: text 127 | 128 | 00:50:53,694 INFO [wsgiapp] Returning: Hello World! 129 | (content-type: text/plain) 130 | 00:50:53,695 INFO [wsgi] 192.168.1.111 - - [11/Aug/2011:20:09:33 -0700] "GET /hello 131 | HTTP/1.1" 404 - "-" 132 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1.6) Gecko/20070725 133 | Firefox/2.0.0.6" 134 | 135 | To direct TransLogger to an ``access.log`` FileHandler, we need the 136 | following to add a FileHandler (named ``accesslog``) to the list of 137 | handlers, and ensure that the ``wsgi`` logger is configured and uses 138 | this handler accordingly: 139 | 140 | .. code-block:: ini 141 | 142 | # Begin logging configuration 143 | 144 | [loggers] 145 | keys = root, wsgiapp, wsgi 146 | 147 | [handlers] 148 | keys = console, accesslog 149 | 150 | [logger_wsgi] 151 | level = INFO 152 | handlers = accesslog 153 | qualname = wsgi 154 | propagate = 0 155 | 156 | [handler_accesslog] 157 | class = FileHandler 158 | args = ('%(here)s/access.log','a') 159 | level = INFO 160 | formatter = generic 161 | 162 | As mentioned above, non-root loggers by default propagate their log records 163 | to the root logger's handlers (currently the console handler). Setting 164 | ``propagate`` to ``0`` (``False``) here disables this; so the ``wsgi`` logger 165 | directs its records only to the ``accesslog`` handler. 166 | 167 | Finally, there's no need to use the ``generic`` formatter with 168 | TransLogger, as TransLogger itself provides all the information we 169 | need. We'll use a formatter that passes-through the log messages as 170 | is. Add a new formatter called ``accesslog`` by including the 171 | following in your configuration file: 172 | 173 | .. 
code-block:: ini 174 | 175 | [formatters] 176 | keys = generic, accesslog 177 | 178 | [formatter_accesslog] 179 | format = %(message)s 180 | 181 | Finally alter the existing configuration to wire this new 182 | ``accesslog`` formatter into the FileHandler: 183 | 184 | .. code-block:: ini 185 | 186 | [handler_accesslog] 187 | class = FileHandler 188 | args = ('%(here)s/access.log','a') 189 | level = INFO 190 | formatter = accesslog 191 | -------------------------------------------------------------------------------- /src/waitress/receiver.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2001, 2002 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Data Chunk Receiver 15 | """ 16 | 17 | from waitress.rfc7230 import CHUNK_EXT_RE, ONLY_HEXDIG_RE 18 | from waitress.utilities import BadRequest, find_double_newline 19 | 20 | 21 | class FixedStreamReceiver: 22 | 23 | # See IStreamConsumer 24 | completed = False 25 | error = None 26 | 27 | def __init__(self, cl, buf): 28 | self.remain = cl 29 | self.buf = buf 30 | 31 | def __len__(self): 32 | return self.buf.__len__() 33 | 34 | def received(self, data): 35 | "See IStreamConsumer" 36 | rm = self.remain 37 | 38 | if rm < 1: 39 | self.completed = True # Avoid any chance of spinning 40 | 41 | return 0 42 | datalen = len(data) 43 | 44 | if rm <= datalen: 45 | self.buf.append(data[:rm]) 46 | self.remain = 0 47 | self.completed = True 48 | 49 | return rm 50 | else: 51 | self.buf.append(data) 52 | self.remain -= datalen 53 | 54 | return datalen 55 | 56 | def getfile(self): 57 | return self.buf.getfile() 58 | 59 | def getbuf(self): 60 | return self.buf 61 | 62 | 63 | class ChunkedReceiver: 64 | 65 | chunk_remainder = 0 66 | validate_chunk_end = False 67 | control_line = b"" 68 | chunk_end = b"" 69 | all_chunks_received = False 70 | trailer = b"" 71 | completed = False 72 | error = None 73 | 74 | # max_control_line = 1024 75 | # max_trailer = 65536 76 | 77 | def __init__(self, buf): 78 | self.buf = buf 79 | 80 | def __len__(self): 81 | return self.buf.__len__() 82 | 83 | def received(self, s): 84 | # Returns the number of bytes consumed. 85 | 86 | if self.completed: 87 | return 0 88 | orig_size = len(s) 89 | 90 | while s: 91 | rm = self.chunk_remainder 92 | 93 | if rm > 0: 94 | # Receive the remainder of a chunk. 
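                # Append at most chunk_remainder bytes of the incoming data to the
                # body buffer; once the remainder reaches zero, the next iteration
                # validates the CRLF that terminates the chunk.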
95 | to_write = s[:rm] 96 | self.buf.append(to_write) 97 | written = len(to_write) 98 | s = s[written:] 99 | 100 | self.chunk_remainder -= written 101 | 102 | if self.chunk_remainder == 0: 103 | self.validate_chunk_end = True 104 | elif self.validate_chunk_end: 105 | s = self.chunk_end + s 106 | 107 | pos = s.find(b"\r\n") 108 | 109 | if pos < 0 and len(s) < 2: 110 | self.chunk_end = s 111 | s = b"" 112 | else: 113 | self.chunk_end = b"" 114 | 115 | if pos == 0: 116 | # Chop off the terminating CR LF from the chunk 117 | s = s[2:] 118 | else: 119 | self.error = BadRequest("Chunk not properly terminated") 120 | self.all_chunks_received = True 121 | 122 | # Always exit this loop 123 | self.validate_chunk_end = False 124 | elif not self.all_chunks_received: 125 | # Receive a control line. 126 | s = self.control_line + s 127 | pos = s.find(b"\r\n") 128 | 129 | if pos < 0: 130 | # Control line not finished. 131 | self.control_line = s 132 | s = b"" 133 | else: 134 | # Control line finished. 135 | line = s[:pos] 136 | s = s[pos + 2 :] 137 | self.control_line = b"" 138 | 139 | if line: 140 | # Begin a new chunk. 141 | semi = line.find(b";") 142 | 143 | if semi >= 0: 144 | extinfo = line[semi:] 145 | valid_ext_info = CHUNK_EXT_RE.match(extinfo) 146 | 147 | if not valid_ext_info: 148 | self.error = BadRequest("Invalid chunk extension") 149 | self.all_chunks_received = True 150 | 151 | break 152 | 153 | line = line[:semi] 154 | 155 | if not ONLY_HEXDIG_RE.match(line): 156 | self.error = BadRequest("Invalid chunk size") 157 | self.all_chunks_received = True 158 | 159 | break 160 | 161 | # Can not fail due to matching against the regular 162 | # expression above 163 | sz = int(line, 16) # hexadecimal 164 | 165 | if sz > 0: 166 | # Start a new chunk. 167 | self.chunk_remainder = sz 168 | else: 169 | # Finished chunks. 170 | self.all_chunks_received = True 171 | # else expect a control line. 172 | else: 173 | # Receive the trailer. 174 | trailer = self.trailer + s 175 | 176 | if trailer.startswith(b"\r\n"): 177 | # No trailer. 178 | self.completed = True 179 | 180 | return orig_size - (len(trailer) - 2) 181 | pos = find_double_newline(trailer) 182 | 183 | if pos < 0: 184 | # Trailer not finished. 185 | self.trailer = trailer 186 | s = b"" 187 | else: 188 | # Finished the trailer. 189 | self.completed = True 190 | self.trailer = trailer[:pos] 191 | 192 | return orig_size - (len(trailer) - pos) 193 | 194 | return orig_size 195 | 196 | def getfile(self): 197 | return self.buf.getfile() 198 | 199 | def getbuf(self): 200 | return self.buf 201 | -------------------------------------------------------------------------------- /docs/runner.rst: -------------------------------------------------------------------------------- 1 | .. _runner: 2 | 3 | waitress-serve 4 | -------------- 5 | 6 | .. versionadded:: 0.8.4 7 | 8 | Waitress comes bundled with a thin command-line wrapper around the ``waitress.serve`` function called ``waitress-serve``. 9 | This is useful for development, and in production situations where serving of static assets is delegated to a reverse proxy, such as nginx or Apache. 10 | 11 | ``waitress-serve`` takes the very same :ref:`arguments ` as the 12 | ``waitress.serve`` function, but where the function's arguments have 13 | underscores, ``waitress-serve`` uses hyphens. 
Thus:: 14 | 15 | import myapp 16 | 17 | waitress.serve(myapp.wsgifunc, port=8041, url_scheme='https') 18 | 19 | Is equivalent to:: 20 | 21 | waitress-serve --port=8041 --url-scheme=https myapp:wsgifunc 22 | 23 | The full argument list is :ref:`given below <invocation>`. 24 | 25 | Boolean arguments are represented by flags. If you wish to explicitly set a 26 | flag, simply use it by its name. Thus the flag:: 27 | 28 | --expose-tracebacks 29 | 30 | Is equivalent to passing ``expose_tracebacks=True`` to ``waitress.serve``. 31 | 32 | All flags have a negative equivalent. These are prefixed with ``no-``; thus 33 | using the flag:: 34 | 35 | --no-expose-tracebacks 36 | 37 | Is equivalent to passing ``expose_tracebacks=False`` to ``waitress.serve``. 38 | 39 | If at any time you want the full argument list, use the ``--help`` flag. 40 | 41 | Applications are specified similarly to PasteDeploy, where the format is 42 | ``myapp.mymodule:wsgifunc``. As some application frameworks use application 43 | objects, you can use dots to resolve attributes like so: 44 | ``myapp.mymodule:appobj.wsgifunc``. 45 | 46 | A number of frameworks, *web.py* being an example, have factory methods on 47 | their application objects that return usable WSGI functions when called. For 48 | cases like these, ``waitress-serve`` has the ``--call`` flag. Thus:: 49 | 50 | waitress-serve --call myapp.mymodule.app.wsgi_factory 51 | 52 | Would load the ``myapp.mymodule`` module, and call ``app.wsgi_factory`` to get 53 | a WSGI application function to be passed to ``waitress.serve``. 54 | 55 | .. note:: 56 | 57 | As of 0.8.6, the current directory is automatically included on 58 | ``sys.path``. 59 | 60 | .. _invocation: 61 | 62 | Invocation 63 | ~~~~~~~~~~ 64 | 65 | Usage:: 66 | 67 | waitress-serve [OPTS] MODULE:OBJECT 68 | 69 | Common options: 70 | 71 | ``--help`` 72 | Show this information. 73 | 74 | ``--call`` 75 | Call the given object to get the WSGI application. 76 | 77 | ``--host=ADDR`` 78 | Hostname or IP address on which to listen, default is '0.0.0.0', 79 | which means "all IP addresses on this host". 80 | 81 | ``--port=PORT`` 82 | TCP port on which to listen, default is '8080' 83 | 84 | ``--listen=host:port`` 85 | Tell waitress to listen on an ip port combination. 86 | 87 | Example: 88 | 89 | --listen=127.0.0.1:8080 90 | --listen=[::1]:8080 91 | --listen=*:8080 92 | 93 | This option may be used multiple times to listen on multiple sockets. 94 | A wildcard for the hostname is also supported and will bind to both 95 | IPv4/IPv6 depending on whether they are enabled or disabled. 96 | 97 | ``--[no-]ipv4`` 98 | Toggle on/off IPv4 support. 99 | 100 | This affects wildcard matching when listening on a wildcard address/port 101 | combination. 102 | 103 | ``--[no-]ipv6`` 104 | Toggle on/off IPv6 support. 105 | 106 | This affects wildcard matching when listening on a wildcard address/port 107 | combination. 108 | 109 | ``--unix-socket=PATH`` 110 | Path of Unix socket. If a socket path is specified, a Unix domain 111 | socket is made instead of the usual inet domain socket. 112 | 113 | Not available on Windows. 114 | 115 | ``--unix-socket-perms=PERMS`` 116 | Octal permissions to use for the Unix domain socket, default is 117 | '600'. 118 | 119 | ``--url-scheme=STR`` 120 | Default ``wsgi.url_scheme`` value, default is 'http'. 121 | 122 | ``--url-prefix=STR`` 123 | The ``SCRIPT_NAME`` WSGI environment value.
Setting this to anything 124 | except the empty string will cause the WSGI ``SCRIPT_NAME`` value to be the 125 | value passed minus any trailing slashes you add, and it will cause the 126 | ``PATH_INFO`` of any request which is prefixed with this value to be 127 | stripped of the prefix. Default is the empty string. 128 | 129 | ``--ident=STR`` 130 | Server identity used in the 'Server' header in responses. Default 131 | is 'waitress'. 132 | 133 | Tuning options: 134 | 135 | ``--threads=INT`` 136 | Number of threads used to process application logic, default is 4. 137 | 138 | ``--backlog=INT`` 139 | Connection backlog for the server. Default is 1024. 140 | 141 | ``--recv-bytes=INT`` 142 | Number of bytes to request when calling ``socket.recv()``. Default is 143 | 8192. 144 | 145 | ``--send-bytes=INT`` 146 | Number of bytes to send to socket.send(). Default is 1. 147 | Multiples of 9000 should avoid partly-filled TCP packets. 148 | 149 | .. deprecated:: 1.3 150 | 151 | ``--outbuf-overflow=INT`` 152 | A temporary file should be created if the pending output is larger than 153 | this. Default is 1048576 (1MB). 154 | 155 | ``--outbuf-high-watermark=INT`` 156 | The app_iter will pause when pending output is larger than this value 157 | and will resume once enough data is written to the socket to fall below 158 | this threshold. Default is 16777216 (16MB). 159 | 160 | ``--inbuf-overflow=INT`` 161 | A temporary file should be created if the pending input is larger than 162 | this. Default is 524288 (512KB). 163 | 164 | ``--connection-limit=INT`` 165 | Stop creating new channels if too many are already active. Default is 166 | 100. 167 | 168 | ``--cleanup-interval=INT`` 169 | Minimum seconds between cleaning up inactive channels. Default is 30. See 170 | ``--channel-timeout``. 171 | 172 | ``--channel-timeout=INT`` 173 | Maximum number of seconds to leave inactive connections open. Default is 174 | 120. 'Inactive' is defined as 'has received no data from the client and has 175 | sent no data to the client'. 176 | 177 | ``--[no-]log-socket-errors`` 178 | Toggle whether premature client disconnect tracebacks ought to be logged. 179 | On by default. 180 | 181 | ``--max-request-header-size=INT`` 182 | Maximum size of all request headers combined. Default is 262144 (256KB). 183 | 184 | ``--max-request-body-size=INT`` 185 | Maximum size of request body. Default is 1073741824 (1GB). 186 | 187 | ``--[no-]expose-tracebacks`` 188 | Toggle whether to expose tracebacks of unhandled exceptions to the client. 189 | Off by default. 190 | 191 | ``--asyncore-loop-timeout=INT`` 192 | The timeout value in seconds passed to ``asyncore.loop()``. Default is 1. 193 | 194 | ``--asyncore-use-poll`` 195 | The use_poll argument passed to ``asyncore.loop()``. Helps overcome open 196 | file descriptors limit. Default is False. 197 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # waitress documentation build configuration file 4 | # 5 | # This file is execfile()d with the current directory set to its containing 6 | # dir. 7 | # 8 | # The contents of this file are pickled, so don't put values in the 9 | # namespace that aren't pickleable (module imports are okay, they're 10 | # removed automatically). 11 | # 12 | # All configuration values have a default value; values that are commented 13 | # out serve to show the default value. 
14 | 15 | # If your extensions are in another directory, add it here. If the 16 | # directory is relative to the documentation root, use os.path.abspath to 17 | # make it absolute, like shown here. 18 | # sys.path.append(os.path.abspath('some/directory')) 19 | 20 | import datetime 21 | import pkg_resources 22 | import pylons_sphinx_themes 23 | 24 | # General configuration 25 | # --------------------- 26 | 27 | # Add any Sphinx extension module names here, as strings. They can be 28 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 29 | extensions = [ 30 | "sphinx.ext.autodoc", 31 | "sphinx.ext.intersphinx", 32 | ] 33 | 34 | intersphinx_mapping = { 35 | "python": ("https://docs.python.org/3/", None), 36 | } 37 | 38 | # Add any paths that contain templates here, relative to this directory. 39 | templates_path = ["_templates"] 40 | 41 | # The suffix of source filenames. 42 | source_suffix = ".rst" 43 | 44 | # The master toctree document. 45 | master_doc = "index" 46 | 47 | # General substitutions. 48 | project = "waitress" 49 | thisyear = datetime.datetime.now().year 50 | copyright = "2012-%s, Agendaless Consulting " % thisyear 51 | 52 | # The default replacements for |version| and |release|, also used in various 53 | # other places throughout the built documents. 54 | # 55 | # The short X.Y version. 56 | version = pkg_resources.get_distribution("waitress").version 57 | # The full version, including alpha/beta/rc tags. 58 | release = version 59 | 60 | # There are two options for replacing |today|: either, you set today to 61 | # some non-false value, then it is used: 62 | # today = '' 63 | # Else, today_fmt is used as the format for a strftime call. 64 | today_fmt = "%B %d, %Y" 65 | 66 | # List of documents that shouldn't be included in the build. 67 | # unused_docs = [] 68 | 69 | # List of directories, relative to source directories, that shouldn't be 70 | # searched for source files. 71 | # exclude_dirs = [] 72 | exclude_patterns = [ 73 | "_themes/README.rst", 74 | ] 75 | 76 | # The reST default role (used for this markup: `text`) to use for all 77 | # documents. 78 | # default_role = None 79 | 80 | # If true, '()' will be appended to :func: etc. cross-reference text. 81 | # add_function_parentheses = True 82 | 83 | # If true, the current module name will be prepended to all description 84 | # unit titles (such as .. function::). 85 | # add_module_names = True 86 | add_module_names = False 87 | 88 | # If true, sectionauthor and moduleauthor directives will be shown in the 89 | # output. They are ignored by default. 90 | # show_authors = False 91 | 92 | # The name of the Pygments (syntax highlighting) style to use. 93 | pygments_style = "sphinx" 94 | 95 | # Do not use smart quotes. 96 | smartquotes = False 97 | 98 | 99 | # Options for HTML output 100 | # ----------------------- 101 | 102 | # Add and use Pylons theme 103 | html_theme = "pylons" 104 | html_theme_path = pylons_sphinx_themes.get_html_themes_path() 105 | html_theme_options = dict(github_url="https://github.com/Pylons/waitress") 106 | 107 | # The style sheet to use for HTML and HTML Help pages. A file of that name 108 | # must exist either in Sphinx' static/ path, or in one of the custom paths 109 | # given in html_static_path. 110 | # html_style = 'repoze.css' 111 | 112 | # The name for this set of Sphinx documents. If None, it defaults to 113 | # " v documentation". 114 | # html_title = None 115 | 116 | # A shorter title for the navigation bar. Default is the same as 117 | # html_title. 
118 | # html_short_title = None 119 | 120 | # The name of an image file (within the static path) to place at the top of 121 | # the sidebar. 122 | # html_logo = '.static/logo_hi.gif' 123 | 124 | # The name of an image file (within the static path) to use as favicon of 125 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 126 | # 32x32 pixels large. 127 | # html_favicon = None 128 | 129 | # Add any paths that contain custom static files (such as style sheets) 130 | # here, relative to this directory. They are copied after the builtin 131 | # static files, so a file named "default.css" will overwrite the builtin 132 | # "default.css". 133 | # html_static_path = ['.static'] 134 | 135 | # If not '', a 'Last updated on:' timestamp is inserted at every page 136 | # bottom, using the given strftime format. 137 | html_last_updated_fmt = "%b %d, %Y" 138 | 139 | # If true, SmartyPants will be used to convert quotes and dashes to 140 | # typographically correct entities. 141 | # html_use_smartypants = True 142 | 143 | # Custom sidebar templates, maps document names to template names. 144 | # html_sidebars = {} 145 | 146 | # Additional templates that should be rendered to pages, maps page names to 147 | # template names. 148 | # html_additional_pages = {} 149 | 150 | # If false, no module index is generated. 151 | # html_use_modindex = True 152 | 153 | # If false, no index is generated. 154 | # html_use_index = True 155 | 156 | # If true, the index is split into individual pages for each letter. 157 | # html_split_index = False 158 | 159 | # If true, the reST sources are included in the HTML build as 160 | # _sources/. 161 | # html_copy_source = True 162 | 163 | # If true, an OpenSearch description file will be output, and all pages 164 | # will contain a tag referring to it. The value of this option must 165 | # be the base URL from which the finished HTML is served. 166 | # html_use_opensearch = '' 167 | 168 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 169 | # html_file_suffix = '' 170 | 171 | # Output file base name for HTML help builder. 172 | htmlhelp_basename = "waitress" 173 | 174 | # Control display of sidebars 175 | html_sidebars = { 176 | "**": [ 177 | "localtoc.html", 178 | "ethicalads.html", 179 | "relations.html", 180 | "sourcelink.html", 181 | "searchbox.html", 182 | ] 183 | } 184 | 185 | # Options for LaTeX output 186 | # ------------------------ 187 | 188 | # The paper size ('letter' or 'a4'). 189 | # latex_paper_size = 'letter' 190 | 191 | # The font size ('10pt', '11pt' or '12pt'). 192 | # latex_font_size = '10pt' 193 | 194 | # Grouping the document tree into LaTeX files. List of tuples 195 | # (source start file, target name, title, 196 | # author, document class [howto/manual]). 197 | latex_documents = [ 198 | ( 199 | "index", 200 | "waitress.tex", 201 | "waitress Documentation", 202 | "Pylons Project Developers", 203 | "manual", 204 | ), 205 | ] 206 | 207 | # The name of an image file (relative to this directory) to place at the 208 | # top of the title page. 209 | # latex_logo = '.static/logo_hi.gif' 210 | 211 | # For "manual" documents, if this is true, then toplevel headings are 212 | # parts, not chapters. 213 | # latex_use_parts = False 214 | 215 | # Additional stuff for the LaTeX preamble. 216 | # latex_preamble = '' 217 | 218 | # Documents to append as an appendix to all manuals. 219 | # latex_appendices = [] 220 | 221 | # If false, no module index is generated. 
222 | # latex_use_modindex = True 223 | -------------------------------------------------------------------------------- /src/waitress/utilities.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2004 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Utility functions 15 | """ 16 | 17 | import calendar 18 | import errno 19 | import logging 20 | import os 21 | import re 22 | import stat 23 | import time 24 | 25 | from .rfc7230 import QUOTED_PAIR_RE, QUOTED_STRING_RE 26 | 27 | logger = logging.getLogger("waitress") 28 | queue_logger = logging.getLogger("waitress.queue") 29 | 30 | 31 | def find_double_newline(s): 32 | """Returns the position just after a double newline in the given string.""" 33 | pos = s.find(b"\r\n\r\n") 34 | 35 | if pos >= 0: 36 | pos += 4 37 | 38 | return pos 39 | 40 | 41 | def concat(*args): 42 | return "".join(args) 43 | 44 | 45 | def join(seq, field=" "): 46 | return field.join(seq) 47 | 48 | 49 | def group(s): 50 | return "(" + s + ")" 51 | 52 | 53 | short_days = ["sun", "mon", "tue", "wed", "thu", "fri", "sat"] 54 | long_days = [ 55 | "sunday", 56 | "monday", 57 | "tuesday", 58 | "wednesday", 59 | "thursday", 60 | "friday", 61 | "saturday", 62 | ] 63 | 64 | short_day_reg = group(join(short_days, "|")) 65 | long_day_reg = group(join(long_days, "|")) 66 | 67 | daymap = {} 68 | 69 | for i in range(7): 70 | daymap[short_days[i]] = i 71 | daymap[long_days[i]] = i 72 | 73 | hms_reg = join(3 * [group("[0-9][0-9]")], ":") 74 | 75 | months = [ 76 | "jan", 77 | "feb", 78 | "mar", 79 | "apr", 80 | "may", 81 | "jun", 82 | "jul", 83 | "aug", 84 | "sep", 85 | "oct", 86 | "nov", 87 | "dec", 88 | ] 89 | 90 | monmap = {} 91 | 92 | for i in range(12): 93 | monmap[months[i]] = i + 1 94 | 95 | months_reg = group(join(months, "|")) 96 | 97 | # From draft-ietf-http-v11-spec-07.txt/3.3.1 98 | # Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 99 | # Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 100 | # Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format 101 | 102 | # rfc822 format 103 | rfc822_date = join( 104 | [ 105 | concat(short_day_reg, ","), # day 106 | group("[0-9][0-9]?"), # date 107 | months_reg, # month 108 | group("[0-9]+"), # year 109 | hms_reg, # hour minute second 110 | "gmt", 111 | ], 112 | " ", 113 | ) 114 | 115 | rfc822_reg = re.compile(rfc822_date) 116 | 117 | 118 | def unpack_rfc822(m): 119 | g = m.group 120 | 121 | return ( 122 | int(g(4)), # year 123 | monmap[g(3)], # month 124 | int(g(2)), # day 125 | int(g(5)), # hour 126 | int(g(6)), # minute 127 | int(g(7)), # second 128 | 0, 129 | 0, 130 | 0, 131 | ) 132 | 133 | 134 | # rfc850 format 135 | rfc850_date = join( 136 | [ 137 | concat(long_day_reg, ","), 138 | join([group("[0-9][0-9]?"), months_reg, group("[0-9]+")], "-"), 139 | hms_reg, 140 | "gmt", 141 | ], 142 | " ", 143 | ) 144 | 145 | rfc850_reg = 
re.compile(rfc850_date) 146 | # they actually unpack the same way 147 | def unpack_rfc850(m): 148 | g = m.group 149 | yr = g(4) 150 | 151 | if len(yr) == 2: 152 | yr = "19" + yr 153 | 154 | return ( 155 | int(yr), # year 156 | monmap[g(3)], # month 157 | int(g(2)), # day 158 | int(g(5)), # hour 159 | int(g(6)), # minute 160 | int(g(7)), # second 161 | 0, 162 | 0, 163 | 0, 164 | ) 165 | 166 | 167 | # parsdate.parsedate - ~700/sec. 168 | # parse_http_date - ~1333/sec. 169 | 170 | weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] 171 | monthname = [ 172 | None, 173 | "Jan", 174 | "Feb", 175 | "Mar", 176 | "Apr", 177 | "May", 178 | "Jun", 179 | "Jul", 180 | "Aug", 181 | "Sep", 182 | "Oct", 183 | "Nov", 184 | "Dec", 185 | ] 186 | 187 | 188 | def build_http_date(when): 189 | year, month, day, hh, mm, ss, wd, y, z = time.gmtime(when) 190 | 191 | return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( 192 | weekdayname[wd], 193 | day, 194 | monthname[month], 195 | year, 196 | hh, 197 | mm, 198 | ss, 199 | ) 200 | 201 | 202 | def parse_http_date(d): 203 | d = d.lower() 204 | m = rfc850_reg.match(d) 205 | 206 | if m and m.end() == len(d): 207 | retval = int(calendar.timegm(unpack_rfc850(m))) 208 | else: 209 | m = rfc822_reg.match(d) 210 | 211 | if m and m.end() == len(d): 212 | retval = int(calendar.timegm(unpack_rfc822(m))) 213 | else: 214 | return 0 215 | 216 | return retval 217 | 218 | 219 | def undquote(value): 220 | if value.startswith('"') and value.endswith('"'): 221 | # So it claims to be DQUOTE'ed, let's validate that 222 | matches = QUOTED_STRING_RE.match(value) 223 | 224 | if matches and matches.end() == len(value): 225 | # Remove the DQUOTE's from the value 226 | value = value[1:-1] 227 | 228 | # Remove all backslashes that are followed by a valid vchar or 229 | # obs-text 230 | value = QUOTED_PAIR_RE.sub(r"\1", value) 231 | 232 | return value 233 | elif not value.startswith('"') and not value.endswith('"'): 234 | return value 235 | 236 | raise ValueError("Invalid quoting in value") 237 | 238 | 239 | def cleanup_unix_socket(path): 240 | try: 241 | st = os.stat(path) 242 | except OSError as exc: 243 | if exc.errno != errno.ENOENT: 244 | raise # pragma: no cover 245 | else: 246 | if stat.S_ISSOCK(st.st_mode): 247 | try: 248 | os.remove(path) 249 | except OSError: # pragma: no cover 250 | # avoid race condition error during tests 251 | pass 252 | 253 | 254 | class Error: 255 | code = 500 256 | reason = "Internal Server Error" 257 | 258 | def __init__(self, body): 259 | self.body = body 260 | 261 | def to_response(self): 262 | status = f"{self.code} {self.reason}" 263 | body = f"{self.reason}\r\n\r\n{self.body}" 264 | tag = "\r\n\r\n(generated by waitress)" 265 | body = (body + tag).encode("utf-8") 266 | headers = [("Content-Type", "text/plain; charset=utf-8")] 267 | 268 | return status, headers, body 269 | 270 | def wsgi_response(self, environ, start_response): 271 | status, headers, body = self.to_response() 272 | start_response(status, headers) 273 | yield body 274 | 275 | 276 | class BadRequest(Error): 277 | code = 400 278 | reason = "Bad Request" 279 | 280 | 281 | class RequestHeaderFieldsTooLarge(BadRequest): 282 | code = 431 283 | reason = "Request Header Fields Too Large" 284 | 285 | 286 | class RequestEntityTooLarge(BadRequest): 287 | code = 413 288 | reason = "Request Entity Too Large" 289 | 290 | 291 | class InternalServerError(Error): 292 | code = 500 293 | reason = "Internal Server Error" 294 | 295 | 296 | class ServerNotImplemented(Error): 297 | code = 501 298 | reason = 
"Not Implemented" 299 | -------------------------------------------------------------------------------- /src/waitress/trigger.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2001-2005 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE 12 | # 13 | ############################################################################## 14 | 15 | import errno 16 | import os 17 | import socket 18 | import threading 19 | 20 | from . import wasyncore 21 | 22 | # Wake up a call to select() running in the main thread. 23 | # 24 | # This is useful in a context where you are using Medusa's I/O 25 | # subsystem to deliver data, but the data is generated by another 26 | # thread. Normally, if Medusa is in the middle of a call to 27 | # select(), new output data generated by another thread will have 28 | # to sit until the call to select() either times out or returns. 29 | # If the trigger is 'pulled' by another thread, it should immediately 30 | # generate a READ event on the trigger object, which will force the 31 | # select() invocation to return. 32 | # 33 | # A common use for this facility: letting Medusa manage I/O for a 34 | # large number of connections; but routing each request through a 35 | # thread chosen from a fixed-size thread pool. When a thread is 36 | # acquired, a transaction is performed, but output data is 37 | # accumulated into buffers that will be emptied more efficiently 38 | # by Medusa. [picture a server that can process database queries 39 | # rapidly, but doesn't want to tie up threads waiting to send data 40 | # to low-bandwidth connections] 41 | # 42 | # The other major feature provided by this class is the ability to 43 | # move work back into the main thread: if you call pull_trigger() 44 | # with a thunk argument, when select() wakes up and receives the 45 | # event it will call your thunk from within that thread. The main 46 | # purpose of this is to remove the need to wrap thread locks around 47 | # Medusa's data structures, which normally do not need them. [To see 48 | # why this is true, imagine this scenario: A thread tries to push some 49 | # new data onto a channel's outgoing data queue at the same time that 50 | # the main thread is trying to remove some] 51 | 52 | 53 | class _triggerbase: 54 | """OS-independent base class for OS-dependent trigger class.""" 55 | 56 | kind = None # subclass must set to "pipe" or "loopback"; used by repr 57 | 58 | def __init__(self): 59 | self._closed = False 60 | 61 | # `lock` protects the `thunks` list from being traversed and 62 | # appended to simultaneously. 63 | self.lock = threading.Lock() 64 | 65 | # List of no-argument callbacks to invoke when the trigger is 66 | # pulled. These run in the thread running the wasyncore mainloop, 67 | # regardless of which thread pulls the trigger. 
68 | self.thunks = [] 69 | 70 | def readable(self): 71 | return True 72 | 73 | def writable(self): 74 | return False 75 | 76 | def handle_connect(self): 77 | pass 78 | 79 | def handle_close(self): 80 | self.close() 81 | 82 | # Override the wasyncore close() method, because it doesn't know about 83 | # (so can't close) all the gimmicks we have open. Subclass must 84 | # supply a _close() method to do platform-specific closing work. _close() 85 | # will be called iff we're not already closed. 86 | def close(self): 87 | if not self._closed: 88 | self._closed = True 89 | self.del_channel() 90 | self._close() # subclass does OS-specific stuff 91 | 92 | def pull_trigger(self, thunk=None): 93 | if thunk: 94 | with self.lock: 95 | self.thunks.append(thunk) 96 | self._physical_pull() 97 | 98 | def handle_read(self): 99 | try: 100 | self.recv(8192) 101 | except OSError: 102 | return 103 | with self.lock: 104 | for thunk in self.thunks: 105 | try: 106 | thunk() 107 | except: 108 | nil, t, v, tbinfo = wasyncore.compact_traceback() 109 | self.log_info(f"exception in trigger thunk: ({t}:{v} {tbinfo})") 110 | self.thunks = [] 111 | 112 | 113 | if os.name == "posix": 114 | 115 | class trigger(_triggerbase, wasyncore.file_dispatcher): 116 | kind = "pipe" 117 | 118 | def __init__(self, map): 119 | _triggerbase.__init__(self) 120 | r, self.trigger = self._fds = os.pipe() 121 | wasyncore.file_dispatcher.__init__(self, r, map=map) 122 | 123 | def _close(self): 124 | for fd in self._fds: 125 | os.close(fd) 126 | self._fds = [] 127 | wasyncore.file_dispatcher.close(self) 128 | 129 | def _physical_pull(self): 130 | os.write(self.trigger, b"x") 131 | 132 | else: # pragma: no cover 133 | # Windows version; uses just sockets, because a pipe isn't select'able 134 | # on Windows. 135 | 136 | class trigger(_triggerbase, wasyncore.dispatcher): 137 | kind = "loopback" 138 | 139 | def __init__(self, map): 140 | _triggerbase.__init__(self) 141 | 142 | # Get a pair of connected sockets. The trigger is the 'w' 143 | # end of the pair, which is connected to 'r'. 'r' is put 144 | # in the wasyncore socket map. "pulling the trigger" then 145 | # means writing something on w, which will wake up r. 146 | 147 | w = socket.socket() 148 | # Disable buffering -- pulling the trigger sends 1 byte, 149 | # and we want that sent immediately, to wake up wasyncore's 150 | # select() ASAP. 151 | w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) 152 | 153 | count = 0 154 | while True: 155 | count += 1 156 | # Bind to a local port; for efficiency, let the OS pick 157 | # a free port for us. 158 | # Unfortunately, stress tests showed that we may not 159 | # be able to connect to that port ("Address already in 160 | # use") despite that the OS picked it. This appears 161 | # to be a race bug in the Windows socket implementation. 162 | # So we loop until a connect() succeeds (almost always 163 | # on the first try). See the long thread at 164 | # http://mail.zope.org/pipermail/zope/2005-July/160433.html 165 | # for hideous details. 166 | a = socket.socket() 167 | a.bind(("127.0.0.1", 0)) 168 | connect_address = a.getsockname() # assigned (host, port) pair 169 | a.listen(1) 170 | try: 171 | w.connect(connect_address) 172 | break # success 173 | except OSError as detail: 174 | if getattr(detail, "winerror", None) != errno.WSAEADDRINUSE: 175 | # "Address already in use" is the only error 176 | # I've seen on two WinXP Pro SP2 boxes, under 177 | # Pythons 2.3.5 and 2.4.1. 
178 | raise 179 | # (10048, 'Address already in use') 180 | # assert count <= 2 # never triggered in Tim's tests 181 | if count >= 10: # I've never seen it go above 2 182 | a.close() 183 | w.close() 184 | raise RuntimeError("Cannot bind trigger!") 185 | # Close `a` and try again. Note: I originally put a short 186 | # sleep() here, but it didn't appear to help or hurt. 187 | a.close() 188 | 189 | r, addr = a.accept() # r becomes wasyncore's (self.)socket 190 | a.close() 191 | self.trigger = w 192 | wasyncore.dispatcher.__init__(self, r, map=map) 193 | 194 | def _close(self): 195 | # self.socket is r, and self.trigger is w, from __init__ 196 | self.socket.close() 197 | self.trigger.close() 198 | 199 | def _physical_pull(self): 200 | self.trigger.send(b"x") 201 | -------------------------------------------------------------------------------- /src/waitress/runner.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2013 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Command line runner. 15 | """ 16 | 17 | 18 | import getopt 19 | import logging 20 | import os 21 | import os.path 22 | import re 23 | import sys 24 | 25 | from waitress import serve 26 | from waitress.adjustments import Adjustments 27 | from waitress.utilities import logger 28 | 29 | HELP = """\ 30 | Usage: 31 | 32 | {0} [OPTS] MODULE:OBJECT 33 | 34 | Standard options: 35 | 36 | --help 37 | Show this information. 38 | 39 | --call 40 | Call the given object to get the WSGI application. 41 | 42 | --host=ADDR 43 | Hostname or IP address on which to listen, default is '0.0.0.0', 44 | which means "all IP addresses on this host". 45 | 46 | Note: May not be used together with --listen 47 | 48 | --port=PORT 49 | TCP port on which to listen, default is '8080' 50 | 51 | Note: May not be used together with --listen 52 | 53 | --listen=ip:port 54 | Tell waitress to listen on an ip port combination. 55 | 56 | Example: 57 | 58 | --listen=127.0.0.1:8080 59 | --listen=[::1]:8080 60 | --listen=*:8080 61 | 62 | This option may be used multiple times to listen on multiple sockets. 63 | A wildcard for the hostname is also supported and will bind to both 64 | IPv4/IPv6 depending on whether they are enabled or disabled. 65 | 66 | --[no-]ipv4 67 | Toggle on/off IPv4 support. 68 | 69 | Example: 70 | 71 | --no-ipv4 72 | 73 | This will disable IPv4 socket support. This affects wildcard matching 74 | when generating the list of sockets. 75 | 76 | --[no-]ipv6 77 | Toggle on/off IPv6 support. 78 | 79 | Example: 80 | 81 | --no-ipv6 82 | 83 | This will turn on IPv6 socket support. This affects wildcard matching 84 | when generating a list of sockets. 85 | 86 | --unix-socket=PATH 87 | Path of Unix socket. If a socket path is specified, a Unix domain 88 | socket is made instead of the usual inet domain socket. 89 | 90 | Not available on Windows. 
91 | 92 | --unix-socket-perms=PERMS 93 | Octal permissions to use for the Unix domain socket, default is 94 | '600'. 95 | 96 | --url-scheme=STR 97 | Default wsgi.url_scheme value, default is 'http'. 98 | 99 | --url-prefix=STR 100 | The ``SCRIPT_NAME`` WSGI environment value. Setting this to anything 101 | except the empty string will cause the WSGI ``SCRIPT_NAME`` value to be 102 | the value passed minus any trailing slashes you add, and it will cause 103 | the ``PATH_INFO`` of any request which is prefixed with this value to 104 | be stripped of the prefix. Default is the empty string. 105 | 106 | --ident=STR 107 | Server identity used in the 'Server' header in responses. Default 108 | is 'waitress'. 109 | 110 | Tuning options: 111 | 112 | --threads=INT 113 | Number of threads used to process application logic, default is 4. 114 | 115 | --backlog=INT 116 | Connection backlog for the server. Default is 1024. 117 | 118 | --recv-bytes=INT 119 | Number of bytes to request when calling socket.recv(). Default is 120 | 8192. 121 | 122 | --send-bytes=INT 123 | Number of bytes to send to socket.send(). Default is 18000. 124 | Multiples of 9000 should avoid partly-filled TCP packets. 125 | 126 | --outbuf-overflow=INT 127 | A temporary file should be created if the pending output is larger 128 | than this. Default is 1048576 (1MB). 129 | 130 | --outbuf-high-watermark=INT 131 | The app_iter will pause when pending output is larger than this value 132 | and will resume once enough data is written to the socket to fall below 133 | this threshold. Default is 16777216 (16MB). 134 | 135 | --inbuf-overflow=INT 136 | A temporary file should be created if the pending input is larger 137 | than this. Default is 524288 (512KB). 138 | 139 | --connection-limit=INT 140 | Stop creating new channels if too many are already active. 141 | Default is 100. 142 | 143 | --cleanup-interval=INT 144 | Minimum seconds between cleaning up inactive channels. Default 145 | is 30. See '--channel-timeout'. 146 | 147 | --channel-timeout=INT 148 | Maximum number of seconds to leave inactive connections open. 149 | Default is 120. 'Inactive' is defined as 'has received no data 150 | from the client and has sent no data to the client'. 151 | 152 | --[no-]log-socket-errors 153 | Toggle whether premature client disconnect tracebacks ought to be 154 | logged. On by default. 155 | 156 | --max-request-header-size=INT 157 | Maximum size of all request headers combined. Default is 262144 158 | (256KB). 159 | 160 | --max-request-body-size=INT 161 | Maximum size of request body. Default is 1073741824 (1GB). 162 | 163 | --[no-]expose-tracebacks 164 | Toggle whether to expose tracebacks of unhandled exceptions to the 165 | client. Off by default. 166 | 167 | --asyncore-loop-timeout=INT 168 | The timeout value in seconds passed to asyncore.loop(). Default is 1. 169 | 170 | --asyncore-use-poll 171 | The use_poll argument passed to ``asyncore.loop()``. Helps overcome 172 | open file descriptors limit. Default is False. 173 | 174 | --channel-request-lookahead=INT 175 | Allows channels to stay readable and buffer more requests up to the 176 | given maximum even if a request is already being processed. This allows 177 | detecting if a client closed the connection while its request is being 178 | processed. Default is 0. 
179 | 180 | """ 181 | 182 | RUNNER_PATTERN = re.compile( 183 | r""" 184 | ^ 185 | (?P<module> 186 | [a-z_][a-z0-9_]*(?:\.[a-z_][a-z0-9_]*)* 187 | ) 188 | : 189 | (?P<object> 190 | [a-z_][a-z0-9_]*(?:\.[a-z_][a-z0-9_]*)* 191 | ) 192 | $ 193 | """, 194 | re.I | re.X, 195 | ) 196 | 197 | 198 | def match(obj_name): 199 | matches = RUNNER_PATTERN.match(obj_name) 200 | if not matches: 201 | raise ValueError(f"Malformed application '{obj_name}'") 202 | return matches.group("module"), matches.group("object") 203 | 204 | 205 | def resolve(module_name, object_name): 206 | """Resolve a named object in a module.""" 207 | # We cast each segment due to an issue that has been found to manifest 208 | # in Python 2.6.6, but not 2.6.8, and may affect other revisions of Python 209 | # 2.6 and 2.7, whereby ``__import__`` chokes if the list passed in the 210 | # ``fromlist`` argument are unicode strings rather than 8-bit strings. 211 | # The error triggered is "TypeError: Item in ``fromlist '' not a string". 212 | # My guess is that this was fixed by checking against ``basestring`` 213 | # rather than ``str`` sometime between the release of 2.6.6 and 2.6.8, 214 | # but I've yet to go over the commits. I know, however, that the NEWS 215 | # file makes no mention of such a change to the behaviour of 216 | # ``__import__``. 217 | segments = [str(segment) for segment in object_name.split(".")] 218 | obj = __import__(module_name, fromlist=segments[:1]) 219 | for segment in segments: 220 | obj = getattr(obj, segment) 221 | return obj 222 | 223 | 224 | def show_help(stream, name, error=None): # pragma: no cover 225 | if error is not None: 226 | print(f"Error: {error}\n", file=stream) 227 | print(HELP.format(name), file=stream) 228 | 229 | 230 | def show_exception(stream): 231 | exc_type, exc_value = sys.exc_info()[:2] 232 | args = getattr(exc_value, "args", None) 233 | print( 234 | ("There was an exception ({}) importing your module.\n").format( 235 | exc_type.__name__, 236 | ), 237 | file=stream, 238 | ) 239 | if args: 240 | print("It had these arguments: ", file=stream) 241 | for idx, arg in enumerate(args, start=1): 242 | print(f"{idx}. {arg}\n", file=stream) 243 | else: 244 | print("It had no arguments.", file=stream) 245 | 246 | 247 | def run(argv=sys.argv, _serve=serve): 248 | """Command line runner.""" 249 | name = os.path.basename(argv[0]) 250 | 251 | try: 252 | kw, args = Adjustments.parse_args(argv[1:]) 253 | except getopt.GetoptError as exc: 254 | show_help(sys.stderr, name, str(exc)) 255 | return 1 256 | 257 | if kw["help"]: 258 | show_help(sys.stdout, name) 259 | return 0 260 | 261 | if len(args) != 1: 262 | show_help(sys.stderr, name, "Specify one application only") 263 | return 1 264 | 265 | # set a default level for the logger only if it hasn't been set explicitly 266 | # note that this level does not override any parent logger levels, 267 | # handlers, etc but without it no log messages are emitted by default 268 | if logger.level == logging.NOTSET: 269 | logger.setLevel(logging.INFO) 270 | 271 | try: 272 | module, obj_name = match(args[0]) 273 | except ValueError as exc: 274 | show_help(sys.stderr, name, str(exc)) 275 | show_exception(sys.stderr) 276 | return 1 277 | 278 | # Add the current directory onto sys.path 279 | sys.path.append(os.getcwd()) 280 | 281 | # Get the WSGI function.
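# The positional argument was split by match() above into a module name and a
# dotted object path, e.g. "myapp.mymodule:appobj.wsgifunc" becomes
# ("myapp.mymodule", "appobj.wsgifunc"); resolve() then imports the module and
# walks the dotted attributes, and --call (handled a few lines below) invokes
# the resolved object to obtain the WSGI callable.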
282 | try: 283 | app = resolve(module, obj_name) 284 | except ImportError: 285 | show_help(sys.stderr, name, f"Bad module '{module}'") 286 | show_exception(sys.stderr) 287 | return 1 288 | except AttributeError: 289 | show_help(sys.stderr, name, f"Bad object name '{obj_name}'") 290 | show_exception(sys.stderr) 291 | return 1 292 | if kw["call"]: 293 | app = app() 294 | 295 | # These arguments are specific to the runner, not waitress itself. 296 | del kw["call"], kw["help"] 297 | 298 | _serve(app, **kw) 299 | return 0 300 | -------------------------------------------------------------------------------- /tests/test_receiver.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import pytest 4 | 5 | 6 | class TestFixedStreamReceiver(unittest.TestCase): 7 | def _makeOne(self, cl, buf): 8 | from waitress.receiver import FixedStreamReceiver 9 | 10 | return FixedStreamReceiver(cl, buf) 11 | 12 | def test_received_remain_lt_1(self): 13 | buf = DummyBuffer() 14 | inst = self._makeOne(0, buf) 15 | result = inst.received("a") 16 | self.assertEqual(result, 0) 17 | self.assertEqual(inst.completed, True) 18 | 19 | def test_received_remain_lte_datalen(self): 20 | buf = DummyBuffer() 21 | inst = self._makeOne(1, buf) 22 | result = inst.received("aa") 23 | self.assertEqual(result, 1) 24 | self.assertEqual(inst.completed, True) 25 | self.assertEqual(inst.completed, 1) 26 | self.assertEqual(inst.remain, 0) 27 | self.assertEqual(buf.data, ["a"]) 28 | 29 | def test_received_remain_gt_datalen(self): 30 | buf = DummyBuffer() 31 | inst = self._makeOne(10, buf) 32 | result = inst.received("aa") 33 | self.assertEqual(result, 2) 34 | self.assertEqual(inst.completed, False) 35 | self.assertEqual(inst.remain, 8) 36 | self.assertEqual(buf.data, ["aa"]) 37 | 38 | def test_getfile(self): 39 | buf = DummyBuffer() 40 | inst = self._makeOne(10, buf) 41 | self.assertEqual(inst.getfile(), buf) 42 | 43 | def test_getbuf(self): 44 | buf = DummyBuffer() 45 | inst = self._makeOne(10, buf) 46 | self.assertEqual(inst.getbuf(), buf) 47 | 48 | def test___len__(self): 49 | buf = DummyBuffer(["1", "2"]) 50 | inst = self._makeOne(10, buf) 51 | self.assertEqual(inst.__len__(), 2) 52 | 53 | 54 | class TestChunkedReceiver(unittest.TestCase): 55 | def _makeOne(self, buf): 56 | from waitress.receiver import ChunkedReceiver 57 | 58 | return ChunkedReceiver(buf) 59 | 60 | def test_alreadycompleted(self): 61 | buf = DummyBuffer() 62 | inst = self._makeOne(buf) 63 | inst.completed = True 64 | result = inst.received(b"a") 65 | self.assertEqual(result, 0) 66 | self.assertEqual(inst.completed, True) 67 | 68 | def test_received_remain_gt_zero(self): 69 | buf = DummyBuffer() 70 | inst = self._makeOne(buf) 71 | inst.chunk_remainder = 100 72 | result = inst.received(b"a") 73 | self.assertEqual(inst.chunk_remainder, 99) 74 | self.assertEqual(result, 1) 75 | self.assertEqual(inst.completed, False) 76 | 77 | def test_received_control_line_notfinished(self): 78 | buf = DummyBuffer() 79 | inst = self._makeOne(buf) 80 | result = inst.received(b"a") 81 | self.assertEqual(inst.control_line, b"a") 82 | self.assertEqual(result, 1) 83 | self.assertEqual(inst.completed, False) 84 | 85 | def test_received_control_line_finished_garbage_in_input(self): 86 | buf = DummyBuffer() 87 | inst = self._makeOne(buf) 88 | result = inst.received(b"garbage\r\n") 89 | self.assertEqual(result, 9) 90 | self.assertTrue(inst.error) 91 | 92 | def test_received_control_line_finished_all_chunks_not_received(self): 93 | buf = 
DummyBuffer() 94 | inst = self._makeOne(buf) 95 | result = inst.received(b"a;discard\r\n") 96 | self.assertEqual(inst.control_line, b"") 97 | self.assertEqual(inst.chunk_remainder, 10) 98 | self.assertEqual(inst.all_chunks_received, False) 99 | self.assertEqual(result, 11) 100 | self.assertEqual(inst.completed, False) 101 | 102 | def test_received_control_line_finished_all_chunks_received(self): 103 | buf = DummyBuffer() 104 | inst = self._makeOne(buf) 105 | result = inst.received(b"0;discard\r\n") 106 | self.assertEqual(inst.control_line, b"") 107 | self.assertEqual(inst.all_chunks_received, True) 108 | self.assertEqual(result, 11) 109 | self.assertEqual(inst.completed, False) 110 | 111 | def test_received_trailer_startswith_crlf(self): 112 | buf = DummyBuffer() 113 | inst = self._makeOne(buf) 114 | inst.all_chunks_received = True 115 | result = inst.received(b"\r\n") 116 | self.assertEqual(result, 2) 117 | self.assertEqual(inst.completed, True) 118 | 119 | def test_received_trailer_startswith_lf(self): 120 | buf = DummyBuffer() 121 | inst = self._makeOne(buf) 122 | inst.all_chunks_received = True 123 | result = inst.received(b"\n") 124 | self.assertEqual(result, 1) 125 | self.assertEqual(inst.completed, False) 126 | 127 | def test_received_trailer_not_finished(self): 128 | buf = DummyBuffer() 129 | inst = self._makeOne(buf) 130 | inst.all_chunks_received = True 131 | result = inst.received(b"a") 132 | self.assertEqual(result, 1) 133 | self.assertEqual(inst.completed, False) 134 | 135 | def test_received_trailer_finished(self): 136 | buf = DummyBuffer() 137 | inst = self._makeOne(buf) 138 | inst.all_chunks_received = True 139 | result = inst.received(b"abc\r\n\r\n") 140 | self.assertEqual(inst.trailer, b"abc\r\n\r\n") 141 | self.assertEqual(result, 7) 142 | self.assertEqual(inst.completed, True) 143 | 144 | def test_getfile(self): 145 | buf = DummyBuffer() 146 | inst = self._makeOne(buf) 147 | self.assertEqual(inst.getfile(), buf) 148 | 149 | def test_getbuf(self): 150 | buf = DummyBuffer() 151 | inst = self._makeOne(buf) 152 | self.assertEqual(inst.getbuf(), buf) 153 | 154 | def test___len__(self): 155 | buf = DummyBuffer(["1", "2"]) 156 | inst = self._makeOne(buf) 157 | self.assertEqual(inst.__len__(), 2) 158 | 159 | def test_received_chunk_is_properly_terminated(self): 160 | buf = DummyBuffer() 161 | inst = self._makeOne(buf) 162 | data = b"4\r\nWiki\r\n" 163 | result = inst.received(data) 164 | self.assertEqual(result, len(data)) 165 | self.assertEqual(inst.completed, False) 166 | self.assertEqual(buf.data[0], b"Wiki") 167 | 168 | def test_received_chunk_not_properly_terminated(self): 169 | from waitress.utilities import BadRequest 170 | 171 | buf = DummyBuffer() 172 | inst = self._makeOne(buf) 173 | data = b"4\r\nWikibadchunk\r\n" 174 | result = inst.received(data) 175 | self.assertEqual(result, len(data)) 176 | self.assertEqual(inst.completed, False) 177 | self.assertEqual(buf.data[0], b"Wiki") 178 | self.assertEqual(inst.error.__class__, BadRequest) 179 | 180 | def test_received_multiple_chunks(self): 181 | from waitress.utilities import BadRequest 182 | 183 | buf = DummyBuffer() 184 | inst = self._makeOne(buf) 185 | data = ( 186 | b"4\r\n" 187 | b"Wiki\r\n" 188 | b"5\r\n" 189 | b"pedia\r\n" 190 | b"E\r\n" 191 | b" in\r\n" 192 | b"\r\n" 193 | b"chunks.\r\n" 194 | b"0\r\n" 195 | b"\r\n" 196 | ) 197 | result = inst.received(data) 198 | self.assertEqual(result, len(data)) 199 | self.assertEqual(inst.completed, True) 200 | self.assertEqual(b"".join(buf.data), b"Wikipedia 
in\r\n\r\nchunks.") 201 | self.assertEqual(inst.error, None) 202 | 203 | def test_received_multiple_chunks_split(self): 204 | from waitress.utilities import BadRequest 205 | 206 | buf = DummyBuffer() 207 | inst = self._makeOne(buf) 208 | data1 = b"4\r\nWiki\r" 209 | result = inst.received(data1) 210 | self.assertEqual(result, len(data1)) 211 | 212 | data2 = ( 213 | b"\n5\r\n" 214 | b"pedia\r\n" 215 | b"E\r\n" 216 | b" in\r\n" 217 | b"\r\n" 218 | b"chunks.\r\n" 219 | b"0\r\n" 220 | b"\r\n" 221 | ) 222 | 223 | result = inst.received(data2) 224 | self.assertEqual(result, len(data2)) 225 | 226 | self.assertEqual(inst.completed, True) 227 | self.assertEqual(b"".join(buf.data), b"Wikipedia in\r\n\r\nchunks.") 228 | self.assertEqual(inst.error, None) 229 | 230 | 231 | class TestChunkedReceiverParametrized: 232 | def _makeOne(self, buf): 233 | from waitress.receiver import ChunkedReceiver 234 | 235 | return ChunkedReceiver(buf) 236 | 237 | @pytest.mark.parametrize( 238 | "invalid_extension", [b"\n", b"invalid=", b"\r", b"invalid = true"] 239 | ) 240 | def test_received_invalid_extensions(self, invalid_extension): 241 | from waitress.utilities import BadRequest 242 | 243 | buf = DummyBuffer() 244 | inst = self._makeOne(buf) 245 | data = b"4;" + invalid_extension + b"\r\ntest\r\n" 246 | result = inst.received(data) 247 | assert result == len(data) 248 | assert inst.error.__class__ == BadRequest 249 | assert inst.error.body == "Invalid chunk extension" 250 | 251 | @pytest.mark.parametrize( 252 | "valid_extension", [b"test", b"valid=true", b"valid=true;other=true"] 253 | ) 254 | def test_received_valid_extensions(self, valid_extension): 255 | # While waitress may ignore extensions in Chunked Encoding, we do want 256 | # to make sure that we don't fail when we do encounter one that is 257 | # valid 258 | buf = DummyBuffer() 259 | inst = self._makeOne(buf) 260 | data = b"4;" + valid_extension + b"\r\ntest\r\n" 261 | result = inst.received(data) 262 | assert result == len(data) 263 | assert inst.error == None 264 | 265 | @pytest.mark.parametrize( 266 | "invalid_size", [b"0x04", b"+0x04", b"x04", b"+04", b" 04", b" 0x04"] 267 | ) 268 | def test_received_invalid_size(self, invalid_size): 269 | from waitress.utilities import BadRequest 270 | 271 | buf = DummyBuffer() 272 | inst = self._makeOne(buf) 273 | data = invalid_size + b"\r\ntest\r\n" 274 | result = inst.received(data) 275 | assert result == len(data) 276 | assert inst.error.__class__ == BadRequest 277 | assert inst.error.body == "Invalid chunk size" 278 | 279 | 280 | class DummyBuffer: 281 | def __init__(self, data=None): 282 | if data is None: 283 | data = [] 284 | self.data = data 285 | 286 | def append(self, s): 287 | self.data.append(s) 288 | 289 | def getfile(self): 290 | return self 291 | 292 | def __len__(self): 293 | return len(self.data) 294 | -------------------------------------------------------------------------------- /src/waitress/buffers.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2001-2004 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 
8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Buffers 15 | """ 16 | from io import BytesIO 17 | 18 | # copy_bytes controls the size of temp. strings for shuffling data around. 19 | COPY_BYTES = 1 << 18 # 256K 20 | 21 | # The maximum number of bytes to buffer in a simple string. 22 | STRBUF_LIMIT = 8192 23 | 24 | 25 | class FileBasedBuffer: 26 | 27 | remain = 0 28 | 29 | def __init__(self, file, from_buffer=None): 30 | self.file = file 31 | if from_buffer is not None: 32 | from_file = from_buffer.getfile() 33 | read_pos = from_file.tell() 34 | from_file.seek(0) 35 | while True: 36 | data = from_file.read(COPY_BYTES) 37 | if not data: 38 | break 39 | file.write(data) 40 | self.remain = int(file.tell() - read_pos) 41 | from_file.seek(read_pos) 42 | file.seek(read_pos) 43 | 44 | def __len__(self): 45 | return self.remain 46 | 47 | def __nonzero__(self): 48 | return True 49 | 50 | __bool__ = __nonzero__ # py3 51 | 52 | def append(self, s): 53 | file = self.file 54 | read_pos = file.tell() 55 | file.seek(0, 2) 56 | file.write(s) 57 | file.seek(read_pos) 58 | self.remain = self.remain + len(s) 59 | 60 | def get(self, numbytes=-1, skip=False): 61 | file = self.file 62 | if not skip: 63 | read_pos = file.tell() 64 | if numbytes < 0: 65 | # Read all 66 | res = file.read() 67 | else: 68 | res = file.read(numbytes) 69 | if skip: 70 | self.remain -= len(res) 71 | else: 72 | file.seek(read_pos) 73 | return res 74 | 75 | def skip(self, numbytes, allow_prune=0): 76 | if self.remain < numbytes: 77 | raise ValueError( 78 | "Can't skip %d bytes in buffer of %d bytes" % (numbytes, self.remain) 79 | ) 80 | self.file.seek(numbytes, 1) 81 | self.remain = self.remain - numbytes 82 | 83 | def newfile(self): 84 | raise NotImplementedError() 85 | 86 | def prune(self): 87 | file = self.file 88 | if self.remain == 0: 89 | read_pos = file.tell() 90 | file.seek(0, 2) 91 | sz = file.tell() 92 | file.seek(read_pos) 93 | if sz == 0: 94 | # Nothing to prune. 95 | return 96 | nf = self.newfile() 97 | while True: 98 | data = file.read(COPY_BYTES) 99 | if not data: 100 | break 101 | nf.write(data) 102 | self.file = nf 103 | 104 | def getfile(self): 105 | return self.file 106 | 107 | def close(self): 108 | if hasattr(self.file, "close"): 109 | self.file.close() 110 | self.remain = 0 111 | 112 | 113 | class TempfileBasedBuffer(FileBasedBuffer): 114 | def __init__(self, from_buffer=None): 115 | FileBasedBuffer.__init__(self, self.newfile(), from_buffer) 116 | 117 | def newfile(self): 118 | from tempfile import TemporaryFile 119 | 120 | return TemporaryFile("w+b") 121 | 122 | 123 | class BytesIOBasedBuffer(FileBasedBuffer): 124 | def __init__(self, from_buffer=None): 125 | if from_buffer is not None: 126 | FileBasedBuffer.__init__(self, BytesIO(), from_buffer) 127 | else: 128 | # Shortcut. 
:-) 129 | self.file = BytesIO() 130 | 131 | def newfile(self): 132 | return BytesIO() 133 | 134 | 135 | def _is_seekable(fp): 136 | if hasattr(fp, "seekable"): 137 | return fp.seekable() 138 | return hasattr(fp, "seek") and hasattr(fp, "tell") 139 | 140 | 141 | class ReadOnlyFileBasedBuffer(FileBasedBuffer): 142 | # used as wsgi.file_wrapper 143 | 144 | def __init__(self, file, block_size=32768): 145 | self.file = file 146 | self.block_size = block_size # for __iter__ 147 | 148 | # This is for the benefit of anyone that is attempting to wrap this 149 | # wsgi.file_wrapper in a WSGI middleware and wants to seek, this is 150 | # useful for instance for support Range requests 151 | if _is_seekable(self.file): 152 | if hasattr(self.file, "seekable"): 153 | self.seekable = self.file.seekable 154 | 155 | self.seek = self.file.seek 156 | self.tell = self.file.tell 157 | 158 | def prepare(self, size=None): 159 | if _is_seekable(self.file): 160 | start_pos = self.file.tell() 161 | self.file.seek(0, 2) 162 | end_pos = self.file.tell() 163 | self.file.seek(start_pos) 164 | fsize = end_pos - start_pos 165 | if size is None: 166 | self.remain = fsize 167 | else: 168 | self.remain = min(fsize, size) 169 | return self.remain 170 | 171 | def get(self, numbytes=-1, skip=False): 172 | # never read more than self.remain (it can be user-specified) 173 | if numbytes == -1 or numbytes > self.remain: 174 | numbytes = self.remain 175 | file = self.file 176 | if not skip: 177 | read_pos = file.tell() 178 | res = file.read(numbytes) 179 | if skip: 180 | self.remain -= len(res) 181 | else: 182 | file.seek(read_pos) 183 | return res 184 | 185 | def __iter__(self): # called by task if self.filelike has no seek/tell 186 | return self 187 | 188 | def next(self): 189 | val = self.file.read(self.block_size) 190 | if not val: 191 | raise StopIteration 192 | return val 193 | 194 | __next__ = next # py3 195 | 196 | def append(self, s): 197 | raise NotImplementedError 198 | 199 | 200 | class OverflowableBuffer: 201 | """ 202 | This buffer implementation has four stages: 203 | - No data 204 | - Bytes-based buffer 205 | - BytesIO-based buffer 206 | - Temporary file storage 207 | The first two stages are fastest for simple transfers. 208 | """ 209 | 210 | overflowed = False 211 | buf = None 212 | strbuf = b"" # Bytes-based buffer. 213 | 214 | def __init__(self, overflow): 215 | # overflow is the maximum to be stored in a StringIO buffer. 
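# Appended data first accumulates in the plain bytes string ``strbuf`` (up to
# STRBUF_LIMIT), then moves into a BytesIO-backed buffer, and finally spills
# to a temporary file via _set_large_buffer() once the pending data reaches
# ``overflow`` bytes (see append() and _create_buffer() below).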
216 | self.overflow = overflow 217 | 218 | def __len__(self): 219 | buf = self.buf 220 | if buf is not None: 221 | # use buf.__len__ rather than len(buf) FBO of not getting 222 | # OverflowError on Python 2 223 | return buf.__len__() 224 | else: 225 | return self.strbuf.__len__() 226 | 227 | def __nonzero__(self): 228 | # use self.__len__ rather than len(self) FBO of not getting 229 | # OverflowError on Python 2 230 | return self.__len__() > 0 231 | 232 | __bool__ = __nonzero__ # py3 233 | 234 | def _create_buffer(self): 235 | strbuf = self.strbuf 236 | if len(strbuf) >= self.overflow: 237 | self._set_large_buffer() 238 | else: 239 | self._set_small_buffer() 240 | buf = self.buf 241 | if strbuf: 242 | buf.append(self.strbuf) 243 | self.strbuf = b"" 244 | return buf 245 | 246 | def _set_small_buffer(self): 247 | oldbuf = self.buf 248 | self.buf = BytesIOBasedBuffer(oldbuf) 249 | 250 | # Attempt to close the old buffer 251 | if hasattr(oldbuf, "close"): 252 | oldbuf.close() 253 | 254 | self.overflowed = False 255 | 256 | def _set_large_buffer(self): 257 | oldbuf = self.buf 258 | self.buf = TempfileBasedBuffer(oldbuf) 259 | 260 | # Attempt to close the old buffer 261 | if hasattr(oldbuf, "close"): 262 | oldbuf.close() 263 | 264 | self.overflowed = True 265 | 266 | def append(self, s): 267 | buf = self.buf 268 | if buf is None: 269 | strbuf = self.strbuf 270 | if len(strbuf) + len(s) < STRBUF_LIMIT: 271 | self.strbuf = strbuf + s 272 | return 273 | buf = self._create_buffer() 274 | buf.append(s) 275 | # use buf.__len__ rather than len(buf) FBO of not getting 276 | # OverflowError on Python 2 277 | sz = buf.__len__() 278 | if not self.overflowed: 279 | if sz >= self.overflow: 280 | self._set_large_buffer() 281 | 282 | def get(self, numbytes=-1, skip=False): 283 | buf = self.buf 284 | if buf is None: 285 | strbuf = self.strbuf 286 | if not skip: 287 | return strbuf 288 | buf = self._create_buffer() 289 | return buf.get(numbytes, skip) 290 | 291 | def skip(self, numbytes, allow_prune=False): 292 | buf = self.buf 293 | if buf is None: 294 | if allow_prune and numbytes == len(self.strbuf): 295 | # We could slice instead of converting to 296 | # a buffer, but that would eat up memory in 297 | # large transfers. 298 | self.strbuf = b"" 299 | return 300 | buf = self._create_buffer() 301 | buf.skip(numbytes, allow_prune) 302 | 303 | def prune(self): 304 | """ 305 | A potentially expensive operation that removes all data 306 | already retrieved from the buffer. 307 | """ 308 | buf = self.buf 309 | if buf is None: 310 | self.strbuf = b"" 311 | return 312 | buf.prune() 313 | if self.overflowed: 314 | # use buf.__len__ rather than len(buf) FBO of not getting 315 | # OverflowError on Python 2 316 | sz = buf.__len__() 317 | if sz < self.overflow: 318 | # Revert to a faster buffer. 319 | self._set_small_buffer() 320 | 321 | def getfile(self): 322 | buf = self.buf 323 | if buf is None: 324 | buf = self._create_buffer() 325 | return buf.getfile() 326 | 327 | def close(self): 328 | buf = self.buf 329 | if buf is not None: 330 | buf.close() 331 | -------------------------------------------------------------------------------- /docs/arguments.rst: -------------------------------------------------------------------------------- 1 | .. 
_arguments: 2 | 3 | Arguments to ``waitress.serve`` 4 | ------------------------------- 5 | 6 | Here are the arguments you can pass to the ``waitress.serve`` function or use 7 | in :term:`PasteDeploy` configuration (interchangeably): 8 | 9 | host 10 | Hostname or IP address (string) on which to listen, default ``0.0.0.0``, 11 | which means "all IP addresses on this host". 12 | 13 | .. warning:: 14 | May not be used with ``listen`` 15 | 16 | port 17 | TCP port (integer) on which to listen, default ``8080``. 18 | 19 | .. warning:: 20 | May not be used with ``listen`` 21 | 22 | listen 23 | Tell waitress to listen on combinations of ``host:port`` arguments. 24 | Combinations should be a quoted, space-delimited list, as in the following examples. 25 | 26 | .. code-block:: python 27 | 28 | listen="127.0.0.1:8080 [::1]:8080" 29 | listen="*:8080 *:6543" 30 | 31 | A wildcard for the hostname is also supported and will bind to both 32 | IPv4/IPv6 depending on whether they are enabled or disabled. 33 | 34 | IPv6 addresses are supported by surrounding the IP address with brackets. 35 | 36 | .. versionadded:: 1.0 37 | 38 | server_name 39 | This is the value that will be placed in the WSGI environment as 40 | ``SERVER_NAME``. The only time this value is used in the WSGI 41 | environment for a request is if the client sent an HTTP/1.0 request without 42 | a ``Host`` header set and no other proxy headers. 43 | 44 | The default value is ``waitress.invalid``. If your WSGI application is 45 | creating URLs that include this as the hostname and you are using a 46 | reverse proxy setup, you may want to validate that your reverse proxy is 47 | sending the appropriate headers. 48 | 49 | In most situations you will not need to set this value. 50 | 51 | Default: ``waitress.invalid`` 52 | 53 | .. versionadded:: 2.0 54 | 55 | ipv4 56 | Enable or disable IPv4 (boolean) 57 | 58 | ipv6 59 | Enable or disable IPv6 (boolean) 60 | 61 | unix_socket 62 | Path of Unix socket (string). If a socket path is specified, a Unix domain 63 | socket is made instead of the usual inet domain socket. 64 | 65 | Not available on Windows. 66 | 67 | Default: ``None`` 68 | 69 | unix_socket_perms 70 | Octal permissions to use for the Unix domain socket (string). 71 | Only used if ``unix_socket`` is not ``None``. 72 | 73 | Default: ``'600'`` 74 | 75 | sockets 76 | A list of sockets. The sockets can be either Internet or UNIX sockets and have 77 | to be bound. Internet and UNIX sockets cannot be mixed. 78 | If the socket list is not empty, waitress creates one server for each socket. 79 | 80 | Default: ``[]`` 81 | 82 | .. versionadded:: 1.1.1 83 | 84 | .. warning:: 85 | May not be used with ``listen``, ``host``, ``port`` or ``unix_socket`` 86 | 87 | threads 88 | The number of threads used to process application logic (integer). 89 | 90 | Default: ``4`` 91 | 92 | trusted_proxy 93 | IP address of a remote peer allowed to override various WSGI environment 94 | variables using proxy headers. 95 | 96 | For unix sockets, set this value to ``localhost`` instead of an IP address. 97 | 98 | Default: ``None`` 99 | 100 | trusted_proxy_count 101 | How many proxies we trust when chained. For example, 102 | 103 | ``X-Forwarded-For: 192.0.2.1, "[2001:db8::1]"`` 104 | 105 | or 106 | 107 | ``Forwarded: for=192.0.2.1, For="[2001:db8::1]"`` 108 | 109 | means there were (potentially) two proxies involved. If we know there is 110 | only 1 valid proxy, then that initial IP address "192.0.2.1" is not trusted 111 | and we completely ignore it.
112 | 113 | If there are two trusted proxies in the path, this value should be set to 114 | 2. If there are more proxies, this value should be set higher. 115 | 116 | Default: ``1`` 117 | 118 | .. versionadded:: 1.2.0 119 | 120 | trusted_proxy_headers 121 | Which of the proxy headers we should trust. This is a set in which you 122 | either specify "forwarded" or one or more of "x-forwarded-host", "x-forwarded-for", 123 | "x-forwarded-proto", "x-forwarded-port", "x-forwarded-by". 124 | 125 | This list of trusted headers is used when ``trusted_proxy`` is set and will 126 | allow waitress to modify the WSGI environment using the values provided by 127 | the proxy. 128 | 129 | .. versionadded:: 1.2.0 130 | 131 | .. warning:: 132 | If ``trusted_proxy`` is set, the default is ``x-forwarded-proto`` to 133 | match older versions of Waitress. Users should explicitly opt in by 134 | selecting the headers to be trusted, as future versions of waitress will 135 | use an empty default. 136 | 137 | .. warning:: 138 | It is an error to set this value without setting ``trusted_proxy``. 139 | 140 | log_untrusted_proxy_headers 141 | Should waitress log warning messages about proxy headers that are being 142 | sent from upstream that are not trusted by ``trusted_proxy_headers`` but 143 | are being cleared due to ``clear_untrusted_proxy_headers``? 144 | 145 | This may be useful for debugging if you expect your upstream proxy server 146 | to only send specific headers. 147 | 148 | Default: ``False`` 149 | 150 | .. versionadded:: 1.2.0 151 | 152 | .. warning:: 153 | It is a no-op to set this value without also setting 154 | ``clear_untrusted_proxy_headers`` and ``trusted_proxy``. 155 | 156 | clear_untrusted_proxy_headers 157 | This tells Waitress to remove any untrusted proxy headers ("Forwarded", 158 | "X-Forwarded-For", "X-Forwarded-By", "X-Forwarded-Host", "X-Forwarded-Port", 159 | "X-Forwarded-Proto") not explicitly allowed by ``trusted_proxy_headers``. 160 | 161 | Default: ``True`` 162 | 163 | .. versionchanged:: 3.0.0 164 | The default value is now ``True``, and the deprecation warning is no 165 | longer raised. 166 | 167 | .. versionadded:: 1.2.0 168 | 169 | .. note:: 170 | Prior to version 3.0.0 the default value was ``False`` for backwards 171 | compatibility, and a deprecation warning was raised unless the user 172 | explicitly provided a value for this option. As of 3.0.0 the default 173 | is ``True``; set it to ``False`` explicitly if you need the previous 174 | behaviour. 175 | 176 | .. warning:: 177 | It is an error to set this value without setting ``trusted_proxy``. 178 | 179 | url_scheme 180 | The value of ``wsgi.url_scheme`` in the environ. This can be 181 | overridden per-request by the value of the ``X-Forwarded-Proto`` header, 182 | but only if the client address matches ``trusted_proxy``. 183 | 184 | Default: ``http`` 185 | 186 | ident 187 | Server identity (string) used in the "Server:" header in responses. 188 | 189 | Default: ``waitress`` 190 | 191 | backlog 192 | The value waitress passes to ``socket.listen()`` (integer). 193 | This is the maximum number of incoming TCP 194 | connections that will wait in an OS queue for an available channel. From 195 | listen(2): "If a connection request arrives when the queue is full, the 196 | client may receive an error with an indication of ECONNREFUSED or, if the 197 | underlying protocol supports retransmission, the request may be ignored 198 | so that a later reattempt at connection succeeds."
199 | 200 | Default: ``1024`` 201 | 202 | recv_bytes 203 | The argument waitress passes to ``socket.recv()`` (integer). 204 | 205 | Default: ``8192`` 206 | 207 | send_bytes 208 | The number of bytes to send to ``socket.send()`` (integer). 209 | Multiples of 9000 should avoid partly-filled TCP 210 | packets, but don't set this larger than the TCP write buffer size. In 211 | Linux, ``/proc/sys/net/ipv4/tcp_wmem`` controls the minimum, default, and 212 | maximum sizes of TCP write buffers. 213 | 214 | Default: ``1`` 215 | 216 | .. deprecated:: 1.3 217 | 218 | outbuf_overflow 219 | A tempfile should be created if the pending output is larger than 220 | outbuf_overflow, which is measured in bytes. The default is conservative. 221 | 222 | Default: ``1048576`` (1MB) 223 | 224 | outbuf_high_watermark 225 | The app_iter will pause when pending output is larger than this value 226 | and will resume once enough data is written to the socket to fall below 227 | this threshold. 228 | 229 | Default: ``16777216`` (16MB) 230 | 231 | inbuf_overflow 232 | A tempfile should be created if the pending input is larger than 233 | inbuf_overflow, which is measured in bytes. The default is conservative. 234 | 235 | Default: ``524288`` (512K) 236 | 237 | connection_limit 238 | Stop creating new channels if too many are already active (integer). 239 | Each channel consumes at least one file descriptor, 240 | and, depending on the input and output body sizes, potentially up to 241 | three, plus whatever file descriptors your application logic happens to 242 | open. The default is conservative, but you may need to increase the 243 | number of file descriptors available to the Waitress process on most 244 | platforms in order to safely change it (see ``ulimit -a`` "open files" 245 | setting). Note that this doesn't control the maximum number of TCP 246 | connections that can be waiting for processing; the ``backlog`` argument 247 | controls that. 248 | 249 | Default: ``100`` 250 | 251 | cleanup_interval 252 | Minimum seconds between cleaning up inactive channels (integer). 253 | See also ``channel_timeout``. 254 | 255 | Default: ``30`` 256 | 257 | channel_timeout 258 | Maximum seconds to leave an inactive connection open (integer). 259 | "Inactive" is defined as "has received no data from a client 260 | and has sent no data to a client". 261 | 262 | Default: ``120`` 263 | 264 | log_socket_errors 265 | Set to ``False`` to not log premature client disconnect tracebacks. 266 | 267 | Default: ``True`` 268 | 269 | max_request_header_size 270 | Maximum number of bytes of all request headers combined (integer). 271 | 272 | Default: ``262144`` (256K) 273 | 274 | max_request_body_size 275 | Maximum number of bytes in request body (integer). 276 | 277 | Default: ``1073741824`` (1GB) 278 | 279 | expose_tracebacks 280 | Set to ``True`` to expose tracebacks of unhandled exceptions to client. 281 | 282 | Default: ``False`` 283 | 284 | asyncore_loop_timeout 285 | The ``timeout`` value (seconds) passed to ``asyncore.loop`` to run the mainloop. 286 | 287 | Default: ``1`` 288 | 289 | .. versionadded:: 0.8.3 290 | 291 | asyncore_use_poll 292 | Set to ``True`` to switch from using ``select()`` to ``poll()`` in ``asyncore.loop``. 293 | By default ``asyncore.loop()`` uses ``select()`` which has a limit of 1024 file descriptors. 294 | ``select()`` and ``poll()`` provide basically the same functionality, but ``poll()`` doesn't have the file descriptors limit. 295 | 296 | Default: ``False`` 297 | 298 | .. 
versionadded:: 0.8.6 299 | 300 | url_prefix 301 | String: the value used as the WSGI ``SCRIPT_NAME`` value. Setting this to 302 | anything except the empty string will cause the WSGI ``SCRIPT_NAME`` value 303 | to be the value passed minus any trailing slashes you add, and it will 304 | cause the ``PATH_INFO`` of any request which is prefixed with this value to 305 | be stripped of the prefix. 306 | 307 | Default: ``''`` 308 | -------------------------------------------------------------------------------- /src/waitress/proxy_headers.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | from .utilities import BadRequest, logger, undquote 4 | 5 | PROXY_HEADERS = frozenset( 6 | { 7 | "X_FORWARDED_FOR", 8 | "X_FORWARDED_HOST", 9 | "X_FORWARDED_PROTO", 10 | "X_FORWARDED_PORT", 11 | "X_FORWARDED_BY", 12 | "FORWARDED", 13 | } 14 | ) 15 | 16 | Forwarded = namedtuple("Forwarded", ["by", "for_", "host", "proto"]) 17 | 18 | 19 | class MalformedProxyHeader(Exception): 20 | def __init__(self, header, reason, value): 21 | self.header = header 22 | self.reason = reason 23 | self.value = value 24 | super().__init__(header, reason, value) 25 | 26 | 27 | def proxy_headers_middleware( 28 | app, 29 | trusted_proxy=None, 30 | trusted_proxy_count=1, 31 | trusted_proxy_headers=None, 32 | clear_untrusted=True, 33 | log_untrusted=False, 34 | logger=logger, 35 | ): 36 | def translate_proxy_headers(environ, start_response): 37 | untrusted_headers = PROXY_HEADERS 38 | remote_peer = environ["REMOTE_ADDR"] 39 | if trusted_proxy == "*" or remote_peer == trusted_proxy: 40 | try: 41 | untrusted_headers = parse_proxy_headers( 42 | environ, 43 | trusted_proxy_count=trusted_proxy_count, 44 | trusted_proxy_headers=trusted_proxy_headers, 45 | logger=logger, 46 | ) 47 | except MalformedProxyHeader as ex: 48 | logger.warning( 49 | 'Malformed proxy header "%s" from "%s": %s value: %s', 50 | ex.header, 51 | remote_peer, 52 | ex.reason, 53 | ex.value, 54 | ) 55 | error = BadRequest(f'Header "{ex.header}" malformed.') 56 | return error.wsgi_response(environ, start_response) 57 | 58 | # Clear out the untrusted proxy headers 59 | if clear_untrusted: 60 | clear_untrusted_headers( 61 | environ, untrusted_headers, log_warning=log_untrusted, logger=logger 62 | ) 63 | 64 | return app(environ, start_response) 65 | 66 | return translate_proxy_headers 67 | 68 | 69 | def parse_proxy_headers( 70 | environ, trusted_proxy_count, trusted_proxy_headers, logger=logger 71 | ): 72 | if trusted_proxy_headers is None: 73 | trusted_proxy_headers = set() 74 | 75 | forwarded_for = [] 76 | forwarded_host = forwarded_proto = forwarded_port = forwarded = "" 77 | client_addr = None 78 | untrusted_headers = set(PROXY_HEADERS) 79 | 80 | def raise_for_multiple_values(): 81 | raise ValueError("Unspecified behavior for multiple values found in header") 82 | 83 | if "x-forwarded-for" in trusted_proxy_headers and "HTTP_X_FORWARDED_FOR" in environ: 84 | try: 85 | forwarded_for = [] 86 | 87 | for forward_hop in environ["HTTP_X_FORWARDED_FOR"].split(","): 88 | forward_hop = forward_hop.strip() 89 | forward_hop = undquote(forward_hop) 90 | 91 | # Make sure that all IPv6 addresses are surrounded by brackets, 92 | # this is assuming that the IPv6 representation here does not 93 | # include a port number. 94 | 95 | if "." 
not in forward_hop and ( 96 | ":" in forward_hop and forward_hop[-1] != "]" 97 | ): 98 | forwarded_for.append(f"[{forward_hop}]") 99 | else: 100 | forwarded_for.append(forward_hop) 101 | 102 | forwarded_for = forwarded_for[-trusted_proxy_count:] 103 | client_addr = forwarded_for[0] 104 | 105 | untrusted_headers.remove("X_FORWARDED_FOR") 106 | except Exception as ex: 107 | raise MalformedProxyHeader( 108 | "X-Forwarded-For", str(ex), environ["HTTP_X_FORWARDED_FOR"] 109 | ) 110 | 111 | if ( 112 | "x-forwarded-host" in trusted_proxy_headers 113 | and "HTTP_X_FORWARDED_HOST" in environ 114 | ): 115 | try: 116 | forwarded_host_multiple = [] 117 | 118 | for forward_host in environ["HTTP_X_FORWARDED_HOST"].split(","): 119 | forward_host = forward_host.strip() 120 | forward_host = undquote(forward_host) 121 | forwarded_host_multiple.append(forward_host) 122 | 123 | forwarded_host_multiple = forwarded_host_multiple[-trusted_proxy_count:] 124 | forwarded_host = forwarded_host_multiple[0] 125 | 126 | untrusted_headers.remove("X_FORWARDED_HOST") 127 | except Exception as ex: 128 | raise MalformedProxyHeader( 129 | "X-Forwarded-Host", str(ex), environ["HTTP_X_FORWARDED_HOST"] 130 | ) 131 | 132 | if "x-forwarded-proto" in trusted_proxy_headers: 133 | try: 134 | forwarded_proto = undquote(environ.get("HTTP_X_FORWARDED_PROTO", "")) 135 | if "," in forwarded_proto: 136 | raise_for_multiple_values() 137 | untrusted_headers.remove("X_FORWARDED_PROTO") 138 | except Exception as ex: 139 | raise MalformedProxyHeader( 140 | "X-Forwarded-Proto", str(ex), environ["HTTP_X_FORWARDED_PROTO"] 141 | ) 142 | 143 | if "x-forwarded-port" in trusted_proxy_headers: 144 | try: 145 | forwarded_port = undquote(environ.get("HTTP_X_FORWARDED_PORT", "")) 146 | if "," in forwarded_port: 147 | raise_for_multiple_values() 148 | untrusted_headers.remove("X_FORWARDED_PORT") 149 | except Exception as ex: 150 | raise MalformedProxyHeader( 151 | "X-Forwarded-Port", str(ex), environ["HTTP_X_FORWARDED_PORT"] 152 | ) 153 | 154 | if "x-forwarded-by" in trusted_proxy_headers: 155 | # Waitress itself does not use X-Forwarded-By, but we can not 156 | # remove it so it can get set in the environ 157 | untrusted_headers.remove("X_FORWARDED_BY") 158 | 159 | if "forwarded" in trusted_proxy_headers: 160 | forwarded = environ.get("HTTP_FORWARDED", None) 161 | untrusted_headers = PROXY_HEADERS - {"FORWARDED"} 162 | 163 | # If the Forwarded header exists, it gets priority 164 | if forwarded: 165 | proxies = [] 166 | try: 167 | for forwarded_element in forwarded.split(","): 168 | # Remove whitespace that may have been introduced when 169 | # appending a new entry 170 | forwarded_element = forwarded_element.strip() 171 | 172 | forwarded_for = forwarded_host = forwarded_proto = "" 173 | forwarded_port = forwarded_by = "" 174 | 175 | for pair in forwarded_element.split(";"): 176 | pair = pair.lower() 177 | 178 | if not pair: 179 | continue 180 | 181 | token, equals, value = pair.partition("=") 182 | 183 | if equals != "=": 184 | raise ValueError('Invalid forwarded-pair missing "="') 185 | 186 | if token.strip() != token: 187 | raise ValueError("Token may not be surrounded by whitespace") 188 | 189 | if value.strip() != value: 190 | raise ValueError("Value may not be surrounded by whitespace") 191 | 192 | if token == "by": 193 | forwarded_by = undquote(value) 194 | 195 | elif token == "for": 196 | forwarded_for = undquote(value) 197 | 198 | elif token == "host": 199 | forwarded_host = undquote(value) 200 | 201 | elif token == "proto": 202 | forwarded_proto 
= undquote(value) 203 | 204 | else: 205 | logger.warning("Unknown Forwarded token: %s" % token) 206 | 207 | proxies.append( 208 | Forwarded( 209 | forwarded_by, forwarded_for, forwarded_host, forwarded_proto 210 | ) 211 | ) 212 | except Exception as ex: 213 | raise MalformedProxyHeader("Forwarded", str(ex), environ["HTTP_FORWARDED"]) 214 | 215 | proxies = proxies[-trusted_proxy_count:] 216 | 217 | # Iterate backwards and fill in some values, the oldest entry that 218 | # contains the information we expect is the one we use. We expect 219 | # that intermediate proxies may re-write the host header or proto, 220 | # but the oldest entry is the one that contains the information the 221 | # client expects when generating URL's 222 | # 223 | # Forwarded: for="[2001:db8::1]";host="example.com:8443";proto="https" 224 | # Forwarded: for=192.0.2.1;host="example.internal:8080" 225 | # 226 | # (After HTTPS header folding) should mean that we use as values: 227 | # 228 | # Host: example.com 229 | # Protocol: https 230 | # Port: 8443 231 | 232 | for proxy in proxies[::-1]: 233 | client_addr = proxy.for_ or client_addr 234 | forwarded_host = proxy.host or forwarded_host 235 | forwarded_proto = proxy.proto or forwarded_proto 236 | 237 | if forwarded_proto: 238 | forwarded_proto = forwarded_proto.lower() 239 | 240 | if forwarded_proto not in {"http", "https"}: 241 | raise MalformedProxyHeader( 242 | "Forwarded Proto=" if forwarded else "X-Forwarded-Proto", 243 | "unsupported proto value", 244 | forwarded_proto, 245 | ) 246 | 247 | # Set the URL scheme to the proxy provided proto 248 | environ["wsgi.url_scheme"] = forwarded_proto 249 | 250 | if not forwarded_port: 251 | if forwarded_proto == "http": 252 | forwarded_port = "80" 253 | 254 | if forwarded_proto == "https": 255 | forwarded_port = "443" 256 | 257 | if forwarded_host: 258 | if ":" in forwarded_host and forwarded_host[-1] != "]": 259 | host, port = forwarded_host.rsplit(":", 1) 260 | host, port = host.strip(), str(port) 261 | 262 | # We trust the port in the Forwarded Host/X-Forwarded-Host over 263 | # X-Forwarded-Port, or whatever we got from Forwarded 264 | # Proto/X-Forwarded-Proto. 
265 | 266 | if forwarded_port != port: 267 | forwarded_port = port 268 | 269 | # We trust the proxy server's forwarded Host 270 | environ["SERVER_NAME"] = host 271 | environ["HTTP_HOST"] = forwarded_host 272 | else: 273 | # We trust the proxy server's forwarded Host 274 | environ["SERVER_NAME"] = forwarded_host 275 | environ["HTTP_HOST"] = forwarded_host 276 | 277 | if forwarded_port: 278 | if forwarded_port not in {"443", "80"}: 279 | environ["HTTP_HOST"] = "{}:{}".format( 280 | forwarded_host, forwarded_port 281 | ) 282 | elif forwarded_port == "80" and environ["wsgi.url_scheme"] != "http": 283 | environ["HTTP_HOST"] = "{}:{}".format( 284 | forwarded_host, forwarded_port 285 | ) 286 | elif forwarded_port == "443" and environ["wsgi.url_scheme"] != "https": 287 | environ["HTTP_HOST"] = "{}:{}".format( 288 | forwarded_host, forwarded_port 289 | ) 290 | 291 | if forwarded_port: 292 | environ["SERVER_PORT"] = str(forwarded_port) 293 | 294 | if client_addr: 295 | if ":" in client_addr and client_addr[-1] != "]": 296 | addr, port = client_addr.rsplit(":", 1) 297 | environ["REMOTE_ADDR"] = strip_brackets(addr.strip()) 298 | environ["REMOTE_PORT"] = port.strip() 299 | else: 300 | environ["REMOTE_ADDR"] = strip_brackets(client_addr.strip()) 301 | environ["REMOTE_HOST"] = environ["REMOTE_ADDR"] 302 | 303 | return untrusted_headers 304 | 305 | 306 | def strip_brackets(addr): 307 | if addr[0] == "[" and addr[-1] == "]": 308 | return addr[1:-1] 309 | return addr 310 | 311 | 312 | def clear_untrusted_headers( 313 | environ, untrusted_headers, log_warning=False, logger=logger 314 | ): 315 | untrusted_headers_removed = [ 316 | header 317 | for header in untrusted_headers 318 | if environ.pop("HTTP_" + header, False) is not False 319 | ] 320 | 321 | if log_warning and untrusted_headers_removed: 322 | untrusted_headers_removed = [ 323 | "-".join(x.capitalize() for x in header.split("_")) 324 | for header in untrusted_headers_removed 325 | ] 326 | logger.warning( 327 | "Removed untrusted headers (%s). Waitress recommends these be " 328 | "removed upstream.", 329 | ", ".join(untrusted_headers_removed), 330 | ) 331 | -------------------------------------------------------------------------------- /src/waitress/server.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2001, 2002 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | 15 | import os 16 | import os.path 17 | import socket 18 | import time 19 | 20 | from waitress import trigger 21 | from waitress.adjustments import Adjustments 22 | from waitress.channel import HTTPChannel 23 | from waitress.compat import IPPROTO_IPV6, IPV6_V6ONLY 24 | from waitress.task import ThreadedTaskDispatcher 25 | from waitress.utilities import cleanup_unix_socket 26 | 27 | from . 
import wasyncore 28 | from .proxy_headers import proxy_headers_middleware 29 | 30 | 31 | def create_server( 32 | application, 33 | map=None, 34 | _start=True, # test shim 35 | _sock=None, # test shim 36 | _dispatcher=None, # test shim 37 | **kw # adjustments 38 | ): 39 | """ 40 | if __name__ == '__main__': 41 | server = create_server(app) 42 | server.run() 43 | """ 44 | if application is None: 45 | raise ValueError( 46 | 'The "app" passed to ``create_server`` was ``None``. You forgot ' 47 | "to return a WSGI app within your application." 48 | ) 49 | adj = Adjustments(**kw) 50 | 51 | if map is None: # pragma: nocover 52 | map = {} 53 | 54 | dispatcher = _dispatcher 55 | if dispatcher is None: 56 | dispatcher = ThreadedTaskDispatcher() 57 | dispatcher.set_thread_count(adj.threads) 58 | 59 | if adj.unix_socket and hasattr(socket, "AF_UNIX"): 60 | sockinfo = (socket.AF_UNIX, socket.SOCK_STREAM, None, None) 61 | return UnixWSGIServer( 62 | application, 63 | map, 64 | _start, 65 | _sock, 66 | dispatcher=dispatcher, 67 | adj=adj, 68 | sockinfo=sockinfo, 69 | ) 70 | 71 | effective_listen = [] 72 | last_serv = None 73 | if not adj.sockets: 74 | for sockinfo in adj.listen: 75 | # When TcpWSGIServer is called, it registers itself in the map. This 76 | # side-effect is all we need it for, so we don't store a reference to 77 | # or return it to the user. 78 | last_serv = TcpWSGIServer( 79 | application, 80 | map, 81 | _start, 82 | _sock, 83 | dispatcher=dispatcher, 84 | adj=adj, 85 | sockinfo=sockinfo, 86 | ) 87 | effective_listen.append( 88 | (last_serv.effective_host, last_serv.effective_port) 89 | ) 90 | 91 | for sock in adj.sockets: 92 | sockinfo = (sock.family, sock.type, sock.proto, sock.getsockname()) 93 | if sock.family == socket.AF_INET or sock.family == socket.AF_INET6: 94 | last_serv = TcpWSGIServer( 95 | application, 96 | map, 97 | _start, 98 | sock, 99 | dispatcher=dispatcher, 100 | adj=adj, 101 | bind_socket=False, 102 | sockinfo=sockinfo, 103 | ) 104 | effective_listen.append( 105 | (last_serv.effective_host, last_serv.effective_port) 106 | ) 107 | elif hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX: 108 | last_serv = UnixWSGIServer( 109 | application, 110 | map, 111 | _start, 112 | sock, 113 | dispatcher=dispatcher, 114 | adj=adj, 115 | bind_socket=False, 116 | sockinfo=sockinfo, 117 | ) 118 | effective_listen.append( 119 | (last_serv.effective_host, last_serv.effective_port) 120 | ) 121 | 122 | # We are running a single server, so we can just return the last server, 123 | # saves us from having to create one more object 124 | if len(effective_listen) == 1: 125 | # In this case we have no need to use a MultiSocketServer 126 | return last_serv 127 | 128 | log_info = last_serv.log_info 129 | # Return a class that has a utility function to print out the sockets it's 130 | # listening on, and has a .run() function. All of the TcpWSGIServers 131 | # registered themselves in the map above. 132 | return MultiSocketServer(map, adj, effective_listen, dispatcher, log_info) 133 | 134 | 135 | # This class is only ever used if we have multiple listen sockets. It allows 136 | # the serve() API to call .run() which starts the wasyncore loop, and catches 137 | # SystemExit/KeyboardInterrupt so that it can attempt to cleanly shut down. 
138 | class MultiSocketServer: 139 | asyncore = wasyncore # test shim 140 | 141 | def __init__( 142 | self, 143 | map=None, 144 | adj=None, 145 | effective_listen=None, 146 | dispatcher=None, 147 | log_info=None, 148 | ): 149 | self.adj = adj 150 | self.map = map 151 | self.effective_listen = effective_listen 152 | self.task_dispatcher = dispatcher 153 | self.log_info = log_info 154 | 155 | def print_listen(self, format_str): # pragma: nocover 156 | for l in self.effective_listen: 157 | l = list(l) 158 | 159 | if ":" in l[0]: 160 | l[0] = f"[{l[0]}]" 161 | 162 | self.log_info(format_str.format(*l)) 163 | 164 | def run(self): 165 | try: 166 | self.asyncore.loop( 167 | timeout=self.adj.asyncore_loop_timeout, 168 | map=self.map, 169 | use_poll=self.adj.asyncore_use_poll, 170 | ) 171 | except (SystemExit, KeyboardInterrupt): 172 | self.close() 173 | 174 | def close(self): 175 | self.task_dispatcher.shutdown() 176 | wasyncore.close_all(self.map) 177 | 178 | 179 | class BaseWSGIServer(wasyncore.dispatcher): 180 | 181 | channel_class = HTTPChannel 182 | next_channel_cleanup = 0 183 | socketmod = socket # test shim 184 | asyncore = wasyncore # test shim 185 | in_connection_overflow = False 186 | 187 | def __init__( 188 | self, 189 | application, 190 | map=None, 191 | _start=True, # test shim 192 | _sock=None, # test shim 193 | dispatcher=None, # dispatcher 194 | adj=None, # adjustments 195 | sockinfo=None, # opaque object 196 | bind_socket=True, 197 | **kw 198 | ): 199 | if adj is None: 200 | adj = Adjustments(**kw) 201 | 202 | if adj.trusted_proxy or adj.clear_untrusted_proxy_headers: 203 | # wrap the application to deal with proxy headers 204 | # we wrap it here because webtest subclasses the TcpWSGIServer 205 | # directly and thus doesn't run any code that's in create_server 206 | application = proxy_headers_middleware( 207 | application, 208 | trusted_proxy=adj.trusted_proxy, 209 | trusted_proxy_count=adj.trusted_proxy_count, 210 | trusted_proxy_headers=adj.trusted_proxy_headers, 211 | clear_untrusted=adj.clear_untrusted_proxy_headers, 212 | log_untrusted=adj.log_untrusted_proxy_headers, 213 | logger=self.logger, 214 | ) 215 | 216 | if map is None: 217 | # use a nonglobal socket map by default to hopefully prevent 218 | # conflicts with apps and libs that use the wasyncore global socket 219 | # map ala https://github.com/Pylons/waitress/issues/63 220 | map = {} 221 | if sockinfo is None: 222 | sockinfo = adj.listen[0] 223 | 224 | self.sockinfo = sockinfo 225 | self.family = sockinfo[0] 226 | self.socktype = sockinfo[1] 227 | self.application = application 228 | self.adj = adj 229 | self.trigger = trigger.trigger(map) 230 | if dispatcher is None: 231 | dispatcher = ThreadedTaskDispatcher() 232 | dispatcher.set_thread_count(self.adj.threads) 233 | 234 | self.task_dispatcher = dispatcher 235 | self.asyncore.dispatcher.__init__(self, _sock, map=map) 236 | if _sock is None: 237 | self.create_socket(self.family, self.socktype) 238 | if self.family == socket.AF_INET6: # pragma: nocover 239 | self.socket.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) 240 | 241 | self.set_reuse_addr() 242 | 243 | if bind_socket: 244 | self.bind_server_socket() 245 | 246 | self.effective_host, self.effective_port = self.getsockname() 247 | self.server_name = adj.server_name 248 | self.active_channels = {} 249 | if _start: 250 | self.accept_connections() 251 | 252 | def bind_server_socket(self): 253 | raise NotImplementedError # pragma: no cover 254 | 255 | def getsockname(self): 256 | raise NotImplementedError # pragma: no 
cover 257 | 258 | def accept_connections(self): 259 | self.accepting = True 260 | self.socket.listen(self.adj.backlog) # Get around asyncore NT limit 261 | 262 | def add_task(self, task): 263 | self.task_dispatcher.add_task(task) 264 | 265 | def readable(self): 266 | now = time.time() 267 | if now >= self.next_channel_cleanup: 268 | self.next_channel_cleanup = now + self.adj.cleanup_interval 269 | self.maintenance(now) 270 | 271 | if self.accepting: 272 | if ( 273 | not self.in_connection_overflow 274 | and len(self._map) >= self.adj.connection_limit 275 | ): 276 | self.in_connection_overflow = True 277 | self.logger.warning( 278 | "total open connections reached the connection limit, " 279 | "no longer accepting new connections" 280 | ) 281 | elif ( 282 | self.in_connection_overflow 283 | and len(self._map) < self.adj.connection_limit 284 | ): 285 | self.in_connection_overflow = False 286 | self.logger.info( 287 | "total open connections dropped below the connection limit, " 288 | "listening again" 289 | ) 290 | return not self.in_connection_overflow 291 | return False 292 | 293 | def writable(self): 294 | return False 295 | 296 | def handle_read(self): 297 | pass 298 | 299 | def handle_connect(self): 300 | pass 301 | 302 | def handle_accept(self): 303 | try: 304 | v = self.accept() 305 | if v is None: 306 | return 307 | conn, addr = v 308 | except OSError: 309 | # Linux: On rare occasions we get a bogus socket back from 310 | # accept. socketmodule.c:makesockaddr complains that the 311 | # address family is unknown. We don't want the whole server 312 | # to shut down because of this. 313 | if self.adj.log_socket_errors: 314 | self.logger.warning("server accept() threw an exception", exc_info=True) 315 | return 316 | self.set_socket_options(conn) 317 | addr = self.fix_addr(addr) 318 | self.channel_class(self, conn, addr, self.adj, map=self._map) 319 | 320 | def run(self): 321 | try: 322 | self.asyncore.loop( 323 | timeout=self.adj.asyncore_loop_timeout, 324 | map=self._map, 325 | use_poll=self.adj.asyncore_use_poll, 326 | ) 327 | except (SystemExit, KeyboardInterrupt): 328 | self.task_dispatcher.shutdown() 329 | 330 | def pull_trigger(self): 331 | self.trigger.pull_trigger() 332 | 333 | def set_socket_options(self, conn): 334 | pass 335 | 336 | def fix_addr(self, addr): 337 | return addr 338 | 339 | def maintenance(self, now): 340 | """ 341 | Closes channels that have not had any activity in a while. 342 | 343 | The timeout is configured through adj.channel_timeout (seconds). 
344 | """ 345 | cutoff = now - self.adj.channel_timeout 346 | for channel in self.active_channels.values(): 347 | if (not channel.requests) and channel.last_activity < cutoff: 348 | channel.will_close = True 349 | 350 | def print_listen(self, format_str): # pragma: no cover 351 | self.log_info(format_str.format(self.effective_host, self.effective_port)) 352 | 353 | def close(self): 354 | self.trigger.close() 355 | return wasyncore.dispatcher.close(self) 356 | 357 | 358 | class TcpWSGIServer(BaseWSGIServer): 359 | def bind_server_socket(self): 360 | (_, _, _, sockaddr) = self.sockinfo 361 | self.bind(sockaddr) 362 | 363 | def getsockname(self): 364 | # Return the IP address, port as numeric 365 | return self.socketmod.getnameinfo( 366 | self.socket.getsockname(), 367 | self.socketmod.NI_NUMERICHOST | self.socketmod.NI_NUMERICSERV, 368 | ) 369 | 370 | def set_socket_options(self, conn): 371 | for (level, optname, value) in self.adj.socket_options: 372 | conn.setsockopt(level, optname, value) 373 | 374 | 375 | if hasattr(socket, "AF_UNIX"): 376 | 377 | class UnixWSGIServer(BaseWSGIServer): 378 | def __init__( 379 | self, 380 | application, 381 | map=None, 382 | _start=True, # test shim 383 | _sock=None, # test shim 384 | dispatcher=None, # dispatcher 385 | adj=None, # adjustments 386 | sockinfo=None, # opaque object 387 | **kw 388 | ): 389 | if sockinfo is None: 390 | sockinfo = (socket.AF_UNIX, socket.SOCK_STREAM, None, None) 391 | 392 | super().__init__( 393 | application, 394 | map=map, 395 | _start=_start, 396 | _sock=_sock, 397 | dispatcher=dispatcher, 398 | adj=adj, 399 | sockinfo=sockinfo, 400 | **kw, 401 | ) 402 | 403 | def bind_server_socket(self): 404 | cleanup_unix_socket(self.adj.unix_socket) 405 | self.bind(self.adj.unix_socket) 406 | if os.path.exists(self.adj.unix_socket): 407 | os.chmod(self.adj.unix_socket, self.adj.unix_socket_perms) 408 | 409 | def getsockname(self): 410 | return ("unix", self.socket.getsockname()) 411 | 412 | def fix_addr(self, addr): 413 | return ("localhost", None) 414 | 415 | 416 | # Compatibility alias. 417 | WSGIServer = TcpWSGIServer 418 | --------------------------------------------------------------------------------
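Taken together, ``docs/arguments.rst`` and ``src/waitress/server.py`` describe the intended entry points: ``waitress.serve(app, **kw)`` (or ``create_server(app, **kw).run()``) builds one server per listen socket and, when proxy settings are supplied, wraps the application in ``proxy_headers_middleware``. Below is a minimal sketch tying those arguments together; the application function and the listen/proxy values are illustrative assumptions, not taken from the repository.

    from waitress import serve

    def app(environ, start_response):
        # Trivial WSGI application, used only to exercise the serve() call below.
        start_response("200 OK", [("Content-Type", "text/plain; charset=utf-8")])
        return [b"Hello from waitress\n"]

    if __name__ == "__main__":
        # "listen" is a quoted, space-delimited list of host:port pairs; the proxy
        # settings assume (hypothetically) a reverse proxy at 203.0.113.10 that
        # sets X-Forwarded-Proto and X-Forwarded-For (see docs/arguments.rst).
        serve(
            app,
            listen="127.0.0.1:8080 [::1]:8080",
            threads=4,
            trusted_proxy="203.0.113.10",
            trusted_proxy_headers={"x-forwarded-proto", "x-forwarded-for"},
            clear_untrusted_proxy_headers=True,
        )

The same keyword arguments can be passed to ``create_server`` instead when the server object itself is needed, as the ``create_server`` docstring suggests.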