├── .coveragerc ├── .flake8 ├── .github ├── dependabot.yml └── workflows │ └── ci-tests.yml ├── .gitignore ├── .readthedocs.yaml ├── CHANGES.txt ├── CONTRIBUTORS.txt ├── COPYRIGHT.txt ├── HISTORY.txt ├── LICENSE.txt ├── MANIFEST.in ├── README.rst ├── RELEASING.txt ├── TODO.txt ├── contributing.md ├── docs ├── Makefile ├── api.rst ├── arguments.rst ├── conf.py ├── design.rst ├── differences.rst ├── filewrapper.rst ├── glossary.rst ├── index.rst ├── logging.rst ├── rebuild ├── reverse-proxy.rst ├── runner.rst ├── socket-activation.rst └── usage.rst ├── pyproject.toml ├── setup.cfg ├── setup.py ├── src └── waitress │ ├── __init__.py │ ├── __main__.py │ ├── adjustments.py │ ├── buffers.py │ ├── channel.py │ ├── compat.py │ ├── parser.py │ ├── proxy_headers.py │ ├── receiver.py │ ├── rfc7230.py │ ├── runner.py │ ├── server.py │ ├── task.py │ ├── trigger.py │ ├── utilities.py │ └── wasyncore.py ├── tests ├── __init__.py ├── fixtureapps │ ├── __init__.py │ ├── badcl.py │ ├── echo.py │ ├── error.py │ ├── error_traceback.py │ ├── filewrapper.py │ ├── getline.py │ ├── groundhog1.jpg │ ├── nocl.py │ ├── runner.py │ ├── sleepy.py │ ├── toolarge.py │ └── writecb.py ├── test_adjustments.py ├── test_buffers.py ├── test_channel.py ├── test_functional.py ├── test_init.py ├── test_parser.py ├── test_proxy_headers.py ├── test_receiver.py ├── test_regression.py ├── test_runner.py ├── test_server.py ├── test_task.py ├── test_trigger.py ├── test_utilities.py └── test_wasyncore.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | parallel = true 3 | concurrency = 4 | thread 5 | multiprocessing 6 | source = 7 | waitress 8 | omit = 9 | waitress/tests/fixtureapps/getline.py 10 | 11 | [paths] 12 | source = 13 | src/waitress 14 | */src/waitress 15 | */site-packages/waitress 16 | 17 | [report] 18 | show_missing = true 19 | precision = 2 20 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | # Recommended flake8 settings while editing, we use Black for the final linting/say in how code is formatted 2 | # 3 | # pip install flake8 flake8-bugbear 4 | # 5 | # This will warn/error on things that black does not fix, on purpose. 6 | # 7 | # Run: 8 | # 9 | # tox -e run-flake8 10 | # 11 | # To have it automatically create and install the appropriate tools, and run 12 | # flake8 across the source code/tests 13 | 14 | [flake8] 15 | # max line length is set to 88 in black, here it is set to 80 and we enable bugbear's B950 warning, which is: 16 | # 17 | # B950: Line too long. This is a pragmatic equivalent of pycodestyle’s E501: it 18 | # considers “max-line-length” but only triggers when the value has been 19 | # exceeded by more than 10%. You will no longer be forced to reformat code due 20 | # to the closing parenthesis being one character too far to satisfy the linter. 21 | # At the same time, if you do significantly violate the line length, you will 22 | # receive a message that states what the actual limit is. This is inspired by 23 | # Raymond Hettinger’s “Beyond PEP 8” talk and highway patrol not stopping you 24 | # if you drive < 5mph too fast. Disable E501 to avoid duplicate warnings. 
25 | max-line-length = 80 26 | max-complexity = 12 27 | select = E,F,W,C,B,B9 28 | ignore = 29 | # E123 closing bracket does not match indentation of opening bracket’s line 30 | E123 31 | # E203 whitespace before ‘:’ (Not PEP8 compliant, Python Black) 32 | E203 33 | # E501 line too long (82 > 79 characters) (replaced by B950 from flake8-bugbear, https://github.com/PyCQA/flake8-bugbear) 34 | E501 35 | # W503 line break before binary operator (Not PEP8 compliant, Python Black) 36 | W503 37 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Set update schedule for GitHub Actions 2 | 3 | version: 2 4 | updates: 5 | 6 | - package-ecosystem: "github-actions" 7 | directory: "/" 8 | schedule: 9 | # Check for updates to GitHub Actions every weekday 10 | interval: "daily" 11 | -------------------------------------------------------------------------------- /.github/workflows/ci-tests.yml: -------------------------------------------------------------------------------- 1 | name: Build and test 2 | 3 | concurrency: 4 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 5 | cancel-in-progress: true 6 | 7 | on: 8 | # Only on pushes to main or one of the release branches we build on push 9 | push: 10 | branches: 11 | - main 12 | - "[0-9].[0-9]+-branch" 13 | tags: 14 | - "*" 15 | # Build pull requests 16 | pull_request: 17 | 18 | jobs: 19 | test: 20 | strategy: 21 | matrix: 22 | py: 23 | - "3.9" 24 | - "3.10" 25 | - "3.11" 26 | - "3.12" 27 | - "3.13" 28 | - "pypy-3.9" 29 | - "pypy-3.10" 30 | # Pre-release 31 | os: 32 | - "ubuntu-22.04" 33 | - "windows-latest" 34 | - "macos-14" # arm64 35 | - "macos-13" # x64 36 | architecture: 37 | - x64 38 | - x86 39 | - arm64 40 | include: 41 | - py: "pypy-3.9" 42 | toxenv: "pypy39" 43 | - py: "pypy-3.10" 44 | toxenv: "pypy310" 45 | exclude: 46 | # Ubuntu does not have x86/arm64 Python 47 | - os: "ubuntu-22.04" 48 | architecture: x86 49 | - os: "ubuntu-22.04" 50 | architecture: arm64 51 | # MacOS we need to make sure to remove x86 on all 52 | # We need to run no arm64 on macos-13 (Intel), but some 53 | # Python versions: 3.9/3.10 54 | # 55 | # From 3.11 onward, there is support for running x64 and 56 | # arm64 on Apple Silicon based systems (macos-14) 57 | - os: "macos-13" 58 | architecture: x86 59 | - os: "macos-13" 60 | architecture: arm64 61 | - os: "macos-14" 62 | architecture: x86 63 | - os: "macos-14" 64 | architecture: x64 65 | py: "3.9" 66 | - os: "macos-14" 67 | architecture: x64 68 | py: "3.10" 69 | # Windows does not have arm64 releases 70 | - os: "windows-latest" 71 | architecture: arm64 72 | # Don't run all PyPy versions except latest on 73 | # Windows/macOS. They are expensive to run. 
74 | - os: "windows-latest" 75 | py: "pypy-3.9" 76 | - os: "macos-13" 77 | py: "pypy-3.9" 78 | - os: "macos-14" 79 | py: "pypy-3.9" 80 | name: "Python: ${{ matrix.py }}-${{ matrix.architecture }} on ${{ matrix.os }}" 81 | runs-on: ${{ matrix.os }} 82 | steps: 83 | - uses: actions/checkout@v4 84 | - name: Setup python 85 | uses: actions/setup-python@v5 86 | with: 87 | python-version: ${{ matrix.py }} 88 | architecture: ${{ matrix.architecture }} 89 | - run: pip install tox 90 | - name: Running tox with specific toxenv 91 | if: ${{ matrix.toxenv != '' }} 92 | env: 93 | TOXENV: ${{ matrix.toxenv }} 94 | run: tox 95 | - name: Running tox for current python version 96 | if: ${{ matrix.toxenv == '' }} 97 | run: tox -e py 98 | 99 | coverage: 100 | runs-on: ubuntu-22.04 101 | name: Validate coverage 102 | steps: 103 | - uses: actions/checkout@v4 104 | - name: Setup python 105 | uses: actions/setup-python@v5 106 | with: 107 | python-version: "3.13" 108 | architecture: x64 109 | - run: pip install tox 110 | - run: tox -e py313,coverage 111 | 112 | docs: 113 | runs-on: ubuntu-22.04 114 | name: Build the documentation 115 | steps: 116 | - uses: actions/checkout@v4 117 | - name: Setup python 118 | uses: actions/setup-python@v5 119 | with: 120 | python-version: "3.13" 121 | architecture: x64 122 | - run: pip install tox 123 | - run: tox -e docs 124 | 125 | lint: 126 | runs-on: ubuntu-22.04 127 | name: Lint the package 128 | steps: 129 | - uses: actions/checkout@v4 130 | - name: Setup python 131 | uses: actions/setup-python@v5 132 | with: 133 | python-version: "3.13" 134 | architecture: x64 135 | - run: pip install tox 136 | - run: tox -e lint 137 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info/ 2 | *.pyc 3 | env*/ 4 | .coverage 5 | .coverage.* 6 | .tox/ 7 | dist/ 8 | build/ 9 | coverage.xml 10 | docs/_themes 11 | docs/_build 12 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # https://docs.readthedocs.io/en/stable/config-file/v2.html 2 | version: 2 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: '3.12' 7 | sphinx: 8 | configuration: docs/conf.py 9 | formats: 10 | - pdf 11 | - epub 12 | python: 13 | install: 14 | - method: pip 15 | path: . 16 | extra_requirements: 17 | - docs 18 | -------------------------------------------------------------------------------- /CHANGES.txt: -------------------------------------------------------------------------------- 1 | 3.0.2 (2024-11-16) 2 | ------------------ 3 | 4 | Security 5 | ~~~~~~~~ 6 | 7 | - When using Waitress to process trusted proxy headers, Waitress will now 8 | update the headers to drop any untrusted values, thereby making sure that 9 | WSGI apps only get trusted and validated values that Waitress itself used to 10 | update the environ. See https://github.com/Pylons/waitress/pull/452 and 11 | https://github.com/Pylons/waitress/issues/451 12 | 13 | 14 | 3.0.1 (2024-10-28) 15 | ------------------ 16 | 17 | Backward Incompatibilities 18 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 | 20 | - Python 3.8 is no longer supported. 21 | See https://github.com/Pylons/waitress/pull/445. 22 | 23 | Features 24 | ~~~~~~~~ 25 | 26 | - Added support for Python 3.13. 27 | See https://github.com/Pylons/waitress/pull/445. 
28 | 29 | Security 30 | ~~~~~~~~ 31 | 32 | - Fix a bug that would lead to Waitress busy looping on select() on a half-open 33 | socket due to a race condition that existed when creating a new HTTPChannel. 34 | See https://github.com/Pylons/waitress/pull/435, 35 | https://github.com/Pylons/waitress/issues/418 and 36 | https://github.com/Pylons/waitress/security/advisories/GHSA-3f84-rpwh-47g6 37 | 38 | With thanks to Dylan Jay and Dieter Maurer for their extensive debugging and 39 | helping track this down. 40 | 41 | - No longer strip the header values before passing them to the WSGI environ. 42 | See https://github.com/Pylons/waitress/pull/434 and 43 | https://github.com/Pylons/waitress/issues/432 44 | 45 | - Fix a race condition in Waitress when `channel_request_lookahead` is enabled 46 | that could lead to HTTP request smuggling. 47 | 48 | See https://github.com/Pylons/waitress/security/advisories/GHSA-9298-4cf8-g4wj 49 | 50 | 3.0.0 (2024-02-04) 51 | ------------------ 52 | 53 | - Rename "master" git branch to "main" 54 | 55 | - Fix a bug that would appear on macOS whereby if we accept() a socket that is 56 | already gone, setting socket options would fail and take down the server. See 57 | https://github.com/Pylons/waitress/pull/399 58 | 59 | - Fixed testing of vendored asyncore code to not rely on particular naming for 60 | errno's. See https://github.com/Pylons/waitress/pull/397 61 | 62 | - HTTP Request methods and versions are now validated to meet the HTTP 63 | standards thereby dropping invalid requests on the floor. See 64 | https://github.com/Pylons/waitress/pull/423 65 | 66 | - No longer close the connection when sending a HEAD request response. See 67 | https://github.com/Pylons/waitress/pull/428 68 | 69 | - Always attempt to send the Connection: close response header when we are 70 | going to close the connection to let the remote know in more instances. 71 | https://github.com/Pylons/waitress/pull/429 72 | 73 | - Python 3.7 is no longer supported. Add support for Python 3.11, 3.12 and 74 | PyPy 3.9, 3.10. See https://github.com/Pylons/waitress/pull/412 75 | 76 | - Document that trusted_proxy may be set to a wildcard value to trust all 77 | proxies. See https://github.com/Pylons/waitress/pull/431 78 | 79 | Updated Defaults 80 | ~~~~~~~~~~~~~~~~ 81 | 82 | - clear_untrusted_proxy_headers is set to True by default. See 83 | https://github.com/Pylons/waitress/pull/370 84 | -------------------------------------------------------------------------------- /CONTRIBUTORS.txt: -------------------------------------------------------------------------------- 1 | Pylons Project Contributor Agreement 2 | ==================================== 3 | 4 | The submitter agrees by adding his or her name within the section below named 5 | "Contributors" and submitting the resulting modified document to the 6 | canonical shared repository location for this software project (whether 7 | directly, as a user with "direct commit access", or via a "pull request"), he 8 | or she is signing a contract electronically. The submitter becomes a 9 | Contributor after a) he or she signs this document by adding their name 10 | beneath the "Contributors" section below, and b) the resulting document is 11 | accepted into the canonical version control repository. 12 | 13 | Treatment of Account 14 | --------------------- 15 | 16 | Contributor will not allow anyone other than the Contributor to use his or 17 | her username or source repository login to submit code to a Pylons Project 18 | source repository. 
Should Contributor become aware of any such use, 19 | Contributor will immediately notify Agendaless Consulting. 20 | Notification must be performed by sending an email to 21 | webmaster@agendaless.com. Until such notice is received, Contributor will be 22 | presumed to have taken all actions made through Contributor's account. If the 23 | Contributor has direct commit access, Agendaless Consulting will have 24 | complete control and discretion over capabilities assigned to Contributor's 25 | account, and may disable Contributor's account for any reason at any time. 26 | 27 | Legal Effect of Contribution 28 | ---------------------------- 29 | 30 | Upon submitting a change or new work to a Pylons Project source Repository (a 31 | "Contribution"), you agree to assign, and hereby do assign, a one-half 32 | interest of all right, title and interest in and to copyright and other 33 | intellectual property rights with respect to your new and original portions 34 | of the Contribution to Agendaless Consulting. You and Agendaless Consulting 35 | each agree that the other shall be free to exercise any and all exclusive 36 | rights in and to the Contribution, without accounting to one another, 37 | including without limitation, the right to license the Contribution to others 38 | under the Repoze Public License. This agreement shall run with title to the 39 | Contribution. Agendaless Consulting does not convey to you any right, title 40 | or interest in or to the Program or such portions of the Contribution that 41 | were taken from the Program. Your transmission of a submission to the Pylons 42 | Project source Repository and marks of identification concerning the 43 | Contribution itself constitute your intent to contribute and your assignment 44 | of the work in accordance with the provisions of this Agreement. 45 | 46 | License Terms 47 | ------------- 48 | 49 | Code committed to the Pylons Project source repository (Committed Code) must 50 | be governed by the Repoze Public License (http://repoze.org/LICENSE.txt, aka 51 | "the RPL") or another license acceptable to Agendaless Consulting. Until 52 | Agendaless Consulting declares in writing an acceptable license other than 53 | the RPL, only the RPL shall be used. A list of exceptions is detailed within 54 | the "Licensing Exceptions" section of this document, if one exists. 55 | 56 | Representations, Warranty, and Indemnification 57 | ---------------------------------------------- 58 | 59 | Contributor represents and warrants that the Committed Code does not violate 60 | the rights of any person or entity, and that the Contributor has legal 61 | authority to enter into this Agreement and legal authority over Contributed 62 | Code. Further, Contributor indemnifies Agendaless Consulting against 63 | violations. 64 | 65 | Cryptography 66 | ------------ 67 | 68 | Contributor understands that cryptographic code may be subject to government 69 | regulations with which Agendaless Consulting and/or entities using Committed 70 | Code must comply. Any code which contains any of the items listed below must 71 | not be checked-in until Agendaless Consulting staff has been notified and has 72 | approved such contribution in writing. 73 | 74 | - Cryptographic capabilities or features 75 | 76 | - Calls to cryptographic features 77 | 78 | - User interface elements which provide context relating to cryptography 79 | 80 | - Code which may, under casual inspection, appear to be cryptographic. 
81 | 82 | Notices 83 | ------- 84 | 85 | Contributor confirms that any notices required will be included in any 86 | Committed Code. 87 | 88 | Licensing Exceptions 89 | ==================== 90 | 91 | Code committed within the ``docs/`` subdirectory of the Waitress source 92 | control repository and "docstrings" which appear in the documentation 93 | generated by running "make" within this directory is licensed under the 94 | Creative Commons Attribution-Noncommercial-Share Alike 3.0 United States 95 | License (http://creativecommons.org/licenses/by-nc-sa/3.0/us/). 96 | 97 | List of Contributors 98 | ==================== 99 | 100 | The below-signed are contributors to a code repository that is part of the 101 | project named "Waitress". Each below-signed contributor has read, understand 102 | and agrees to the terms above in the section within this document entitled 103 | "Pylons Project Contributor Agreement" as of the date beside his or her name. 104 | 105 | Contributors 106 | ------------ 107 | 108 | - Chris McDonough, 2011/12/17 109 | 110 | - Michael Merickel, 2012/01/16 111 | 112 | - Damien Baty, 2012/10/25 113 | 114 | - Georges Dubus, 2012/11/24 115 | 116 | - Tres Seaver, 2013/04/09 117 | 118 | - Tshepang Lekhonkhobe, 2013/04/09 119 | 120 | - Keith Gaughan, 2013/05/11 121 | 122 | - Jamie Matthews, 2013/06/19 123 | 124 | - Adam Groszer, 2013/08/15 125 | 126 | - Matt Russell, 2015/01/14 127 | 128 | - David Glick, 2015/04/13 129 | 130 | - Shane Hathaway, 2015-04-20 131 | 132 | - Steve Piercy, 2015-04-21 133 | 134 | - Ben Warren, 2015-05-17 135 | 136 | - Bert JW Regeer, 2015-09-23 137 | 138 | - Yu Zhou, 2015-09-24 139 | 140 | - Jason Madden, 2016-03-19 141 | 142 | - Atsushi Odagiri, 2017-02-12 143 | 144 | - David D Lowe, 2017-06-02 145 | 146 | - Jack Wearden, 2018-05-18 147 | 148 | - Frank Krick, 2018-10-29 149 | 150 | - Jonathan Vanasco, 2022-11-15 151 | 152 | - Simon King, 2024-11-12 153 | -------------------------------------------------------------------------------- /COPYRIGHT.txt: -------------------------------------------------------------------------------- 1 | Zope Foundation and Contributors -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Zope Public License (ZPL) Version 2.1 2 | 3 | A copyright notice accompanies this license document that identifies the 4 | copyright holders. 5 | 6 | This license has been certified as open source. It has also been designated as 7 | GPL compatible by the Free Software Foundation (FSF). 8 | 9 | Redistribution and use in source and binary forms, with or without 10 | modification, are permitted provided that the following conditions are met: 11 | 12 | 1. Redistributions in source code must retain the accompanying copyright 13 | notice, this list of conditions, and the following disclaimer. 14 | 15 | 2. Redistributions in binary form must reproduce the accompanying copyright 16 | notice, this list of conditions, and the following disclaimer in the 17 | documentation and/or other materials provided with the distribution. 18 | 19 | 3. Names of the copyright holders must not be used to endorse or promote 20 | products derived from this software without prior written permission from the 21 | copyright holders. 22 | 23 | 4. The right to distribute this software or to use it for any purpose does not 24 | give you the right to use Servicemarks (sm) or Trademarks (tm) of the 25 | copyright 26 | holders. 
Use of them is covered by separate agreement with the copyright 27 | holders. 28 | 29 | 5. If any files are modified, you must cause the modified files to carry 30 | prominent notices stating that you changed the files and the date of any 31 | change. 32 | 33 | Disclaimer 34 | 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESSED 36 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 37 | OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 38 | EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, 39 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 41 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 42 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 43 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 44 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | graft src/waitress 2 | graft tests 3 | graft docs 4 | graft .github 5 | 6 | include README.rst 7 | include CHANGES.txt 8 | include HISTORY.txt 9 | include RELEASING.txt 10 | include LICENSE.txt 11 | include contributing.md 12 | include CONTRIBUTORS.txt 13 | include COPYRIGHT.txt 14 | 15 | include pyproject.toml setup.cfg 16 | include .coveragerc .flake8 17 | include tox.ini .readthedocs.yaml 18 | 19 | exclude TODO.txt 20 | prune docs/_build 21 | 22 | recursive-exclude * __pycache__ *.py[cod] 23 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Waitress 2 | ======== 3 | 4 | .. image:: https://img.shields.io/pypi/v/waitress.svg 5 | :target: https://pypi.org/project/waitress/ 6 | :alt: latest version of waitress on PyPI 7 | 8 | .. image:: https://github.com/Pylons/waitress/actions/workflows/ci-tests.yml/badge.svg 9 | :target: https://github.com/Pylons/waitress/actions/workflows/ci-tests.yml 10 | 11 | .. image:: https://readthedocs.org/projects/waitress/badge/?version=stable 12 | :target: https://docs.pylonsproject.org/projects/waitress/en/stable/ 13 | :alt: main Documentation Status 14 | 15 | Waitress is a production-quality pure-Python WSGI server with very acceptable 16 | performance. It has no dependencies except ones which live in the Python 17 | standard library. It runs on CPython on Unix and Windows under Python 3.9+. It 18 | is also known to run on PyPy 3 (version 3.9 compatible python and above) on 19 | UNIX. It supports HTTP/1.0 and HTTP/1.1. 20 | 21 | For more information, see the "docs" directory of the Waitress package or visit 22 | https://docs.pylonsproject.org/projects/waitress/en/latest/ 23 | -------------------------------------------------------------------------------- /RELEASING.txt: -------------------------------------------------------------------------------- 1 | Releasing 2 | ========= 3 | 4 | - For clarity, we define releases as follows. 5 | 6 | - Alpha, beta, dev and similar statuses do not qualify whether a release is 7 | major or minor. The term "pre-release" means alpha, beta, or dev. 8 | 9 | - A release is final when it is no longer pre-release. 
10 | 11 | - A *major* release is where the first number either before or after the 12 | first dot increases. Examples: 1.0 to 1.1a1, or 0.9 to 1.0. 13 | 14 | - A *minor* or *bug fix* release is where the number after the second dot 15 | increases. Example: 1.0 to 1.0.1. 16 | 17 | Prepare new release 18 | ------------------- 19 | 20 | - Do platform test via tox: 21 | 22 | $ tox -r 23 | 24 | Make sure statement coverage is at 100% (the test run will fail if not). 25 | 26 | - Run tests on Windows if feasible. 27 | 28 | - Ensure all features of the release are documented (audit CHANGES.txt or 29 | communicate with contributors). 30 | 31 | - Change CHANGES.txt heading to reflect the new version number. 32 | 33 | - Minor releases should include a link under "Bug Fix Releases" to the minor 34 | feature changes in CHANGES.txt. 35 | 36 | - Change setup.py version to the release version number. 37 | 38 | - Make sure PyPI long description renders (requires ``readme_renderer`` 39 | installed into your Python):: 40 | 41 | $ python setup.py check -r -s -m 42 | 43 | - Create a release tag. 44 | 45 | - Make sure your Python has ``setuptools-git``, ``twine``, and ``wheel`` 46 | installed and release to PyPI:: 47 | 48 | $ python setup.py sdist bdist_wheel 49 | $ twine upload dist/waitress-X.X-* 50 | 51 | 52 | Prepare main for further development (major releases only) 53 | ---------------------------------------------------------- 54 | 55 | - In CHANGES.txt, preserve headings but clear out content. Add heading 56 | "unreleased" for the version number. 57 | 58 | - Forward port the changes in CHANGES.txt to HISTORY.txt. 59 | 60 | - Change setup.py version to the next version number. 61 | 62 | 63 | Marketing and communications 64 | ---------------------------- 65 | 66 | - Check `https://wiki.python.org/moin/WebServers 67 | `_. 68 | 69 | - Announce to Twitter. 70 | 71 | ``` 72 | waitress 1.x released. 73 | 74 | PyPI 75 | https://pypi.org/project/waitress/1.x/ 76 | 77 | === One time only for new version, first pre-release === 78 | What's New 79 | https://docs.pylonsproject.org/projects/waitress/en/latest/#id2 80 | === For all subsequent pre-releases === 81 | Changes 82 | https://docs.pylonsproject.org/projects/waitress/en/latest/#change-history 83 | 84 | Documentation: 85 | https://docs.pylonsproject.org/projects/waitress/en/latest/ 86 | 87 | Issues 88 | https://github.com/Pylons/waitress/issues 89 | ``` 90 | 91 | - Announce to maillist. 92 | 93 | ``` 94 | waitress 1.X.X has been released. 95 | 96 | The full changelog is here: 97 | https://docs.pylonsproject.org/projects/waitress/en/latest/#change-history 98 | 99 | What's New In waitress 1.X: 100 | https://docs.pylonsproject.org/projects/waitress/en/latest/#id2 101 | 102 | Documentation: 103 | https://docs.pylonsproject.org/projects/waitress/en/latest/ 104 | 105 | You can install it via PyPI: 106 | 107 | pip install waitress==1.X 108 | 109 | Enjoy, and please report any issues you find to the issue tracker at 110 | https://github.com/Pylons/waitress/issues 111 | 112 | Thanks! 113 | 114 | - waitress core developers 115 | ``` 116 | -------------------------------------------------------------------------------- /TODO.txt: -------------------------------------------------------------------------------- 1 | - 0.0.0.0 / IPv6. 2 | 3 | - Speed tweaking. 4 | 5 | - Anticipate multivalue and single-value-only headers in request headers in 6 | parser.py. 7 | 8 | - Timeout functests. 9 | 10 | - Complex pipelining functests (with intermediate connection: close). 
11 | 12 | - Killthreads support. 13 | 14 | - "TCP segment of a reassembled PDU" in wireshark. 15 | 16 | - Jim F. would like the server to log request start, request queue (to thread 17 | pool), app start, app finish, and request finish (all data has been 18 | flushed to client) events. 19 | 20 | Some challenges exist trying to divine per-request end time. We currently 21 | have the potential for request pipelining; the channel might service more 22 | than one request before it closes. We currently don't preserve any 23 | information about which request a response's data belongs to while flushing 24 | response data from a connection's output buffer. 25 | 26 | While accepting request data from a client, Waitress will obtain N request 27 | bodies and schedule all the requests it receives with the task manager. 28 | For example, if it obtains two request bodies in a single recv() call it 29 | will create two request objects and schedule *both* of these requests to be 30 | serviced by the task manager immediately. 31 | 32 | The task thread manager will service these synchronously: the first request 33 | will be run first, then the second. When the first request runs, it will 34 | push data to the out buffer, then it will end. Then the second request 35 | will run, and push data to the same out buffer, and it will end. While 36 | these requests are executing, the channel from whence they came stops 37 | accepting requests until the previously scheduled requests have actually 38 | been serviced. The request-burdened channel will be *sending* data to the 39 | client while the requests are being serviced, it just won't accept any more 40 | data until existing requests have been serviced. In the meantime, other 41 | channels may still be generating requests and adding tasks to the task 42 | manager. 43 | 44 | To capture request-end time we could create an output buffer per request or 45 | we could keep a dictionary of the final bytestream position of the 46 | outbuffer for each response to to request id; either would be a 47 | straightforward way to capture the fact that a particular request's 48 | response data has been flushed. We currently don't do that though. 
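A rough sketch of that second idea (the names here are purely illustrative --
nothing like this exists in the codebase yet)::

    class FlushTracker:
        """Map each request id to the outbuf position where its response ends."""

        def __init__(self):
            self.end_positions = {}  # request id -> final bytestream position
            self.flushed = 0         # total bytes written to the socket so far

        def note_response_end(self, request_id, outbuf_position):
            # called when a task finishes writing its response to the outbuf
            self.end_positions[request_id] = outbuf_position

        def note_flush(self, nbytes):
            # called from the I/O loop after a send(); returns the ids of
            # requests whose response data has now been fully flushed
            self.flushed += nbytes
            done = [r for r, pos in self.end_positions.items() if pos <= self.flushed]
            for r in done:
                del self.end_positions[r]
            return done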
49 | 50 | Here's what we can currently log without changing anything: 51 | 52 | An example of the events logged for a connection that receives two requests 53 | and each request succeeds, and the connection is closed after sending all 54 | data:: 55 | 56 | channel created: channel 1 at time 10 57 | request created: channel 1, request id 1 at time 11 58 | request created: channel 1, request id 2 at time 12 59 | channel requests queued: channel 1, request ids 1,2 at time 13 60 | request started: request id 1 at time 14 61 | request serviced: request id 1 at time 15 62 | request started: request id 2 at time 16 63 | request serviced: request id 2 at time 17 64 | channel closed: channel 1 at time 18 65 | 66 | An example of the events logged for a connection that receives two requests 67 | and the first request fails in such a way that the next request cannot 68 | proceed (content-length header of the first response does not match number 69 | of bytes sent in response to the first request, for example):: 70 | 71 | channel created: channel 1 at time 10 72 | request created: channel 1, request id 1 at time 11 73 | request created: channel 1, request id 2 at time 12 74 | channel requests queued: channel 1, request ids 1,2 at time 13 75 | request started: request id 1 at time 14 76 | request serviced: request id 1 at time 15 77 | request cancelled: request id 2 at time 17 78 | channel closed: channel 1 at time 18 79 | 80 | An example of the events logged for a connection that receives four 81 | requests (which all succeed in generating successful responses) but where 82 | the client waits for the first two responses to send the second two 83 | requests: 84 | 85 | channel created: channel 1 at time 10 86 | request created: channel 1, request id 1 at time 11 87 | request created: channel 1, request id 2 at time 12 88 | channel requests queued: channel 1, request ids 1,2 at time 13 89 | request started: request id 1 at time 14 90 | request serviced: request id 1 at time 15 91 | request started: request id 2 at time 15 92 | request serviced: request id 2 at time 16 93 | request created: channel 1, request id 3 at time 17 94 | request created: channel 1, request id 4 at time 18 95 | channel requests queued: channel 1, request ids 3,4 at time 18 96 | request started: request id 3 at time 19 97 | request serviced: request id 3 at time 20 98 | request started: request id 4 at time 21 99 | request serviced: request id 4 at time 22 100 | channel closed: channel 1 at time 23 101 | -------------------------------------------------------------------------------- /contributing.md: -------------------------------------------------------------------------------- 1 | Contributing 2 | ============ 3 | 4 | All projects under the Pylons Projects, including this one, follow the guidelines established at [How to Contribute](https://pylonsproject.org/community-how-to-contribute.html) and [Coding Style and Standards](https://pylonsproject.org/community-coding-style-standards.html). 5 | 6 | 7 | Get support 8 | ----------- 9 | 10 | See [Get Support](https://pylonsproject.org/community-support.html). You are reading this document most likely because you want to *contribute* to the project and not *get support*. 11 | 12 | 13 | Working on issues 14 | ----------------- 15 | 16 | To respect both your time and ours, we emphasize the following points. 17 | 18 | * We use the [Issue Tracker on GitHub](https://github.com/Pylons/waitress/issues) to discuss bugs, improvements, and feature requests. 
Search through existing issues before reporting a new one. Issues may be complex or wide-ranging. A discussion up front sets us all on the best path forward. 19 | * Minor issues—such as spelling, grammar, and syntax—don't require discussion and a pull request is sufficient. 20 | * After discussing the issue with maintainers and agreeing on a resolution, submit a pull request of your work. [GitHub Flow](https://guides.github.com/introduction/flow/index.html) describes the workflow process and why it's a good practice. 21 | 22 | 23 | Git branches 24 | ------------ 25 | 26 | There is a single branch [main](https://github.com/Pylons/waitress/) on which development takes place and from which releases to PyPI are tagged. This is the default branch on GitHub. 27 | 28 | 29 | Running tests and building documentation 30 | ---------------------------------------- 31 | 32 | We use [tox](https://tox.readthedocs.io/en/latest/) to automate test running, coverage, and building documentation across all supported Python versions. 33 | 34 | To run everything configured in the `tox.ini` file: 35 | 36 | $ tox 37 | 38 | To run tests on Python 2 and 3, and ensure full coverage, but exclude building of docs: 39 | 40 | $ tox -e py2-cover,py3-cover,coverage 41 | 42 | To build the docs only: 43 | 44 | $ tox -e docs 45 | 46 | See the `tox.ini` file for details. 47 | 48 | 49 | Contributing documentation 50 | -------------------------- 51 | 52 | *Note:* These instructions might not work for Windows users. Suggestions to improve the process for Windows users are welcome by submitting an issue or a pull request. 53 | 54 | 1. Fork the repo on GitHub by clicking the [Fork] button. 55 | 2. Clone your fork into a workspace on your local machine. 56 | 57 | cd ~/projects 58 | git clone git@github.com:/waitress.git 59 | 60 | 3. Add a git remote "upstream" for the cloned fork. 61 | 62 | git remote add upstream git@github.com:Pylons/waitress.git 63 | 64 | 4. Set an environment variable to your virtual environment. 65 | 66 | # Mac and Linux 67 | $ export VENV=~/projects/waitress/env 68 | 69 | # Windows 70 | set VENV=c:\projects\waitress\env 71 | 72 | 5. Try to build the docs in your workspace. 73 | 74 | # Mac and Linux 75 | $ make clean html SPHINXBUILD=$VENV/bin/sphinx-build 76 | 77 | # Windows 78 | c:\> make clean html SPHINXBUILD=%VENV%\bin\sphinx-build 79 | 80 | If successful, then you can make changes to the documentation. You can load the built documentation in the `/_build/html/` directory in a web browser. 81 | 82 | 6. From this point forward, follow the typical [git workflow](https://help.github.com/articles/what-is-a-good-git-workflow/). Start by pulling from the upstream to get the most current changes. 83 | 84 | git pull upstream main 85 | 86 | 7. Make a branch, make changes to the docs, and rebuild them as indicated in step 5. To speed up the build process, you can omit `clean` from the above command to rebuild only those pages that depend on the files you have changed. 87 | 88 | 8. Once you are satisfied with your changes and the documentation builds successfully without errors or warnings, then git commit and push them to your "origin" repository on GitHub. 89 | 90 | git commit -m "commit message" 91 | git push -u origin --all # first time only, subsequent can be just 'git push'. 92 | 93 | 9. Create a [pull request](https://help.github.com/articles/using-pull-requests/). 94 | 95 | 10. Repeat the process starting from Step 6. 
96 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | 9 | # Internal variables. 10 | PAPEROPT_a4 = -D latex_paper_size=a4 11 | PAPEROPT_letter = -D latex_paper_size=letter 12 | ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 13 | 14 | .PHONY: help clean html web pickle htmlhelp latex changes linkcheck 15 | 16 | help: 17 | @echo "Please use \`make ' where is one of" 18 | @echo " html to make standalone HTML files" 19 | @echo " pickle to make pickle files (usable by e.g. sphinx-web)" 20 | @echo " htmlhelp to make HTML files and a HTML help project" 21 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 22 | @echo " changes to make an overview over all changed/added/deprecated items" 23 | @echo " linkcheck to check all external links for integrity" 24 | 25 | clean: 26 | -rm -rf _build/* 27 | 28 | html: 29 | mkdir -p _build/html _build/doctrees 30 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html 31 | @echo 32 | @echo "Build finished. The HTML pages are in _build/html." 33 | 34 | text: 35 | mkdir -p _build/text _build/doctrees 36 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) _build/text 37 | @echo 38 | @echo "Build finished. The HTML pages are in _build/text." 39 | 40 | pickle: 41 | mkdir -p _build/pickle _build/doctrees 42 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle 43 | @echo 44 | @echo "Build finished; now you can process the pickle files or run" 45 | @echo " sphinx-web _build/pickle" 46 | @echo "to start the sphinx-web server." 47 | 48 | web: pickle 49 | 50 | htmlhelp: 51 | mkdir -p _build/htmlhelp _build/doctrees 52 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp 53 | @echo 54 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 55 | ".hhp project file in _build/htmlhelp." 56 | 57 | latex: 58 | mkdir -p _build/latex _build/doctrees 59 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex 60 | cp _static/*.png _build/latex 61 | ./convert_images.sh 62 | cp _static/latex-warning.png _build/latex 63 | cp _static/latex-note.png _build/latex 64 | @echo 65 | @echo "Build finished; the LaTeX files are in _build/latex." 66 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 67 | "run these through (pdf)latex." 68 | 69 | changes: 70 | mkdir -p _build/changes _build/doctrees 71 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes 72 | @echo 73 | @echo "The overview file is in _build/changes." 74 | 75 | linkcheck: 76 | mkdir -p _build/linkcheck _build/doctrees 77 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck 78 | @echo 79 | @echo "Link check complete; look for any errors in the above output " \ 80 | "or in _build/linkcheck/output.txt." 81 | 82 | epub: 83 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) _build/epub 84 | @echo 85 | @echo "Build finished. The epub file is in _build/epub." 86 | 87 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. _waitress_api: 2 | 3 | :mod:`waitress` API 4 | --------------------------- 5 | 6 | .. module:: waitress 7 | 8 | .. 
function:: serve(app, listen='0.0.0.0:8080', unix_socket=None, unix_socket_perms='600', threads=4, url_scheme='http', url_prefix='', ident='waitress', backlog=1024, recv_bytes=8192, send_bytes=1, outbuf_overflow=104856, outbuf_high_watermark=16777216, inbuf_overflow=52488, connection_limit=1000, cleanup_interval=30, channel_timeout=120, log_socket_errors=True, max_request_header_size=262144, max_request_body_size=1073741824, expose_tracebacks=False) 9 | 10 | See :ref:`arguments` for more information. 11 | -------------------------------------------------------------------------------- /docs/arguments.rst: -------------------------------------------------------------------------------- 1 | .. _arguments: 2 | 3 | Arguments to ``waitress.serve`` 4 | ------------------------------- 5 | 6 | Here are the arguments you can pass to the ``waitress.serve`` function or use 7 | in :term:`PasteDeploy` configuration (interchangeably): 8 | 9 | host 10 | Hostname or IP address (string) on which to listen, default ``0.0.0.0``, 11 | which means "all IP addresses on this host". 12 | 13 | .. warning:: 14 | May not be used with ``listen`` 15 | 16 | port 17 | TCP port (integer) on which to listen, default ``8080`` 18 | 19 | .. warning:: 20 | May not be used with ``listen`` 21 | 22 | listen 23 | Tell waitress to listen on combinations of ``host:port`` arguments. 24 | Combinations should be a quoted, space-delimited list, as in the following examples. 25 | 26 | .. code-block:: python 27 | 28 | listen="127.0.0.1:8080 [::1]:8080" 29 | listen="*:8080 *:6543" 30 | 31 | A wildcard for the hostname is also supported and will bind to both 32 | IPv4/IPv6 depending on whether they are enabled or disabled. 33 | 34 | IPv6 IP addresses are supported by surrounding the IP address with brackets. 35 | 36 | .. versionadded:: 1.0 37 | 38 | server_name 39 | This is the value that will be placed in the WSGI environment as 40 | ``SERVER_NAME``, the only time that this value is used in the WSGI 41 | environment for a request is if the client sent a HTTP/1.0 request without 42 | a ``Host`` header set, and no other proxy headers. 43 | 44 | The default is value is ``waitress.invalid``, if your WSGI application is 45 | creating URL's that include this as the hostname and you are using a 46 | reverse proxy setup, you may want to validate that your reverse proxy is 47 | sending the appropriate headers. 48 | 49 | In most situations you will not need to set this value. 50 | 51 | Default: ``waitress.invalid`` 52 | 53 | .. versionadded:: 2.0 54 | 55 | ipv4 56 | Enable or disable IPv4 (boolean) 57 | 58 | ipv6 59 | Enable or disable IPv6 (boolean) 60 | 61 | unix_socket 62 | Path of Unix socket (string). If a socket path is specified, a Unix domain 63 | socket is made instead of the usual inet domain socket. 64 | 65 | Not available on Windows. 66 | 67 | Default: ``None`` 68 | 69 | unix_socket_perms 70 | Octal permissions to use for the Unix domain socket (string). 71 | Only used if ``unix_socket`` is not ``None``. 72 | 73 | Default: ``'600'`` 74 | 75 | sockets 76 | A list of sockets. The sockets can be either Internet or UNIX sockets and have 77 | to be bound. Internet and UNIX sockets cannot be mixed. 78 | If the socket list is not empty, waitress creates one server for each socket. 79 | 80 | Default: ``[]`` 81 | 82 | .. versionadded:: 1.1.1 83 | 84 | .. warning:: 85 | May not be used with ``listen``, ``host``, ``port`` or ``unix_socket`` 86 | 87 | threads 88 | The number of threads used to process application logic (integer). 
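For example, assuming ``app`` is an existing WSGI callable, a call such as
the following uses a larger worker pool (a minimal sketch, not a
recommended value):

.. code-block:: python

    from waitress import serve

    # Use a larger worker pool than the default of 4 threads.
    serve(app, threads=8)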
89 | 
90 | Default: ``4`` 
91 | 
92 | trusted_proxy 
93 | IP address of a remote peer allowed to override various WSGI environment 
94 | variables using proxy headers. 
95 | 
96 | For unix sockets, set this value to ``localhost`` instead of an IP address. 
97 | 
98 | The value ``*`` (wildcard) may be used to signify that all remote peers are 
99 | to be trusted. 
100 | 
101 | .. warning:: 
102 | Using the wildcard is a security issue if Waitress is receiving 
103 | connections from untrusted locations as well as trusted locations. Make 
104 | sure that waitress is adequately deployed behind an additional layer of 
105 | security, such as a firewall only allowing traffic from known proxies. 
106 | 
107 | Default: ``None`` 
108 | 
109 | trusted_proxy_count 
110 | How many proxies we trust when chained. For example, 
111 | 
112 | ``X-Forwarded-For: 192.0.2.1, "[2001:db8::1]"`` 
113 | 
114 | or 
115 | 
116 | ``Forwarded: for=192.0.2.1, For="[2001:db8::1]"`` 
117 | 
118 | means there were (potentially) two proxies involved. If we know there is 
119 | only 1 valid proxy, then that initial IP address "192.0.2.1" is not trusted 
120 | and we completely ignore it. 
121 | 
122 | If there are two trusted proxies in the path, this value should be set to 
123 | 2. If there are more proxies, this value should be set higher. 
124 | 
125 | Default: ``1`` 
126 | 
127 | .. versionadded:: 1.2.0 
128 | 
129 | trusted_proxy_headers 
130 | Which of the proxy headers should we trust. This is a set where you 
131 | either specify "forwarded" or one or more of "x-forwarded-host", "x-forwarded-for", 
132 | "x-forwarded-proto", "x-forwarded-port", "x-forwarded-by". 
133 | 
134 | This list of trusted headers is used when ``trusted_proxy`` is set and will 
135 | allow waitress to modify the WSGI environment using the values provided by 
136 | the proxy. 
137 | 
138 | .. versionadded:: 1.2.0 
139 | 
140 | .. warning:: 
141 | If ``trusted_proxy`` is set, the default is ``x-forwarded-proto`` to 
142 | match older versions of Waitress. Users should explicitly opt-in by 
143 | selecting the headers to be trusted as future versions of waitress will 
144 | use an empty default. 
145 | 
146 | .. warning:: 
147 | It is an error to set this value without setting ``trusted_proxy``. 
148 | 
149 | log_untrusted_proxy_headers 
150 | Should waitress log warning messages about proxy headers that are being 
151 | sent from upstream that are not trusted by ``trusted_proxy_headers`` but 
152 | are being cleared due to ``clear_untrusted_proxy_headers``? 
153 | 
154 | This may be useful for debugging if you expect your upstream proxy server 
155 | to only send specific headers. 
156 | 
157 | Default: ``False`` 
158 | 
159 | .. versionadded:: 1.2.0 
160 | 
161 | .. warning:: 
162 | It is a no-op to set this value without also setting 
163 | ``clear_untrusted_proxy_headers`` and ``trusted_proxy``. 
164 | 
165 | clear_untrusted_proxy_headers 
166 | This tells Waitress to remove any untrusted proxy headers ("Forwarded", 
167 | "X-Forwarded-For", "X-Forwarded-By", "X-Forwarded-Host", "X-Forwarded-Port", 
168 | "X-Forwarded-Proto") not explicitly allowed by ``trusted_proxy_headers``. 
169 | 
170 | Default: ``True`` 
171 | 
172 | .. versionchanged:: 3.0.0 
173 | In this version the default value is set to ``True`` and the deprecation 
174 | warning is no longer shown. 
175 | 
176 | .. versionadded:: 1.2.0 
177 | 
178 | .. warning:: 
179 | Prior to version 3.0.0 the default value was ``False`` for backwards 
180 | compatibility, and a warning was raised unless the user explicitly 
181 | provided a value for this option, allowing users to opt in to the new 
182 | behaviour gradually. The default is now ``True``; set it to ``False`` 
183 | explicitly if you need the old behaviour. 
184 | 
185 | .. warning:: 
186 | It is an error to set this value without setting ``trusted_proxy``. 
187 | 
188 | url_scheme 
189 | The value of ``wsgi.url_scheme`` in the environ. This can be 
190 | overridden per-request by the value of the ``X_FORWARDED_PROTO`` header, 
191 | but only if the client address matches ``trusted_proxy``. 
192 | 
193 | Default: ``http`` 
194 | 
195 | ident 
196 | Server identity (string) used in the "Server:" header in responses. 
197 | 
198 | Default: ``waitress`` 
199 | 
200 | backlog 
201 | The value waitress passes to ``socket.listen()`` (integer). 
202 | This is the maximum number of incoming TCP 
203 | connections that will wait in an OS queue for an available channel. From 
204 | listen(2): "If a connection request arrives when the queue is full, the 
205 | client may receive an error with an indication of ECONNREFUSED or, if the 
206 | underlying protocol supports retransmission, the request may be ignored 
207 | so that a later reattempt at connection succeeds." 
208 | 
209 | Default: ``1024`` 
210 | 
211 | recv_bytes 
212 | The argument waitress passes to ``socket.recv()`` (integer). 
213 | 
214 | Default: ``8192`` 
215 | 
216 | send_bytes 
217 | The number of bytes to send to ``socket.send()`` (integer). 
218 | Multiples of 9000 should avoid partly-filled TCP 
219 | packets, but don't set this larger than the TCP write buffer size. In 
220 | Linux, ``/proc/sys/net/ipv4/tcp_wmem`` controls the minimum, default, and 
221 | maximum sizes of TCP write buffers. 
222 | 
223 | Default: ``1`` 
224 | 
225 | .. deprecated:: 1.3 
226 | 
227 | outbuf_overflow 
228 | A tempfile should be created if the pending output is larger than 
229 | outbuf_overflow, which is measured in bytes. The default is conservative. 
230 | 
231 | Default: ``1048576`` (1MB) 
232 | 
233 | outbuf_high_watermark 
234 | The app_iter will pause when pending output is larger than this value 
235 | and will resume once enough data is written to the socket to fall below 
236 | this threshold. 
237 | 
238 | Default: ``16777216`` (16MB) 
239 | 
240 | inbuf_overflow 
241 | A tempfile should be created if the pending input is larger than 
242 | inbuf_overflow, which is measured in bytes. The default is conservative. 
243 | 
244 | Default: ``524288`` (512K) 
245 | 
246 | connection_limit 
247 | Stop creating new channels if too many are already active (integer). 
248 | Each channel consumes at least one file descriptor, 
249 | and, depending on the input and output body sizes, potentially up to 
250 | three, plus whatever file descriptors your application logic happens to 
251 | open. The default is conservative, but you may need to increase the 
252 | number of file descriptors available to the Waitress process on most 
253 | platforms in order to safely change it (see ``ulimit -a`` "open files" 
254 | setting). Note that this doesn't control the maximum number of TCP 
255 | connections that can be waiting for processing; the ``backlog`` argument 
256 | controls that. 
257 | 
258 | Default: ``100`` 
259 | 
260 | cleanup_interval 
261 | Minimum seconds between cleaning up inactive channels (integer). 
262 | See also ``channel_timeout``. 
263 | 
264 | Default: ``30`` 
265 | 
266 | channel_timeout 
267 | Maximum seconds to leave an inactive connection open (integer). 
268 | "Inactive" is defined as "has received no data from a client 
269 | and has sent no data to a client". 
270 | 271 | Default: ``120`` 272 | 273 | log_socket_errors 274 | Set to ``False`` to not log premature client disconnect tracebacks. 275 | 276 | Default: ``True`` 277 | 278 | max_request_header_size 279 | Maximum number of bytes of all request headers combined (integer). 280 | 281 | Default: ``262144`` (256K) 282 | 283 | max_request_body_size 284 | Maximum number of bytes in request body (integer). 285 | 286 | Default: ``1073741824`` (1GB) 287 | 288 | expose_tracebacks 289 | Set to ``True`` to expose tracebacks of unhandled exceptions to client. 290 | 291 | Default: ``False`` 292 | 293 | asyncore_loop_timeout 294 | The ``timeout`` value (seconds) passed to ``asyncore.loop`` to run the mainloop. 295 | 296 | Default: ``1`` 297 | 298 | .. versionadded:: 0.8.3 299 | 300 | asyncore_use_poll 301 | Set to ``True`` to switch from using ``select()`` to ``poll()`` in ``asyncore.loop``. 302 | By default ``asyncore.loop()`` uses ``select()`` which has a limit of 1024 file descriptors. 303 | ``select()`` and ``poll()`` provide basically the same functionality, but ``poll()`` doesn't have the file descriptors limit. 304 | 305 | Default: ``False`` 306 | 307 | .. versionadded:: 0.8.6 308 | 309 | url_prefix 310 | String: the value used as the WSGI ``SCRIPT_NAME`` value. Setting this to 311 | anything except the empty string will cause the WSGI ``SCRIPT_NAME`` value 312 | to be the value passed minus any trailing slashes you add, and it will 313 | cause the ``PATH_INFO`` of any request which is prefixed with this value to 314 | be stripped of the prefix. 315 | 316 | Default: ``''`` 317 | 318 | channel_request_lookahead 319 | Sets the amount of requests we can continue to read from the socket, while 320 | we are processing current requests. The default value won't allow any 321 | lookahead, increase it above ``0`` to enable. 322 | 323 | When enabled this inserts a callable ``waitress.client_disconnected`` into 324 | the environment that allows the task to check if the client disconnected 325 | while waiting for the response at strategic points in the execution and to 326 | cancel the operation. 327 | 328 | Default: ``0`` 329 | 330 | .. versionadded:: 2.0.0 331 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # waitress documentation build configuration file 4 | # 5 | # This file is execfile()d with the current directory set to its containing 6 | # dir. 7 | # 8 | # The contents of this file are pickled, so don't put values in the 9 | # namespace that aren't pickleable (module imports are okay, they're 10 | # removed automatically). 11 | # 12 | # All configuration values have a default value; values that are commented 13 | # out serve to show the default value. 14 | 15 | # If your extensions are in another directory, add it here. If the 16 | # directory is relative to the documentation root, use os.path.abspath to 17 | # make it absolute, like shown here. 18 | # sys.path.append(os.path.abspath('some/directory')) 19 | 20 | import datetime 21 | 22 | try: 23 | from importlib.metadata import version as metadata_version 24 | except ImportError: 25 | from importlib_metadata import version as metadata_version 26 | import pylons_sphinx_themes 27 | 28 | # General configuration 29 | # --------------------- 30 | 31 | # Add any Sphinx extension module names here, as strings. 
They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 33 | extensions = [ 34 | "sphinx.ext.autodoc", 35 | "sphinx.ext.intersphinx", 36 | ] 37 | 38 | intersphinx_mapping = { 39 | "python": ("https://docs.python.org/3/", None), 40 | } 41 | 42 | # Add any paths that contain templates here, relative to this directory. 43 | templates_path = ["_templates"] 44 | 45 | # The suffix of source filenames. 46 | source_suffix = ".rst" 47 | 48 | # The main toctree document. 49 | master_doc = "index" 50 | 51 | # General substitutions. 52 | project = "waitress" 53 | thisyear = datetime.datetime.now().year 54 | copyright = "2012-%s, Agendaless Consulting " % thisyear 55 | 56 | # The default replacements for |version| and |release|, also used in various 57 | # other places throughout the built documents. 58 | # 59 | # The short X.Y version. 60 | version = metadata_version("waitress") 61 | # The full version, including alpha/beta/rc tags. 62 | release = version 63 | 64 | # There are two options for replacing |today|: either, you set today to 65 | # some non-false value, then it is used: 66 | # today = '' 67 | # Else, today_fmt is used as the format for a strftime call. 68 | today_fmt = "%B %d, %Y" 69 | 70 | # List of documents that shouldn't be included in the build. 71 | # unused_docs = [] 72 | 73 | # List of directories, relative to source directories, that shouldn't be 74 | # searched for source files. 75 | # exclude_dirs = [] 76 | exclude_patterns = [ 77 | "_themes/README.rst", 78 | ] 79 | 80 | # The reST default role (used for this markup: `text`) to use for all 81 | # documents. 82 | # default_role = None 83 | 84 | # If true, '()' will be appended to :func: etc. cross-reference text. 85 | # add_function_parentheses = True 86 | 87 | # If true, the current module name will be prepended to all description 88 | # unit titles (such as .. function::). 89 | # add_module_names = True 90 | add_module_names = False 91 | 92 | # If true, sectionauthor and moduleauthor directives will be shown in the 93 | # output. They are ignored by default. 94 | # show_authors = False 95 | 96 | # The name of the Pygments (syntax highlighting) style to use. 97 | pygments_style = "sphinx" 98 | 99 | # Do not use smart quotes. 100 | smartquotes = False 101 | 102 | 103 | # Options for HTML output 104 | # ----------------------- 105 | 106 | # Add and use Pylons theme 107 | html_theme = "pylons" 108 | html_theme_path = pylons_sphinx_themes.get_html_themes_path() 109 | html_theme_options = dict(github_url="https://github.com/Pylons/waitress") 110 | 111 | # The style sheet to use for HTML and HTML Help pages. A file of that name 112 | # must exist either in Sphinx' static/ path, or in one of the custom paths 113 | # given in html_static_path. 114 | # html_style = 'repoze.css' 115 | 116 | # The name for this set of Sphinx documents. If None, it defaults to 117 | # " v documentation". 118 | # html_title = None 119 | 120 | # A shorter title for the navigation bar. Default is the same as 121 | # html_title. 122 | # html_short_title = None 123 | 124 | # The name of an image file (within the static path) to place at the top of 125 | # the sidebar. 126 | # html_logo = '.static/logo_hi.gif' 127 | 128 | # The name of an image file (within the static path) to use as favicon of 129 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 130 | # 32x32 pixels large. 
131 | # html_favicon = None 132 | 133 | # Add any paths that contain custom static files (such as style sheets) 134 | # here, relative to this directory. They are copied after the builtin 135 | # static files, so a file named "default.css" will overwrite the builtin 136 | # "default.css". 137 | # html_static_path = ['.static'] 138 | 139 | # If not '', a 'Last updated on:' timestamp is inserted at every page 140 | # bottom, using the given strftime format. 141 | html_last_updated_fmt = "%b %d, %Y" 142 | 143 | # If true, SmartyPants will be used to convert quotes and dashes to 144 | # typographically correct entities. 145 | # html_use_smartypants = True 146 | 147 | # Custom sidebar templates, maps document names to template names. 148 | # html_sidebars = {} 149 | 150 | # Additional templates that should be rendered to pages, maps page names to 151 | # template names. 152 | # html_additional_pages = {} 153 | 154 | # If false, no module index is generated. 155 | # html_use_modindex = True 156 | 157 | # If false, no index is generated. 158 | # html_use_index = True 159 | 160 | # If true, the index is split into individual pages for each letter. 161 | # html_split_index = False 162 | 163 | # If true, the reST sources are included in the HTML build as 164 | # _sources/. 165 | # html_copy_source = True 166 | 167 | # If true, an OpenSearch description file will be output, and all pages 168 | # will contain a tag referring to it. The value of this option must 169 | # be the base URL from which the finished HTML is served. 170 | # html_use_opensearch = '' 171 | 172 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 173 | # html_file_suffix = '' 174 | 175 | # Output file base name for HTML help builder. 176 | htmlhelp_basename = "waitress" 177 | 178 | # Control display of sidebars 179 | html_sidebars = { 180 | "**": [ 181 | "localtoc.html", 182 | "ethicalads.html", 183 | "relations.html", 184 | "sourcelink.html", 185 | "searchbox.html", 186 | ] 187 | } 188 | 189 | # Options for LaTeX output 190 | # ------------------------ 191 | 192 | # The paper size ('letter' or 'a4'). 193 | # latex_paper_size = 'letter' 194 | 195 | # The font size ('10pt', '11pt' or '12pt'). 196 | # latex_font_size = '10pt' 197 | 198 | # Grouping the document tree into LaTeX files. List of tuples 199 | # (source start file, target name, title, 200 | # author, document class [howto/manual]). 201 | latex_documents = [ 202 | ( 203 | "index", 204 | "waitress.tex", 205 | "waitress Documentation", 206 | "Pylons Project Developers", 207 | "manual", 208 | ), 209 | ] 210 | 211 | # The name of an image file (relative to this directory) to place at the 212 | # top of the title page. 213 | # latex_logo = '.static/logo_hi.gif' 214 | 215 | # For "manual" documents, if this is true, then toplevel headings are 216 | # parts, not chapters. 217 | # latex_use_parts = False 218 | 219 | # Additional stuff for the LaTeX preamble. 220 | # latex_preamble = '' 221 | 222 | # Documents to append as an appendix to all manuals. 223 | # latex_appendices = [] 224 | 225 | # If false, no module index is generated. 226 | # latex_use_modindex = True 227 | -------------------------------------------------------------------------------- /docs/design.rst: -------------------------------------------------------------------------------- 1 | Design 2 | ------ 3 | 4 | Waitress uses a combination of asynchronous and synchronous code to do its job. 
5 | It handles I/O to and from clients using the :term:`wasyncore`, which is :term:`asyncore` vendored into Waitress. 6 | It services requests via threads. 7 | 8 | .. note:: 9 | :term:`asyncore` has been deprecated since Python 3.6. 10 | Work continues on its inevitable removal from the Python standard library. 11 | Its recommended replacement is :mod:`asyncio`. 12 | 13 | Although :term:`asyncore` has been vendored into Waitress as :term:`wasyncore`, you may see references to "asyncore" in this documentation's code examples and API. 14 | The terms are effectively the same and may be used interchangeably. 15 | 16 | The :term:`wasyncore` module: 17 | 18 | - Uses the ``select.select`` function to wait for connections from clients 19 | and determine if a connected client is ready to receive output. 20 | 21 | - Creates a channel whenever a new connection is made to the server. 22 | 23 | - Executes methods of a channel whenever it believes data can be read from or 24 | written to the channel. 25 | 26 | A "channel" is created for each connection from a client to the server. The 27 | channel handles all requests over the same connection from that client. A 28 | channel will handle some number of requests during its lifetime: zero to 29 | however many HTTP requests are sent to the server by the client over a single 30 | connection. For example, an HTTP/1.1 client may issue a theoretically 31 | infinite number of requests over the same connection; each of these will be 32 | handled by the same channel. An HTTP/1.0 client without a "Connection: 33 | keep-alive" header will usually make only one request over a single TCP 34 | connection, however, and when the request has completed, the client 35 | disconnects and reconnects (which will create another channel). When the 36 | connection related to a channel is closed, the channel is destroyed and 37 | garbage collected. 38 | 39 | When a channel determines the client has sent at least one full valid HTTP 40 | request, it schedules a "task" with a "thread dispatcher". The thread 41 | dispatcher maintains a fixed pool of worker threads available to do client 42 | work (by default, 4 threads). If a worker thread is available when a task is 43 | scheduled, the worker thread runs the task. The task has access to the 44 | channel, and can write back to the channel's output buffer. When all worker 45 | threads are in use, scheduled tasks will wait in a queue for a worker thread 46 | to become available. 47 | 48 | I/O is always done asynchronously (by :term:`wasyncore`) in the main thread. 49 | Worker threads never do any I/O. 50 | This means that 51 | 52 | #. a large number of clients can be connected to the server at once, and 53 | #. worker threads will never be hung up trying to send data to a slow client. 54 | 55 | No attempt is made to kill a "hung thread". It's assumed that when a task 56 | (application logic) starts, it will eventually complete. If for some 57 | reason WSGI application logic never completes and spins forever, the worker 58 | thread related to that WSGI application will be consumed "forever", and if 59 | enough worker threads are consumed like this, the server will stop responding 60 | entirely. 61 | 62 | Periodic maintenance is done by the main thread (the thread handling I/O). 63 | If a channel hasn't sent or received any data in a while, the channel's 64 | connection is closed, and the channel is destroyed.
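The worker-pool size and the idle-channel maintenance described above correspond to the ``threads``, ``channel_timeout``, and ``cleanup_interval`` adjustments documented in :ref:`arguments`. The following is a minimal sketch of tuning them through ``waitress.serve``; the ``app`` callable and the numbers chosen are illustrative assumptions, not recommendations:

.. code-block:: python

    from waitress import serve

    def app(environ, start_response):
        # Placeholder WSGI application; any WSGI callable works here.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello']

    # Eight worker threads instead of the default four; idle channels are
    # closed after 60 seconds of inactivity, with the maintenance check run
    # by the main I/O thread no more often than every 30 seconds.
    serve(app, listen='127.0.0.1:8080', threads=8, channel_timeout=60,
          cleanup_interval=30)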
65 | -------------------------------------------------------------------------------- /docs/differences.rst: -------------------------------------------------------------------------------- 1 | Differences from ``zope.server`` 2 | -------------------------------- 3 | 4 | - Has no non-stdlib dependencies. 5 | 6 | - No support for non-WSGI servers (no FTP, plain-HTTP, etc.); refactorings and 7 | slight interface changes as a result. Non-WSGI-supporting code removed. 8 | 9 | - Slight cleanup in the way application response headers are handled (no more 10 | "accumulated headers"). 11 | 12 | - Supports the HTTP 1.1 "expect/continue" mechanism (required by WSGI spec). 13 | 14 | - Calls "close()" on the app_iter object returned by the WSGI application. 15 | 16 | - Allows trusted proxies to override ``wsgi.url_scheme`` for particular 17 | requests by supplying the ``X_FORWARDED_PROTO`` header. 18 | 19 | - Supports an explicit ``wsgi.url_scheme`` parameter for ease of deployment 20 | behind SSL proxies. 21 | 22 | - Different adjustment defaults (less conservative). 23 | 24 | - Python 3 compatible. 25 | 26 | - More test coverage (unit tests added, functional tests refactored and more 27 | added). 28 | 29 | - Supports the convenience ``waitress.serve`` function (e.g. ``from waitress 30 | import serve; serve(app)``) and the convenience ``server.run()`` function. 31 | 32 | - Returns a "real" write method from start_response. 33 | 34 | - Provides a getsockname method of the server for figuring out which port the 35 | server is listening on when it's bound to port 0. 36 | 37 | - Warns when the number of bytes in the app_iter bytestream is less than or 38 | greater than the specified Content-Length. 39 | 40 | - Sets the Content-Length header if len(app_iter) == 1 and none is provided. 41 | 42 | - Raises an exception if start_response isn't called before any body write. 43 | 44 | - channel.write does not accept non-byte-sequences. 45 | 46 | - Puts the maintenance check on the server rather than the channel to avoid a 47 | class of DOS. 48 | 49 | - wsgi.multiprocess set (correctly) to False. 50 | 51 | - Ensures the header total cannot exceed a maximum size. 52 | 53 | - Ensures the body total cannot exceed a maximum size. 54 | 55 | - Broken chunked encoding request bodies don't crash the server. 56 | 57 | - Handles keepalive/pipelining properly (no out-of-order responses, no 58 | premature channel closes). 59 | 60 | - Sends a 500 error to the client when a task raises an uncaught exception 61 | (with optional traceback rendering via the "expose_tracebacks" adjustment). 62 | 63 | - Supports HTTP/1.1 chunked responses when the application doesn't set a 64 | Content-Length header. 65 | 66 | - Doesn't hang a thread up trying to send data to slow clients. 67 | 68 | - Supports the ``wsgi.file_wrapper`` protocol. 69 | -------------------------------------------------------------------------------- /docs/filewrapper.rst: -------------------------------------------------------------------------------- 1 | Support for ``wsgi.file_wrapper`` 2 | --------------------------------- 3 | 4 | Waitress supports the Python Web Server Gateway Interface v1.0 as specified in :pep:`3333`. Here's a usage example: 5 | 6 | ..
code-block:: python 7 | 8 | import os 9 | 10 | here = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | def myapp(environ, start_response): 13 | f = open(os.path.join(here, 'myphoto.jpg'), 'rb') 14 | headers = [('Content-Type', 'image/jpeg')] 15 | start_response( 16 | '200 OK', 17 | headers 18 | ) 19 | return environ['wsgi.file_wrapper'](f, 32768) 20 | 21 | The file wrapper constructor is accessed via 22 | ``environ['wsgi.file_wrapper']``. The signature of the file wrapper 23 | constructor is ``(filelike_object, block_size)``. Both arguments must be 24 | passed as positional (not keyword) arguments. The result of creating a file 25 | wrapper should be **returned** as the ``app_iter`` from a WSGI application. 26 | 27 | The object passed as ``filelike_object`` to the wrapper must be a file-like 28 | object which supports *at least* the ``read()`` method, and the ``read()`` 29 | method must support an optional size hint argument and the ``read()`` method 30 | *must* return **bytes** objects (never unicode). It *should* support the 31 | ``seek()`` and ``tell()`` methods. If it does not, normal iteration over the 32 | ``filelike_object`` using the provided ``block_size`` is used (and copying is 33 | done, negating any benefit of the file wrapper). It *should* support a 34 | ``close()`` method. 35 | 36 | The specified ``block_size`` argument to the file wrapper constructor will be 37 | used only when the ``filelike_object`` doesn't support ``seek`` and/or 38 | ``tell`` methods. Waitress needs to use normal iteration to serve the file 39 | in this degenerate case (as per the WSGI pec), and this block size will be 40 | used as the iteration chunk size. The ``block_size`` argument is optional; 41 | if it is not passed, a default value ``32768`` is used. 42 | 43 | Waitress will set a ``Content-Length`` header on behalf of an application 44 | when a file wrapper with a sufficiently file-like object is used if the 45 | application hasn't already set one. 46 | 47 | The machinery which handles a file wrapper currently doesn't do anything 48 | particularly special using fancy system calls (it doesn't use ``sendfile`` 49 | for example); using it currently just prevents the system from needing to 50 | copy data to a temporary buffer in order to send it to the client. No 51 | copying of data is done when a WSGI app returns a file wrapper that wraps a 52 | sufficiently file-like object. It may do something fancier in the future. 53 | -------------------------------------------------------------------------------- /docs/glossary.rst: -------------------------------------------------------------------------------- 1 | .. _glossary: 2 | 3 | Glossary 4 | ======== 5 | 6 | .. glossary:: 7 | :sorted: 8 | 9 | PasteDeploy 10 | A system for configuration of WSGI web components in declarative ``.ini`` format. 11 | See https://docs.pylonsproject.org/projects/pastedeploy/en/latest/. 12 | 13 | asyncore 14 | A Python standard library module for asynchronous communications. See :mod:`asyncore`. 15 | 16 | .. versionchanged:: 1.2.0 17 | Waitress has now "vendored" ``asyncore`` into itself as ``waitress.wasyncore``. 18 | This is to cope with the eventuality that ``asyncore`` will be removed from the Python standard library in Python 3.8 or so. 19 | 20 | middleware 21 | *Middleware* is a :term:`WSGI` concept. 22 | It is a WSGI component that acts both as a server and an application. 23 | Interesting uses for middleware exist, such as caching, content-transport encoding, and other functions. 
24 | See `WSGI.org `_ or `PyPI `_ to find middleware for your application. 25 | 26 | WSGI 27 | `Web Server Gateway Interface `_. 28 | This is a Python standard for connecting web applications to web servers, similar to the concept of Java Servlets. 29 | Waitress requires that your application be served as a WSGI application. 30 | 31 | wasyncore 32 | .. versionchanged:: 1.2.0 33 | Waitress has now "vendored" :term:`asyncore` into itself as ``waitress.wasyncore``. 34 | This is to cope with the eventuality that ``asyncore`` will be removed from the Python standard library in Python 3.8 or so. 35 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. _index: 2 | 3 | ======== 4 | Waitress 5 | ======== 6 | 7 | Waitress is meant to be a production-quality pure-Python WSGI server with very 8 | acceptable performance. It has no dependencies except ones which live in the 9 | Python standard library. It runs on CPython on Unix and Windows under Python 10 | 3.9+. It is also known to run on PyPy 3 (Python version 3.9+) on UNIX. It 11 | supports HTTP/1.0 and HTTP/1.1. 12 | 13 | 14 | Extended Documentation 15 | ---------------------- 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | 20 | usage 21 | logging 22 | reverse-proxy 23 | design 24 | differences 25 | api 26 | arguments 27 | filewrapper 28 | runner 29 | socket-activation 30 | glossary 31 | 32 | Change History 33 | -------------- 34 | 35 | .. include:: ../CHANGES.txt 36 | .. include:: ../HISTORY.txt 37 | 38 | Known Issues 39 | ------------ 40 | 41 | - Does not support TLS natively. See :ref:`using-behind-a-reverse-proxy` for more information. 42 | 43 | Support and Development 44 | ----------------------- 45 | 46 | The `Pylons Project web site `_ is the main online 47 | source of Waitress support and development information. 48 | 49 | To report bugs, use the `issue tracker 50 | `_. 51 | 52 | If you've got questions that aren't answered by this documentation, 53 | contact the `Pylons-discuss maillist 54 | `_ or join the `#pyramid 55 | IRC channel `_. 56 | 57 | Browse and check out tagged and trunk versions of Waitress via 58 | the `Waitress GitHub repository `_. 59 | To check out the trunk via ``git``, use this command: 60 | 61 | .. code-block:: text 62 | 63 | git clone git@github.com:Pylons/waitress.git 64 | 65 | To find out how to become a contributor to Waitress, please see the guidelines in `contributing.md `_ and `How to Contribute Source Code and Documentation `_. 66 | 67 | Why? 68 | ---- 69 | 70 | At the time of the release of Waitress, there are already many pure-Python 71 | WSGI servers. Why would we need another? 72 | 73 | Waitress is meant to be useful to web framework authors who require broad 74 | platform support. It's neither the fastest nor the fanciest WSGI server 75 | available but using it helps eliminate the N-by-M documentation burden 76 | (e.g. production vs. deployment, Windows vs. Unix, Python 3 vs. Python 2, 77 | PyPy vs. CPython) and resulting user confusion imposed by spotty platform 78 | support of the current (2012-ish) crop of WSGI servers. For example, 79 | ``gunicorn`` is great, but doesn't run on Windows. ``paste.httpserver`` is 80 | perfectly serviceable, but doesn't run under Python 3 and has no dedicated 81 | tests suite that would allow someone who did a Python 3 port to know it 82 | worked after a port was completed. 
``wsgiref`` works fine under most any 83 | Python, but it's a little slow and it's not recommended for production use as 84 | it's single-threaded and has not been audited for security issues. 85 | 86 | At the time of this writing, some existing WSGI servers already claim wide 87 | platform support and have serviceable test suites. The CherryPy WSGI server, 88 | for example, targets Python 2 and Python 3 and it can run on UNIX or Windows. 89 | However, it is not distributed separately from its eponymous web framework, 90 | and requiring a non-CherryPy web framework to depend on the CherryPy web 91 | framework distribution simply for its server component is awkward. The test 92 | suite of the CherryPy server also depends on the CherryPy web framework, so 93 | even if we forked its server component into a separate distribution, we would 94 | have still needed to backfill for all of its tests. The CherryPy team has 95 | started work on `Cheroot `_, which 96 | should solve this problem, however. 97 | 98 | Waitress is a fork of the WSGI-related components which existed in 99 | ``zope.server``. ``zope.server`` had passable framework-independent test 100 | coverage out of the box, and a good bit more coverage was added during the 101 | fork. ``zope.server`` has existed in one form or another since about 2001, 102 | and has seen production usage since then, so Waitress is not exactly 103 | "another" server, it's more a repackaging of an old one that was already 104 | known to work fairly well. 105 | -------------------------------------------------------------------------------- /docs/logging.rst: -------------------------------------------------------------------------------- 1 | .. _access-logging: 2 | 3 | ============== 4 | Access Logging 5 | ============== 6 | 7 | The WSGI design is modular. Waitress logs error conditions, debugging 8 | output, etc., but not web traffic. For web traffic logging, Paste 9 | provides `TransLogger 10 | `_ 11 | :term:`middleware`. TransLogger produces logs in the `Apache Combined 12 | Log Format `_. 13 | 14 | 15 | .. _logging-to-the-console-using-python: 16 | 17 | Logging to the Console Using Python 18 | ----------------------------------- 19 | 20 | ``waitress.serve`` calls ``logging.basicConfig()`` to set up logging to the 21 | console when the server starts up. Assuming no other logging configuration 22 | has already been done, this sets the logging default level to 23 | ``logging.WARNING``. The Waitress logger will inherit the root logger's 24 | level information (it logs at level ``WARNING`` or above). 25 | 26 | Waitress sends its logging output (including application exception 27 | renderings) to the Python logger object named ``waitress``. You can 28 | influence the logger level and output stream using the normal Python 29 | ``logging`` module API. For example: 30 | 31 | .. code-block:: python 32 | 33 | import logging 34 | logger = logging.getLogger('waitress') 35 | logger.setLevel(logging.INFO) 36 | 37 | Within a PasteDeploy configuration file, you can use the normal Python 38 | ``logging`` module ``.ini`` file format to change similar Waitress logging 39 | options. For example: 40 | 41 | .. code-block:: ini 42 | 43 | [logger_waitress] 44 | level = INFO 45 | 46 | 47 | .. _logging-to-the-console-using-pastedeploy: 48 | 49 | Logging to the Console Using PasteDeploy 50 | ---------------------------------------- 51 | 52 | TransLogger will automatically setup a logging handler to the console when called with no arguments. 
53 | It "just works" in environments that don't configure logging. 54 | This is by virtue of its default configuration setting of ``setup_console_handler = True``. 55 | 56 | 57 | .. TODO: 58 | .. .. _logging-to-a-file-using-python: 59 | 60 | .. Logging to a File Using Python 61 | .. ------------------------------ 62 | 63 | .. Show how to configure the WSGI logger via python. 64 | 65 | 66 | .. _logging-to-a-file-using-pastedeploy: 67 | 68 | Logging to a File Using PasteDeploy 69 | ------------------------------------ 70 | 71 | TransLogger does not write to files, and the Python logging system 72 | must be configured to do this. The Python class :class:`FileHandler` 73 | logging handler can be used alongside TransLogger to create an 74 | ``access.log`` file similar to Apache's. 75 | 76 | Like any standard :term:`middleware` with a Paste entry point, 77 | TransLogger can be configured to wrap your application using ``.ini`` 78 | file syntax. First add a 79 | ``[filter:translogger]`` section, then use a ``[pipeline:main]`` 80 | section file to form a WSGI pipeline with both the translogger and 81 | your application in it. For instance, if you have this: 82 | 83 | .. code-block:: ini 84 | 85 | [app:wsgiapp] 86 | use = egg:mypackage#wsgiapp 87 | 88 | [server:main] 89 | use = egg:waitress#main 90 | host = 127.0.0.1 91 | port = 8080 92 | 93 | Add this: 94 | 95 | .. code-block:: ini 96 | 97 | [filter:translogger] 98 | use = egg:Paste#translogger 99 | setup_console_handler = False 100 | 101 | [pipeline:main] 102 | pipeline = translogger 103 | wsgiapp 104 | 105 | Using PasteDeploy this way to form and serve a pipeline is equivalent to 106 | wrapping your app in a TransLogger instance via the bottom of the ``main`` 107 | function of your project's ``__init__`` file: 108 | 109 | .. code-block:: python 110 | 111 | from mypackage import wsgiapp 112 | from waitress import serve 113 | from paste.translogger import TransLogger 114 | serve(TransLogger(wsgiapp, setup_console_handler=False)) 115 | 116 | .. note:: 117 | TransLogger will automatically set up a logging handler to the console when 118 | called with no arguments, so it "just works" in environments that don't 119 | configure logging. Since our logging handlers are configured, we disable 120 | the automation via ``setup_console_handler = False``. 121 | 122 | With the filter in place, TransLogger's logger (named the ``wsgi`` logger) will 123 | propagate its log messages to the parent logger (the root logger), sending 124 | its output to the console when we request a page: 125 | 126 | .. code-block:: text 127 | 128 | 00:50:53,694 INFO [wsgiapp] Returning: Hello World! 129 | (content-type: text/plain) 130 | 00:50:53,695 INFO [wsgi] 192.168.1.111 - - [11/Aug/2011:20:09:33 -0700] "GET /hello 131 | HTTP/1.1" 404 - "-" 132 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1.6) Gecko/20070725 133 | Firefox/2.0.0.6" 134 | 135 | To direct TransLogger to an ``access.log`` FileHandler, we need the 136 | following to add a FileHandler (named ``accesslog``) to the list of 137 | handlers, and ensure that the ``wsgi`` logger is configured and uses 138 | this handler accordingly: 139 | 140 | .. 
code-block:: ini 141 | 142 | # Begin logging configuration 143 | 144 | [loggers] 145 | keys = root, wsgiapp, wsgi 146 | 147 | [handlers] 148 | keys = console, accesslog 149 | 150 | [logger_wsgi] 151 | level = INFO 152 | handlers = accesslog 153 | qualname = wsgi 154 | propagate = 0 155 | 156 | [handler_accesslog] 157 | class = FileHandler 158 | args = ('%(here)s/access.log','a') 159 | level = INFO 160 | formatter = generic 161 | 162 | As mentioned above, non-root loggers by default propagate their log records 163 | to the root logger's handlers (currently the console handler). Setting 164 | ``propagate`` to ``0`` (``False``) here disables this; so the ``wsgi`` logger 165 | directs its records only to the ``accesslog`` handler. 166 | 167 | Finally, there's no need to use the ``generic`` formatter with 168 | TransLogger, as TransLogger itself provides all the information we 169 | need. We'll use a formatter that passes-through the log messages as 170 | is. Add a new formatter called ``accesslog`` by including the 171 | following in your configuration file: 172 | 173 | .. code-block:: ini 174 | 175 | [formatters] 176 | keys = generic, accesslog 177 | 178 | [formatter_accesslog] 179 | format = %(message)s 180 | 181 | Finally alter the existing configuration to wire this new 182 | ``accesslog`` formatter into the FileHandler: 183 | 184 | .. code-block:: ini 185 | 186 | [handler_accesslog] 187 | class = FileHandler 188 | args = ('%(here)s/access.log','a') 189 | level = INFO 190 | formatter = accesslog 191 | -------------------------------------------------------------------------------- /docs/rebuild: -------------------------------------------------------------------------------- 1 | make clean html SPHINXBUILD=../env26/bin/sphinx-build 2 | 3 | -------------------------------------------------------------------------------- /docs/reverse-proxy.rst: -------------------------------------------------------------------------------- 1 | .. index:: reverse, proxy, TLS, SSL, https 2 | 3 | .. _using-behind-a-reverse-proxy: 4 | 5 | ============================ 6 | Using Behind a Reverse Proxy 7 | ============================ 8 | 9 | Often people will set up "pure Python" web servers behind reverse proxies, 10 | especially if they need TLS support (Waitress does not natively support TLS). 11 | Even if you don't need TLS support, it's not uncommon to see Waitress and 12 | other pure-Python web servers set up to only handle requests behind a reverse proxy; 13 | these proxies often have lots of useful deployment knobs. 14 | 15 | If you're using Waitress behind a reverse proxy, you'll almost always want 16 | your reverse proxy to pass along the ``Host`` header sent by the client to 17 | Waitress, in either case, as it will be used by most applications to generate 18 | correct URLs. You may also use the proxy headers if passing ``Host`` directly 19 | is not possible, or there are multiple proxies involved. 20 | 21 | For example, when using nginx as a reverse proxy, you might add the following 22 | lines in a ``location`` section. 23 | 24 | .. code-block:: nginx 25 | 26 | proxy_set_header Host $host; 27 | 28 | The Apache directive named ``ProxyPreserveHost`` does something similar when 29 | used as a reverse proxy. 30 | 31 | Unfortunately, even if you pass the ``Host`` header, the Host header does not 32 | contain enough information to regenerate the original URL sent by the client. 
33 | For example, if your reverse proxy accepts HTTPS requests (and therefore URLs 34 | which start with ``https://``), the URLs generated by your application when 35 | used behind a reverse proxy served by Waitress might inappropriately be 36 | ``http://foo`` rather than ``https://foo``. To fix this, you'll want to 37 | change the ``wsgi.url_scheme`` in the WSGI environment before it reaches your 38 | application. You can do this in one of two ways: 39 | 40 | 1. You can pass a ``url_scheme`` configuration variable to the 41 | ``waitress.serve`` function. 42 | 43 | 2. You can pass certain well-known proxy headers from your proxy server and 44 | use waitress's ``trusted_proxy`` support to automatically configure the 45 | WSGI environment. 46 | 47 | Using ``url_scheme`` to set ``wsgi.url_scheme`` 48 | ----------------------------------------------- 49 | 50 | You can have the Waitress server use the ``https`` URL scheme by default: 51 | 52 | .. code-block:: python 53 | 54 | from waitress import serve 55 | serve(wsgiapp, listen='0.0.0.0:8080', url_scheme='https') 56 | 57 | This works if all URLs generated by your application should use the ``https`` 58 | scheme. 59 | 60 | Passing the proxy headers to set up the WSGI environment 61 | --------------------------------------------------------- 62 | 63 | If your proxy accepts both HTTP and HTTPS URLs, and you want your application 64 | to generate the appropriate URL based on the incoming scheme, you'll want to 65 | pass Waitress ``X-Forwarded-Proto`` at a minimum; Waitress can also update 66 | the environment using ``X-Forwarded-Proto``, ``X-Forwarded-For``, 67 | ``X-Forwarded-Host``, and ``X-Forwarded-Port``:: 68 | 69 | proxy_set_header X-Forwarded-Proto $scheme; 70 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 71 | proxy_set_header X-Forwarded-Host $host:$server_port; 72 | proxy_set_header X-Forwarded-Port $server_port; 73 | 74 | When using Apache, ``mod_proxy`` automatically forwards the following headers:: 75 | 76 | X-Forwarded-For 77 | X-Forwarded-Host 78 | X-Forwarded-Server 79 | 80 | You will also want to add to Apache:: 81 | 82 | RequestHeader set X-Forwarded-Proto https 83 | 84 | Configure waitress's ``trusted_proxy_headers`` as appropriate:: 85 | 86 | trusted_proxy_headers = "x-forwarded-for x-forwarded-host x-forwarded-proto x-forwarded-port" 87 | 88 | At this point waitress will set up the WSGI environment using the information 89 | specified in the trusted proxy headers. This will set up the following 90 | variables:: 91 | 92 | HTTP_HOST 93 | SERVER_NAME 94 | SERVER_PORT 95 | REMOTE_ADDR 96 | REMOTE_PORT (if available) 97 | wsgi.url_scheme 98 | 99 | Waitress also has support for the `Forwarded (RFC7239) HTTP header 100 | `_ which is better defined than the ad-hoc 101 | ``X-Forwarded-*`` headers, though support for it is not yet as widespread. 102 | ``Forwarded`` supports similar functionality to the individual 103 | headers, and is mutually exclusive with the ``X-Forwarded-*`` headers. 104 | 105 | To configure waitress to use the ``Forwarded`` header, set:: 106 | 107 | trusted_proxy_headers = "forwarded" 108 | 109 | .. note:: 110 | 111 | You must also configure the Waitress server's ``trusted_proxy`` to 112 | contain the IP address of the proxy.
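When configuring Waitress programmatically rather than through PasteDeploy, the same proxy settings can be passed to ``waitress.serve``. The sketch below assumes the reverse proxy's address is ``10.0.0.1``; substitute your own proxy's IP and the set of headers you actually trust:

.. code-block:: python

    from waitress import serve

    # wsgiapp is your WSGI application, as in the examples above.
    # Only headers listed in trusted_proxy_headers, and only when sent by
    # the trusted_proxy address, are used to rewrite the WSGI environment.
    serve(
        wsgiapp,
        listen='0.0.0.0:8080',
        trusted_proxy='10.0.0.1',
        trusted_proxy_headers='x-forwarded-for x-forwarded-host x-forwarded-proto x-forwarded-port',
    )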
113 | 114 | 115 | Using ``url_prefix`` to influence ``SCRIPT_NAME`` and ``PATH_INFO`` 116 | ------------------------------------------------------------------- 117 | 118 | You can have the Waitress server use a particular url prefix by default for all 119 | URLs generated by downstream applications that take ``SCRIPT_NAME`` into 120 | account.: 121 | 122 | .. code-block:: python 123 | 124 | from waitress import serve 125 | serve(wsgiapp, listen='0.0.0.0:8080', url_prefix='/foo') 126 | 127 | Setting this to any value except the empty string will cause the WSGI 128 | ``SCRIPT_NAME`` value to be that value, minus any trailing slashes you add, and 129 | it will cause the ``PATH_INFO`` of any request which is prefixed with this 130 | value to be stripped of the prefix. This is useful in proxying scenarios where 131 | you wish to forward all traffic to a Waitress server but need URLs generated by 132 | downstream applications to be prefixed with a particular path segment. 133 | -------------------------------------------------------------------------------- /docs/runner.rst: -------------------------------------------------------------------------------- 1 | .. _runner: 2 | 3 | waitress-serve 4 | -------------- 5 | 6 | .. versionadded:: 0.8.4 7 | 8 | Waitress comes bundled with a thin command-line wrapper around the ``waitress.serve`` function called ``waitress-serve``. 9 | This is useful for development, and in production situations where serving of static assets is delegated to a reverse proxy, such as nginx or Apache. 10 | 11 | ``waitress-serve`` takes the very same :ref:`arguments ` as the 12 | ``waitress.serve`` function, but where the function's arguments have 13 | underscores, ``waitress-serve`` uses hyphens. Thus:: 14 | 15 | import myapp 16 | 17 | waitress.serve(myapp.wsgifunc, port=8041, url_scheme='https') 18 | 19 | Is equivalent to:: 20 | 21 | waitress-serve --port=8041 --url-scheme=https myapp:wsgifunc 22 | 23 | Or: 24 | 25 | waitress-serve --port=8041 --url-scheme=https --app=myapp:wsgifunc 26 | 27 | The full argument list is :ref:`given below `. 28 | 29 | Boolean arguments are represented by flags. If you wish to explicitly set a 30 | flag, simply use it by its name. Thus the flag:: 31 | 32 | --expose-tracebacks 33 | 34 | Is equivalent to passing ``expose_tracebacks=True`` to ``waitress.serve``. 35 | 36 | All flags have a negative equivalent. These are prefixed with ``no-``; thus 37 | using the flag:: 38 | 39 | --no-expose-tracebacks 40 | 41 | Is equivalent to passing ``expose_tracebacks=False`` to ``waitress.serve``. 42 | 43 | If at any time you want the full argument list, use the ``--help`` flag. 44 | 45 | Applications are specified similarly to PasteDeploy, where the format is 46 | ``myapp.mymodule:wsgifunc``. As some application frameworks use application 47 | objects, you can use dots to resolve attributes like so: 48 | ``myapp.mymodule:appobj.wsgifunc``. 49 | 50 | A number of frameworks, *web.py* being an example, have factory methods on 51 | their application objects that return usable WSGI functions when called. For 52 | cases like these, ``waitress-serve`` has the ``--call`` flag. Thus:: 53 | 54 | waitress-serve --call myapp.mymodule.app.wsgi_factory 55 | 56 | Would load the ``myapp.mymodule`` module, and call ``app.wsgi_factory`` to get 57 | a WSGI application function to be passed to ``waitress.server``. 58 | 59 | .. note:: 60 | 61 | As of 0.8.6, the current directory is automatically included on 62 | ``sys.path``. 63 | 64 | .. 
_invocation: 65 | 66 | Invocation 67 | ~~~~~~~~~~ 68 | 69 | Usage:: 70 | 71 | waitress-serve [OPTS] [MODULE:OBJECT] 72 | 73 | Common options: 74 | 75 | ``--help`` 76 | Show this information. 77 | 78 | ``--app=MODULE:OBJECT`` 79 | Run the given callable object as the WSGI application. 80 | 81 | You can specify the WSGI application using this flag or as a positional 82 | argument. 83 | 84 | ``--call`` 85 | Call the given object to get the WSGI application. 86 | 87 | ``--host=ADDR`` 88 | Hostname or IP address on which to listen, default is '0.0.0.0', 89 | which means "all IP addresses on this host". 90 | 91 | ``--port=PORT`` 92 | TCP port on which to listen, default is '8080'. 93 | 94 | ``--listen=host:port`` 95 | Tell waitress to listen on an IP/port combination. 96 | 97 | Example: 98 | 99 | --listen=127.0.0.1:8080 100 | --listen=[::1]:8080 101 | --listen=*:8080 102 | 103 | This option may be used multiple times to listen on multiple sockets. 104 | A wildcard for the hostname is also supported and will bind to both 105 | IPv4/IPv6 depending on whether they are enabled or disabled. 106 | 107 | ``--server-name=NAME`` 108 | This is the value that will be placed in the WSGI environment as 109 | ``SERVER_NAME``. The only time this value is used in the WSGI 110 | environment for a request is if the client sent an HTTP/1.0 request without 111 | a ``Host`` header set, and no other proxy headers. 112 | 113 | The default value is ``waitress.invalid``. If your WSGI application is 114 | creating URLs that include this as the hostname and you are using a 115 | reverse proxy setup, you may want to validate that your reverse proxy is 116 | sending the appropriate headers. 117 | 118 | In most situations you will not need to set this value. 119 | 120 | ``--[no-]ipv4`` 121 | Toggle on/off IPv4 support. 122 | 123 | This affects wildcard matching when listening on a wildcard address/port 124 | combination. 125 | 126 | ``--[no-]ipv6`` 127 | Toggle on/off IPv6 support. 128 | 129 | This affects wildcard matching when listening on a wildcard address/port 130 | combination. 131 | 132 | ``--unix-socket=PATH`` 133 | Path of Unix socket. If a socket path is specified, a Unix domain 134 | socket is made instead of the usual inet domain socket. 135 | 136 | Not available on Windows. 137 | 138 | ``--unix-socket-perms=PERMS`` 139 | Octal permissions to use for the Unix domain socket, default is 140 | '600'. 141 | 142 | ``--url-scheme=STR`` 143 | Default ``wsgi.url_scheme`` value, default is 'http'. 144 | 145 | ``--url-prefix=STR`` 146 | The ``SCRIPT_NAME`` WSGI environment value. Setting this to anything 147 | except the empty string will cause the WSGI ``SCRIPT_NAME`` value to be the 148 | value passed minus any trailing slashes you add, and it will cause the 149 | ``PATH_INFO`` of any request which is prefixed with this value to be 150 | stripped of the prefix. Default is the empty string. 151 | 152 | ``--ident=STR`` 153 | Server identity used in the 'Server' header in responses. Default 154 | is 'waitress'. 155 | 156 | ``--trusted-proxy=IP`` 157 | IP address of a remote peer allowed to override various WSGI environment 158 | variables using proxy headers. 159 | 160 | For unix sockets, set this value to ``localhost`` instead of an IP address. 161 | 162 | The value ``*`` (wildcard) may be used to signify that all remote peers are 163 | to be trusted. 164 | 165 | ``--trusted-proxy-count=INT`` 166 | How many proxies we trust when chained.
For example, 167 | 168 | ``X-Forwarded-For: 192.0.2.1, "[2001:db8::1]"`` 169 | 170 | or 171 | 172 | ``Forwarded: for=192.0.2.1, For="[2001:db8::1]"`` 173 | 174 | means there were (potentially), two proxies involved. If we know there is 175 | only 1 valid proxy, then that initial IP address "192.0.2.1" is not trusted 176 | and we completely ignore it. 177 | 178 | If there are two trusted proxies in the path, this value should be set to 179 | 2. If there are more proxies, this value should be set higher. 180 | 181 | Default: ``1`` 182 | 183 | ``--trusted-proxy-headers=LIST`` 184 | Which of the proxy headers should we trust, this is a set where you 185 | either specify "forwarded" or one or more of "x-forwarded-host", "x-forwarded-for", 186 | "x-forwarded-proto", "x-forwarded-port", "x-forwarded-by". 187 | 188 | This list of trusted headers is used when ``trusted_proxy`` is set and will 189 | allow waitress to modify the WSGI environment using the values provided by 190 | the proxy. 191 | 192 | .. warning:: 193 | It is an error to set this value without setting ``--trusted-proxy``. 194 | 195 | .. warning:: 196 | If ``--trusted-proxy`` is set, the default is ``x-forwarded-proto`` to 197 | match older versions of Waitress. Users should explicitly opt-in by 198 | selecting the headers to be trusted as future versions of waitress will 199 | use an empty default. 200 | 201 | ``--[no-]log-untrusted-proxy-headers`` 202 | Should waitress log warning messages about proxy headers that are being 203 | sent from upstream that are not trusted by ``--trusted-proxy-headers`` but 204 | are being cleared due to ``--clear-untrusted-proxy-headers``? 205 | 206 | This may be useful for debugging if you expect your upstream proxy server 207 | to only send specific headers. 208 | 209 | .. warning:: 210 | It is a no-op to set this value without also setting 211 | ``--clear-untrusted-proxy-headers`` and ``--trusted-proxy`` 212 | 213 | ``--[no-]clear-untrusted-proxy-headers`` 214 | This tells Waitress to remove any untrusted proxy headers ("Forwarded", 215 | "X-Forwared-For", "X-Forwarded-By", "X-Forwarded-Host", "X-Forwarded-Port", 216 | "X-Forwarded-Proto") not explicitly allowed by ``--trusted-proxy-headers``. 217 | 218 | This is active by default. 219 | 220 | .. warning:: 221 | It is an error to set this value without setting ``--trusted-proxy``. 222 | 223 | Tuning options: 224 | 225 | ``--threads=INT`` 226 | Number of threads used to process application logic, default is 4. 227 | 228 | ``--backlog=INT`` 229 | Connection backlog for the server. Default is 1024. 230 | 231 | ``--recv-bytes=INT`` 232 | Number of bytes to request when calling ``socket.recv()``. Default is 233 | 8192. 234 | 235 | ``--send-bytes=INT`` 236 | Number of bytes to send to socket.send(). Default is 1. 237 | Multiples of 9000 should avoid partly-filled TCP packets. 238 | 239 | .. deprecated:: 1.3 240 | 241 | ``--outbuf-overflow=INT`` 242 | A temporary file should be created if the pending output is larger than 243 | this. Default is 1048576 (1MB). 244 | 245 | ``--outbuf-high-watermark=INT`` 246 | The app_iter will pause when pending output is larger than this value 247 | and will resume once enough data is written to the socket to fall below 248 | this threshold. Default is 16777216 (16MB). 249 | 250 | ``--inbuf-overflow=INT`` 251 | A temporary file should be created if the pending input is larger than 252 | this. Default is 524288 (512KB). 253 | 254 | ``--connection-limit=INT`` 255 | Stop creating new channels if too many are already active. 
Default is 256 | 100. 257 | 258 | ``--cleanup-interval=INT`` 259 | Minimum seconds between cleaning up inactive channels. Default is 30. See 260 | ``--channel-timeout``. 261 | 262 | ``--channel-timeout=INT`` 263 | Maximum number of seconds to leave inactive connections open. Default is 264 | 120. 'Inactive' is defined as 'has received no data from the client and has 265 | sent no data to the client'. 266 | 267 | ``--channel-request-lookahead=INT`` 268 | Sets the amount of requests we can continue to read from the socket, while 269 | we are processing current requests. The default value won't allow any 270 | lookahead, increase it above ``0`` to enable. 271 | 272 | When enabled this inserts a callable ``waitress.client_disconnected`` into 273 | the environment that allows the task to check if the client disconnected 274 | while waiting for the response at strategic points in the execution and to 275 | cancel the operation. 276 | 277 | Default: ``0`` 278 | 279 | ``--[no-]log-socket-errors`` 280 | Toggle whether premature client disconnect tracebacks ought to be logged. 281 | On by default. 282 | 283 | ``--max-request-header-size=INT`` 284 | Maximum size of all request headers combined. Default is 262144 (256KB). 285 | 286 | ``--max-request-body-size=INT`` 287 | Maximum size of request body. Default is 1073741824 (1GB). 288 | 289 | ``--[no-]expose-tracebacks`` 290 | Toggle whether to expose tracebacks of unhandled exceptions to the client. 291 | Off by default. 292 | 293 | ``--asyncore-loop-timeout=INT`` 294 | The timeout value in seconds passed to ``asyncore.loop()``. Default is 1. 295 | 296 | ``--asyncore-use-poll`` 297 | The use_poll argument passed to ``asyncore.loop()``. Helps overcome open 298 | file descriptors limit. Default is False. 299 | -------------------------------------------------------------------------------- /docs/socket-activation.rst: -------------------------------------------------------------------------------- 1 | Socket Activation 2 | ----------------- 3 | 4 | While waitress does not support the various implementations of socket activation, 5 | for example using systemd or launchd, it is prepared to receive pre-bound sockets 6 | from init systems, process and socket managers, or other launchers that can provide 7 | pre-bound sockets. 8 | 9 | The following shows a code example starting waitress with two pre-bound Internet sockets. 10 | 11 | .. code-block:: python 12 | 13 | import socket 14 | import waitress 15 | 16 | 17 | def app(environ, start_response): 18 | content_length = environ.get('CONTENT_LENGTH', None) 19 | if content_length is not None: 20 | content_length = int(content_length) 21 | body = environ['wsgi.input'].read(content_length) 22 | content_length = str(len(body)) 23 | start_response( 24 | '200 OK', 25 | [('Content-Length', content_length), ('Content-Type', 'text/plain')] 26 | ) 27 | return [body] 28 | 29 | 30 | if __name__ == '__main__': 31 | sockets = [ 32 | socket.socket(socket.AF_INET, socket.SOCK_STREAM), 33 | socket.socket(socket.AF_INET, socket.SOCK_STREAM)] 34 | sockets[0].bind(('127.0.0.1', 8080)) 35 | sockets[1].bind(('127.0.0.1', 9090)) 36 | waitress.serve(app, sockets=sockets) 37 | for socket in sockets: 38 | socket.close() 39 | 40 | Generally, to implement socket activation for a given init system, a wrapper 41 | script uses the init system specific libraries to retrieve the sockets from 42 | the init system. Afterwards it starts waitress, passing the sockets with the parameter 43 | ``sockets``. 
Note that the sockets have to be bound, which all init systems 44 | supporting socket activation do. 45 | 46 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | .. _usage: 2 | 3 | ===== 4 | Usage 5 | ===== 6 | 7 | The following code will run waitress on port 8080 on all available IP addresses, both IPv4 and IPv6. 8 | 9 | .. code-block:: python 10 | 11 | from waitress import serve 12 | serve(wsgiapp, listen='*:8080') 13 | 14 | Press :kbd:`Ctrl-C` (or :kbd:`Ctrl-Break` on Windows) to exit the server. 15 | 16 | The following will run waitress on port 8080 on all available IPv4 addresses, but not IPv6. 17 | 18 | .. code-block:: python 19 | 20 | from waitress import serve 21 | serve(wsgiapp, host='0.0.0.0', port=8080) 22 | 23 | By default Waitress binds to any IPv4 address on port 8080. 24 | You can omit the ``host`` and ``port`` arguments and just call ``serve`` with the WSGI app as a single argument: 25 | 26 | .. code-block:: python 27 | 28 | from waitress import serve 29 | serve(wsgiapp) 30 | 31 | If you want to serve your application through a UNIX domain socket (to serve a downstream HTTP server/proxy such as nginx, lighttpd, and so on), call ``serve`` with the ``unix_socket`` argument: 32 | 33 | .. code-block:: python 34 | 35 | from waitress import serve 36 | serve(wsgiapp, unix_socket='/path/to/unix.sock') 37 | 38 | Needless to say, this configuration won't work on Windows. 39 | 40 | Exceptions generated by your application will be shown on the console by 41 | default. See :ref:`access-logging` to change this. 42 | 43 | There's an entry point for :term:`PasteDeploy` (``egg:waitress#main``) that 44 | lets you use Waitress's WSGI gateway from a configuration file, e.g.: 45 | 46 | .. code-block:: ini 47 | 48 | [server:main] 49 | use = egg:waitress#main 50 | listen = 127.0.0.1:8080 51 | 52 | Using ``host`` and ``port`` is also supported: 53 | 54 | .. code-block:: ini 55 | 56 | [server:main] 57 | host = 127.0.0.1 58 | port = 8080 59 | 60 | The :term:`PasteDeploy` syntax for UNIX domain sockets is analogous: 61 | 62 | .. code-block:: ini 63 | 64 | [server:main] 65 | use = egg:waitress#main 66 | unix_socket = /path/to/unix.sock 67 | 68 | You can find more settings to tweak (arguments to ``waitress.serve`` or 69 | equivalent settings in PasteDeploy) in :ref:`arguments`. 70 | 71 | Additionally, there is a command line runner called ``waitress-serve``, which 72 | can be used in development and in situations where the likes of 73 | :term:`PasteDeploy` is not necessary: 74 | 75 | .. code-block:: bash 76 | 77 | # Listen on both IPv4 and IPv6 on port 8041 78 | waitress-serve --listen=*:8041 myapp:wsgifunc 79 | 80 | # Listen on only IPv4 on port 8041 81 | waitress-serve --port=8041 myapp:wsgifunc 82 | 83 | Heroku 84 | ------ 85 | 86 | Waitress can be used to serve WSGI apps on Heroku, include waitress in your requirements.txt file and update the Procfile as following: 87 | 88 | .. code-block:: bash 89 | 90 | web: waitress-serve \ 91 | --listen "*:$PORT" \ 92 | --trusted-proxy '*' \ 93 | --trusted-proxy-headers 'x-forwarded-for x-forwarded-proto x-forwarded-port' \ 94 | --log-untrusted-proxy-headers \ 95 | --clear-untrusted-proxy-headers \ 96 | --threads ${WEB_CONCURRENCY:-4} \ 97 | myapp:wsgifunc 98 | 99 | The proxy config informs Waitress to trust the `forwarding headers `_ set by the Heroku load balancer. 
100 | It also allows for setting the standard ``WEB_CONCURRENCY`` environment variable to tweak the number of requests handled by Waitress at a time. 101 | 102 | Note that Waitress uses a thread-based model and careful effort should be taken to ensure that requests do not take longer than 30 seconds or Heroku will inform the client that the request failed even though the request is still being processed by Waitress and occupying a thread until it completes. 103 | 104 | For more information on this, see :ref:`runner`. 105 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools >= 41"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.black] 6 | target-version = ['py39', 'py310', 'py311', 'py312', 'py313'] 7 | exclude = ''' 8 | /( 9 | \.git 10 | | .tox 11 | )/ 12 | ''' 13 | 14 | # This next section only exists for people that have their editors 15 | # automatically call isort, black already sorts entries on its own when run. 16 | [tool.isort] 17 | profile = "black" 18 | multi_line_output = 3 19 | src_paths = ["src", "tests"] 20 | skip_glob = ["docs/*"] 21 | include_trailing_comma = true 22 | force_grid_wrap = false 23 | combine_as_imports = true 24 | line_length = 88 25 | force_sort_within_sections = true 26 | default_section = "THIRDPARTY" 27 | known_first_party = "waitress" 28 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = waitress 3 | version = 3.0.2 4 | description = Waitress WSGI server 5 | long_description = file: README.rst, CHANGES.txt 6 | long_description_content_type = text/x-rst 7 | keywords = waitress wsgi server http 8 | license = ZPL 2.1 9 | classifiers = 10 | Development Status :: 6 - Mature 11 | Environment :: Web Environment 12 | Intended Audience :: Developers 13 | License :: OSI Approved :: Zope Public License 14 | Programming Language :: Python 15 | Programming Language :: Python :: 3 16 | Programming Language :: Python :: 3.9 17 | Programming Language :: Python :: 3.10 18 | Programming Language :: Python :: 3.11 19 | Programming Language :: Python :: 3.12 20 | Programming Language :: Python :: 3.13 21 | Programming Language :: Python :: Implementation :: CPython 22 | Programming Language :: Python :: Implementation :: PyPy 23 | Operating System :: OS Independent 24 | Topic :: Internet :: WWW/HTTP 25 | Topic :: Internet :: WWW/HTTP :: WSGI 26 | url = https://github.com/Pylons/waitress 27 | project_urls = 28 | Documentation = https://docs.pylonsproject.org/projects/waitress/en/latest/index.html 29 | Changelog = https://docs.pylonsproject.org/projects/waitress/en/latest/index.html#change-history 30 | Issue Tracker = https://github.com/Pylons/waitress/issues 31 | 32 | author = Zope Foundation and Contributors 33 | author_email = zope-dev@zope.org 34 | maintainer = Pylons Project 35 | maintainer_email = pylons-discuss@googlegroups.com 36 | 37 | [options] 38 | package_dir= 39 | =src 40 | packages = find: 41 | python_requires = >=3.9.0 42 | 43 | [options.entry_points] 44 | paste.server_runner = 45 | main = waitress:serve_paste 46 | console_scripts = 47 | waitress-serve = waitress.runner:run 48 | 49 | [options.packages.find] 50 | where = src 51 | 52 | [options.extras_require] 53 | testing = 54 | pytest 55 | pytest-cov 56 | coverage>=7.6.0 57 | 58 | 
docs = 59 | Sphinx>=1.8.1 60 | docutils 61 | pylons-sphinx-themes>=1.0.9 62 | 63 | [tool:pytest] 64 | python_files = test_*.py 65 | # For the benefit of test_wasyncore.py 66 | python_classes = Test* 67 | testpaths = 68 | tests 69 | addopts = --cov -W always 70 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /src/waitress/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from waitress.server import create_server 4 | 5 | 6 | def serve(app, **kw): 7 | _server = kw.pop("_server", create_server) # test shim 8 | _quiet = kw.pop("_quiet", False) # test shim 9 | _profile = kw.pop("_profile", False) # test shim 10 | if not _quiet: # pragma: no cover 11 | # idempotent if logging has already been set up 12 | logging.basicConfig() 13 | server = _server(app, **kw) 14 | if not _quiet: # pragma: no cover 15 | server.print_listen("Serving on http://{}:{}") 16 | if _profile: # pragma: no cover 17 | profile("server.run()", globals(), locals(), (), False) 18 | else: 19 | server.run() 20 | 21 | 22 | def serve_paste(app, global_conf, **kw): 23 | serve(app, **kw) 24 | return 0 25 | 26 | 27 | def profile(cmd, globals, locals, sort_order, callers): # pragma: no cover 28 | # runs a command under the profiler and print profiling output at shutdown 29 | import os 30 | import profile 31 | import pstats 32 | import tempfile 33 | 34 | fd, fn = tempfile.mkstemp() 35 | try: 36 | profile.runctx(cmd, globals, locals, fn) 37 | stats = pstats.Stats(fn) 38 | stats.strip_dirs() 39 | # calls,time,cumulative and cumulative,calls,time are useful 40 | stats.sort_stats(*(sort_order or ("cumulative", "calls", "time"))) 41 | if callers: 42 | stats.print_callers(0.3) 43 | else: 44 | stats.print_stats(0.3) 45 | finally: 46 | os.remove(fn) 47 | -------------------------------------------------------------------------------- /src/waitress/__main__.py: -------------------------------------------------------------------------------- 1 | from waitress.runner import run # pragma nocover 2 | 3 | run() # pragma nocover 4 | -------------------------------------------------------------------------------- /src/waitress/buffers.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2001-2004 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Buffers 15 | """ 16 | from io import BytesIO 17 | 18 | # copy_bytes controls the size of temp. strings for shuffling data around. 19 | COPY_BYTES = 1 << 18 # 256K 20 | 21 | # The maximum number of bytes to buffer in a simple string. 
22 | STRBUF_LIMIT = 8192 23 | 24 | 25 | class FileBasedBuffer: 26 | remain = 0 27 | 28 | def __init__(self, file, from_buffer=None): 29 | self.file = file 30 | if from_buffer is not None: 31 | from_file = from_buffer.getfile() 32 | read_pos = from_file.tell() 33 | from_file.seek(0) 34 | while True: 35 | data = from_file.read(COPY_BYTES) 36 | if not data: 37 | break 38 | file.write(data) 39 | self.remain = int(file.tell() - read_pos) 40 | from_file.seek(read_pos) 41 | file.seek(read_pos) 42 | 43 | def __len__(self): 44 | return self.remain 45 | 46 | def __bool__(self): 47 | return True 48 | 49 | def append(self, s): 50 | file = self.file 51 | read_pos = file.tell() 52 | file.seek(0, 2) 53 | file.write(s) 54 | file.seek(read_pos) 55 | self.remain = self.remain + len(s) 56 | 57 | def get(self, numbytes=-1, skip=False): 58 | file = self.file 59 | if not skip: 60 | read_pos = file.tell() 61 | if numbytes < 0: 62 | # Read all 63 | res = file.read() 64 | else: 65 | res = file.read(numbytes) 66 | if skip: 67 | self.remain -= len(res) 68 | else: 69 | file.seek(read_pos) 70 | return res 71 | 72 | def skip(self, numbytes, allow_prune=0): 73 | if self.remain < numbytes: 74 | raise ValueError( 75 | "Can't skip %d bytes in buffer of %d bytes" % (numbytes, self.remain) 76 | ) 77 | self.file.seek(numbytes, 1) 78 | self.remain = self.remain - numbytes 79 | 80 | def newfile(self): 81 | raise NotImplementedError() 82 | 83 | def prune(self): 84 | file = self.file 85 | if self.remain == 0: 86 | read_pos = file.tell() 87 | file.seek(0, 2) 88 | sz = file.tell() 89 | file.seek(read_pos) 90 | if sz == 0: 91 | # Nothing to prune. 92 | return 93 | nf = self.newfile() 94 | while True: 95 | data = file.read(COPY_BYTES) 96 | if not data: 97 | break 98 | nf.write(data) 99 | self.file = nf 100 | 101 | def getfile(self): 102 | return self.file 103 | 104 | def close(self): 105 | if hasattr(self.file, "close"): 106 | self.file.close() 107 | self.remain = 0 108 | 109 | 110 | class TempfileBasedBuffer(FileBasedBuffer): 111 | def __init__(self, from_buffer=None): 112 | FileBasedBuffer.__init__(self, self.newfile(), from_buffer) 113 | 114 | def newfile(self): 115 | from tempfile import TemporaryFile 116 | 117 | return TemporaryFile("w+b") 118 | 119 | 120 | class BytesIOBasedBuffer(FileBasedBuffer): 121 | def __init__(self, from_buffer=None): 122 | if from_buffer is not None: 123 | FileBasedBuffer.__init__(self, BytesIO(), from_buffer) 124 | else: 125 | # Shortcut. 
:-) 126 | self.file = BytesIO() 127 | 128 | def newfile(self): 129 | return BytesIO() 130 | 131 | 132 | def _is_seekable(fp): 133 | if hasattr(fp, "seekable"): 134 | return fp.seekable() 135 | return hasattr(fp, "seek") and hasattr(fp, "tell") 136 | 137 | 138 | class ReadOnlyFileBasedBuffer(FileBasedBuffer): 139 | # used as wsgi.file_wrapper 140 | 141 | def __init__(self, file, block_size=32768): 142 | self.file = file 143 | self.block_size = block_size # for __iter__ 144 | 145 | # This is for the benefit of anyone that is attempting to wrap this 146 | # wsgi.file_wrapper in a WSGI middleware and wants to seek, this is 147 | # useful for instance for support Range requests 148 | if _is_seekable(self.file): 149 | if hasattr(self.file, "seekable"): 150 | self.seekable = self.file.seekable 151 | 152 | self.seek = self.file.seek 153 | self.tell = self.file.tell 154 | 155 | def prepare(self, size=None): 156 | if _is_seekable(self.file): 157 | start_pos = self.file.tell() 158 | self.file.seek(0, 2) 159 | end_pos = self.file.tell() 160 | self.file.seek(start_pos) 161 | fsize = end_pos - start_pos 162 | if size is None: 163 | self.remain = fsize 164 | else: 165 | self.remain = min(fsize, size) 166 | return self.remain 167 | 168 | def get(self, numbytes=-1, skip=False): 169 | # never read more than self.remain (it can be user-specified) 170 | if numbytes == -1 or numbytes > self.remain: 171 | numbytes = self.remain 172 | file = self.file 173 | if not skip: 174 | read_pos = file.tell() 175 | res = file.read(numbytes) 176 | if skip: 177 | self.remain -= len(res) 178 | else: 179 | file.seek(read_pos) 180 | return res 181 | 182 | def __iter__(self): # called by task if self.filelike has no seek/tell 183 | return self 184 | 185 | def next(self): 186 | val = self.file.read(self.block_size) 187 | if not val: 188 | raise StopIteration 189 | return val 190 | 191 | __next__ = next # py3 192 | 193 | def append(self, s): 194 | raise NotImplementedError 195 | 196 | 197 | class OverflowableBuffer: 198 | """ 199 | This buffer implementation has four stages: 200 | - No data 201 | - Bytes-based buffer 202 | - BytesIO-based buffer 203 | - Temporary file storage 204 | The first two stages are fastest for simple transfers. 205 | """ 206 | 207 | overflowed = False 208 | buf = None 209 | strbuf = b"" # Bytes-based buffer. 210 | 211 | def __init__(self, overflow): 212 | # overflow is the maximum to be stored in a StringIO buffer. 
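ReadOnlyFileBasedBuffer above is the object waitress installs as environ["wsgi.file_wrapper"]. A hedged sketch of an application taking advantage of it (the file path and block size are made up for illustration):

    import os

    def send_file(environ, start_response):
        path = "/var/data/archive.bin"  # hypothetical file
        start_response("200 OK", [
            ("Content-Type", "application/octet-stream"),
            ("Content-Length", str(os.path.getsize(path))),
        ])
        file_wrapper = environ.get("wsgi.file_wrapper")
        body = open(path, "rb")
        if file_wrapper is not None:
            # Under waitress this returns a ReadOnlyFileBasedBuffer; because the
            # file is seekable, the server can call prepare() and stream it
            # efficiently instead of iterating block by block.
            return file_wrapper(body, 32768)
        return iter(lambda: body.read(32768), b"")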
213 | self.overflow = overflow 214 | 215 | def __len__(self): 216 | buf = self.buf 217 | if buf is not None: 218 | # use buf.__len__ rather than len(buf) FBO of not getting 219 | # OverflowError on Python 2 220 | return buf.__len__() 221 | else: 222 | return self.strbuf.__len__() 223 | 224 | def __bool__(self): 225 | # use self.__len__ rather than len(self) FBO of not getting 226 | # OverflowError on Python 2 227 | return self.__len__() > 0 228 | 229 | def _create_buffer(self): 230 | strbuf = self.strbuf 231 | if len(strbuf) >= self.overflow: 232 | self._set_large_buffer() 233 | else: 234 | self._set_small_buffer() 235 | buf = self.buf 236 | if strbuf: 237 | buf.append(self.strbuf) 238 | self.strbuf = b"" 239 | return buf 240 | 241 | def _set_small_buffer(self): 242 | oldbuf = self.buf 243 | self.buf = BytesIOBasedBuffer(oldbuf) 244 | 245 | # Attempt to close the old buffer 246 | if hasattr(oldbuf, "close"): 247 | oldbuf.close() 248 | 249 | self.overflowed = False 250 | 251 | def _set_large_buffer(self): 252 | oldbuf = self.buf 253 | self.buf = TempfileBasedBuffer(oldbuf) 254 | 255 | # Attempt to close the old buffer 256 | if hasattr(oldbuf, "close"): 257 | oldbuf.close() 258 | 259 | self.overflowed = True 260 | 261 | def append(self, s): 262 | buf = self.buf 263 | if buf is None: 264 | strbuf = self.strbuf 265 | if len(strbuf) + len(s) < STRBUF_LIMIT: 266 | self.strbuf = strbuf + s 267 | return 268 | buf = self._create_buffer() 269 | buf.append(s) 270 | # use buf.__len__ rather than len(buf) FBO of not getting 271 | # OverflowError on Python 2 272 | sz = buf.__len__() 273 | if not self.overflowed: 274 | if sz >= self.overflow: 275 | self._set_large_buffer() 276 | 277 | def get(self, numbytes=-1, skip=False): 278 | buf = self.buf 279 | if buf is None: 280 | strbuf = self.strbuf 281 | if not skip: 282 | return strbuf 283 | buf = self._create_buffer() 284 | return buf.get(numbytes, skip) 285 | 286 | def skip(self, numbytes, allow_prune=False): 287 | buf = self.buf 288 | if buf is None: 289 | if allow_prune and numbytes == len(self.strbuf): 290 | # We could slice instead of converting to 291 | # a buffer, but that would eat up memory in 292 | # large transfers. 293 | self.strbuf = b"" 294 | return 295 | buf = self._create_buffer() 296 | buf.skip(numbytes, allow_prune) 297 | 298 | def prune(self): 299 | """ 300 | A potentially expensive operation that removes all data 301 | already retrieved from the buffer. 302 | """ 303 | buf = self.buf 304 | if buf is None: 305 | self.strbuf = b"" 306 | return 307 | buf.prune() 308 | if self.overflowed: 309 | # use buf.__len__ rather than len(buf) FBO of not getting 310 | # OverflowError on Python 2 311 | sz = buf.__len__() 312 | if sz < self.overflow: 313 | # Revert to a faster buffer. 314 | self._set_small_buffer() 315 | 316 | def getfile(self): 317 | buf = self.buf 318 | if buf is None: 319 | buf = self._create_buffer() 320 | return buf.getfile() 321 | 322 | def close(self): 323 | buf = self.buf 324 | if buf is not None: 325 | buf.close() 326 | -------------------------------------------------------------------------------- /src/waitress/compat.py: -------------------------------------------------------------------------------- 1 | import platform 2 | 3 | # Fix for issue reported in https://github.com/Pylons/waitress/issues/138, 4 | # Python on Windows may not define IPPROTO_IPV6 in socket. 
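To make the staged behaviour of OverflowableBuffer concrete, here is a small hedged sketch (sizes are illustrative; 1048576 matches the documented outbuf-overflow default, and STRBUF_LIMIT is the 8192-byte constant defined above):

    from waitress.buffers import OverflowableBuffer

    buf = OverflowableBuffer(overflow=1048576)
    buf.append(b"x" * 100)           # still held in the plain bytes strbuf
    buf.append(b"y" * 20000)         # crosses STRBUF_LIMIT: promoted to a BytesIO buffer
    buf.append(b"z" * 2_000_000)     # crosses overflow: promoted to a temporary file
    first = buf.get(100, skip=True)  # read and consume the first 100 bytes
    buf.prune()                      # may fall back to a smaller buffer once drained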
5 | import socket 6 | import sys 7 | import warnings 8 | 9 | # True if we are running on Windows 10 | WIN = platform.system() == "Windows" 11 | 12 | MAXINT = sys.maxsize 13 | HAS_IPV6 = socket.has_ipv6 14 | 15 | if hasattr(socket, "IPPROTO_IPV6") and hasattr(socket, "IPV6_V6ONLY"): 16 | IPPROTO_IPV6 = socket.IPPROTO_IPV6 17 | IPV6_V6ONLY = socket.IPV6_V6ONLY 18 | else: # pragma: no cover 19 | if WIN: 20 | IPPROTO_IPV6 = 41 21 | IPV6_V6ONLY = 27 22 | else: 23 | warnings.warn( 24 | "OS does not support required IPv6 socket flags. This is requirement " 25 | "for Waitress. Please open an issue at https://github.com/Pylons/waitress. " 26 | "IPv6 support has been disabled.", 27 | RuntimeWarning, 28 | ) 29 | HAS_IPV6 = False 30 | -------------------------------------------------------------------------------- /src/waitress/proxy_headers.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | from .utilities import BadRequest, logger, undquote 4 | 5 | PROXY_HEADERS = frozenset( 6 | { 7 | "X_FORWARDED_FOR", 8 | "X_FORWARDED_HOST", 9 | "X_FORWARDED_PROTO", 10 | "X_FORWARDED_PORT", 11 | "X_FORWARDED_BY", 12 | "FORWARDED", 13 | } 14 | ) 15 | 16 | Forwarded = namedtuple("Forwarded", ["by", "for_", "host", "proto"]) 17 | 18 | 19 | class MalformedProxyHeader(Exception): 20 | def __init__(self, header, reason, value): 21 | self.header = header 22 | self.reason = reason 23 | self.value = value 24 | super().__init__(header, reason, value) 25 | 26 | 27 | def proxy_headers_middleware( 28 | app, 29 | trusted_proxy=None, 30 | trusted_proxy_count=1, 31 | trusted_proxy_headers=None, 32 | clear_untrusted=True, 33 | log_untrusted=False, 34 | logger=logger, 35 | ): 36 | def translate_proxy_headers(environ, start_response): 37 | untrusted_headers = PROXY_HEADERS 38 | remote_peer = environ["REMOTE_ADDR"] 39 | if trusted_proxy == "*" or remote_peer == trusted_proxy: 40 | try: 41 | untrusted_headers = parse_proxy_headers( 42 | environ, 43 | trusted_proxy_count=trusted_proxy_count, 44 | trusted_proxy_headers=trusted_proxy_headers, 45 | logger=logger, 46 | ) 47 | except MalformedProxyHeader as ex: 48 | logger.warning( 49 | 'Malformed proxy header "%s" from "%s": %s value: %s', 50 | ex.header, 51 | remote_peer, 52 | ex.reason, 53 | ex.value, 54 | ) 55 | error = BadRequest(f'Header "{ex.header}" malformed.') 56 | return error.wsgi_response(environ, start_response) 57 | 58 | # Clear out the untrusted proxy headers 59 | if clear_untrusted: 60 | clear_untrusted_headers( 61 | environ, untrusted_headers, log_warning=log_untrusted, logger=logger 62 | ) 63 | 64 | return app(environ, start_response) 65 | 66 | return translate_proxy_headers 67 | 68 | 69 | def parse_proxy_headers( 70 | environ, trusted_proxy_count, trusted_proxy_headers, logger=logger 71 | ): 72 | if trusted_proxy_headers is None: 73 | trusted_proxy_headers = set() 74 | 75 | forwarded_for = [] 76 | forwarded_host = forwarded_proto = forwarded_port = forwarded = "" 77 | client_addr = None 78 | untrusted_headers = set(PROXY_HEADERS) 79 | 80 | def raise_for_multiple_values(): 81 | raise ValueError("Unspecified behavior for multiple values found in header") 82 | 83 | if "x-forwarded-for" in trusted_proxy_headers and "HTTP_X_FORWARDED_FOR" in environ: 84 | try: 85 | forwarded_for = [] 86 | 87 | raw_forwarded_for = environ["HTTP_X_FORWARDED_FOR"].split(",") 88 | for forward_hop in raw_forwarded_for: 89 | forward_hop = forward_hop.strip() 90 | forward_hop = undquote(forward_hop) 91 | 92 | # Make 
sure that all IPv6 addresses are surrounded by brackets, 93 | # this is assuming that the IPv6 representation here does not 94 | # include a port number. 95 | 96 | if "." not in forward_hop and ( 97 | ":" in forward_hop and forward_hop[-1] != "]" 98 | ): 99 | forwarded_for.append(f"[{forward_hop}]") 100 | else: 101 | forwarded_for.append(forward_hop) 102 | 103 | forwarded_for = forwarded_for[-trusted_proxy_count:] 104 | client_addr = forwarded_for[0] 105 | 106 | untrusted_headers.remove("X_FORWARDED_FOR") 107 | environ["HTTP_X_FORWARDED_FOR"] = ",".join( 108 | raw_forwarded_for[-trusted_proxy_count:] 109 | ).strip() 110 | except Exception as ex: 111 | raise MalformedProxyHeader( 112 | "X-Forwarded-For", str(ex), environ["HTTP_X_FORWARDED_FOR"] 113 | ) 114 | 115 | if ( 116 | "x-forwarded-host" in trusted_proxy_headers 117 | and "HTTP_X_FORWARDED_HOST" in environ 118 | ): 119 | try: 120 | forwarded_host_multiple = [] 121 | 122 | raw_forwarded_host = environ["HTTP_X_FORWARDED_HOST"].split(",") 123 | for forward_host in raw_forwarded_host: 124 | forward_host = forward_host.strip() 125 | forward_host = undquote(forward_host) 126 | forwarded_host_multiple.append(forward_host) 127 | 128 | forwarded_host_multiple = forwarded_host_multiple[-trusted_proxy_count:] 129 | forwarded_host = forwarded_host_multiple[0] 130 | 131 | untrusted_headers.remove("X_FORWARDED_HOST") 132 | environ["HTTP_X_FORWARDED_HOST"] = ",".join( 133 | raw_forwarded_host[-trusted_proxy_count:] 134 | ).strip() 135 | except Exception as ex: 136 | raise MalformedProxyHeader( 137 | "X-Forwarded-Host", str(ex), environ["HTTP_X_FORWARDED_HOST"] 138 | ) 139 | 140 | if "x-forwarded-proto" in trusted_proxy_headers: 141 | try: 142 | forwarded_proto = undquote(environ.get("HTTP_X_FORWARDED_PROTO", "")) 143 | if "," in forwarded_proto: 144 | raise_for_multiple_values() 145 | untrusted_headers.remove("X_FORWARDED_PROTO") 146 | except Exception as ex: 147 | raise MalformedProxyHeader( 148 | "X-Forwarded-Proto", str(ex), environ["HTTP_X_FORWARDED_PROTO"] 149 | ) 150 | 151 | if "x-forwarded-port" in trusted_proxy_headers: 152 | try: 153 | forwarded_port = undquote(environ.get("HTTP_X_FORWARDED_PORT", "")) 154 | if "," in forwarded_port: 155 | raise_for_multiple_values() 156 | untrusted_headers.remove("X_FORWARDED_PORT") 157 | except Exception as ex: 158 | raise MalformedProxyHeader( 159 | "X-Forwarded-Port", str(ex), environ["HTTP_X_FORWARDED_PORT"] 160 | ) 161 | 162 | if "x-forwarded-by" in trusted_proxy_headers: 163 | # Waitress itself does not use X-Forwarded-By, but we can not 164 | # remove it so it can get set in the environ 165 | untrusted_headers.remove("X_FORWARDED_BY") 166 | 167 | if "forwarded" in trusted_proxy_headers: 168 | forwarded = environ.get("HTTP_FORWARDED", None) 169 | untrusted_headers = PROXY_HEADERS - {"FORWARDED"} 170 | 171 | # If the Forwarded header exists, it gets priority 172 | if forwarded: 173 | proxies = [] 174 | raw_forwarded = forwarded.split(",") 175 | try: 176 | for forwarded_element in raw_forwarded: 177 | # Remove whitespace that may have been introduced when 178 | # appending a new entry 179 | forwarded_element = forwarded_element.strip() 180 | 181 | forwarded_for = forwarded_host = forwarded_proto = "" 182 | forwarded_port = forwarded_by = "" 183 | 184 | for pair in forwarded_element.split(";"): 185 | pair = pair.lower() 186 | 187 | if not pair: 188 | continue 189 | 190 | token, equals, value = pair.partition("=") 191 | 192 | if equals != "=": 193 | raise ValueError('Invalid forwarded-pair missing "="') 
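As a concrete, hedged illustration of the X-Forwarded-For handling earlier in this function: with trusted_proxy_count=1 only the last hop in the chain is kept (the addresses below are illustrative):

    from waitress.proxy_headers import parse_proxy_headers

    environ = {
        "REMOTE_ADDR": "203.0.113.10",  # the proxy that actually connected to us
        "HTTP_X_FORWARDED_FOR": "198.51.100.2, 192.0.2.1",
    }
    parse_proxy_headers(
        environ,
        trusted_proxy_count=1,
        trusted_proxy_headers={"x-forwarded-for"},
    )
    # The untrusted leading entry is discarded:
    #   environ["REMOTE_ADDR"] == "192.0.2.1"
    #   environ["HTTP_X_FORWARDED_FOR"] == "192.0.2.1"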
194 | 195 | if token.strip() != token: 196 | raise ValueError("Token may not be surrounded by whitespace") 197 | 198 | if value.strip() != value: 199 | raise ValueError("Value may not be surrounded by whitespace") 200 | 201 | if token == "by": 202 | forwarded_by = undquote(value) 203 | 204 | elif token == "for": 205 | forwarded_for = undquote(value) 206 | 207 | elif token == "host": 208 | forwarded_host = undquote(value) 209 | 210 | elif token == "proto": 211 | forwarded_proto = undquote(value) 212 | 213 | else: 214 | logger.warning("Unknown Forwarded token: %s" % token) 215 | 216 | proxies.append( 217 | Forwarded( 218 | forwarded_by, forwarded_for, forwarded_host, forwarded_proto 219 | ) 220 | ) 221 | except Exception as ex: 222 | raise MalformedProxyHeader("Forwarded", str(ex), environ["HTTP_FORWARDED"]) 223 | 224 | proxies = proxies[-trusted_proxy_count:] 225 | environ["HTTP_FORWARDED"] = ",".join( 226 | raw_forwarded[-trusted_proxy_count:] 227 | ).strip() 228 | 229 | # Iterate backwards and fill in some values, the oldest entry that 230 | # contains the information we expect is the one we use. We expect 231 | # that intermediate proxies may re-write the host header or proto, 232 | # but the oldest entry is the one that contains the information the 233 | # client expects when generating URL's 234 | # 235 | # Forwarded: for="[2001:db8::1]";host="example.com:8443";proto="https" 236 | # Forwarded: for=192.0.2.1;host="example.internal:8080" 237 | # 238 | # (After HTTPS header folding) should mean that we use as values: 239 | # 240 | # Host: example.com 241 | # Protocol: https 242 | # Port: 8443 243 | 244 | for proxy in proxies[::-1]: 245 | client_addr = proxy.for_ or client_addr 246 | forwarded_host = proxy.host or forwarded_host 247 | forwarded_proto = proxy.proto or forwarded_proto 248 | 249 | if forwarded_proto: 250 | forwarded_proto = forwarded_proto.lower() 251 | 252 | if forwarded_proto not in {"http", "https"}: 253 | raise MalformedProxyHeader( 254 | "Forwarded Proto=" if forwarded else "X-Forwarded-Proto", 255 | "unsupported proto value", 256 | forwarded_proto, 257 | ) 258 | 259 | # Set the URL scheme to the proxy provided proto 260 | environ["wsgi.url_scheme"] = forwarded_proto 261 | 262 | if not forwarded_port: 263 | if forwarded_proto == "http": 264 | forwarded_port = "80" 265 | 266 | if forwarded_proto == "https": 267 | forwarded_port = "443" 268 | 269 | if forwarded_host: 270 | if ":" in forwarded_host and forwarded_host[-1] != "]": 271 | host, port = forwarded_host.rsplit(":", 1) 272 | host, port = host.strip(), str(port) 273 | 274 | # We trust the port in the Forwarded Host/X-Forwarded-Host over 275 | # X-Forwarded-Port, or whatever we got from Forwarded 276 | # Proto/X-Forwarded-Proto. 
277 | 278 | if forwarded_port != port: 279 | forwarded_port = port 280 | 281 | # We trust the proxy server's forwarded Host 282 | environ["SERVER_NAME"] = host 283 | environ["HTTP_HOST"] = forwarded_host 284 | else: 285 | # We trust the proxy server's forwarded Host 286 | environ["SERVER_NAME"] = forwarded_host 287 | environ["HTTP_HOST"] = forwarded_host 288 | 289 | if forwarded_port: 290 | if forwarded_port not in {"443", "80"}: 291 | environ["HTTP_HOST"] = "{}:{}".format( 292 | forwarded_host, forwarded_port 293 | ) 294 | elif forwarded_port == "80" and environ["wsgi.url_scheme"] != "http": 295 | environ["HTTP_HOST"] = "{}:{}".format( 296 | forwarded_host, forwarded_port 297 | ) 298 | elif forwarded_port == "443" and environ["wsgi.url_scheme"] != "https": 299 | environ["HTTP_HOST"] = "{}:{}".format( 300 | forwarded_host, forwarded_port 301 | ) 302 | 303 | if forwarded_port: 304 | environ["SERVER_PORT"] = str(forwarded_port) 305 | 306 | if client_addr: 307 | if ":" in client_addr and client_addr[-1] != "]": 308 | addr, port = client_addr.rsplit(":", 1) 309 | environ["REMOTE_ADDR"] = strip_brackets(addr.strip()) 310 | environ["REMOTE_PORT"] = port.strip() 311 | else: 312 | environ["REMOTE_ADDR"] = strip_brackets(client_addr.strip()) 313 | environ["REMOTE_HOST"] = environ["REMOTE_ADDR"] 314 | 315 | return untrusted_headers 316 | 317 | 318 | def strip_brackets(addr): 319 | if addr[0] == "[" and addr[-1] == "]": 320 | return addr[1:-1] 321 | return addr 322 | 323 | 324 | def clear_untrusted_headers( 325 | environ, untrusted_headers, log_warning=False, logger=logger 326 | ): 327 | untrusted_headers_removed = [ 328 | header 329 | for header in untrusted_headers 330 | if environ.pop("HTTP_" + header, False) is not False 331 | ] 332 | 333 | if log_warning and untrusted_headers_removed: 334 | untrusted_headers_removed = [ 335 | "-".join(x.capitalize() for x in header.split("_")) 336 | for header in untrusted_headers_removed 337 | ] 338 | logger.warning( 339 | "Removed untrusted headers (%s). Waitress recommends these be " 340 | "removed upstream.", 341 | ", ".join(untrusted_headers_removed), 342 | ) 343 | -------------------------------------------------------------------------------- /src/waitress/receiver.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2001, 2002 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 
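proxy_headers_middleware above is normally applied for you by the server when trusted-proxy settings are configured, but it can also wrap an application directly; a hedged sketch (the application and peer address are illustrative):

    from waitress.proxy_headers import proxy_headers_middleware

    def app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        # REMOTE_ADDR / wsgi.url_scheme now reflect the trusted proxy headers.
        body = "{} {}".format(environ["REMOTE_ADDR"], environ["wsgi.url_scheme"])
        return [body.encode("utf-8")]

    wrapped = proxy_headers_middleware(
        app,
        trusted_proxy="203.0.113.10",   # only this peer may rewrite the environ
        trusted_proxy_count=1,
        trusted_proxy_headers={"x-forwarded-for", "x-forwarded-proto"},
        clear_untrusted=True,           # drop any proxy header not trusted above
    )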
12 | # 13 | ############################################################################## 14 | """Data Chunk Receiver 15 | """ 16 | 17 | from waitress.rfc7230 import CHUNK_EXT_RE, ONLY_HEXDIG_RE 18 | from waitress.utilities import BadRequest, find_double_newline 19 | 20 | 21 | class FixedStreamReceiver: 22 | # See IStreamConsumer 23 | completed = False 24 | error = None 25 | 26 | def __init__(self, cl, buf): 27 | self.remain = cl 28 | self.buf = buf 29 | 30 | def __len__(self): 31 | return self.buf.__len__() 32 | 33 | def received(self, data): 34 | "See IStreamConsumer" 35 | rm = self.remain 36 | 37 | if rm < 1: 38 | self.completed = True # Avoid any chance of spinning 39 | 40 | return 0 41 | datalen = len(data) 42 | 43 | if rm <= datalen: 44 | self.buf.append(data[:rm]) 45 | self.remain = 0 46 | self.completed = True 47 | 48 | return rm 49 | else: 50 | self.buf.append(data) 51 | self.remain -= datalen 52 | 53 | return datalen 54 | 55 | def getfile(self): 56 | return self.buf.getfile() 57 | 58 | def getbuf(self): 59 | return self.buf 60 | 61 | 62 | class ChunkedReceiver: 63 | chunk_remainder = 0 64 | validate_chunk_end = False 65 | control_line = b"" 66 | chunk_end = b"" 67 | all_chunks_received = False 68 | trailer = b"" 69 | completed = False 70 | error = None 71 | 72 | # max_control_line = 1024 73 | # max_trailer = 65536 74 | 75 | def __init__(self, buf): 76 | self.buf = buf 77 | 78 | def __len__(self): 79 | return self.buf.__len__() 80 | 81 | def received(self, s): 82 | # Returns the number of bytes consumed. 83 | 84 | if self.completed: 85 | return 0 86 | orig_size = len(s) 87 | 88 | while s: 89 | rm = self.chunk_remainder 90 | 91 | if rm > 0: 92 | # Receive the remainder of a chunk. 93 | to_write = s[:rm] 94 | self.buf.append(to_write) 95 | written = len(to_write) 96 | s = s[written:] 97 | 98 | self.chunk_remainder -= written 99 | 100 | if self.chunk_remainder == 0: 101 | self.validate_chunk_end = True 102 | elif self.validate_chunk_end: 103 | s = self.chunk_end + s 104 | 105 | pos = s.find(b"\r\n") 106 | 107 | if pos < 0 and len(s) < 2: 108 | self.chunk_end = s 109 | s = b"" 110 | else: 111 | self.chunk_end = b"" 112 | 113 | if pos == 0: 114 | # Chop off the terminating CR LF from the chunk 115 | s = s[2:] 116 | else: 117 | self.error = BadRequest("Chunk not properly terminated") 118 | self.all_chunks_received = True 119 | 120 | # Always exit this loop 121 | self.validate_chunk_end = False 122 | elif not self.all_chunks_received: 123 | # Receive a control line. 124 | s = self.control_line + s 125 | pos = s.find(b"\r\n") 126 | 127 | if pos < 0: 128 | # Control line not finished. 129 | self.control_line = s 130 | s = b"" 131 | else: 132 | # Control line finished. 133 | line = s[:pos] 134 | s = s[pos + 2 :] 135 | self.control_line = b"" 136 | 137 | if line: 138 | # Begin a new chunk. 139 | semi = line.find(b";") 140 | 141 | if semi >= 0: 142 | extinfo = line[semi:] 143 | valid_ext_info = CHUNK_EXT_RE.match(extinfo) 144 | 145 | if not valid_ext_info: 146 | self.error = BadRequest("Invalid chunk extension") 147 | self.all_chunks_received = True 148 | 149 | break 150 | 151 | line = line[:semi] 152 | 153 | if not ONLY_HEXDIG_RE.match(line): 154 | self.error = BadRequest("Invalid chunk size") 155 | self.all_chunks_received = True 156 | 157 | break 158 | 159 | # Can not fail due to matching against the regular 160 | # expression above 161 | sz = int(line, 16) # hexadecimal 162 | 163 | if sz > 0: 164 | # Start a new chunk. 165 | self.chunk_remainder = sz 166 | else: 167 | # Finished chunks. 
168 | self.all_chunks_received = True 169 | # else expect a control line. 170 | else: 171 | # Receive the trailer. 172 | trailer = self.trailer + s 173 | 174 | if trailer.startswith(b"\r\n"): 175 | # No trailer. 176 | self.completed = True 177 | 178 | return orig_size - (len(trailer) - 2) 179 | pos = find_double_newline(trailer) 180 | 181 | if pos < 0: 182 | # Trailer not finished. 183 | self.trailer = trailer 184 | s = b"" 185 | else: 186 | # Finished the trailer. 187 | self.completed = True 188 | self.trailer = trailer[:pos] 189 | 190 | return orig_size - (len(trailer) - pos) 191 | 192 | return orig_size 193 | 194 | def getfile(self): 195 | return self.buf.getfile() 196 | 197 | def getbuf(self): 198 | return self.buf 199 | -------------------------------------------------------------------------------- /src/waitress/rfc7230.py: -------------------------------------------------------------------------------- 1 | """ 2 | This contains a bunch of RFC7230 definitions and regular expressions that are 3 | needed to properly parse HTTP messages. 4 | """ 5 | 6 | import re 7 | 8 | HEXDIG = "[0-9a-fA-F]" 9 | DIGIT = "[0-9]" 10 | 11 | WS = "[ \t]" 12 | OWS = WS + "{0,}?" 13 | RWS = WS + "{1,}?" 14 | BWS = OWS 15 | 16 | # RFC 7230 Section 3.2.6 "Field Value Components": 17 | # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" 18 | # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" 19 | # / DIGIT / ALPHA 20 | # obs-text = %x80-FF 21 | TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]" 22 | OBS_TEXT = r"\x80-\xff" 23 | 24 | TOKEN = TCHAR + "{1,}" 25 | 26 | # RFC 5234 Appendix B.1 "Core Rules": 27 | # VCHAR = %x21-7E 28 | # ; visible (printing) characters 29 | VCHAR = r"\x21-\x7e" 30 | 31 | # The '\\' between \x5b and \x5d is needed to escape \x5d (']') 32 | QDTEXT = "[\t \x21\x23-\x5b\\\x5d-\x7e" + OBS_TEXT + "]" 33 | 34 | QUOTED_PAIR = r"\\" + "([\t " + VCHAR + OBS_TEXT + "])" 35 | QUOTED_STRING = '"(?:(?:' + QDTEXT + ")|(?:" + QUOTED_PAIR + '))*"' 36 | 37 | # header-field = field-name ":" OWS field-value OWS 38 | # field-name = token 39 | # field-value = *( field-content / obs-fold ) 40 | # field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] 41 | # field-vchar = VCHAR / obs-text 42 | 43 | # Errata from: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 44 | # changes field-content to: 45 | # 46 | # field-content = field-vchar [ 1*( SP / HTAB / field-vchar ) 47 | # field-vchar ] 48 | 49 | FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]" 50 | # Field content is more greedy than the ABNF, in that it will match the whole value 51 | FIELD_CONTENT = FIELD_VCHAR + "+(?:[ \t]+" + FIELD_VCHAR + "+)*" 52 | # Which allows the field value here to just see if there is even a value in the first place 53 | FIELD_VALUE = "(?:" + FIELD_CONTENT + ")?" 
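A hedged sketch of the chunked state machine above, feeding it a complete wire-format body in one call (internal API, shown only to illustrate the control-line/chunk/trailer stages; the sample payload is made up):

    from waitress.buffers import OverflowableBuffer
    from waitress.receiver import ChunkedReceiver

    buf = OverflowableBuffer(overflow=1048576)
    receiver = ChunkedReceiver(buf)

    # Two chunks ("Wiki" and "pedia") followed by the zero-size terminating chunk.
    wire = b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
    consumed = receiver.received(wire)

    assert consumed == len(wire)
    assert receiver.completed
    assert buf.get() == b"Wikipedia"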
54 | 55 | # chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] ) 56 | # chunk-ext-name = token 57 | # chunk-ext-val = token / quoted-string 58 | 59 | CHUNK_EXT_NAME = TOKEN 60 | CHUNK_EXT_VAL = "(?:" + TOKEN + ")|(?:" + QUOTED_STRING + ")" 61 | CHUNK_EXT = ( 62 | "(?:;(?P<extension>" + CHUNK_EXT_NAME + ")(?:=(?P<extension_value>" + CHUNK_EXT_VAL + "))?)*" 63 | ) 64 | 65 | # Pre-compiled regular expressions for use elsewhere 66 | ONLY_HEXDIG_RE = re.compile(("^" + HEXDIG + "+$").encode("latin-1")) 67 | ONLY_DIGIT_RE = re.compile(("^" + DIGIT + "+$").encode("latin-1")) 68 | HEADER_FIELD_RE = re.compile( 69 | ( 70 | "^(?P<name>" + TOKEN + "):" + OWS + "(?P<value>" + FIELD_VALUE + ")" + OWS + "$" 71 | ).encode("latin-1") 72 | ) 73 | QUOTED_PAIR_RE = re.compile(QUOTED_PAIR) 74 | QUOTED_STRING_RE = re.compile(QUOTED_STRING) 75 | CHUNK_EXT_RE = re.compile(("^" + CHUNK_EXT + "$").encode("latin-1")) 76 | -------------------------------------------------------------------------------- /src/waitress/runner.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2013 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Command line runner.""" 15 | 16 | import getopt 17 | import logging 18 | import os 19 | import os.path 20 | import sys 21 | import traceback 22 | 23 | from waitress import serve 24 | from waitress.adjustments import Adjustments, AppResolutionError 25 | from waitress.utilities import logger 26 | 27 | HELP = """\ 28 | Usage: 29 | 30 | {0} [OPTS] [MODULE:OBJECT] 31 | 32 | Standard options: 33 | 34 | --help 35 | Show this information. 36 | 37 | --app=MODULE:OBJECT 38 | Run the given callable object as the WSGI application. 39 | 40 | You can specify the WSGI application using this flag or as a positional 41 | argument. 42 | 43 | --call 44 | Call the given object to get the WSGI application. 45 | 46 | --host=ADDR 47 | Hostname or IP address on which to listen, default is '0.0.0.0', 48 | which means "all IP addresses on this host". 49 | 50 | Note: May not be used together with --listen 51 | 52 | --port=PORT 53 | TCP port on which to listen, default is '8080' 54 | 55 | Note: May not be used together with --listen 56 | 57 | --listen=ip:port 58 | Tell waitress to listen on an ip port combination. 59 | 60 | Example: 61 | 62 | --listen=127.0.0.1:8080 63 | --listen=[::1]:8080 64 | --listen=*:8080 65 | 66 | This option may be used multiple times to listen on multiple sockets. 67 | A wildcard for the hostname is also supported and will bind to both 68 | IPv4/IPv6 depending on whether they are enabled or disabled. 69 | 70 | --server-name=NAME 71 | This is the value that will be placed in the WSGI environment as 72 | ``SERVER_NAME``, the only time that this value is used in the WSGI 73 | environment for a request is if the client sent a HTTP/1.0 request 74 | without a ``Host`` header set, and no other proxy headers.
75 | 76 | The default value is ``waitress.invalid``. If your WSGI application 77 | is creating URL's that include this as the hostname and you are using a 78 | reverse proxy setup, you may want to validate that your reverse proxy 79 | is sending the appropriate headers. 80 | 81 | In most situations you will not need to set this value. 82 | 83 | --[no-]ipv4 84 | Toggle on/off IPv4 support. 85 | 86 | Example: 87 | 88 | --no-ipv4 89 | 90 | This will disable IPv4 socket support. This affects wildcard matching 91 | when generating the list of sockets. 92 | 93 | --[no-]ipv6 94 | Toggle on/off IPv6 support. 95 | 96 | Example: 97 | 98 | --no-ipv6 99 | 100 | This will disable IPv6 socket support. This affects wildcard matching 101 | when generating a list of sockets. 102 | 103 | --unix-socket=PATH 104 | Path of Unix socket. If a socket path is specified, a Unix domain 105 | socket is made instead of the usual inet domain socket. 106 | 107 | Not available on Windows. 108 | 109 | --unix-socket-perms=PERMS 110 | Octal permissions to use for the Unix domain socket, default is 111 | '600'. 112 | 113 | --url-scheme=STR 114 | Default wsgi.url_scheme value, default is 'http'. 115 | 116 | --url-prefix=STR 117 | The 'SCRIPT_NAME' WSGI environment value. Setting this to anything 118 | except the empty string will cause the WSGI 'SCRIPT_NAME' value to be 119 | the value passed minus any trailing slashes you add, and it will cause 120 | the 'PATH_INFO' of any request which is prefixed with this value to be 121 | stripped of the prefix. Default is the empty string. 122 | 123 | --ident=STR 124 | Server identity used in the 'Server' header in responses. Default 125 | is 'waitress'. 126 | 127 | --trusted-proxy=IP 128 | IP address of a remote peer allowed to override various WSGI environment 129 | variables using proxy headers. 130 | 131 | For unix sockets, set this value to 'localhost' instead of an IP 132 | address. 133 | 134 | The value '*' (wildcard) may be used to signify that all remote peers 135 | are to be trusted. 136 | 137 | --trusted-proxy-count=INT 138 | How many proxies we trust when chained. For example, 139 | 140 | X-Forwarded-For: 192.0.2.1, "[2001:db8::1]" 141 | 142 | or 143 | 144 | Forwarded: for=192.0.2.1, For="[2001:db8::1]" 145 | 146 | means there were (potentially) two proxies involved. If we know there 147 | is only 1 valid proxy, then that initial IP address "192.0.2.1" is not 148 | trusted and we completely ignore it. 149 | 150 | If there are two trusted proxies in the path, this value should be set 151 | to 2. If there are more proxies, this value should be set higher. 152 | 153 | Default: 1 154 | 155 | --trusted-proxy-headers=LIST 156 | Which of the proxy headers should we trust, this is a set where you 157 | either specify "forwarded" or one or more of "x-forwarded-host", 158 | "x-forwarded-for", "x-forwarded-proto", "x-forwarded-port", 159 | "x-forwarded-by". 160 | 161 | This list of trusted headers is used when 'trusted_proxy' is set and 162 | will allow waitress to modify the WSGI environment using the values 163 | provided by the proxy. 164 | 165 | It is an error to set this value without setting --trusted-proxy. 166 | 167 | WARNING: If --trusted-proxy is set, the default is 'x-forwarded-proto' 168 | to match older versions of Waitress. Users should explicitly opt-in by 169 | selecting the headers to be trusted as future versions of waitress will 170 | use an empty default.
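Each of the proxy-related options above has a keyword-argument equivalent (dashes become underscores) when calling serve() or create_server() from Python; a hedged sketch with illustrative values (app is assumed to be your WSGI callable):

    from waitress import serve

    serve(
        app,
        trusted_proxy="203.0.113.10",   # --trusted-proxy
        trusted_proxy_count=1,          # --trusted-proxy-count
        trusted_proxy_headers={"x-forwarded-for", "x-forwarded-proto"},
    )

The clear/log options described next follow the same naming pattern.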
171 | 172 | --[no-]log-untrusted-proxy-headers 173 | Should waitress log warning messages about proxy headers that are being 174 | sent from upstream that are not trusted by --trusted-proxy-headers but 175 | are being cleared due to --clear-untrusted-proxy-headers? 176 | 177 | This may be useful for debugging if you expect your upstream proxy 178 | server to only send specific headers. 179 | 180 | It is a no-op to set this value without also setting 181 | --clear-untrusted-proxy-headers and --trusted-proxy. 182 | 183 | --[no-]clear-untrusted-proxy-headers 184 | This tells Waitress to remove any untrusted proxy headers ("Forwarded", 185 | "X-Forwared-For", "X-Forwarded-By", "X-Forwarded-Host", 186 | "X-Forwarded-Port", "X-Forwarded-Proto") not explicitly allowed by 187 | --trusted-proxy-headers. 188 | 189 | This is active by default. 190 | 191 | It is an error to set this value without setting --trusted-proxy. 192 | 193 | Tuning options: 194 | 195 | --threads=INT 196 | Number of threads used to process application logic, default is 4. 197 | 198 | --backlog=INT 199 | Connection backlog for the server. Default is 1024. 200 | 201 | --recv-bytes=INT 202 | Number of bytes to request when calling socket.recv(). Default is 203 | 8192. 204 | 205 | --send-bytes=INT 206 | Number of bytes to send to socket.send(). Default is 18000. 207 | Multiples of 9000 should avoid partly-filled TCP packets. 208 | 209 | --outbuf-overflow=INT 210 | A temporary file should be created if the pending output is larger 211 | than this. Default is 1048576 (1MB). 212 | 213 | --outbuf-high-watermark=INT 214 | The app_iter will pause when pending output is larger than this value 215 | and will resume once enough data is written to the socket to fall below 216 | this threshold. Default is 16777216 (16MB). 217 | 218 | --inbuf-overflow=INT 219 | A temporary file should be created if the pending input is larger 220 | than this. Default is 524288 (512KB). 221 | 222 | --connection-limit=INT 223 | Stop creating new channels if too many are already active. 224 | Default is 100. 225 | 226 | --cleanup-interval=INT 227 | Minimum seconds between cleaning up inactive channels. Default 228 | is 30. See '--channel-timeout'. 229 | 230 | --channel-timeout=INT 231 | Maximum number of seconds to leave inactive connections open. 232 | Default is 120. 'Inactive' is defined as 'has received no data 233 | from the client and has sent no data to the client'. 234 | 235 | --channel-request-lookahead=INT 236 | Sets the amount of requests we can continue to read from the socket, 237 | while we are processing current requests. The default value won't allow 238 | any lookahead, increase it above '0' to enable. 239 | 240 | When enabled this inserts a callable 'waitress.client_disconnected' 241 | into the environment that allows the task to check if the client 242 | disconnected while waiting for the response at strategic points in the 243 | execution and to cancel the operation. 244 | 245 | Default: '0' 246 | 247 | --[no-]log-socket-errors 248 | Toggle whether premature client disconnect tracebacks ought to be 249 | logged. On by default. 250 | 251 | --max-request-header-size=INT 252 | Maximum size of all request headers combined. Default is 262144 253 | (256KB). 254 | 255 | --max-request-body-size=INT 256 | Maximum size of request body. Default is 1073741824 (1GB). 257 | 258 | --[no-]expose-tracebacks 259 | Toggle whether to expose tracebacks of unhandled exceptions to the 260 | client. Off by default. 
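The tuning options follow the same flag-to-keyword mapping; a hedged sketch of a tuned deployment (all numbers are illustrative, the defaults are documented above; app is assumed to be your WSGI callable):

    from waitress import serve

    serve(
        app,
        threads=8,                                # --threads
        connection_limit=200,                     # --connection-limit
        channel_timeout=60,                       # --channel-timeout
        max_request_body_size=10 * 1024 * 1024,   # --max-request-body-size
        expose_tracebacks=False,                  # --[no-]expose-tracebacks
    )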
261 | 262 | --asyncore-loop-timeout=INT 263 | The timeout value in seconds passed to asyncore.loop(). Default is 1. 264 | 265 | --asyncore-use-poll 266 | The use_poll argument passed to asyncore.loop(). Helps overcome 267 | open file descriptors limit. Default is False. 268 | 269 | --channel-request-lookahead=INT 270 | Allows channels to stay readable and buffer more requests up to the 271 | given maximum even if a request is already being processed. This allows 272 | detecting if a client closed the connection while its request is being 273 | processed. Default is 0. 274 | 275 | """ 276 | 277 | 278 | def show_help(stream, name, error=None): # pragma: no cover 279 | if error is not None: 280 | print(f"Error: {error}\n", file=stream) 281 | print(HELP.format(name), file=stream) 282 | 283 | 284 | def run(argv=sys.argv, _serve=serve): 285 | """Command line runner.""" 286 | # Add the current directory onto sys.path 287 | sys.path.append(os.getcwd()) 288 | 289 | name = os.path.basename(argv[0]) 290 | 291 | try: 292 | kw = Adjustments.parse_args(argv[1:]) 293 | except getopt.GetoptError as exc: 294 | show_help(sys.stderr, name, str(exc)) 295 | return 1 296 | except AppResolutionError as exc: 297 | show_help(sys.stderr, name, str(exc)) 298 | traceback.print_exc(file=sys.stderr) 299 | return 1 300 | 301 | if kw["help"]: 302 | show_help(sys.stdout, name) 303 | return 0 304 | 305 | # set a default level for the logger only if it hasn't been set explicitly 306 | # note that this level does not override any parent logger levels, 307 | # handlers, etc but without it no log messages are emitted by default 308 | if logger.level == logging.NOTSET: 309 | logger.setLevel(logging.INFO) 310 | 311 | app = kw["app"] 312 | 313 | # These arguments are specific to the runner, not waitress itself. 314 | del kw["help"], kw["app"] 315 | 316 | _serve(app, **kw) 317 | return 0 318 | -------------------------------------------------------------------------------- /src/waitress/server.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2001, 2002 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | 15 | import os 16 | import os.path 17 | import socket 18 | import time 19 | 20 | from waitress import trigger 21 | from waitress.adjustments import Adjustments 22 | from waitress.channel import HTTPChannel 23 | from waitress.compat import IPPROTO_IPV6, IPV6_V6ONLY 24 | from waitress.task import ThreadedTaskDispatcher 25 | from waitress.utilities import cleanup_unix_socket 26 | 27 | from . 
import wasyncore 28 | from .proxy_headers import proxy_headers_middleware 29 | 30 | 31 | def create_server( 32 | application, 33 | map=None, 34 | _start=True, # test shim 35 | _sock=None, # test shim 36 | _dispatcher=None, # test shim 37 | **kw, # adjustments 38 | ): 39 | """ 40 | if __name__ == '__main__': 41 | server = create_server(app) 42 | server.run() 43 | """ 44 | if application is None: 45 | raise ValueError( 46 | 'The "app" passed to ``create_server`` was ``None``. You forgot ' 47 | "to return a WSGI app within your application." 48 | ) 49 | adj = Adjustments(**kw) 50 | 51 | if map is None: # pragma: nocover 52 | map = {} 53 | 54 | dispatcher = _dispatcher 55 | if dispatcher is None: 56 | dispatcher = ThreadedTaskDispatcher() 57 | dispatcher.set_thread_count(adj.threads) 58 | 59 | if adj.unix_socket and hasattr(socket, "AF_UNIX"): 60 | sockinfo = (socket.AF_UNIX, socket.SOCK_STREAM, None, None) 61 | return UnixWSGIServer( 62 | application, 63 | map, 64 | _start, 65 | _sock, 66 | dispatcher=dispatcher, 67 | adj=adj, 68 | sockinfo=sockinfo, 69 | ) 70 | 71 | effective_listen = [] 72 | last_serv = None 73 | if not adj.sockets: 74 | for sockinfo in adj.listen: 75 | # When TcpWSGIServer is called, it registers itself in the map. This 76 | # side-effect is all we need it for, so we don't store a reference to 77 | # or return it to the user. 78 | last_serv = TcpWSGIServer( 79 | application, 80 | map, 81 | _start, 82 | _sock, 83 | dispatcher=dispatcher, 84 | adj=adj, 85 | sockinfo=sockinfo, 86 | ) 87 | effective_listen.append( 88 | (last_serv.effective_host, last_serv.effective_port) 89 | ) 90 | 91 | for sock in adj.sockets: 92 | sockinfo = (sock.family, sock.type, sock.proto, sock.getsockname()) 93 | if sock.family == socket.AF_INET or sock.family == socket.AF_INET6: 94 | last_serv = TcpWSGIServer( 95 | application, 96 | map, 97 | _start, 98 | sock, 99 | dispatcher=dispatcher, 100 | adj=adj, 101 | bind_socket=False, 102 | sockinfo=sockinfo, 103 | ) 104 | effective_listen.append( 105 | (last_serv.effective_host, last_serv.effective_port) 106 | ) 107 | elif hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX: 108 | last_serv = UnixWSGIServer( 109 | application, 110 | map, 111 | _start, 112 | sock, 113 | dispatcher=dispatcher, 114 | adj=adj, 115 | bind_socket=False, 116 | sockinfo=sockinfo, 117 | ) 118 | effective_listen.append( 119 | (last_serv.effective_host, last_serv.effective_port) 120 | ) 121 | 122 | # We are running a single server, so we can just return the last server, 123 | # saves us from having to create one more object 124 | if len(effective_listen) == 1: 125 | # In this case we have no need to use a MultiSocketServer 126 | return last_serv 127 | 128 | log_info = last_serv.log_info 129 | # Return a class that has a utility function to print out the sockets it's 130 | # listening on, and has a .run() function. All of the TcpWSGIServers 131 | # registered themselves in the map above. 132 | return MultiSocketServer(map, adj, effective_listen, dispatcher, log_info) 133 | 134 | 135 | # This class is only ever used if we have multiple listen sockets. It allows 136 | # the serve() API to call .run() which starts the wasyncore loop, and catches 137 | # SystemExit/KeyboardInterrupt so that it can attempt to cleanly shut down. 
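create_server() above is the non-blocking counterpart of serve(): it returns a server object (or, when several listen directives are given, the MultiSocketServer described next) without starting the loop. A hedged sketch, with illustrative addresses and app assumed to be your WSGI callable:

    from waitress import create_server

    server = create_server(app, listen="127.0.0.1:8080 [::1]:8080")
    server.print_listen("Serving on http://{}:{}")
    server.run()   # enters the wasyncore loop until SystemExit/KeyboardInterrupt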
138 | class MultiSocketServer: 139 | asyncore = wasyncore # test shim 140 | 141 | def __init__( 142 | self, 143 | map=None, 144 | adj=None, 145 | effective_listen=None, 146 | dispatcher=None, 147 | log_info=None, 148 | ): 149 | self.adj = adj 150 | self.map = map 151 | self.effective_listen = effective_listen 152 | self.task_dispatcher = dispatcher 153 | self.log_info = log_info 154 | 155 | def print_listen(self, format_str): # pragma: nocover 156 | for l in self.effective_listen: 157 | l = list(l) 158 | 159 | if ":" in l[0]: 160 | l[0] = f"[{l[0]}]" 161 | 162 | self.log_info(format_str.format(*l)) 163 | 164 | def run(self): 165 | try: 166 | self.asyncore.loop( 167 | timeout=self.adj.asyncore_loop_timeout, 168 | map=self.map, 169 | use_poll=self.adj.asyncore_use_poll, 170 | ) 171 | except (SystemExit, KeyboardInterrupt): 172 | self.close() 173 | 174 | def close(self): 175 | self.task_dispatcher.shutdown() 176 | wasyncore.close_all(self.map) 177 | 178 | 179 | class BaseWSGIServer(wasyncore.dispatcher): 180 | channel_class = HTTPChannel 181 | next_channel_cleanup = 0 182 | socketmod = socket # test shim 183 | asyncore = wasyncore # test shim 184 | in_connection_overflow = False 185 | 186 | def __init__( 187 | self, 188 | application, 189 | map=None, 190 | _start=True, # test shim 191 | _sock=None, # test shim 192 | dispatcher=None, # dispatcher 193 | adj=None, # adjustments 194 | sockinfo=None, # opaque object 195 | bind_socket=True, 196 | **kw, 197 | ): 198 | if adj is None: 199 | adj = Adjustments(**kw) 200 | 201 | if adj.trusted_proxy or adj.clear_untrusted_proxy_headers: 202 | # wrap the application to deal with proxy headers 203 | # we wrap it here because webtest subclasses the TcpWSGIServer 204 | # directly and thus doesn't run any code that's in create_server 205 | application = proxy_headers_middleware( 206 | application, 207 | trusted_proxy=adj.trusted_proxy, 208 | trusted_proxy_count=adj.trusted_proxy_count, 209 | trusted_proxy_headers=adj.trusted_proxy_headers, 210 | clear_untrusted=adj.clear_untrusted_proxy_headers, 211 | log_untrusted=adj.log_untrusted_proxy_headers, 212 | logger=self.logger, 213 | ) 214 | 215 | if map is None: 216 | # use a nonglobal socket map by default to hopefully prevent 217 | # conflicts with apps and libs that use the wasyncore global socket 218 | # map ala https://github.com/Pylons/waitress/issues/63 219 | map = {} 220 | if sockinfo is None: 221 | sockinfo = adj.listen[0] 222 | 223 | self.sockinfo = sockinfo 224 | self.family = sockinfo[0] 225 | self.socktype = sockinfo[1] 226 | self.application = application 227 | self.adj = adj 228 | self.trigger = trigger.trigger(map) 229 | if dispatcher is None: 230 | dispatcher = ThreadedTaskDispatcher() 231 | dispatcher.set_thread_count(self.adj.threads) 232 | 233 | self.task_dispatcher = dispatcher 234 | self.asyncore.dispatcher.__init__(self, _sock, map=map) 235 | if _sock is None: 236 | self.create_socket(self.family, self.socktype) 237 | if self.family == socket.AF_INET6: # pragma: nocover 238 | self.socket.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) 239 | 240 | self.set_reuse_addr() 241 | 242 | if bind_socket: 243 | self.bind_server_socket() 244 | 245 | self.effective_host, self.effective_port = self.getsockname() 246 | self.server_name = adj.server_name 247 | self.active_channels = {} 248 | if _start: 249 | self.accept_connections() 250 | 251 | def bind_server_socket(self): 252 | raise NotImplementedError # pragma: no cover 253 | 254 | def getsockname(self): 255 | raise NotImplementedError # pragma: no cover 
256 | 257 | def accept_connections(self): 258 | self.accepting = True 259 | self.socket.listen(self.adj.backlog) # Get around asyncore NT limit 260 | 261 | def add_task(self, task): 262 | self.task_dispatcher.add_task(task) 263 | 264 | def readable(self): 265 | now = time.time() 266 | if now >= self.next_channel_cleanup: 267 | self.next_channel_cleanup = now + self.adj.cleanup_interval 268 | self.maintenance(now) 269 | 270 | if self.accepting: 271 | if ( 272 | not self.in_connection_overflow 273 | and len(self._map) >= self.adj.connection_limit 274 | ): 275 | self.in_connection_overflow = True 276 | self.logger.warning( 277 | "total open connections reached the connection limit, " 278 | "no longer accepting new connections" 279 | ) 280 | elif ( 281 | self.in_connection_overflow 282 | and len(self._map) < self.adj.connection_limit 283 | ): 284 | self.in_connection_overflow = False 285 | self.logger.info( 286 | "total open connections dropped below the connection limit, " 287 | "listening again" 288 | ) 289 | return not self.in_connection_overflow 290 | return False 291 | 292 | def writable(self): 293 | return False 294 | 295 | def handle_read(self): 296 | pass 297 | 298 | def handle_connect(self): 299 | pass 300 | 301 | def handle_accept(self): 302 | try: 303 | v = self.accept() 304 | if v is None: 305 | return 306 | conn, addr = v 307 | self.set_socket_options(conn) 308 | except OSError: 309 | # Linux: On rare occasions we get a bogus socket back from 310 | # accept. socketmodule.c:makesockaddr complains that the 311 | # address family is unknown. We don't want the whole server 312 | # to shut down because of this. 313 | # macOS: On occasions when the remote has already closed the socket 314 | # before we got around to accepting it, when we try to set the 315 | # socket options it will fail. So instead just we log the error and 316 | # continue 317 | if self.adj.log_socket_errors: 318 | self.logger.warning("server accept() threw an exception", exc_info=True) 319 | return 320 | addr = self.fix_addr(addr) 321 | self.channel_class(self, conn, addr, self.adj, map=self._map) 322 | 323 | def run(self): 324 | try: 325 | self.asyncore.loop( 326 | timeout=self.adj.asyncore_loop_timeout, 327 | map=self._map, 328 | use_poll=self.adj.asyncore_use_poll, 329 | ) 330 | except (SystemExit, KeyboardInterrupt): 331 | self.task_dispatcher.shutdown() 332 | 333 | def pull_trigger(self): 334 | self.trigger.pull_trigger() 335 | 336 | def set_socket_options(self, conn): 337 | pass 338 | 339 | def fix_addr(self, addr): 340 | return addr 341 | 342 | def maintenance(self, now): 343 | """ 344 | Closes channels that have not had any activity in a while. 345 | 346 | The timeout is configured through adj.channel_timeout (seconds). 
347 | """ 348 | cutoff = now - self.adj.channel_timeout 349 | for channel in self.active_channels.values(): 350 | if (not channel.requests) and channel.last_activity < cutoff: 351 | channel.will_close = True 352 | 353 | def print_listen(self, format_str): # pragma: no cover 354 | self.log_info(format_str.format(self.effective_host, self.effective_port)) 355 | 356 | def close(self): 357 | self.trigger.close() 358 | return wasyncore.dispatcher.close(self) 359 | 360 | 361 | class TcpWSGIServer(BaseWSGIServer): 362 | def bind_server_socket(self): 363 | (_, _, _, sockaddr) = self.sockinfo 364 | self.bind(sockaddr) 365 | 366 | def getsockname(self): 367 | # Return the IP address, port as numeric 368 | return self.socketmod.getnameinfo( 369 | self.socket.getsockname(), 370 | self.socketmod.NI_NUMERICHOST | self.socketmod.NI_NUMERICSERV, 371 | ) 372 | 373 | def set_socket_options(self, conn): 374 | for level, optname, value in self.adj.socket_options: 375 | conn.setsockopt(level, optname, value) 376 | 377 | 378 | if hasattr(socket, "AF_UNIX"): 379 | 380 | class UnixWSGIServer(BaseWSGIServer): 381 | def __init__( 382 | self, 383 | application, 384 | map=None, 385 | _start=True, # test shim 386 | _sock=None, # test shim 387 | dispatcher=None, # dispatcher 388 | adj=None, # adjustments 389 | sockinfo=None, # opaque object 390 | **kw, 391 | ): 392 | if sockinfo is None: 393 | sockinfo = (socket.AF_UNIX, socket.SOCK_STREAM, None, None) 394 | 395 | super().__init__( 396 | application, 397 | map=map, 398 | _start=_start, 399 | _sock=_sock, 400 | dispatcher=dispatcher, 401 | adj=adj, 402 | sockinfo=sockinfo, 403 | **kw, 404 | ) 405 | 406 | def bind_server_socket(self): 407 | cleanup_unix_socket(self.adj.unix_socket) 408 | self.bind(self.adj.unix_socket) 409 | if os.path.exists(self.adj.unix_socket): 410 | os.chmod(self.adj.unix_socket, self.adj.unix_socket_perms) 411 | 412 | def getsockname(self): 413 | return ("unix", self.socket.getsockname()) 414 | 415 | def fix_addr(self, addr): 416 | return ("localhost", None) 417 | 418 | 419 | # Compatibility alias. 420 | WSGIServer = TcpWSGIServer 421 | -------------------------------------------------------------------------------- /src/waitress/trigger.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2001-2005 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE 12 | # 13 | ############################################################################## 14 | 15 | import errno 16 | import os 17 | import socket 18 | import threading 19 | 20 | from . import wasyncore 21 | 22 | # Wake up a call to select() running in the main thread. 23 | # 24 | # This is useful in a context where you are using Medusa's I/O 25 | # subsystem to deliver data, but the data is generated by another 26 | # thread. Normally, if Medusa is in the middle of a call to 27 | # select(), new output data generated by another thread will have 28 | # to sit until the call to select() either times out or returns. 
29 | # If the trigger is 'pulled' by another thread, it should immediately 30 | # generate a READ event on the trigger object, which will force the 31 | # select() invocation to return. 32 | # 33 | # A common use for this facility: letting Medusa manage I/O for a 34 | # large number of connections; but routing each request through a 35 | # thread chosen from a fixed-size thread pool. When a thread is 36 | # acquired, a transaction is performed, but output data is 37 | # accumulated into buffers that will be emptied more efficiently 38 | # by Medusa. [picture a server that can process database queries 39 | # rapidly, but doesn't want to tie up threads waiting to send data 40 | # to low-bandwidth connections] 41 | # 42 | # The other major feature provided by this class is the ability to 43 | # move work back into the main thread: if you call pull_trigger() 44 | # with a thunk argument, when select() wakes up and receives the 45 | # event it will call your thunk from within that thread. The main 46 | # purpose of this is to remove the need to wrap thread locks around 47 | # Medusa's data structures, which normally do not need them. [To see 48 | # why this is true, imagine this scenario: A thread tries to push some 49 | # new data onto a channel's outgoing data queue at the same time that 50 | # the main thread is trying to remove some] 51 | 52 | 53 | class _triggerbase: 54 | """OS-independent base class for OS-dependent trigger class.""" 55 | 56 | kind = None # subclass must set to "pipe" or "loopback"; used by repr 57 | 58 | def __init__(self): 59 | self._closed = False 60 | 61 | # `lock` protects the `thunks` list from being traversed and 62 | # appended to simultaneously. 63 | self.lock = threading.Lock() 64 | 65 | # List of no-argument callbacks to invoke when the trigger is 66 | # pulled. These run in the thread running the wasyncore mainloop, 67 | # regardless of which thread pulls the trigger. 68 | self.thunks = [] 69 | 70 | def readable(self): 71 | return True 72 | 73 | def writable(self): 74 | return False 75 | 76 | def handle_connect(self): 77 | pass 78 | 79 | def handle_close(self): 80 | self.close() 81 | 82 | # Override the wasyncore close() method, because it doesn't know about 83 | # (so can't close) all the gimmicks we have open. Subclass must 84 | # supply a _close() method to do platform-specific closing work. _close() 85 | # will be called iff we're not already closed. 
86 | def close(self): 87 | if not self._closed: 88 | self._closed = True 89 | self.del_channel() 90 | self._close() # subclass does OS-specific stuff 91 | 92 | def pull_trigger(self, thunk=None): 93 | if thunk: 94 | with self.lock: 95 | self.thunks.append(thunk) 96 | self._physical_pull() 97 | 98 | def handle_read(self): 99 | try: 100 | self.recv(8192) 101 | except OSError: 102 | return 103 | with self.lock: 104 | for thunk in self.thunks: 105 | try: 106 | thunk() 107 | except: 108 | nil, t, v, tbinfo = wasyncore.compact_traceback() 109 | self.log_info(f"exception in trigger thunk: ({t}:{v} {tbinfo})") 110 | self.thunks = [] 111 | 112 | 113 | if os.name == "posix": 114 | 115 | class trigger(_triggerbase, wasyncore.file_dispatcher): 116 | kind = "pipe" 117 | 118 | def __init__(self, map): 119 | _triggerbase.__init__(self) 120 | r, self.trigger = self._fds = os.pipe() 121 | wasyncore.file_dispatcher.__init__(self, r, map=map) 122 | 123 | def _close(self): 124 | for fd in self._fds: 125 | os.close(fd) 126 | self._fds = [] 127 | wasyncore.file_dispatcher.close(self) 128 | 129 | def _physical_pull(self): 130 | os.write(self.trigger, b"x") 131 | 132 | else: # pragma: no cover 133 | # Windows version; uses just sockets, because a pipe isn't select'able 134 | # on Windows. 135 | 136 | class trigger(_triggerbase, wasyncore.dispatcher): 137 | kind = "loopback" 138 | 139 | def __init__(self, map): 140 | _triggerbase.__init__(self) 141 | 142 | # Get a pair of connected sockets. The trigger is the 'w' 143 | # end of the pair, which is connected to 'r'. 'r' is put 144 | # in the wasyncore socket map. "pulling the trigger" then 145 | # means writing something on w, which will wake up r. 146 | 147 | w = socket.socket() 148 | # Disable buffering -- pulling the trigger sends 1 byte, 149 | # and we want that sent immediately, to wake up wasyncore's 150 | # select() ASAP. 151 | w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) 152 | 153 | count = 0 154 | while True: 155 | count += 1 156 | # Bind to a local port; for efficiency, let the OS pick 157 | # a free port for us. 158 | # Unfortunately, stress tests showed that we may not 159 | # be able to connect to that port ("Address already in 160 | # use") despite that the OS picked it. This appears 161 | # to be a race bug in the Windows socket implementation. 162 | # So we loop until a connect() succeeds (almost always 163 | # on the first try). See the long thread at 164 | # http://mail.zope.org/pipermail/zope/2005-July/160433.html 165 | # for hideous details. 166 | a = socket.socket() 167 | a.bind(("127.0.0.1", 0)) 168 | connect_address = a.getsockname() # assigned (host, port) pair 169 | a.listen(1) 170 | try: 171 | w.connect(connect_address) 172 | break # success 173 | except OSError as detail: 174 | if getattr(detail, "winerror", None) != errno.WSAEADDRINUSE: 175 | # "Address already in use" is the only error 176 | # I've seen on two WinXP Pro SP2 boxes, under 177 | # Pythons 2.3.5 and 2.4.1. 178 | raise 179 | # (10048, 'Address already in use') 180 | # assert count <= 2 # never triggered in Tim's tests 181 | if count >= 10: # I've never seen it go above 2 182 | a.close() 183 | w.close() 184 | raise RuntimeError("Cannot bind trigger!") 185 | # Close `a` and try again. Note: I originally put a short 186 | # sleep() here, but it didn't appear to help or hurt. 
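The trigger is how worker threads wake the select() loop; a hedged sketch of the calling pattern used elsewhere in waitress (internal API, names are illustrative):

    from waitress import trigger

    socket_map = {}
    t = trigger.trigger(socket_map)   # registers its read end in the wasyncore map

    def flush_pending_output():
        # Executed later, inside the thread running the wasyncore main loop.
        pass

    # Called from a worker thread once response data is ready to be written:
    t.pull_trigger(flush_pending_output)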
187 | a.close() 188 | 189 | r, addr = a.accept() # r becomes wasyncore's (self.)socket 190 | a.close() 191 | self.trigger = w 192 | wasyncore.dispatcher.__init__(self, r, map=map) 193 | 194 | def _close(self): 195 | # self.socket is r, and self.trigger is w, from __init__ 196 | self.socket.close() 197 | self.trigger.close() 198 | 199 | def _physical_pull(self): 200 | self.trigger.send(b"x") 201 | -------------------------------------------------------------------------------- /src/waitress/utilities.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2004 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Utility functions 15 | """ 16 | 17 | import calendar 18 | import errno 19 | import logging 20 | import os 21 | import re 22 | import stat 23 | import time 24 | 25 | from .rfc7230 import QUOTED_PAIR_RE, QUOTED_STRING_RE 26 | 27 | logger = logging.getLogger("waitress") 28 | queue_logger = logging.getLogger("waitress.queue") 29 | 30 | 31 | def find_double_newline(s): 32 | """Returns the position just after a double newline in the given string.""" 33 | pos = s.find(b"\r\n\r\n") 34 | 35 | if pos >= 0: 36 | pos += 4 37 | 38 | return pos 39 | 40 | 41 | def concat(*args): 42 | return "".join(args) 43 | 44 | 45 | def join(seq, field=" "): 46 | return field.join(seq) 47 | 48 | 49 | def group(s): 50 | return "(" + s + ")" 51 | 52 | 53 | short_days = ["sun", "mon", "tue", "wed", "thu", "fri", "sat"] 54 | long_days = [ 55 | "sunday", 56 | "monday", 57 | "tuesday", 58 | "wednesday", 59 | "thursday", 60 | "friday", 61 | "saturday", 62 | ] 63 | 64 | short_day_reg = group(join(short_days, "|")) 65 | long_day_reg = group(join(long_days, "|")) 66 | 67 | daymap = {} 68 | 69 | for i in range(7): 70 | daymap[short_days[i]] = i 71 | daymap[long_days[i]] = i 72 | 73 | hms_reg = join(3 * [group("[0-9][0-9]")], ":") 74 | 75 | months = [ 76 | "jan", 77 | "feb", 78 | "mar", 79 | "apr", 80 | "may", 81 | "jun", 82 | "jul", 83 | "aug", 84 | "sep", 85 | "oct", 86 | "nov", 87 | "dec", 88 | ] 89 | 90 | monmap = {} 91 | 92 | for i in range(12): 93 | monmap[months[i]] = i + 1 94 | 95 | months_reg = group(join(months, "|")) 96 | 97 | # From draft-ietf-http-v11-spec-07.txt/3.3.1 98 | # Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 99 | # Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 100 | # Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format 101 | 102 | # rfc822 format 103 | rfc822_date = join( 104 | [ 105 | concat(short_day_reg, ","), # day 106 | group("[0-9][0-9]?"), # date 107 | months_reg, # month 108 | group("[0-9]+"), # year 109 | hms_reg, # hour minute second 110 | "gmt", 111 | ], 112 | " ", 113 | ) 114 | 115 | rfc822_reg = re.compile(rfc822_date) 116 | 117 | 118 | def unpack_rfc822(m): 119 | g = m.group 120 | 121 | return ( 122 | int(g(4)), # year 123 | monmap[g(3)], # month 124 | int(g(2)), # day 125 | int(g(5)), # hour 
126 | int(g(6)), # minute 127 | int(g(7)), # second 128 | 0, 129 | 0, 130 | 0, 131 | ) 132 | 133 | 134 | # rfc850 format 135 | rfc850_date = join( 136 | [ 137 | concat(long_day_reg, ","), 138 | join([group("[0-9][0-9]?"), months_reg, group("[0-9]+")], "-"), 139 | hms_reg, 140 | "gmt", 141 | ], 142 | " ", 143 | ) 144 | 145 | rfc850_reg = re.compile(rfc850_date) 146 | 147 | 148 | # they actually unpack the same way 149 | def unpack_rfc850(m): 150 | g = m.group 151 | yr = g(4) 152 | 153 | if len(yr) == 2: 154 | yr = "19" + yr 155 | 156 | return ( 157 | int(yr), # year 158 | monmap[g(3)], # month 159 | int(g(2)), # day 160 | int(g(5)), # hour 161 | int(g(6)), # minute 162 | int(g(7)), # second 163 | 0, 164 | 0, 165 | 0, 166 | ) 167 | 168 | 169 | # parsdate.parsedate - ~700/sec. 170 | # parse_http_date - ~1333/sec. 171 | 172 | weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] 173 | monthname = [ 174 | None, 175 | "Jan", 176 | "Feb", 177 | "Mar", 178 | "Apr", 179 | "May", 180 | "Jun", 181 | "Jul", 182 | "Aug", 183 | "Sep", 184 | "Oct", 185 | "Nov", 186 | "Dec", 187 | ] 188 | 189 | 190 | def build_http_date(when): 191 | year, month, day, hh, mm, ss, wd, y, z = time.gmtime(when) 192 | 193 | return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( 194 | weekdayname[wd], 195 | day, 196 | monthname[month], 197 | year, 198 | hh, 199 | mm, 200 | ss, 201 | ) 202 | 203 | 204 | def parse_http_date(d): 205 | d = d.lower() 206 | m = rfc850_reg.match(d) 207 | 208 | if m and m.end() == len(d): 209 | retval = int(calendar.timegm(unpack_rfc850(m))) 210 | else: 211 | m = rfc822_reg.match(d) 212 | 213 | if m and m.end() == len(d): 214 | retval = int(calendar.timegm(unpack_rfc822(m))) 215 | else: 216 | return 0 217 | 218 | return retval 219 | 220 | 221 | def undquote(value): 222 | if value.startswith('"') and value.endswith('"'): 223 | # So it claims to be DQUOTE'ed, let's validate that 224 | matches = QUOTED_STRING_RE.match(value) 225 | 226 | if matches and matches.end() == len(value): 227 | # Remove the DQUOTE's from the value 228 | value = value[1:-1] 229 | 230 | # Remove all backslashes that are followed by a valid vchar or 231 | # obs-text 232 | value = QUOTED_PAIR_RE.sub(r"\1", value) 233 | 234 | return value 235 | elif not value.startswith('"') and not value.endswith('"'): 236 | return value 237 | 238 | raise ValueError("Invalid quoting in value") 239 | 240 | 241 | def cleanup_unix_socket(path): 242 | try: 243 | st = os.stat(path) 244 | except OSError as exc: 245 | if exc.errno != errno.ENOENT: 246 | raise # pragma: no cover 247 | else: 248 | if stat.S_ISSOCK(st.st_mode): 249 | try: 250 | os.remove(path) 251 | except OSError: # pragma: no cover 252 | # avoid race condition error during tests 253 | pass 254 | 255 | 256 | class Error: 257 | code = 500 258 | reason = "Internal Server Error" 259 | 260 | def __init__(self, body): 261 | self.body = body 262 | 263 | def to_response(self, ident=None): 264 | status = f"{self.code} {self.reason}" 265 | body = f"{self.reason}\r\n\r\n{self.body}" 266 | ident = ident if ident else "server" 267 | tag = f"\r\n\r\n(generated by {ident})" 268 | body = (body + tag).encode("utf-8") 269 | headers = [("Content-Type", "text/plain; charset=utf-8")] 270 | 271 | return status, headers, body 272 | 273 | def wsgi_response(self, environ, start_response): 274 | status, headers, body = self.to_response() 275 | start_response(status, headers) 276 | yield body 277 | 278 | 279 | class BadRequest(Error): 280 | code = 400 281 | reason = "Bad Request" 282 | 283 | 284 | class 
RequestHeaderFieldsTooLarge(BadRequest): 285 | code = 431 286 | reason = "Request Header Fields Too Large" 287 | 288 | 289 | class RequestEntityTooLarge(BadRequest): 290 | code = 413 291 | reason = "Request Entity Too Large" 292 | 293 | 294 | class InternalServerError(Error): 295 | code = 500 296 | reason = "Internal Server Error" 297 | 298 | 299 | class ServerNotImplemented(Error): 300 | code = 501 301 | reason = "Not Implemented" 302 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # This file is necessary to make this directory a package. 3 | -------------------------------------------------------------------------------- /tests/fixtureapps/__init__.py: -------------------------------------------------------------------------------- 1 | # package (for -m) 2 | -------------------------------------------------------------------------------- /tests/fixtureapps/badcl.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | body = b"abcdefghi" 3 | cl = len(body) 4 | if environ["PATH_INFO"] == "/short_body": 5 | cl = len(body) + 1 6 | if environ["PATH_INFO"] == "/long_body": 7 | cl = len(body) - 1 8 | start_response( 9 | "200 OK", [("Content-Length", str(cl)), ("Content-Type", "text/plain")] 10 | ) 11 | return [body] 12 | -------------------------------------------------------------------------------- /tests/fixtureapps/echo.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | import json 3 | 4 | 5 | def app_body_only(environ, start_response): # pragma: no cover 6 | cl = environ.get("CONTENT_LENGTH", None) 7 | if cl is not None: 8 | cl = int(cl) 9 | body = environ["wsgi.input"].read(cl) 10 | cl = str(len(body)) 11 | start_response( 12 | "200 OK", 13 | [ 14 | ("Content-Length", cl), 15 | ("Content-Type", "text/plain"), 16 | ], 17 | ) 18 | return [body] 19 | 20 | 21 | def app(environ, start_response): # pragma: no cover 22 | cl = environ.get("CONTENT_LENGTH", None) 23 | if cl is not None: 24 | cl = int(cl) 25 | request_body = environ["wsgi.input"].read(cl) 26 | cl = str(len(request_body)) 27 | meta = { 28 | "method": environ["REQUEST_METHOD"], 29 | "path_info": environ["PATH_INFO"], 30 | "script_name": environ["SCRIPT_NAME"], 31 | "query_string": environ["QUERY_STRING"], 32 | "content_length": cl, 33 | "scheme": environ["wsgi.url_scheme"], 34 | "remote_addr": environ["REMOTE_ADDR"], 35 | "remote_host": environ["REMOTE_HOST"], 36 | "server_port": environ["SERVER_PORT"], 37 | "server_name": environ["SERVER_NAME"], 38 | "headers": { 39 | k[len("HTTP_") :]: v for k, v in environ.items() if k.startswith("HTTP_") 40 | }, 41 | } 42 | response = json.dumps(meta).encode("utf8") + b"\r\n\r\n" + request_body 43 | start_response( 44 | "200 OK", 45 | [ 46 | ("Content-Length", str(len(response))), 47 | ("Content-Type", "text/plain"), 48 | ], 49 | ) 50 | return [response] 51 | 52 | 53 | Echo = namedtuple( 54 | "Echo", 55 | ( 56 | "method path_info script_name query_string content_length scheme " 57 | "remote_addr remote_host server_port server_name headers body" 58 | ), 59 | ) 60 | 61 | 62 | def parse_response(response): 63 | meta, body = response.split(b"\r\n\r\n", 1) 64 | meta = json.loads(meta.decode("utf8")) 65 | return Echo(body=body, **meta) 66 | 
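
The `parse_response` helper and `Echo` namedtuple at the end of echo.py are the counterpart to `app()`: they split the echoed payload back into its JSON metadata and raw body. A minimal sketch of that round trip using a hand-built response rather than a live server follows; the field values, header, and body below are illustrative assumptions, and the import assumes the repository root is on `sys.path`, as it is when the test suite runs.

import json

from tests.fixtureapps.echo import parse_response  # package layout shown above

# Hand-built response in the same "JSON metadata + CRLFCRLF + raw body" shape
# that echo.app() produces; every value here is made up for illustration.
meta = {
    "method": "GET", "path_info": "/", "script_name": "", "query_string": "",
    "content_length": "0", "scheme": "http", "remote_addr": "127.0.0.1",
    "remote_host": "127.0.0.1", "server_port": "8080", "server_name": "localhost",
    "headers": {"HOST": "localhost:8080"},
}
raw = json.dumps(meta).encode("utf8") + b"\r\n\r\n" + b"request body"

echo = parse_response(raw)  # returns the Echo namedtuple defined above
assert echo.method == "GET"
assert echo.body == b"request body"
assert echo.headers["HOST"] == "localhost:8080"
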
-------------------------------------------------------------------------------- /tests/fixtureapps/error.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | cl = environ.get("CONTENT_LENGTH", None) 3 | if cl is not None: 4 | cl = int(cl) 5 | body = environ["wsgi.input"].read(cl) 6 | cl = str(len(body)) 7 | if environ["PATH_INFO"] == "/before_start_response": 8 | raise ValueError("wrong") 9 | write = start_response( 10 | "200 OK", [("Content-Length", cl), ("Content-Type", "text/plain")] 11 | ) 12 | if environ["PATH_INFO"] == "/after_write_cb": 13 | write("abc") 14 | if environ["PATH_INFO"] == "/in_generator": 15 | 16 | def foo(): 17 | yield "abc" 18 | raise ValueError 19 | 20 | return foo() 21 | raise ValueError("wrong") 22 | -------------------------------------------------------------------------------- /tests/fixtureapps/error_traceback.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | raise ValueError("Invalid application: " + chr(8364)) 3 | -------------------------------------------------------------------------------- /tests/fixtureapps/filewrapper.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | 4 | here = os.path.dirname(os.path.abspath(__file__)) 5 | fn = os.path.join(here, "groundhog1.jpg") 6 | 7 | 8 | class KindaFilelike: # pragma: no cover 9 | def __init__(self, bytes): 10 | self.bytes = bytes 11 | 12 | def read(self, n): 13 | bytes = self.bytes[:n] 14 | self.bytes = self.bytes[n:] 15 | return bytes 16 | 17 | 18 | class UnseekableIOBase(io.RawIOBase): # pragma: no cover 19 | def __init__(self, bytes): 20 | self.buf = io.BytesIO(bytes) 21 | 22 | def writable(self): 23 | return False 24 | 25 | def readable(self): 26 | return True 27 | 28 | def seekable(self): 29 | return False 30 | 31 | def read(self, n): 32 | return self.buf.read(n) 33 | 34 | 35 | def app(environ, start_response): # pragma: no cover 36 | path_info = environ["PATH_INFO"] 37 | if path_info.startswith("/filelike"): 38 | f = open(fn, "rb") 39 | f.seek(0, 2) 40 | cl = f.tell() 41 | f.seek(0) 42 | if path_info == "/filelike": 43 | headers = [ 44 | ("Content-Length", str(cl)), 45 | ("Content-Type", "image/jpeg"), 46 | ] 47 | elif path_info == "/filelike_nocl": 48 | headers = [("Content-Type", "image/jpeg")] 49 | elif path_info == "/filelike_shortcl": 50 | # short content length 51 | headers = [ 52 | ("Content-Length", "1"), 53 | ("Content-Type", "image/jpeg"), 54 | ] 55 | else: 56 | # long content length (/filelike_longcl) 57 | headers = [ 58 | ("Content-Length", str(cl + 10)), 59 | ("Content-Type", "image/jpeg"), 60 | ] 61 | else: 62 | with open(fn, "rb") as fp: 63 | data = fp.read() 64 | cl = len(data) 65 | f = KindaFilelike(data) 66 | if path_info == "/notfilelike": 67 | headers = [ 68 | ("Content-Length", str(len(data))), 69 | ("Content-Type", "image/jpeg"), 70 | ] 71 | elif path_info == "/notfilelike_iobase": 72 | headers = [ 73 | ("Content-Length", str(len(data))), 74 | ("Content-Type", "image/jpeg"), 75 | ] 76 | f = UnseekableIOBase(data) 77 | elif path_info == "/notfilelike_nocl": 78 | headers = [("Content-Type", "image/jpeg")] 79 | elif path_info == "/notfilelike_shortcl": 80 | # short content length 81 | headers = [ 82 | ("Content-Length", "1"), 83 | ("Content-Type", "image/jpeg"), 84 | ] 85 | else: 86 | # long content length (/notfilelike_longcl) 87 | 
headers = [ 88 | ("Content-Length", str(cl + 10)), 89 | ("Content-Type", "image/jpeg"), 90 | ] 91 | 92 | start_response("200 OK", headers) 93 | return environ["wsgi.file_wrapper"](f, 8192) 94 | -------------------------------------------------------------------------------- /tests/fixtureapps/getline.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | if __name__ == "__main__": 4 | try: 5 | from urllib.request import URLError, urlopen 6 | except ImportError: 7 | from urllib2 import URLError, urlopen 8 | 9 | url = sys.argv[1] 10 | headers = {"Content-Type": "text/plain; charset=utf-8"} 11 | try: 12 | resp = urlopen(url) 13 | line = resp.readline().decode("ascii") # py3 14 | except URLError: 15 | line = "failed to read %s" % url 16 | sys.stdout.write(line) 17 | sys.stdout.flush() 18 | -------------------------------------------------------------------------------- /tests/fixtureapps/groundhog1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Pylons/waitress/ed0149beb3c91def3150ac3cbd57df250a241a9c/tests/fixtureapps/groundhog1.jpg -------------------------------------------------------------------------------- /tests/fixtureapps/nocl.py: -------------------------------------------------------------------------------- 1 | def chunks(l, n): # pragma: no cover 2 | """Yield successive n-sized chunks from l.""" 3 | for i in range(0, len(l), n): 4 | yield l[i : i + n] 5 | 6 | 7 | def gen(body): # pragma: no cover 8 | yield from chunks(body, 10) 9 | 10 | 11 | def app(environ, start_response): # pragma: no cover 12 | cl = environ.get("CONTENT_LENGTH", None) 13 | if cl is not None: 14 | cl = int(cl) 15 | body = environ["wsgi.input"].read(cl) 16 | start_response("200 OK", [("Content-Type", "text/plain")]) 17 | if environ["PATH_INFO"] == "/list": 18 | return [body] 19 | if environ["PATH_INFO"] == "/list_lentwo": 20 | return [body[0:1], body[1:]] 21 | return gen(body) 22 | -------------------------------------------------------------------------------- /tests/fixtureapps/runner.py: -------------------------------------------------------------------------------- 1 | def app(): # pragma: no cover 2 | return None 3 | 4 | 5 | def returns_app(): # pragma: no cover 6 | return app 7 | -------------------------------------------------------------------------------- /tests/fixtureapps/sleepy.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | def app(environ, start_response): # pragma: no cover 5 | if environ["PATH_INFO"] == "/sleepy": 6 | time.sleep(2) 7 | body = b"sleepy returned" 8 | else: 9 | body = b"notsleepy returned" 10 | cl = str(len(body)) 11 | start_response("200 OK", [("Content-Length", cl), ("Content-Type", "text/plain")]) 12 | return [body] 13 | -------------------------------------------------------------------------------- /tests/fixtureapps/toolarge.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | body = b"abcdef" 3 | cl = len(body) 4 | start_response( 5 | "200 OK", [("Content-Length", str(cl)), ("Content-Type", "text/plain")] 6 | ) 7 | return [body] 8 | -------------------------------------------------------------------------------- /tests/fixtureapps/writecb.py: -------------------------------------------------------------------------------- 1 | def app(environ, start_response): # pragma: no cover 2 | path_info = 
environ["PATH_INFO"] 3 | if path_info == "/no_content_length": 4 | headers = [] 5 | else: 6 | headers = [("Content-Length", "9")] 7 | write = start_response("200 OK", headers) 8 | if path_info == "/long_body": 9 | write(b"abcdefghij") 10 | elif path_info == "/short_body": 11 | write(b"abcdefgh") 12 | else: 13 | write(b"abcdefghi") 14 | return [] 15 | -------------------------------------------------------------------------------- /tests/test_init.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | 4 | class Test_serve(unittest.TestCase): 5 | def _callFUT(self, app, **kw): 6 | from waitress import serve 7 | 8 | return serve(app, **kw) 9 | 10 | def test_it(self): 11 | server = DummyServerFactory() 12 | app = object() 13 | result = self._callFUT(app, _server=server, _quiet=True) 14 | self.assertEqual(server.app, app) 15 | self.assertIsNone(result) 16 | self.assertTrue(server.ran) 17 | 18 | 19 | class Test_serve_paste(unittest.TestCase): 20 | def _callFUT(self, app, **kw): 21 | from waitress import serve_paste 22 | 23 | return serve_paste(app, None, **kw) 24 | 25 | def test_it(self): 26 | server = DummyServerFactory() 27 | app = object() 28 | result = self._callFUT(app, _server=server, _quiet=True) 29 | self.assertEqual(server.app, app) 30 | self.assertEqual(result, 0) 31 | self.assertTrue(server.ran) 32 | 33 | 34 | class DummyServerFactory: 35 | ran = False 36 | 37 | def __call__(self, app, **kw): 38 | self.adj = DummyAdj(kw) 39 | self.app = app 40 | self.kw = kw 41 | return self 42 | 43 | def run(self): 44 | self.ran = True 45 | 46 | 47 | class DummyAdj: 48 | verbose = False 49 | 50 | def __init__(self, kw): 51 | self.__dict__.update(kw) 52 | -------------------------------------------------------------------------------- /tests/test_receiver.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import pytest 4 | 5 | 6 | class TestFixedStreamReceiver(unittest.TestCase): 7 | def _makeOne(self, cl, buf): 8 | from waitress.receiver import FixedStreamReceiver 9 | 10 | return FixedStreamReceiver(cl, buf) 11 | 12 | def test_received_remain_lt_1(self): 13 | buf = DummyBuffer() 14 | inst = self._makeOne(0, buf) 15 | result = inst.received("a") 16 | self.assertEqual(result, 0) 17 | self.assertTrue(inst.completed) 18 | 19 | def test_received_remain_lte_datalen(self): 20 | buf = DummyBuffer() 21 | inst = self._makeOne(1, buf) 22 | result = inst.received("aa") 23 | self.assertEqual(result, 1) 24 | self.assertTrue(inst.completed) 25 | self.assertEqual(inst.completed, 1) 26 | self.assertEqual(inst.remain, 0) 27 | self.assertListEqual(buf.data, ["a"]) 28 | 29 | def test_received_remain_gt_datalen(self): 30 | buf = DummyBuffer() 31 | inst = self._makeOne(10, buf) 32 | result = inst.received("aa") 33 | self.assertEqual(result, 2) 34 | self.assertFalse(inst.completed) 35 | self.assertEqual(inst.remain, 8) 36 | self.assertListEqual(buf.data, ["aa"]) 37 | 38 | def test_getfile(self): 39 | buf = DummyBuffer() 40 | inst = self._makeOne(10, buf) 41 | self.assertEqual(inst.getfile(), buf) 42 | 43 | def test_getbuf(self): 44 | buf = DummyBuffer() 45 | inst = self._makeOne(10, buf) 46 | self.assertEqual(inst.getbuf(), buf) 47 | 48 | def test___len__(self): 49 | buf = DummyBuffer(["1", "2"]) 50 | inst = self._makeOne(10, buf) 51 | self.assertEqual(inst.__len__(), 2) 52 | 53 | 54 | class TestChunkedReceiver(unittest.TestCase): 55 | def _makeOne(self, buf): 56 | from waitress.receiver import 
ChunkedReceiver 57 | 58 | return ChunkedReceiver(buf) 59 | 60 | def test_alreadycompleted(self): 61 | buf = DummyBuffer() 62 | inst = self._makeOne(buf) 63 | inst.completed = True 64 | result = inst.received(b"a") 65 | self.assertEqual(result, 0) 66 | self.assertTrue(inst.completed) 67 | 68 | def test_received_remain_gt_zero(self): 69 | buf = DummyBuffer() 70 | inst = self._makeOne(buf) 71 | inst.chunk_remainder = 100 72 | result = inst.received(b"a") 73 | self.assertEqual(inst.chunk_remainder, 99) 74 | self.assertEqual(result, 1) 75 | self.assertFalse(inst.completed) 76 | 77 | def test_received_control_line_notfinished(self): 78 | buf = DummyBuffer() 79 | inst = self._makeOne(buf) 80 | result = inst.received(b"a") 81 | self.assertEqual(inst.control_line, b"a") 82 | self.assertEqual(result, 1) 83 | self.assertFalse(inst.completed) 84 | 85 | def test_received_control_line_finished_garbage_in_input(self): 86 | buf = DummyBuffer() 87 | inst = self._makeOne(buf) 88 | result = inst.received(b"garbage\r\n") 89 | self.assertEqual(result, 9) 90 | self.assertTrue(inst.error) 91 | 92 | def test_received_control_line_finished_all_chunks_not_received(self): 93 | buf = DummyBuffer() 94 | inst = self._makeOne(buf) 95 | result = inst.received(b"a;discard\r\n") 96 | self.assertEqual(inst.control_line, b"") 97 | self.assertEqual(inst.chunk_remainder, 10) 98 | self.assertFalse(inst.all_chunks_received) 99 | self.assertEqual(result, 11) 100 | self.assertFalse(inst.completed) 101 | 102 | def test_received_control_line_finished_all_chunks_received(self): 103 | buf = DummyBuffer() 104 | inst = self._makeOne(buf) 105 | result = inst.received(b"0;discard\r\n") 106 | self.assertEqual(inst.control_line, b"") 107 | self.assertTrue(inst.all_chunks_received) 108 | self.assertEqual(result, 11) 109 | self.assertFalse(inst.completed) 110 | 111 | def test_received_trailer_startswith_crlf(self): 112 | buf = DummyBuffer() 113 | inst = self._makeOne(buf) 114 | inst.all_chunks_received = True 115 | result = inst.received(b"\r\n") 116 | self.assertEqual(result, 2) 117 | self.assertTrue(inst.completed) 118 | 119 | def test_received_trailer_startswith_lf(self): 120 | buf = DummyBuffer() 121 | inst = self._makeOne(buf) 122 | inst.all_chunks_received = True 123 | result = inst.received(b"\n") 124 | self.assertEqual(result, 1) 125 | self.assertFalse(inst.completed) 126 | 127 | def test_received_trailer_not_finished(self): 128 | buf = DummyBuffer() 129 | inst = self._makeOne(buf) 130 | inst.all_chunks_received = True 131 | result = inst.received(b"a") 132 | self.assertEqual(result, 1) 133 | self.assertFalse(inst.completed) 134 | 135 | def test_received_trailer_finished(self): 136 | buf = DummyBuffer() 137 | inst = self._makeOne(buf) 138 | inst.all_chunks_received = True 139 | result = inst.received(b"abc\r\n\r\n") 140 | self.assertEqual(inst.trailer, b"abc\r\n\r\n") 141 | self.assertEqual(result, 7) 142 | self.assertTrue(inst.completed) 143 | 144 | def test_getfile(self): 145 | buf = DummyBuffer() 146 | inst = self._makeOne(buf) 147 | self.assertEqual(inst.getfile(), buf) 148 | 149 | def test_getbuf(self): 150 | buf = DummyBuffer() 151 | inst = self._makeOne(buf) 152 | self.assertEqual(inst.getbuf(), buf) 153 | 154 | def test___len__(self): 155 | buf = DummyBuffer(["1", "2"]) 156 | inst = self._makeOne(buf) 157 | self.assertEqual(len(inst), 2) 158 | 159 | def test_received_chunk_is_properly_terminated(self): 160 | buf = DummyBuffer() 161 | inst = self._makeOne(buf) 162 | data = b"4\r\nWiki\r\n" 163 | result = inst.received(data) 164 
| self.assertEqual(result, len(data)) 165 | self.assertFalse(inst.completed) 166 | self.assertEqual(buf.data[0], b"Wiki") 167 | 168 | def test_received_chunk_not_properly_terminated(self): 169 | from waitress.utilities import BadRequest 170 | 171 | buf = DummyBuffer() 172 | inst = self._makeOne(buf) 173 | data = b"4\r\nWikibadchunk\r\n" 174 | result = inst.received(data) 175 | self.assertEqual(result, len(data)) 176 | self.assertFalse(inst.completed) 177 | self.assertEqual(buf.data[0], b"Wiki") 178 | self.assertIsInstance(inst.error, BadRequest) 179 | 180 | def test_received_multiple_chunks(self): 181 | buf = DummyBuffer() 182 | inst = self._makeOne(buf) 183 | data = ( 184 | b"4\r\n" 185 | b"Wiki\r\n" 186 | b"5\r\n" 187 | b"pedia\r\n" 188 | b"E\r\n" 189 | b" in\r\n" 190 | b"\r\n" 191 | b"chunks.\r\n" 192 | b"0\r\n" 193 | b"\r\n" 194 | ) 195 | result = inst.received(data) 196 | self.assertEqual(result, len(data)) 197 | self.assertTrue(inst.completed) 198 | self.assertEqual(b"".join(buf.data), b"Wikipedia in\r\n\r\nchunks.") 199 | self.assertIsNone(inst.error) 200 | 201 | def test_received_multiple_chunks_split(self): 202 | buf = DummyBuffer() 203 | inst = self._makeOne(buf) 204 | data1 = b"4\r\nWiki\r" 205 | result = inst.received(data1) 206 | self.assertEqual(result, len(data1)) 207 | 208 | data2 = ( 209 | b"\n5\r\n" 210 | b"pedia\r\n" 211 | b"E\r\n" 212 | b" in\r\n" 213 | b"\r\n" 214 | b"chunks.\r\n" 215 | b"0\r\n" 216 | b"\r\n" 217 | ) 218 | 219 | result = inst.received(data2) 220 | self.assertEqual(result, len(data2)) 221 | 222 | self.assertTrue(inst.completed) 223 | self.assertEqual(b"".join(buf.data), b"Wikipedia in\r\n\r\nchunks.") 224 | self.assertIsNone(inst.error) 225 | 226 | 227 | class TestChunkedReceiverParametrized: 228 | def _makeOne(self, buf): 229 | from waitress.receiver import ChunkedReceiver 230 | 231 | return ChunkedReceiver(buf) 232 | 233 | @pytest.mark.parametrize( 234 | "invalid_extension", [b"\n", b"invalid=", b"\r", b"invalid = true"] 235 | ) 236 | def test_received_invalid_extensions(self, invalid_extension): 237 | from waitress.utilities import BadRequest 238 | 239 | buf = DummyBuffer() 240 | inst = self._makeOne(buf) 241 | data = b"4;" + invalid_extension + b"\r\ntest\r\n" 242 | result = inst.received(data) 243 | assert result == len(data) 244 | assert isinstance(inst.error, BadRequest) 245 | assert inst.error.body == "Invalid chunk extension" 246 | 247 | @pytest.mark.parametrize( 248 | "valid_extension", [b"test", b"valid=true", b"valid=true;other=true"] 249 | ) 250 | def test_received_valid_extensions(self, valid_extension): 251 | # While waitress may ignore extensions in Chunked Encoding, we do want 252 | # to make sure that we don't fail when we do encounter one that is 253 | # valid 254 | buf = DummyBuffer() 255 | inst = self._makeOne(buf) 256 | data = b"4;" + valid_extension + b"\r\ntest\r\n" 257 | result = inst.received(data) 258 | assert result == len(data) 259 | assert inst.error is None 260 | 261 | @pytest.mark.parametrize( 262 | "invalid_size", [b"0x04", b"+0x04", b"x04", b"+04", b" 04", b" 0x04"] 263 | ) 264 | def test_received_invalid_size(self, invalid_size): 265 | from waitress.utilities import BadRequest 266 | 267 | buf = DummyBuffer() 268 | inst = self._makeOne(buf) 269 | data = invalid_size + b"\r\ntest\r\n" 270 | result = inst.received(data) 271 | assert result == len(data) 272 | assert isinstance(inst.error, BadRequest) 273 | assert inst.error.body == "Invalid chunk size" 274 | 275 | 276 | class DummyBuffer: 277 | def __init__(self, 
data=None): 278 | if data is None: 279 | data = [] 280 | self.data = data 281 | 282 | def append(self, s): 283 | self.data.append(s) 284 | 285 | def getfile(self): 286 | return self 287 | 288 | def __len__(self): 289 | return len(self.data) 290 | -------------------------------------------------------------------------------- /tests/test_regression.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2005 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | """Tests for waitress.channel maintenance logic 15 | """ 16 | import doctest 17 | 18 | 19 | class FakeSocket: # pragma: no cover 20 | data = "" 21 | setblocking = lambda *_: None 22 | close = lambda *_: None 23 | 24 | def __init__(self, no): 25 | self.no = no 26 | 27 | def fileno(self): 28 | return self.no 29 | 30 | def getpeername(self): 31 | return ("localhost", self.no) 32 | 33 | def send(self, data): 34 | self.data += data 35 | return len(data) 36 | 37 | def recv(self, data): 38 | return "data" 39 | 40 | 41 | def zombies_test(): 42 | """Regression test for HTTPChannel.maintenance method 43 | 44 | Bug: This method checks for channels that have been "inactive" for a 45 | configured time. The bug was that last_activity is set at creation time 46 | but never updated during async channel activity (reads and writes), so 47 | any channel older than the configured timeout will be closed when a new 48 | channel is created, regardless of activity. 49 | 50 | >>> import time 51 | >>> import waitress.adjustments 52 | >>> config = waitress.adjustments.Adjustments() 53 | 54 | >>> from waitress.server import HTTPServer 55 | >>> class TestServer(HTTPServer): 56 | ... def bind(self, (ip, port)): 57 | ... print "Listening on %s:%d" % (ip or '*', port) 58 | >>> sb = TestServer('127.0.0.1', 80, start=False, verbose=True) 59 | Listening on 127.0.0.1:80 60 | 61 | First we confirm the correct behavior, where a channel with no activity 62 | for the timeout duration gets closed. 63 | 64 | >>> from waitress.channel import HTTPChannel 65 | >>> socket = FakeSocket(42) 66 | >>> channel = HTTPChannel(sb, socket, ('localhost', 42)) 67 | 68 | >>> channel.connected 69 | True 70 | 71 | >>> channel.last_activity -= int(config.channel_timeout) + 1 72 | 73 | >>> channel.next_channel_cleanup[0] = channel.creation_time - int( 74 | ... config.cleanup_interval) - 1 75 | 76 | >>> socket2 = FakeSocket(7) 77 | >>> channel2 = HTTPChannel(sb, socket2, ('localhost', 7)) 78 | 79 | >>> channel.connected 80 | False 81 | 82 | Write Activity 83 | -------------- 84 | 85 | Now we make sure that if there is activity the channel doesn't get closed 86 | incorrectly. 87 | 88 | >>> channel2.connected 89 | True 90 | 91 | >>> channel2.last_activity -= int(config.channel_timeout) + 1 92 | 93 | >>> channel2.handle_write() 94 | 95 | >>> channel2.next_channel_cleanup[0] = channel2.creation_time - int( 96 | ... 
config.cleanup_interval) - 1 97 | 98 | >>> socket3 = FakeSocket(3) 99 | >>> channel3 = HTTPChannel(sb, socket3, ('localhost', 3)) 100 | 101 | >>> channel2.connected 102 | True 103 | 104 | Read Activity 105 | -------------- 106 | 107 | We should test to see that read activity will update a channel as well. 108 | 109 | >>> channel3.connected 110 | True 111 | 112 | >>> channel3.last_activity -= int(config.channel_timeout) + 1 113 | 114 | >>> import waitress.parser 115 | >>> channel3.parser_class = ( 116 | ... waitress.parser.HTTPRequestParser) 117 | >>> channel3.handle_read() 118 | 119 | >>> channel3.next_channel_cleanup[0] = channel3.creation_time - int( 120 | ... config.cleanup_interval) - 1 121 | 122 | >>> socket4 = FakeSocket(4) 123 | >>> channel4 = HTTPChannel(sb, socket4, ('localhost', 4)) 124 | 125 | >>> channel3.connected 126 | True 127 | 128 | Main loop window 129 | ---------------- 130 | 131 | There is also a corner case we'll do a shallow test for where a 132 | channel can be closed waiting for the main loop. 133 | 134 | >>> channel4.last_activity -= 1 135 | 136 | >>> last_active = channel4.last_activity 137 | 138 | >>> channel4.set_async() 139 | 140 | >>> channel4.last_activity != last_active 141 | True 142 | """ 143 | 144 | 145 | def test_suite(): 146 | return doctest.DocTestSuite() 147 | -------------------------------------------------------------------------------- /tests/test_runner.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import os 3 | import sys 4 | import unittest 5 | 6 | from waitress import runner 7 | 8 | 9 | class Test_run(unittest.TestCase): 10 | def match_output(self, argv, code, regex): 11 | argv = ["waitress-serve"] + argv 12 | with capture() as captured: 13 | self.assertEqual(runner.run(argv=argv), code) 14 | self.assertRegex(captured.getvalue(), regex) 15 | captured.close() 16 | 17 | def test_bad(self): 18 | self.match_output(["--bad-opt"], 1, "^Error: option --bad-opt not recognized") 19 | 20 | def test_help(self): 21 | self.match_output(["--help"], 0, "^Usage:\n\n waitress-serve") 22 | 23 | def test_no_app(self): 24 | self.match_output([], 1, "^Error: Specify an application") 25 | 26 | def test_multiple_apps_app(self): 27 | self.match_output(["a:a", "b:b"], 1, "^Error: Provide only one WSGI app") 28 | self.match_output(["--app=a:a", "b:b"], 1, "^Error: Provide only one WSGI app") 29 | 30 | def test_bad_apps_app(self): 31 | self.match_output(["a"], 1, "No module named 'a'") 32 | 33 | def test_bad_app_module(self): 34 | self.match_output(["nonexistent:a"], 1, "No module named 'nonexistent'") 35 | 36 | def test_cwd_added_to_path(self): 37 | def null_serve(app, **kw): 38 | pass 39 | 40 | sys_path = sys.path 41 | current_dir = os.getcwd() 42 | try: 43 | os.chdir(os.path.dirname(__file__)) 44 | argv = [ 45 | "waitress-serve", 46 | "fixtureapps.runner:app", 47 | ] 48 | self.assertEqual(runner.run(argv=argv, _serve=null_serve), 0) 49 | finally: 50 | sys.path = sys_path 51 | os.chdir(current_dir) 52 | 53 | def test_bad_app_object(self): 54 | self.match_output( 55 | ["tests.fixtureapps.runner:a"], 56 | 1, 57 | "module 'tests.fixtureapps.runner' has no attribute 'a'", 58 | ) 59 | 60 | def test_simple_call(self): 61 | from tests.fixtureapps import runner as _apps 62 | 63 | def check_server(app, **kw): 64 | self.assertIs(app, _apps.app) 65 | self.assertDictEqual(kw, {"port": "80"}) 66 | 67 | argv = [ 68 | "waitress-serve", 69 | "--port=80", 70 | "tests.fixtureapps.runner:app", 71 | ] 72 | 
self.assertEqual(runner.run(argv=argv, _serve=check_server), 0) 73 | 74 | def test_returned_app(self): 75 | from tests.fixtureapps import runner as _apps 76 | 77 | def check_server(app, **kw): 78 | self.assertIs(app, _apps.app) 79 | self.assertDictEqual(kw, {"port": "80"}) 80 | 81 | argv = [ 82 | "waitress-serve", 83 | "--port=80", 84 | "--call", 85 | "tests.fixtureapps.runner:returns_app", 86 | ] 87 | self.assertEqual(runner.run(argv=argv, _serve=check_server), 0) 88 | 89 | 90 | @contextlib.contextmanager 91 | def capture(): 92 | from io import StringIO 93 | 94 | fd = StringIO() 95 | old_stdout, old_stderr = sys.stdout, sys.stderr 96 | sys.stdout = fd 97 | sys.stderr = fd 98 | try: 99 | yield fd 100 | finally: 101 | sys.stdout, sys.stderr = old_stdout, old_stderr 102 | -------------------------------------------------------------------------------- /tests/test_trigger.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import unittest 4 | 5 | if not sys.platform.startswith("win"): 6 | 7 | class Test_trigger(unittest.TestCase): 8 | def _makeOne(self, map): 9 | from waitress.trigger import trigger 10 | 11 | self.inst = trigger(map) 12 | return self.inst 13 | 14 | def tearDown(self): 15 | self.inst.close() # prevent __del__ warning from file_dispatcher 16 | 17 | def test__close(self): 18 | map = {} 19 | inst = self._makeOne(map) 20 | fd1, fd2 = inst._fds 21 | inst.close() 22 | self.assertRaises(OSError, os.read, fd1, 1) 23 | self.assertRaises(OSError, os.read, fd2, 1) 24 | 25 | def test__physical_pull(self): 26 | map = {} 27 | inst = self._makeOne(map) 28 | inst._physical_pull() 29 | r = os.read(inst._fds[0], 1) 30 | self.assertEqual(r, b"x") 31 | 32 | def test_readable(self): 33 | map = {} 34 | inst = self._makeOne(map) 35 | self.assertTrue(inst.readable()) 36 | 37 | def test_writable(self): 38 | map = {} 39 | inst = self._makeOne(map) 40 | self.assertFalse(inst.writable()) 41 | 42 | def test_handle_connect(self): 43 | map = {} 44 | inst = self._makeOne(map) 45 | self.assertIsNone(inst.handle_connect()) 46 | 47 | def test_close(self): 48 | map = {} 49 | inst = self._makeOne(map) 50 | self.assertIsNone(inst.close()) 51 | self.assertTrue(inst._closed) 52 | 53 | def test_handle_close(self): 54 | map = {} 55 | inst = self._makeOne(map) 56 | self.assertIsNone(inst.handle_close()) 57 | self.assertTrue(inst._closed) 58 | 59 | def test_pull_trigger_nothunk(self): 60 | map = {} 61 | inst = self._makeOne(map) 62 | self.assertIsNone(inst.pull_trigger()) 63 | r = os.read(inst._fds[0], 1) 64 | self.assertEqual(r, b"x") 65 | 66 | def test_pull_trigger_thunk(self): 67 | map = {} 68 | inst = self._makeOne(map) 69 | self.assertIsNone(inst.pull_trigger(True)) 70 | self.assertEqual(len(inst.thunks), 1) 71 | r = os.read(inst._fds[0], 1) 72 | self.assertEqual(r, b"x") 73 | 74 | def test_handle_read_socket_error(self): 75 | map = {} 76 | inst = self._makeOne(map) 77 | result = inst.handle_read() 78 | self.assertIsNone(result) 79 | 80 | def test_handle_read_no_socket_error(self): 81 | map = {} 82 | inst = self._makeOne(map) 83 | inst.pull_trigger() 84 | result = inst.handle_read() 85 | self.assertIsNone(result) 86 | 87 | def test_handle_read_thunk(self): 88 | map = {} 89 | inst = self._makeOne(map) 90 | inst.pull_trigger() 91 | L = [] 92 | inst.thunks = [lambda: L.append(True)] 93 | result = inst.handle_read() 94 | self.assertIsNone(result) 95 | self.assertListEqual(L, [True]) 96 | self.assertListEqual(inst.thunks, []) 97 | 98 | def 
test_handle_read_thunk_error(self): 99 | map = {} 100 | inst = self._makeOne(map) 101 | 102 | def errorthunk(): 103 | raise ValueError 104 | 105 | inst.pull_trigger(errorthunk) 106 | L = [] 107 | inst.log_info = lambda *arg: L.append(arg) 108 | result = inst.handle_read() 109 | self.assertIsNone(result) 110 | self.assertEqual(len(L), 1) 111 | self.assertListEqual(inst.thunks, []) 112 | -------------------------------------------------------------------------------- /tests/test_utilities.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # Copyright (c) 2002 Zope Foundation and Contributors. 4 | # All Rights Reserved. 5 | # 6 | # This software is subject to the provisions of the Zope Public License, 7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. 8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED 9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS 11 | # FOR A PARTICULAR PURPOSE. 12 | # 13 | ############################################################################## 14 | 15 | import unittest 16 | 17 | 18 | class Test_parse_http_date(unittest.TestCase): 19 | def _callFUT(self, v): 20 | from waitress.utilities import parse_http_date 21 | 22 | return parse_http_date(v) 23 | 24 | def test_rfc850(self): 25 | val = "Tuesday, 08-Feb-94 14:15:29 GMT" 26 | result = self._callFUT(val) 27 | self.assertEqual(result, 760716929) 28 | 29 | def test_rfc822(self): 30 | val = "Sun, 08 Feb 1994 14:15:29 GMT" 31 | result = self._callFUT(val) 32 | self.assertEqual(result, 760716929) 33 | 34 | def test_neither(self): 35 | val = "" 36 | result = self._callFUT(val) 37 | self.assertEqual(result, 0) 38 | 39 | 40 | class Test_build_http_date(unittest.TestCase): 41 | def test_rountdrip(self): 42 | from time import time 43 | 44 | from waitress.utilities import build_http_date, parse_http_date 45 | 46 | t = int(time()) 47 | self.assertEqual(t, parse_http_date(build_http_date(t))) 48 | 49 | 50 | class Test_unpack_rfc850(unittest.TestCase): 51 | def _callFUT(self, val): 52 | from waitress.utilities import rfc850_reg, unpack_rfc850 53 | 54 | return unpack_rfc850(rfc850_reg.match(val.lower())) 55 | 56 | def test_it(self): 57 | val = "Tuesday, 08-Feb-94 14:15:29 GMT" 58 | result = self._callFUT(val) 59 | self.assertTupleEqual(result, (1994, 2, 8, 14, 15, 29, 0, 0, 0)) 60 | 61 | 62 | class Test_unpack_rfc_822(unittest.TestCase): 63 | def _callFUT(self, val): 64 | from waitress.utilities import rfc822_reg, unpack_rfc822 65 | 66 | return unpack_rfc822(rfc822_reg.match(val.lower())) 67 | 68 | def test_it(self): 69 | val = "Sun, 08 Feb 1994 14:15:29 GMT" 70 | result = self._callFUT(val) 71 | self.assertTupleEqual(result, (1994, 2, 8, 14, 15, 29, 0, 0, 0)) 72 | 73 | 74 | class Test_find_double_newline(unittest.TestCase): 75 | def _callFUT(self, val): 76 | from waitress.utilities import find_double_newline 77 | 78 | return find_double_newline(val) 79 | 80 | def test_empty(self): 81 | self.assertEqual(self._callFUT(b""), -1) 82 | 83 | def test_one_linefeed(self): 84 | self.assertEqual(self._callFUT(b"\n"), -1) 85 | 86 | def test_double_linefeed(self): 87 | self.assertEqual(self._callFUT(b"\n\n"), -1) 88 | 89 | def test_one_crlf(self): 90 | self.assertEqual(self._callFUT(b"\r\n"), -1) 91 | 92 | def test_double_crfl(self): 93 | 
self.assertEqual(self._callFUT(b"\r\n\r\n"), 4) 94 | 95 | def test_mixed(self): 96 | self.assertEqual(self._callFUT(b"\n\n00\r\n\r\n"), 8) 97 | 98 | 99 | class TestBadRequest(unittest.TestCase): 100 | def _makeOne(self): 101 | from waitress.utilities import BadRequest 102 | 103 | return BadRequest(1) 104 | 105 | def test_it(self): 106 | inst = self._makeOne() 107 | self.assertEqual(inst.body, 1) 108 | 109 | 110 | class Test_undquote(unittest.TestCase): 111 | def _callFUT(self, value): 112 | from waitress.utilities import undquote 113 | 114 | return undquote(value) 115 | 116 | def test_empty(self): 117 | self.assertEqual(self._callFUT(""), "") 118 | 119 | def test_quoted(self): 120 | self.assertEqual(self._callFUT('"test"'), "test") 121 | 122 | def test_unquoted(self): 123 | self.assertEqual(self._callFUT("test"), "test") 124 | 125 | def test_quoted_backslash_quote(self): 126 | self.assertEqual(self._callFUT('"\\""'), '"') 127 | 128 | def test_quoted_htab(self): 129 | self.assertEqual(self._callFUT('"\t"'), "\t") 130 | 131 | def test_quoted_backslash_htab(self): 132 | self.assertEqual(self._callFUT('"\\\t"'), "\t") 133 | 134 | def test_quoted_backslash_invalid(self): 135 | self.assertRaises(ValueError, self._callFUT, '"\\"') 136 | 137 | def test_invalid_quoting(self): 138 | self.assertRaises(ValueError, self._callFUT, '"test') 139 | 140 | def test_invalid_quoting_single_quote(self): 141 | self.assertRaises(ValueError, self._callFUT, '"') 142 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | lint, 4 | py39,py310,py311,py312,py313,pypy39,pypy310 5 | coverage, 6 | docs 7 | isolated_build = True 8 | 9 | [testenv] 10 | commands = 11 | python --version 12 | python -mpytest \ 13 | pypy39: --no-cov \ 14 | pypy310: --no-cov \ 15 | {posargs:} 16 | extras = 17 | testing 18 | setenv = 19 | COVERAGE_FILE=.coverage.{envname} 20 | 21 | [testenv:coverage] 22 | skip_install = True 23 | commands = 24 | coverage combine 25 | coverage xml 26 | coverage report --fail-under=100 27 | deps = 28 | coverage 29 | setenv = 30 | COVERAGE_FILE=.coverage 31 | 32 | [testenv:docs] 33 | allowlist_externals = 34 | make 35 | commands = 36 | make -C docs html BUILDDIR={envdir} "SPHINXOPTS=-W -E -D suppress_warnings=ref.term" 37 | extras = 38 | docs 39 | 40 | [testenv:lint] 41 | skip_install = True 42 | commands = 43 | isort --check-only --df src/waitress tests 44 | black --check --diff . 45 | check-manifest 46 | # flake8 src/waitress/ tests 47 | # build sdist/wheel 48 | python -m build . 49 | twine check dist/* 50 | deps = 51 | black 52 | build 53 | check-manifest 54 | flake8 55 | flake8-bugbear 56 | isort 57 | readme_renderer 58 | twine 59 | 60 | [testenv:format] 61 | skip_install = true 62 | commands = 63 | isort src/waitress tests 64 | black . 65 | deps = 66 | black 67 | isort 68 | 69 | [testenv:build] 70 | skip_install = true 71 | commands = 72 | # clean up build/ and dist/ folders 73 | python -c 'import shutil; shutil.rmtree("build", ignore_errors=True)' 74 | # Make sure we aren't forgetting anything 75 | check-manifest 76 | # build sdist/wheel 77 | python -m build . 78 | # Verify all is well 79 | twine check dist/* 80 | 81 | deps = 82 | build 83 | check-manifest 84 | readme_renderer 85 | twine 86 | --------------------------------------------------------------------------------
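
tests/test_init.py above exercises waitress's public serve() entry point with a dummy server factory; outside the tests, the same call starts a real server. A minimal, self-contained sketch of that entry point in use follows; the host, port, and trivial WSGI app are illustrative assumptions, not taken from this repository.

from waitress import serve


def app(environ, start_response):
    # Trivial WSGI app used only for this sketch.
    body = b"Hello from waitress\n"
    start_response(
        "200 OK",
        [("Content-Type", "text/plain"), ("Content-Length", str(len(body)))],
    )
    return [body]


if __name__ == "__main__":
    # host/port are example values; serve() blocks until interrupted.
    serve(app, host="127.0.0.1", port=8080)
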