├── blib2to3
│   ├── __init__.py
│   ├── __init__.pyi
│   ├── pgen2
│   │   ├── __init__.py
│   │   ├── __init__.pyi
│   │   ├── literals.pyi
│   │   ├── grammar.pyi
│   │   ├── tokenize.pyi
│   │   ├── driver.pyi
│   │   ├── parse.pyi
│   │   ├── token.pyi
│   │   ├── token.py
│   │   ├── literals.py
│   │   ├── pgen.pyi
│   │   ├── grammar.py
│   │   ├── parse.py
│   │   ├── driver.py
│   │   ├── conv.py
│   │   └── pgen.py
│   ├── README
│   ├── PatternGrammar.txt
│   ├── pygram.py
│   ├── pygram.pyi
│   ├── pytree.pyi
│   ├── Grammar.txt
│   └── LICENSE
├── docs
│   ├── contributing.md
│   ├── authors.md
│   ├── license.md
│   ├── change_log.md
│   ├── testimonials.md
│   ├── show_your_style.md
│   ├── requirements.txt
│   ├── editor_integration.md
│   ├── the_black_code_style.md
│   ├── contributing_to_black.md
│   ├── installation_and_usage.md
│   ├── ignoring_unmodified_files.md
│   ├── version_control_integration.md
│   ├── _static
│   │   ├── logo2.png
│   │   ├── logo2-readme.png
│   │   ├── license.svg
│   │   └── pypi_template.svg
│   ├── environment.yml
│   ├── reference
│   │   ├── reference_summary.rst
│   │   ├── reference_exceptions.rst
│   │   ├── reference_classes.rst
│   │   └── reference_functions.rst
│   ├── Makefile
│   ├── make.bat
│   ├── index.rst
│   └── conf.py
├── tests
│   ├── include_exclude_tests
│   │   └── b
│   │       ├── exclude
│   │       │   ├── a.pie
│   │       │   ├── a.py
│   │       │   └── a.pyi
│   │       ├── dont_exclude
│   │       │   ├── a.pie
│   │       │   ├── a.py
│   │       │   └── a.pyi
│   │       └── .definitely_exclude
│   │           ├── a.pie
│   │           ├── a.py
│   │           └── a.pyi
│   ├── force_pyi.py
│   ├── string_prefixes.py
│   ├── fstring.py
│   ├── python2_unicode_literals.py
│   ├── stub.pyi
│   ├── force_py36.py
│   ├── python2.py
│   ├── slices.py
│   ├── debug_visitor.py
│   ├── class_blank_parentheses.py
│   ├── function2.py
│   ├── comments5.py
│   ├── comments3.py
│   ├── string_quotes.py
│   ├── comments.py
│   ├── import_spacing.py
│   ├── comments4.py
│   ├── cantfit.py
│   ├── empty_lines.py
│   ├── class_methods_new_line.py
│   ├── composition.py
│   ├── fmtonoff.py
│   ├── comments2.py
│   ├── function.py
│   └── expression.diff
├── setup.cfg
├── .coveragerc
├── readthedocs.yml
├── .gitignore
├── MANIFEST.in
├── .pre-commit-hooks.yaml
├── .flake8
├── .appveyor.yml
├── Pipfile
├── .github
│   ├── ISSUE_TEMPLATE.md
│   └── CODE_OF_CONDUCT.md
├── .pre-commit-config.yaml
├── mypy.ini
├── .travis.yml
├── LICENSE
├── CONTRIBUTING.md
├── setup.py
└── plugin
    └── black.vim
/blib2to3/__init__.py:
--------------------------------------------------------------------------------
1 | #empty
2 |
--------------------------------------------------------------------------------
/docs/contributing.md:
--------------------------------------------------------------------------------
1 | ../CONTRIBUTING.md
--------------------------------------------------------------------------------
/docs/authors.md:
--------------------------------------------------------------------------------
1 | _build/generated/authors.md
--------------------------------------------------------------------------------
/docs/license.md:
--------------------------------------------------------------------------------
1 | _build/generated/license.md
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/exclude/a.pie:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/exclude/a.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/exclude/a.pyi:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/change_log.md:
--------------------------------------------------------------------------------
1 | _build/generated/change_log.md
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/dont_exclude/a.pie:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/dont_exclude/a.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/dont_exclude/a.pyi:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/testimonials.md:
--------------------------------------------------------------------------------
1 | _build/generated/testimonials.md
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | python-tag = py36
3 |
--------------------------------------------------------------------------------
/blib2to3/__init__.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3 (Python 3.6)
2 |
--------------------------------------------------------------------------------
/docs/show_your_style.md:
--------------------------------------------------------------------------------
1 | _build/generated/show_your_style.md
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/.definitely_exclude/a.pie:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/.definitely_exclude/a.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/include_exclude_tests/b/.definitely_exclude/a.pyi:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | recommonmark==0.4.0
2 | Sphinx==1.7.2
3 |
--------------------------------------------------------------------------------
/docs/editor_integration.md:
--------------------------------------------------------------------------------
1 | _build/generated/editor_integration.md
--------------------------------------------------------------------------------
/docs/the_black_code_style.md:
--------------------------------------------------------------------------------
1 | _build/generated/the_black_code_style.md
--------------------------------------------------------------------------------
/docs/contributing_to_black.md:
--------------------------------------------------------------------------------
1 | _build/generated/contributing_to_black.md
--------------------------------------------------------------------------------
/docs/installation_and_usage.md:
--------------------------------------------------------------------------------
1 | _build/generated/installation_and_usage.md
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [report]
2 | omit =
3 | blib2to3/*
4 | */site-packages/*
5 |
--------------------------------------------------------------------------------
/docs/ignoring_unmodified_files.md:
--------------------------------------------------------------------------------
1 | _build/generated/ignoring_unmodified_files.md
--------------------------------------------------------------------------------
/docs/version_control_integration.md:
--------------------------------------------------------------------------------
1 | _build/generated/version_control_integration.md
--------------------------------------------------------------------------------
/docs/_static/logo2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/peterbe/black/master/docs/_static/logo2.png
--------------------------------------------------------------------------------
/tests/force_pyi.py:
--------------------------------------------------------------------------------
1 | def f(): ...
2 |
3 | def g(): ...
4 | # output
5 | def f(): ...
6 | def g(): ...
7 |
--------------------------------------------------------------------------------
/docs/_static/logo2-readme.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/peterbe/black/master/docs/_static/logo2-readme.png
--------------------------------------------------------------------------------
/readthedocs.yml:
--------------------------------------------------------------------------------
1 | name: black
2 | type: sphinx
3 | conda:
4 | file: docs/environment.yml
5 | python:
6 | version: 3
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .coverage
2 | _build
3 | .DS_Store
4 | .vscode
5 | docs/_static/pypi.svg
6 | .tox
7 | __pycache__
8 | black.egg-info
9 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.rst *.md LICENSE
2 | recursive-include blib2to3 *.txt *.py LICENSE
3 | recursive-include tests *.txt *.out *.diff *.py *.pyi *.pie
4 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | """The pgen2 package."""
5 |
--------------------------------------------------------------------------------
/docs/environment.yml:
--------------------------------------------------------------------------------
1 | name: black_docs
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | - python>=3.6
6 | - Sphinx==1.7.2
7 | - pip:
8 | - recommonmark==0.4.0
9 | - git+https://git@github.com/ambv/black.git
10 |
--------------------------------------------------------------------------------
/.pre-commit-hooks.yaml:
--------------------------------------------------------------------------------
1 | - id: black
2 | name: black
3 | description: 'Black: The uncompromising Python code formatter'
4 | entry: black
5 | language: python
6 | language_version: python3.6
7 | types: [python]
8 |
--------------------------------------------------------------------------------
/tests/string_prefixes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3.6
2 |
3 | name = R"Łukasz"
4 | F"hello {name}"
5 | B"hello"
6 |
7 | # output
8 |
9 |
10 | #!/usr/bin/env python3.6
11 |
12 | name = r"Łukasz"
13 | f"hello {name}"
14 | b"hello"
15 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/__init__.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pgen2 (Python 3.6)
2 |
3 | import os
4 | import sys
5 | from typing import Text, Union
6 |
7 | if sys.version_info >= (3, 6):
8 | _Path = Union[Text, os.PathLike]
9 | else:
10 | _Path = Text
11 |
--------------------------------------------------------------------------------
/docs/reference/reference_summary.rst:
--------------------------------------------------------------------------------
1 | Developer reference
2 | ===================
3 |
4 | *Contents are subject to change.*
5 |
6 | .. toctree::
7 | :maxdepth: 2
8 |
9 | reference_classes
10 | reference_functions
11 | reference_exceptions
12 |
--------------------------------------------------------------------------------
/tests/fstring.py:
--------------------------------------------------------------------------------
1 | f"f-string without formatted values is just a string"
2 | f"{{NOT a formatted value}}"
3 | f"some f-string with {a} {few():.2f} {formatted.values!r}"
4 | f"{f'{nested} inner'} outer"
5 | f"space between opening braces: { {a for a in (1, 2, 3)}}"
6 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | # This is an example .flake8 config, used when developing *Black* itself.
2 | # Keep in sync with setup.cfg which is used for source packages.
3 |
4 | [flake8]
5 | ignore = E203, E266, E501, W503
6 | max-line-length = 80
7 | max-complexity = 18
8 | select = B,C,E,F,W,T4,B9
9 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/literals.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pgen2.literals (Python 3.6)
2 |
3 | from typing import Dict, Match, Text
4 |
5 | simple_escapes: Dict[Text, Text]
6 |
7 | def escape(m: Match) -> Text: ...
8 | def evalString(s: Text) -> Text: ...
9 | def test() -> None: ...
10 |
--------------------------------------------------------------------------------
/tests/python2_unicode_literals.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | from __future__ import unicode_literals
3 |
4 | u'hello'
5 | U"hello"
6 | Ur"hello"
7 |
8 | # output
9 |
10 |
11 | #!/usr/bin/env python2
12 | from __future__ import unicode_literals
13 |
14 | "hello"
15 | "hello"
16 | r"hello"
17 |
--------------------------------------------------------------------------------
/.appveyor.yml:
--------------------------------------------------------------------------------
1 | install:
2 | - C:\Python36\python.exe -m pip install mypy
3 | - C:\Python36\python.exe -m pip install -e .
4 |
5 | # Not a C# project
6 | build: off
7 |
8 | test_script:
9 | - C:\Python36\python.exe tests/test_black.py
10 | - C:\Python36\python.exe -m mypy black.py tests/test_black.py
11 |
--------------------------------------------------------------------------------
/docs/reference/reference_exceptions.rst:
--------------------------------------------------------------------------------
1 | *Black* exceptions
2 | ==================
3 |
4 | *Contents are subject to change.*
5 |
6 | .. currentmodule:: black
7 |
8 | .. autoexception:: black.CannotSplit
9 |
10 | .. autoexception:: black.FormatError
11 |
12 | .. autoexception:: black.FormatOn
13 |
14 | .. autoexception:: black.FormatOff
15 |
16 | .. autoexception:: black.NothingChanged
17 |
--------------------------------------------------------------------------------
/tests/stub.pyi:
--------------------------------------------------------------------------------
1 | class C:
2 | ...
3 |
4 | class B:
5 | ...
6 |
7 | class A:
8 | def f(self) -> int:
9 | ...
10 |
11 | def g(self) -> str: ...
12 |
13 | def g():
14 | ...
15 |
16 | def h(): ...
17 |
18 | # output
19 | class C: ...
20 | class B: ...
21 |
22 | class A:
23 | def f(self) -> int: ...
24 | def g(self) -> str: ...
25 |
26 | def g(): ...
27 | def h(): ...
28 |
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | url = "https://pypi.python.org/simple"
3 | verify_ssl = true
4 | name = "pypi"
5 |
6 | [packages]
7 | attrs = ">=17.4.0"
8 | click = ">=6.5"
9 | appdirs = "*"
10 |
11 | [dev-packages]
12 | pre-commit = "*"
13 | coverage = "*"
14 | flake8 = "*"
15 | flake8-bugbear = "*"
16 | flake8-mypy = "*"
17 | mypy = "*"
18 | readme_renderer = "*"
19 | recommonmark = "*"
20 | Sphinx = "*"
21 | setuptools = ">=39.2.0"
22 | twine = ">=1.11.0"
23 | wheel = ">=0.31.1"
24 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | Howdy! Sorry you're having trouble. To expedite your experience,
2 | provide some basics for me:
3 |
4 | Operating system:
5 | Python version:
6 | Black version:
7 | Does also happen on master:
8 |
9 | To answer the last question, follow these steps:
10 | * create a new virtualenv (make sure it's the same Python version);
11 | * clone this repository;
12 | * run `pip install -e .`;
13 | * make sure it's sane by running `python setup.py test`; and
14 | * run `black` like you did last time.
15 |
--------------------------------------------------------------------------------
/blib2to3/README:
--------------------------------------------------------------------------------
1 | A subset of lib2to3 taken from Python 3.7.0b2.
2 | Commit hash: 9c17e3a1987004b8bcfbe423953aad84493a7984
3 |
4 | Reasons for forking:
5 | - consistent handling of f-strings for users of Python < 3.6.2
6 | - backport of BPO-33064 that fixes parsing files with trailing commas after
7 | *args and **kwargs
8 | - backport of GH-6143 that restores the ability to reformat legacy usage of
9 | `async`
10 | - support all types of string literals
11 | - better ability to debug (better reprs)
12 | - INDENT and DEDENT don't hold whitespace and comment prefixes
13 | - ability to Cythonize
14 |
--------------------------------------------------------------------------------
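
To make the parsing bullets above concrete, here are a few hypothetical
inputs, written as source strings, that the vendored parser is expected to
accept even when the Python interpreter running Black cannot execute them
itself:

# Illustrative inputs only; none of these snippets come from this repo.
trailing_comma = "def f(*args, **kwargs,): ...\n"  # BPO-33064 backport
legacy_async = "async = 42\n"                      # GH-6143: `async` as a plain name
fstring = 'f"hello {name}"\n'                      # f-strings on any host Python
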
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Treat each other well
2 |
3 | Everyone participating in the Black project, and in particular in the
4 | issue tracker, pull requests, and social media activity, is expected
5 | to treat other people with respect and more generally to follow the
6 | guidelines articulated in the [Python Community Code of
7 | Conduct](https://www.python.org/psf/codeofconduct/).
8 |
9 | At the same time, humor is encouraged. In fact, basic familiarity with
10 | Monty Python's Flying Circus is expected. We are not savages.
11 |
12 | And if you *really* need to slap somebody, do it with a fish while
13 | dancing.
14 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Note: don't use this config for your own repositories. Instead, see
2 | # "Version control integration" in README.md.
3 | - repo: local
4 | hooks:
5 | - id: black
6 | name: black
7 | language: system
8 | entry: python3 -m black
9 | files: ^(black|setup|tests/test_black|docs/conf)\.py$
10 | - id: flake8
11 | name: flake8
12 | language: system
13 | entry: flake8
14 | files: ^(black|setup|tests/test_black)\.py$
15 | - id: mypy
16 | name: mypy
17 | language: system
18 | entry: mypy
19 | files: ^(black|setup|tests/test_black)\.py$
20 |
--------------------------------------------------------------------------------
/tests/force_py36.py:
--------------------------------------------------------------------------------
1 | # The input source must not contain any Py36-specific syntax (e.g. argument type
2 | # annotations, trailing comma after *rest) or this test becomes invalid.
3 | def long_function_name(argument_one, argument_two, argument_three, argument_four, argument_five, argument_six, *rest): ...
4 | # output
5 | # The input source must not contain any Py36-specific syntax (e.g. argument type
6 | # annotations, trailing comma after *rest) or this test becomes invalid.
7 | def long_function_name(
8 | argument_one,
9 | argument_two,
10 | argument_three,
11 | argument_four,
12 | argument_five,
13 | argument_six,
14 | *rest,
15 | ):
16 | ...
17 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = black
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/tests/python2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | import sys
4 |
5 | print >> sys.stderr , "Warning:" ,
6 | print >> sys.stderr , "this is a blast from the past."
7 | print >> sys.stderr , "Look, a repr:", `sys`
8 |
9 |
10 | def function((_globals, _locals)):
11 | exec ur"print 'hi from exec!'" in _globals, _locals
12 |
13 |
14 | function((globals(), locals()))
15 |
16 |
17 | # output
18 |
19 |
20 | #!/usr/bin/env python2
21 |
22 | import sys
23 |
24 | print >>sys.stderr, "Warning:",
25 | print >>sys.stderr, "this is a blast from the past."
26 | print >>sys.stderr, "Look, a repr:", ` sys `
27 |
28 |
29 | def function((_globals, _locals)):
30 | exec ur"print 'hi from exec!'" in _globals, _locals
31 |
32 |
33 | function((globals(), locals()))
34 |
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | # Specify the target platform details in config, so your developers are
3 | # free to run mypy on Windows, Linux, or macOS and get consistent
4 | # results.
5 | python_version=3.6
6 | platform=linux
7 |
8 | # flake8-mypy expects the two following for sensible formatting
9 | show_column_numbers=True
10 |
11 | # show error messages from unrelated files
12 | follow_imports=normal
13 |
14 | # suppress errors about unsatisfied imports
15 | ignore_missing_imports=True
16 |
17 | # be strict
18 | disallow_untyped_calls=True
19 | warn_return_any=True
20 | strict_optional=True
21 | warn_no_return=True
22 | warn_redundant_casts=True
23 | warn_unused_ignores=True
24 |
25 | # The following are off by default. Flip them on if you feel
26 | # adventurous.
27 | disallow_untyped_defs=True
28 | check_untyped_defs=True
29 |
30 | # No incremental mode
31 | cache_dir=/dev/null
32 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/grammar.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pgen2.grammar (Python 3.6)
2 |
3 | from blib2to3.pgen2 import _Path
4 |
5 | from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar
6 |
7 | _P = TypeVar('_P')
8 | _Label = Tuple[int, Optional[Text]]
9 | _DFA = List[List[Tuple[int, int]]]
10 | _DFAS = Tuple[_DFA, Dict[int, int]]
11 |
12 | class Grammar:
13 | symbol2number: Dict[Text, int]
14 | number2symbol: Dict[int, Text]
15 | states: List[_DFA]
16 | dfas: Dict[int, _DFAS]
17 | labels: List[_Label]
18 | keywords: Dict[Text, int]
19 | tokens: Dict[int, int]
20 | symbol2label: Dict[Text, int]
21 | start: int
22 | def __init__(self) -> None: ...
23 | def dump(self, filename: _Path) -> None: ...
24 | def load(self, filename: _Path) -> None: ...
25 | def copy(self: _P) -> _P: ...
26 | def report(self) -> None: ...
27 |
28 | opmap_raw: Text
29 | opmap: Dict[Text, Text]
30 |
--------------------------------------------------------------------------------
/blib2to3/PatternGrammar.txt:
--------------------------------------------------------------------------------
1 | # Copyright 2006 Google, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | # A grammar to describe tree matching patterns.
5 | # Not shown here:
6 | # - 'TOKEN' stands for any token (leaf node)
7 | # - 'any' stands for any node (leaf or interior)
8 | # With 'any' we can still specify the sub-structure.
9 |
10 | # The start symbol is 'Matcher'.
11 |
12 | Matcher: Alternatives ENDMARKER
13 |
14 | Alternatives: Alternative ('|' Alternative)*
15 |
16 | Alternative: (Unit | NegatedUnit)+
17 |
18 | Unit: [NAME '='] ( STRING [Repeater]
19 | | NAME [Details] [Repeater]
20 | | '(' Alternatives ')' [Repeater]
21 | | '[' Alternatives ']'
22 | )
23 |
24 | NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
25 |
26 | Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
27 |
28 | Details: '<' Alternatives '>'
29 |
--------------------------------------------------------------------------------
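
As an illustration (not taken from this repository), a pattern accepted by
this grammar; it matches a `funcdef` node with an empty parameter list and
binds the function name to `name`:

funcdef< 'def' name=NAME parameters< '(' ')' > any* >
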
/tests/slices.py:
--------------------------------------------------------------------------------
1 | slice[a.b : c.d]
2 | slice[d :: d + 1]
3 | slice[d + 1 :: d]
4 | slice[d::d]
5 | slice[0]
6 | slice[-1]
7 | slice[:-1]
8 | slice[::-1]
9 | slice[:c, c - 1]
10 | slice[c, c + 1, d::]
11 | slice[ham[c::d] :: 1]
12 | slice[ham[cheese ** 2 : -1] : 1 : 1, ham[1:2]]
13 | slice[:-1:]
14 | slice[lambda: None : lambda: None]
15 | slice[lambda x, y, *args, really=2, **kwargs: None :, None::]
16 | slice[1 or 2 : True and False]
17 | slice[not so_simple : 1 < val <= 10]
18 | slice[(1 for i in range(42)) : x]
19 | slice[:: [i for i in range(42)]]
20 |
21 |
22 | async def f():
23 | slice[await x : [i async for i in arange(42)] : 42]
24 |
25 |
26 | # These are from PEP-8:
27 | ham[1:9], ham[1:9:3], ham[:9:3], ham[1::3], ham[1:9:]
28 | ham[lower:upper], ham[lower:upper:], ham[lower::step]
29 | # ham[lower+offset : upper+offset]
30 | ham[: upper_fn(x) : step_fn(x)], ham[:: step_fn(x)]
31 | ham[lower + offset : upper + offset]
32 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | set SPHINXPROJ=black
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/docs/_static/license.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/_static/pypi_template.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 | dist: xenial
3 | language: python
4 | cache: pip
5 | before_install:
6 | - if [[ $TRAVIS_PYTHON_VERSION == '3.7-dev' ]]; then sudo add-apt-repository ppa:deadsnakes/ppa -y; fi
7 | - if [[ $TRAVIS_PYTHON_VERSION == '3.7-dev' ]]; then sudo apt-get update; fi
8 | install:
9 | - pip install coverage coveralls flake8 flake8-bugbear mypy
10 | - pip install -e .
11 | script:
12 | - coverage run tests/test_black.py
13 | - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]]; then mypy black.py tests/test_black.py; fi
14 | - if [[ $TRAVIS_PYTHON_VERSION == '3.6-dev' ]]; then flake8 black.py tests/test_black.py; fi
15 | - if [[ $TRAVIS_PYTHON_VERSION == '3.7-dev' ]]; then black --check --verbose black.py setup.py tests/test_black.py docs/conf.py; fi
16 | after_success:
17 | - coveralls
18 | notifications:
19 | on_success: change
20 | on_failure: always
21 | matrix:
22 | include:
23 | - python: 3.6
24 | - python: 3.6-dev
25 | - python: 3.7-dev
26 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/tokenize.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pgen2.tokenize (Python 3.6)
2 | # NOTE: Only elements from __all__ are present.
3 |
4 | from typing import Callable, Iterable, Iterator, List, Text, Tuple
5 | from blib2to3.pgen2.token import * # noqa
6 |
7 |
8 | _Coord = Tuple[int, int]
9 | _TokenEater = Callable[[int, Text, _Coord, _Coord, Text], None]
10 | _TokenInfo = Tuple[int, Text, _Coord, _Coord, Text]
11 |
12 |
13 | class TokenError(Exception): ...
14 | class StopTokenizing(Exception): ...
15 |
16 | def tokenize(readline: Callable[[], Text], tokeneater: _TokenEater = ...) -> None: ...
17 |
18 | class Untokenizer:
19 | tokens: List[Text]
20 | prev_row: int
21 | prev_col: int
22 | def __init__(self) -> None: ...
23 | def add_whitespace(self, start: _Coord) -> None: ...
24 | def untokenize(self, iterable: Iterable[_TokenInfo]) -> Text: ...
25 | def compat(self, token: Tuple[int, Text], iterable: Iterable[_TokenInfo]) -> None: ...
26 |
27 | def untokenize(iterable: Iterable[_TokenInfo]) -> Text: ...
28 | def generate_tokens(
29 | readline: Callable[[], Text]
30 | ) -> Iterator[_TokenInfo]: ...
31 |
--------------------------------------------------------------------------------
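
A minimal sketch of driving the generate_tokens() API stubbed above
(assumes blib2to3 is importable; mirrors the lib2to3 API it stubs):

import io

from blib2to3.pgen2 import token, tokenize

source = "x = 1\n"
# Each token is a 5-tuple: (type, value, (srow, scol), (erow, ecol), line).
for tok_type, value, start, end, line in tokenize.generate_tokens(
    io.StringIO(source).readline
):
    print(token.tok_name.get(tok_type, tok_type), repr(value), start, end)
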
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2018 Łukasz Langa
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/driver.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pgen2.driver (Python 3.6)
2 |
3 | import os
4 | import sys
5 | from typing import Any, Callable, IO, Iterable, List, Optional, Text, Tuple, Union
6 |
7 | from logging import Logger
8 | from blib2to3.pytree import _Convert, _NL
9 | from blib2to3.pgen2 import _Path
10 | from blib2to3.pgen2.grammar import Grammar
11 |
12 |
13 | class Driver:
14 | grammar: Grammar
15 | logger: Logger
16 | convert: _Convert
17 | def __init__(self, grammar: Grammar, convert: Optional[_Convert] = ..., logger: Optional[Logger] = ...) -> None: ...
18 | def parse_tokens(self, tokens: Iterable[Any], debug: bool = ...) -> _NL: ...
19 | def parse_stream_raw(self, stream: IO[Text], debug: bool = ...) -> _NL: ...
20 | def parse_stream(self, stream: IO[Text], debug: bool = ...) -> _NL: ...
21 | def parse_file(self, filename: _Path, encoding: Optional[Text] = ..., debug: bool = ...) -> _NL: ...
22 | def parse_string(self, text: Text, debug: bool = ...) -> _NL: ...
23 |
24 | def load_grammar(gt: Text = ..., gp: Optional[Text] = ..., save: bool = ..., force: bool = ..., logger: Optional[Logger] = ...) -> Grammar: ...
25 |
--------------------------------------------------------------------------------
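
A minimal end-to-end parsing sketch against the stubbed Driver API above
(assumes blib2to3 is importable and that the grammar pickle cache can be
written to a fresh temp directory):

import tempfile

from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver

# initialize() loads Grammar.txt and PatternGrammar.txt and fills in the
# module-level grammars used below (see pygram.py).
pygram.initialize(cache_dir=tempfile.mkdtemp())

drv = driver.Driver(pygram.python_grammar_no_print_statement, convert=pytree.convert)
tree = drv.parse_string("def f(x):\n    return x\n")
print(tree)  # Node.__str__ reconstructs the original source
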
/docs/reference/reference_classes.rst:
--------------------------------------------------------------------------------
1 | *Black* classes
2 | ===============
3 |
4 | *Contents are subject to change.*
5 |
6 | .. currentmodule:: black
7 |
8 | :class:`BracketTracker`
9 | -------------------------
10 |
11 | .. autoclass:: black.BracketTracker
12 | :members:
13 |
14 | :class:`EmptyLineTracker`
15 | -------------------------
16 |
17 | .. autoclass:: black.EmptyLineTracker
18 | :members:
19 |
20 | :class:`Line`
21 | -------------
22 |
23 | .. autoclass:: black.Line
24 | :members:
25 | :special-members: __str__, __bool__
26 |
27 | :class:`LineGenerator`
28 | ----------------------
29 |
30 | .. autoclass:: black.LineGenerator
31 | :show-inheritance:
32 | :members:
33 |
34 | :class:`Report`
35 | ---------------
36 |
37 | .. autoclass:: black.Report
38 | :members:
39 | :special-members: __str__
40 |
41 | :class:`UnformattedLines`
42 | -------------------------
43 |
44 | .. autoclass:: black.UnformattedLines
45 | :show-inheritance:
46 | :members:
47 | :special-members: __str__
48 |
49 | :class:`Visitor`
50 | ----------------
51 |
52 | .. autoclass:: black.Visitor
53 | :show-inheritance:
54 | :members:
55 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/parse.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pgen2.parse (Python 3.6)
2 |
3 | from typing import Any, Dict, List, Optional, Sequence, Set, Text, Tuple
4 |
5 | from blib2to3.pgen2.grammar import Grammar, _DFAS
6 | from blib2to3.pytree import _NL, _Convert, _RawNode
7 |
8 | _Context = Sequence[Any]
9 |
10 | class ParseError(Exception):
11 | msg: Text
12 | type: int
13 | value: Optional[Text]
14 | context: _Context
15 | def __init__(self, msg: Text, type: int, value: Optional[Text], context: _Context) -> None: ...
16 |
17 | class Parser:
18 | grammar: Grammar
19 | convert: _Convert
20 | stack: List[Tuple[_DFAS, int, _RawNode]]
21 | rootnode: Optional[_NL]
22 | used_names: Set[Text]
23 | def __init__(self, grammar: Grammar, convert: Optional[_Convert] = ...) -> None: ...
24 | def setup(self, start: Optional[int] = ...) -> None: ...
25 | def addtoken(self, type: int, value: Optional[Text], context: _Context) -> bool: ...
26 | def classify(self, type: int, value: Optional[Text], context: _Context) -> int: ...
27 | def shift(self, type: int, value: Optional[Text], newstate: int, context: _Context) -> None: ...
28 | def push(self, type: int, newdfa: _DFAS, newstate: int, context: _Context) -> None: ...
29 | def pop(self) -> None: ...
30 |
--------------------------------------------------------------------------------
/tests/debug_visitor.py:
--------------------------------------------------------------------------------
1 | @dataclass
2 | class DebugVisitor(Visitor[T]):
3 | tree_depth: int = 0
4 |
5 | def visit_default(self, node: LN) -> Iterator[T]:
6 | indent = ' ' * (2 * self.tree_depth)
7 | if isinstance(node, Node):
8 | _type = type_repr(node.type)
9 | out(f'{indent}{_type}', fg='yellow')
10 | self.tree_depth += 1
11 | for child in node.children:
12 | yield from self.visit(child)
13 |
14 | self.tree_depth -= 1
15 | out(f'{indent}/{_type}', fg='yellow', bold=False)
16 | else:
17 | _type = token.tok_name.get(node.type, str(node.type))
18 | out(f'{indent}{_type}', fg='blue', nl=False)
19 | if node.prefix:
20 | # We don't have to handle prefixes for `Node` objects since
21 | # that delegates to the first child anyway.
22 | out(f' {node.prefix!r}', fg='green', bold=False, nl=False)
23 | out(f' {node.value!r}', fg='blue', bold=False)
24 |
25 | @classmethod
26 | def show(cls, code: str) -> None:
27 | """Pretty-prints a given string of `code`.
28 |
29 | Convenience method for debugging.
30 | """
31 | v: DebugVisitor[None] = DebugVisitor()
32 | list(v.visit(lib2to3_parse(code)))
33 |
--------------------------------------------------------------------------------
/tests/class_blank_parentheses.py:
--------------------------------------------------------------------------------
1 | class SimpleClassWithBlankParentheses():
2 | pass
3 | class ClassWithSpaceParentheses ( ):
4 | first_test_data = 90
5 | second_test_data = 100
6 | def test_func(self):
7 | return None
8 | class ClassWithEmptyFunc(object):
9 |
10 | def func_with_blank_parentheses():
11 | return 5
12 |
13 |
14 | def public_func_with_blank_parentheses():
15 | return None
16 | def class_under_the_func_with_blank_parentheses():
17 | class InsideFunc():
18 | pass
19 | class NormalClass (
20 | ):
21 | def func_for_testing(self, first, second):
22 | sum = first + second
23 | return sum
24 |
25 |
26 | # output
27 |
28 |
29 | class SimpleClassWithBlankParentheses:
30 | pass
31 |
32 |
33 | class ClassWithSpaceParentheses:
34 | first_test_data = 90
35 | second_test_data = 100
36 |
37 | def test_func(self):
38 | return None
39 |
40 |
41 | class ClassWithEmptyFunc(object):
42 | def func_with_blank_parentheses():
43 | return 5
44 |
45 |
46 | def public_func_with_blank_parentheses():
47 | return None
48 |
49 |
50 | def class_under_the_func_with_blank_parentheses():
51 | class InsideFunc:
52 | pass
53 |
54 |
55 | class NormalClass:
56 | def func_for_testing(self, first, second):
57 | sum = first + second
58 | return sum
59 |
--------------------------------------------------------------------------------
/tests/function2.py:
--------------------------------------------------------------------------------
1 | def f(
2 | a,
3 | **kwargs,
4 | ) -> A:
5 | with cache_dir():
6 | if something:
7 | result = (
8 | CliRunner().invoke(black.main, [str(src1), str(src2), "--diff", "--check"])
9 | )
10 | return A(
11 | very_long_argument_name1=very_long_value_for_the_argument,
12 | very_long_argument_name2=very_long_value_for_the_argument,
13 | **kwargs,
14 | )
15 | def g():
16 | "Docstring."
17 | def inner():
18 | pass
19 | print("Inner defs should breathe a little.")
20 | def h():
21 | def inner():
22 | pass
23 | print("Inner defs should breathe a little.")
24 |
25 | # output
26 |
27 | def f(a, **kwargs) -> A:
28 | with cache_dir():
29 | if something:
30 | result = CliRunner().invoke(
31 | black.main, [str(src1), str(src2), "--diff", "--check"]
32 | )
33 | return A(
34 | very_long_argument_name1=very_long_value_for_the_argument,
35 | very_long_argument_name2=very_long_value_for_the_argument,
36 | **kwargs,
37 | )
38 |
39 |
40 | def g():
41 | "Docstring."
42 |
43 | def inner():
44 | pass
45 |
46 | print("Inner defs should breathe a little.")
47 |
48 |
49 | def h():
50 | def inner():
51 | pass
52 |
53 | print("Inner defs should breathe a little.")
54 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/token.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pgen2.token (Python 3.6)
2 |
3 | import sys
4 | from typing import Dict, Text
5 |
6 | ENDMARKER: int
7 | NAME: int
8 | NUMBER: int
9 | STRING: int
10 | NEWLINE: int
11 | INDENT: int
12 | DEDENT: int
13 | LPAR: int
14 | RPAR: int
15 | LSQB: int
16 | RSQB: int
17 | COLON: int
18 | COMMA: int
19 | SEMI: int
20 | PLUS: int
21 | MINUS: int
22 | STAR: int
23 | SLASH: int
24 | VBAR: int
25 | AMPER: int
26 | LESS: int
27 | GREATER: int
28 | EQUAL: int
29 | DOT: int
30 | PERCENT: int
31 | BACKQUOTE: int
32 | LBRACE: int
33 | RBRACE: int
34 | EQEQUAL: int
35 | NOTEQUAL: int
36 | LESSEQUAL: int
37 | GREATEREQUAL: int
38 | TILDE: int
39 | CIRCUMFLEX: int
40 | LEFTSHIFT: int
41 | RIGHTSHIFT: int
42 | DOUBLESTAR: int
43 | PLUSEQUAL: int
44 | MINEQUAL: int
45 | STAREQUAL: int
46 | SLASHEQUAL: int
47 | PERCENTEQUAL: int
48 | AMPEREQUAL: int
49 | VBAREQUAL: int
50 | CIRCUMFLEXEQUAL: int
51 | LEFTSHIFTEQUAL: int
52 | RIGHTSHIFTEQUAL: int
53 | DOUBLESTAREQUAL: int
54 | DOUBLESLASH: int
55 | DOUBLESLASHEQUAL: int
56 | OP: int
57 | COMMENT: int
58 | NL: int
59 | if sys.version_info >= (3,):
60 | RARROW: int
61 | if sys.version_info >= (3, 5):
62 | AT: int
63 | ATEQUAL: int
64 | AWAIT: int
65 | ASYNC: int
66 | ERRORTOKEN: int
67 | N_TOKENS: int
68 | NT_OFFSET: int
69 | tok_name: Dict[int, Text]
70 |
71 | def ISTERMINAL(x: int) -> bool: ...
72 | def ISNONTERMINAL(x: int) -> bool: ...
73 | def ISEOF(x: int) -> bool: ...
74 |
--------------------------------------------------------------------------------
/tests/comments5.py:
--------------------------------------------------------------------------------
1 | while True:
2 | if something.changed:
3 | do.stuff() # trailing comment
4 | # Comment belongs to the `if` block.
5 | # This one belongs to the `while` block.
6 |
7 | # Should this one, too? I guess so.
8 |
9 | # This one is properly standalone now.
10 |
11 | for i in range(100):
12 | # first we do this
13 | if i % 33 == 0:
14 | break
15 |
16 | # then we do this
17 | print(i)
18 | # and finally we loop around
19 |
20 | with open(some_temp_file) as f:
21 | data = f.read()
22 |
23 | try:
24 | with open(some_other_file) as w:
25 | w.write(data)
26 |
27 | except OSError:
28 | print("problems")
29 |
30 | import sys
31 |
32 |
33 | # leading function comment
34 | def wat():
35 | ...
36 | # trailing function comment
37 |
38 |
39 | # SECTION COMMENT
40 |
41 |
42 | # leading 1
43 | @deco1
44 | # leading 2
45 | @deco2(with_args=True)
46 | # leading 3
47 | @deco3
48 | def decorated1():
49 | ...
50 |
51 |
52 | # leading 1
53 | @deco1
54 | # leading 2
55 | @deco2(with_args=True)
56 | # leading function comment
57 | def decorated1():
58 | ...
59 |
60 |
61 | # Note: crappy but inevitable. The current design of EmptyLineTracker doesn't
62 | # allow this to work correctly. The user will have to split those lines by
63 | # hand.
64 | some_instruction
65 | # This comment should be split from `some_instruction` by two lines but isn't.
66 | def g():
67 | ...
68 |
69 |
70 | if __name__ == "__main__":
71 | main()
72 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/token.py:
--------------------------------------------------------------------------------
1 | """Token constants (from "token.h")."""
2 |
3 | # Taken from Python (r53757) and modified to include some tokens
4 | # originally monkeypatched in by pgen2.tokenize
5 |
6 | #--start constants--
7 | ENDMARKER = 0
8 | NAME = 1
9 | NUMBER = 2
10 | STRING = 3
11 | NEWLINE = 4
12 | INDENT = 5
13 | DEDENT = 6
14 | LPAR = 7
15 | RPAR = 8
16 | LSQB = 9
17 | RSQB = 10
18 | COLON = 11
19 | COMMA = 12
20 | SEMI = 13
21 | PLUS = 14
22 | MINUS = 15
23 | STAR = 16
24 | SLASH = 17
25 | VBAR = 18
26 | AMPER = 19
27 | LESS = 20
28 | GREATER = 21
29 | EQUAL = 22
30 | DOT = 23
31 | PERCENT = 24
32 | BACKQUOTE = 25
33 | LBRACE = 26
34 | RBRACE = 27
35 | EQEQUAL = 28
36 | NOTEQUAL = 29
37 | LESSEQUAL = 30
38 | GREATEREQUAL = 31
39 | TILDE = 32
40 | CIRCUMFLEX = 33
41 | LEFTSHIFT = 34
42 | RIGHTSHIFT = 35
43 | DOUBLESTAR = 36
44 | PLUSEQUAL = 37
45 | MINEQUAL = 38
46 | STAREQUAL = 39
47 | SLASHEQUAL = 40
48 | PERCENTEQUAL = 41
49 | AMPEREQUAL = 42
50 | VBAREQUAL = 43
51 | CIRCUMFLEXEQUAL = 44
52 | LEFTSHIFTEQUAL = 45
53 | RIGHTSHIFTEQUAL = 46
54 | DOUBLESTAREQUAL = 47
55 | DOUBLESLASH = 48
56 | DOUBLESLASHEQUAL = 49
57 | AT = 50
58 | ATEQUAL = 51
59 | OP = 52
60 | COMMENT = 53
61 | NL = 54
62 | RARROW = 55
63 | AWAIT = 56
64 | ASYNC = 57
65 | ERRORTOKEN = 58
66 | N_TOKENS = 59
67 | NT_OFFSET = 256
68 | #--end constants--
69 |
70 | tok_name = {}
71 | for _name, _value in list(globals().items()):
72 | if type(_value) is type(0):
73 | tok_name[_value] = _name
74 |
75 |
76 | def ISTERMINAL(x):
77 | return x < NT_OFFSET
78 |
79 | def ISNONTERMINAL(x):
80 | return x >= NT_OFFSET
81 |
82 | def ISEOF(x):
83 | return x == ENDMARKER
84 |
--------------------------------------------------------------------------------
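
A few sanity checks that follow directly from the constants defined above
(illustrative):

from blib2to3.pgen2 import token

assert token.tok_name[token.NAME] == "NAME"
assert token.ISTERMINAL(token.NAME)          # token types sit below NT_OFFSET
assert token.ISNONTERMINAL(token.NT_OFFSET)  # grammar symbols start at 256
assert token.ISEOF(token.ENDMARKER)
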
/tests/comments3.py:
--------------------------------------------------------------------------------
1 | def func():
2 | x = """
3 | a really long string
4 | """
5 | lcomp3 = [
6 | # This one is actually too long to fit in a single line.
7 | element.split("\n", 1)[0]
8 | # yup
9 | for element in collection.select_elements()
10 | # right
11 | if element is not None
12 | ]
13 | # Capture each of the exceptions in the MultiError along with each of their causes and contexts
14 | if isinstance(exc_value, MultiError):
15 | embedded = []
16 | for exc in exc_value.exceptions:
17 | if exc not in _seen:
18 | embedded.append(
19 | # This should be left alone (before)
20 | traceback.TracebackException.from_exception(
21 | exc,
22 | limit=limit,
23 | lookup_lines=lookup_lines,
24 | capture_locals=capture_locals,
25 | # copy the set of _seen exceptions so that duplicates
26 | # shared between sub-exceptions are not omitted
27 | _seen=set(_seen),
28 | )
29 | # This should be left alone (after)
30 | )
31 |
32 | # everything is fine if the expression isn't nested
33 | traceback.TracebackException.from_exception(
34 | exc,
35 | limit=limit,
36 | lookup_lines=lookup_lines,
37 | capture_locals=capture_locals,
38 | # copy the set of _seen exceptions so that duplicates
39 | # shared between sub-exceptions are not omitted
40 | _seen=set(_seen),
41 | )
42 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/literals.py:
--------------------------------------------------------------------------------
1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | """Safely evaluate Python string literals without using eval()."""
5 |
6 | import re
7 |
8 | simple_escapes = {"a": "\a",
9 | "b": "\b",
10 | "f": "\f",
11 | "n": "\n",
12 | "r": "\r",
13 | "t": "\t",
14 | "v": "\v",
15 | "'": "'",
16 | '"': '"',
17 | "\\": "\\"}
18 |
19 | def escape(m):
20 | all, tail = m.group(0, 1)
21 | assert all.startswith("\\")
22 | esc = simple_escapes.get(tail)
23 | if esc is not None:
24 | return esc
25 | if tail.startswith("x"):
26 | hexes = tail[1:]
27 | if len(hexes) < 2:
28 | raise ValueError("invalid hex string escape ('\\%s')" % tail)
29 | try:
30 | i = int(hexes, 16)
31 | except ValueError:
32 | raise ValueError("invalid hex string escape ('\\%s')" % tail) from None
33 | else:
34 | try:
35 | i = int(tail, 8)
36 | except ValueError:
37 | raise ValueError("invalid octal string escape ('\\%s')" % tail) from None
38 | return chr(i)
39 |
40 | def evalString(s):
41 | assert s.startswith("'") or s.startswith('"'), repr(s[:1])
42 | q = s[0]
43 | if s[:3] == q*3:
44 | q = q*3
45 | assert s.endswith(q), repr(s[-len(q):])
46 | assert len(s) >= 2*len(q)
47 | s = s[len(q):-len(q)]
48 | return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
49 |
50 | def test():
51 | for i in range(256):
52 | c = chr(i)
53 | s = repr(c)
54 | e = evalString(s)
55 | if e != c:
56 | print(i, c, s, e)
57 |
58 |
59 | if __name__ == "__main__":
60 | test()
61 |
--------------------------------------------------------------------------------
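
An illustrative session for evalString(), exercising each escape family
handled above:

from blib2to3.pgen2.literals import evalString

assert evalString(r"'\n'") == "\n"        # simple escape table
assert evalString(r'"\x41"') == "A"       # hex escape
assert evalString(r"'\101'") == "A"       # octal escape
assert evalString('"""abc"""') == "abc"   # triple-quoted strings
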
/tests/string_quotes.py:
--------------------------------------------------------------------------------
1 | '\''
2 | '"'
3 | "'"
4 | "\""
5 | "Hello"
6 | "Don't do that"
7 | 'Here is a "'
8 | 'What\'s the deal here?'
9 | "What's the deal \"here\"?"
10 | "And \"here\"?"
11 | """Strings with "" in them"""
12 | '''Strings with "" in them'''
13 | '''Here's a "'''
14 | '''Here's a " '''
15 | '''Just a normal triple
16 | quote'''
17 | f"just a normal {f} string"
18 | f'''This is a triple-quoted {f}-string'''
19 | f'MOAR {" ".join([])}'
20 | f"MOAR {' '.join([])}"
21 | r"raw string ftw"
22 | r'Date d\'expiration:(.*)'
23 | r'Tricky "quote'
24 | r'Not-so-tricky \"quote'
25 | rf'{yay}'
26 | '\n\
27 | The \"quick\"\n\
28 | brown fox\n\
29 | jumps over\n\
30 | the \'lazy\' dog.\n\
31 | '
32 | re.compile(r'[\\"]')
33 | "x = ''; y = \"\""
34 | "x = '''; y = \"\""
35 | "x = ''''; y = \"\""
36 | "x = '' ''; y = \"\""
37 | "x = ''; y = \"\"\""
38 | "x = '''; y = \"\"\"\""
39 | "x = ''''; y = \"\"\"\"\""
40 | "x = '' ''; y = \"\"\"\"\""
41 | 'unnecessary \"\"escaping'
42 | "unnecessary \'\'escaping"
43 | '\\""'
44 | "\\''"
45 |
46 | # output
47 |
48 | "'"
49 | '"'
50 | "'"
51 | '"'
52 | "Hello"
53 | "Don't do that"
54 | 'Here is a "'
55 | "What's the deal here?"
56 | 'What\'s the deal "here"?'
57 | 'And "here"?'
58 | """Strings with "" in them"""
59 | """Strings with "" in them"""
60 | '''Here's a "'''
61 | """Here's a " """
62 | """Just a normal triple
63 | quote"""
64 | f"just a normal {f} string"
65 | f"""This is a triple-quoted {f}-string"""
66 | f'MOAR {" ".join([])}'
67 | f"MOAR {' '.join([])}"
68 | r"raw string ftw"
69 | r"Date d\'expiration:(.*)"
70 | r'Tricky "quote'
71 | r"Not-so-tricky \"quote"
72 | rf"{yay}"
73 | "\n\
74 | The \"quick\"\n\
75 | brown fox\n\
76 | jumps over\n\
77 | the 'lazy' dog.\n\
78 | "
79 | re.compile(r'[\\"]')
80 | "x = ''; y = \"\""
81 | "x = '''; y = \"\""
82 | "x = ''''; y = \"\""
83 | "x = '' ''; y = \"\""
84 | 'x = \'\'; y = """'
85 | 'x = \'\'\'; y = """"'
86 | 'x = \'\'\'\'; y = """""'
87 | 'x = \'\' \'\'; y = """""'
88 | 'unnecessary ""escaping'
89 | "unnecessary ''escaping"
90 | '\\""'
91 | "\\''"
92 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. black documentation master file, created by
2 | sphinx-quickstart on Fri Mar 23 10:53:30 2018.
3 |
4 | The uncompromising code formatter
5 | =================================
6 |
7 | By using *Black*, you agree to cede control over minutiae of
8 | hand-formatting. In return, *Black* gives you speed, determinism, and
9 | freedom from `pycodestyle` nagging about formatting. You will save time
10 | and mental energy for more important matters.
11 |
12 | *Black* makes code review faster by producing the smallest diffs
13 | possible. Blackened code looks the same regardless of the project
14 | you're reading. Formatting becomes transparent after a while and you
15 | can focus on the content instead.
16 |
17 | .. note::
18 |
19 | `Black is an early pre-release `_.
20 |
21 |
22 | Testimonials
23 | ------------
24 |
25 | **Dusty Phillips**, `writer `_:
26 |
27 | *Black is opinionated so you don't have to be.*
28 |
29 | **Hynek Schlawack**, creator of `attrs `_, core
30 | developer of Twisted and CPython:
31 |
32 | *An auto-formatter that doesn't suck is all I want for Xmas!*
33 |
34 | **Carl Meyer**, `Django `_ core developer:
35 |
36 | *At least the name is good.*
37 |
38 | **Kenneth Reitz**, creator of `requests `_
39 | and `pipenv `_:
40 |
41 | *This vastly improves the formatting of our code. Thanks a ton!*
42 |
43 | Contents
44 | --------
45 |
46 | .. toctree::
47 | :maxdepth: 2
48 |
49 | installation_and_usage
50 | the_black_code_style
51 | editor_integration
52 | version_control_integration
53 | ignoring_unmodified_files
54 | contributing
55 | change_log
56 | reference/reference_summary
57 | authors
58 |
59 | Indices and tables
60 | ==================
61 |
62 | * :ref:`genindex`
63 | * :ref:`modindex`
64 | * :ref:`search`
65 |
--------------------------------------------------------------------------------
/blib2to3/pygram.py:
--------------------------------------------------------------------------------
1 | # Copyright 2006 Google, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | """Export the Python grammar and symbols."""
5 |
6 | # Python imports
7 | import os
8 |
9 | # Local imports
10 | from .pgen2 import token
11 | from .pgen2 import driver
12 | from . import pytree
13 |
14 | # The grammar file
15 | _GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
16 | _PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
17 | "PatternGrammar.txt")
18 |
19 |
20 | class Symbols(object):
21 |
22 | def __init__(self, grammar):
23 | """Initializer.
24 |
25 | Creates an attribute for each grammar symbol (nonterminal),
26 | whose value is the symbol's type (an int >= 256).
27 | """
28 | for name, symbol in grammar.symbol2number.items():
29 | setattr(self, name, symbol)
30 |
31 |
32 | def initialize(cache_dir=None):
33 | global python_grammar
34 | global python_grammar_no_print_statement
35 | global python_grammar_no_print_statement_no_exec_statement
36 | global python_symbols
37 | global pattern_grammar
38 | global pattern_symbols
39 |
40 | # Python 2
41 | python_grammar = driver.load_packaged_grammar("blib2to3", _GRAMMAR_FILE,
42 | cache_dir)
43 |
44 | python_symbols = Symbols(python_grammar)
45 |
46 | # Python 2 + from __future__ import print_function
47 | python_grammar_no_print_statement = python_grammar.copy()
48 | del python_grammar_no_print_statement.keywords["print"]
49 |
50 | # Python 3
51 | python_grammar_no_print_statement_no_exec_statement = python_grammar.copy()
52 | del python_grammar_no_print_statement_no_exec_statement.keywords["print"]
53 | del python_grammar_no_print_statement_no_exec_statement.keywords["exec"]
54 |
55 | pattern_grammar = driver.load_packaged_grammar("blib2to3", _PATTERN_GRAMMAR_FILE,
56 | cache_dir)
57 | pattern_symbols = Symbols(pattern_grammar)
58 |
--------------------------------------------------------------------------------
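
An illustrative follow-up once initialize() has run (see the parsing sketch
after driver.pyi above): the Symbols attributes and the keyword deletions
are directly observable:

from blib2to3 import pygram

# Every nonterminal from Grammar.txt becomes an attribute holding its
# type number (an int >= 256).
assert pygram.python_symbols.funcdef >= 256

# The derived grammars drop keywords, so `print` and `exec` parse as names.
assert "print" not in pygram.python_grammar_no_print_statement.keywords
assert "exec" not in pygram.python_grammar_no_print_statement_no_exec_statement.keywords
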
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Black
2 |
3 | Welcome! Happy to see you willing to make the project better. Have you
4 | read the entire [user documentation](http://black.readthedocs.io/en/latest/)
5 | yet?
6 |
7 |
8 | ## Bird's eye view
9 |
10 | In terms of inspiration, *Black* is about as configurable as *gofmt*.
11 | This is deliberate.
12 |
13 | Bug reports and fixes are always welcome! Please follow the issue
14 | template on GitHub for best results.
15 |
16 | Before you suggest a new feature or configuration knob, ask yourself why
17 | you want it. If it enables better integration with some workflow, fixes
18 | an inconsistency, speeds things up, and so on - go for it! On the other
19 | hand, if your answer is "because I don't like a particular formatting"
20 | then you're not ready to embrace *Black* yet. Such changes are unlikely
21 | to get accepted. You can still try but prepare to be disappointed.
22 |
23 |
24 | ## Technicalities
25 |
26 | Development on the latest version of Python is preferred. As of this
27 | writing it's 3.6.4. You can use any operating system. I am using macOS
28 | myself and CentOS at work.
29 |
30 | Install all development dependencies using:
31 | ```
32 | $ pipenv install --dev
33 | $ pre-commit install
34 | ```
35 | If you haven't used `pipenv` before but are comfortable with virtualenvs,
36 | just run `pip install pipenv` in the virtualenv you're already using and
37 | invoke the command above from the cloned Black repo. It will do the
38 | correct thing.
39 |
40 | Before submitting pull requests, run tests with:
41 | ```
42 | $ python setup.py test
43 | ```
44 |
45 |
46 | ## Hygiene
47 |
48 | If you're fixing a bug, add a test. Run it first to confirm it fails,
49 | then fix the bug, run it again to confirm it's really fixed.
50 |
51 | If adding a new feature, add a test. In fact, always add a test. But
52 | wait, before adding any large feature, first open an issue for us to
53 | discuss the idea.
54 |
55 |
56 | ## Finally
57 |
58 | Thanks again for your interest in improving the project! You're taking
59 | action when most people decide to sit and watch.
60 |
--------------------------------------------------------------------------------
/tests/comments.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # fmt: on
3 | # Some license here.
4 | #
5 | # Has many lines. Many, many lines.
6 | # Many, many, many lines.
7 | """Module docstring.
8 |
9 | Possibly also many, many lines.
10 | """
11 |
12 | import os.path
13 | import sys
14 |
15 | import a
16 | from b.c import X # some noqa comment
17 |
18 | try:
19 | import fast
20 | except ImportError:
21 | import slow as fast
22 |
23 |
24 | # Some comment before a function.
25 |
26 |
27 | def function(default=None):
28 | """Docstring comes first.
29 |
30 | Possibly many lines.
31 | """
32 | # FIXME: Some comment about why this function is crap but still in production.
33 | import inner_imports
34 |
35 | if inner_imports.are_evil():
36 | # Explains why we have this if.
37 | # In great detail indeed.
38 | x = X()
39 | return x.method1() # type: ignore
40 |
41 | # This return is also commented for some reason.
42 | return default
43 |
44 |
45 | # Explains why we use global state.
46 | GLOBAL_STATE = {"a": a(1), "b": a(2), "c": a(3)}
47 |
48 |
49 | # Another comment!
50 | # This time two lines.
51 |
52 |
53 | class Foo:
54 | """Docstring for class Foo. Example from Sphinx docs."""
55 |
56 | #: Doc comment for class attribute Foo.bar.
57 | #: It can have multiple lines.
58 | bar = 1
59 |
60 | flox = 1.5 #: Doc comment for Foo.flox. One line only.
61 |
62 | baz = 2
63 | """Docstring for class attribute Foo.baz."""
64 |
65 | def __init__(self):
66 | #: Doc comment for instance attribute qux.
67 | self.qux = 3
68 |
69 | self.spam = 4
70 | """Docstring for instance attribute spam."""
71 |
72 |
73 | @fast(really=True)
74 | async def wat():
75 | async with X.open_async() as x: # Some more comments
76 | result = await x.method1()
77 | # Comment after ending a block.
78 | if result:
79 | print("A OK", file=sys.stdout)
80 | # Comment between things.
81 | print()
82 |
83 |
84 | # Some closing comments.
85 | # Maybe Vim or Emacs directives for formatting.
86 | # Who knows.
87 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2018 Łukasz Langa
2 | import ast
3 | import re
4 | from setuptools import setup
5 | import sys
6 |
7 | assert sys.version_info >= (3, 6, 0), "black requires Python 3.6+"
8 | from pathlib import Path # noqa E402
9 |
10 | CURRENT_DIR = Path(__file__).parent
11 |
12 |
13 | def get_long_description() -> str:
14 | readme_md = CURRENT_DIR / "README.md"
15 | with open(readme_md, encoding="utf8") as ld_file:
16 | return ld_file.read()
17 |
18 |
19 | def get_version() -> str:
20 | black_py = CURRENT_DIR / "black.py"
21 | _version_re = re.compile(r"__version__\s+=\s+(?P<version>.*)")
22 | with open(black_py, "r", encoding="utf8") as f:
23 | match = _version_re.search(f.read())
24 | version = match.group("version") if match is not None else '"unknown"'
25 | return str(ast.literal_eval(version))
26 |
27 |
28 | setup(
29 | name="black",
30 | version=get_version(),
31 | description="The uncompromising code formatter.",
32 | long_description=get_long_description(),
33 | long_description_content_type="text/markdown",
34 | keywords="automation formatter yapf autopep8 pyfmt gofmt rustfmt",
35 | author="Łukasz Langa",
36 | author_email="lukasz@langa.pl",
37 | url="https://github.com/ambv/black",
38 | license="MIT",
39 | py_modules=["black"],
40 | packages=["blib2to3", "blib2to3.pgen2"],
41 | package_data={"blib2to3": ["*.txt"]},
42 | python_requires=">=3.6",
43 | zip_safe=False,
44 | install_requires=["click>=6.5", "attrs>=17.4.0", "appdirs"],
45 | test_suite="tests.test_black",
46 | classifiers=[
47 | "Development Status :: 4 - Beta",
48 | "Environment :: Console",
49 | "Intended Audience :: Developers",
50 | "License :: OSI Approved :: MIT License",
51 | "Operating System :: OS Independent",
52 | "Programming Language :: Python",
53 | "Programming Language :: Python :: 3.6",
54 | "Programming Language :: Python :: 3 :: Only",
55 | "Topic :: Software Development :: Libraries :: Python Modules",
56 | "Topic :: Software Development :: Quality Assurance",
57 | ],
58 | entry_points={"console_scripts": ["black=black:main"]},
59 | )
60 |
--------------------------------------------------------------------------------
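The `get_version()` helper above is a self-contained pattern worth highlighting: it extracts a module-level `__version__` assignment with a named-group regex and `ast.literal_eval`, without importing the module. The same idea against a made-up sample string:

```
import ast
import re

sample = '__version__ = "18.4a6"\n'  # made-up version for illustration
match = re.search(r"__version__\s+=\s+(?P<version>.*)", sample)
version = match.group("version") if match is not None else '"unknown"'
print(ast.literal_eval(version))  # 18.4a6 -- literal_eval strips the quotes
```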
/blib2to3/pgen2/pgen.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pgen2.pgen (Python 3.6)
2 |
3 | from typing import Any, Dict, IO, Iterable, Iterator, List, Optional, Text, Tuple
4 | from mypy_extensions import NoReturn
5 |
6 | from blib2to3.pgen2 import _Path, grammar
7 | from blib2to3.pgen2.tokenize import _TokenInfo
8 |
9 | class PgenGrammar(grammar.Grammar): ...
10 |
11 | class ParserGenerator:
12 | filename: _Path
13 | stream: IO[Text]
14 | generator: Iterator[_TokenInfo]
15 | first: Dict[Text, Dict[Text, int]]
16 | def __init__(self, filename: _Path, stream: Optional[IO[Text]] = ...) -> None: ...
17 | def make_grammar(self) -> PgenGrammar: ...
18 | def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]: ...
19 | def make_label(self, c: PgenGrammar, label: Text) -> int: ...
20 | def addfirstsets(self) -> None: ...
21 | def calcfirst(self, name: Text) -> None: ...
22 | def parse(self) -> Tuple[Dict[Text, List[DFAState]], Text]: ...
23 | def make_dfa(self, start: NFAState, finish: NFAState) -> List[DFAState]: ...
24 | def dump_nfa(self, name: Text, start: NFAState, finish: NFAState) -> List[DFAState]: ...
25 | def dump_dfa(self, name: Text, dfa: Iterable[DFAState]) -> None: ...
26 | def simplify_dfa(self, dfa: List[DFAState]) -> None: ...
27 | def parse_rhs(self) -> Tuple[NFAState, NFAState]: ...
28 | def parse_alt(self) -> Tuple[NFAState, NFAState]: ...
29 | def parse_item(self) -> Tuple[NFAState, NFAState]: ...
30 | def parse_atom(self) -> Tuple[NFAState, NFAState]: ...
31 | def expect(self, type: int, value: Optional[Any] = ...) -> Text: ...
32 | def gettoken(self) -> None: ...
33 | def raise_error(self, msg: str, *args: Any) -> NoReturn: ...
34 |
35 | class NFAState:
36 | arcs: List[Tuple[Optional[Text], NFAState]]
37 | def __init__(self) -> None: ...
38 | def addarc(self, next: NFAState, label: Optional[Text] = ...) -> None: ...
39 |
40 | class DFAState:
41 | nfaset: Dict[NFAState, Any]
42 | isfinal: bool
43 | arcs: Dict[Text, DFAState]
44 | def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None: ...
45 | def addarc(self, next: DFAState, label: Text) -> None: ...
46 | def unifystate(self, old: DFAState, new: DFAState) -> None: ...
47 | def __eq__(self, other: Any) -> bool: ...
48 |
49 | def generate_grammar(filename: _Path = ...) -> PgenGrammar: ...
50 |
--------------------------------------------------------------------------------
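These stubs describe the classic pgen pipeline: ParserGenerator tokenizes a grammar file, builds an NFA per rule (`parse_rhs`/`parse_alt`/`parse_item`/`parse_atom`), converts each NFA into a DFA (`make_dfa`), simplifies it, and emits the numeric tables a Grammar carries. A short sketch of driving it end to end, assuming blib2to3's Grammar.txt is reachable at the path shown:

```
from blib2to3.pgen2 import pgen

grammar = pgen.generate_grammar("blib2to3/Grammar.txt")  # tokenize -> NFAs -> DFAs -> tables
print(len(grammar.symbol2number), "symbols, numbered from 256")
print(len(grammar.states), "DFAs")
```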
/tests/import_spacing.py:
--------------------------------------------------------------------------------
1 | """The asyncio package, tracking PEP 3156."""
2 |
3 | # flake8: noqa
4 |
5 | from logging import (
6 | ERROR,
7 | )
8 | import sys
9 |
10 | # This relies on each of the submodules having an __all__ variable.
11 | from .base_events import *
12 | from .coroutines import *
13 | from .events import * # comment here
14 |
15 | from .futures import *
16 | from .locks import * # comment here
17 | from .protocols import *
18 |
19 | from ..runners import * # comment here
20 | from ..queues import *
21 | from ..streams import *
22 |
23 | from some_library import (
24 | Just, Enough, Libraries, To, Fit, In, This, Nice, Split, Which, We, No, Longer, Use
25 | )
26 | from name_of_a_company.extremely_long_project_name.component.ttypes import CuteLittleServiceHandlerFactoryyy
27 | from name_of_a_company.extremely_long_project_name.extremely_long_component_name.ttypes import *
28 |
29 | from .a.b.c.subprocess import *
30 | from . import (tasks)
31 | from . import (A, B, C)
32 | from . import SomeVeryLongNameAndAllOfItsAdditionalLetters1, \
33 | SomeVeryLongNameAndAllOfItsAdditionalLetters2
34 |
35 | __all__ = (
36 | base_events.__all__
37 | + coroutines.__all__
38 | + events.__all__
39 | + futures.__all__
40 | + locks.__all__
41 | + protocols.__all__
42 | + runners.__all__
43 | + queues.__all__
44 | + streams.__all__
45 | + tasks.__all__
46 | )
47 |
48 |
49 | # output
50 |
51 |
52 | """The asyncio package, tracking PEP 3156."""
53 |
54 | # flake8: noqa
55 |
56 | from logging import ERROR
57 | import sys
58 |
59 | # This relies on each of the submodules having an __all__ variable.
60 | from .base_events import *
61 | from .coroutines import *
62 | from .events import * # comment here
63 |
64 | from .futures import *
65 | from .locks import * # comment here
66 | from .protocols import *
67 |
68 | from ..runners import * # comment here
69 | from ..queues import *
70 | from ..streams import *
71 |
72 | from some_library import (
73 | Just,
74 | Enough,
75 | Libraries,
76 | To,
77 | Fit,
78 | In,
79 | This,
80 | Nice,
81 | Split,
82 | Which,
83 | We,
84 | No,
85 | Longer,
86 | Use,
87 | )
88 | from name_of_a_company.extremely_long_project_name.component.ttypes import (
89 | CuteLittleServiceHandlerFactoryyy
90 | )
91 | from name_of_a_company.extremely_long_project_name.extremely_long_component_name.ttypes import *
92 |
93 | from .a.b.c.subprocess import *
94 | from . import tasks
95 | from . import A, B, C
96 | from . import (
97 | SomeVeryLongNameAndAllOfItsAdditionalLetters1,
98 | SomeVeryLongNameAndAllOfItsAdditionalLetters2,
99 | )
100 |
101 | __all__ = (
102 | base_events.__all__
103 | + coroutines.__all__
104 | + events.__all__
105 | + futures.__all__
106 | + locks.__all__
107 | + protocols.__all__
108 | + runners.__all__
109 | + queues.__all__
110 | + streams.__all__
111 | + tasks.__all__
112 | )
113 |
--------------------------------------------------------------------------------
/blib2to3/pygram.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pygram (Python 3.6)
2 |
3 | import os
4 | from typing import Any, Union
5 | from blib2to3.pgen2.grammar import Grammar
6 |
7 | class Symbols:
8 | def __init__(self, grammar: Grammar) -> None: ...
9 |
10 | class python_symbols(Symbols):
11 | and_expr: int
12 | and_test: int
13 | annassign: int
14 | arglist: int
15 | argument: int
16 | arith_expr: int
17 | assert_stmt: int
18 | async_funcdef: int
19 | async_stmt: int
20 | atom: int
21 | augassign: int
22 | break_stmt: int
23 | classdef: int
24 | comp_for: int
25 | comp_if: int
26 | comp_iter: int
27 | comp_op: int
28 | comparison: int
29 | compound_stmt: int
30 | continue_stmt: int
31 | decorated: int
32 | decorator: int
33 | decorators: int
34 | del_stmt: int
35 | dictsetmaker: int
36 | dotted_as_name: int
37 | dotted_as_names: int
38 | dotted_name: int
39 | encoding_decl: int
40 | eval_input: int
41 | except_clause: int
42 | exec_stmt: int
43 | expr: int
44 | expr_stmt: int
45 | exprlist: int
46 | factor: int
47 | file_input: int
48 | flow_stmt: int
49 | for_stmt: int
50 | funcdef: int
51 | global_stmt: int
52 | if_stmt: int
53 | import_as_name: int
54 | import_as_names: int
55 | import_from: int
56 | import_name: int
57 | import_stmt: int
58 | lambdef: int
59 | listmaker: int
60 | not_test: int
61 | old_comp_for: int
62 | old_comp_if: int
63 | old_comp_iter: int
64 | old_lambdef: int
65 | old_test: int
66 | or_test: int
67 | parameters: int
68 | pass_stmt: int
69 | power: int
70 | print_stmt: int
71 | raise_stmt: int
72 | return_stmt: int
73 | shift_expr: int
74 | simple_stmt: int
75 | single_input: int
76 | sliceop: int
77 | small_stmt: int
78 | star_expr: int
79 | stmt: int
80 | subscript: int
81 | subscriptlist: int
82 | suite: int
83 | term: int
84 | test: int
85 | testlist: int
86 | testlist1: int
87 | testlist_gexp: int
88 | testlist_safe: int
89 | testlist_star_expr: int
90 | tfpdef: int
91 | tfplist: int
92 | tname: int
93 | trailer: int
94 | try_stmt: int
95 | typedargslist: int
96 | varargslist: int
97 | vfpdef: int
98 | vfplist: int
99 | vname: int
100 | while_stmt: int
101 | with_item: int
102 | with_stmt: int
103 | with_var: int
104 | xor_expr: int
105 | yield_arg: int
106 | yield_expr: int
107 | yield_stmt: int
108 |
109 | class pattern_symbols(Symbols):
110 | Alternative: int
111 | Alternatives: int
112 | Details: int
113 | Matcher: int
114 | NegatedUnit: int
115 | Repeater: int
116 | Unit: int
117 |
118 | python_grammar: Grammar
119 | python_grammar_no_print_statement: Grammar
120 | python_grammar_no_print_statement_no_exec_statement: Grammar
121 | python_grammar_no_exec_statement: Grammar
122 | pattern_grammar: Grammar
123 |
124 | def initialize(cache_dir: Union[str, os.PathLike, None]) -> None: ...
125 |
--------------------------------------------------------------------------------
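The four grammar globals declared here differ only in whether `print` and `exec` remain keywords, and `initialize(cache_dir)` populates them all in one pass. A quick check of the distinction (assuming a writable cache; black passes its own cache directory here):

```
from blib2to3 import pygram

pygram.initialize(cache_dir=None)
g = pygram.python_grammar_no_print_statement_no_exec_statement
print("print" in g.keywords, "exec" in g.keywords)  # False False
print("print" in pygram.python_grammar.keywords)    # True in the py2-compatible grammar
```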
/docs/reference/reference_functions.rst:
--------------------------------------------------------------------------------
1 | *Black* functions
2 | =================
3 |
4 | *Contents are subject to change.*
5 |
6 | .. currentmodule:: black
7 |
8 | Assertions and checks
9 | ---------------------
10 |
11 | .. autofunction:: black.assert_equivalent
12 |
13 | .. autofunction:: black.assert_stable
14 |
15 | .. autofunction:: black.can_omit_invisible_parens
16 |
17 | .. autofunction:: black.is_empty_tuple
18 |
19 | .. autofunction:: black.is_import
20 |
21 | .. autofunction:: black.is_line_short_enough
22 |
23 | .. autofunction:: black.is_multiline_string
24 |
25 | .. autofunction:: black.is_one_tuple
26 |
27 | .. autofunction:: black.is_python36
28 |
29 | .. autofunction:: black.is_split_after_delimiter
30 |
31 | .. autofunction:: black.is_split_before_delimiter
32 |
33 | .. autofunction:: black.is_stub_body
34 |
35 | .. autofunction:: black.is_stub_suite
36 |
37 | .. autofunction:: black.is_vararg
38 |
39 | .. autofunction:: black.is_yield
40 |
41 |
42 | Formatting
43 | ----------
44 |
45 | .. autofunction:: black.format_file_contents
46 |
47 | .. autofunction:: black.format_file_in_place
48 |
49 | .. autofunction:: black.format_stdin_to_stdout
50 |
51 | .. autofunction:: black.format_str
52 |
53 | .. autofunction:: black.reformat_one
54 |
55 | .. autofunction:: black.schedule_formatting
56 |
57 | File operations
58 | ---------------
59 |
60 | .. autofunction:: black.dump_to_file
61 |
62 | .. autofunction:: black.gen_python_files_in_dir
63 |
64 | Parsing
65 | -------
66 |
67 | .. autofunction:: black.decode_bytes
68 |
69 | .. autofunction:: black.lib2to3_parse
70 |
71 | .. autofunction:: black.lib2to3_unparse
72 |
73 | Split functions
74 | ---------------
75 |
76 | .. autofunction:: black.delimiter_split
77 |
78 | .. autofunction:: black.left_hand_split
79 |
80 | .. autofunction:: black.right_hand_split
81 |
82 | .. autofunction:: black.standalone_comment_split
83 |
84 | .. autofunction:: black.split_line
85 |
86 | .. autofunction:: black.bracket_split_succeeded_or_raise
87 |
88 | Caching
89 | -------
90 |
91 | .. autofunction:: black.filter_cached
92 |
93 | .. autofunction:: black.get_cache_info
94 |
95 | .. autofunction:: black.read_cache
96 |
97 | .. autofunction:: black.write_cache
98 |
99 | Utilities
100 | ---------
101 |
102 | .. py:function:: black.DebugVisitor.show(code: str) -> None
103 |
104 | Pretty-print the lib2to3 AST of a given string of `code`.
105 |
106 | .. autofunction:: black.diff
107 |
108 | .. autofunction:: black.ensure_visible
109 |
110 | .. autofunction:: black.enumerate_reversed
111 |
112 | .. autofunction:: black.enumerate_with_length
113 |
114 | .. autofunction:: black.generate_comments
115 |
116 | .. autofunction:: black.make_comment
117 |
118 | .. autofunction:: black.maybe_make_parens_invisible_in_atom
119 |
120 | .. autofunction:: black.max_delimiter_priority_in_atom
121 |
122 | .. autofunction:: black.normalize_prefix
123 |
124 | .. autofunction:: black.normalize_string_quotes
125 |
126 | .. autofunction:: black.normalize_invisible_parens
127 |
128 | .. autofunction:: black.preceding_leaf
129 |
130 | .. autofunction:: black.sub_twice
131 |
132 | .. autofunction:: black.whitespace
133 |
--------------------------------------------------------------------------------
/blib2to3/pytree.pyi:
--------------------------------------------------------------------------------
1 | # Stubs for lib2to3.pytree (Python 3.6)
2 |
3 | import sys
4 | from typing import Any, Callable, Dict, Iterator, List, Optional, Text, Tuple, TypeVar, Union
5 |
6 | from blib2to3.pgen2.grammar import Grammar
7 |
8 | _P = TypeVar('_P')
9 | _NL = Union[Node, Leaf]
10 | _Context = Tuple[Text, int, int]
11 | _Results = Dict[Text, _NL]
12 | _RawNode = Tuple[int, Text, _Context, Optional[List[_NL]]]
13 | _Convert = Callable[[Grammar, _RawNode], Any]
14 |
15 | HUGE: int
16 |
17 | def type_repr(type_num: int) -> Text: ...
18 |
19 | class Base:
20 | type: int
21 | parent: Optional[Node]
22 | prefix: Text
23 | children: List[_NL]
24 | was_changed: bool
25 | was_checked: bool
26 | def __eq__(self, other: Any) -> bool: ...
27 | def _eq(self: _P, other: _P) -> bool: ...
28 | def clone(self: _P) -> _P: ...
29 | def post_order(self) -> Iterator[_NL]: ...
30 | def pre_order(self) -> Iterator[_NL]: ...
31 | def replace(self, new: Union[_NL, List[_NL]]) -> None: ...
32 | def get_lineno(self) -> int: ...
33 | def changed(self) -> None: ...
34 | def remove(self) -> Optional[int]: ...
35 | @property
36 | def next_sibling(self) -> Optional[_NL]: ...
37 | @property
38 | def prev_sibling(self) -> Optional[_NL]: ...
39 | def leaves(self) -> Iterator[Leaf]: ...
40 | def depth(self) -> int: ...
41 | def get_suffix(self) -> Text: ...
42 | if sys.version_info < (3,):
43 | def get_prefix(self) -> Text: ...
44 | def set_prefix(self, prefix: Text) -> None: ...
45 |
46 | class Node(Base):
47 | fixers_applied: List[Any]
48 | def __init__(self, type: int, children: List[_NL], context: Optional[Any] = ..., prefix: Optional[Text] = ..., fixers_applied: Optional[List[Any]] = ...) -> None: ...
49 | def set_child(self, i: int, child: _NL) -> None: ...
50 | def insert_child(self, i: int, child: _NL) -> None: ...
51 | def append_child(self, child: _NL) -> None: ...
52 |
53 | class Leaf(Base):
54 | lineno: int
55 | column: int
56 | value: Text
57 | fixers_applied: List[Any]
58 | def __init__(self, type: int, value: Text, context: Optional[_Context] = ..., prefix: Optional[Text] = ..., fixers_applied: List[Any] = ...) -> None: ...
59 | # bolted on attributes by Black
60 | bracket_depth: int
61 | opening_bracket: Leaf
62 |
63 | def convert(gr: Grammar, raw_node: _RawNode) -> _NL: ...
64 |
65 | class BasePattern:
66 | type: int
67 | content: Optional[Text]
68 | name: Optional[Text]
69 | def optimize(self) -> BasePattern: ... # sic, subclasses are free to optimize themselves into different patterns
70 | def match(self, node: _NL, results: Optional[_Results] = ...) -> bool: ...
71 | def match_seq(self, nodes: List[_NL], results: Optional[_Results] = ...) -> bool: ...
72 | def generate_matches(self, nodes: List[_NL]) -> Iterator[Tuple[int, _Results]]: ...
73 |
74 | class LeafPattern(BasePattern):
75 | def __init__(self, type: Optional[int] = ..., content: Optional[Text] = ..., name: Optional[Text] = ...) -> None: ...
76 |
77 | class NodePattern(BasePattern):
78 | wildcards: bool
79 | def __init__(self, type: Optional[int] = ..., content: Optional[Text] = ..., name: Optional[Text] = ...) -> None: ...
80 |
81 | class WildcardPattern(BasePattern):
82 | min: int
83 | max: int
84 | def __init__(self, content: Optional[Text] = ..., min: int = ..., max: int = ..., name: Optional[Text] = ...) -> None: ...
85 |
86 | class NegatedPattern(BasePattern):
87 | def __init__(self, content: Optional[Text] = ...) -> None: ...
88 |
89 | def generate_matches(patterns: List[BasePattern], nodes: List[_NL]) -> Iterator[Tuple[int, _Results]]: ...
90 |
--------------------------------------------------------------------------------
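The Node/Leaf split above is the entire syntax-tree API Black works with: a Leaf carries one token (type, value, and a `prefix` holding any preceding whitespace and comments), a Node carries children. A construction sketch using the stdlib lib2to3 twins of these classes, which blib2to3 mirrors:

```
from lib2to3.pytree import Leaf, Node
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as syms

# "x = 1" built by hand: an expr_stmt node holding three leaves.
stmt = Node(
    syms.expr_stmt,
    [
        Leaf(token.NAME, "x"),
        Leaf(token.EQUAL, "=", prefix=" "),
        Leaf(token.NUMBER, "1", prefix=" "),
    ],
)
print(str(stmt))  # "x = 1" -- str() concatenates each leaf's prefix and value
print(stmt.children[0].next_sibling.value)  # "="
```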
/tests/comments4.py:
--------------------------------------------------------------------------------
1 | class C:
2 | @pytest.mark.parametrize(
3 | ("post_data", "message"),
4 | [
5 | # metadata_version errors.
6 | (
7 | {},
8 | "None is an invalid value for Metadata-Version. "
9 | "Error: This field is required. "
10 | "see "
11 | "https://packaging.python.org/specifications/core-metadata",
12 | ),
13 | (
14 | {"metadata_version": "-1"},
15 | "'-1' is an invalid value for Metadata-Version. "
16 | "Error: Unknown Metadata Version "
17 | "see "
18 | "https://packaging.python.org/specifications/core-metadata",
19 | ),
20 | # name errors.
21 | (
22 | {"metadata_version": "1.2"},
23 | "'' is an invalid value for Name. "
24 | "Error: This field is required. "
25 | "see "
26 | "https://packaging.python.org/specifications/core-metadata",
27 | ),
28 | (
29 | {"metadata_version": "1.2", "name": "foo-"},
30 | "'foo-' is an invalid value for Name. "
31 | "Error: Must start and end with a letter or numeral and "
32 | "contain only ascii numeric and '.', '_' and '-'. "
33 | "see "
34 | "https://packaging.python.org/specifications/core-metadata",
35 | ),
36 | # version errors.
37 | (
38 | {"metadata_version": "1.2", "name": "example"},
39 | "'' is an invalid value for Version. "
40 | "Error: This field is required. "
41 | "see "
42 | "https://packaging.python.org/specifications/core-metadata",
43 | ),
44 | (
45 | {"metadata_version": "1.2", "name": "example", "version": "dog"},
46 | "'dog' is an invalid value for Version. "
47 | "Error: Must start and end with a letter or numeral and "
48 | "contain only ascii numeric and '.', '_' and '-'. "
49 | "see "
50 | "https://packaging.python.org/specifications/core-metadata",
51 | ),
52 | ],
53 | )
54 | def test_fails_invalid_post_data(
55 | self, pyramid_config, db_request, post_data, message
56 | ):
57 | pyramid_config.testing_securitypolicy(userid=1)
58 | db_request.POST = MultiDict(post_data)
59 |
60 |
61 | def foo(list_a, list_b):
62 | results = (
63 | User.query.filter(User.foo == "bar")
64 | .filter( # Because foo.
65 | db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
66 | )
67 | .filter(User.xyz.is_(None))
68 | # Another comment about the filtering on is_quux goes here.
69 | .filter(db.not_(User.is_pending.astext.cast(db.Boolean).is_(True)))
70 | .order_by(User.created_at.desc())
71 | .with_for_update(key_share=True)
72 | .all()
73 | )
74 | return results
75 |
76 |
77 | def foo2(list_a, list_b):
78 | # Standalone comment reasonably placed.
79 | return (
80 | User.query.filter(User.foo == "bar")
81 | .filter(
82 | db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
83 | )
84 | .filter(User.xyz.is_(None))
85 | )
86 |
87 |
88 | def foo3(list_a, list_b):
89 | return (
90 |         # Standalone comment but weirdly placed.
91 | User.query.filter(User.foo == "bar")
92 | .filter(
93 | db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
94 | )
95 | .filter(User.xyz.is_(None))
96 | )
97 |
--------------------------------------------------------------------------------
/tests/cantfit.py:
--------------------------------------------------------------------------------
1 | # long variable name
2 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 0
3 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 1 # with a comment
4 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = [
5 | 1, 2, 3
6 | ]
7 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function()
8 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
9 | arg1, arg2, arg3
10 | )
11 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
12 | [1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
13 | )
14 | # long function name
15 | normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying()
16 | normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
17 | arg1, arg2, arg3
18 | )
19 | normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
20 | [1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
21 | )
22 | # long arguments
23 | normal_name = normal_function_name(
24 | "but with super long string arguments that on their own exceed the line limit so there's no way it can ever fit",
25 | "eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs",
26 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0,
27 | )
28 | string_variable_name = (
29 | "a string that is waaaaaaaayyyyyyyy too long, even in parens, there's nothing you can do" # noqa
30 | )
31 | for key in """
32 | hostname
33 | port
34 | username
35 | """.split():
36 | if key in self.connect_kwargs:
37 | raise ValueError(err.format(key))
38 |
39 |
40 | # output
41 |
42 |
43 | # long variable name
44 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
45 | 0
46 | )
47 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
48 | 1
49 | ) # with a comment
50 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = [
51 | 1,
52 | 2,
53 | 3,
54 | ]
55 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
56 | function()
57 | )
58 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
59 | arg1, arg2, arg3
60 | )
61 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
62 | [1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
63 | )
64 | # long function name
65 | normal_name = (
66 | but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying()
67 | )
68 | normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
69 | arg1, arg2, arg3
70 | )
71 | normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
72 | [1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
73 | )
74 | # long arguments
75 | normal_name = normal_function_name(
76 | "but with super long string arguments that on their own exceed the line limit so there's no way it can ever fit",
77 | "eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs",
78 | this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0,
79 | )
80 | string_variable_name = "a string that is waaaaaaaayyyyyyyy too long, even in parens, there's nothing you can do" # noqa
81 | for key in """
82 | hostname
83 | port
84 | username
85 | """.split():
86 | if key in self.connect_kwargs:
87 | raise ValueError(err.format(key))
88 |
--------------------------------------------------------------------------------
/plugin/black.vim:
--------------------------------------------------------------------------------
1 | " black.vim
2 | " Author: Łukasz Langa
3 | " Created: Mon Mar 26 23:27:53 2018 -0700
4 | " Requires: Vim Ver7.0+
5 | " Version: 1.0
6 | "
7 | " Documentation:
8 | " This plugin formats Python files.
9 | "
10 | " History:
11 | " 1.0:
12 | " - initial version
13 |
14 | if v:version < 700 || !has('python3')
15 | echo "This script requires vim7.0+ with Python 3.6 support."
16 | finish
17 | endif
18 |
19 | if exists("g:load_black")
20 | finish
21 | endif
22 |
23 | let g:load_black = "py1.0"
24 | if !exists("g:black_virtualenv")
25 | let g:black_virtualenv = "~/.vim/black"
26 | endif
27 | if !exists("g:black_fast")
28 | let g:black_fast = 0
29 | endif
30 | if !exists("g:black_linelength")
31 | let g:black_linelength = 88
32 | endif
33 |
34 | python3 << endpython3
35 | import sys
36 | import vim
37 |
38 | def _get_python_binary(exec_prefix):
39 | if sys.platform[:3] == "win":
40 | return exec_prefix / 'python.exe'
41 | return exec_prefix / 'bin' / 'python3'
42 |
43 | def _get_pip(venv_path):
44 | if sys.platform[:3] == "win":
45 | return venv_path / 'Scripts' / 'pip.exe'
46 | return venv_path / 'bin' / 'pip'
47 |
48 | def _get_virtualenv_site_packages(venv_path, pyver):
49 | if sys.platform[:3] == "win":
50 | return venv_path / 'Lib' / 'site-packages'
51 | return venv_path / 'lib' / f'python{pyver[0]}.{pyver[1]}' / 'site-packages'
52 |
53 | def _initialize_black_env(upgrade=False):
54 | pyver = sys.version_info[:2]
55 | if pyver < (3, 6):
56 | print("Sorry, Black requires Python 3.6+ to run.")
57 | return False
58 |
59 | from pathlib import Path
60 | import subprocess
61 | import venv
62 | virtualenv_path = Path(vim.eval("g:black_virtualenv")).expanduser()
63 | virtualenv_site_packages = str(_get_virtualenv_site_packages(virtualenv_path, pyver))
64 | first_install = False
65 | if not virtualenv_path.is_dir():
66 | print('Please wait, one time setup for Black.')
67 | _executable = sys.executable
68 | try:
69 | sys.executable = str(_get_python_binary(Path(sys.exec_prefix)))
70 | print(f'Creating a virtualenv in {virtualenv_path}...')
71 | print('(this path can be customized in .vimrc by setting g:black_virtualenv)')
72 | venv.create(virtualenv_path, with_pip=True)
73 | finally:
74 | sys.executable = _executable
75 | first_install = True
76 | if first_install:
77 | print('Installing Black with pip...')
78 | if upgrade:
79 | print('Upgrading Black with pip...')
80 | if first_install or upgrade:
81 | subprocess.run([str(_get_pip(virtualenv_path)), 'install', '-U', 'black'], stdout=subprocess.PIPE)
82 | print('DONE! You are all set, thanks for waiting ✨ 🍰 ✨')
83 | if first_install:
84 | print('Pro-tip: to upgrade Black in the future, use the :BlackUpgrade command and restart Vim.\n')
85 | if sys.path[0] != virtualenv_site_packages:
86 | sys.path.insert(0, virtualenv_site_packages)
87 | return True
88 |
89 | if _initialize_black_env():
90 | import black
91 | import time
92 |
93 | def Black():
94 | start = time.time()
95 | fast = bool(int(vim.eval("g:black_fast")))
96 | line_length = int(vim.eval("g:black_linelength"))
97 | buffer_str = '\n'.join(vim.current.buffer) + '\n'
98 | try:
99 | new_buffer_str = black.format_file_contents(buffer_str, line_length=line_length, fast=fast)
100 | except black.NothingChanged:
101 | print(f'Already well formatted, good job. (took {time.time() - start:.4f}s)')
102 | except Exception as exc:
103 | print(exc)
104 | else:
105 | vim.current.buffer[:] = new_buffer_str.split('\n')[:-1]
106 | print(f'Reformatted in {time.time() - start:.4f}s.')
107 |
108 | def BlackUpgrade():
109 | _initialize_black_env(upgrade=True)
110 |
111 | def BlackVersion():
112 | print(f'Black, version {black.__version__} on Python {sys.version}.')
113 |
114 | endpython3
115 |
116 | command! Black :py3 Black()
117 | command! BlackUpgrade :py3 BlackUpgrade()
118 | command! BlackVersion :py3 BlackVersion()
119 |
120 | nmap ,= :Black<CR>
121 | vmap ,= :Black<CR>
122 |
--------------------------------------------------------------------------------
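For readers who want the plugin's core without Vim: `Black()` above is a thin wrapper around a single library call, using the same signature the plugin itself relies on. The equivalent from plain Python:

```
import black

source = "x=1\n"
try:
    formatted = black.format_file_contents(source, line_length=88, fast=False)
except black.NothingChanged:
    formatted = source  # already formatted; the plugin prints a message instead
print(formatted, end="")  # x = 1
```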
/tests/empty_lines.py:
--------------------------------------------------------------------------------
1 | """Docstring."""
2 |
3 |
4 | # leading comment
5 | def f():
6 | NO = ''
7 | SPACE = ' '
8 | DOUBLESPACE = ' '
9 |
10 | t = leaf.type
11 | p = leaf.parent # trailing comment
12 | v = leaf.value
13 |
14 | if t in ALWAYS_NO_SPACE:
15 | pass
16 | if t == token.COMMENT: # another trailing comment
17 | return DOUBLESPACE
18 |
19 |
20 | assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
21 |
22 |
23 | prev = leaf.prev_sibling
24 | if not prev:
25 | prevp = preceding_leaf(p)
26 | if not prevp or prevp.type in OPENING_BRACKETS:
27 |
28 |
29 | return NO
30 |
31 |
32 | if prevp.type == token.EQUAL:
33 | if prevp.parent and prevp.parent.type in {
34 | syms.typedargslist,
35 | syms.varargslist,
36 | syms.parameters,
37 | syms.arglist,
38 | syms.argument,
39 | }:
40 | return NO
41 |
42 | elif prevp.type == token.DOUBLESTAR:
43 | if prevp.parent and prevp.parent.type in {
44 | syms.typedargslist,
45 | syms.varargslist,
46 | syms.parameters,
47 | syms.arglist,
48 | syms.dictsetmaker,
49 | }:
50 | return NO
51 |
52 | ###############################################################################
53 | # SECTION BECAUSE SECTIONS
54 | ###############################################################################
55 |
56 | def g():
57 | NO = ''
58 | SPACE = ' '
59 | DOUBLESPACE = ' '
60 |
61 | t = leaf.type
62 | p = leaf.parent
63 | v = leaf.value
64 |
65 | # Comment because comments
66 |
67 | if t in ALWAYS_NO_SPACE:
68 | pass
69 | if t == token.COMMENT:
70 | return DOUBLESPACE
71 |
72 | # Another comment because more comments
73 | assert p is not None, f'INTERNAL ERROR: hand-made leaf without parent: {leaf!r}'
74 |
75 | prev = leaf.prev_sibling
76 | if not prev:
77 | prevp = preceding_leaf(p)
78 |
79 | if not prevp or prevp.type in OPENING_BRACKETS:
80 | # Start of the line or a bracketed expression.
81 | # More than one line for the comment.
82 | return NO
83 |
84 | if prevp.type == token.EQUAL:
85 | if prevp.parent and prevp.parent.type in {
86 | syms.typedargslist,
87 | syms.varargslist,
88 | syms.parameters,
89 | syms.arglist,
90 | syms.argument,
91 | }:
92 | return NO
93 |
94 |
95 | # output
96 |
97 |
98 | """Docstring."""
99 |
100 |
101 | # leading comment
102 | def f():
103 | NO = ""
104 | SPACE = " "
105 | DOUBLESPACE = " "
106 |
107 | t = leaf.type
108 | p = leaf.parent # trailing comment
109 | v = leaf.value
110 |
111 | if t in ALWAYS_NO_SPACE:
112 | pass
113 | if t == token.COMMENT: # another trailing comment
114 | return DOUBLESPACE
115 |
116 | assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
117 |
118 | prev = leaf.prev_sibling
119 | if not prev:
120 | prevp = preceding_leaf(p)
121 | if not prevp or prevp.type in OPENING_BRACKETS:
122 |
123 | return NO
124 |
125 | if prevp.type == token.EQUAL:
126 | if prevp.parent and prevp.parent.type in {
127 | syms.typedargslist,
128 | syms.varargslist,
129 | syms.parameters,
130 | syms.arglist,
131 | syms.argument,
132 | }:
133 | return NO
134 |
135 | elif prevp.type == token.DOUBLESTAR:
136 | if prevp.parent and prevp.parent.type in {
137 | syms.typedargslist,
138 | syms.varargslist,
139 | syms.parameters,
140 | syms.arglist,
141 | syms.dictsetmaker,
142 | }:
143 | return NO
144 |
145 |
146 | ###############################################################################
147 | # SECTION BECAUSE SECTIONS
148 | ###############################################################################
149 |
150 |
151 | def g():
152 | NO = ""
153 | SPACE = " "
154 | DOUBLESPACE = " "
155 |
156 | t = leaf.type
157 | p = leaf.parent
158 | v = leaf.value
159 |
160 | # Comment because comments
161 |
162 | if t in ALWAYS_NO_SPACE:
163 | pass
164 | if t == token.COMMENT:
165 | return DOUBLESPACE
166 |
167 | # Another comment because more comments
168 | assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
169 |
170 | prev = leaf.prev_sibling
171 | if not prev:
172 | prevp = preceding_leaf(p)
173 |
174 | if not prevp or prevp.type in OPENING_BRACKETS:
175 | # Start of the line or a bracketed expression.
176 | # More than one line for the comment.
177 | return NO
178 |
179 | if prevp.type == token.EQUAL:
180 | if prevp.parent and prevp.parent.type in {
181 | syms.typedargslist,
182 | syms.varargslist,
183 | syms.parameters,
184 | syms.arglist,
185 | syms.argument,
186 | }:
187 | return NO
188 |
--------------------------------------------------------------------------------
/tests/class_methods_new_line.py:
--------------------------------------------------------------------------------
1 | class ClassSimplest:
2 | pass
3 | class ClassWithSingleField:
4 | a = 1
5 | class ClassWithJustTheDocstring:
6 | """Just a docstring."""
7 | class ClassWithInit:
8 | def __init__(self):
9 | pass
10 | class ClassWithTheDocstringAndInit:
11 | """Just a docstring."""
12 | def __init__(self):
13 | pass
14 | class ClassWithInitAndVars:
15 | cls_var = 100
16 | def __init__(self):
17 | pass
18 | class ClassWithInitAndVarsAndDocstring:
19 | """Test class"""
20 | cls_var = 100
21 | def __init__(self):
22 | pass
23 | class ClassWithDecoInit:
24 | @deco
25 | def __init__(self):
26 | pass
27 | class ClassWithDecoInitAndVars:
28 | cls_var = 100
29 | @deco
30 | def __init__(self):
31 | pass
32 | class ClassWithDecoInitAndVarsAndDocstring:
33 | """Test class"""
34 | cls_var = 100
35 | @deco
36 | def __init__(self):
37 | pass
38 | class ClassSimplestWithInner:
39 | class Inner:
40 | pass
41 | class ClassSimplestWithInnerWithDocstring:
42 | class Inner:
43 | """Just a docstring."""
44 | def __init__(self):
45 | pass
46 | class ClassWithSingleFieldWithInner:
47 | a = 1
48 | class Inner:
49 | pass
50 | class ClassWithJustTheDocstringWithInner:
51 | """Just a docstring."""
52 | class Inner:
53 | pass
54 | class ClassWithInitWithInner:
55 | class Inner:
56 | pass
57 | def __init__(self):
58 | pass
59 | class ClassWithInitAndVarsWithInner:
60 | cls_var = 100
61 | class Inner:
62 | pass
63 | def __init__(self):
64 | pass
65 | class ClassWithInitAndVarsAndDocstringWithInner:
66 | """Test class"""
67 | cls_var = 100
68 | class Inner:
69 | pass
70 | def __init__(self):
71 | pass
72 | class ClassWithDecoInitWithInner:
73 | class Inner:
74 | pass
75 | @deco
76 | def __init__(self):
77 | pass
78 | class ClassWithDecoInitAndVarsWithInner:
79 | cls_var = 100
80 | class Inner:
81 | pass
82 | @deco
83 | def __init__(self):
84 | pass
85 | class ClassWithDecoInitAndVarsAndDocstringWithInner:
86 | """Test class"""
87 | cls_var = 100
88 | class Inner:
89 | pass
90 | @deco
91 | def __init__(self):
92 | pass
93 | class ClassWithDecoInitAndVarsAndDocstringWithInner2:
94 | """Test class"""
95 | class Inner:
96 | pass
97 | cls_var = 100
98 | @deco
99 | def __init__(self):
100 | pass
101 |
102 |
103 | # output
104 |
105 |
106 | class ClassSimplest:
107 | pass
108 |
109 |
110 | class ClassWithSingleField:
111 | a = 1
112 |
113 |
114 | class ClassWithJustTheDocstring:
115 | """Just a docstring."""
116 |
117 |
118 | class ClassWithInit:
119 | def __init__(self):
120 | pass
121 |
122 |
123 | class ClassWithTheDocstringAndInit:
124 | """Just a docstring."""
125 |
126 | def __init__(self):
127 | pass
128 |
129 |
130 | class ClassWithInitAndVars:
131 | cls_var = 100
132 |
133 | def __init__(self):
134 | pass
135 |
136 |
137 | class ClassWithInitAndVarsAndDocstring:
138 | """Test class"""
139 |
140 | cls_var = 100
141 |
142 | def __init__(self):
143 | pass
144 |
145 |
146 | class ClassWithDecoInit:
147 | @deco
148 | def __init__(self):
149 | pass
150 |
151 |
152 | class ClassWithDecoInitAndVars:
153 | cls_var = 100
154 |
155 | @deco
156 | def __init__(self):
157 | pass
158 |
159 |
160 | class ClassWithDecoInitAndVarsAndDocstring:
161 | """Test class"""
162 |
163 | cls_var = 100
164 |
165 | @deco
166 | def __init__(self):
167 | pass
168 |
169 |
170 | class ClassSimplestWithInner:
171 | class Inner:
172 | pass
173 |
174 |
175 | class ClassSimplestWithInnerWithDocstring:
176 | class Inner:
177 | """Just a docstring."""
178 |
179 | def __init__(self):
180 | pass
181 |
182 |
183 | class ClassWithSingleFieldWithInner:
184 | a = 1
185 |
186 | class Inner:
187 | pass
188 |
189 |
190 | class ClassWithJustTheDocstringWithInner:
191 | """Just a docstring."""
192 |
193 | class Inner:
194 | pass
195 |
196 |
197 | class ClassWithInitWithInner:
198 | class Inner:
199 | pass
200 |
201 | def __init__(self):
202 | pass
203 |
204 |
205 | class ClassWithInitAndVarsWithInner:
206 | cls_var = 100
207 |
208 | class Inner:
209 | pass
210 |
211 | def __init__(self):
212 | pass
213 |
214 |
215 | class ClassWithInitAndVarsAndDocstringWithInner:
216 | """Test class"""
217 |
218 | cls_var = 100
219 |
220 | class Inner:
221 | pass
222 |
223 | def __init__(self):
224 | pass
225 |
226 |
227 | class ClassWithDecoInitWithInner:
228 | class Inner:
229 | pass
230 |
231 | @deco
232 | def __init__(self):
233 | pass
234 |
235 |
236 | class ClassWithDecoInitAndVarsWithInner:
237 | cls_var = 100
238 |
239 | class Inner:
240 | pass
241 |
242 | @deco
243 | def __init__(self):
244 | pass
245 |
246 |
247 | class ClassWithDecoInitAndVarsAndDocstringWithInner:
248 | """Test class"""
249 |
250 | cls_var = 100
251 |
252 | class Inner:
253 | pass
254 |
255 | @deco
256 | def __init__(self):
257 | pass
258 |
259 |
260 | class ClassWithDecoInitAndVarsAndDocstringWithInner2:
261 | """Test class"""
262 |
263 | class Inner:
264 | pass
265 |
266 | cls_var = 100
267 |
268 | @deco
269 | def __init__(self):
270 | pass
271 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/grammar.py:
--------------------------------------------------------------------------------
1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | """This module defines the data structures used to represent a grammar.
5 |
6 | These are a bit arcane because they are derived from the data
7 | structures used by Python's 'pgen' parser generator.
8 |
9 | There's also a table here mapping operators to their names in the
10 | token module; the Python tokenize module reports all operators as the
11 | fallback token code OP, but the parser needs the actual token code.
12 |
13 | """
14 |
15 | # Python imports
16 | import pickle
17 |
18 | # Local imports
19 | from . import token
20 |
21 |
22 | class Grammar(object):
23 | """Pgen parsing tables conversion class.
24 |
25 | Once initialized, this class supplies the grammar tables for the
26 | parsing engine implemented by parse.py. The parsing engine
27 | accesses the instance variables directly. The class here does not
28 | provide initialization of the tables; several subclasses exist to
29 | do this (see the conv and pgen modules).
30 |
31 | The load() method reads the tables from a pickle file, which is
32 | much faster than the other ways offered by subclasses. The pickle
33 | file is written by calling dump() (after loading the grammar
34 | tables using a subclass). The report() method prints a readable
35 | representation of the tables to stdout, for debugging.
36 |
37 | The instance variables are as follows:
38 |
39 | symbol2number -- a dict mapping symbol names to numbers. Symbol
40 | numbers are always 256 or higher, to distinguish
41 | them from token numbers, which are between 0 and
42 | 255 (inclusive).
43 |
44 | number2symbol -- a dict mapping numbers to symbol names;
45 | these two are each other's inverse.
46 |
47 | states -- a list of DFAs, where each DFA is a list of
48 | states, each state is a list of arcs, and each
49 | arc is a (i, j) pair where i is a label and j is
50 | a state number. The DFA number is the index into
51 | this list. (This name is slightly confusing.)
52 | Final states are represented by a special arc of
53 | the form (0, j) where j is its own state number.
54 |
55 | dfas -- a dict mapping symbol numbers to (DFA, first)
56 | pairs, where DFA is an item from the states list
57 | above, and first is a set of tokens that can
58 | begin this grammar rule (represented by a dict
59 | whose values are always 1).
60 |
61 | labels -- a list of (x, y) pairs where x is either a token
62 | number or a symbol number, and y is either None
63 | or a string; the strings are keywords. The label
64 | number is the index in this list; label numbers
65 | are used to mark state transitions (arcs) in the
66 | DFAs.
67 |
68 | start -- the number of the grammar's start symbol.
69 |
70 | keywords -- a dict mapping keyword strings to arc labels.
71 |
72 | tokens -- a dict mapping token numbers to arc labels.
73 |
74 | """
75 |
76 | def __init__(self):
77 | self.symbol2number = {}
78 | self.number2symbol = {}
79 | self.states = []
80 | self.dfas = {}
81 | self.labels = [(0, "EMPTY")]
82 | self.keywords = {}
83 | self.tokens = {}
84 | self.symbol2label = {}
85 | self.start = 256
86 |
87 | def dump(self, filename):
88 | """Dump the grammar tables to a pickle file."""
89 | with open(filename, "wb") as f:
90 | pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL)
91 |
92 | def load(self, filename):
93 | """Load the grammar tables from a pickle file."""
94 | with open(filename, "rb") as f:
95 | d = pickle.load(f)
96 | self.__dict__.update(d)
97 |
98 | def loads(self, pkl):
99 | """Load the grammar tables from a pickle bytes object."""
100 | self.__dict__.update(pickle.loads(pkl))
101 |
102 | def copy(self):
103 | """
104 | Copy the grammar.
105 | """
106 | new = self.__class__()
107 | for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
108 | "tokens", "symbol2label"):
109 | setattr(new, dict_attr, getattr(self, dict_attr).copy())
110 | new.labels = self.labels[:]
111 | new.states = self.states[:]
112 | new.start = self.start
113 | return new
114 |
115 | def report(self):
116 | """Dump the grammar tables to standard output, for debugging."""
117 | from pprint import pprint
118 | print("s2n")
119 | pprint(self.symbol2number)
120 | print("n2s")
121 | pprint(self.number2symbol)
122 | print("states")
123 | pprint(self.states)
124 | print("dfas")
125 | pprint(self.dfas)
126 | print("labels")
127 | pprint(self.labels)
128 | print("start", self.start)
129 |
130 |
131 | # Map from operator to number (since tokenize doesn't do this)
132 |
133 | opmap_raw = """
134 | ( LPAR
135 | ) RPAR
136 | [ LSQB
137 | ] RSQB
138 | : COLON
139 | , COMMA
140 | ; SEMI
141 | + PLUS
142 | - MINUS
143 | * STAR
144 | / SLASH
145 | | VBAR
146 | & AMPER
147 | < LESS
148 | > GREATER
149 | = EQUAL
150 | . DOT
151 | % PERCENT
152 | ` BACKQUOTE
153 | { LBRACE
154 | } RBRACE
155 | @ AT
156 | @= ATEQUAL
157 | == EQEQUAL
158 | != NOTEQUAL
159 | <> NOTEQUAL
160 | <= LESSEQUAL
161 | >= GREATEREQUAL
162 | ~ TILDE
163 | ^ CIRCUMFLEX
164 | << LEFTSHIFT
165 | >> RIGHTSHIFT
166 | ** DOUBLESTAR
167 | += PLUSEQUAL
168 | -= MINEQUAL
169 | *= STAREQUAL
170 | /= SLASHEQUAL
171 | %= PERCENTEQUAL
172 | &= AMPEREQUAL
173 | |= VBAREQUAL
174 | ^= CIRCUMFLEXEQUAL
175 | <<= LEFTSHIFTEQUAL
176 | >>= RIGHTSHIFTEQUAL
177 | **= DOUBLESTAREQUAL
178 | // DOUBLESLASH
179 | //= DOUBLESLASHEQUAL
180 | -> RARROW
181 | """
182 |
183 | opmap = {}
184 | for line in opmap_raw.splitlines():
185 | if line:
186 | op, name = line.split()
187 | opmap[op] = getattr(token, name)
188 |
--------------------------------------------------------------------------------
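Since `dump()` and `load()` above are just a pickle round-trip of `__dict__`, a grammar survives a trip through a single file unchanged, and `opmap` is built from `opmap_raw` at import time. A small check of both (writes a throwaway pickle into the working directory):

```
from blib2to3.pgen2 import grammar, token

g = grammar.Grammar()
g.dump("demo.pickle")   # pickles __dict__ at HIGHEST_PROTOCOL
g2 = grammar.Grammar()
g2.load("demo.pickle")  # restores the tables via __dict__.update()
assert g2.start == 256 and g2.labels == [(0, "EMPTY")]
assert grammar.opmap["->"] == token.RARROW  # operator table built from opmap_raw
```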
/tests/composition.py:
--------------------------------------------------------------------------------
1 | class C:
2 | def test(self) -> None:
3 | with patch("black.out", print):
4 | self.assertEqual(
5 | unstyle(str(report)), "1 file reformatted, 1 file failed to reformat."
6 | )
7 | self.assertEqual(
8 | unstyle(str(report)),
9 | "1 file reformatted, 1 file left unchanged, 1 file failed to reformat.",
10 | )
11 | self.assertEqual(
12 | unstyle(str(report)),
13 | "2 files reformatted, 1 file left unchanged, "
14 | "1 file failed to reformat.",
15 | )
16 | self.assertEqual(
17 | unstyle(str(report)),
18 | "2 files reformatted, 2 files left unchanged, "
19 | "2 files failed to reformat.",
20 | )
21 | for i in (a,):
22 | if (
23 | # Rule 1
24 | i % 2 == 0
25 | # Rule 2
26 | and i % 3 == 0
27 | ):
28 | while (
29 | # Just a comment
30 | call()
31 | # Another
32 | ):
33 | print(i)
34 | xxxxxxxxxxxxxxxx = Yyyy2YyyyyYyyyyy(
35 | push_manager=context.request.resource_manager,
36 | max_items_to_push=num_items,
37 | batch_size=Yyyy2YyyyYyyyyYyyy.FULL_SIZE,
38 | ).push(
39 | # Only send the first n items.
40 | items=items[:num_items]
41 | )
42 | return (
43 | "Utterly failed doctest test for %s\n"
44 | ' File "%s", line %s, in %s\n\n%s'
45 | % (test.name, test.filename, lineno, lname, err)
46 | )
47 |
48 | def omitting_trailers(self) -> None:
49 | get_collection(
50 | hey_this_is_a_very_long_call, it_has_funny_attributes, really=True
51 | )[OneLevelIndex]
52 | get_collection(
53 | hey_this_is_a_very_long_call, it_has_funny_attributes, really=True
54 | )[OneLevelIndex][TwoLevelIndex][ThreeLevelIndex][FourLevelIndex]
55 | d[0][1][2][3][4][5][6][7][8][9][10][11][12][13][14][15][16][17][18][19][20][21][
56 | 22
57 | ]
58 | assignment = (
59 | some.rather.elaborate.rule() and another.rule.ending_with.index[123]
60 | )
61 |
62 | def easy_asserts(self) -> None:
63 | assert {
64 | key1: value1,
65 | key2: value2,
66 | key3: value3,
67 | key4: value4,
68 | key5: value5,
69 | key6: value6,
70 | key7: value7,
71 | key8: value8,
72 | key9: value9,
73 | } == expected, "Not what we expected"
74 |
75 | assert expected == {
76 | key1: value1,
77 | key2: value2,
78 | key3: value3,
79 | key4: value4,
80 | key5: value5,
81 | key6: value6,
82 | key7: value7,
83 | key8: value8,
84 | key9: value9,
85 | }, "Not what we expected"
86 |
87 | assert expected == {
88 | key1: value1,
89 | key2: value2,
90 | key3: value3,
91 | key4: value4,
92 | key5: value5,
93 | key6: value6,
94 | key7: value7,
95 | key8: value8,
96 | key9: value9,
97 | }
98 |
99 | def tricky_asserts(self) -> None:
100 | assert {
101 | key1: value1,
102 | key2: value2,
103 | key3: value3,
104 | key4: value4,
105 | key5: value5,
106 | key6: value6,
107 | key7: value7,
108 | key8: value8,
109 | key9: value9,
110 | } == expected(
111 | value, is_going_to_be="too long to fit in a single line", srsly=True
112 | ), "Not what we expected"
113 |
114 | assert {
115 | key1: value1,
116 | key2: value2,
117 | key3: value3,
118 | key4: value4,
119 | key5: value5,
120 | key6: value6,
121 | key7: value7,
122 | key8: value8,
123 | key9: value9,
124 | } == expected, (
125 | "Not what we expected and the message is too long to fit in one line"
126 | )
127 |
128 | assert expected(
129 | value, is_going_to_be="too long to fit in a single line", srsly=True
130 | ) == {
131 | key1: value1,
132 | key2: value2,
133 | key3: value3,
134 | key4: value4,
135 | key5: value5,
136 | key6: value6,
137 | key7: value7,
138 | key8: value8,
139 | key9: value9,
140 | }, "Not what we expected"
141 |
142 | assert expected == {
143 | key1: value1,
144 | key2: value2,
145 | key3: value3,
146 | key4: value4,
147 | key5: value5,
148 | key6: value6,
149 | key7: value7,
150 | key8: value8,
151 | key9: value9,
152 | }, (
153 | "Not what we expected and the message is too long to fit "
154 | "in one line because it's too long"
155 | )
156 |
157 | dis_c_instance_method = """\
158 | %3d 0 LOAD_FAST 1 (x)
159 | 2 LOAD_CONST 1 (1)
160 | 4 COMPARE_OP 2 (==)
161 | 6 LOAD_FAST 0 (self)
162 | 8 STORE_ATTR 0 (x)
163 | 10 LOAD_CONST 0 (None)
164 | 12 RETURN_VALUE
165 | """ % (
166 | _C.__init__.__code__.co_firstlineno + 1,
167 | )
168 |
169 | assert (
170 | expectedexpectedexpectedexpectedexpectedexpectedexpectedexpectedexpect
171 | == {
172 | key1: value1,
173 | key2: value2,
174 | key3: value3,
175 | key4: value4,
176 | key5: value5,
177 | key6: value6,
178 | key7: value7,
179 | key8: value8,
180 | key9: value9,
181 | }
182 | )
183 |
--------------------------------------------------------------------------------
/tests/fmtonoff.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import asyncio
3 | import sys
4 |
5 | from third_party import X, Y, Z
6 |
7 | from library import some_connection, \
8 | some_decorator
9 | # fmt: off
10 | from third_party import (X,
11 | Y, Z)
12 | # fmt: on
13 | f'trigger 3.6 mode'
14 | # fmt: off
15 | def func_no_args():
16 | a; b; c
17 | if True: raise RuntimeError
18 | if False: ...
19 | for i in range(10):
20 | print(i)
21 | continue
22 | exec('new-style exec', {}, {})
23 | return None
24 | async def coroutine(arg, exec=False):
25 | 'Single-line docstring. Multiline is harder to reformat.'
26 | async with some_connection() as conn:
27 | await conn.do_what_i_mean('SELECT bobby, tables FROM xkcd', timeout=2)
28 | await asyncio.sleep(1)
29 | @asyncio.coroutine
30 | @some_decorator(
31 | with_args=True,
32 | many_args=[1,2,3]
33 | )
34 | def function_signature_stress_test(number:int,no_annotation=None,text:str='default',* ,debug:bool=False,**kwargs) -> str:
35 | return text[number:-1]
36 | # fmt: on
37 | def spaces(a=1, b=(), c=[], d={}, e=True, f=-1, g=1 if False else 2, h="", i=r''):
38 | offset = attr.ib(default=attr.Factory( lambda: _r.uniform(10000, 200000)))
39 | assert task._cancel_stack[:len(old_stack)] == old_stack
40 | def spaces_types(a: int = 1, b: tuple = (), c: list = [], d: dict = {}, e: bool = True, f: int = -1, g: int = 1 if False else 2, h: str = "", i: str = r''): ...
41 | def spaces2(result= _core.Value(None)):
42 | ...
43 | def example(session):
44 | # fmt: off
45 | result = session\
46 | .query(models.Customer.id)\
47 | .filter(models.Customer.account_id == account_id,
48 | models.Customer.email == email_address)\
49 | .order_by(models.Customer.id.asc())\
50 | .all()
51 | # fmt: on
52 | def long_lines():
53 | if True:
54 | typedargslist.extend(
55 | gen_annotated_params(ast_args.kwonlyargs, ast_args.kw_defaults, parameters, implicit_default=True)
56 | )
57 | # fmt: off
58 | a = (
59 | unnecessary_bracket()
60 | )
61 | # fmt: on
62 | _type_comment_re = re.compile(
63 | r"""
64 | ^
65 | [\t ]*
66 | \#[ ]type:[ ]*
67 | (?P<type>
68 | [^#\t\n]+?
69 | )
70 | (?<!ignore)     # note: this will force the non-greedy + in <type> to match
71 | # a trailing space which is why we need the silliness below
72 | (?
77 | (?:\#[^\n]*)?
78 | \n?
79 | )
80 | $
81 | """, # fmt: off
82 | re.MULTILINE | re.VERBOSE
83 | )
84 | # fmt: on
85 | def single_literal_yapf_disable():
86 | """Black does not support this."""
87 | BAZ = {
88 | (1, 2, 3, 4),
89 | (5, 6, 7, 8),
90 | (9, 10, 11, 12),
91 | } # yapf: disable
92 | # fmt: off
93 | # No formatting to the end of the file
94 | l=[1,2,3]
95 | d={'a':1,
96 | 'b':2}
97 |
98 | # output
99 |
100 |
101 | #!/usr/bin/env python3
102 | import asyncio
103 | import sys
104 |
105 | from third_party import X, Y, Z
106 |
107 | from library import some_connection, some_decorator
108 |
109 | # fmt: off
110 | from third_party import (X,
111 | Y, Z)
112 | # fmt: on
113 | f"trigger 3.6 mode"
114 | # fmt: off
115 | def func_no_args():
116 | a; b; c
117 | if True: raise RuntimeError
118 | if False: ...
119 | for i in range(10):
120 | print(i)
121 | continue
122 | exec('new-style exec', {}, {})
123 | return None
124 | async def coroutine(arg, exec=False):
125 | 'Single-line docstring. Multiline is harder to reformat.'
126 | async with some_connection() as conn:
127 | await conn.do_what_i_mean('SELECT bobby, tables FROM xkcd', timeout=2)
128 | await asyncio.sleep(1)
129 | @asyncio.coroutine
130 | @some_decorator(
131 | with_args=True,
132 | many_args=[1,2,3]
133 | )
134 | def function_signature_stress_test(number:int,no_annotation=None,text:str='default',* ,debug:bool=False,**kwargs) -> str:
135 | return text[number:-1]
136 | # fmt: on
137 | def spaces(a=1, b=(), c=[], d={}, e=True, f=-1, g=1 if False else 2, h="", i=r""):
138 | offset = attr.ib(default=attr.Factory(lambda: _r.uniform(10000, 200000)))
139 | assert task._cancel_stack[: len(old_stack)] == old_stack
140 |
141 |
142 | def spaces_types(
143 | a: int = 1,
144 | b: tuple = (),
145 | c: list = [],
146 | d: dict = {},
147 | e: bool = True,
148 | f: int = -1,
149 | g: int = 1 if False else 2,
150 | h: str = "",
151 | i: str = r"",
152 | ):
153 | ...
154 |
155 |
156 | def spaces2(result=_core.Value(None)):
157 | ...
158 |
159 |
160 | def example(session):
161 | # fmt: off
162 | result = session\
163 | .query(models.Customer.id)\
164 | .filter(models.Customer.account_id == account_id,
165 | models.Customer.email == email_address)\
166 | .order_by(models.Customer.id.asc())\
167 | .all()
168 | # fmt: on
169 |
170 |
171 | def long_lines():
172 | if True:
173 | typedargslist.extend(
174 | gen_annotated_params(
175 | ast_args.kwonlyargs,
176 | ast_args.kw_defaults,
177 | parameters,
178 | implicit_default=True,
179 | )
180 | )
181 | # fmt: off
182 | a = (
183 | unnecessary_bracket()
184 | )
185 | # fmt: on
186 | _type_comment_re = re.compile(
187 | r"""
188 | ^
189 | [\t ]*
190 | \#[ ]type:[ ]*
191 | (?P<type>
192 | [^#\t\n]+?
193 | )
194 | (?<!ignore)     # note: this will force the non-greedy + in <type> to match
195 | # a trailing space which is why we need the silliness below
196 | (?
201 | (?:\#[^\n]*)?
202 | \n?
203 | )
204 | $
205 | """, # fmt: off
206 | re.MULTILINE | re.VERBOSE,
207 | )
208 | # fmt: on
209 |
210 |
211 | def single_literal_yapf_disable():
212 | """Black does not support this."""
213 | BAZ = {(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12)} # yapf: disable
214 |
215 |
216 | # fmt: off
217 | # No formatting to the end of the file
218 | l=[1,2,3]
219 | d={'a':1,
220 | 'b':2}
221 |
--------------------------------------------------------------------------------
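The pragma pair this fixture exercises deserves a minimal standalone example: Black leaves everything between the markers byte-for-byte intact, so hand-aligned regions survive reformatting while the surrounding code is still normalized.

```
# fmt: off
IDENTITY = [
    1, 0,
    0, 1,
]
# fmt: on
value = {"a": 1, "b": 2}  # code outside the region is formatted as usual
```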
/tests/comments2.py:
--------------------------------------------------------------------------------
1 | # Please keep __all__ alphabetized within each category.
2 |
3 | __all__ = [
4 | # Super-special typing primitives.
5 | 'Any',
6 | 'Callable',
7 | 'ClassVar',
8 |
9 | # ABCs (from collections.abc).
10 | 'AbstractSet', # collections.abc.Set.
11 | 'ByteString',
12 | 'Container',
13 |
14 | # Concrete collection types.
15 | 'Counter',
16 | 'Deque',
17 | 'Dict',
18 | 'DefaultDict',
19 | 'List',
20 | 'Set',
21 | 'FrozenSet',
22 | 'NamedTuple', # Not really a type.
23 | 'Generator',
24 | ]
25 |
26 | not_shareables = [
27 | # singletons
28 | True,
29 | False,
30 | NotImplemented, ...,
31 | # builtin types and objects
32 | type,
33 | object,
34 | object(),
35 | Exception(),
36 | 42,
37 | 100.0,
38 | "spam",
39 | # user-defined types and objects
40 | Cheese,
41 | Cheese("Wensleydale"),
42 | SubBytes(b"spam"),
43 | ]
44 |
45 | if 'PYTHON' in os.environ:
46 | add_compiler(compiler_from_env())
47 | else:
48 | # for compiler in compilers.values():
49 | # add_compiler(compiler)
50 | add_compiler(compilers[(7.0, 32)])
51 | # add_compiler(compilers[(7.1, 64)])
52 |
53 | # Comment before function.
54 | def inline_comments_in_brackets_ruin_everything():
55 | if typedargslist:
56 | parameters.children = [
57 | children[0], # (1
58 | body,
59 | children[-1], # )1
60 | ]
61 | else:
62 | parameters.children = [
63 | parameters.children[0], # (2 what if this was actually long
64 | body,
65 | parameters.children[-1], # )2
66 | ]
67 | if (self._proc is not None
68 | # has the child process finished?
69 | and self._returncode is None
70 | # the child process has finished, but the
71 | # transport hasn't been notified yet?
72 | and self._proc.poll() is None):
73 | pass
74 | # no newline before or after
75 | short = [
76 | # one
77 | 1,
78 | # two
79 | 2]
80 |
81 | # no newline after
82 | call(arg1, arg2, """
83 | short
84 | """, arg3=True)
85 |
86 | ############################################################################
87 |
88 | call2(
89 | #short
90 | arg1,
91 | #but
92 | arg2,
93 | #multiline
94 | """
95 | short
96 | """,
97 | # yup
98 | arg3=True)
99 | lcomp = [
100 | element # yup
101 | for element in collection # yup
102 | if element is not None # right
103 | ]
104 | lcomp2 = [
105 | # hello
106 | element
107 | # yup
108 | for element in collection
109 | # right
110 | if element is not None
111 | ]
112 | lcomp3 = [
113 | # This one is actually too long to fit in a single line.
114 | element.split('\n', 1)[0]
115 | # yup
116 | for element in collection.select_elements()
117 | # right
118 | if element is not None
119 | ]
120 | while True:
121 | if False:
122 | continue
123 |
124 | # and round and round we go
125 | # and round and round we go
126 |
127 | # let's return
128 | return Node(
129 | syms.simple_stmt,
130 | [
131 | Node(statement, result),
132 | Leaf(token.NEWLINE, '\n'), # FIXME: \r\n?
133 | ],
134 | )
135 |
136 |
137 | #######################
138 | ### SECTION COMMENT ###
139 | #######################
140 |
141 |
142 | instruction()
143 |
144 | # END COMMENTS
145 | # MORE END COMMENTS
146 |
147 |
148 | # output
149 |
150 |
151 | # Please keep __all__ alphabetized within each category.
152 |
153 | __all__ = [
154 | # Super-special typing primitives.
155 | "Any",
156 | "Callable",
157 | "ClassVar",
158 | # ABCs (from collections.abc).
159 | "AbstractSet", # collections.abc.Set.
160 | "ByteString",
161 | "Container",
162 | # Concrete collection types.
163 | "Counter",
164 | "Deque",
165 | "Dict",
166 | "DefaultDict",
167 | "List",
168 | "Set",
169 | "FrozenSet",
170 | "NamedTuple", # Not really a type.
171 | "Generator",
172 | ]
173 |
174 | not_shareables = [
175 | # singletons
176 | True,
177 | False,
178 | NotImplemented,
179 | ...,
180 | # builtin types and objects
181 | type,
182 | object,
183 | object(),
184 | Exception(),
185 | 42,
186 | 100.0,
187 | "spam",
188 | # user-defined types and objects
189 | Cheese,
190 | Cheese("Wensleydale"),
191 | SubBytes(b"spam"),
192 | ]
193 |
194 | if "PYTHON" in os.environ:
195 | add_compiler(compiler_from_env())
196 | else:
197 | # for compiler in compilers.values():
198 | # add_compiler(compiler)
199 | add_compiler(compilers[(7.0, 32)])
200 | # add_compiler(compilers[(7.1, 64)])
201 |
202 | # Comment before function.
203 | def inline_comments_in_brackets_ruin_everything():
204 | if typedargslist:
205 | parameters.children = [children[0], body, children[-1]] # (1 # )1
206 | else:
207 | parameters.children = [
208 | parameters.children[0], # (2 what if this was actually long
209 | body,
210 | parameters.children[-1], # )2
211 | ]
212 | if (
213 | self._proc is not None
214 | # has the child process finished?
215 | and self._returncode is None
216 | # the child process has finished, but the
217 | # transport hasn't been notified yet?
218 | and self._proc.poll() is None
219 | ):
220 | pass
221 | # no newline before or after
222 | short = [
223 | # one
224 | 1,
225 | # two
226 | 2,
227 | ]
228 |
229 | # no newline after
230 | call(
231 | arg1,
232 | arg2,
233 | """
234 | short
235 | """,
236 | arg3=True,
237 | )
238 |
239 | ############################################################################
240 |
241 | call2(
242 | # short
243 | arg1,
244 | # but
245 | arg2,
246 | # multiline
247 | """
248 | short
249 | """,
250 | # yup
251 | arg3=True,
252 | )
253 | lcomp = [
254 | element for element in collection if element is not None # yup # yup # right
255 | ]
256 | lcomp2 = [
257 | # hello
258 | element
259 | # yup
260 | for element in collection
261 | # right
262 | if element is not None
263 | ]
264 | lcomp3 = [
265 | # This one is actually too long to fit in a single line.
266 | element.split("\n", 1)[0]
267 | # yup
268 | for element in collection.select_elements()
269 | # right
270 | if element is not None
271 | ]
272 | while True:
273 | if False:
274 | continue
275 |
276 | # and round and round we go
277 | # and round and round we go
278 |
279 | # let's return
280 | return Node(
281 | syms.simple_stmt,
282 | [Node(statement, result), Leaf(token.NEWLINE, "\n")], # FIXME: \r\n?
283 | )
284 |
285 |
286 | #######################
287 | ### SECTION COMMENT ###
288 | #######################
289 |
290 |
291 | instruction()
292 |
293 | # END COMMENTS
294 | # MORE END COMMENTS
295 |
--------------------------------------------------------------------------------
/tests/function.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import asyncio
3 | import sys
4 |
5 | from third_party import X, Y, Z
6 |
7 | from library import some_connection, \
8 | some_decorator
9 | f'trigger 3.6 mode'
10 | def func_no_args():
11 | a; b; c
12 | if True: raise RuntimeError
13 | if False: ...
14 | for i in range(10):
15 | print(i)
16 | continue
17 | exec("new-style exec", {}, {})
18 | return None
19 | async def coroutine(arg, exec=False):
20 | "Single-line docstring. Multiline is harder to reformat."
21 | async with some_connection() as conn:
22 | await conn.do_what_i_mean('SELECT bobby, tables FROM xkcd', timeout=2)
23 | await asyncio.sleep(1)
24 | @asyncio.coroutine
25 | @some_decorator(
26 | with_args=True,
27 | many_args=[1,2,3]
28 | )
29 | def function_signature_stress_test(number:int,no_annotation=None,text:str="default",* ,debug:bool=False,**kwargs) -> str:
30 | return text[number:-1]
31 | def spaces(a=1, b=(), c=[], d={}, e=True, f=-1, g=1 if False else 2, h="", i=r''):
32 | offset = attr.ib(default=attr.Factory( lambda: _r.uniform(10000, 200000)))
33 | assert task._cancel_stack[:len(old_stack)] == old_stack
34 | def spaces_types(a: int = 1, b: tuple = (), c: list = [], d: dict = {}, e: bool = True, f: int = -1, g: int = 1 if False else 2, h: str = "", i: str = r''): ...
35 | def spaces2(result= _core.Value(None)):
36 | assert fut is self._read_fut, (fut, self._read_fut)
37 | # EMPTY LINE WITH WHITESPACE (this comment will be removed)
38 | def example(session):
39 | result = session.query(models.Customer.id).filter(
40 | models.Customer.account_id == account_id,
41 | models.Customer.email == email_address,
42 | ).order_by(
43 | models.Customer.id.asc()
44 | ).all()
45 | def long_lines():
46 | if True:
47 | typedargslist.extend(
48 | gen_annotated_params(ast_args.kwonlyargs, ast_args.kw_defaults, parameters, implicit_default=True)
49 | )
50 | typedargslist.extend(
51 | gen_annotated_params(
52 | ast_args.kwonlyargs, ast_args.kw_defaults, parameters, implicit_default=True,
53 | # trailing standalone comment
54 | )
55 | )
56 | _type_comment_re = re.compile(
57 | r"""
58 | ^
59 | [\t ]*
60 | \#[ ]type:[ ]*
61 | (?P<type>
62 | [^#\t\n]+?
63 | )
64 | (?<!ignore)     # note: this will force the non-greedy + in <type> to match
65 | # a trailing space which is why we need the silliness below
66 | (?
71 | (?:\#[^\n]*)?
72 | \n?
73 | )
74 | $
75 | """, re.MULTILINE | re.VERBOSE
76 | )
77 | def trailing_comma():
78 | mapping = {
79 | A: 0.25 * (10.0 / 12),
80 | B: 0.1 * (10.0 / 12),
81 | C: 0.1 * (10.0 / 12),
82 | D: 0.1 * (10.0 / 12),
83 | }
84 | def f(
85 | a,
86 | **kwargs,
87 | ) -> A:
88 | return (
89 | yield from A(
90 | very_long_argument_name1=very_long_value_for_the_argument,
91 | very_long_argument_name2=very_long_value_for_the_argument,
92 | **kwargs,
93 | )
94 | )
95 | def __await__(): return (yield)
96 |
97 | # output
98 |
99 |
100 | #!/usr/bin/env python3
101 | import asyncio
102 | import sys
103 |
104 | from third_party import X, Y, Z
105 |
106 | from library import some_connection, some_decorator
107 |
108 | f"trigger 3.6 mode"
109 |
110 |
111 | def func_no_args():
112 | a
113 | b
114 | c
115 | if True:
116 | raise RuntimeError
117 | if False:
118 | ...
119 | for i in range(10):
120 | print(i)
121 | continue
122 | exec("new-style exec", {}, {})
123 | return None
124 |
125 |
126 | async def coroutine(arg, exec=False):
127 | "Single-line docstring. Multiline is harder to reformat."
128 | async with some_connection() as conn:
129 | await conn.do_what_i_mean("SELECT bobby, tables FROM xkcd", timeout=2)
130 | await asyncio.sleep(1)
131 |
132 |
133 | @asyncio.coroutine
134 | @some_decorator(with_args=True, many_args=[1, 2, 3])
135 | def function_signature_stress_test(
136 | number: int,
137 | no_annotation=None,
138 | text: str = "default",
139 | *,
140 | debug: bool = False,
141 | **kwargs,
142 | ) -> str:
143 | return text[number:-1]
144 |
145 |
146 | def spaces(a=1, b=(), c=[], d={}, e=True, f=-1, g=1 if False else 2, h="", i=r""):
147 | offset = attr.ib(default=attr.Factory(lambda: _r.uniform(10000, 200000)))
148 | assert task._cancel_stack[: len(old_stack)] == old_stack
149 |
150 |
151 | def spaces_types(
152 | a: int = 1,
153 | b: tuple = (),
154 | c: list = [],
155 | d: dict = {},
156 | e: bool = True,
157 | f: int = -1,
158 | g: int = 1 if False else 2,
159 | h: str = "",
160 | i: str = r"",
161 | ):
162 | ...
163 |
164 |
165 | def spaces2(result=_core.Value(None)):
166 | assert fut is self._read_fut, (fut, self._read_fut)
167 |
168 |
169 | def example(session):
170 | result = (
171 | session.query(models.Customer.id)
172 | .filter(
173 | models.Customer.account_id == account_id,
174 | models.Customer.email == email_address,
175 | )
176 | .order_by(models.Customer.id.asc())
177 | .all()
178 | )
179 |
180 |
181 | def long_lines():
182 | if True:
183 | typedargslist.extend(
184 | gen_annotated_params(
185 | ast_args.kwonlyargs,
186 | ast_args.kw_defaults,
187 | parameters,
188 | implicit_default=True,
189 | )
190 | )
191 | typedargslist.extend(
192 | gen_annotated_params(
193 | ast_args.kwonlyargs,
194 | ast_args.kw_defaults,
195 | parameters,
196 | implicit_default=True,
197 | # trailing standalone comment
198 | )
199 | )
200 | _type_comment_re = re.compile(
201 | r"""
202 | ^
203 | [\t ]*
204 | \#[ ]type:[ ]*
205 | (?P<type>
206 | [^#\t\n]+?
207 | )
208 | (?<!ignore)     # note: this will force the non-greedy + in <type> to match
209 | # a trailing space which is why we need the silliness below
210 | (?
215 | (?:\#[^\n]*)?
216 | \n?
217 | )
218 | $
219 | """,
220 | re.MULTILINE | re.VERBOSE,
221 | )
222 |
223 |
224 | def trailing_comma():
225 | mapping = {
226 | A: 0.25 * (10.0 / 12),
227 | B: 0.1 * (10.0 / 12),
228 | C: 0.1 * (10.0 / 12),
229 | D: 0.1 * (10.0 / 12),
230 | }
231 |
232 |
233 | def f(a, **kwargs) -> A:
234 | return (
235 | yield from A(
236 | very_long_argument_name1=very_long_value_for_the_argument,
237 | very_long_argument_name2=very_long_value_for_the_argument,
238 | **kwargs,
239 | )
240 | )
241 |
242 |
243 | def __await__():
244 | return (yield)
245 |
--------------------------------------------------------------------------------
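Both test files above follow the same fixture convention: everything before the "# output" marker is raw input, everything after it is what the test suite expects Black to produce, and a file with no marker must come back unchanged. A minimal sketch of a harness for these fixtures; read_fixture is a hypothetical helper, and black.format_str(src, line_length) is assumed importable from the repo root:

    import black

    def read_fixture(path):
        # Split a fixture into (input, expected output) on the "# output"
        # marker; unmarked files are expected to be formatting-stable.
        with open(path, encoding="utf8") as f:
            text = f.read()
        source, marker, expected = text.partition("\n# output\n")
        return source + "\n", (expected.lstrip("\n") if marker else source + "\n")

    source, expected = read_fixture("tests/function.py")
    assert black.format_str(source, line_length=88) == expected
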
/blib2to3/Grammar.txt:
--------------------------------------------------------------------------------
1 | # Grammar for 2to3. This grammar supports Python 2.x and 3.x.
2 |
3 | # NOTE WELL: You should also follow all the steps listed at
4 | # https://devguide.python.org/grammar/
5 |
6 | # Start symbols for the grammar:
7 | # file_input is a module or sequence of commands read from an input file;
8 | # single_input is a single interactive statement;
9 | # eval_input is the input for the eval() and input() functions.
10 | # NB: compound_stmt in single_input is followed by extra NEWLINE!
11 | file_input: (NEWLINE | stmt)* ENDMARKER
12 | single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
13 | eval_input: testlist NEWLINE* ENDMARKER
14 |
15 | decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
16 | decorators: decorator+
17 | decorated: decorators (classdef | funcdef | async_funcdef)
18 | async_funcdef: ASYNC funcdef
19 | funcdef: 'def' NAME parameters ['->' test] ':' suite
20 | parameters: '(' [typedargslist] ')'
21 | typedargslist: ((tfpdef ['=' test] ',')*
22 | ('*' [tname] (',' tname ['=' test])* [',' ['**' tname [',']]] | '**' tname [','])
23 | | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
24 | tname: NAME [':' test]
25 | tfpdef: tname | '(' tfplist ')'
26 | tfplist: tfpdef (',' tfpdef)* [',']
27 | varargslist: ((vfpdef ['=' test] ',')*
28 | ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]] | '**' vname [','])
29 | | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
30 | vname: NAME
31 | vfpdef: vname | '(' vfplist ')'
32 | vfplist: vfpdef (',' vfpdef)* [',']
33 |
34 | stmt: simple_stmt | compound_stmt
35 | simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
36 | small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
37 | import_stmt | global_stmt | exec_stmt | assert_stmt)
38 | expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
39 | ('=' (yield_expr|testlist_star_expr))*)
40 | annassign: ':' test ['=' test]
41 | testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
42 | augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
43 | '<<=' | '>>=' | '**=' | '//=')
44 | # For normal and annotated assignments, additional restrictions enforced by the interpreter
45 | print_stmt: 'print' ( [ test (',' test)* [','] ] |
46 | '>>' test [ (',' test)+ [','] ] )
47 | del_stmt: 'del' exprlist
48 | pass_stmt: 'pass'
49 | flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
50 | break_stmt: 'break'
51 | continue_stmt: 'continue'
52 | return_stmt: 'return' [testlist]
53 | yield_stmt: yield_expr
54 | raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
55 | import_stmt: import_name | import_from
56 | import_name: 'import' dotted_as_names
57 | import_from: ('from' ('.'* dotted_name | '.'+)
58 | 'import' ('*' | '(' import_as_names ')' | import_as_names))
59 | import_as_name: NAME ['as' NAME]
60 | dotted_as_name: dotted_name ['as' NAME]
61 | import_as_names: import_as_name (',' import_as_name)* [',']
62 | dotted_as_names: dotted_as_name (',' dotted_as_name)*
63 | dotted_name: NAME ('.' NAME)*
64 | global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
65 | exec_stmt: 'exec' expr ['in' test [',' test]]
66 | assert_stmt: 'assert' test [',' test]
67 |
68 | compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
69 | async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
70 | if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
71 | while_stmt: 'while' test ':' suite ['else' ':' suite]
72 | for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
73 | try_stmt: ('try' ':' suite
74 | ((except_clause ':' suite)+
75 | ['else' ':' suite]
76 | ['finally' ':' suite] |
77 | 'finally' ':' suite))
78 | with_stmt: 'with' with_item (',' with_item)* ':' suite
79 | with_item: test ['as' expr]
80 | with_var: 'as' expr
81 | # NB compile.c makes sure that the default except clause is last
82 | except_clause: 'except' [test [(',' | 'as') test]]
83 | suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
84 |
85 | # Backward compatibility cruft to support:
86 | # [ x for x in lambda: True, lambda: False if x() ]
87 | # even while also allowing:
88 | # lambda x: 5 if x else 2
89 | # (But not a mix of the two)
90 | testlist_safe: old_test [(',' old_test)+ [',']]
91 | old_test: or_test | old_lambdef
92 | old_lambdef: 'lambda' [varargslist] ':' old_test
93 |
94 | test: or_test ['if' or_test 'else' test] | lambdef
95 | or_test: and_test ('or' and_test)*
96 | and_test: not_test ('and' not_test)*
97 | not_test: 'not' not_test | comparison
98 | comparison: expr (comp_op expr)*
99 | comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
100 | star_expr: '*' expr
101 | expr: xor_expr ('|' xor_expr)*
102 | xor_expr: and_expr ('^' and_expr)*
103 | and_expr: shift_expr ('&' shift_expr)*
104 | shift_expr: arith_expr (('<<'|'>>') arith_expr)*
105 | arith_expr: term (('+'|'-') term)*
106 | term: factor (('*'|'@'|'/'|'%'|'//') factor)*
107 | factor: ('+'|'-'|'~') factor | power
108 | power: [AWAIT] atom trailer* ['**' factor]
109 | atom: ('(' [yield_expr|testlist_gexp] ')' |
110 | '[' [listmaker] ']' |
111 | '{' [dictsetmaker] '}' |
112 | '`' testlist1 '`' |
113 | NAME | NUMBER | STRING+ | '.' '.' '.')
114 | listmaker: (test|star_expr) ( old_comp_for | (',' (test|star_expr))* [','] )
115 | testlist_gexp: (test|star_expr) ( old_comp_for | (',' (test|star_expr))* [','] )
116 | lambdef: 'lambda' [varargslist] ':' test
117 | trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
118 | subscriptlist: subscript (',' subscript)* [',']
119 | subscript: test | [test] ':' [test] [sliceop]
120 | sliceop: ':' [test]
121 | exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
122 | testlist: test (',' test)* [',']
123 | dictsetmaker: ( ((test ':' test | '**' expr)
124 | (comp_for | (',' (test ':' test | '**' expr))* [','])) |
125 | ((test | star_expr)
126 | (comp_for | (',' (test | star_expr))* [','])) )
127 |
128 | classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
129 |
130 | arglist: argument (',' argument)* [',']
131 |
132 | # "test '=' test" is really "keyword '=' test", but we have no such token.
133 | # These need to be in a single rule to avoid grammar that is ambiguous
134 | # to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
135 | # we explicitly match '*' here, too, to give it proper precedence.
136 | # Illegal combinations and orderings are blocked in ast.c:
137 | # multiple (test comp_for) arguments are blocked; keyword unpackings
138 | # that precede iterable unpackings are blocked; etc.
139 | argument: ( test [comp_for] |
140 | test '=' test |
141 | '**' test |
142 | '*' test )
143 |
144 | comp_iter: comp_for | comp_if
145 | comp_for: [ASYNC] 'for' exprlist 'in' or_test [comp_iter]
146 | comp_if: 'if' old_test [comp_iter]
147 |
148 | # As noted above, testlist_safe extends the syntax allowed in list
149 | # comprehensions and generators. We can't use it indiscriminately in all
150 | # derivations using a comp_for-like pattern because the testlist_safe derivation
151 | # contains comma which clashes with trailing comma in arglist.
152 | #
153 | # This was an issue because the parser would not follow the correct derivation
154 | # when parsing syntactically valid Python code. Since testlist_safe was created
155 | # specifically to handle list comprehensions and generator expressions enclosed
156 | # with parentheses, it's safe to only use it in those. That avoids the issue; we
157 | # can parse code like set(x for x in [],).
158 | #
159 | # The syntax supported by this set of rules is not a valid Python 3 syntax,
160 | # hence the prefix "old".
161 | #
162 | # See https://bugs.python.org/issue27494
163 | old_comp_iter: old_comp_for | old_comp_if
164 | old_comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [old_comp_iter]
165 | old_comp_if: 'if' old_test [old_comp_iter]
166 |
167 | testlist1: test (',' test)*
168 |
169 | # not used in grammar, but may appear in "node" passed from Parser to Compiler
170 | encoding_decl: NAME
171 |
172 | yield_expr: 'yield' [yield_arg]
173 | yield_arg: 'from' test | testlist
174 |
--------------------------------------------------------------------------------
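Grammar.txt is not interpreted at import time; pgen2 compiles it into DFA tables that the parser below walks token by token. A minimal sketch of triggering that compilation directly, assuming it runs from the repository root so the path resolves:

    from blib2to3.pgen2 import driver

    # save=False skips writing the version-tagged .pickle cache that
    # load_grammar (defined in driver.py below) normally maintains.
    g = driver.load_grammar("blib2to3/Grammar.txt", save=False)
    print(len(g.dfas), "nonterminals,", len(g.keywords), "keywords")
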
/blib2to3/pgen2/parse.py:
--------------------------------------------------------------------------------
1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | """Parser engine for the grammar tables generated by pgen.
5 |
6 | The grammar table must be loaded first.
7 |
8 | See Parser/parser.c in the Python distribution for additional info on
9 | how this parsing engine works.
10 |
11 | """
12 |
13 | # Local imports
14 | from . import token
15 |
16 | class ParseError(Exception):
17 | """Exception to signal the parser is stuck."""
18 |
19 | def __init__(self, msg, type, value, context):
20 | Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
21 | (msg, type, value, context))
22 | self.msg = msg
23 | self.type = type
24 | self.value = value
25 | self.context = context
26 |
27 | class Parser(object):
28 | """Parser engine.
29 |
30 | The proper usage sequence is:
31 |
32 | p = Parser(grammar, [converter]) # create instance
33 | p.setup([start]) # prepare for parsing
34 | :
35 | if p.addtoken(...): # parse a token; may raise ParseError
36 | break
37 | root = p.rootnode # root of abstract syntax tree
38 |
39 | A Parser instance may be reused by calling setup() repeatedly.
40 |
41 | A Parser instance contains state pertaining to the current token
42 | sequence, and should not be used concurrently by different threads
43 | to parse separate token sequences.
44 |
45 | See driver.py for how to get input tokens by tokenizing a file or
46 | string.
47 |
48 | Parsing is complete when addtoken() returns True; the root of the
49 | abstract syntax tree can then be retrieved from the rootnode
50 | instance variable. When a syntax error occurs, addtoken() raises
51 | the ParseError exception. There is no error recovery; the parser
52 | cannot be used after a syntax error was reported (but it can be
53 | reinitialized by calling setup()).
54 |
55 | """
56 |
57 | def __init__(self, grammar, convert=None):
58 | """Constructor.
59 |
60 | The grammar argument is a grammar.Grammar instance; see the
61 | grammar module for more information.
62 |
63 | The parser is not ready yet for parsing; you must call the
64 | setup() method to get it started.
65 |
66 | The optional convert argument is a function mapping concrete
67 | syntax tree nodes to abstract syntax tree nodes. If not
68 | given, no conversion is done and the syntax tree produced is
69 | the concrete syntax tree. If given, it must be a function of
70 | two arguments, the first being the grammar (a grammar.Grammar
71 | instance), and the second being the concrete syntax tree node
72 | to be converted. The syntax tree is converted from the bottom
73 | up.
74 |
75 | A concrete syntax tree node is a (type, value, context, nodes)
76 | tuple, where type is the node type (a token or symbol number),
77 | value is None for symbols and a string for tokens, context is
78 | None or an opaque value used for error reporting (typically a
79 | (lineno, offset) pair), and nodes is a list of children for
80 | symbols, and None for tokens.
81 |
82 | An abstract syntax tree node may be anything; this is entirely
83 | up to the converter function.
84 |
85 | """
86 | self.grammar = grammar
87 | self.convert = convert or (lambda grammar, node: node)
88 |
89 | def setup(self, start=None):
90 | """Prepare for parsing.
91 |
92 | This *must* be called before starting to parse.
93 |
94 | The optional argument is an alternative start symbol; it
95 | defaults to the grammar's start symbol.
96 |
97 | You can use a Parser instance to parse any number of programs;
98 | each time you call setup() the parser is reset to an initial
99 | state determined by the (implicit or explicit) start symbol.
100 |
101 | """
102 | if start is None:
103 | start = self.grammar.start
104 | # Each stack entry is a tuple: (dfa, state, node).
105 | # A node is a tuple: (type, value, context, children),
106 | # where children is a list of nodes or None, and context may be None.
107 | newnode = (start, None, None, [])
108 | stackentry = (self.grammar.dfas[start], 0, newnode)
109 | self.stack = [stackentry]
110 | self.rootnode = None
111 | self.used_names = set() # Aliased to self.rootnode.used_names in pop()
112 |
113 | def addtoken(self, type, value, context):
114 | """Add a token; return True iff this is the end of the program."""
115 | # Map from token to label
116 | ilabel = self.classify(type, value, context)
117 | # Loop until the token is shifted; may raise exceptions
118 | while True:
119 | dfa, state, node = self.stack[-1]
120 | states, first = dfa
121 | arcs = states[state]
122 | # Look for a state with this label
123 | for i, newstate in arcs:
124 | t, v = self.grammar.labels[i]
125 | if ilabel == i:
126 | # Look it up in the list of labels
127 | assert t < 256
128 | # Shift a token; we're done with it
129 | self.shift(type, value, newstate, context)
130 | # Pop while we are in an accept-only state
131 | state = newstate
132 | while states[state] == [(0, state)]:
133 | self.pop()
134 | if not self.stack:
135 | # Done parsing!
136 | return True
137 | dfa, state, node = self.stack[-1]
138 | states, first = dfa
139 | # Done with this token
140 | return False
141 | elif t >= 256:
142 | # See if it's a symbol and if we're in its first set
143 | itsdfa = self.grammar.dfas[t]
144 | itsstates, itsfirst = itsdfa
145 | if ilabel in itsfirst:
146 | # Push a symbol
147 | self.push(t, self.grammar.dfas[t], newstate, context)
148 | break # To continue the outer while loop
149 | else:
150 | if (0, state) in arcs:
151 | # An accepting state, pop it and try something else
152 | self.pop()
153 | if not self.stack:
154 | # Done parsing, but another token is input
155 | raise ParseError("too much input",
156 | type, value, context)
157 | else:
158 | # No success finding a transition
159 | raise ParseError("bad input", type, value, context)
160 |
161 | def classify(self, type, value, context):
162 | """Turn a token into a label. (Internal)"""
163 | if type == token.NAME:
164 | # Keep a listing of all used names
165 | self.used_names.add(value)
166 | # Check for reserved words
167 | ilabel = self.grammar.keywords.get(value)
168 | if ilabel is not None:
169 | return ilabel
170 | ilabel = self.grammar.tokens.get(type)
171 | if ilabel is None:
172 | raise ParseError("bad token", type, value, context)
173 | return ilabel
174 |
175 | def shift(self, type, value, newstate, context):
176 | """Shift a token. (Internal)"""
177 | dfa, state, node = self.stack[-1]
178 | newnode = (type, value, context, None)
179 | newnode = self.convert(self.grammar, newnode)
180 | if newnode is not None:
181 | node[-1].append(newnode)
182 | self.stack[-1] = (dfa, newstate, node)
183 |
184 | def push(self, type, newdfa, newstate, context):
185 | """Push a nonterminal. (Internal)"""
186 | dfa, state, node = self.stack[-1]
187 | newnode = (type, None, context, [])
188 | self.stack[-1] = (dfa, newstate, node)
189 | self.stack.append((newdfa, 0, newnode))
190 |
191 | def pop(self):
192 | """Pop a nonterminal. (Internal)"""
193 | popdfa, popstate, popnode = self.stack.pop()
194 | newnode = self.convert(self.grammar, popnode)
195 | if newnode is not None:
196 | if self.stack:
197 | dfa, state, node = self.stack[-1]
198 | node[-1].append(newnode)
199 | else:
200 | self.rootnode = newnode
201 | self.rootnode.used_names = self.used_names
202 |
--------------------------------------------------------------------------------
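The usage sequence from the Parser docstring, spelled out as runnable code. This is a minimal sketch that feeds tokens by hand; it mirrors what Driver.parse_tokens in the next file automates, minus the whitespace-prefix bookkeeping:

    import io
    from blib2to3.pgen2 import driver, grammar, parse, token, tokenize

    g = driver.load_grammar("blib2to3/Grammar.txt", save=False)
    p = parse.Parser(g)  # no converter: the result is the concrete syntax tree
    p.setup()
    tokens = tokenize.generate_tokens(io.StringIO("x = 1\n").readline)
    for type_, value, start, end, line_text in tokens:
        if type_ in (tokenize.COMMENT, tokenize.NL):
            continue  # the Driver folds these into the next token's prefix
        if type_ == token.OP:
            type_ = grammar.opmap[value]  # map operator strings to token numbers
        if p.addtoken(type_, value, ("", start)):
            break  # True: ENDMARKER was shifted, parsing is complete
    root = p.rootnode  # nested (type, value, context, children) tuples
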
/blib2to3/pgen2/driver.py:
--------------------------------------------------------------------------------
1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | # Modifications:
5 | # Copyright 2006 Google, Inc. All Rights Reserved.
6 | # Licensed to PSF under a Contributor Agreement.
7 |
8 | """Parser driver.
9 |
10 | This provides a high-level interface to parse a file into a syntax tree.
11 |
12 | """
13 |
14 | __author__ = "Guido van Rossum <guido@python.org>"
15 |
16 | __all__ = ["Driver", "load_grammar"]
17 |
18 | # Python imports
19 | import codecs
20 | import io
21 | import os
22 | import logging
23 | import pkgutil
24 | import sys
25 |
26 | # Pgen imports
27 | from . import grammar, parse, token, tokenize, pgen
28 |
29 |
30 | class Driver(object):
31 |
32 | def __init__(self, grammar, convert=None, logger=None):
33 | self.grammar = grammar
34 | if logger is None:
35 | logger = logging.getLogger()
36 | self.logger = logger
37 | self.convert = convert
38 |
39 | def parse_tokens(self, tokens, debug=False):
40 | """Parse a series of tokens and return the syntax tree."""
41 | # XXX Move the prefix computation into a wrapper around tokenize.
42 | p = parse.Parser(self.grammar, self.convert)
43 | p.setup()
44 | lineno = 1
45 | column = 0
46 | indent_columns = []
47 | type = value = start = end = line_text = None
48 | prefix = ""
49 | for quintuple in tokens:
50 | type, value, start, end, line_text = quintuple
51 | if start != (lineno, column):
52 | assert (lineno, column) <= start, ((lineno, column), start)
53 | s_lineno, s_column = start
54 | if lineno < s_lineno:
55 | prefix += "\n" * (s_lineno - lineno)
56 | lineno = s_lineno
57 | column = 0
58 | if column < s_column:
59 | prefix += line_text[column:s_column]
60 | column = s_column
61 | if type in (tokenize.COMMENT, tokenize.NL):
62 | prefix += value
63 | lineno, column = end
64 | if value.endswith("\n"):
65 | lineno += 1
66 | column = 0
67 | continue
68 | if type == token.OP:
69 | type = grammar.opmap[value]
70 | if debug:
71 | self.logger.debug("%s %r (prefix=%r)",
72 | token.tok_name[type], value, prefix)
73 | if type in {token.INDENT, token.DEDENT}:
74 | _prefix = prefix
75 | prefix = ""
76 | if type == token.DEDENT:
77 | _indent_col = indent_columns.pop()
78 | prefix, _prefix = self._partially_consume_prefix(_prefix, _indent_col)
79 | if p.addtoken(type, value, (prefix, start)):
80 | if debug:
81 | self.logger.debug("Stop.")
82 | break
83 | prefix = ""
84 | if type == token.INDENT:
85 | indent_columns.append(len(value))
86 | if _prefix.startswith(value):
87 | # Don't double-indent. Since we're delaying the prefix that
88 | # would normally belong to INDENT, we need to put the value
89 | # at the end versus at the beginning.
90 | _prefix = _prefix[len(value):] + value
91 | if type in {token.INDENT, token.DEDENT}:
92 | prefix = _prefix
93 | lineno, column = end
94 | if value.endswith("\n"):
95 | lineno += 1
96 | column = 0
97 | else:
98 | # We never broke out -- EOF is too soon (how can this happen???)
99 | raise parse.ParseError("incomplete input",
100 | type, value, (prefix, start))
101 | return p.rootnode
102 |
103 | def parse_stream_raw(self, stream, debug=False):
104 | """Parse a stream and return the syntax tree."""
105 | tokens = tokenize.generate_tokens(stream.readline)
106 | return self.parse_tokens(tokens, debug)
107 |
108 | def parse_stream(self, stream, debug=False):
109 | """Parse a stream and return the syntax tree."""
110 | return self.parse_stream_raw(stream, debug)
111 |
112 | def parse_file(self, filename, encoding=None, debug=False):
113 | """Parse a file and return the syntax tree."""
114 | with io.open(filename, "r", encoding=encoding) as stream:
115 | return self.parse_stream(stream, debug)
116 |
117 | def parse_string(self, text, debug=False):
118 | """Parse a string and return the syntax tree."""
119 | tokens = tokenize.generate_tokens(io.StringIO(text).readline)
120 | return self.parse_tokens(tokens, debug)
121 |
122 | def _partially_consume_prefix(self, prefix, column):
123 | lines = []
124 | current_line = ""
125 | current_column = 0
126 | wait_for_nl = False
127 | for char in prefix:
128 | current_line += char
129 | if wait_for_nl:
130 | if char == '\n':
131 | if current_line.strip() and current_column < column:
132 | res = ''.join(lines)
133 | return res, prefix[len(res):]
134 |
135 | lines.append(current_line)
136 | current_line = ""
137 | current_column = 0
138 | wait_for_nl = False
139 | elif char == ' ':
140 | current_column += 1
141 | elif char == '\t':
142 | current_column += 4
143 | elif char == '\n':
144 | # unexpected empty line
145 | current_column = 0
146 | else:
147 | # indent is finished
148 | wait_for_nl = True
149 | return ''.join(lines), current_line
150 |
151 |
152 | def _generate_pickle_name(gt, cache_dir=None):
153 | head, tail = os.path.splitext(gt)
154 | if tail == ".txt":
155 | tail = ""
156 | name = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
157 | if cache_dir:
158 | return os.path.join(cache_dir, os.path.basename(name))
159 | else:
160 | return name
161 |
162 |
163 | def load_grammar(gt="Grammar.txt", gp=None,
164 | save=True, force=False, logger=None):
165 | """Load the grammar (maybe from a pickle)."""
166 | if logger is None:
167 | logger = logging.getLogger()
168 | gp = _generate_pickle_name(gt) if gp is None else gp
169 | if force or not _newer(gp, gt):
170 | logger.info("Generating grammar tables from %s", gt)
171 | g = pgen.generate_grammar(gt)
172 | if save:
173 | logger.info("Writing grammar tables to %s", gp)
174 | try:
175 | g.dump(gp)
176 | except OSError as e:
177 | logger.info("Writing failed: %s", e)
178 | else:
179 | g = grammar.Grammar()
180 | g.load(gp)
181 | return g
182 |
183 |
184 | def _newer(a, b):
185 | """Inquire whether file a was written since file b."""
186 | if not os.path.exists(a):
187 | return False
188 | if not os.path.exists(b):
189 | return True
190 | return os.path.getmtime(a) >= os.path.getmtime(b)
191 |
192 |
193 | def load_packaged_grammar(package, grammar_source, cache_dir=None):
194 | """Normally, loads a pickled grammar by doing
195 | pkgutil.get_data(package, pickled_grammar)
196 | where *pickled_grammar* is computed from *grammar_source* by adding the
197 | Python version and using a ``.pickle`` extension.
198 |
199 | However, if *grammar_source* is an extant file, load_grammar(grammar_source)
200 | is called instead. This facilitates using a packaged grammar file when needed
201 | but preserves load_grammar's automatic regeneration behavior when possible.
202 |
203 | """
204 | if os.path.isfile(grammar_source):
205 | gp = _generate_pickle_name(grammar_source, cache_dir) if cache_dir else None
206 | return load_grammar(grammar_source, gp=gp)
207 | pickled_name = _generate_pickle_name(os.path.basename(grammar_source), cache_dir)
208 | data = pkgutil.get_data(package, pickled_name)
209 | g = grammar.Grammar()
210 | g.loads(data)
211 | return g
212 |
213 |
214 | def main(*args):
215 | """Main program, when run as a script: produce grammar pickle files.
216 |
217 | Calls load_grammar for each argument, a path to a grammar text file.
218 | """
219 | if not args:
220 | args = sys.argv[1:]
221 | logging.basicConfig(level=logging.INFO, stream=sys.stdout,
222 | format='%(message)s')
223 | for gt in args:
224 | load_grammar(gt, save=True, force=True)
225 | return True
226 |
227 | if __name__ == "__main__":
228 | sys.exit(int(not main()))
229 |
--------------------------------------------------------------------------------
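For contrast with the manual loop shown after parse.py, the same parse through the high-level Driver interface. This mirrors how Black itself obtains its syntax trees, assuming the sibling pygram and pytree modules of this package:

    from blib2to3 import pygram, pytree
    from blib2to3.pgen2 import driver

    # pytree.convert turns the raw (type, value, context, children) tuples
    # into Node/Leaf objects bottom-up as each nonterminal is popped.
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("def f(a, b):\n    return a + b\n")
    assert str(tree) == "def f(a, b):\n    return a + b\n"  # prefixes round-trip
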
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/stable/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 | import ast
16 | from pathlib import Path
17 | import re
18 | import shutil
19 | import string
20 |
21 | from recommonmark.parser import CommonMarkParser
22 |
23 |
24 | CURRENT_DIR = Path(__file__).parent
25 |
26 |
27 | def get_version():
28 | black_py = CURRENT_DIR / ".." / "black.py"
29 | _version_re = re.compile(r"__version__\s+=\s+(?P<version>.*)")
30 | with open(str(black_py), "r", encoding="utf8") as f:
31 | version = _version_re.search(f.read()).group("version")
32 | return str(ast.literal_eval(version))
33 |
34 |
35 | def make_pypi_svg(version):
36 | template = CURRENT_DIR / "_static" / "pypi_template.svg"
37 | target = CURRENT_DIR / "_static" / "pypi.svg"
38 | with open(str(template), "r", encoding="utf8") as f:
39 | svg = string.Template(f.read()).substitute(version=version)
40 | with open(str(target), "w", encoding="utf8") as f:
41 | f.write(svg)
42 |
43 |
44 | def make_filename(line):
45 | non_letters = re.compile(r"[^a-z]+")
46 | filename = line[3:].rstrip().lower()
47 | filename = non_letters.sub("_", filename)
48 | if filename.startswith("_"):
49 | filename = filename[1:]
50 | if filename.endswith("_"):
51 | filename = filename[:-1]
52 | return filename + ".md"
53 |
54 |
55 | def generate_sections_from_readme():
56 | target_dir = CURRENT_DIR / "_build" / "generated"
57 | readme = CURRENT_DIR / ".." / "README.md"
58 | shutil.rmtree(str(target_dir), ignore_errors=True)
59 | target_dir.mkdir(parents=True)
60 |
61 | output = None
62 | target_dir = target_dir.relative_to(CURRENT_DIR)
63 | with open(str(readme), "r", encoding="utf8") as f:
64 | for line in f:
65 | if line.startswith("## "):
66 | if output is not None:
67 | output.close()
68 | filename = make_filename(line)
69 | output_path = CURRENT_DIR / filename
70 | if output_path.is_symlink() or output_path.is_file():
71 | output_path.unlink()
72 | output_path.symlink_to(target_dir / filename)
73 | output = open(str(output_path), "w", encoding="utf8")
74 | output.write(
75 | "[//]: # (NOTE: THIS FILE IS AUTOGENERATED FROM README.md)\n\n"
76 | )
77 |
78 | if output is None:
79 | continue
80 |
81 | if line.startswith("##"):
82 | line = line[1:]
83 |
84 | output.write(line)
85 |
86 |
87 | # -- Project information -----------------------------------------------------
88 |
89 | project = "Black"
90 | copyright = "2018, Łukasz Langa and contributors to Black"
91 | author = "Łukasz Langa and contributors to Black"
92 |
93 | # Autopopulate version
94 | # The full version, including alpha/beta/rc tags.
95 | release = get_version()
96 | # The short X.Y version.
97 | version = release
98 | for sp in "abcfr":
99 | version = version.split(sp)[0]
100 | make_pypi_svg(release)
101 | generate_sections_from_readme()
102 |
103 |
104 | # -- General configuration ---------------------------------------------------
105 |
106 | # If your documentation needs a minimal Sphinx version, state it here.
107 | #
108 | # needs_sphinx = '1.0'
109 |
110 | # Add any Sphinx extension module names here, as strings. They can be
111 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
112 | # ones.
113 | extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.napoleon"]
114 |
115 | # Add any paths that contain templates here, relative to this directory.
116 | templates_path = ["_templates"]
117 |
118 | source_parsers = {".md": CommonMarkParser}
119 |
120 | # The suffix(es) of source filenames.
121 | # You can specify multiple suffix as a list of string:
122 | source_suffix = [".rst", ".md"]
123 |
124 | # The master toctree document.
125 | master_doc = "index"
126 |
127 | # The language for content autogenerated by Sphinx. Refer to documentation
128 | # for a list of supported languages.
129 | #
130 | # This is also used if you do content translation via gettext catalogs.
131 | # Usually you set "language" from the command line for these cases.
132 | language = None
133 |
134 | # List of patterns, relative to source directory, that match files and
135 | # directories to ignore when looking for source files.
136 | # This pattern also affects html_static_path and html_extra_path .
137 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
138 |
139 | # The name of the Pygments (syntax highlighting) style to use.
140 | pygments_style = "sphinx"
141 |
142 |
143 | # -- Options for HTML output -------------------------------------------------
144 |
145 | # The theme to use for HTML and HTML Help pages. See the documentation for
146 | # a list of builtin themes.
147 | #
148 | html_theme = "alabaster"
149 |
150 | html_sidebars = {
151 | "**": [
152 | "about.html",
153 | "navigation.html",
154 | "relations.html",
155 | "sourcelink.html",
156 | "searchbox.html",
157 | ]
158 | }
159 |
160 | html_theme_options = {
161 | "show_related": False,
162 | "description": "“Any color you like.”",
163 | "github_button": True,
164 | "github_user": "ambv",
165 | "github_repo": "black",
166 | "github_type": "star",
167 | "show_powered_by": True,
168 | "fixed_sidebar": True,
169 | "logo": "logo2.png",
170 | }
171 |
172 |
173 | # Add any paths that contain custom static files (such as style sheets) here,
174 | # relative to this directory. They are copied after the builtin static files,
175 | # so a file named "default.css" will overwrite the builtin "default.css".
176 | html_static_path = ["_static"]
177 |
178 | # Custom sidebar templates, must be a dictionary that maps document names
179 | # to template names.
180 | #
181 | # The default sidebars (for documents that don't match any pattern) are
182 | # defined by theme itself. Builtin themes are using these templates by
183 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
184 | # 'searchbox.html']``.
185 | #
186 | # html_sidebars = {}
187 |
188 |
189 | # -- Options for HTMLHelp output ---------------------------------------------
190 |
191 | # Output file base name for HTML help builder.
192 | htmlhelp_basename = "blackdoc"
193 |
194 |
195 | # -- Options for LaTeX output ------------------------------------------------
196 |
197 | latex_elements = {
198 | # The paper size ('letterpaper' or 'a4paper').
199 | #
200 | # 'papersize': 'letterpaper',
201 | # The font size ('10pt', '11pt' or '12pt').
202 | #
203 | # 'pointsize': '10pt',
204 | # Additional stuff for the LaTeX preamble.
205 | #
206 | # 'preamble': '',
207 | # Latex figure (float) alignment
208 | #
209 | # 'figure_align': 'htbp',
210 | }
211 |
212 | # Grouping the document tree into LaTeX files. List of tuples
213 | # (source start file, target name, title,
214 | # author, documentclass [howto, manual, or own class]).
215 | latex_documents = [
216 | (
217 | master_doc,
218 | "black.tex",
219 | "Documentation for Black",
220 | "Łukasz Langa and contributors to Black",
221 | "manual",
222 | )
223 | ]
224 |
225 |
226 | # -- Options for manual page output ------------------------------------------
227 |
228 | # One entry per manual page. List of tuples
229 | # (source start file, name, description, authors, manual section).
230 | man_pages = [(master_doc, "black", "Documentation for Black", [author], 1)]
231 |
232 |
233 | # -- Options for Texinfo output ----------------------------------------------
234 |
235 | # Grouping the document tree into Texinfo files. List of tuples
236 | # (source start file, target name, title, author,
237 | # dir menu entry, description, category)
238 | texinfo_documents = [
239 | (
240 | master_doc,
241 | "Black",
242 | "Documentation for Black",
243 | author,
244 | "Black",
245 | "The uncompromising Python code formatter",
246 | "Miscellaneous",
247 | )
248 | ]
249 |
250 |
251 | # -- Options for Epub output -------------------------------------------------
252 |
253 | # Bibliographic Dublin Core info.
254 | epub_title = project
255 | epub_author = author
256 | epub_publisher = author
257 | epub_copyright = copyright
258 |
259 | # The unique identifier of the text. This can be a ISBN number
260 | # or the project homepage.
261 | #
262 | # epub_identifier = ''
263 |
264 | # A unique identification for the text.
265 | #
266 | # epub_uid = ''
267 |
268 | # A list of files that should not be packed into the epub file.
269 | epub_exclude_files = ["search.html"]
270 |
271 |
272 | # -- Extension configuration -------------------------------------------------
273 |
274 | autodoc_member_order = "bysource"
275 |
276 | # -- Options for intersphinx extension ---------------------------------------
277 |
278 | # Example configuration for intersphinx: refer to the Python standard library.
279 | intersphinx_mapping = {"https://docs.python.org/3/": None}
280 |
--------------------------------------------------------------------------------
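The slug rules in make_filename above decide which generated page each "## " section of README.md lands in, and the filenames under docs/ have to match them. A quick sanity check of the mapping, runnable in conf.py's namespace (the headings are illustrative README section titles):

    assert make_filename("## Installation and usage\n") == "installation_and_usage.md"
    assert make_filename("## The *Black* code style\n") == "the_black_code_style.md"
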
/blib2to3/pgen2/conv.py:
--------------------------------------------------------------------------------
1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | """Convert graminit.[ch] spit out by pgen to Python code.
5 |
6 | Pgen is the Python parser generator. It is useful to quickly create a
7 | parser from a grammar file in Python's grammar notation. But I don't
8 | want my parsers to be written in C (yet), so I'm translating the
9 | parsing tables to Python data structures and writing a Python parse
10 | engine.
11 |
12 | Note that the token numbers are constants determined by the standard
13 | Python tokenizer. The standard token module defines these numbers and
14 | their names (the names are not used much). The token numbers are
15 | hardcoded into the Python tokenizer and into pgen. A Python
16 | implementation of the Python tokenizer is also available, in the
17 | standard tokenize module.
18 |
19 | On the other hand, symbol numbers (representing the grammar's
20 | non-terminals) are assigned by pgen based on the actual grammar
21 | input.
22 |
23 | Note: this module is pretty much obsolete; the pgen module generates
24 | equivalent grammar tables directly from the Grammar.txt input file
25 | without having to invoke the Python pgen C program.
26 |
27 | """
28 |
29 | # Python imports
30 | import re
31 |
32 | # Local imports
33 | from pgen2 import grammar, token
34 |
35 |
36 | class Converter(grammar.Grammar):
37 | """Grammar subclass that reads classic pgen output files.
38 |
39 | The run() method reads the tables as produced by the pgen parser
40 | generator, typically contained in two C files, graminit.h and
41 | graminit.c. The other methods are for internal use only.
42 |
43 | See the base class for more documentation.
44 |
45 | """
46 |
47 | def run(self, graminit_h, graminit_c):
48 | """Load the grammar tables from the text files written by pgen."""
49 | self.parse_graminit_h(graminit_h)
50 | self.parse_graminit_c(graminit_c)
51 | self.finish_off()
52 |
53 | def parse_graminit_h(self, filename):
54 | """Parse the .h file written by pgen. (Internal)
55 |
56 | This file is a sequence of #define statements defining the
57 | nonterminals of the grammar as numbers. We build two tables
58 | mapping the numbers to names and back.
59 |
60 | """
61 | try:
62 | f = open(filename)
63 | except OSError as err:
64 | print("Can't open %s: %s" % (filename, err))
65 | return False
66 | self.symbol2number = {}
67 | self.number2symbol = {}
68 | lineno = 0
69 | for line in f:
70 | lineno += 1
71 | mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
72 | if not mo and line.strip():
73 | print("%s(%s): can't parse %s" % (filename, lineno,
74 | line.strip()))
75 | else:
76 | symbol, number = mo.groups()
77 | number = int(number)
78 | assert symbol not in self.symbol2number
79 | assert number not in self.number2symbol
80 | self.symbol2number[symbol] = number
81 | self.number2symbol[number] = symbol
82 | return True
83 |
84 | def parse_graminit_c(self, filename):
85 | """Parse the .c file written by pgen. (Internal)
86 |
87 | The file looks as follows. The first two lines are always this:
88 |
89 | #include "pgenheaders.h"
90 | #include "grammar.h"
91 |
92 | After that come four blocks:
93 |
94 | 1) one or more state definitions
95 | 2) a table defining dfas
96 | 3) a table defining labels
97 | 4) a struct defining the grammar
98 |
99 | A state definition has the following form:
100 | - one or more arc arrays, each of the form:
101 | static arc arcs_<n>_<m>[<k>] = {
102 | {<i>, <j>},
103 | ...
104 | };
105 | - followed by a state array, of the form:
106 | static state states_<s>[<t>] = {
107 | {<k>, arcs_<n>_<m>},
108 | ...
109 | };
110 |
111 | """
112 | try:
113 | f = open(filename)
114 | except OSError as err:
115 | print("Can't open %s: %s" % (filename, err))
116 | return False
117 | # The code below essentially uses f's iterator-ness!
118 | lineno = 0
119 |
120 | # Expect the two #include lines
121 | lineno, line = lineno+1, next(f)
122 | assert line == '#include "pgenheaders.h"\n', (lineno, line)
123 | lineno, line = lineno+1, next(f)
124 | assert line == '#include "grammar.h"\n', (lineno, line)
125 |
126 | # Parse the state definitions
127 | lineno, line = lineno+1, next(f)
128 | allarcs = {}
129 | states = []
130 | while line.startswith("static arc "):
131 | while line.startswith("static arc "):
132 | mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
133 | line)
134 | assert mo, (lineno, line)
135 | n, m, k = list(map(int, mo.groups()))
136 | arcs = []
137 | for _ in range(k):
138 | lineno, line = lineno+1, next(f)
139 | mo = re.match(r"\s+{(\d+), (\d+)},$", line)
140 | assert mo, (lineno, line)
141 | i, j = list(map(int, mo.groups()))
142 | arcs.append((i, j))
143 | lineno, line = lineno+1, next(f)
144 | assert line == "};\n", (lineno, line)
145 | allarcs[(n, m)] = arcs
146 | lineno, line = lineno+1, next(f)
147 | mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
148 | assert mo, (lineno, line)
149 | s, t = list(map(int, mo.groups()))
150 | assert s == len(states), (lineno, line)
151 | state = []
152 | for _ in range(t):
153 | lineno, line = lineno+1, next(f)
154 | mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
155 | assert mo, (lineno, line)
156 | k, n, m = list(map(int, mo.groups()))
157 | arcs = allarcs[n, m]
158 | assert k == len(arcs), (lineno, line)
159 | state.append(arcs)
160 | states.append(state)
161 | lineno, line = lineno+1, next(f)
162 | assert line == "};\n", (lineno, line)
163 | lineno, line = lineno+1, next(f)
164 | self.states = states
165 |
166 | # Parse the dfas
167 | dfas = {}
168 | mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
169 | assert mo, (lineno, line)
170 | ndfas = int(mo.group(1))
171 | for i in range(ndfas):
172 | lineno, line = lineno+1, next(f)
173 | mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
174 | line)
175 | assert mo, (lineno, line)
176 | symbol = mo.group(2)
177 | number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
178 | assert self.symbol2number[symbol] == number, (lineno, line)
179 | assert self.number2symbol[number] == symbol, (lineno, line)
180 | assert x == 0, (lineno, line)
181 | state = states[z]
182 | assert y == len(state), (lineno, line)
183 | lineno, line = lineno+1, next(f)
184 | mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
185 | assert mo, (lineno, line)
186 | first = {}
187 | rawbitset = eval(mo.group(1))
188 | for i, c in enumerate(rawbitset):
189 | byte = ord(c)
190 | for j in range(8):
191 | if byte & (1<<j):
--------------------------------------------------------------------------------
/tests/expression.diff:
--------------------------------------------------------------------------------
96 | Python3 > Python2 > COBOL
97 | Life is Life
98 | call()
99 | call(arg)
100 | -call(kwarg='hey')
101 | -call(arg, kwarg='hey')
102 | -call(arg, another, kwarg='hey', **kwargs)
103 | -call(this_is_a_very_long_variable_which_will_force_a_delimiter_split, arg, another, kwarg='hey', **kwargs) # note: no trailing comma pre-3.6
104 | +call(kwarg="hey")
105 | +call(arg, kwarg="hey")
106 | +call(arg, another, kwarg="hey", **kwargs)
107 | +call(
108 | + this_is_a_very_long_variable_which_will_force_a_delimiter_split,
109 | + arg,
110 | + another,
111 | + kwarg="hey",
112 | + **kwargs
113 | +) # note: no trailing comma pre-3.6
114 | call(*gidgets[:2])
115 | call(a, *gidgets[:2])
116 | call(**self.screen_kwargs)
117 | call(b, **self.screen_kwargs)
118 | lukasz.langa.pl
119 | @@ -93,23 +114,25 @@
120 | 1.0 .real
121 | ....__class__
122 | list[str]
123 | dict[str, int]
124 | tuple[str, ...]
125 | -tuple[str, int, float, dict[str, int],]
126 | +tuple[str, int, float, dict[str, int]]
127 | very_long_variable_name_filters: t.List[
128 | t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]],
129 | ]
130 | xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore
131 | sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
132 | )
133 | -xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore
134 | - sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
135 | -)
136 | xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[
137 | ..., List[SomeClass]
138 | -] = classmethod(sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)) # type: ignore
139 | +] = classmethod( # type: ignore
140 | + sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
141 | +)
142 | +xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod(
143 | + sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
144 | +) # type: ignore
145 | slice[0]
146 | slice[0:1]
147 | slice[0:1:2]
148 | slice[:]
149 | slice[:-1]
150 | @@ -133,108 +156,160 @@
151 | numpy[-(c + 1) :, d]
152 | numpy[:, l[-2]]
153 | numpy[:, ::-1]
154 | numpy[np.newaxis, :]
155 | (str or None) if (sys.version_info[0] > (3,)) else (str or bytes or None)
156 | -{'2.7': dead, '3.7': long_live or die_hard}
157 | -{'2.7', '3.6', '3.7', '3.8', '3.9', '4.0' if gilectomy else '3.10'}
158 | +{"2.7": dead, "3.7": long_live or die_hard}
159 | +{"2.7", "3.6", "3.7", "3.8", "3.9", "4.0" if gilectomy else "3.10"}
160 | [1, 2, 3, 4, 5, 6, 7, 8, 9, 10 or A, 11 or B, 12 or C]
161 | (SomeName)
162 | SomeName
163 | (Good, Bad, Ugly)
164 | (i for i in (1, 2, 3))
165 | ((i ** 2) for i in (1, 2, 3))
166 | -((i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c')))
167 | +((i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c")))
168 | (((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3))
169 | (*starred)
170 | -{"id": "1","type": "type","started_at": now(),"ended_at": now() + timedelta(days=10),"priority": 1,"import_session_id": 1,**kwargs}
171 | +{
172 | + "id": "1",
173 | + "type": "type",
174 | + "started_at": now(),
175 | + "ended_at": now() + timedelta(days=10),
176 | + "priority": 1,
177 | + "import_session_id": 1,
178 | + **kwargs,
179 | +}
180 | a = (1,)
181 | -b = 1,
182 | +b = (1,)
183 | c = 1
184 | d = (1,) + a + (2,)
185 | e = (1,).count(1)
186 | f = 1, *range(10)
187 | -what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set(vars_to_remove)
188 | -what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)
189 | -result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == email_address).order_by(models.Customer.id.asc(),).all()
190 | +what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set(
191 | + vars_to_remove
192 | +)
193 | +what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set(
194 | + vars_to_remove
195 | +)
196 | +result = (
197 | + session.query(models.Customer.id)
198 | + .filter(
199 | + models.Customer.account_id == account_id, models.Customer.email == email_address
200 | + )
201 | + .order_by(models.Customer.id.asc())
202 | + .all()
203 | +)
204 | Ø = set()
205 | authors.łukasz.say_thanks()
206 | mapping = {
207 | A: 0.25 * (10.0 / 12),
208 | B: 0.1 * (10.0 / 12),
209 | C: 0.1 * (10.0 / 12),
210 | D: 0.1 * (10.0 / 12),
211 | }
212 |
213 | +
214 | def gen():
215 | yield from outside_of_generator
216 | a = (yield)
217 |
218 | +
219 | async def f():
220 | await some.complicated[0].call(with_args=(True or (1 is not 1)))
221 | -print(* [] or [1])
222 | +
223 | +
224 | +print(*[] or [1])
225 | print(**{1: 3} if False else {x: x for x in range(3)})
226 | -print(* lambda x: x)
227 | -assert(not Test),("Short message")
228 | -assert this is ComplexTest and not requirements.fit_in_a_single_line(force=False), "Short message"
229 | -assert(((parens is TooMany)))
230 | -for x, in (1,), (2,), (3,): ...
231 | -for y in (): ...
232 | -for z in (i for i in (1, 2, 3)): ...
233 | -for i in (call()): ...
234 | -for j in (1 + (2 + 3)): ...
235 | -while(this and that): ...
236 | -a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
237 | -a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp not in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
238 | -a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp is qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
239 | -a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp is not qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
240 | -if (
241 | - threading.current_thread() != threading.main_thread() and
242 | - threading.current_thread() != threading.main_thread() or
243 | - signal.getsignal(signal.SIGINT) != signal.default_int_handler
244 | -):
245 | - return True
246 | -if (
247 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa |
248 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
249 | -):
250 | - return True
251 | -if (
252 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa &
253 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
254 | -):
255 | - return True
256 | -if (
257 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +
258 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
259 | -):
260 | - return True
261 | -if (
262 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -
263 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
264 | -):
265 | - return True
266 | -if (
267 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa *
268 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
269 | -):
270 | - return True
271 | -if (
272 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa /
273 | - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
274 | -):
275 | - return True
276 | -if (
277 | - ~ aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n
278 | -):
279 | - return True
280 | -if (
281 | - ~ aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n
282 | -):
283 | - return True
284 | -if (
285 | - ~ aaaaaaaaaaaaaaaa.a + aaaaaaaaaaaaaaaa.b - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ^ aaaaaaaaaaaaaaaa.i << aaaaaaaaaaaaaaaa.k >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n
286 | +print(*lambda x: x)
287 | +assert not Test, "Short message"
288 | +assert this is ComplexTest and not requirements.fit_in_a_single_line(
289 | + force=False
290 | +), "Short message"
291 | +assert parens is TooMany
292 | +for (x,) in (1,), (2,), (3,):
293 | + ...
294 | +for y in ():
295 | + ...
296 | +for z in (i for i in (1, 2, 3)):
297 | + ...
298 | +for i in call():
299 | + ...
300 | +for j in 1 + (2 + 3):
301 | + ...
302 | +while this and that:
303 | + ...
304 | +a = (
305 | + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp
306 | + in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
307 | +)
308 | +a = (
309 | + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp
310 | + not in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
311 | +)
312 | +a = (
313 | + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp
314 | + is qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
315 | +)
316 | +a = (
317 | + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp
318 | + is not qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
319 | +)
320 | +if (
321 | + threading.current_thread() != threading.main_thread()
322 | + and threading.current_thread() != threading.main_thread()
323 | + or signal.getsignal(signal.SIGINT) != signal.default_int_handler
324 | +):
325 | + return True
326 | +if (
327 | + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
328 | + | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
329 | +):
330 | + return True
331 | +if (
332 | + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
333 | + & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
334 | +):
335 | + return True
336 | +if (
337 | + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
338 | + + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
339 | +):
340 | + return True
341 | +if (
342 | + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
343 | + - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
344 | +):
345 | + return True
346 | +if (
347 | + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
348 | + * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
349 | +):
350 | + return True
351 | +if (
352 | + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
353 | + / aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
354 | +):
355 | + return True
356 | +if (
357 | + ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e
358 | + | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n
359 | +):
360 | + return True
361 | +if (
362 | + ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e
363 | + | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h
364 | + ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n
365 | +):
366 | + return True
367 | +if (
368 | + ~aaaaaaaaaaaaaaaa.a
369 | + + aaaaaaaaaaaaaaaa.b
370 | + - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e
371 | + | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h
372 | + ^ aaaaaaaaaaaaaaaa.i
373 | + << aaaaaaaaaaaaaaaa.k
374 | + >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n
375 | ):
376 | return True
377 | last_call()
378 | # standalone comment at ENDMARKER
379 |
--------------------------------------------------------------------------------
/blib2to3/pgen2/pgen.py:
--------------------------------------------------------------------------------
1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | # Pgen imports
5 | from . import grammar, token, tokenize
6 |
7 | class PgenGrammar(grammar.Grammar):
8 | pass
9 |
10 | class ParserGenerator(object):
11 |
12 | def __init__(self, filename, stream=None):
13 | close_stream = None
14 | if stream is None:
15 | stream = open(filename)
16 | close_stream = stream.close
17 | self.filename = filename
18 | self.stream = stream
19 | self.generator = tokenize.generate_tokens(stream.readline)
20 | self.gettoken() # Initialize lookahead
21 | self.dfas, self.startsymbol = self.parse()
22 | if close_stream is not None:
23 | close_stream()
24 | self.first = {} # map from symbol name to set of tokens
25 | self.addfirstsets()
26 |
27 | def make_grammar(self):
28 | c = PgenGrammar()
29 | names = list(self.dfas.keys())
30 | names.sort()
31 | names.remove(self.startsymbol)
32 | names.insert(0, self.startsymbol)
33 | for name in names:
 34 |             i = 256 + len(c.symbol2number)  # non-terminal numbers start at 256
35 | c.symbol2number[name] = i
36 | c.number2symbol[i] = name
37 | for name in names:
38 | dfa = self.dfas[name]
39 | states = []
40 | for state in dfa:
41 | arcs = []
42 | for label, next in sorted(state.arcs.items()):
43 | arcs.append((self.make_label(c, label), dfa.index(next)))
44 | if state.isfinal:
 45 |                     arcs.append((0, dfa.index(state)))  # a 0-labeled arc to itself marks an accepting state
46 | states.append(arcs)
47 | c.states.append(states)
48 | c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
49 | c.start = c.symbol2number[self.startsymbol]
50 | return c
51 |
52 | def make_first(self, c, name):
53 | rawfirst = self.first[name]
54 | first = {}
55 | for label in sorted(rawfirst):
56 | ilabel = self.make_label(c, label)
57 | ##assert ilabel not in first # XXX failed on <> ... !=
58 | first[ilabel] = 1
59 | return first
60 |
61 | def make_label(self, c, label):
62 | # XXX Maybe this should be a method on a subclass of converter?
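    |         # Labels come in four flavors, e.g. "expr" (a symbol named in
    |         # the grammar), "NAME" (a named token), "'if'" (a keyword),
    |         # and "'+'" (an operator); each kind is interned below.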
63 | ilabel = len(c.labels)
64 | if label[0].isalpha():
65 | # Either a symbol name or a named token
66 | if label in c.symbol2number:
67 | # A symbol name (a non-terminal)
68 | if label in c.symbol2label:
69 | return c.symbol2label[label]
70 | else:
71 | c.labels.append((c.symbol2number[label], None))
72 | c.symbol2label[label] = ilabel
73 | return ilabel
74 | else:
75 | # A named token (NAME, NUMBER, STRING)
76 | itoken = getattr(token, label, None)
77 | assert isinstance(itoken, int), label
78 | assert itoken in token.tok_name, label
79 | if itoken in c.tokens:
80 | return c.tokens[itoken]
81 | else:
82 | c.labels.append((itoken, None))
83 | c.tokens[itoken] = ilabel
84 | return ilabel
85 | else:
86 | # Either a keyword or an operator
87 | assert label[0] in ('"', "'"), label
 88 |             value = eval(label)  # strip the quotes: "'if'" -> "if"
89 | if value[0].isalpha():
90 | # A keyword
91 | if value in c.keywords:
92 | return c.keywords[value]
93 | else:
94 | c.labels.append((token.NAME, value))
95 | c.keywords[value] = ilabel
96 | return ilabel
97 | else:
98 | # An operator (any non-numeric token)
99 | itoken = grammar.opmap[value] # Fails if unknown token
100 | if itoken in c.tokens:
101 | return c.tokens[itoken]
102 | else:
103 | c.labels.append((itoken, None))
104 | c.tokens[itoken] = ilabel
105 | return ilabel
106 |
107 | def addfirstsets(self):
108 | names = list(self.dfas.keys())
109 | names.sort()
110 | for name in names:
111 | if name not in self.first:
112 | self.calcfirst(name)
113 | #print name, self.first[name].keys()
114 |
115 | def calcfirst(self, name):
116 | dfa = self.dfas[name]
117 | self.first[name] = None # dummy to detect left recursion
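    |         # e.g. a left-recursive rule such as  expr: expr '+' term
    |         # finds this None again below and raises ValueError.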
118 | state = dfa[0]
119 | totalset = {}
120 | overlapcheck = {}
121 | for label, next in state.arcs.items():
122 | if label in self.dfas:
123 | if label in self.first:
124 | fset = self.first[label]
125 | if fset is None:
126 | raise ValueError("recursion for rule %r" % name)
127 | else:
128 | self.calcfirst(label)
129 | fset = self.first[label]
130 | totalset.update(fset)
131 | overlapcheck[label] = fset
132 | else:
133 | totalset[label] = 1
134 | overlapcheck[label] = {label: 1}
135 | inverse = {}
136 | for label, itsfirst in overlapcheck.items():
137 | for symbol in itsfirst:
138 | if symbol in inverse:
139 | raise ValueError("rule %s is ambiguous; %s is in the"
140 | " first sets of %s as well as %s" %
141 | (name, symbol, label, inverse[symbol]))
142 | inverse[symbol] = label
143 | self.first[name] = totalset
144 |
145 | def parse(self):
146 | dfas = {}
147 | startsymbol = None
148 | # MSTART: (NEWLINE | RULE)* ENDMARKER
149 | while self.type != token.ENDMARKER:
150 | while self.type == token.NEWLINE:
151 | self.gettoken()
152 | # RULE: NAME ':' RHS NEWLINE
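    |             # e.g. a typical Grammar.txt rule:  pass_stmt: 'pass'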
153 | name = self.expect(token.NAME)
154 | self.expect(token.OP, ":")
155 | a, z = self.parse_rhs()
156 | self.expect(token.NEWLINE)
157 | #self.dump_nfa(name, a, z)
158 | dfa = self.make_dfa(a, z)
159 | #self.dump_dfa(name, dfa)
160 | oldlen = len(dfa)
161 | self.simplify_dfa(dfa)
162 | newlen = len(dfa)
163 | dfas[name] = dfa
164 | #print name, oldlen, newlen
165 | if startsymbol is None:
166 | startsymbol = name
167 | return dfas, startsymbol
168 |
169 | def make_dfa(self, start, finish):
170 | # To turn an NFA into a DFA, we define the states of the DFA
171 | # to correspond to *sets* of states of the NFA. Then do some
172 | # state reduction. Let's represent sets as dicts with 1 for
173 | # values.
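    |         # Sketch: a DFA state is the epsilon-closure (the label-None
    |         # arcs) of a set of NFA states; e.g. for  a: 'x' ['y']  the
    |         # state reached on 'x' is final both with and without 'y'.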
174 | assert isinstance(start, NFAState)
175 | assert isinstance(finish, NFAState)
176 | def closure(state):
177 | base = {}
178 | addclosure(state, base)
179 | return base
180 | def addclosure(state, base):
181 | assert isinstance(state, NFAState)
182 | if state in base:
183 | return
184 | base[state] = 1
185 | for label, next in state.arcs:
186 | if label is None:
187 | addclosure(next, base)
188 | states = [DFAState(closure(start), finish)]
189 | for state in states: # NB states grows while we're iterating
190 | arcs = {}
191 | for nfastate in state.nfaset:
192 | for label, next in nfastate.arcs:
193 | if label is not None:
194 | addclosure(next, arcs.setdefault(label, {}))
195 | for label, nfaset in sorted(arcs.items()):
196 | for st in states:
197 | if st.nfaset == nfaset:
198 | break
199 | else:
200 | st = DFAState(nfaset, finish)
201 | states.append(st)
202 | state.addarc(st, label)
203 | return states # List of DFAState instances; first one is start
204 |
205 | def dump_nfa(self, name, start, finish):
206 | print("Dump of NFA for", name)
207 | todo = [start]
208 | for i, state in enumerate(todo):
209 | print(" State", i, state is finish and "(final)" or "")
210 | for label, next in state.arcs:
211 | if next in todo:
212 | j = todo.index(next)
213 | else:
214 | j = len(todo)
215 | todo.append(next)
216 | if label is None:
217 | print(" -> %d" % j)
218 | else:
219 | print(" %s -> %d" % (label, j))
220 |
221 | def dump_dfa(self, name, dfa):
222 | print("Dump of DFA for", name)
223 | for i, state in enumerate(dfa):
224 | print(" State", i, state.isfinal and "(final)" or "")
225 | for label, next in sorted(state.arcs.items()):
226 | print(" %s -> %d" % (label, dfa.index(next)))
227 |
228 | def simplify_dfa(self, dfa):
229 | # This is not theoretically optimal, but works well enough.
230 | # Algorithm: repeatedly look for two states that have the same
231 | # set of arcs (same labels pointing to the same nodes) and
232 | # unify them, until things stop changing.
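    |         # e.g. two final states with no outgoing arcs compare equal
    |         # under DFAState.__eq__, so one is deleted and every arc that
    |         # pointed at it is redirected to the survivor.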
233 |
234 | # dfa is a list of DFAState instances
235 | changes = True
236 | while changes:
237 | changes = False
238 | for i, state_i in enumerate(dfa):
239 | for j in range(i+1, len(dfa)):
240 | state_j = dfa[j]
241 | if state_i == state_j:
242 | #print " unify", i, j
243 | del dfa[j]
244 | for state in dfa:
245 | state.unifystate(state_j, state_i)
246 | changes = True
247 | break
248 |
249 | def parse_rhs(self):
250 | # RHS: ALT ('|' ALT)*
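    |         # e.g. for  'pass' | 'break'  every alternative is wired to a
    |         # shared start state (aa) and a shared end state (zz) with
    |         # epsilon arcs.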
251 | a, z = self.parse_alt()
252 | if self.value != "|":
253 | return a, z
254 | else:
255 | aa = NFAState()
256 | zz = NFAState()
257 | aa.addarc(a)
258 | z.addarc(zz)
259 | while self.value == "|":
260 | self.gettoken()
261 | a, z = self.parse_alt()
262 | aa.addarc(a)
263 | z.addarc(zz)
264 | return aa, zz
265 |
266 | def parse_alt(self):
267 | # ALT: ITEM+
268 | a, b = self.parse_item()
269 | while (self.value in ("(", "[") or
270 | self.type in (token.NAME, token.STRING)):
271 | c, d = self.parse_item()
272 | b.addarc(c)
273 | b = d
274 | return a, b
275 |
276 | def parse_item(self):
277 | # ITEM: '[' RHS ']' | ATOM ['+' | '*']
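    |         # '[' RHS ']' becomes optional via the extra epsilon arc a->z;
    |         # '+' adds a back-arc z->a (one or more), while '*' also
    |         # returns (a, a) so the item can be skipped entirely.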
278 | if self.value == "[":
279 | self.gettoken()
280 | a, z = self.parse_rhs()
281 | self.expect(token.OP, "]")
282 | a.addarc(z)
283 | return a, z
284 | else:
285 | a, z = self.parse_atom()
286 | value = self.value
287 | if value not in ("+", "*"):
288 | return a, z
289 | self.gettoken()
290 | z.addarc(a)
291 | if value == "+":
292 | return a, z
293 | else:
294 | return a, a
295 |
296 | def parse_atom(self):
297 | # ATOM: '(' RHS ')' | NAME | STRING
298 | if self.value == "(":
299 | self.gettoken()
300 | a, z = self.parse_rhs()
301 | self.expect(token.OP, ")")
302 | return a, z
303 | elif self.type in (token.NAME, token.STRING):
304 | a = NFAState()
305 | z = NFAState()
306 | a.addarc(z, self.value)
307 | self.gettoken()
308 | return a, z
309 | else:
310 | self.raise_error("expected (...) or NAME or STRING, got %s/%s",
311 | self.type, self.value)
312 |
313 | def expect(self, type, value=None):
314 | if self.type != type or (value is not None and self.value != value):
315 | self.raise_error("expected %s/%s, got %s/%s",
316 | type, value, self.type, self.value)
317 | value = self.value
318 | self.gettoken()
319 | return value
320 |
321 | def gettoken(self):
322 | tup = next(self.generator)
323 |         while tup[0] in (tokenize.COMMENT, tokenize.NL):  # skip comments and blank-line NLs
324 | tup = next(self.generator)
325 | self.type, self.value, self.begin, self.end, self.line = tup
326 | #print token.tok_name[self.type], repr(self.value)
327 |
328 | def raise_error(self, msg, *args):
329 | if args:
330 | try:
331 | msg = msg % args
332 |             except:  # fall back to plain joining if interpolation fails
333 | msg = " ".join([msg] + list(map(str, args)))
334 | raise SyntaxError(msg, (self.filename, self.end[0],
335 | self.end[1], self.line))
336 |
337 | class NFAState(object):
338 |
339 | def __init__(self):
340 | self.arcs = [] # list of (label, NFAState) pairs
341 |
342 | def addarc(self, next, label=None):
343 | assert label is None or isinstance(label, str)
344 | assert isinstance(next, NFAState)
345 | self.arcs.append((label, next))
346 |
347 | class DFAState(object):
348 |
349 | def __init__(self, nfaset, final):
350 | assert isinstance(nfaset, dict)
351 | assert isinstance(next(iter(nfaset)), NFAState)
352 | assert isinstance(final, NFAState)
353 | self.nfaset = nfaset
354 | self.isfinal = final in nfaset
355 | self.arcs = {} # map from label to DFAState
356 |
357 | def addarc(self, next, label):
358 | assert isinstance(label, str)
359 | assert label not in self.arcs
360 | assert isinstance(next, DFAState)
361 | self.arcs[label] = next
362 |
363 | def unifystate(self, old, new):
364 | for label, next in self.arcs.items():
365 | if next is old:
366 | self.arcs[label] = new
367 |
368 | def __eq__(self, other):
369 | # Equality test -- ignore the nfaset instance variable
370 | assert isinstance(other, DFAState)
371 | if self.isfinal != other.isfinal:
372 | return False
373 | # Can't just return self.arcs == other.arcs, because that
374 | # would invoke this method recursively, with cycles...
375 | if len(self.arcs) != len(other.arcs):
376 | return False
377 | for label, next in self.arcs.items():
378 | if next is not other.arcs.get(label):
379 | return False
380 | return True
381 |
382 | __hash__ = None # For Py3 compatibility.
383 |
384 | def generate_grammar(filename="Grammar.txt"):
385 | p = ParserGenerator(filename)
386 | return p.make_grammar()
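    |
    | # Usage sketch (assuming a Grammar.txt in the current directory):
    | #   g = generate_grammar("Grammar.txt")
    | #   # g is a PgenGrammar whose dfas/labels tables drive pgen2's parser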
387 |
--------------------------------------------------------------------------------