├── velin ├── __main__.py ├── examples_section_utils.py ├── __init__.py └── ref.py ├── .gitignore ├── example.rst ├── .pre-commit-hooks.yaml ├── .flake8 ├── examples ├── example-0.rst └── example-0.expected ├── .pre-commit-config.yaml ├── LICENSE ├── tests └── test_examples.py ├── pyproject.toml └── Readme.md /velin/__main__.py: -------------------------------------------------------------------------------- 1 | from velin.ref import main 2 | 3 | main() 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .pytest_cache/ 2 | *.ipynb 3 | dist 4 | 5 | __pycache__ 6 | -------------------------------------------------------------------------------- /example.rst: -------------------------------------------------------------------------------- 1 | This is an example text, that I'll try to reformat. It should be relatively long to make sure I can deal with long lines. 2 | 3 | Here is a second line, or paragraph, not sure 4 | With some second lines. 5 | 6 | Here is some indented stuff 7 | 8 | And back to a third 9 | -------------------------------------------------------------------------------- /.pre-commit-hooks.yaml: -------------------------------------------------------------------------------- 1 | - id: velin 2 | name: Velin 3 | description: This hook attempt to reformat docstrings using numpydoc format. 4 | entry: velin 5 | language: python 6 | language_version: python3 7 | types: [text] 8 | types_or: [python] 9 | args: ["--write", "--no-fixers"] 10 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = 3 | # E203: whitespace before ':' - doesn't work well with black 4 | # E402: module level import not at top of file 5 | # E501: line too long - let black worry about that 6 | # E731: do not assign a lambda expression, use a def 7 | # W503: line break before binary operator 8 | E203, E402, E501, E731, W503 9 | exclude = 10 | .eggs 11 | doc 12 | builtins = 13 | ellipsis 14 | -------------------------------------------------------------------------------- /examples/example-0.rst: -------------------------------------------------------------------------------- 1 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum 2 | -------------------------------------------------------------------------------- /examples/example-0.expected: -------------------------------------------------------------------------------- 1 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor 2 | incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis 3 | nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. 4 | Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu 5 | fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in 6 | culpa qui officia deserunt mollit anim id est laborum 7 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | ci: 2 | autoupdate_schedule: weekly 3 | 4 | # https://pre-commit.com/ 5 | repos: 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v4.6.0 8 | hooks: 9 | - id: trailing-whitespace 10 | - id: end-of-file-fixer 11 | - id: check-docstring-first 12 | - id: check-yaml 13 | - id: check-toml 14 | - id: mixed-line-ending 15 | - repo: https://github.com/astral-sh/ruff-pre-commit 16 | rev: v0.6.3 17 | hooks: 18 | - id: ruff 19 | args: [--fix] 20 | - repo: https://github.com/psf/black-pre-commit-mirror 21 | rev: 24.8.0 22 | hooks: 23 | - id: black 24 | - repo: https://github.com/keewis/blackdoc 25 | rev: v0.3.9 26 | hooks: 27 | - id: blackdoc 28 | additional_dependencies: ["black==24.8.0"] 29 | - id: blackdoc-autoupdate-black 30 | - repo: https://github.com/rbubley/mirrors-prettier 31 | rev: v3.3.3 32 | hooks: 33 | - id: prettier 34 | args: [--cache-location=.prettier_cache/cache] 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Matthias Bussonnier 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /tests/test_examples.py: -------------------------------------------------------------------------------- 1 | import glob 2 | 3 | import pytest 4 | 5 | from velin import compute_indents, reformat 6 | 7 | test_files = glob.glob("examples/*.rst") 8 | 9 | expected = [f[:-3] + "expected" for f in test_files] 10 | 11 | 12 | @pytest.mark.parametrize("test_input,expected", zip(test_files, expected)) 13 | def test_reformat_1(test_input, expected): 14 | with open(test_input) as f: 15 | inp = f.read() 16 | with open(expected) as f: 17 | exp = f.read() 18 | assert reformat(inp) == exp.strip("\n") 19 | 20 | 21 | @pytest.mark.parametrize( 22 | "test_input,expected", 23 | [ 24 | ( 25 | """this 26 | is an 27 | example 28 | """, 29 | [0, 0, 0], 30 | ), 31 | ( 32 | """this 33 | 34 | example 35 | """, 36 | [0, None, 0], 37 | ), 38 | ( 39 | """ this 40 | 41 | example 42 | """, 43 | [1, None, 0], 44 | ), 45 | ( 46 | """ this 47 | 48 | example 49 | """, 50 | [1, None, 2], 51 | ), 52 | ], 53 | ) 54 | def test_blocks(test_input, expected): 55 | assert compute_indents(test_input.splitlines()) == expected 56 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["flit_core >=3.2,<4"] 3 | build-backend = "flit_core.buildapi" 4 | 5 | [project] 6 | name = "velin" 7 | authors = [{name = "Matthias Bussonnier", email = "bussonniermatthias@gmail.com"}] 8 | classifiers = ["License :: OSI Approved :: MIT License"] 9 | readme = "Readme.md" 10 | license = {file = "LICENSE"} 11 | dynamic = ["version","description"] 12 | requires-python=">=3.8" 13 | dependencies=[ 14 | "numpydoc", 15 | "pygments", 16 | "black", 17 | "there" 18 | ] 19 | 20 | [project.scripts] 21 | velin = "velin:main" 22 | 23 | [project.urls] 24 | Home = "https://github.com/Carreau/velin" 25 | 26 | [tool.ruff] 27 | builtins = ["ellipsis"] 28 | exclude = [ 29 | ".git", 30 | ".eggs", 31 | "build", 32 | "dist", 33 | "__pycache__", 34 | "docs", 35 | ] 36 | target-version = "py310" 37 | 38 | extend-include = ["*.ipynb"] 39 | line-length = 100 40 | 41 | [tool.ruff.lint] 42 | select = [ 43 | "F", # Pyflakes 44 | "E", # Pycodestyle 45 | "I", # isort 46 | "UP", # Pyupgrade 47 | "TID", # tidy imports 48 | ] 49 | ignore = [ 50 | "E402", # E402: module level import not at top of file 51 | "E501", # E501: line too long - let black worry about that 52 | "E731", # E731: do not assign a lambda expression, use a def 53 | ] 54 | fixable = ["I", "TID"] 55 | extend-safe-fixes = [ 56 | "TID252", # absolute imports 57 | ] 58 | 59 | 60 | [tool.ruff.lint.isort] 61 | known-first-party = ["velin"] 62 | known-third-party = [] 63 | 64 | [tool.ruff.lint.flake8-tidy-imports] 65 | ban-relative-imports = "all" 66 | -------------------------------------------------------------------------------- /velin/examples_section_utils.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | from itertools import chain, cycle 3 | 4 | import black 5 | 6 | 7 | def reformat(lines, indent=4): 8 | text = "\n".join(lines) 9 | if "doctest:" in text: 10 | return lines 11 | try: 12 | mode = black.FileMode() 13 | mode.line_length -= indent + 4 14 | return black.format_str(text, mode=black.FileMode()).splitlines() 15 | except Exception as e: 16 | raise ValueError("could not reformat:" + repr(text)) from 
e 17 | 18 | 19 | def insert_promt(lines): 20 | new = [] 21 | for p, line in zip(chain([">>> "], cycle(["... "])), lines): 22 | new.append(p + line) 23 | return new 24 | 25 | 26 | def splitblank(list): 27 | items = [] 28 | current = [] 29 | for line in list: 30 | if not line.strip(): 31 | if current: 32 | items.append(current) 33 | current = [] 34 | else: 35 | current.append(line) 36 | if current: 37 | items.append(current) 38 | return items 39 | 40 | 41 | InOut = namedtuple("InOut", ["in_", "out"]) 42 | Text = namedtuple("Text", ["in_", "out"]) 43 | 44 | 45 | def InOutText(a, b): 46 | if not a: 47 | return Text(a, b) 48 | else: 49 | return InOut(a, b) 50 | 51 | 52 | def splitcode(lines): 53 | """ 54 | Split a block of lines without blank lines into categories. 55 | 56 | Code lines start with >>> or ..., 57 | then outputs, start with none of the two above. 58 | 59 | """ 60 | items = [] 61 | in_ = [] 62 | out = [] 63 | if not lines[0].startswith(">>>"): 64 | return [InOutText([], lines)] 65 | 66 | state = "notcode" 67 | for i, line in enumerate(lines): 68 | if line.startswith(">>> ") and state == "notcode": 69 | state = "code" 70 | if in_ or out: 71 | items.append(InOutText(in_, out)) 72 | in_, out = [], [] 73 | 74 | in_.append(line[4:]) 75 | # ... can appear in pandas output. 76 | elif (line.startswith("... ") or line.startswith(">>> ")) and state == "code": 77 | in_.append(line[4:]) 78 | else: 79 | state = "notcode" 80 | out.append(line) 81 | if in_ or out: 82 | items.append(InOutText(in_, out)) 83 | return items 84 | 85 | 86 | def reformat_example_lines(ex, indent=4): 87 | from there import print 88 | 89 | oo = [] 90 | # print(ex) 91 | try: 92 | blocks = splitblank(ex) 93 | for block in blocks: 94 | # print(block) 95 | codes = splitcode(block) 96 | for in_, out in codes: 97 | oo.extend(insert_promt(reformat(in_, indent=4))) 98 | if out: 99 | oo.extend(out) 100 | oo.append("") 101 | return oo[:-1] 102 | except Exception: 103 | print(block) 104 | 105 | raise 106 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # Vélin 2 | 3 | French for Vellum 4 | 5 | > Vellum is prepared animal skin or "membrane", typically used as a material for writing on. Parchment is another term 6 | > for this material, and if vellum is distinguished from this, it is by vellum being made from calfskin, as opposed to 7 | > that from other animals,[1] or otherwise being of higher quality 8 | 9 | ## install 10 | 11 | You may need to get a modified version of numpydoc depending on the stage of development. 12 | 13 | ``` 14 | $ git clone https://github.com/Carreau/velin 15 | $ cd velin 16 | $ pip install -e . 17 | ``` 18 | 19 | (You will need a quite recent pip and flit to do so) 20 | 21 | ## Autoreformat docstrings 22 | 23 | This assume your docstrings are in RST/Numpydoc format, and will try to fix 24 | common formatting mistakes and typo. 25 | 26 | ``` 27 | velin [--write] or 28 | ``` 29 | 30 | Without `--write` vélin will print the suggested diff, with `--write` it will _attempt_ to update the files. 
31 | 32 | ## options 33 | 34 | (likely not up to date, make sure to run `velin --help` to check for new,changed 35 | or removed options) 36 | 37 | ``` 38 | $ velin --help 39 | usage: velin [-h] [--context context] [--unsafe] [--check] [--no-diff] [--black] [--with-placeholder] [--no-color] [--compact] [--no-fail] 40 | [--space-in-see-also-title] [--space-in-notes-title] [--no-fixers] [--write] 41 | path [path ...] 42 | 43 | reformat the docstrigns of some file 44 | 45 | positional arguments: 46 | path Files or folder to reformat 47 | 48 | optional arguments: 49 | -h, --help show this help message and exit 50 | --context context Number of context lines in the diff 51 | --unsafe Lift some safety feature (don't fail if updating the docstring is not indempotent 52 | --check Print the list of files/lines number and exit with a non-0 exit status, Use it for CI. 53 | --no-diff Do not print the diff 54 | --black Do not run black on examples 55 | --with-placeholder insert missing sections/parameters placehoders 56 | --no-color 57 | --compact Please ignore 58 | --no-fail 59 | --space-in-see-also-title 60 | --space-in-notes-title 61 | --no-fixers try to only reformat and does not run fixers heuristics 62 | --write Try to write the updated docstring to the files 63 | ``` 64 | 65 | ## --no-fixers 66 | 67 | Beyond reformatting, vélin will by default try to run a number of heuristics to update your docstrings: 68 | 69 | - Remove non existing but documented parameters, 70 | - Rename parameter with typos, 71 | - insert space before colon when necessary. 72 | 73 | Unfortunately sometime those heuristics can remove actual content, for example in the malformed DocString below, the 74 | Return section would be removed 75 | 76 | ``` 77 | def sum(a, b): 78 | """ 79 | Parameters 80 | ---------- 81 | a : int 82 | a number 83 | b : int 84 | another number 85 | Returns 86 | ------- 87 | s : 88 | sum of a and b 89 | 90 | See Also 91 | -------- 92 | substract 93 | """ 94 | return a + b 95 | 96 | ``` 97 | 98 | As there is a missing blank line before return Numpydoc will parse this a 5 99 | parameters, `a`, `b`, `Returns`, `-------` and `s`. As only `a` and `b` are in 100 | the signature, it will remove the other. 101 | 102 | While in this case it will try not to do that because we detect that `------` is 103 | likely an underline, there are other case where it's unclear what to do. 104 | 105 | You can thus disable those fixers by passing the option `--no-fixers` 106 | 107 | ## setup.cfg 108 | 109 | Ignore files with ignore_patterns, `filename` or `filename:qualified_name`. 110 | You can (try to), put patterns in there, but it's not guarantied to work yet. 111 | 112 | ``` 113 | [velin] 114 | ignore_patterns = 115 | path/to/a.py:Class.func 116 | path/to/b.py:Class.func 117 | ``` 118 | 119 | ## kind of things it fixes 120 | 121 | - Spacing around colon, 122 | - If one parameter has typo wrt function signature: fix it. 123 | - Insert all missing parameters with placeholders. 124 | -------------------------------------------------------------------------------- /velin/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | French for Vellum 3 | 4 | > Vellum is prepared animal skin or "membrane", typically used as a material for 5 | > writing on. 
Parchment is another term > for this material, and if vellum is 6 | > distinguished from this, it is by vellum being made from calfskin, as opposed to 7 | > that from other animals,[1] or otherwise being of higher quality 8 | 9 | Tools to automatically reformat docstrings based using numpydoc format. 10 | 11 | """ 12 | 13 | import textwrap 14 | 15 | from there import print 16 | 17 | from velin.ref import NumpyDocString, main 18 | 19 | __version__ = "0.0.12" 20 | 21 | 22 | def parse(input): 23 | """ 24 | parse an input string into token/tree. 25 | 26 | For now only return a list of tokens 27 | 28 | """ 29 | tokens = [] 30 | for line in input.splitlines(): 31 | tokens.extend(line.split(" ")) 32 | return tokens 33 | 34 | 35 | def transform(tokens): 36 | """ 37 | Accumulate tokens in lines. 38 | 39 | Add token (and white spaces) to a line until it overflow 80 chars. 40 | """ 41 | lines = [] 42 | current_line = [] 43 | for t in tokens: 44 | if sum([len(x) + 1 for x in current_line]) + len(t) > 80: 45 | lines.append(current_line) 46 | current_line = [] 47 | current_line.append(t) 48 | if current_line: 49 | lines.append(current_line) 50 | return lines 51 | 52 | 53 | def format(lines): 54 | s = "" 55 | for line in lines: 56 | s = s + " ".join(line) 57 | s = s + "\n" 58 | return s[:-1] 59 | 60 | 61 | def compute_indents(lines): 62 | """ 63 | Given a list of lines, compute indentation in number of spaces. 64 | 65 | Indentation is only supported if spaces, tabs raise a NotImplementedError as we don't know how wide a tab is. 66 | We also treat complete blank lines as `None` indentation. 67 | 68 | """ 69 | assert isinstance(lines, list) 70 | results = [] 71 | for line in lines: 72 | s = line.lstrip() 73 | if not s: 74 | results.append(None) 75 | continue 76 | indent = len(line) - len(s) 77 | if "\t" in line[:indent]: 78 | raise NotImplementedError 79 | 80 | results.append(indent) 81 | return results 82 | 83 | 84 | class TryNext(Exception): 85 | pass 86 | 87 | 88 | class Header: 89 | def __init__(self, title, level): 90 | self.title = title 91 | # if str(title) not in NumpyDocString.sections.keys(): 92 | # print(f'??? |{title}|') 93 | self.level = level 94 | 95 | def __repr__(self): 96 | return f"
" 97 | 98 | def __str__(self): 99 | tt = str(self.title) 100 | return tt + "\n" + "=" * len(tt) 101 | 102 | def _repr_html_(self): 103 | return f"" + str(self.title) + f"" 104 | 105 | @classmethod 106 | def parse(cls, lines): 107 | if len(lines) < 2: 108 | raise TryNext 109 | assert lines 110 | warnings = [] 111 | title, _, wn = Raw.parse(lines) 112 | lgth = len(title.lines[0]) 113 | if ( 114 | len(set(lines[1])) == 1 115 | and len(lines[1]) != 1 116 | and len(lines[1]) != lgth 117 | and ">>>" not in lines[0] 118 | and "::" not in lines[1] 119 | ): 120 | 121 | warnings.append("======= WRONG LEN? ======") 122 | warnings.append("L0: " + lines[0]) 123 | warnings.append("L1: " + lines[1]) 124 | warnings.append("=========================") 125 | level = allunders(lines[1], lgth) 126 | return cls(title, level), lines[2:], wn 127 | 128 | 129 | def allunders(line, lenght): 130 | 131 | if not len(set(line)) == 1 or not len(line) == lenght: 132 | raise TryNext 133 | if next(iter(set(line))) not in "-=~`": 134 | raise TryNext 135 | return 1 136 | 137 | 138 | class Any: 139 | def __init__(self, line): 140 | assert isinstance(line, str) 141 | self.lines = [line] 142 | 143 | def __repr__(self): 144 | return f"<{self.__class__.__name__}: {self.lines!r} >" 145 | 146 | def __str__(self): 147 | return self.lines[0] 148 | 149 | @classmethod 150 | def parse(cls, lines): 151 | return cls(lines[0]), lines[1:], [] 152 | 153 | 154 | class Raw(Any): 155 | pass 156 | 157 | 158 | class RawTilNextHeader: 159 | @classmethod 160 | def parse(cls, lines): 161 | ll = [] 162 | for i, line in enumerate(lines): 163 | try: 164 | Header.parse(lines[i:]) 165 | break 166 | except TryNext: 167 | pass 168 | 169 | ll.append(line) 170 | else: 171 | return cls(ll), [], [] 172 | 173 | return cls(ll), lines[i:], [] 174 | 175 | def __init__(self, items): 176 | assert isinstance(items, list) 177 | self.items = items 178 | 179 | def __repr__(self): 180 | return f"<{self.__class__.__name__}: {self.items!r} >" 181 | 182 | def __str__(self): 183 | return "\n".join(self.items) 184 | 185 | def _repr_html_(self): 186 | return """
""" + str(self) + """
""" 187 | 188 | 189 | class DescriptionList: 190 | @classmethod 191 | def parse(cls, lines): 192 | dct = {} 193 | key, values = None, [] 194 | for i, line in enumerate(lines): 195 | try: 196 | Header.parse(lines[i:]) 197 | dct[key] = values 198 | break 199 | except TryNext: 200 | pass 201 | 202 | if not line.startswith(" "): 203 | dct[key] = values 204 | key, values = line.strip(), [] 205 | if "," in key: 206 | raise TryNext 207 | else: 208 | values.append(line) 209 | if None in dct: 210 | del dct[None] 211 | return cls(dct), lines[i:], [] 212 | 213 | def __init__(self, items): 214 | assert isinstance(items, dict) 215 | self.items = items 216 | 217 | def __repr__(self): 218 | return f"<{self.__class__.__name__}: {self.items!r} >" 219 | 220 | def __str__(self): 221 | def _f(v): 222 | return "\n".join(v) 223 | 224 | return "\n".join(f"{k}:\n{_f(v)}" for k, v in self.items.items()) 225 | 226 | def _repr_html_(self): 227 | def _f(v): 228 | return "\n".join(v) 229 | 230 | return ( 231 | """
""" 232 | + "\n".join( 233 | f"
{k}
\n
{_f(v)}
" for k, v in self.items.items() 234 | ) 235 | + """
""" 236 | ) 237 | 238 | 239 | class Listing: 240 | @classmethod 241 | def parse(cls, lines): 242 | assert "," in lines[0], lines[0] 243 | listing = [line.strip() for line in lines[0].split(",")] 244 | return cls(listing), lines[1:], [] 245 | 246 | def __init__(self, listing): 247 | self.listing = listing 248 | 249 | def __repr__(self): 250 | return f"<{self.__class__.__name__}: {self.listing!r} >" 251 | 252 | def __str__(self): 253 | return ",".join([f"{k}" for k in self.listing]) 254 | 255 | def _repr_html_(self): 256 | return ( 257 | """
    """ + "\n".join(f"
  • {k}
  • " for k in self.listing) + """
""" 258 | ) 259 | 260 | 261 | class Base: 262 | def attrs(self): 263 | stuff = {} 264 | for it in dir(self): 265 | if it.startswith("_"): 266 | continue 267 | att = getattr(self, it) 268 | if callable(att): 269 | continue 270 | stuff[it] = att 271 | return stuff 272 | 273 | def __repr__(self): 274 | return ( 275 | f"<{self.__class__.__name__} " 276 | + ", ".join([k + ":" + repr(x) for k, x in self.attrs().items()]) 277 | + ">" 278 | ) 279 | 280 | 281 | class EntryParser(Base): 282 | @classmethod 283 | def parse(cls, lines): 284 | if len(lines) == 1: 285 | l0, l1 = lines[0], "" 286 | rest = [] 287 | elif len(lines) < 3: 288 | l0, l1 = lines 289 | rest = [] 290 | else: 291 | l0, l1, *rest = lines 292 | indent = len(l1) - len(l1.lstrip()) 293 | i = 0 294 | if indent: 295 | cont = [l1] 296 | for i, line in enumerate(rest): 297 | if line.startswith(" " * indent) or not line.strip(): 298 | cont.append(line) 299 | else: 300 | break 301 | else: 302 | i += 1 303 | else: 304 | i = 0 305 | cont = [] 306 | rest = lines[1:] 307 | if ":" in l0: 308 | try: 309 | head, t = (x.strip() for x in l0.split(":", maxsplit=1)) 310 | except ValueError: 311 | print("... Entry TryNext", lines[:5]) 312 | raise TryNext 313 | else: 314 | head, t = l0.strip(), "" 315 | 316 | if " " in head: 317 | if not t and not cont: 318 | # print('... list of things ? ', head) 319 | return [cls(h.strip(), "", []) for h in head.split(",")], rest[i:] 320 | 321 | if "See Also" in head: 322 | print("-------------->", lines[:3]) 323 | 324 | return [cls(head, t, cont)], rest[i:] 325 | 326 | def __init__(self, head, t, rest): 327 | # assert (head.strip() or t.strip() or [r.strip() for r in rest]) 328 | self.head = head 329 | self.t = t 330 | self.rest = rest 331 | 332 | def __str__(self): 333 | r = "\n".join(self.rest) 334 | if r: 335 | extra = f"\n{r}" 336 | else: 337 | extra = "" 338 | return f"""{self.head}:{self.t}{extra}""" 339 | 340 | def _format_head(self, head, resolver): 341 | return resolver(head) 342 | 343 | def _format_type(self, key, resolver): 344 | if self.rest: 345 | return key 346 | else: 347 | return "" 348 | 349 | def _format_core(self, core, resolver): 350 | if self.rest: 351 | return "
" + "\n".join(core) + "
" 352 | else: 353 | return f"
{self.t}
" 354 | 355 | def _repr_html_(self, resolver): 356 | 357 | return f"
{self._format_head(self.head, resolver)}:{self._format_type(self.t, resolver)}
{self._format_core(self.rest, resolver)}
" 358 | 359 | 360 | class DeflistParser(Base): 361 | def __init__(self, entries): 362 | self.entries = entries 363 | 364 | @classmethod 365 | def parse(cls, lines): 366 | ents = [] 367 | while lines: 368 | if not lines[0].strip(): 369 | lines = lines[1:] 370 | continue 371 | try: 372 | Header.parse(lines) 373 | break 374 | except TryNext: 375 | pass 376 | e, lines = EntryParser.parse(lines) 377 | ents.extend(e) 378 | 379 | return cls(ents), lines 380 | 381 | def __str__(self): 382 | return "\n".join(str(x) for x in self.entries) 383 | 384 | def _repr_html_(self, resolver=None): 385 | 386 | return ( 387 | """
(Deflist)""" 388 | + "\n".join(x._repr_html_(resolver) for x in self.entries) 389 | + """
""" 390 | ) 391 | 392 | 393 | class Mapping: 394 | @classmethod 395 | def parse(cls, lines): 396 | mapping = {} 397 | k = None 398 | for i, line in enumerate(lines): 399 | if not line.strip(): 400 | continue 401 | try: 402 | Header.parse(lines[i:]) 403 | break 404 | except TryNext: 405 | pass 406 | if line.startswith(" ") and k: 407 | try: 408 | mapping[k.strip()] += line.strip() 409 | except TypeError: 410 | raise TryNext 411 | 412 | if ":" in line: 413 | k, v = line.split(":", maxsplit=1) 414 | mapping[k.strip()] = v 415 | elif "," not in line: 416 | k, v = line.strip(), None 417 | mapping[k] = v 418 | else: 419 | for k in line.split(","): 420 | mapping[k.strip()] = None 421 | 422 | return cls(mapping), lines[i:], [] 423 | 424 | def __init__(self, mapping): 425 | self.mapping = mapping 426 | 427 | def __repr__(self): 428 | return f"<{self.__class__.__name__}: {self.mapping!r} >" 429 | 430 | def __str__(self): 431 | return "\n".join([f"{k}: {v}" for k, v in self.mapping.items()]) 432 | 433 | def _format_one(self, key, resolver): 434 | return resolver(key) 435 | 436 | def _format_pair(self, key, value, resolver): 437 | k = self._format_one(key, resolver) 438 | v = value 439 | return f"
<dt>{k}</dt>\n<dd>{v}</dd>" 440 | 441 | def _repr_html_(self, resolver): 442 | 443 | return ( 444 | """<dl>(Mapping)""" 445 | + "\n".join( 446 | [self._format_pair(k, v, resolver) for k, v in self.mapping.items()] 447 | ) 448 | + """</dl>
""" 449 | ) 450 | 451 | 452 | class CodeBlock: 453 | @classmethod 454 | def parse(cls, lines): 455 | if not lines[0].startswith((">>>", " >>>")): 456 | raise TryNext 457 | 458 | _lines = [] 459 | for i, line in enumerate(lines): 460 | if not line.strip(): 461 | break 462 | _lines.append(line) 463 | else: 464 | return cls(_lines), [], [] 465 | return cls(_lines), lines[i:], [] 466 | 467 | def __init__(self, lines): 468 | self.lines = lines 469 | 470 | def __repr__(self): 471 | return f"<{self.__class__.__name__}: {self.lines!r} >" 472 | 473 | def _repr_html_(self): 474 | return "
" + "\n".join(self.lines) + "
" 475 | 476 | def __str__(self): 477 | return "\n".join(self.lines) 478 | 479 | 480 | class Doc: 481 | @classmethod 482 | def parse(cls, lines, *, name=None, sig=None): 483 | parsed = [] 484 | warnings = [] 485 | while lines: 486 | for t in ( 487 | Section.parse, 488 | Header.parse, 489 | BlankLine.parse, 490 | CodeBlock.parse, 491 | Paragraph.parse, 492 | RawTilNextHeader.parse, 493 | failed, 494 | ): 495 | try: 496 | node, lines_, wn = t(lines) 497 | warnings.extend(wn) 498 | if isinstance(node, list): 499 | parsed.extend(node) 500 | else: 501 | parsed.append(node) 502 | if len(lines_) >= len(lines): 503 | raise ValueError("Could not parse", lines) 504 | lines = lines_ 505 | break 506 | except TryNext: 507 | pass 508 | return cls(parsed, name=name, sig=sig), warnings 509 | 510 | def __init__(self, nodes, name=None, sig=None): 511 | self.nodes = nodes 512 | self.name = name 513 | self.sig = sig 514 | self.backrefs = [] 515 | 516 | def __repr__(self): 517 | return ( 518 | f"<{self.__class__.__name__}\n" 519 | + "\n".join([textwrap.indent(repr(n), " ") for n in self.nodes]) 520 | + "\n>" 521 | ) 522 | 523 | def see_also(self): 524 | for i, p in enumerate(self.nodes): 525 | if isinstance(p, Section) and p.header.title.lines[0] == "See Also": 526 | break 527 | else: 528 | return [] 529 | 530 | node = self.nodes[i + 1] 531 | if isinstance(node, Mapping): 532 | return node.mapping.keys() 533 | elif isinstance(node, DeflistParser): 534 | return [x.head for x in node.entries] 535 | else: 536 | print("not a mapping", repr(node)) 537 | pass 538 | 539 | def _repr_html_(self, resolver=lambda x: None): 540 | base = """ 541 | 542 | 543 | 544 | 545 | Document 546 | 547 | 548 | 549 | 550 | {} 551 | {} 552 | {} 553 | 554 | 555 | """ 556 | 557 | h1 = "" 558 | if self.name: 559 | h1 += self.name 560 | if self.sig: 561 | h1 += self.sig 562 | if h1: 563 | h1 = "

" + h1 + "

" 564 | 565 | def f_(it): 566 | return f"{it}" 567 | 568 | if self.backrefs: 569 | br_html = "

Back references

" + ", ".join( 570 | f_(b) for b in self.backrefs 571 | ) 572 | else: 573 | br_html = "" 574 | 575 | def _resolver(k): 576 | ref = resolver(k) 577 | if ref is None: 578 | # print("could not resolve", k, f"({self.name})") 579 | return k 580 | else: 581 | # print("resolved", k, f"({self.name})") 582 | return f"{k}" 583 | 584 | hrepr = [] 585 | for n in self.nodes: 586 | if isinstance(n, Mapping | DeflistParser): 587 | hrepr.append(n._repr_html_(_resolver)) 588 | else: 589 | hrepr.append(n._repr_html_()) 590 | 591 | return base.format(h1, "\n".join(hrepr), br_html) 592 | 593 | 594 | class Section: 595 | """ 596 | start with a header, but have custom parsing because we know about it in numpydoc. 597 | """ 598 | 599 | @classmethod 600 | def parse(cls, lines): 601 | warnings = [] 602 | header, rest, wn = Header.parse(lines) 603 | warnings.extend(wn) 604 | aliases = { 605 | "Return": "Returns", 606 | "Arguments": "Parameters", 607 | "arguments": "Parameters", 608 | "additional keyword arguments:": "Parameters", 609 | "Other parameters": "Other Parameters", 610 | "Exceptions": "Raises", 611 | } 612 | ht = header.title.lines[0] 613 | if ht in aliases: 614 | warnings.append(f"Found `{ht}`, did you mean `{aliases[ht]}` ?") 615 | header.title.lines[0] = aliases[ht] 616 | ht = header.title.lines[0] 617 | if ht in ( 618 | "Parameters", 619 | "Returns", 620 | "Class Attributes", 621 | "Options", 622 | "Attributes", 623 | "Yields", 624 | "Raises", 625 | "Exceptions", 626 | "Methods", 627 | "Warns", 628 | "Other Parameters", 629 | "Warnings", 630 | "Arguments", 631 | ): 632 | try: 633 | try: 634 | core, rest, _ = Paragraph.parse(rest) 635 | except TryNext: 636 | core, rest = DeflistParser.parse(rest) 637 | except TryNext: 638 | core, rest, wn = DescriptionList.parse(rest) 639 | warnings.extend(wn) 640 | return [cls(header), core], rest, warnings 641 | elif header.title.lines[0] in ("See Also", "Returns", "See also"): 642 | if header.title.lines[0] == "See also": 643 | header.title.lines[0] = "See Also" 644 | try: 645 | core, rest = DeflistParser.parse(rest) 646 | except TryNext: 647 | print("Deflist failed trying Mapping... 
") 648 | core, rest, wn = Mapping.parse(rest) 649 | warnings.extend(wn) 650 | # core, rest, wn = DescriptionList.parse(rest) 651 | # warnings.extend(wn) 652 | 653 | return [cls(header), core], rest, warnings 654 | elif header.title.lines[0] in ("Examples",): 655 | return [cls(header)], rest, warnings 656 | elif header.title.lines[0] in ("Notes", "References"): 657 | return [cls(header)], rest, warnings 658 | 659 | else: 660 | raise ValueError(repr(header.title.lines[0])) 661 | 662 | def __init__(self, header): 663 | self.header = header 664 | 665 | def __repr__(self): 666 | return f"<{self.__class__.__name__}: {self.header!r}>" 667 | 668 | def __str__(self): 669 | return str(self.header) 670 | 671 | def _repr_html_(self): 672 | return self.header._repr_html_() 673 | 674 | 675 | class Paragraph: 676 | @classmethod 677 | def parse(cls, lines): 678 | _lines = [] 679 | l0 = lines[0] 680 | if not l0 or l0.startswith(" "): 681 | raise TryNext 682 | if len(lines) >= 2: 683 | if lines[1].startswith(" ") and lines[1].strip(): 684 | # second line indented this _is_ a deflist 685 | raise TryNext 686 | for i, line in enumerate(lines): 687 | if line and not line.startswith(" "): 688 | _lines.append(line) 689 | else: 690 | break 691 | if not _lines: 692 | raise TryNext 693 | return cls(_lines), lines[i + 1 :], [] 694 | 695 | def __init__(self, lines): 696 | self.lines = lines 697 | 698 | def __repr__(self): 699 | return f"<{self.__class__.__name__}: {self.lines}>" 700 | 701 | def __str__(self): 702 | return "\n".join(self.lines) 703 | 704 | def _repr_html_(self): 705 | return "

" + " ".join(self.lines) + "

" 706 | 707 | 708 | class BlankLine: 709 | @classmethod 710 | def parse(self, lines): 711 | if not lines[0].strip(): 712 | return BlankLine(), lines[1:], [] 713 | raise TryNext 714 | 715 | def __init__(self): 716 | pass 717 | 718 | def __repr__(self): 719 | return f"<{self.__class__.__name__}>" 720 | 721 | def __str__(self): 722 | return "" 723 | 724 | def _repr_html_(self): 725 | return "" 726 | 727 | 728 | def failed(lines): 729 | raise ValueError("nothign managed to parse", lines) 730 | 731 | 732 | def parsedoc(doc, *, name=None, sig=None): 733 | 734 | try: 735 | NumpyDocString(doc) 736 | lines = dedentfirst(doc).splitlines() 737 | return Doc.parse(lines, name=name, sig=sig) 738 | except Exception: 739 | return "" 740 | 741 | 742 | def find_indent_blocks(lines): 743 | """ 744 | Given a list of lines find _block_ by level of indentation 745 | 746 | - A block is considered a sequence of one or more lines, separated 747 | - once one level of indentation is encounter we don't split anymore, it will be up to the sub-parser. 748 | - A whitespace or empty line yield another block. 749 | """ 750 | if isinstance(lines, str): 751 | raise ValueError("split please") 752 | indents = compute_indents(lines) 753 | assert len(indents) == len(lines) 754 | l0 = lines[0] 755 | indent_level = indents[0] 756 | if indent_level is None: 757 | indent_level = 0 758 | assert indent_level == 0 759 | current_block = [l0] 760 | 761 | n_empty_lines = 0 762 | 763 | blocks = [] 764 | for new_level, line in zip(indents[1:], lines[1:]): 765 | if new_level is None: 766 | n_empty_lines += 1 767 | continue 768 | 769 | if n_empty_lines and new_level == 0: 770 | current_block.extend([""] * n_empty_lines) 771 | n_empty_lines = 0 772 | blocks.append((indent_level, current_block)) 773 | current_block = [line] 774 | indent_level = new_level 775 | continue 776 | if n_empty_lines: 777 | current_block.extend([""] * n_empty_lines) 778 | n_empty_lines = 0 779 | 780 | if indent_level == 0 and new_level: 781 | # start a new block 782 | blocks.append((0, current_block)) 783 | current_block = [line[new_level:]] 784 | indent_level = new_level 785 | continue 786 | 787 | # we are already in indented blocks. 
788 | if new_level >= indent_level: 789 | current_block.append(line[indent_level:]) 790 | elif new_level < indent_level: 791 | blocks.append((indent_level, current_block)) 792 | current_block = [line] 793 | indent_level = new_level 794 | continue 795 | if current_block: 796 | blocks.append((indent_level, current_block)) 797 | current_block = [] 798 | 799 | assert len(current_block) == 0 800 | return blocks 801 | 802 | 803 | def dedentfirst(docstring): 804 | from textwrap import dedent 805 | 806 | lines = docstring.splitlines() 807 | ln = dedent("\n".join(lines[1:])).splitlines() 808 | l0 = lines[0] 809 | return "\n".join([dedent(l0)] + ln) 810 | 811 | 812 | def reformat(input): 813 | return format(transform(parse(format(transform(parse(input)))))) 814 | 815 | 816 | # def main(): 817 | # 818 | # for filename in sys.argv[1:]: 819 | # p = Path(filename) 820 | # 821 | # with p.open() as f: 822 | # data = f.read() 823 | # print(reformat) 824 | 825 | 826 | if __name__ == "__main__": 827 | main() 828 | -------------------------------------------------------------------------------- /velin/ref.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import ast 3 | import difflib 4 | import re 5 | import sys 6 | from configparser import ConfigParser 7 | from pathlib import Path 8 | from textwrap import indent 9 | 10 | import numpydoc.docscrape as nds 11 | from numpydoc.docscrape import Parameter 12 | 13 | from velin.examples_section_utils import reformat_example_lines 14 | 15 | 16 | def f(a, b, *args, **kwargs): 17 | """ 18 | Parameters 19 | ---------- 20 | a: int 21 | its a 22 | u : int 23 | its b 24 | args : stuff 25 | var 26 | kwargs : stuff 27 | kwargs 28 | 29 | Returns 30 | ------- 31 | nothing: None 32 | 33 | See Also 34 | -------- 35 | a, b, c 36 | 37 | """ 38 | 39 | 40 | def g(a, b): 41 | """ 42 | Parameters 43 | ---------- 44 | a : int 45 | its a 46 | b : int 47 | its b 48 | args : stuff 49 | var 50 | kwargs : stuff 51 | kwargs 52 | Returns 53 | ------- 54 | nothing: None 55 | 56 | See Also 57 | -------- 58 | a, b, c 59 | 60 | """ 61 | 62 | 63 | class NodeVisitor(ast.NodeVisitor): 64 | def __init__(self, config): 65 | self.config = config 66 | self.items = [] 67 | self.stack = [] 68 | 69 | def visit(self, node): 70 | if type(node) in (ast.ClassDef, ast.FunctionDef): 71 | self.stack.append(node.name) 72 | # print(self.stack) 73 | oname = ".".join(self.stack) 74 | if oname in self.config["skip"]: 75 | print("SKIPPING", oname) 76 | self.stack.pop() 77 | return 78 | super().visit(node) 79 | self.stack.pop() 80 | elif type(node) in ( 81 | ast.Load, 82 | ast.Name, 83 | ast.Call, 84 | ast.Compare, 85 | ast.Attribute, 86 | ast.Expr, 87 | ast.arguments, 88 | ast.Import, 89 | ast.alias, 90 | ast.Constant, 91 | ast.Store, 92 | ast.Assign, 93 | ast.arg, 94 | ): 95 | super().visit(node) 96 | else: 97 | # print(type(node)) 98 | super().visit(node) 99 | 100 | def visit_FunctionDef(self, node): 101 | # we can collect function args, and _could_ check the Parameter section. 
102 | # for nn in node.args.args: 103 | # nnn = nn.arg 104 | # for k in [l for l in dir(node) if not l.startswith('_')]: 105 | # sub = getattr(node, k) 106 | # print(k, sub) 107 | # for u in [l for l in dir(sub) if not l.startswith('_')]: 108 | # ss = getattr(sub, u) 109 | # print(' ', u, ss) 110 | 111 | # node.args.kwarg # single object 112 | # node.args.vararg # object 113 | # node.args.ks_defaults #list 114 | # print(node.returns) 115 | # node.args.defaults + 116 | 117 | meta = { 118 | "simple": [ 119 | arg 120 | for arg in node.args.posonlyargs + node.args.args + node.args.kwonlyargs 121 | ], 122 | "varargs": None, 123 | "varkwargs": None, 124 | } 125 | if node.args.kwarg: 126 | meta["varkwargs"] = node.args.kwarg 127 | dir(node.args.kwarg) 128 | if node.args.vararg: 129 | meta["varargs"] = node.args.vararg 130 | # print(arg.arg) 131 | # for k in [l for l in dir(arg) if not l.startswith('_')]: 132 | # sub = getattr(arg, k) 133 | # print(' ', k, sub) 134 | 135 | self.items.append((node, meta, ".".join(self.stack))) 136 | self.generic_visit(node) 137 | 138 | def visit_ClassDef(self, node): 139 | # we can collect function args, and _could_ check the Parameter section. 140 | # for nn in node.args.args: 141 | # nnn = nn.arg 142 | 143 | # self.items.append((node, None)) 144 | self.generic_visit(node) 145 | 146 | 147 | BLACK_REFORMAT = True 148 | 149 | 150 | class NumpyDocString(nds.NumpyDocString): 151 | """ 152 | subclass a littel bit more lenient on parsing 153 | """ 154 | 155 | aliases = { 156 | "Parameters": ( 157 | "options", 158 | "parameter", 159 | "parameters", 160 | "paramters", 161 | "parmeters", 162 | "paramerters", 163 | "arguments", 164 | "args", 165 | ), 166 | "Attributes": ("properties",), 167 | "Yields": ("signals",), 168 | } 169 | 170 | def normalize(self): 171 | """ 172 | Apply a bunch of heuristic that try to normalise the data. 173 | """ 174 | if params := self["Parameters"]: 175 | for i, p in enumerate(params): 176 | if not p.type and (":" in p.name) and not p.name.endswith(":"): 177 | if p.name.startswith(".."): 178 | continue 179 | if re.match(":\w+:`", p.name): 180 | print("may have a directive", p.name) 181 | try: 182 | name, type_ = ( 183 | _.strip() for _ in p.name.split(": ", maxsplit=1) 184 | ) 185 | except Exception as e: 186 | raise type(e)(p.name) 187 | params[i] = nds.Parameter(name, type_, p[2]) 188 | 189 | def parse_examples(self, lines, indent=4): 190 | # this is bad practice be we do want normalisation here for now 191 | # to check that parse->format->parse is idempotent. 192 | # this can be done if we had a separate "normalize" step. 
193 | global BLACK_REFORMAT 194 | if BLACK_REFORMAT: 195 | try: 196 | lines = reformat_example_lines(lines, indent=indent) 197 | except Exception: 198 | print("black failed") 199 | print("\n".join(lines)) 200 | raise 201 | 202 | return lines 203 | 204 | def to_json(self): 205 | 206 | res = {k: v for (k, v) in self.__dict__.items() if ((k not in {"_doc"}) and v)} 207 | res["_parsed_data"] = {k: v for (k, v) in res["_parsed_data"].items() if v} 208 | 209 | return res 210 | 211 | @classmethod 212 | def from_json(cls, obj): 213 | nds = cls("") 214 | nds.__dict__.update(obj) 215 | # print(obj['_parsed_data'].keys()) 216 | nds._parsed_data["Parameters"] = [ 217 | Parameter(a, b, c) for (a, b, c) in nds._parsed_data.get("Parameters", []) 218 | ] 219 | 220 | for it in ( 221 | "Returns", 222 | "Yields", 223 | "Extended Summary", 224 | "Receives", 225 | "Other Parameters", 226 | "Raises", 227 | "Warns", 228 | "Warnings", 229 | "See Also", 230 | "Notes", 231 | "References", 232 | "Examples", 233 | "Attributes", 234 | "Methods", 235 | ): 236 | if it not in nds._parsed_data: 237 | nds._parsed_data[it] = [] 238 | for it in ("index",): 239 | if it not in nds._parsed_data: 240 | nds._parsed_data[it] = {} 241 | return nds 242 | 243 | def __init__(self, *args, **kwargs): 244 | self.ordered_sections = [] 245 | super().__init__(*args, **kwargs) 246 | 247 | def __setitem__(self, key, value): 248 | if key in ["Extended Summary", "Summary"]: 249 | value = [d.rstrip() for d in value] 250 | 251 | if key in ("Examples"): 252 | value = self.parse_examples(value) 253 | super().__setitem__(key, value) 254 | assert key not in self.ordered_sections 255 | self.ordered_sections.append(key) 256 | 257 | def _guess_header(self, header): 258 | if header in self.sections: 259 | return header 260 | # handle missing trailing `s`, and trailing `:` 261 | for s in self.sections: 262 | if s.lower().startswith(header.rstrip(":").lower()): 263 | return s 264 | for k, v in self.aliases.items(): 265 | if header.lower() in v: 266 | return k 267 | raise ValueError("Cound not find match for section:", header) 268 | 269 | def _read_sections(self): 270 | for name, data in super()._read_sections(): 271 | name = self._guess_header(name) 272 | yield name, data 273 | 274 | def _parse_param_list(self, *args, **kwargs): 275 | """ 276 | Normalize parameters 277 | """ 278 | parms = super()._parse_param_list(*args, **kwargs) 279 | out = [] 280 | for name, type_, desc in parms: 281 | out.append( 282 | nds.Parameter(name.strip(), type_.strip(), [d.rstrip() for d in desc]) 283 | ) 284 | return out 285 | 286 | 287 | def w(orig): 288 | """Util function that shows whitespace as `·`.""" 289 | lines = [] 290 | for line in orig: 291 | if line[0] in "+-": 292 | # ll.append(l.replace(' ', '⎵')) 293 | lines.append(line.replace(" ", "·")) 294 | 295 | else: 296 | lines.append(line) 297 | processed_lines = [] 298 | for line in lines: 299 | if line.endswith("\n"): 300 | processed_lines.append(line[:-1]) 301 | else: 302 | processed_lines.append(line[:]) 303 | return processed_lines 304 | 305 | 306 | class Config: 307 | """ 308 | 309 | Here are some of the config options 310 | 311 | - F100: Items spacing in Return/Raise/Yield/Parameters/See Also 312 | - A(auto) - spaces if some sections have blank lines. (default) 313 | - B(compact) - no spaces between items, ever. 
314 | - C(spaced) - spaces between items, always 315 | 316 | """ 317 | 318 | _compact_param = True 319 | _space_in_see_also_title = False 320 | _space_in_notes_title = False 321 | _run_fixers = True 322 | 323 | def __init__(self, conf): 324 | self._conf = conf 325 | 326 | def __getattr__(self, key): 327 | if key in self._conf: 328 | return self._conf[key] 329 | else: 330 | return getattr(type(self), "_" + key) 331 | 332 | 333 | class SectionFormatter: 334 | """ 335 | Render section of the docs, based on some configuration. Not having 336 | configuration is great, but everybody has their pet peevs, and I'm hpping we 337 | can progressively convince them to adopt a standard. 338 | """ 339 | 340 | def __init__(self, *, conf): 341 | assert isinstance(conf, Config) 342 | self.config = conf 343 | 344 | @classmethod 345 | def format_Signature(self, s, compact): 346 | return s + "\n" 347 | 348 | @classmethod 349 | def format_Summary(self, s, compact): 350 | if len(s) == 1 and not s[0].strip(): 351 | return "" 352 | return "\n".join(s) + "\n" 353 | 354 | @classmethod 355 | def format_Extended_Summary(self, es, compact): 356 | return "\n".join(es) + "\n" 357 | 358 | def _format_ps(self, name, ps, compact): 359 | res, try_other = self._format_ps_pref(name, ps, compact=True) 360 | if not try_other or self.config.compact_param: 361 | return res 362 | return self._format_ps_pref(name, ps, compact=False)[0] 363 | 364 | def _format_ps_pref(self, name, ps, *, compact): 365 | try_other = False 366 | out = name + "\n" 367 | out += "-" * len(name) + "\n" 368 | for i, p in enumerate(ps): 369 | if (not compact) and i: 370 | out += "\n" 371 | if p.type: 372 | out += f"""{p.name.strip()} : {p.type.strip()}\n""" 373 | else: 374 | out += f"""{p.name.strip()}\n""" 375 | if p.desc: 376 | if any([line.strip() == "" for line in p.desc]): 377 | try_other = True 378 | 379 | out += indent("\n".join(p.desc), " ") 380 | out += "\n" 381 | return out, try_other 382 | 383 | def format_Parameters(self, ps, compact): 384 | return self._format_ps("Parameters", ps, compact) 385 | 386 | def format_Methods(self, ps, compact): 387 | return self._format_ps("Methods", ps, compact) 388 | 389 | def format_Other_Parameters(self, ps, compact): 390 | return self._format_ps("Other Parameters", ps, compact) 391 | 392 | def format_See_Also(self, sas, compact): 393 | 394 | res = self.format_See_Also_impl(sas, True, force_compact=compact) 395 | if res is not None: 396 | return res 397 | return self.format_See_Also_impl(sas, False, force_compact=compact) 398 | 399 | def format_See_Also_impl(self, sas, compact, force_compact, *varargs, **varkwargs): 400 | """ 401 | Format a see also section. 
402 | 403 | 404 | """ 405 | out = "See Also\n" 406 | out += "--------\n" 407 | if self.config.space_in_see_also_title: 408 | out += "\n" 409 | 410 | for a, b in sas: 411 | if b: 412 | desc = b[0] 413 | else: 414 | desc = None 415 | if len(b) > 1: 416 | rest_desc = b[1:] 417 | else: 418 | rest_desc = [] 419 | _first = True 420 | for ref, type_ in a: 421 | # if len(a) > 1: 422 | # assert type_ is None, a # matplotlib mlab cohere 423 | if not _first: 424 | out += ", " 425 | if type_ is not None: 426 | out += f":{type_}:`{ref}`" 427 | else: 428 | out += f"{ref}" 429 | _first = False 430 | 431 | if desc: 432 | if len(a) > 1 or (not compact): 433 | out += f" :\n {desc}" 434 | else: 435 | attempt = f" : {desc}" 436 | if len(out.splitlines()[-1] + attempt) > 80 and not force_compact: 437 | return None 438 | out += attempt 439 | for rd in rest_desc: 440 | out += "\n " + rd 441 | out += "\n" 442 | return out 443 | 444 | @classmethod 445 | def format_References(cls, lines, compact): 446 | out = "References\n" 447 | out += "----------\n" 448 | out += "\n".join(lines) 449 | out += "\n" 450 | return out 451 | 452 | def format_Notes(self, lines, compact): 453 | out = "Notes\n" 454 | out += "-----\n" 455 | if self.config.space_in_notes_title: 456 | out += "\n" 457 | out += "\n".join(lines) 458 | out += "\n" 459 | return out 460 | 461 | @classmethod 462 | def format_Examples(cls, lines, compact): 463 | out = "Examples\n" 464 | out += "--------\n" 465 | out += "\n".join(lines) 466 | out += "\n" 467 | return out 468 | 469 | @classmethod 470 | def format_Warnings(cls, lines, compact): 471 | out = "Warnings\n" 472 | out += "--------\n" 473 | out += "\n".join(lines) 474 | out += "\n" 475 | return out 476 | 477 | @classmethod 478 | def format_Warns(cls, ps, compact): 479 | return cls.format_RRY("Warns", ps) 480 | 481 | @classmethod 482 | def format_Raises(cls, ps, compact): 483 | return cls.format_RRY("Raises", ps) 484 | 485 | @classmethod 486 | def format_Yields(cls, ps, compact): 487 | return cls.format_RRY("Yields", ps) 488 | 489 | @classmethod 490 | def format_Returns(cls, ps, compact): 491 | return cls.format_RRY("Returns", ps) 492 | 493 | @classmethod 494 | def format_Attributes(cls, ps, compact): 495 | return cls.format_RRY("Attributes", ps) 496 | 497 | @classmethod 498 | def format_RRY(cls, name, ps): 499 | out = name + "\n" 500 | out += "-" * len(name) + "\n" 501 | 502 | if name == "Returns": 503 | if len(ps) > 1: 504 | # do heuristic to check we actually have a description list and not a paragraph 505 | pass 506 | 507 | for i, p in enumerate(ps): 508 | # if i: 509 | # out += "\n" 510 | if p.type and re.match("\w+:`", p.type): 511 | print( 512 | "Warning numpydoc may have misparsed this section.", p.name, p.type 513 | ) 514 | if p.name and p.type: 515 | out += f"""{p.name.strip()} : {p.type.strip()}\n""" 516 | elif p.name: 517 | out += f"""{p.name.strip()}\n""" 518 | else: 519 | out += f"""{p.type.strip()}\n""" 520 | if p.desc: 521 | out += indent("\n".join(p.desc), " ") 522 | out += "\n" 523 | return out 524 | 525 | 526 | def dedend_docstring(docstring): 527 | import textwrap 528 | 529 | lines = docstring.splitlines() 530 | if len(lines) >= 2: 531 | l0, *lns = docstring.split("\n") 532 | l0 = textwrap.dedent(l0) 533 | lns = textwrap.dedent("\n".join(lns)).split("\n") 534 | docstring = [l0] + lns 535 | else: 536 | docstring = textwrap.dedent(docstring).split("\n") 537 | return "\n".join(docstring) 538 | 539 | 540 | def parameter_fixer(params, meta_arg, meta, fname, func_name, config, doc): 541 | assert 
"Parameters" in doc 542 | incorrect_number = False 543 | jump_to_location = False 544 | if not config.run_fixers: 545 | return params, jump_to_location 546 | pnames = [o.strip() for p in params for o in p.name.split(",") if p.name] 547 | if meta_arg and meta_arg[0] in ["self", "cls"]: 548 | meta_arg = meta_arg[1:] 549 | doc_missing = set(meta_arg) - set(pnames) - {"cls"} 550 | doc_extra = {x for x in set(pnames) - set(meta_arg) if not x.startswith("*")} - { 551 | "cls", 552 | } 553 | for p in params: 554 | if p[1].startswith("<"): 555 | jump_to_location = True 556 | assert doc_extra != {""}, (set(meta), set(meta_arg), params) 557 | # don't considert template parameter from numpy/scipy 558 | doc_extra = {x for x in doc_extra if not (("$" in x) or ("%" in x))} 559 | 560 | def rename_param(source, target): 561 | renamed = False 562 | for i, p in enumerate(params): 563 | if p.name == source: 564 | params[i] = nds.Parameter(target, *p[1:]) 565 | renamed = True 566 | break 567 | return renamed 568 | 569 | if doc_missing and doc_extra: 570 | # we need to match them maybe: 571 | # are we missing *, ** in args, and kwargs ? 572 | 573 | for stars in ("*", "**"): 574 | n_star_missing = doc_missing.intersection({stars + k for k in doc_extra}) 575 | if n_star_missing: 576 | correct = list(n_star_missing)[0] 577 | incorrect = correct[len(stars) :] 578 | rename_param(incorrect, correct) 579 | doc_missing.remove(correct) 580 | doc_extra.remove(incorrect) 581 | for param in list(doc_extra): 582 | if ( 583 | param.startswith(('"', "'")) 584 | and param.endswith(('"', "'")) 585 | and param[1:-1] in doc_missing 586 | ): 587 | correct = param[1:-1] 588 | rename_param(param, correct) 589 | print("unquote", param, "to", correct) 590 | doc_missing.remove(correct) 591 | doc_extra.remove(param) 592 | 593 | if len(doc_missing) == len(doc_extra) == 1: 594 | correct = list(doc_missing)[0] 595 | incorrect = list(doc_extra)[0] 596 | do_rename = True 597 | if "*" in correct and ("*" not in incorrect): 598 | if correct.replace("*", "") != incorrect.replace("*", ""): 599 | # this is likely undocumented **kwargs. 
600 | do_rename = False 601 | 602 | if do_rename: 603 | if rename_param(incorrect, correct): 604 | print(f"{fname}:{func_name}") 605 | print(f" renamed {incorrect!r} to {correct!r}") 606 | doc_missing = {} 607 | doc_extra = {} 608 | else: 609 | print(" could not fix:", doc_missing, doc_extra) 610 | if doc_missing and not doc_extra and config.with_placeholder: 611 | for param in doc_missing: 612 | if "*" in param: 613 | continue 614 | annotation_str = "" 615 | current_param = [m for m in meta["simple"] if m.arg == param] 616 | assert len(current_param) == 1, (current_param, meta, param) 617 | current_param = current_param[0] 618 | if type(current_param.annotation).__name__ == "Name": 619 | annotation_str = str(current_param.annotation.id) 620 | doc["Parameters"].append( 621 | nds.Parameter( 622 | param, 623 | f"{annotation_str}", 624 | [""], 625 | ) 626 | ) 627 | elif ( 628 | (not doc_missing) 629 | and doc_extra 630 | and ("Parameters" in doc) # always True 631 | and (not meta["varkwargs"]) 632 | ): 633 | print(f"{fname}:{func_name}") 634 | to_remove = [p for p in doc["Parameters"] if p[0] in doc_extra] 635 | for remove_me in to_remove: 636 | if " " in remove_me.name and not remove_me.type and not remove_me.desc: 637 | # this is likely some extra text 638 | continue 639 | print( 640 | f" removing parameters {remove_me!r}", 641 | ) 642 | params.remove(remove_me) 643 | elif doc_missing or doc_extra: 644 | incorrect_number = True 645 | print(f"{fname}:{func_name}") 646 | if doc_missing: 647 | print(" missing:", doc_missing) 648 | if doc_extra: 649 | print(" extra:", doc_extra) 650 | 651 | return params, jump_to_location, incorrect_number 652 | 653 | 654 | def compute_new_doc(docstr, fname, *, level, compact, meta, func_name, config): 655 | """ 656 | compute a new docstring that shoudl be numpydoc compliant. 657 | 658 | Parameters 659 | ---------- 660 | docstr : str 661 | docstring to reformat 662 | fname : str 663 | filename of the file beign reformatted (for error messages) 664 | level : int 665 | indentation level 666 | compact : bool 667 | use compact formating in definition list. 668 | meta : list 669 | meta info about the function to verify docstrings, for example 670 | list of parameters. 671 | func_name : str 672 | function name for debug. 
673 | config : 674 | 675 | 676 | Returns 677 | ------- 678 | str 679 | new docstring 680 | Numpydoc 681 | parsed numpydoc object 682 | bool 683 | bool 684 | 685 | """ 686 | assert config is not None 687 | 688 | fail_check = False 689 | INDENT = level * " " 690 | NINDENT = "\n" + INDENT 691 | original_docstr = docstr 692 | if len(docstr.splitlines()) <= 1: 693 | return "", NumpyDocString(""), None, fail_check 694 | shortdoc = bool(docstr.splitlines()[0].strip()) 695 | short_with_space = False 696 | if not docstr.startswith(NINDENT): 697 | if original_docstr[0] == " ": 698 | short_with_space = True 699 | 700 | long_end = True 701 | long_with_space = True 702 | if original_docstr.splitlines()[-1].strip(): 703 | long_end = False 704 | if original_docstr.splitlines()[-2].strip(): 705 | long_with_space = False 706 | 707 | try: 708 | doc = NumpyDocString(dedend_docstring(docstr)) 709 | except Exception as e: 710 | raise type(e)(f"error in {fname}:{func_name}") from e 711 | if config.run_fixers: 712 | doc.normalize() 713 | meta_arg = [m.arg for m in meta["simple"]] 714 | if meta["varargs"]: 715 | meta_arg.append("*" + meta["varargs"].arg) 716 | if meta["varkwargs"]: 717 | meta_arg.append("**" + meta["varkwargs"].arg) 718 | 719 | jump_to_location = False 720 | if (params := doc["Parameters"]) and meta: 721 | 722 | params, jump_to_location, incorrect_number = parameter_fixer( 723 | params, meta_arg, meta, fname, func_name, config, doc 724 | ) 725 | if incorrect_number: 726 | fail_check = True 727 | 728 | fmt = "" 729 | start = True 730 | # ordered_section is a local patch to that records the docstring order. 731 | df = SectionFormatter(conf=config) 732 | for s in getattr(doc, "ordered_sections", doc.sections): 733 | if doc[s]: 734 | f = getattr(df, "format_" + s.replace(" ", "_")) 735 | res = f(doc[s], compact) 736 | if not res: 737 | continue 738 | if not start: 739 | fmt += "\n" 740 | start = False 741 | fmt += res 742 | fmt = indent(fmt, INDENT) + INDENT 743 | 744 | # hack to detect if we have seen a header section. 745 | if "----" in fmt or True: 746 | if long_with_space: 747 | fmt = fmt.rstrip(" ") + NINDENT 748 | else: 749 | fmt = fmt.rstrip(" ") + INDENT 750 | if shortdoc: 751 | fmt = fmt.lstrip() 752 | if short_with_space: 753 | fmt = " " + fmt 754 | else: 755 | fmt = "\n" + fmt 756 | if not long_end: 757 | fmt = fmt.rstrip() 758 | assert fmt 759 | # we can't just do that as See Also and a few other would be sphinxified. 
760 |     # return indent(str(doc),' ')+'\n '
761 |     if fmt != original_docstr:
762 |         fail_check = True
763 |     return fmt, doc, jump_to_location, fail_check
764 | 
765 | 
766 | def reformat_file(data, filename, compact, unsafe, fail=False, config=None, obj_p=None):
767 |     """
768 |     Parameters
769 |     ----------
770 |     compact : bool
771 |         whether to use compact formatting
772 |     data :
773 | 
774 |     unsafe : bool
775 | 
776 |     fail :
777 | 
778 |     config :
779 | 
780 |     filename :
781 | 
782 | 
783 |     """
784 |     return _reformat_file(
785 |         data, filename, compact, unsafe, fail=fail, config=config, obj_p=obj_p
786 |     )[0]
787 | 
788 | 
789 | def _reformat_file(
790 |     data, filename, compact, unsafe, fail=False, config=None, obj_p=None
791 | ):
792 |     """
793 |     Parameters
794 |     ----------
795 |     compact : bool
796 |         whether to use compact formatting
797 |     data :
798 | 
799 |     unsafe : bool
800 | 
801 |     fail :
802 | 
803 |     config :
804 | 
805 |     filename :
806 | 
807 | 
808 |     Returns
809 |     -------
810 |     str
811 |         The new file content
812 |     bool
813 |         Whether this file should fail under the --check flag
814 | 
815 |     """
816 |     fail_check = False
817 |     assert config is not None
818 | 
819 |     tree = ast.parse(data, filename)
820 |     new = data
821 | 
822 |     # funcs = [t for t in tree.body if isinstance(t, ast.FunctionDef)]
823 |     funcs = NodeVisitor({"skip": obj_p})
824 |     funcs.visit(tree)
825 |     funcs = funcs.items
826 |     for i, (func, meta, qname) in enumerate(funcs[:]):
827 |         # print(i, "==", func.name, "==")
828 |         try:
829 |             e0 = func.body[0]
830 |             if not isinstance(e0, ast.Expr):
831 |                 continue
832 |             # e0.value is _likely_ a Constant node.
833 |             docstring = e0.value.s
834 |             func_name = func.name
835 |         except AttributeError:
836 |             continue
837 |         if not isinstance(docstring, str):
838 |             continue
839 |         start, nindent, _ = (
840 |             func.body[0].lineno,
841 |             func.body[0].col_offset,
842 |             func.body[0].end_lineno,
843 |         )
844 |         # if not docstring in data:
845 |         # print(f"skip {file}: {func.name}, can't do replacement yet")
846 |         try:
847 |             new_doc, d_, jump_to_loc, _fail_check = compute_new_doc(
848 |                 docstring,
849 |                 filename,
850 |                 level=nindent,
851 |                 compact=compact,
852 |                 meta=meta,
853 |                 func_name=func_name,
854 |                 config=config,
855 |             )
856 |             if _fail_check:
857 |                 fail_check = True
858 |             if jump_to_loc:
859 |                 print("mvim", f"+{start}", filename)
860 |                 pass
861 |                 # call editor with file and line number
862 |             elif not unsafe:
863 |                 _, d2, _, _fail_check = compute_new_doc(
864 |                     docstring,
865 |                     filename,
866 |                     level=nindent,
867 |                     compact=compact,
868 |                     meta=meta,
869 |                     func_name=func_name,
870 |                     config=config,
871 |                 )
872 |                 if _fail_check:
873 |                     fail_check = True
874 |                 if not d2._parsed_data == d_._parsed_data:
875 |                     secs1 = {
876 |                         k: v
877 |                         for k, v in d2._parsed_data.items()
878 |                         if v != d_._parsed_data[k]
879 |                     }
880 |                     secs2 = {
881 |                         k: v
882 |                         for k, v in d_._parsed_data.items()
883 |                         if v != d2._parsed_data[k]
884 |                     }
885 |                     raise ValueError(
886 |                         "Numpydoc parsing seems to differ after reformatting; this may be a reformatting bug. Rerun with `velin --unsafe "
887 |                         + str(filename)
888 |                         + ":"
889 |                         + qname
890 |                         + "`\n"
891 |                         + str(secs1)
892 |                         + "\n"
893 |                         + str(secs2),
894 |                     )
895 |         except Exception as e:
896 |             print(f"something went wrong with {filename}:{qname} :\n\n{docstring}")
897 |             if fail:
898 |                 raise
899 |             else:
900 |                 print(e)
901 |                 continue
902 |         if not docstring.strip():
903 |             print("DOCSTRING IS EMPTY !!!", func.name)
904 |         # test(docstring, file)
905 |         if new_doc.strip() and new_doc != docstring:
906 |             # need_changes.append(str(filename) + f":{start}:{func.name}")
907 |             if ('"""' in new_doc) or ("'''" in new_doc):
908 |                 # print(
909 |                 #     "SKIPPING", filename, func.name, "triple quote not handled", new_doc
910 |                 # )
911 |                 pass
912 |             else:
913 |                 # if docstring not in new:
914 |                 #     print("ESCAPE issue:", docstring)
915 |                 new = new.replace(docstring, new_doc)
916 |                 fail_check = True
917 |     return new, fail_check
918 | 
919 | 
920 | class SkipPattern:
921 |     def __init__(self, value):
922 |         if ":" in value:
923 |             self.file_pattern, self.obj_pattern = value.split(":")
924 |         else:
925 |             self.file_pattern, self.obj_pattern = value, None
926 | 
927 |     @property
928 |     def file(self):
929 |         if "*" in self.file_pattern:
930 |             return self.file_pattern + ""
931 |         else:
932 |             return ".*" + self.file_pattern + ".*"
933 | 
934 |     def __repr__(self):
935 |         return f""
936 | 
937 | 
938 | def main():
939 |     _config = ConfigParser()
940 |     patterns = []
941 |     if Path("setup.cfg").exists():
942 |         _config.read("setup.cfg")
943 |         patterns = [
944 |             SkipPattern(x.strip())
945 |             for x in _config.get("velin", "ignore_patterns", fallback="").split("\n")
946 |             if x
947 |         ]
948 | 
949 |     parser = argparse.ArgumentParser(description="reformat the docstrings of some files")
950 |     parser.add_argument(
951 |         "paths",
952 |         metavar="path",
953 |         type=str,
954 |         nargs="+",
955 |         help="Files or folder to reformat",
956 |     )
957 |     parser.add_argument(
958 |         "--context",
959 |         metavar="context",
960 |         type=int,
961 |         default=3,
962 |         help="Number of context lines in the diff",
963 |     )
964 |     parser.add_argument(
965 |         "--unsafe",
966 |         action="store_true",
967 |         help="Lift some safety features (don't fail if updating the docstring is not idempotent)",
968 |     )
969 |     parser.add_argument(
970 |         "--check",
971 |         action="store_true",
972 |         help="Print the list of files/line numbers and exit with a non-zero exit status. Use it for CI.",
973 |     )
974 |     parser.add_argument(
975 |         "--no-diff",
976 |         action="store_false",
977 |         dest="print_diff",
978 |         help="Do not print the diff",
979 |     )
980 |     parser.add_argument(
981 |         "--black",
982 |         action="store_true",
983 |         dest="run_black",
984 |         help="Run black on the examples sections",
985 |     )
986 |     parser.add_argument(
987 |         "--with-placeholder",
988 |         action="store_true",
989 |         dest="with_placeholder",
990 |         help="insert placeholders for missing sections/parameters",
991 |     )
992 |     parser.add_argument("--no-color", action="store_false", dest="do_highlight")
993 |     parser.add_argument("--compact", action="store_true", help="Please ignore")
994 |     parser.add_argument("--no-fail", action="store_false", dest="fail")
995 |     parser.add_argument(
996 |         "--space-in-see-also-title", action="store_true", dest="space_in_see_also_title"
997 |     )
998 |     parser.add_argument(
999 |         "--space-in-notes-title", action="store_true", dest="space_in_notes_title"
1000 |     )
1001 |     parser.add_argument(
1002 |         "--no-fixers",
1003 |         action="store_false",
1004 |         dest="run_fixers",
1005 |         help="only reformat; do not run the fixer heuristics",
1006 |     )
1007 |     parser.add_argument(
1008 |         "--write",
1009 |         dest="write",
1010 |         action="store_true",
1011 |         help="Try to write the updated docstring to the files",
1012 |     )
1013 |     parser.add_argument(
1014 |         "--verbose",
1015 |         action="store_true",
1016 |         help="increase the verbosity of the output",
1017 |     )
1018 | 
1019 |     args = parser.parse_args()
1020 | 
1021 |     config = Config(
1022 |         {
1023 |             "with_placeholder": args.with_placeholder,
1024 |             "compact_param": args.compact,
1025 |             "space_in_see_also_title": args.space_in_see_also_title,
1026 |             "space_in_notes_title": args.space_in_notes_title,
1027 |             "run_fixers": args.run_fixers,
1028 |         }
1029 |     )
1030 |     global BLACK_REFORMAT
1031 |     if args.run_black:
1032 |         BLACK_REFORMAT = True
1033 |     else:
1034 |         BLACK_REFORMAT = False
1035 | 
1036 |     global print
1037 |     if args.verbose:
1038 |         try:
1039 |             from there import print
1040 |         except ImportError:
1041 |             pass
1042 | 
1043 |     to_format = []
1044 | 
1045 |     for f in args.paths:
1046 |         p = Path(f)
1047 |         if p.is_dir():
1048 |             for sf in p.glob("**/*.py"):
1049 |                 to_format.append(sf)
1050 |         else:
1051 |             to_format.append(p)
1052 | 
1053 |     def to_skip(file, patterns):
1054 |         for p in patterns:
1055 |             if re.match(p.file, file):
1056 |                 if p.obj_pattern is None:
1057 |                     return True
1058 |                 else:
1059 |                     continue
1060 |         return False
1061 | 
1062 |     need_changes = []
1063 |     for file in to_format:
1064 |         if to_skip(str(file), patterns):
1065 |             print("ignoring", file)
1066 |             continue
1067 | 
1068 |         try:
1069 |             with open(file) as f:
1070 |                 data = f.read()
1071 |         except Exception as e:
1072 |             # continue
1073 |             continue
1074 |             raise RuntimeError(f"Fail reading {file}") from e
1075 | 
1076 |         obj_p = [p.obj_pattern for p in patterns if re.match(p.file, str(file))]
1077 |         new, _fail_check = _reformat_file(
1078 |             data,
1079 |             file,
1080 |             args.compact,
1081 |             args.unsafe,
1082 |             fail=args.fail,
1083 |             config=config,
1084 |             obj_p=obj_p,
1085 |         )
1086 |         # test(docstring, file)
1087 |         if new != data:
1088 |             need_changes.append(str(file))
1089 | 
1090 |             dold = data.splitlines()
1091 |             dnew = new.splitlines()
1092 |             diffs = list(
1093 |                 difflib.unified_diff(
1094 |                     dold, dnew, n=args.context, fromfile=str(file), tofile=str(file)
1095 |                 ),
1096 |             )
1097 | 
1098 |             if args.print_diff and not args.write:
1099 |                 code = "\n".join(diffs)
1100 | 
1101 |                 if args.do_highlight:
1102 |                     from pygments import highlight
1103 |                     from pygments.formatters import TerminalFormatter
1104 |                     from pygments.lexers import DiffLexer
1105 | 
1106 |                     code = highlight(code, DiffLexer(), TerminalFormatter())
1107 | 
1108 |                 print(code)
1109 |             if args.write:
1110 |                 with open(file, "w") as f:
1111 |                     f.write(new)
1112 |         elif _fail_check:
1113 |             need_changes.append(str(file))
1114 | 
1115 |     if args.check:
1116 |         if len(need_changes) != 0:
1117 |             sys.exit(
1118 |                 "Some files/functions need updates:\n - " + "\n - ".join(need_changes)
1119 |             )
1120 |         else:
1121 |             sys.exit(0)
1122 | 
--------------------------------------------------------------------------------
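For context, the sketch below shows one way the reformatter defined in ref.py could be driven programmatically rather than through the velin command line. This is a minimal sketch, assuming velin is installed, that Config and reformat_file are importable from velin.ref as defined above, and that "mymodule.py" is a hypothetical target file.

# Hedged sketch: call the reformatter from Python instead of the CLI.
# Assumptions: velin installed; Config and reformat_file importable from
# velin.ref as shown above; "mymodule.py" is a hypothetical file name.
from pathlib import Path

from velin.ref import Config, reformat_file

target = Path("mymodule.py")
source = target.read_text()
config = Config(
    {
        # mirror the defaults that main() would build from its CLI flags
        "with_placeholder": False,
        "compact_param": False,
        "space_in_see_also_title": False,
        "space_in_notes_title": False,
        "run_fixers": True,
    }
)
# reformat_file returns the new file content; write it back only if it changed.
new_source = reformat_file(source, str(target), compact=False, unsafe=False, config=config)
if new_source != source:
    target.write_text(new_source)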